Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig15
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/ac.c4
-rw-r--r--drivers/acpi/acpi_memhotplug.c11
-rw-r--r--drivers/acpi/asus_acpi.c227
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/bay.c411
-rw-r--r--drivers/acpi/bus.c78
-rw-r--r--drivers/acpi/button.c6
-rw-r--r--drivers/acpi/cm_sbs.c8
-rw-r--r--drivers/acpi/container.c4
-rw-r--r--drivers/acpi/debug.c2
-rw-r--r--drivers/acpi/dispatcher/dsmethod.c3
-rw-r--r--drivers/acpi/dispatcher/dsmthdat.c83
-rw-r--r--drivers/acpi/dispatcher/dsobject.c62
-rw-r--r--drivers/acpi/dispatcher/dsopcode.c2
-rw-r--r--drivers/acpi/dispatcher/dswexec.c12
-rw-r--r--drivers/acpi/dock.c388
-rw-r--r--drivers/acpi/ec.c400
-rw-r--r--drivers/acpi/executer/exconfig.c123
-rw-r--r--drivers/acpi/executer/exconvrt.c32
-rw-r--r--drivers/acpi/executer/exdump.c87
-rw-r--r--drivers/acpi/executer/exmisc.c14
-rw-r--r--drivers/acpi/executer/exoparg1.c26
-rw-r--r--drivers/acpi/executer/exoparg2.c4
-rw-r--r--drivers/acpi/executer/exresnte.c16
-rw-r--r--drivers/acpi/executer/exresolv.c60
-rw-r--r--drivers/acpi/executer/exresop.c50
-rw-r--r--drivers/acpi/executer/exstore.c61
-rw-r--r--drivers/acpi/executer/exstoren.c3
-rw-r--r--drivers/acpi/fan.c6
-rw-r--r--drivers/acpi/hardware/hwsleep.c44
-rw-r--r--drivers/acpi/namespace/Makefile2
-rw-r--r--drivers/acpi/namespace/nsdump.c5
-rw-r--r--drivers/acpi/namespace/nseval.c73
-rw-r--r--drivers/acpi/namespace/nsnames.c7
-rw-r--r--drivers/acpi/namespace/nspredef.c900
-rw-r--r--drivers/acpi/namespace/nssearch.c2
-rw-r--r--drivers/acpi/namespace/nsxfeval.c78
-rw-r--r--drivers/acpi/namespace/nsxfname.c5
-rw-r--r--drivers/acpi/numa.c2
-rw-r--r--drivers/acpi/osl.c55
-rw-r--r--drivers/acpi/parser/psloop.c2
-rw-r--r--drivers/acpi/parser/psparse.c32
-rw-r--r--drivers/acpi/pci_link.c6
-rw-r--r--drivers/acpi/pci_root.c4
-rw-r--r--drivers/acpi/pci_slot.c12
-rw-r--r--drivers/acpi/power.c74
-rw-r--r--drivers/acpi/processor_core.c6
-rw-r--r--drivers/acpi/processor_idle.c1
-rw-r--r--drivers/acpi/processor_perflib.c30
-rw-r--r--drivers/acpi/processor_throttling.c12
-rw-r--r--drivers/acpi/reboot.c25
-rw-r--r--drivers/acpi/resources/rscalc.c5
-rw-r--r--drivers/acpi/resources/rscreate.c10
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/sbshc.c6
-rw-r--r--drivers/acpi/scan.c90
-rw-r--r--drivers/acpi/sleep/main.c66
-rw-r--r--drivers/acpi/system.c4
-rw-r--r--drivers/acpi/tables/tbfadt.c32
-rw-r--r--drivers/acpi/tables/tbinstal.c61
-rw-r--r--drivers/acpi/thermal.c57
-rw-r--r--drivers/acpi/toshiba_acpi.c2
-rw-r--r--drivers/acpi/utilities/utalloc.c53
-rw-r--r--drivers/acpi/utilities/utcopy.c29
-rw-r--r--drivers/acpi/utilities/utdelete.c12
-rw-r--r--drivers/acpi/utilities/utglobal.c52
-rw-r--r--drivers/acpi/utilities/utmisc.c9
-rw-r--r--drivers/acpi/utilities/utobject.c15
-rw-r--r--drivers/acpi/utilities/utxface.c7
-rw-r--r--drivers/acpi/utils.c4
-rw-r--r--drivers/acpi/video.c60
-rw-r--r--drivers/acpi/wmi.c49
-rw-r--r--drivers/ata/libata-acpi.c135
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/libata-eh.c26
-rw-r--r--drivers/ata/libata-sff.c11
-rw-r--r--drivers/ata/sata_via.c35
-rw-r--r--drivers/block/DAC960.c6
-rw-r--r--drivers/block/amiflop.c46
-rw-r--r--drivers/block/aoe/aoeblk.c12
-rw-r--r--drivers/block/ataflop.c37
-rw-r--r--drivers/block/brd.c5
-rw-r--r--drivers/block/cciss.c71
-rw-r--r--drivers/block/cpqarray.c28
-rw-r--r--drivers/block/floppy.c54
-rw-r--r--drivers/block/loop.c59
-rw-r--r--drivers/block/nbd.c28
-rw-r--r--drivers/block/paride/pcd.c21
-rw-r--r--drivers/block/paride/pd.c14
-rw-r--r--drivers/block/paride/pf.c22
-rw-r--r--drivers/block/paride/pt.c2
-rw-r--r--drivers/block/pktcdvd.c56
-rw-r--r--drivers/block/swim3.c32
-rw-r--r--drivers/block/ub.c22
-rw-r--r--drivers/block/viodasd.c10
-rw-r--r--drivers/block/virtio_blk.c8
-rw-r--r--drivers/block/xd.c4
-rw-r--r--drivers/block/xd.h2
-rw-r--r--drivers/block/xen-blkfront.c15
-rw-r--r--drivers/block/xsysace.c11
-rw-r--r--drivers/block/z2ram.c7
-rw-r--r--drivers/cdrom/cdrom.c23
-rw-r--r--drivers/cdrom/gdrom.c14
-rw-r--r--drivers/cdrom/viocd.c21
-rw-r--r--drivers/char/hvc_console.c86
-rw-r--r--drivers/char/hvc_console.h12
-rw-r--r--drivers/char/hvc_irq.c5
-rw-r--r--drivers/char/hvc_iseries.c1
-rw-r--r--drivers/char/hvc_vio.c1
-rw-r--r--drivers/char/hvc_xen.c1
-rw-r--r--drivers/char/nvram.c6
-rw-r--r--drivers/char/raw.c8
-rw-r--r--drivers/char/tty_port.c2
-rw-r--r--drivers/char/virtio_console.c1
-rw-r--r--drivers/cpuidle/cpuidle.c9
-rw-r--r--drivers/dma/ioat_dma.c3
-rw-r--r--drivers/gpio/Kconfig7
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/twl4030-gpio.c521
-rw-r--r--drivers/gpu/drm/drm_drawable.c15
-rw-r--r--drivers/gpu/drm/drm_ioc32.c34
-rw-r--r--drivers/gpu/drm/drm_irq.c5
-rw-r--r--drivers/gpu/drm/drm_lock.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h10
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_proc.c15
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c277
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c21
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c1
-rw-r--r--drivers/i2c/busses/i2c-elektor.c3
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c3
-rw-r--r--drivers/i2c/chips/Kconfig2
-rw-r--r--drivers/i2c/chips/Makefile3
-rw-r--r--drivers/i2c/i2c-core.c3
-rw-r--r--drivers/ide/Makefile84
-rw-r--r--drivers/ide/aec62xx.c (renamed from drivers/ide/pci/aec62xx.c)0
-rw-r--r--drivers/ide/ali14xx.c (renamed from drivers/ide/legacy/ali14xx.c)0
-rw-r--r--drivers/ide/alim15x3.c (renamed from drivers/ide/pci/alim15x3.c)0
-rw-r--r--drivers/ide/amd74xx.c (renamed from drivers/ide/pci/amd74xx.c)0
-rw-r--r--drivers/ide/arm/Makefile10
-rw-r--r--drivers/ide/atiixp.c (renamed from drivers/ide/pci/atiixp.c)0
-rw-r--r--drivers/ide/au1xxx-ide.c (renamed from drivers/ide/mips/au1xxx-ide.c)0
-rw-r--r--drivers/ide/buddha.c (renamed from drivers/ide/legacy/buddha.c)0
-rw-r--r--drivers/ide/cmd640.c (renamed from drivers/ide/pci/cmd640.c)0
-rw-r--r--drivers/ide/cmd64x.c (renamed from drivers/ide/pci/cmd64x.c)0
-rw-r--r--drivers/ide/cs5520.c (renamed from drivers/ide/pci/cs5520.c)0
-rw-r--r--drivers/ide/cs5530.c (renamed from drivers/ide/pci/cs5530.c)0
-rw-r--r--drivers/ide/cs5535.c (renamed from drivers/ide/pci/cs5535.c)0
-rw-r--r--drivers/ide/cy82c693.c (renamed from drivers/ide/pci/cy82c693.c)0
-rw-r--r--drivers/ide/delkin_cb.c (renamed from drivers/ide/pci/delkin_cb.c)0
-rw-r--r--drivers/ide/dtc2278.c (renamed from drivers/ide/legacy/dtc2278.c)0
-rw-r--r--drivers/ide/falconide.c (renamed from drivers/ide/legacy/falconide.c)0
-rw-r--r--drivers/ide/gayle.c (renamed from drivers/ide/legacy/gayle.c)0
-rw-r--r--drivers/ide/generic.c (renamed from drivers/ide/pci/generic.c)0
-rw-r--r--drivers/ide/h8300/Makefile2
-rw-r--r--drivers/ide/hpt366.c (renamed from drivers/ide/pci/hpt366.c)0
-rw-r--r--drivers/ide/ht6560b.c (renamed from drivers/ide/legacy/ht6560b.c)0
-rw-r--r--drivers/ide/icside.c (renamed from drivers/ide/arm/icside.c)0
-rw-r--r--drivers/ide/ide-4drives.c (renamed from drivers/ide/legacy/ide-4drives.c)0
-rw-r--r--drivers/ide/ide-cd.c22
-rw-r--r--drivers/ide/ide-cs.c (renamed from drivers/ide/legacy/ide-cs.c)0
-rw-r--r--drivers/ide/ide-disk.h2
-rw-r--r--drivers/ide/ide-disk_ioctl.c5
-rw-r--r--drivers/ide/ide-floppy.h4
-rw-r--r--drivers/ide/ide-floppy_ioctl.c17
-rw-r--r--drivers/ide/ide-gd.c20
-rw-r--r--drivers/ide/ide-h8300.c (renamed from drivers/ide/h8300/ide-h8300.c)0
-rw-r--r--drivers/ide/ide-ioctls.c3
-rw-r--r--drivers/ide/ide-tape.c17
-rw-r--r--drivers/ide/ide_arm.c (renamed from drivers/ide/arm/ide_arm.c)0
-rw-r--r--drivers/ide/ide_platform.c (renamed from drivers/ide/legacy/ide_platform.c)0
-rw-r--r--drivers/ide/it8213.c (renamed from drivers/ide/pci/it8213.c)0
-rw-r--r--drivers/ide/it821x.c (renamed from drivers/ide/pci/it821x.c)0
-rw-r--r--drivers/ide/jmicron.c (renamed from drivers/ide/pci/jmicron.c)0
-rw-r--r--drivers/ide/legacy/Makefile25
-rw-r--r--drivers/ide/macide.c (renamed from drivers/ide/legacy/macide.c)0
-rw-r--r--drivers/ide/mips/Makefile3
-rw-r--r--drivers/ide/ns87415.c (renamed from drivers/ide/pci/ns87415.c)0
-rw-r--r--drivers/ide/opti621.c (renamed from drivers/ide/pci/opti621.c)0
-rw-r--r--drivers/ide/palm_bk3710.c (renamed from drivers/ide/arm/palm_bk3710.c)0
-rw-r--r--drivers/ide/pci/Makefile43
-rw-r--r--drivers/ide/pdc202xx_new.c (renamed from drivers/ide/pci/pdc202xx_new.c)0
-rw-r--r--drivers/ide/pdc202xx_old.c (renamed from drivers/ide/pci/pdc202xx_old.c)0
-rw-r--r--drivers/ide/piix.c (renamed from drivers/ide/pci/piix.c)0
-rw-r--r--drivers/ide/pmac.c (renamed from drivers/ide/ppc/pmac.c)0
-rw-r--r--drivers/ide/ppc/Makefile2
-rw-r--r--drivers/ide/q40ide.c (renamed from drivers/ide/legacy/q40ide.c)0
-rw-r--r--drivers/ide/qd65xx.c (renamed from drivers/ide/legacy/qd65xx.c)0
-rw-r--r--drivers/ide/qd65xx.h (renamed from drivers/ide/legacy/qd65xx.h)0
-rw-r--r--drivers/ide/rapide.c (renamed from drivers/ide/arm/rapide.c)0
-rw-r--r--drivers/ide/rz1000.c (renamed from drivers/ide/pci/rz1000.c)0
-rw-r--r--drivers/ide/sc1200.c (renamed from drivers/ide/pci/sc1200.c)0
-rw-r--r--drivers/ide/scc_pata.c (renamed from drivers/ide/pci/scc_pata.c)0
-rw-r--r--drivers/ide/serverworks.c (renamed from drivers/ide/pci/serverworks.c)0
-rw-r--r--drivers/ide/sgiioc4.c (renamed from drivers/ide/pci/sgiioc4.c)0
-rw-r--r--drivers/ide/siimage.c (renamed from drivers/ide/pci/siimage.c)0
-rw-r--r--drivers/ide/sis5513.c (renamed from drivers/ide/pci/sis5513.c)0
-rw-r--r--drivers/ide/sl82c105.c (renamed from drivers/ide/pci/sl82c105.c)0
-rw-r--r--drivers/ide/slc90e66.c (renamed from drivers/ide/pci/slc90e66.c)0
-rw-r--r--drivers/ide/tc86c001.c (renamed from drivers/ide/pci/tc86c001.c)0
-rw-r--r--drivers/ide/triflex.c (renamed from drivers/ide/pci/triflex.c)0
-rw-r--r--drivers/ide/trm290.c (renamed from drivers/ide/pci/trm290.c)0
-rw-r--r--drivers/ide/umc8672.c (renamed from drivers/ide/legacy/umc8672.c)0
-rw-r--r--drivers/ide/via82cxxx.c (renamed from drivers/ide/pci/via82cxxx.c)0
-rw-r--r--drivers/idle/Kconfig16
-rw-r--r--drivers/idle/Makefile2
-rw-r--r--drivers/idle/i7300_idle.c674
-rw-r--r--drivers/infiniband/core/mad.c14
-rw-r--r--drivers/infiniband/core/ucma.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c83
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c10
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c11
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c21
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c67
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c4
-rw-r--r--drivers/md/Makefile2
-rw-r--r--drivers/md/dm-crypt.c56
-rw-r--r--drivers/md/dm-delay.c3
-rw-r--r--drivers/md/dm-exception-store.c108
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm-ioctl.c4
-rw-r--r--drivers/md/dm-kcopyd.c14
-rw-r--r--drivers/md/dm-linear.c15
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-mpath.c17
-rw-r--r--drivers/md/dm-path-selector.c3
-rw-r--r--drivers/md/dm-raid1.c791
-rw-r--r--drivers/md/dm-region-hash.c704
-rw-r--r--drivers/md/dm-round-robin.c3
-rw-r--r--drivers/md/dm-snap.c11
-rw-r--r--drivers/md/dm-snap.h5
-rw-r--r--drivers/md/dm-stripe.c6
-rw-r--r--drivers/md/dm-table.c37
-rw-r--r--drivers/md/dm-zero.c2
-rw-r--r--drivers/md/dm.c76
-rw-r--r--drivers/md/dm.h9
-rw-r--r--drivers/md/md.c20
-rw-r--r--drivers/media/common/saa7146_fops.c4
-rw-r--r--drivers/media/common/saa7146_video.c12
-rw-r--r--drivers/media/dvb/frontends/s5h1411.c84
-rw-r--r--drivers/media/dvb/frontends/s5h1411.h2
-rw-r--r--drivers/media/radio/dsbr100.c62
-rw-r--r--drivers/media/radio/radio-mr800.c5
-rw-r--r--drivers/media/video/arv.c2
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c6
-rw-r--r--drivers/media/video/c-qcam.c2
-rw-r--r--drivers/media/video/cafe_ccic.c4
-rw-r--r--drivers/media/video/cpia.c6
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c2
-rw-r--r--drivers/media/video/cx18/cx18-driver.c11
-rw-r--r--drivers/media/video/cx18/cx18-io.h4
-rw-r--r--drivers/media/video/cx18/cx18-streams.c36
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c2
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/video/cx88/cx88-cards.c4
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c11
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c2
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c15
-rw-r--r--drivers/media/video/cx88/cx88-video.c6
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c24
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c12
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c1
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c13
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.h3
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c4
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-encoder.c4
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c17
-rw-r--r--drivers/media/video/pwc/pwc-if.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c6
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/video/se401.c2
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c24
-rw-r--r--drivers/media/video/stk-webcam.c4
-rw-r--r--drivers/media/video/stv680.c3
-rw-r--r--drivers/media/video/usbvideo/usbvideo.c2
-rw-r--r--drivers/media/video/usbvideo/vicam.c3
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c2
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c12
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c12
-rw-r--r--drivers/media/video/v4l1-compat.c221
-rw-r--r--drivers/media/video/v4l2-int-device.c5
-rw-r--r--drivers/media/video/v4l2-ioctl.c19
-rw-r--r--drivers/media/video/videobuf-dvb.c52
-rw-r--r--drivers/media/video/vivi.c6
-rw-r--r--drivers/media/video/w9968cf.c16
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c24
-rw-r--r--drivers/media/video/zr364xx.c2
-rw-r--r--drivers/memstick/core/mspro_block.c9
-rw-r--r--drivers/message/i2o/i2o_block.c13
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/sm501.c25
-rw-r--r--drivers/mfd/twl4030-core.c421
-rw-r--r--drivers/mfd/twl4030-irq.c743
-rw-r--r--drivers/mfd/wm8350-core.c5
-rw-r--r--drivers/misc/Kconfig12
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/acer-wmi.c225
-rw-r--r--drivers/misc/asus-laptop.c17
-rw-r--r--drivers/misc/eeepc-laptop.c232
-rw-r--r--drivers/misc/fujitsu-laptop.c135
-rw-r--r--drivers/misc/intel_menlow.c31
-rw-r--r--drivers/misc/panasonic-laptop.c767
-rw-r--r--drivers/misc/sony-laptop.c2
-rw-r--r--drivers/misc/thinkpad_acpi.c65
-rw-r--r--drivers/mmc/card/block.c13
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/mtd_blkdevs.c24
-rw-r--r--drivers/mtd/mtdchar.c10
-rw-r--r--drivers/net/Kconfig15
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/fec_mpc52xx_phy.c55
-rw-r--r--drivers/net/ibm_newemac/core.c10
-rw-r--r--drivers/net/ibm_newemac/mal.c15
-rw-r--r--drivers/net/mlx4/Makefile7
-rw-r--r--drivers/net/mlx4/alloc.c97
-rw-r--r--drivers/net/mlx4/cq.c2
-rw-r--r--drivers/net/mlx4/en_cq.c146
-rw-r--r--drivers/net/mlx4/en_main.c254
-rw-r--r--drivers/net/mlx4/en_netdev.c1088
-rw-r--r--drivers/net/mlx4/en_params.c480
-rw-r--r--drivers/net/mlx4/en_port.c261
-rw-r--r--drivers/net/mlx4/en_port.h570
-rw-r--r--drivers/net/mlx4/en_resources.c96
-rw-r--r--drivers/net/mlx4/en_rx.c1080
-rw-r--r--drivers/net/mlx4/en_tx.c820
-rw-r--r--drivers/net/mlx4/eq.c2
-rw-r--r--drivers/net/mlx4/fw.c20
-rw-r--r--drivers/net/mlx4/fw.h7
-rw-r--r--drivers/net/mlx4/main.c287
-rw-r--r--drivers/net/mlx4/mcg.c4
-rw-r--r--drivers/net/mlx4/mlx4.h45
-rw-r--r--drivers/net/mlx4/mlx4_en.h561
-rw-r--r--drivers/net/mlx4/mr.c2
-rw-r--r--drivers/net/mlx4/pd.c4
-rw-r--r--drivers/net/mlx4/port.c282
-rw-r--r--drivers/net/mlx4/qp.c81
-rw-r--r--drivers/net/mlx4/srq.c2
-rw-r--r--drivers/net/xtsonic.c319
-rw-r--r--drivers/of/of_i2c.c2
-rw-r--r--drivers/of/of_spi.c2
-rw-r--r--drivers/oprofile/buffer_sync.c41
-rw-r--r--drivers/oprofile/buffer_sync.h4
-rw-r--r--drivers/oprofile/cpu_buffer.c106
-rw-r--r--drivers/oprofile/cpu_buffer.h12
-rw-r--r--drivers/oprofile/event_buffer.c34
-rw-r--r--drivers/oprofile/event_buffer.h17
-rw-r--r--drivers/oprofile/oprof.c26
-rw-r--r--drivers/oprofile/oprof.h12
-rw-r--r--drivers/oprofile/oprofile_files.c36
-rw-r--r--drivers/oprofile/oprofile_stats.c24
-rw-r--r--drivers/oprofile/oprofile_stats.h10
-rw-r--r--drivers/oprofile/oprofilefs.c78
-rw-r--r--drivers/oprofile/timer_int.c4
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/parport/parport_pc.c20
-rw-r--r--drivers/pci/dmar.c119
-rw-r--r--drivers/pci/hotplug/acpiphp.h9
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c32
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c20
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug.h6
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c75
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c4
-rw-r--r--drivers/pci/hotplug/cpqphp.h13
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c43
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c2
-rw-r--r--drivers/pci/hotplug/fakephp.c26
-rw-r--r--drivers/pci/hotplug/ibmphp.h5
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c19
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c64
-rw-r--r--drivers/pci/hotplug/pciehp.h10
-rw-r--r--drivers/pci/hotplug/pciehp_core.c49
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c56
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c60
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c10
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c22
-rw-r--r--drivers/pci/hotplug/shpchp.h9
-rw-r--r--drivers/pci/hotplug/shpchp_core.c52
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c48
-rw-r--r--drivers/pci/intel-iommu.c250
-rw-r--r--drivers/pci/msi.c21
-rw-r--r--drivers/pci/pci-acpi.c85
-rw-r--r--drivers/pci/pci.c101
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/probe.c12
-rw-r--r--drivers/pci/quirks.c14
-rw-r--r--drivers/pci/search.c9
-rw-r--r--drivers/pci/slot.c160
-rw-r--r--drivers/pcmcia/Makefile2
-rw-r--r--drivers/pnp/Kconfig20
-rw-r--r--drivers/pnp/Makefile4
-rw-r--r--drivers/pnp/base.h10
-rw-r--r--drivers/pnp/core.c29
-rw-r--r--drivers/pnp/driver.c4
-rw-r--r--drivers/pnp/isapnp/Makefile4
-rw-r--r--drivers/pnp/isapnp/core.c12
-rw-r--r--drivers/pnp/manager.c34
-rw-r--r--drivers/pnp/pnpacpi/Makefile4
-rw-r--r--drivers/pnp/pnpacpi/core.c16
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c43
-rw-r--r--drivers/pnp/pnpbios/Makefile4
-rw-r--r--drivers/pnp/pnpbios/core.c4
-rw-r--r--drivers/pnp/pnpbios/rsparser.c18
-rw-r--r--drivers/pnp/quirks.c5
-rw-r--r--drivers/pnp/resource.c12
-rw-r--r--drivers/pnp/support.c14
-rw-r--r--drivers/rtc/Kconfig14
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-twl4030.c564
-rw-r--r--drivers/s390/block/dasd.c11
-rw-r--r--drivers/s390/block/dasd_genhd.c4
-rw-r--r--drivers/s390/block/dasd_int.h3
-rw-r--r--drivers/s390/block/dasd_ioctl.c15
-rw-r--r--drivers/s390/block/dcssblk.c17
-rw-r--r--drivers/s390/char/tape_block.c25
-rw-r--r--drivers/scsi/ide-scsi.c22
-rw-r--r--drivers/scsi/scsi_ioctl.c6
-rw-r--r--drivers/scsi/sd.c35
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/sr.c40
-rw-r--r--drivers/scsi/st.c7
-rw-r--r--drivers/serial/8250_pci.c231
-rw-r--r--drivers/serial/Kconfig2
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/at76_usb/at76_usb.c4
-rw-r--r--drivers/staging/echo/bit_operations.h205
-rw-r--r--drivers/staging/echo/echo.c835
-rw-r--r--drivers/staging/echo/echo.h58
-rw-r--r--drivers/staging/echo/fir.h376
-rw-r--r--drivers/staging/echo/mmx.h29
-rw-r--r--drivers/staging/echo/oslec.h86
-rw-r--r--drivers/staging/et131x/et1310_phy.c2
-rw-r--r--drivers/staging/et131x/et131x_debug.c1
-rw-r--r--drivers/staging/et131x/et131x_initpci.c1
-rw-r--r--drivers/staging/go7007/go7007-driver.c1
-rw-r--r--drivers/staging/go7007/go7007-fw.c1
-rw-r--r--drivers/staging/go7007/go7007-i2c.c1
-rw-r--r--drivers/staging/go7007/go7007-usb.c1
-rw-r--r--drivers/staging/go7007/snd-go7007.c1
-rw-r--r--drivers/staging/go7007/wis-ov7640.c1
-rw-r--r--drivers/staging/go7007/wis-saa7113.c1
-rw-r--r--drivers/staging/go7007/wis-saa7115.c1
-rw-r--r--drivers/staging/go7007/wis-sony-tuner.c1
-rw-r--r--drivers/staging/go7007/wis-tw2804.c1
-rw-r--r--drivers/staging/go7007/wis-tw9903.c1
-rw-r--r--drivers/staging/go7007/wis-uda1342.c1
-rw-r--r--drivers/staging/me4000/me4000.c908
-rw-r--r--drivers/staging/me4000/me4000.h194
-rw-r--r--drivers/staging/pcc-acpi/Kconfig11
-rw-r--r--drivers/staging/pcc-acpi/Makefile1
-rw-r--r--drivers/staging/pcc-acpi/TODO7
-rw-r--r--drivers/staging/pcc-acpi/pcc-acpi.c1111
-rw-r--r--drivers/staging/poch/Kconfig6
-rw-r--r--drivers/staging/poch/Makefile1
-rw-r--r--drivers/staging/poch/README7
-rw-r--r--drivers/staging/poch/poch.c1425
-rw-r--r--drivers/staging/poch/poch.h29
-rw-r--r--drivers/staging/slicoss/slicoss.c18
-rw-r--r--drivers/staging/sxg/README1
-rw-r--r--drivers/staging/sxg/sxg.c1379
-rw-r--r--drivers/staging/sxg/sxg_os.h41
-rw-r--r--drivers/staging/sxg/sxgdbg.h2
-rw-r--r--drivers/staging/sxg/sxghif.h410
-rw-r--r--drivers/staging/sxg/sxghw.h404
-rw-r--r--drivers/staging/sxg/sxgphycode.h12
-rw-r--r--drivers/staging/usbip/usbip_common.c2
-rw-r--r--drivers/staging/usbip/vhci_rx.c2
-rw-r--r--drivers/staging/winbond/Kconfig2
-rw-r--r--drivers/staging/winbond/README1
-rw-r--r--drivers/staging/winbond/bss_f.h6
-rw-r--r--drivers/staging/winbond/ds_tkip.h6
-rw-r--r--drivers/staging/winbond/linux/common.h17
-rw-r--r--drivers/staging/winbond/linux/wb35reg.c63
-rw-r--r--drivers/staging/winbond/linux/wb35reg_f.h12
-rw-r--r--drivers/staging/winbond/linux/wb35reg_s.h4
-rw-r--r--drivers/staging/winbond/linux/wb35rx.c175
-rw-r--r--drivers/staging/winbond/linux/wb35rx_s.h2
-rw-r--r--drivers/staging/winbond/linux/wb35tx.c138
-rw-r--r--drivers/staging/winbond/linux/wb35tx_f.h2
-rw-r--r--drivers/staging/winbond/linux/wbusb.c259
-rw-r--r--drivers/staging/winbond/mds.c30
-rw-r--r--drivers/staging/winbond/mds_f.h6
-rw-r--r--drivers/staging/winbond/mds_s.h8
-rw-r--r--drivers/staging/winbond/mlme_s.h4
-rw-r--r--drivers/staging/winbond/mlmetxrx.c4
-rw-r--r--drivers/staging/winbond/mlmetxrx_f.h4
-rw-r--r--drivers/staging/winbond/reg.c24
-rw-r--r--drivers/staging/winbond/sme_api.c1
-rw-r--r--drivers/staging/winbond/sme_api.h2
-rw-r--r--drivers/staging/winbond/wbhal.c32
-rw-r--r--drivers/staging/winbond/wbhal_f.h28
-rw-r--r--drivers/staging/winbond/wbhal_s.h4
-rw-r--r--drivers/staging/winbond/wblinux.c208
-rw-r--r--drivers/staging/winbond/wblinux_s.h4
-rw-r--r--drivers/staging/wlan-ng/Kconfig2
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h2
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c1
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c2
-rw-r--r--drivers/staging/wlan-ng/wlan_compat.h8
-rw-r--r--drivers/usb/Kconfig5
-rw-r--r--drivers/usb/Makefile3
-rw-r--r--drivers/usb/atm/speedtch.c12
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/core/driver.c3
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/gadget/config.c2
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c7
-rw-r--r--drivers/usb/host/Kconfig29
-rw-r--r--drivers/usb/host/Makefile3
-rw-r--r--drivers/usb/host/hwa-hc.c925
-rw-r--r--drivers/usb/host/ohci-hcd.c21
-rw-r--r--drivers/usb/host/ohci-tmio.c376
-rw-r--r--drivers/usb/host/whci/Kbuild11
-rw-r--r--drivers/usb/host/whci/asl.c367
-rw-r--r--drivers/usb/host/whci/hcd.c339
-rw-r--r--drivers/usb/host/whci/hw.c87
-rw-r--r--drivers/usb/host/whci/init.c188
-rw-r--r--drivers/usb/host/whci/int.c95
-rw-r--r--drivers/usb/host/whci/pzl.c398
-rw-r--r--drivers/usb/host/whci/qset.c567
-rw-r--r--drivers/usb/host/whci/whcd.h197
-rw-r--r--drivers/usb/host/whci/whci-hc.h416
-rw-r--r--drivers/usb/host/whci/wusb.c241
-rw-r--r--drivers/usb/misc/usbtest.c3
-rw-r--r--drivers/usb/serial/option.c96
-rw-r--r--drivers/usb/storage/initializers.c3
-rw-r--r--drivers/usb/storage/unusual_devs.h291
-rw-r--r--drivers/usb/wusbcore/Kconfig41
-rw-r--r--drivers/usb/wusbcore/Makefile26
-rw-r--r--drivers/usb/wusbcore/cbaf.c673
-rw-r--r--drivers/usb/wusbcore/crypto.c538
-rw-r--r--drivers/usb/wusbcore/dev-sysfs.c143
-rw-r--r--drivers/usb/wusbcore/devconnect.c1297
-rw-r--r--drivers/usb/wusbcore/mmc.c321
-rw-r--r--drivers/usb/wusbcore/pal.c42
-rw-r--r--drivers/usb/wusbcore/reservation.c115
-rw-r--r--drivers/usb/wusbcore/rh.c477
-rw-r--r--drivers/usb/wusbcore/security.c642
-rw-r--r--drivers/usb/wusbcore/wa-hc.c95
-rw-r--r--drivers/usb/wusbcore/wa-hc.h417
-rw-r--r--drivers/usb/wusbcore/wa-nep.c310
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c562
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c1709
-rw-r--r--drivers/usb/wusbcore/wusbhc.c418
-rw-r--r--drivers/usb/wusbcore/wusbhc.h495
-rw-r--r--drivers/uwb/Kconfig90
-rw-r--r--drivers/uwb/Makefile29
-rw-r--r--drivers/uwb/address.c374
-rw-r--r--drivers/uwb/beacon.c642
-rw-r--r--drivers/uwb/driver.c144
-rw-r--r--drivers/uwb/drp-avail.c288
-rw-r--r--drivers/uwb/drp-ie.c232
-rw-r--r--drivers/uwb/drp.c461
-rw-r--r--drivers/uwb/est.c477
-rw-r--r--drivers/uwb/hwa-rc.c926
-rw-r--r--drivers/uwb/i1480/Makefile2
-rw-r--r--drivers/uwb/i1480/dfu/Makefile9
-rw-r--r--drivers/uwb/i1480/dfu/dfu.c217
-rw-r--r--drivers/uwb/i1480/dfu/i1480-dfu.h260
-rw-r--r--drivers/uwb/i1480/dfu/mac.c527
-rw-r--r--drivers/uwb/i1480/dfu/phy.c203
-rw-r--r--drivers/uwb/i1480/dfu/usb.c500
-rw-r--r--drivers/uwb/i1480/i1480-est.c99
-rw-r--r--drivers/uwb/i1480/i1480-wlp.h200
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/Makefile8
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h284
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/lc.c421
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/netdev.c368
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/rx.c486
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/sysfs.c408
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/tx.c632
-rw-r--r--drivers/uwb/ie.c541
-rw-r--r--drivers/uwb/lc-dev.c492
-rw-r--r--drivers/uwb/lc-rc.c495
-rw-r--r--drivers/uwb/neh.c616
-rw-r--r--drivers/uwb/pal.c91
-rw-r--r--drivers/uwb/reset.c362
-rw-r--r--drivers/uwb/rsv.c680
-rw-r--r--drivers/uwb/scan.c133
-rw-r--r--drivers/uwb/umc-bus.c218
-rw-r--r--drivers/uwb/umc-dev.c104
-rw-r--r--drivers/uwb/umc-drv.c31
-rw-r--r--drivers/uwb/uwb-debug.c367
-rw-r--r--drivers/uwb/uwb-internal.h305
-rw-r--r--drivers/uwb/uwbd.c410
-rw-r--r--drivers/uwb/whc-rc.c520
-rw-r--r--drivers/uwb/whci.c269
-rw-r--r--drivers/uwb/wlp/Makefile10
-rw-r--r--drivers/uwb/wlp/driver.c43
-rw-r--r--drivers/uwb/wlp/eda.c449
-rw-r--r--drivers/uwb/wlp/messages.c1946
-rw-r--r--drivers/uwb/wlp/sysfs.c709
-rw-r--r--drivers/uwb/wlp/txrx.c374
-rw-r--r--drivers/uwb/wlp/wlp-internal.h228
-rw-r--r--drivers/uwb/wlp/wlp-lc.c585
-rw-r--r--drivers/uwb/wlp/wss-lc.c1055
-rw-r--r--drivers/watchdog/w83697ug_wdt.c2
-rw-r--r--drivers/xen/cpu_hotplug.c2
620 files changed, 55088 insertions, 8344 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d19b6f5a110..d38f43f593d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -78,6 +78,8 @@ source "drivers/hid/Kconfig"
source "drivers/usb/Kconfig"
+source "drivers/uwb/Kconfig"
+
source "drivers/mmc/Kconfig"
source "drivers/memstick/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 46c8681a07f..2503f7b99b2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
+obj-y += idle/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-$(CONFIG_NEW_LEDS) += leds/
@@ -100,3 +101,4 @@ obj-$(CONFIG_SSB) += ssb/
obj-$(CONFIG_VIRTIO) += virtio/
obj-$(CONFIG_REGULATOR) += regulator/
obj-$(CONFIG_STAGING) += staging/
+obj-$(CONFIG_UWB) += uwb/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index da49b006bcc..f4f63291750 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -42,7 +42,7 @@ if ACPI
config ACPI_SLEEP
bool
- depends on PM_SLEEP
+ depends on SUSPEND || HIBERNATION
default y
config ACPI_PROCFS
@@ -157,18 +157,11 @@ config ACPI_FAN
applications to perform basic fan control (on, off, status).
config ACPI_DOCK
- tristate "Dock"
+ bool "Dock"
depends on EXPERIMENTAL
help
- This driver adds support for ACPI controlled docking stations
-
-config ACPI_BAY
- tristate "Removable Drive Bay (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- depends on ACPI_DOCK
- help
- This driver adds support for ACPI controlled removable drive
- bays such as the IBM ultrabay or the Dell Module Bay.
+ This driver adds support for ACPI controlled docking stations and removable
+ drive bays such as the IBM ultrabay or the Dell Module Bay.
config ACPI_PROCESSOR
tristate "Processor"
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 52a4cd4b81d..d91c027ece8 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -45,14 +45,13 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o
obj-$(CONFIG_ACPI_BUTTON) += button.o
obj-$(CONFIG_ACPI_FAN) += fan.o
obj-$(CONFIG_ACPI_DOCK) += dock.o
-obj-$(CONFIG_ACPI_BAY) += bay.o
obj-$(CONFIG_ACPI_VIDEO) += video.o
obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
-obj-$(CONFIG_ACPI_POWER) += power.o
obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-$(CONFIG_ACPI_CONTAINER) += container.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
+obj-$(CONFIG_ACPI_POWER) += power.o
obj-$(CONFIG_ACPI_SYSTEM) += system.o event.o
obj-$(CONFIG_ACPI_DEBUG) += debug.o
obj-$(CONFIG_ACPI_NUMA) += numa.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 831883b7d6c..d72a1b6c8a9 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -85,7 +85,7 @@ struct acpi_ac {
struct power_supply charger;
#endif
struct acpi_device * device;
- unsigned long state;
+ unsigned long long state;
};
#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger);
@@ -269,7 +269,7 @@ static int acpi_ac_add(struct acpi_device *device)
ac->device = device;
strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_AC_CLASS);
- acpi_driver_data(device) = ac;
+ device->driver_data = ac;
result = acpi_ac_get_state(ac);
if (result)
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 5f1127ad5a9..71d21c51c45 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -194,8 +194,7 @@ acpi_memory_get_device(acpi_handle handle,
static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
{
- unsigned long current_status;
-
+ unsigned long long current_status;
/* Get device present/absent information from the _STA */
if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA",
@@ -264,7 +263,7 @@ static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device)
acpi_status status;
struct acpi_object_list arg_list;
union acpi_object arg;
- unsigned long current_status;
+ unsigned long long current_status;
/* Issue the _EJ0 command */
@@ -403,7 +402,7 @@ static int acpi_memory_device_add(struct acpi_device *device)
mem_device->device = device;
sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
- acpi_driver_data(device) = mem_device;
+ device->driver_data = mem_device;
/* Get the range from the _CRS */
result = acpi_memory_get_device_resources(mem_device);
@@ -454,8 +453,8 @@ static int acpi_memory_device_start (struct acpi_device *device)
/* call add_memory func */
result = acpi_memory_enable_device(mem_device);
if (result)
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Error in acpi_memory_enable_device\n"));
+ printk(KERN_ERR PREFIX
+ "Error in acpi_memory_enable_device\n");
}
return result;
}
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index d3d0886d637..1e74988c7b2 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -42,7 +42,7 @@
#define ASUS_ACPI_VERSION "0.30"
-#define PROC_ASUS "asus" //the directory
+#define PROC_ASUS "asus" /* The directory */
#define PROC_MLED "mled"
#define PROC_WLED "wled"
#define PROC_TLED "tled"
@@ -66,10 +66,10 @@
/*
* Flags for hotk status
*/
-#define MLED_ON 0x01 //mail LED
-#define WLED_ON 0x02 //wireless LED
-#define TLED_ON 0x04 //touchpad LED
-#define BT_ON 0x08 //internal Bluetooth
+#define MLED_ON 0x01 /* Mail LED */
+#define WLED_ON 0x02 /* Wireless LED */
+#define TLED_ON 0x04 /* Touchpad LED */
+#define BT_ON 0x08 /* Internal Bluetooth */
MODULE_AUTHOR("Julien Lerouge, Karol Kozimor");
MODULE_DESCRIPTION(ACPI_HOTK_NAME);
@@ -82,28 +82,28 @@ MODULE_PARM_DESC(asus_uid, "UID for entries in /proc/acpi/asus");
module_param(asus_gid, uint, 0);
MODULE_PARM_DESC(asus_gid, "GID for entries in /proc/acpi/asus");
-/* For each model, all features implemented,
+/* For each model, all features implemented,
* those marked with R are relative to HOTK, A for absolute */
struct model_data {
- char *name; //name of the laptop________________A
- char *mt_mled; //method to handle mled_____________R
- char *mled_status; //node to handle mled reading_______A
- char *mt_wled; //method to handle wled_____________R
- char *wled_status; //node to handle wled reading_______A
- char *mt_tled; //method to handle tled_____________R
- char *tled_status; //node to handle tled reading_______A
- char *mt_ledd; //method to handle LED display______R
- char *mt_bt_switch; //method to switch Bluetooth on/off_R
- char *bt_status; //no model currently supports this__?
- char *mt_lcd_switch; //method to turn LCD on/off_________A
- char *lcd_status; //node to read LCD panel state______A
- char *brightness_up; //method to set brightness up_______A
- char *brightness_down; //guess what ?______________________A
- char *brightness_set; //method to set absolute brightness_R
- char *brightness_get; //method to get absolute brightness_R
- char *brightness_status; //node to get brightness____________A
- char *display_set; //method to set video output________R
- char *display_get; //method to get video output________R
+ char *name; /* name of the laptop________________A */
+ char *mt_mled; /* method to handle mled_____________R */
+ char *mled_status; /* node to handle mled reading_______A */
+ char *mt_wled; /* method to handle wled_____________R */
+ char *wled_status; /* node to handle wled reading_______A */
+ char *mt_tled; /* method to handle tled_____________R */
+ char *tled_status; /* node to handle tled reading_______A */
+ char *mt_ledd; /* method to handle LED display______R */
+ char *mt_bt_switch; /* method to switch Bluetooth on/off_R */
+ char *bt_status; /* no model currently supports this__? */
+ char *mt_lcd_switch; /* method to turn LCD on/off_________A */
+ char *lcd_status; /* node to read LCD panel state______A */
+ char *brightness_up; /* method to set brightness up_______A */
+ char *brightness_down; /* method to set brightness down ____A */
+ char *brightness_set; /* method to set absolute brightness_R */
+ char *brightness_get; /* method to get absolute brightness_R */
+ char *brightness_status;/* node to get brightness____________A */
+ char *display_set; /* method to set video output________R */
+ char *display_get; /* method to get video output________R */
};
/*
@@ -111,41 +111,41 @@ struct model_data {
* about the hotk device
*/
struct asus_hotk {
- struct acpi_device *device; //the device we are in
- acpi_handle handle; //the handle of the hotk device
- char status; //status of the hotk, for LEDs, ...
- u32 ledd_status; //status of the LED display
- struct model_data *methods; //methods available on the laptop
- u8 brightness; //brightness level
+ struct acpi_device *device; /* the device we are in */
+ acpi_handle handle; /* the handle of the hotk device */
+ char status; /* status of the hotk, for LEDs */
+ u32 ledd_status; /* status of the LED display */
+ struct model_data *methods; /* methods available on the laptop */
+ u8 brightness; /* brightness level */
enum {
- A1x = 0, //A1340D, A1300F
- A2x, //A2500H
- A4G, //A4700G
- D1x, //D1
- L2D, //L2000D
- L3C, //L3800C
- L3D, //L3400D
- L3H, //L3H, L2000E, L5D
- L4R, //L4500R
- L5x, //L5800C
- L8L, //L8400L
- M1A, //M1300A
- M2E, //M2400E, L4400L
- M6N, //M6800N, W3400N
- M6R, //M6700R, A3000G
- P30, //Samsung P30
- S1x, //S1300A, but also L1400B and M2400A (L84F)
- S2x, //S200 (J1 reported), Victor MP-XP7210
- W1N, //W1000N
- W5A, //W5A
- W3V, //W3030V
- xxN, //M2400N, M3700N, M5200N, M6800N, S1300N, S5200N
- A4S, //Z81sp
- //(Centrino)
- F3Sa,
+ A1x = 0, /* A1340D, A1300F */
+ A2x, /* A2500H */
+ A4G, /* A4700G */
+ D1x, /* D1 */
+ L2D, /* L2000D */
+ L3C, /* L3800C */
+ L3D, /* L3400D */
+ L3H, /* L3H, L2000E, L5D */
+ L4R, /* L4500R */
+ L5x, /* L5800C */
+ L8L, /* L8400L */
+ M1A, /* M1300A */
+ M2E, /* M2400E, L4400L */
+ M6N, /* M6800N, W3400N */
+ M6R, /* M6700R, A3000G */
+ P30, /* Samsung P30 */
+ S1x, /* S1300A, but also L1400B and M2400A (L84F) */
+ S2x, /* S200 (J1 reported), Victor MP-XP7210 */
+ W1N, /* W1000N */
+ W5A, /* W5A */
+ W3V, /* W3030V */
+ xxN, /* M2400N, M3700N, M5200N, M6800N,
+ S1300N, S5200N*/
+ A4S, /* Z81sp */
+ F3Sa, /* (Centrino) */
END_MODEL
- } model; //Models currently supported
- u16 event_count[128]; //count for each event TODO make this better
+ } model; /* Models currently supported */
+ u16 event_count[128]; /* Count for each event TODO make this better */
};
/* Here we go */
@@ -459,18 +459,18 @@ static struct acpi_driver asus_hotk_driver = {
},
};
-/*
+/*
* This function evaluates an ACPI method, given an int as parameter, the
* method is searched within the scope of the handle, can be NULL. The output
* of the method is written is output, which can also be NULL
*
- * returns 1 if write is successful, 0 else.
+ * returns 1 if write is successful, 0 else.
*/
static int write_acpi_int(acpi_handle handle, const char *method, int val,
struct acpi_buffer *output)
{
- struct acpi_object_list params; //list of input parameters (an int here)
- union acpi_object in_obj; //the only param we use
+ struct acpi_object_list params; /* list of input parameters (int) */
+ union acpi_object in_obj; /* the only param we use */
acpi_status status;
params.count = 1;
@@ -507,18 +507,18 @@ proc_read_info(char *page, char **start, off_t off, int count, int *eof,
{
int len = 0;
int temp;
- char buf[16]; //enough for all info
+ char buf[16]; /* enough for all info */
/*
- * We use the easy way, we don't care of off and count, so we don't set eof
- * to 1
+ * We use the easy way, we don't care of off and count,
+ * so we don't set eof to 1
*/
len += sprintf(page, ACPI_HOTK_NAME " " ASUS_ACPI_VERSION "\n");
len += sprintf(page + len, "Model reference : %s\n",
hotk->methods->name);
- /*
- * The SFUN method probably allows the original driver to get the list
- * of features supported by a given model. For now, 0x0100 or 0x0800
+ /*
+ * The SFUN method probably allows the original driver to get the list
+ * of features supported by a given model. For now, 0x0100 or 0x0800
* bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
* The significance of others is yet to be found.
*/
@@ -528,7 +528,7 @@ proc_read_info(char *page, char **start, off_t off, int count, int *eof,
/*
* Another value for userspace: the ASYM method returns 0x02 for
* battery low and 0x04 for battery critical, its readings tend to be
- * more accurate than those provided by _BST.
+ * more accurate than those provided by _BST.
* Note: since not all the laptops provide this method, errors are
* silently ignored.
*/
@@ -579,7 +579,7 @@ static int read_led(const char *ledname, int ledmask)
return (hotk->status & ledmask) ? 1 : 0;
}
-static int parse_arg(const char __user * buf, unsigned long count, int *val)
+static int parse_arg(const char __user *buf, unsigned long count, int *val)
{
char s[32];
if (!count)
@@ -596,7 +596,7 @@ static int parse_arg(const char __user * buf, unsigned long count, int *val)
/* FIXME: kill extraneous args so it can be called independently */
static int
-write_led(const char __user * buffer, unsigned long count,
+write_led(const char __user *buffer, unsigned long count,
char *ledname, int ledmask, int invert)
{
int rv, value;
@@ -631,7 +631,7 @@ proc_read_mled(char *page, char **start, off_t off, int count, int *eof,
}
static int
-proc_write_mled(struct file *file, const char __user * buffer,
+proc_write_mled(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
return write_led(buffer, count, hotk->methods->mt_mled, MLED_ON, 1);
@@ -648,7 +648,7 @@ proc_read_ledd(char *page, char **start, off_t off, int count, int *eof,
}
static int
-proc_write_ledd(struct file *file, const char __user * buffer,
+proc_write_ledd(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
int rv, value;
@@ -677,7 +677,7 @@ proc_read_wled(char *page, char **start, off_t off, int count, int *eof,
}
static int
-proc_write_wled(struct file *file, const char __user * buffer,
+proc_write_wled(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
return write_led(buffer, count, hotk->methods->mt_wled, WLED_ON, 0);
@@ -694,10 +694,10 @@ proc_read_bluetooth(char *page, char **start, off_t off, int count, int *eof,
}
static int
-proc_write_bluetooth(struct file *file, const char __user * buffer,
+proc_write_bluetooth(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
- /* Note: mt_bt_switch controls both internal Bluetooth adapter's
+ /* Note: mt_bt_switch controls both internal Bluetooth adapter's
presence and its LED */
return write_led(buffer, count, hotk->methods->mt_bt_switch, BT_ON, 0);
}
@@ -714,7 +714,7 @@ proc_read_tled(char *page, char **start, off_t off, int count, int *eof,
}
static int
-proc_write_tled(struct file *file, const char __user * buffer,
+proc_write_tled(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
return write_led(buffer, count, hotk->methods->mt_tled, TLED_ON, 0);
@@ -734,7 +734,7 @@ static int get_lcd_state(void)
input.count = 2;
input.pointer = mt_params;
- /* Note: the following values are partly guessed up, but
+ /* Note: the following values are partly guessed up, but
otherwise they seem to work */
mt_params[0].type = ACPI_TYPE_INTEGER;
mt_params[0].integer.value = 0x02;
@@ -753,7 +753,7 @@ static int get_lcd_state(void)
/* That's what the AML code does */
lcd = out_obj.integer.value >> 8;
} else if (hotk->model == F3Sa) {
- unsigned long tmp;
+ unsigned long long tmp;
union acpi_object param;
struct acpi_object_list input;
acpi_status status;
@@ -796,12 +796,13 @@ static int set_lcd_state(int value)
acpi_evaluate_object(NULL,
hotk->methods->mt_lcd_switch,
NULL, NULL);
- } else { /* L3H and the like have to be handled differently */
+ } else {
+ /* L3H and the like must be handled differently */
if (!write_acpi_int
(hotk->handle, hotk->methods->mt_lcd_switch, 0x07,
NULL))
status = AE_ERROR;
- /* L3H's AML executes EHK (0x07) upon Fn+F7 keypress,
+ /* L3H's AML executes EHK (0x07) upon Fn+F7 keypress,
the exact behaviour is simulated here */
}
if (ACPI_FAILURE(status))
@@ -819,7 +820,7 @@ proc_read_lcd(char *page, char **start, off_t off, int count, int *eof,
}
static int
-proc_write_lcd(struct file *file, const char __user * buffer,
+proc_write_lcd(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
int rv, value;
@@ -897,7 +898,7 @@ proc_read_brn(char *page, char **start, off_t off, int count, int *eof,
}
static int
-proc_write_brn(struct file *file, const char __user * buffer,
+proc_write_brn(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
int rv, value;
@@ -921,7 +922,7 @@ static void set_display(int value)
}
/*
- * Now, *this* one could be more user-friendly, but so far, no-one has
+ * Now, *this* one could be more user-friendly, but so far, no-one has
* complained. The significance of bits is the same as in proc_write_disp()
*/
static int
@@ -933,18 +934,18 @@ proc_read_disp(char *page, char **start, off_t off, int count, int *eof,
if (!read_acpi_int(hotk->handle, hotk->methods->display_get, &value))
printk(KERN_WARNING
"Asus ACPI: Error reading display status\n");
- value &= 0x07; /* needed for some models, shouldn't hurt others */
+ value &= 0x07; /* needed for some models, shouldn't hurt others */
return sprintf(page, "%d\n", value);
}
/*
- * Experimental support for display switching. As of now: 1 should activate
- * the LCD output, 2 should do for CRT, and 4 for TV-Out. Any combination
- * (bitwise) of these will suffice. I never actually tested 3 displays hooked up
- * simultaneously, so be warned. See the acpi4asus README for more info.
+ * Experimental support for display switching. As of now: 1 should activate
+ * the LCD output, 2 should do for CRT, and 4 for TV-Out. Any combination
+ * (bitwise) of these will suffice. I never actually tested 3 displays hooked
+ * up simultaneously, so be warned. See the acpi4asus README for more info.
*/
static int
-proc_write_disp(struct file *file, const char __user * buffer,
+proc_write_disp(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
int rv, value;
@@ -957,12 +958,12 @@ proc_write_disp(struct file *file, const char __user * buffer,
typedef int (proc_readfunc) (char *page, char **start, off_t off, int count,
int *eof, void *data);
-typedef int (proc_writefunc) (struct file * file, const char __user * buffer,
+typedef int (proc_writefunc) (struct file *file, const char __user *buffer,
unsigned long count, void *data);
static int
-asus_proc_add(char *name, proc_writefunc * writefunc,
- proc_readfunc * readfunc, mode_t mode,
+asus_proc_add(char *name, proc_writefunc *writefunc,
+ proc_readfunc *readfunc, mode_t mode,
struct acpi_device *device)
{
struct proc_dir_entry *proc =
@@ -1040,9 +1041,9 @@ static int asus_hotk_add_fs(struct acpi_device *device)
&proc_read_bluetooth, mode, device);
}
- /*
- * We need both read node and write method as LCD switch is also accessible
- * from keyboard
+ /*
+ * We need both read node and write method as LCD switch is also
+ * accessible from the keyboard
*/
if (hotk->methods->mt_lcd_switch && hotk->methods->lcd_status) {
asus_proc_add(PROC_LCD, &proc_write_lcd, &proc_read_lcd, mode,
@@ -1096,11 +1097,10 @@ static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
if (!hotk)
return;
- if ((event & ~((u32) BR_UP)) < 16) {
+ if ((event & ~((u32) BR_UP)) < 16)
hotk->brightness = (event & ~((u32) BR_UP));
- } else if ((event & ~((u32) BR_DOWN)) < 16) {
+ else if ((event & ~((u32) BR_DOWN)) < 16)
hotk->brightness = (event & ~((u32) BR_DOWN));
- }
acpi_bus_generate_proc_event(hotk->device, event,
hotk->event_count[event % 128]++);
@@ -1186,8 +1186,8 @@ static int asus_hotk_get_info(void)
acpi_status status;
/*
- * Get DSDT headers early enough to allow for differentiating between
- * models, but late enough to allow acpi_bus_register_driver() to fail
+ * Get DSDT headers early enough to allow for differentiating between
+ * models, but late enough to allow acpi_bus_register_driver() to fail
* before doing anything ACPI-specific. Should we encounter a machine,
* which needs special handling (i.e. its hotkey device has a different
* HID), this bit will be moved. A global variable asus_info contains
@@ -1212,8 +1212,8 @@ static int asus_hotk_get_info(void)
/*
* Try to match the object returned by INIT to the specific model.
- * Handle every possible object (or the lack of thereof) the DSDT
- * writers might throw at us. When in trouble, we pass NULL to
+ * Handle every possible object (or the lack of thereof) the DSDT
+ * writers might throw at us. When in trouble, we pass NULL to
* asus_model_match() and try something completely different.
*/
if (buffer.pointer) {
@@ -1244,6 +1244,8 @@ static int asus_hotk_get_info(void)
"default values\n", string);
printk(KERN_NOTICE
" send /proc/acpi/dsdt to the developers\n");
+ kfree(model);
+ return -ENODEV;
}
hotk->methods = &model_conf[hotk->model];
return AE_OK;
@@ -1254,7 +1256,7 @@ static int asus_hotk_get_info(void)
/* Sort of per-model blacklist */
if (strncmp(string, "L2B", 3) == 0)
hotk->methods->lcd_status = NULL;
- /* L2B is similar enough to L3C to use its settings, with this only
+ /* L2B is similar enough to L3C to use its settings, with this only
exception */
else if (strncmp(string, "A3G", 3) == 0)
hotk->methods->lcd_status = "\\BLFG";
@@ -1321,7 +1323,7 @@ static int asus_hotk_add(struct acpi_device *device)
hotk->handle = device->handle;
strcpy(acpi_device_name(device), ACPI_HOTK_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_HOTK_CLASS);
- acpi_driver_data(device) = hotk;
+ device->driver_data = hotk;
hotk->device = device;
result = asus_hotk_check();
@@ -1366,10 +1368,9 @@ static int asus_hotk_add(struct acpi_device *device)
/* LED display is off by default */
hotk->ledd_status = 0xFFF;
- end:
- if (result) {
+end:
+ if (result)
kfree(hotk);
- }
return result;
}
@@ -1394,8 +1395,8 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
}
static struct backlight_ops asus_backlight_data = {
- .get_brightness = read_brightness,
- .update_status = set_brightness_status,
+ .get_brightness = read_brightness,
+ .update_status = set_brightness_status,
};
static void asus_acpi_exit(void)
@@ -1442,15 +1443,15 @@ static int __init asus_acpi_init(void)
return -ENODEV;
}
- asus_backlight_device = backlight_device_register("asus",NULL,NULL,
+ asus_backlight_device = backlight_device_register("asus", NULL, NULL,
&asus_backlight_data);
- if (IS_ERR(asus_backlight_device)) {
+ if (IS_ERR(asus_backlight_device)) {
printk(KERN_ERR "Could not register asus backlight device\n");
asus_backlight_device = NULL;
asus_acpi_exit();
return -ENODEV;
}
- asus_backlight_device->props.max_brightness = 15;
+ asus_backlight_device->props.max_brightness = 15;
return 0;
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 70f7f60929c..b2133e89ad9 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -804,7 +804,7 @@ static int acpi_battery_add(struct acpi_device *device)
battery->device = device;
strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
- acpi_driver_data(device) = battery;
+ device->driver_data = battery;
mutex_init(&battery->lock);
acpi_battery_update(battery);
#ifdef CONFIG_ACPI_PROCFS_POWER
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
deleted file mode 100644
index 61b6c5beb2d..00000000000
--- a/drivers/acpi/bay.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * bay.c - ACPI removable drive bay driver
- *
- * Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/notifier.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#include <linux/platform_device.h>
-
-ACPI_MODULE_NAME("bay");
-MODULE_AUTHOR("Kristen Carlson Accardi");
-MODULE_DESCRIPTION("ACPI Removable Drive Bay Driver");
-MODULE_LICENSE("GPL");
-#define ACPI_BAY_CLASS "bay"
-#define ACPI_BAY_COMPONENT 0x10000000
-#define _COMPONENT ACPI_BAY_COMPONENT
-#define bay_dprintk(h,s) {\
- char prefix[80] = {'\0'};\
- struct acpi_buffer buffer = {sizeof(prefix), prefix};\
- acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);\
- printk(KERN_DEBUG PREFIX "%s: %s\n", prefix, s); }
-static void bay_notify(acpi_handle handle, u32 event, void *data);
-
-static const struct acpi_device_id bay_device_ids[] = {
- {"LNXIOBAY", 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, bay_device_ids);
-
-struct bay {
- acpi_handle handle;
- char *name;
- struct list_head list;
- struct platform_device *pdev;
-};
-
-static LIST_HEAD(drive_bays);
-
-
-/*****************************************************************************
- * Drive Bay functions *
- *****************************************************************************/
-/**
- * is_ejectable - see if a device is ejectable
- * @handle: acpi handle of the device
- *
- * If an acpi object has a _EJ0 method, then it is ejectable
- */
-static int is_ejectable(acpi_handle handle)
-{
- acpi_status status;
- acpi_handle tmp;
-
- status = acpi_get_handle(handle, "_EJ0", &tmp);
- if (ACPI_FAILURE(status))
- return 0;
- return 1;
-}
-
-/**
- * bay_present - see if the bay device is present
- * @bay: the drive bay
- *
- * execute the _STA method.
- */
-static int bay_present(struct bay *bay)
-{
- unsigned long sta;
- acpi_status status;
-
- if (bay) {
- status = acpi_evaluate_integer(bay->handle, "_STA", NULL, &sta);
- if (ACPI_SUCCESS(status) && sta)
- return 1;
- }
- return 0;
-}
-
-/**
- * eject_device - respond to an eject request
- * @handle - the device to eject
- *
- * Call this devices _EJ0 method.
- */
-static void eject_device(acpi_handle handle)
-{
- struct acpi_object_list arg_list;
- union acpi_object arg;
-
- bay_dprintk(handle, "Ejecting device");
-
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = 1;
-
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_EJ0",
- &arg_list, NULL)))
- pr_debug("Failed to evaluate _EJ0!\n");
-}
-
-/*
- * show_present - read method for "present" file in sysfs
- */
-static ssize_t show_present(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct bay *bay = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", bay_present(bay));
-
-}
-static DEVICE_ATTR(present, S_IRUGO, show_present, NULL);
-
-/*
- * write_eject - write method for "eject" file in sysfs
- */
-static ssize_t write_eject(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct bay *bay = dev_get_drvdata(dev);
-
- if (!count)
- return -EINVAL;
-
- eject_device(bay->handle);
- return count;
-}
-static DEVICE_ATTR(eject, S_IWUSR, NULL, write_eject);
-
-/**
- * is_ata - see if a device is an ata device
- * @handle: acpi handle of the device
- *
- * If an acpi object has one of 4 ATA ACPI methods defined,
- * then it is an ATA device
- */
-static int is_ata(acpi_handle handle)
-{
- acpi_handle tmp;
-
- if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
- (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
- (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
- (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
- return 1;
-
- return 0;
-}
-
-/**
- * parent_is_ata(acpi_handle handle)
- *
- */
-static int parent_is_ata(acpi_handle handle)
-{
- acpi_handle phandle;
-
- if (acpi_get_parent(handle, &phandle))
- return 0;
-
- return is_ata(phandle);
-}
-
-/**
- * is_ejectable_bay - see if a device is an ejectable drive bay
- * @handle: acpi handle of the device
- *
- * If an acpi object is ejectable and has one of the ACPI ATA
- * methods defined, then we can safely call it an ejectable
- * drive bay
- */
-static int is_ejectable_bay(acpi_handle handle)
-{
- if ((is_ata(handle) || parent_is_ata(handle)) && is_ejectable(handle))
- return 1;
- return 0;
-}
-
-#if 0
-/**
- * eject_removable_drive - try to eject this drive
- * @dev : the device structure of the drive
- *
- * If a device is a removable drive that requires an _EJ0 method
- * to be executed in order to safely remove from the system, do
- * it. ATM - always returns success
- */
-int eject_removable_drive(struct device *dev)
-{
- acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
-
- if (handle) {
- bay_dprintk(handle, "Got device handle");
- if (is_ejectable_bay(handle))
- eject_device(handle);
- } else {
- printk("No acpi handle for device\n");
- }
-
- /* should I return an error code? */
- return 0;
-}
-EXPORT_SYMBOL_GPL(eject_removable_drive);
-#endif /* 0 */
-
-static int acpi_bay_add_fs(struct bay *bay)
-{
- int ret;
- struct device *dev = &bay->pdev->dev;
-
- ret = device_create_file(dev, &dev_attr_present);
- if (ret)
- goto add_fs_err;
- ret = device_create_file(dev, &dev_attr_eject);
- if (ret) {
- device_remove_file(dev, &dev_attr_present);
- goto add_fs_err;
- }
- return 0;
-
- add_fs_err:
- bay_dprintk(bay->handle, "Error adding sysfs files\n");
- return ret;
-}
-
-static void acpi_bay_remove_fs(struct bay *bay)
-{
- struct device *dev = &bay->pdev->dev;
-
- /* cleanup sysfs */
- device_remove_file(dev, &dev_attr_present);
- device_remove_file(dev, &dev_attr_eject);
-}
-
-static int bay_is_dock_device(acpi_handle handle)
-{
- acpi_handle parent;
-
- acpi_get_parent(handle, &parent);
-
- /* if the device or it's parent is dependent on the
- * dock, then we are a dock device
- */
- return (is_dock_device(handle) || is_dock_device(parent));
-}
-
-static int bay_add(acpi_handle handle, int id)
-{
- acpi_status status;
- struct bay *new_bay;
- struct platform_device *pdev;
- struct acpi_buffer nbuffer = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &nbuffer);
-
- bay_dprintk(handle, "Adding notify handler");
-
- /*
- * Initialize bay device structure
- */
- new_bay = kzalloc(sizeof(*new_bay), GFP_ATOMIC);
- INIT_LIST_HEAD(&new_bay->list);
- new_bay->handle = handle;
- new_bay->name = (char *)nbuffer.pointer;
-
- /* initialize platform device stuff */
- pdev = platform_device_register_simple(ACPI_BAY_CLASS, id, NULL, 0);
- if (IS_ERR(pdev)) {
- printk(KERN_ERR PREFIX "Error registering bay device\n");
- goto bay_add_err;
- }
- new_bay->pdev = pdev;
- platform_set_drvdata(pdev, new_bay);
-
- /*
- * we want the bay driver to be able to send uevents
- */
- pdev->dev.uevent_suppress = 0;
-
- /* register for events on this device */
- status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- bay_notify, new_bay);
- if (ACPI_FAILURE(status)) {
- printk(KERN_INFO PREFIX "Error installing bay notify handler\n");
- platform_device_unregister(new_bay->pdev);
- goto bay_add_err;
- }
-
- if (acpi_bay_add_fs(new_bay)) {
- acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- bay_notify);
- platform_device_unregister(new_bay->pdev);
- goto bay_add_err;
- }
-
- /* if we are on a dock station, we should register for dock
- * notifications.
- */
- if (bay_is_dock_device(handle)) {
- bay_dprintk(handle, "Is dependent on dock\n");
- register_hotplug_dock_device(handle, bay_notify, new_bay);
- }
- list_add(&new_bay->list, &drive_bays);
- printk(KERN_INFO PREFIX "Bay [%s] Added\n", new_bay->name);
- return 0;
-
-bay_add_err:
- kfree(new_bay->name);
- kfree(new_bay);
- return -ENODEV;
-}
-
-/**
- * bay_notify - act upon an acpi bay notification
- * @handle: the bay handle
- * @event: the acpi event
- * @data: our driver data struct
- *
- */
-static void bay_notify(acpi_handle handle, u32 event, void *data)
-{
- struct bay *bay_dev = (struct bay *)data;
- struct device *dev = &bay_dev->pdev->dev;
- char event_string[12];
- char *envp[] = { event_string, NULL };
-
- bay_dprintk(handle, "Bay event");
- sprintf(event_string, "BAY_EVENT=%d", event);
- kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
-}
-
-static acpi_status
-find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- int *count = (int *)context;
-
- /*
- * there could be more than one ejectable bay.
- * so, just return AE_OK always so that every object
- * will be checked.
- */
- if (is_ejectable_bay(handle)) {
- bay_dprintk(handle, "found ejectable bay");
- if (!bay_add(handle, *count))
- (*count)++;
- }
- return AE_OK;
-}
-
-static int __init bay_init(void)
-{
- int bays = 0;
-
- INIT_LIST_HEAD(&drive_bays);
-
- if (acpi_disabled)
- return -ENODEV;
-
- /* look for dockable drive bays */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_bay, &bays, NULL);
-
- if (!bays)
- return -ENODEV;
-
- return 0;
-}
-
-static void __exit bay_exit(void)
-{
- struct bay *bay, *tmp;
-
- list_for_each_entry_safe(bay, tmp, &drive_bays, list) {
- if (is_dock_device(bay->handle))
- unregister_hotplug_dock_device(bay->handle);
- acpi_bay_remove_fs(bay);
- acpi_remove_notify_handler(bay->handle, ACPI_SYSTEM_NOTIFY,
- bay_notify);
- platform_device_unregister(bay->pdev);
- kfree(bay->name);
- kfree(bay);
- }
-}
-
-postcore_initcall(bay_init);
-module_exit(bay_exit);
-
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index ccae305ee55..c797c6473f3 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -48,6 +48,23 @@ EXPORT_SYMBOL(acpi_root_dir);
#define STRUCT_TO_INT(s) (*((int*)&s))
+static int set_power_nocheck(const struct dmi_system_id *id)
+{
+ printk(KERN_NOTICE PREFIX "%s detected - "
+ "disable power check in power transistion\n", id->ident);
+ acpi_power_nocheck = 1;
+ return 0;
+}
+static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
+ {
+ set_power_nocheck, "HP Pavilion 05", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "HP Pavilion 05"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "2001211RE101GLEND") }, NULL},
+ {},
+};
+
+
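
For context, a minimal sketch (not part of this patch) of how such a DMI quirk table is consumed; the example_ prefix and the vendor/product strings are hypothetical, while set_power_nocheck() and dmi_check_system() come from the code above and from acpi_init() further down.

	#include <linux/dmi.h>

	static struct dmi_system_id __initdata example_nocheck_dmi_table[] = {
		{
		 .callback = set_power_nocheck,	/* runs once all matches succeed */
		 .ident = "Example Vendor Laptop",	/* hypothetical machine */
		 .matches = {
			     DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			     DMI_MATCH(DMI_PRODUCT_NAME, "Example Laptop"),
			     },
		 },
		{}
	};

	/* dmi_check_system() returns the number of matching entries and invokes
	 * each matching entry's callback, exactly as acpi_init() does below with
	 * power_nocheck_dmi_table. */
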
/* --------------------------------------------------------------------------
Device Management
-------------------------------------------------------------------------- */
@@ -77,7 +94,7 @@ EXPORT_SYMBOL(acpi_bus_get_device);
int acpi_bus_get_status(struct acpi_device *device)
{
acpi_status status = AE_OK;
- unsigned long sta = 0;
+ unsigned long long sta = 0;
if (!device)
@@ -95,21 +112,21 @@ int acpi_bus_get_status(struct acpi_device *device)
}
/*
- * Otherwise we assume the status of our parent (unless we don't
- * have one, in which case status is implied).
+ * According to the ACPI spec, a device can be present and functional
+ * even if its parent is not present but is functional.
+ * In that case the child device should not inherit its status
+ * from the parent.
*/
- else if (device->parent)
- device->status = device->parent->status;
else
STRUCT_TO_INT(device->status) =
ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
if (device->status.functional && !device->status.present) {
- printk(KERN_WARNING PREFIX "Device [%s] status [%08x]: "
- "functional but not present; setting present\n",
- device->pnp.bus_id, (u32) STRUCT_TO_INT(device->status));
- device->status.present = 1;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: "
+ "functional but not present;\n",
+ device->pnp.bus_id,
+ (u32) STRUCT_TO_INT(device->status)));
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n",
@@ -155,7 +172,7 @@ int acpi_bus_get_power(acpi_handle handle, int *state)
int result = 0;
acpi_status status = 0;
struct acpi_device *device = NULL;
- unsigned long psc = 0;
+ unsigned long long psc = 0;
result = acpi_bus_get_device(handle, &device);
@@ -223,7 +240,19 @@ int acpi_bus_set_power(acpi_handle handle, int state)
/*
* Get device's current power state
*/
- acpi_bus_get_power(device->handle, &device->power.state);
+ if (!acpi_power_nocheck) {
+ /*
+ * A buggy BIOS may report a power state that differs from the
+ * device's real power state. For example: the BIOS reports D0
+ * while the device is really in D3, and the OS wants to put the
+ * device into D0. If the OS trusted the state reported by the
+ * BIOS, the device could not be transitioned to the correct
+ * power state. So when acpi_power_nocheck is set, skip reading
+ * the power state via acpi_bus_get_power.
+ */
+ acpi_bus_get_power(device->handle, &device->power.state);
+ }
if ((state == device->power.state) && !device->flags.force_power_state) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
state));
@@ -496,6 +525,19 @@ static int acpi_bus_check_scope(struct acpi_device *device)
return 0;
}
+static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list);
+int register_acpi_bus_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_bus_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_acpi_bus_notifier);
+
+void unregister_acpi_bus_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&acpi_bus_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_acpi_bus_notifier);
+
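
A usage sketch of the new notifier chain (hedged: the example_ names and the callback body are illustrative, and the prototypes are assumed to be exported via acpi_bus.h); dock.c below registers its notifier in the same way.

	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <acpi/acpi_bus.h>

	/* Illustrative consumer of the new ACPI bus notifier chain. */
	static int example_bus_notify(struct notifier_block *nb,
				      unsigned long event, void *data)
	{
		acpi_handle handle = data;	/* the chain passes the handle as data */

		pr_debug("ACPI notify %lu on handle %p\n", event, handle);
		return 0;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_bus_notify,
	};

	/* register_acpi_bus_notifier(&example_nb) at init time,
	 * unregister_acpi_bus_notifier(&example_nb) at exit. */
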
/**
* acpi_bus_notify
* ---------------
@@ -506,6 +548,8 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
int result = 0;
struct acpi_device *device = NULL;
+ blocking_notifier_call_chain(&acpi_bus_notify_list,
+ type, (void *)handle);
if (acpi_bus_get_device(handle, &device))
return;
@@ -749,6 +793,12 @@ static int __init acpi_bus_init(void)
goto error1;
}
+ /*
+ * The EC region may be needed by bus_scan/acpi_get_devices, so
+ * enable it as early as possible.
+ */
+ acpi_boot_ec_enable();
+
printk(KERN_INFO PREFIX "Interpreter enabled\n");
/* Initialize sleep structures */
@@ -818,7 +868,11 @@ static int __init acpi_init(void)
}
} else
disable_acpi();
-
+ /*
+ * If the laptop matches an entry in the DMI check table, the power
+ * state check is disabled during device power transitions.
+ */
+ dmi_check_system(power_nocheck_dmi_table);
return result;
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 1dfec413588..9d568d417ea 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -145,7 +145,7 @@ static int acpi_button_state_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_button *button = seq->private;
acpi_status status;
- unsigned long state;
+ unsigned long long state;
if (!button || !button->device)
return 0;
@@ -253,7 +253,7 @@ static int acpi_button_remove_fs(struct acpi_device *device)
-------------------------------------------------------------------------- */
static int acpi_lid_send_state(struct acpi_button *button)
{
- unsigned long state;
+ unsigned long long state;
acpi_status status;
status = acpi_evaluate_integer(button->device->handle, "_LID", NULL,
@@ -384,7 +384,7 @@ static int acpi_button_add(struct acpi_device *device)
return -ENOMEM;
button->device = device;
- acpi_driver_data(device) = button;
+ device->driver_data = button;
button->input = input = input_allocate_device();
if (!input) {
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
index f9db4f444bd..4441e84b28a 100644
--- a/drivers/acpi/cm_sbs.c
+++ b/drivers/acpi/cm_sbs.c
@@ -52,8 +52,8 @@ struct proc_dir_entry *acpi_lock_ac_dir(void)
if (acpi_ac_dir) {
lock_ac_dir_cnt++;
} else {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Cannot create %s\n", ACPI_AC_CLASS));
+ printk(KERN_ERR PREFIX
+ "Cannot create %s\n", ACPI_AC_CLASS);
}
mutex_unlock(&cm_sbs_mutex);
return acpi_ac_dir;
@@ -83,8 +83,8 @@ struct proc_dir_entry *acpi_lock_battery_dir(void)
if (acpi_battery_dir) {
lock_battery_dir_cnt++;
} else {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Cannot create %s\n", ACPI_BATTERY_CLASS));
+ printk(KERN_ERR PREFIX
+ "Cannot create %s\n", ACPI_BATTERY_CLASS);
}
mutex_unlock(&cm_sbs_mutex);
return acpi_battery_dir;
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 3c25ec7a187..134818b265a 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -76,7 +76,7 @@ static int is_device_present(acpi_handle handle)
{
acpi_handle temp;
acpi_status status;
- unsigned long sta;
+ unsigned long long sta;
status = acpi_get_handle(handle, "_STA", &temp);
@@ -108,7 +108,7 @@ static int acpi_container_add(struct acpi_device *device)
container->handle = device->handle;
strcpy(acpi_device_name(device), ACPI_CONTAINER_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_CONTAINER_CLASS);
- acpi_driver_data(device) = container;
+ device->driver_data = container;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device <%s> bid <%s>\n",
acpi_device_name(device), acpi_device_bid(device)));
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index 6df564f4ca6..abf36b4b1d1 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -47,8 +47,6 @@ static const struct acpi_dlayer acpi_debug_layers[] = {
};
static const struct acpi_dlevel acpi_debug_levels[] = {
- ACPI_DEBUG_INIT(ACPI_LV_ERROR),
- ACPI_DEBUG_INIT(ACPI_LV_WARN),
ACPI_DEBUG_INIT(ACPI_LV_INIT),
ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
ACPI_DEBUG_INIT(ACPI_LV_INFO),
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index 4613b9ca579..279a5a60a0d 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -103,6 +103,9 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
NULL);
acpi_ex_enter_interpreter();
}
+
+ acpi_ds_clear_implicit_return(walk_state);
+
#ifdef ACPI_DISASSEMBLER
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/dispatcher/dsmthdat.c
index 13c43eac35d..d03f81bd1bc 100644
--- a/drivers/acpi/dispatcher/dsmthdat.c
+++ b/drivers/acpi/dispatcher/dsmthdat.c
@@ -43,7 +43,6 @@
#include <acpi/acpi.h>
#include <acpi/acdispat.h>
-#include <acpi/amlcode.h>
#include <acpi/acnamesp.h>
#include <acpi/acinterp.h>
@@ -52,11 +51,11 @@ ACPI_MODULE_NAME("dsmthdat")
/* Local prototypes */
static void
-acpi_ds_method_data_delete_value(u16 opcode,
+acpi_ds_method_data_delete_value(u8 type,
u32 index, struct acpi_walk_state *walk_state);
static acpi_status
-acpi_ds_method_data_set_value(u16 opcode,
+acpi_ds_method_data_set_value(u8 type,
u32 index,
union acpi_operand_object *object,
struct acpi_walk_state *walk_state);
@@ -216,7 +215,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
* Store the argument in the method/walk descriptor.
* Do not copy the arg in order to implement call by reference
*/
- status = acpi_ds_method_data_set_value(AML_ARG_OP, index,
+ status = acpi_ds_method_data_set_value(ACPI_REFCLASS_ARG, index,
params[index],
walk_state);
if (ACPI_FAILURE(status)) {
@@ -234,7 +233,8 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
*
* FUNCTION: acpi_ds_method_data_get_node
*
- * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
+ * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * ACPI_REFCLASS_ARG
* Index - Which Local or Arg whose type to get
* walk_state - Current walk state object
* Node - Where the node is returned.
@@ -246,7 +246,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
******************************************************************************/
acpi_status
-acpi_ds_method_data_get_node(u16 opcode,
+acpi_ds_method_data_get_node(u8 type,
u32 index,
struct acpi_walk_state *walk_state,
struct acpi_namespace_node **node)
@@ -256,8 +256,8 @@ acpi_ds_method_data_get_node(u16 opcode,
/*
* Method Locals and Arguments are supported
*/
- switch (opcode) {
- case AML_LOCAL_OP:
+ switch (type) {
+ case ACPI_REFCLASS_LOCAL:
if (index > ACPI_METHOD_MAX_LOCAL) {
ACPI_ERROR((AE_INFO,
@@ -271,7 +271,7 @@ acpi_ds_method_data_get_node(u16 opcode,
*node = &walk_state->local_variables[index];
break;
- case AML_ARG_OP:
+ case ACPI_REFCLASS_ARG:
if (index > ACPI_METHOD_MAX_ARG) {
ACPI_ERROR((AE_INFO,
@@ -286,8 +286,8 @@ acpi_ds_method_data_get_node(u16 opcode,
break;
default:
- ACPI_ERROR((AE_INFO, "Opcode %d is invalid", opcode));
- return_ACPI_STATUS(AE_AML_BAD_OPCODE);
+ ACPI_ERROR((AE_INFO, "Type %d is invalid", type));
+ return_ACPI_STATUS(AE_TYPE);
}
return_ACPI_STATUS(AE_OK);
@@ -297,7 +297,8 @@ acpi_ds_method_data_get_node(u16 opcode,
*
* FUNCTION: acpi_ds_method_data_set_value
*
- * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
+ * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * ACPI_REFCLASS_ARG
* Index - Which Local or Arg to get
* Object - Object to be inserted into the stack entry
* walk_state - Current walk state object
@@ -310,7 +311,7 @@ acpi_ds_method_data_get_node(u16 opcode,
******************************************************************************/
static acpi_status
-acpi_ds_method_data_set_value(u16 opcode,
+acpi_ds_method_data_set_value(u8 type,
u32 index,
union acpi_operand_object *object,
struct acpi_walk_state *walk_state)
@@ -321,13 +322,13 @@ acpi_ds_method_data_set_value(u16 opcode,
ACPI_FUNCTION_TRACE(ds_method_data_set_value);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "NewObj %p Opcode %X, Refs=%d [%s]\n", object,
- opcode, object->common.reference_count,
+ "NewObj %p Type %2.2X, Refs=%d [%s]\n", object,
+ type, object->common.reference_count,
acpi_ut_get_type_name(object->common.type)));
/* Get the namespace node for the arg/local */
- status = acpi_ds_method_data_get_node(opcode, index, walk_state, &node);
+ status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -350,7 +351,8 @@ acpi_ds_method_data_set_value(u16 opcode,
*
* FUNCTION: acpi_ds_method_data_get_value
*
- * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
+ * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * ACPI_REFCLASS_ARG
* Index - Which local_var or argument to get
* walk_state - Current walk state object
* dest_desc - Where Arg or Local value is returned
@@ -363,7 +365,7 @@ acpi_ds_method_data_set_value(u16 opcode,
******************************************************************************/
acpi_status
-acpi_ds_method_data_get_value(u16 opcode,
+acpi_ds_method_data_get_value(u8 type,
u32 index,
struct acpi_walk_state *walk_state,
union acpi_operand_object **dest_desc)
@@ -383,7 +385,7 @@ acpi_ds_method_data_get_value(u16 opcode,
/* Get the namespace node for the arg/local */
- status = acpi_ds_method_data_get_node(opcode, index, walk_state, &node);
+ status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -419,8 +421,8 @@ acpi_ds_method_data_get_value(u16 opcode,
/* Otherwise, return the error */
else
- switch (opcode) {
- case AML_ARG_OP:
+ switch (type) {
+ case ACPI_REFCLASS_ARG:
ACPI_ERROR((AE_INFO,
"Uninitialized Arg[%d] at node %p",
@@ -428,7 +430,7 @@ acpi_ds_method_data_get_value(u16 opcode,
return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
- case AML_LOCAL_OP:
+ case ACPI_REFCLASS_LOCAL:
ACPI_ERROR((AE_INFO,
"Uninitialized Local[%d] at node %p",
@@ -437,9 +439,10 @@ acpi_ds_method_data_get_value(u16 opcode,
return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL);
default:
+
ACPI_ERROR((AE_INFO,
"Not a Arg/Local opcode: %X",
- opcode));
+ type));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
}
@@ -458,7 +461,8 @@ acpi_ds_method_data_get_value(u16 opcode,
*
* FUNCTION: acpi_ds_method_data_delete_value
*
- * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
+ * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * ACPI_REFCLASS_ARG
* Index - Which local_var or argument to delete
* walk_state - Current walk state object
*
@@ -470,7 +474,7 @@ acpi_ds_method_data_get_value(u16 opcode,
******************************************************************************/
static void
-acpi_ds_method_data_delete_value(u16 opcode,
+acpi_ds_method_data_delete_value(u8 type,
u32 index, struct acpi_walk_state *walk_state)
{
acpi_status status;
@@ -481,7 +485,7 @@ acpi_ds_method_data_delete_value(u16 opcode,
/* Get the namespace node for the arg/local */
- status = acpi_ds_method_data_get_node(opcode, index, walk_state, &node);
+ status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
if (ACPI_FAILURE(status)) {
return_VOID;
}
@@ -514,7 +518,8 @@ acpi_ds_method_data_delete_value(u16 opcode,
*
* FUNCTION: acpi_ds_store_object_to_local
*
- * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
+ * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * ACPI_REFCLASS_ARG
* Index - Which Local or Arg to set
* obj_desc - Value to be stored
* walk_state - Current walk state
@@ -528,7 +533,7 @@ acpi_ds_method_data_delete_value(u16 opcode,
******************************************************************************/
acpi_status
-acpi_ds_store_object_to_local(u16 opcode,
+acpi_ds_store_object_to_local(u8 type,
u32 index,
union acpi_operand_object *obj_desc,
struct acpi_walk_state *walk_state)
@@ -539,8 +544,8 @@ acpi_ds_store_object_to_local(u16 opcode,
union acpi_operand_object *new_obj_desc;
ACPI_FUNCTION_TRACE(ds_store_object_to_local);
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Opcode=%X Index=%d Obj=%p\n",
- opcode, index, obj_desc));
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Type=%2.2X Index=%d Obj=%p\n",
+ type, index, obj_desc));
/* Parameter validation */
@@ -550,7 +555,7 @@ acpi_ds_store_object_to_local(u16 opcode,
/* Get the namespace node for the arg/local */
- status = acpi_ds_method_data_get_node(opcode, index, walk_state, &node);
+ status = acpi_ds_method_data_get_node(type, index, walk_state, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -602,7 +607,7 @@ acpi_ds_store_object_to_local(u16 opcode,
*
* Weird, but true.
*/
- if (opcode == AML_ARG_OP) {
+ if (type == ACPI_REFCLASS_ARG) {
/*
* If we have a valid reference object that came from ref_of(),
* do the indirect store
@@ -611,8 +616,8 @@ acpi_ds_store_object_to_local(u16 opcode,
ACPI_DESC_TYPE_OPERAND)
&& (current_obj_desc->common.type ==
ACPI_TYPE_LOCAL_REFERENCE)
- && (current_obj_desc->reference.opcode ==
- AML_REF_OF_OP)) {
+ && (current_obj_desc->reference.class ==
+ ACPI_REFCLASS_REFOF)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Arg (%p) is an ObjRef(Node), storing in node %p\n",
new_obj_desc,
@@ -640,11 +645,9 @@ acpi_ds_store_object_to_local(u16 opcode,
}
}
- /*
- * Delete the existing object
- * before storing the new one
- */
- acpi_ds_method_data_delete_value(opcode, index, walk_state);
+ /* Delete the existing object before storing the new one */
+
+ acpi_ds_method_data_delete_value(type, index, walk_state);
}
/*
@@ -653,7 +656,7 @@ acpi_ds_store_object_to_local(u16 opcode,
* (increments the object reference count by one)
*/
status =
- acpi_ds_method_data_set_value(opcode, index, new_obj_desc,
+ acpi_ds_method_data_set_value(type, index, new_obj_desc,
walk_state);
/* Remove local reference if we copied the object above */
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
index 0f280589921..4f08e599d07 100644
--- a/drivers/acpi/dispatcher/dsobject.c
+++ b/drivers/acpi/dispatcher/dsobject.c
@@ -731,54 +731,70 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
switch (op_info->type) {
case AML_TYPE_LOCAL_VARIABLE:
- /* Split the opcode into a base opcode + offset */
+ /* Local ID (0-7) is (AML opcode - base AML_LOCAL_OP) */
- obj_desc->reference.opcode = AML_LOCAL_OP;
- obj_desc->reference.offset = opcode - AML_LOCAL_OP;
+ obj_desc->reference.value = opcode - AML_LOCAL_OP;
+ obj_desc->reference.class = ACPI_REFCLASS_LOCAL;
#ifndef ACPI_NO_METHOD_EXECUTION
- status = acpi_ds_method_data_get_node(AML_LOCAL_OP,
- obj_desc->
- reference.offset,
- walk_state,
- (struct
- acpi_namespace_node
- **)&obj_desc->
- reference.object);
+ status =
+ acpi_ds_method_data_get_node(ACPI_REFCLASS_LOCAL,
+ obj_desc->reference.
+ value, walk_state,
+ ACPI_CAST_INDIRECT_PTR
+ (struct
+ acpi_namespace_node,
+ &obj_desc->reference.
+ object));
#endif
break;
case AML_TYPE_METHOD_ARGUMENT:
- /* Split the opcode into a base opcode + offset */
+ /* Arg ID (0-6) is (AML opcode - base AML_ARG_OP) */
- obj_desc->reference.opcode = AML_ARG_OP;
- obj_desc->reference.offset = opcode - AML_ARG_OP;
+ obj_desc->reference.value = opcode - AML_ARG_OP;
+ obj_desc->reference.class = ACPI_REFCLASS_ARG;
#ifndef ACPI_NO_METHOD_EXECUTION
- status = acpi_ds_method_data_get_node(AML_ARG_OP,
+ status = acpi_ds_method_data_get_node(ACPI_REFCLASS_ARG,
obj_desc->
- reference.offset,
+ reference.value,
walk_state,
+ ACPI_CAST_INDIRECT_PTR
(struct
- acpi_namespace_node
- **)&obj_desc->
- reference.object);
+ acpi_namespace_node,
+ &obj_desc->
+ reference.
+ object));
#endif
break;
- default: /* Other literals, etc.. */
+ default: /* Object name or Debug object */
- if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
+ switch (op->common.aml_opcode) {
+ case AML_INT_NAMEPATH_OP:
/* Node was saved in Op */
obj_desc->reference.node = op->common.node;
obj_desc->reference.object =
op->common.node->object;
- }
+ obj_desc->reference.class = ACPI_REFCLASS_NAME;
+ break;
+
+ case AML_DEBUG_OP:
- obj_desc->reference.opcode = opcode;
+ obj_desc->reference.class = ACPI_REFCLASS_DEBUG;
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO,
+ "Unimplemented reference type for AML opcode: %4.4X",
+ opcode));
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
break;
}
break;
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index 6a81c4400ed..69fae5905bb 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -1330,7 +1330,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
(walk_state->results->results.obj_desc[0]) ==
ACPI_TYPE_LOCAL_REFERENCE)
&& ((walk_state->results->results.obj_desc[0])->
- reference.opcode != AML_INDEX_OP)) {
+ reference.class != ACPI_REFCLASS_INDEX)) {
status =
acpi_ex_resolve_to_value(&walk_state->
results->results.
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
index b5072fa9c92..396fe12078c 100644
--- a/drivers/acpi/dispatcher/dswexec.c
+++ b/drivers/acpi/dispatcher/dswexec.c
@@ -166,6 +166,10 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
status = AE_CTRL_FALSE;
}
+ /* Predicate can be used for an implicit return value */
+
+ (void)acpi_ds_do_implicit_return(local_obj_desc, walk_state, TRUE);
+
cleanup:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n",
@@ -429,10 +433,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
ACPI_TYPE_LOCAL_REFERENCE)
&& (walk_state->operands[1]->common.type ==
ACPI_TYPE_LOCAL_REFERENCE)
- && (walk_state->operands[0]->reference.opcode ==
- walk_state->operands[1]->reference.opcode)
- && (walk_state->operands[0]->reference.offset ==
- walk_state->operands[1]->reference.offset)) {
+ && (walk_state->operands[0]->reference.class ==
+ walk_state->operands[1]->reference.class)
+ && (walk_state->operands[0]->reference.value ==
+ walk_state->operands[1]->reference.value)) {
status = AE_OK;
} else {
ACPI_EXCEPTION((AE_INFO, status,
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 7d2edf143f1..5b30b8d91d7 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -48,7 +48,6 @@ MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
" before undocking");
static struct atomic_notifier_head dock_notifier_list;
-static struct platform_device *dock_device;
static char dock_device_name[] = "dock";
static const struct acpi_device_id dock_device_ids[] = {
@@ -65,23 +64,29 @@ struct dock_station {
struct mutex hp_lock;
struct list_head dependent_devices;
struct list_head hotplug_devices;
+
+ struct list_head sibiling;
+ struct platform_device *dock_device;
};
+static LIST_HEAD(dock_stations);
+static int dock_station_count;
struct dock_dependent_device {
struct list_head list;
struct list_head hotplug_list;
acpi_handle handle;
- acpi_notify_handler handler;
+ struct acpi_dock_ops *ops;
void *context;
};
#define DOCK_DOCKING 0x00000001
#define DOCK_UNDOCKING 0x00000002
+#define DOCK_IS_DOCK 0x00000010
+#define DOCK_IS_ATA 0x00000020
+#define DOCK_IS_BAT 0x00000040
#define DOCK_EVENT 3
#define UNDOCK_EVENT 2
-static struct dock_station *dock_station;
-
/*****************************************************************************
* Dock Dependent device functions *
*****************************************************************************/
@@ -199,6 +204,60 @@ static int is_dock(acpi_handle handle)
return 1;
}
+static int is_ejectable(acpi_handle handle)
+{
+ acpi_status status;
+ acpi_handle tmp;
+
+ status = acpi_get_handle(handle, "_EJ0", &tmp);
+ if (ACPI_FAILURE(status))
+ return 0;
+ return 1;
+}
+
+static int is_ata(acpi_handle handle)
+{
+ acpi_handle tmp;
+
+ if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
+ (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
+ (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
+ (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
+ return 1;
+
+ return 0;
+}
+
+static int is_battery(acpi_handle handle)
+{
+ struct acpi_device_info *info;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ int ret = 1;
+
+ if (!ACPI_SUCCESS(acpi_get_object_info(handle, &buffer)))
+ return 0;
+ info = buffer.pointer;
+ if (!(info->valid & ACPI_VALID_HID))
+ ret = 0;
+ else
+ ret = !strcmp("PNP0C0A", info->hardware_id.value);
+
+ kfree(buffer.pointer);
+ return ret;
+}
+
+static int is_ejectable_bay(acpi_handle handle)
+{
+ acpi_handle phandle;
+ if (!is_ejectable(handle))
+ return 0;
+ if (is_battery(handle) || is_ata(handle))
+ return 1;
+ if (!acpi_get_parent(handle, &phandle) && is_ata(phandle))
+ return 1;
+ return 0;
+}
+
/**
* is_dock_device - see if a device is on a dock station
* @handle: acpi handle of the device
@@ -209,11 +268,17 @@ static int is_dock(acpi_handle handle)
*/
int is_dock_device(acpi_handle handle)
{
- if (!dock_station)
+ struct dock_station *dock_station;
+
+ if (!dock_station_count)
return 0;
- if (is_dock(handle) || find_dock_dependent_device(dock_station, handle))
+ if (is_dock(handle))
return 1;
+ list_for_each_entry(dock_station, &dock_stations, sibiling) {
+ if (find_dock_dependent_device(dock_station, handle))
+ return 1;
+ }
return 0;
}
@@ -229,7 +294,7 @@ EXPORT_SYMBOL_GPL(is_dock_device);
*/
static int dock_present(struct dock_station *ds)
{
- unsigned long sta;
+ unsigned long long sta;
acpi_status status;
if (ds) {
@@ -320,8 +385,8 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
* First call driver specific hotplug functions
*/
list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) {
- if (dd->handler)
- dd->handler(dd->handle, event, dd->context);
+ if (dd->ops && dd->ops->handler)
+ dd->ops->handler(dd->handle, event, dd->context);
}
/*
@@ -341,9 +406,10 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
static void dock_event(struct dock_station *ds, u32 event, int num)
{
- struct device *dev = &dock_device->dev;
+ struct device *dev = &ds->dock_device->dev;
char event_string[13];
char *envp[] = { event_string, NULL };
+ struct dock_dependent_device *dd;
if (num == UNDOCK_EVENT)
sprintf(event_string, "EVENT=undock");
@@ -354,7 +420,14 @@ static void dock_event(struct dock_station *ds, u32 event, int num)
* Indicate that the status of the dock station has
* changed.
*/
- kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+ if (num == DOCK_EVENT)
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+
+ list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
+ if (dd->ops && dd->ops->uevent)
+ dd->ops->uevent(dd->handle, event, dd->context);
+ if (num != DOCK_EVENT)
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
/**
@@ -414,9 +487,10 @@ static void handle_dock(struct dock_station *ds, int dock)
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = dock;
status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
- if (ACPI_FAILURE(status))
- printk(KERN_ERR PREFIX "%s - failed to execute _DCK\n",
- (char *)name_buffer.pointer);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
+ ACPI_EXCEPTION((AE_INFO, status, "%s - failed to execute"
+ " _DCK\n", (char *)name_buffer.pointer));
+
kfree(buffer.pointer);
kfree(name_buffer.pointer);
}
@@ -452,6 +526,25 @@ static inline void complete_undock(struct dock_station *ds)
ds->flags &= ~(DOCK_UNDOCKING);
}
+static void dock_lock(struct dock_station *ds, int lock)
+{
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status;
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = !!lock;
+ status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ if (lock)
+ printk(KERN_WARNING PREFIX "Locking device failed\n");
+ else
+ printk(KERN_WARNING PREFIX "Unlocking device failed\n");
+ }
+}
+
/**
* dock_in_progress - see if we are in the middle of handling a dock event
* @ds: the dock station
@@ -479,7 +572,7 @@ static int dock_in_progress(struct dock_station *ds)
*/
int register_dock_notifier(struct notifier_block *nb)
{
- if (!dock_station)
+ if (!dock_station_count)
return -ENODEV;
return atomic_notifier_chain_register(&dock_notifier_list, nb);
@@ -493,7 +586,7 @@ EXPORT_SYMBOL_GPL(register_dock_notifier);
*/
void unregister_dock_notifier(struct notifier_block *nb)
{
- if (!dock_station)
+ if (!dock_station_count)
return;
atomic_notifier_chain_unregister(&dock_notifier_list, nb);
@@ -504,7 +597,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
/**
* register_hotplug_dock_device - register a hotplug function
* @handle: the handle of the device
- * @handler: the acpi_notifier_handler to call after docking
+ * @ops: handlers to call after docking
* @context: device specific data
*
* If a driver would like to perform a hotplug operation after a dock
@@ -512,27 +605,36 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
* the dock driver after _DCK is executed.
*/
int
-register_hotplug_dock_device(acpi_handle handle, acpi_notify_handler handler,
+register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
void *context)
{
struct dock_dependent_device *dd;
+ struct dock_station *dock_station;
+ int ret = -EINVAL;
- if (!dock_station)
+ if (!dock_station_count)
return -ENODEV;
/*
* make sure this handle is for a device dependent on the dock,
* this would include the dock station itself
*/
- dd = find_dock_dependent_device(dock_station, handle);
- if (dd) {
- dd->handler = handler;
- dd->context = context;
- dock_add_hotplug_device(dock_station, dd);
- return 0;
+ list_for_each_entry(dock_station, &dock_stations, sibiling) {
+ /*
+ * An ATA bay can sit in a dock and can also be ejected
+ * separately, so there may be two 'dock stations' which need
+ * the ops
+ */
+ dd = find_dock_dependent_device(dock_station, handle);
+ if (dd) {
+ dd->ops = ops;
+ dd->context = context;
+ dock_add_hotplug_device(dock_station, dd);
+ ret = 0;
+ }
}
- return -EINVAL;
+ return ret;
}
EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
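
A hedged sketch of the new calling convention (the example_ names and the callback bodies are illustrative; struct acpi_dock_ops is assumed to expose the .handler and .uevent members that dock.c dereferences above).

	static void example_dock_handler(acpi_handle handle, u32 event, void *context)
	{
		/* perform the actual hotplug work after _DCK has been executed */
	}

	static void example_dock_uevent(acpi_handle handle, u32 event, void *context)
	{
		/* emit any driver-specific uevents for the dock event */
	}

	static struct acpi_dock_ops example_dock_ops = {
		.handler = example_dock_handler,
		.uevent = example_dock_uevent,
	};

	/* In the driver's add/probe path, with 'handle' the device's ACPI handle:
	 *	register_hotplug_dock_device(handle, &example_dock_ops, context);
	 * and unregister_hotplug_dock_device(handle) on removal. */
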
@@ -544,13 +646,16 @@ EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
void unregister_hotplug_dock_device(acpi_handle handle)
{
struct dock_dependent_device *dd;
+ struct dock_station *dock_station;
- if (!dock_station)
+ if (!dock_station_count)
return;
- dd = find_dock_dependent_device(dock_station, handle);
- if (dd)
- dock_del_hotplug_device(dock_station, dd);
+ list_for_each_entry(dock_station, &dock_stations, sibiling) {
+ dd = find_dock_dependent_device(dock_station, handle);
+ if (dd)
+ dock_del_hotplug_device(dock_station, dd);
+ }
}
EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
@@ -575,13 +680,9 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
*/
dock_event(ds, event, UNDOCK_EVENT);
- if (!dock_present(ds)) {
- complete_undock(ds);
- return -ENODEV;
- }
-
hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST);
undock(ds);
+ dock_lock(ds, 0);
eject_dock(ds);
if (dock_present(ds)) {
printk(KERN_ERR PREFIX "Unable to undock!\n");
@@ -604,14 +705,36 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
static void dock_notify(acpi_handle handle, u32 event, void *data)
{
struct dock_station *ds = data;
+ struct acpi_device *tmp;
+ int surprise_removal = 0;
+
+ /*
+ * According to acpi spec 3.0a, if a DEVICE_CHECK notification
+ * is sent and _DCK is present, it is assumed to mean an undock
+ * request.
+ */
+ if ((ds->flags & DOCK_IS_DOCK) && event == ACPI_NOTIFY_DEVICE_CHECK)
+ event = ACPI_NOTIFY_EJECT_REQUEST;
+ /*
+ * dock station: BUS_CHECK - docked or surprise removal
+ * DEVICE_CHECK - undocked
+ * other device: BUS_CHECK/DEVICE_CHECK - added or surprise removal
+ *
+ * To simplify event handling, dock dependent device handlers always
+ * get ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for addition and
+ * ACPI_NOTIFY_EJECT_REQUEST for removal
+ */
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
- if (!dock_in_progress(ds) && dock_present(ds)) {
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle,
+ &tmp)) {
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
printk(KERN_ERR PREFIX "Unable to dock!\n");
+ complete_dock(ds);
break;
}
atomic_notifier_call_chain(&dock_notifier_list,
@@ -619,20 +742,19 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
hotplug_dock_devices(ds, event);
complete_dock(ds);
dock_event(ds, event, DOCK_EVENT);
+ dock_lock(ds, 1);
+ break;
}
- break;
- case ACPI_NOTIFY_DEVICE_CHECK:
- /*
- * According to acpi spec 3.0a, if a DEVICE_CHECK notification
- * is sent and _DCK is present, it is assumed to mean an
- * undock request. This notify routine will only be called
- * for objects defining _DCK, so we will fall through to eject
- * request here. However, we will pass an eject request through
- * to the driver who wish to hotplug.
- */
+ if (dock_present(ds) || dock_in_progress(ds))
+ break;
+ /* This is a surprise removal */
+ surprise_removal = 1;
+ event = ACPI_NOTIFY_EJECT_REQUEST;
+ /* Fall through */
case ACPI_NOTIFY_EJECT_REQUEST:
begin_undock(ds);
- if (immediate_undock)
+ if ((immediate_undock && !(ds->flags & DOCK_IS_ATA))
+ || surprise_removal)
handle_eject_request(ds, event);
else
dock_event(ds, event, UNDOCK_EVENT);
@@ -642,6 +764,51 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
}
}
+struct dock_data {
+ acpi_handle handle;
+ unsigned long event;
+ struct dock_station *ds;
+};
+
+static void acpi_dock_deferred_cb(void *context)
+{
+ struct dock_data *data = (struct dock_data *)context;
+
+ dock_notify(data->handle, data->event, data->ds);
+ kfree(data);
+}
+
+static int acpi_dock_notifier_call(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct dock_station *dock_station;
+ acpi_handle handle = (acpi_handle)data;
+
+ if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
+ && event != ACPI_NOTIFY_EJECT_REQUEST)
+ return 0;
+ list_for_each_entry(dock_station, &dock_stations, sibiling) {
+ if (dock_station->handle == handle) {
+ struct dock_data *dock_data;
+
+ dock_data = kmalloc(sizeof(*dock_data), GFP_KERNEL);
+ if (!dock_data)
+ return 0;
+ dock_data->handle = handle;
+ dock_data->event = event;
+ dock_data->ds = dock_station;
+ acpi_os_hotplug_execute(acpi_dock_deferred_cb,
+ dock_data);
+ return 0;
+ }
+ }
+ return 0;
+}
+
+static struct notifier_block dock_acpi_notifier = {
+ .notifier_call = acpi_dock_notifier_call,
+};
+
/**
* find_dock_devices - find devices on the dock station
* @handle: the handle of the device we are examining
@@ -688,6 +855,8 @@ fdd_out:
static ssize_t show_docked(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dock_station *dock_station = *((struct dock_station **)
+ dev->platform_data);
return snprintf(buf, PAGE_SIZE, "%d\n", dock_present(dock_station));
}
@@ -699,6 +868,8 @@ static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
static ssize_t show_flags(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dock_station *dock_station = *((struct dock_station **)
+ dev->platform_data);
return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
}
@@ -711,6 +882,8 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
+ struct dock_station *dock_station = *((struct dock_station **)
+ dev->platform_data);
if (!count)
return -EINVAL;
@@ -727,16 +900,38 @@ static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
static ssize_t show_dock_uid(struct device *dev,
struct device_attribute *attr, char *buf)
{
- unsigned long lbuf;
+ unsigned long long lbuf;
+ struct dock_station *dock_station = *((struct dock_station **)
+ dev->platform_data);
acpi_status status = acpi_evaluate_integer(dock_station->handle,
"_UID", NULL, &lbuf);
if (ACPI_FAILURE(status))
return 0;
- return snprintf(buf, PAGE_SIZE, "%lx\n", lbuf);
+ return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
}
static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
+static ssize_t show_dock_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dock_station *dock_station = *((struct dock_station **)
+ dev->platform_data);
+ char *type;
+
+ if (dock_station->flags & DOCK_IS_DOCK)
+ type = "dock_station";
+ else if (dock_station->flags & DOCK_IS_ATA)
+ type = "ata_bay";
+ else if (dock_station->flags & DOCK_IS_BAT)
+ type = "battery_bay";
+ else
+ type = "unknown";
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", type);
+}
+static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
+
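
For illustration only (the dock.0 path is an assumption; the platform device is registered with the "dock" name and an increasing index), userspace can read the new attribute like any other sysfs file.

	#include <stdio.h>

	int main(void)
	{
		char type[32] = "";
		FILE *f = fopen("/sys/devices/platform/dock.0/type", "r");

		if (!f)
			return 1;
		if (fgets(type, sizeof(type), f))
			printf("dock type: %s", type);	/* dock_station, ata_bay, battery_bay or unknown */
		fclose(f);
		return 0;
	}
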
/**
* dock_add - add a new dock station
* @handle: the dock station handle
@@ -747,8 +942,9 @@ static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
static int dock_add(acpi_handle handle)
{
int ret;
- acpi_status status;
struct dock_dependent_device *dd;
+ struct dock_station *dock_station;
+ struct platform_device *dock_device;
/* allocate & initialize the dock_station private data */
dock_station = kzalloc(sizeof(*dock_station), GFP_KERNEL);
@@ -758,22 +954,34 @@ static int dock_add(acpi_handle handle)
dock_station->last_dock_time = jiffies - HZ;
INIT_LIST_HEAD(&dock_station->dependent_devices);
INIT_LIST_HEAD(&dock_station->hotplug_devices);
+ INIT_LIST_HEAD(&dock_station->sibiling);
spin_lock_init(&dock_station->dd_lock);
mutex_init(&dock_station->hp_lock);
ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
/* initialize platform device stuff */
- dock_device =
- platform_device_register_simple(dock_device_name, 0, NULL, 0);
+ dock_station->dock_device =
+ platform_device_register_simple(dock_device_name,
+ dock_station_count, NULL, 0);
+ dock_device = dock_station->dock_device;
if (IS_ERR(dock_device)) {
kfree(dock_station);
dock_station = NULL;
return PTR_ERR(dock_device);
}
+ platform_device_add_data(dock_device, &dock_station,
+ sizeof(struct dock_station *));
/* we want the dock device to send uevents */
dock_device->dev.uevent_suppress = 0;
+ if (is_dock(handle))
+ dock_station->flags |= DOCK_IS_DOCK;
+ if (is_ata(handle))
+ dock_station->flags |= DOCK_IS_ATA;
+ if (is_battery(handle))
+ dock_station->flags |= DOCK_IS_BAT;
+
ret = device_create_file(&dock_device->dev, &dev_attr_docked);
if (ret) {
printk("Error %d adding sysfs file\n", ret);
@@ -812,6 +1020,9 @@ static int dock_add(acpi_handle handle)
dock_station = NULL;
return ret;
}
+ ret = device_create_file(&dock_device->dev, &dev_attr_type);
+ if (ret)
+ printk(KERN_ERR"Error %d adding sysfs file\n", ret);
/* Find dependent devices */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
@@ -828,24 +1039,12 @@ static int dock_add(acpi_handle handle)
}
add_dock_dependent_device(dock_station, dd);
- /* register for dock events */
- status = acpi_install_notify_handler(dock_station->handle,
- ACPI_SYSTEM_NOTIFY,
- dock_notify, dock_station);
-
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Error installing notify handler\n");
- ret = -ENODEV;
- goto dock_add_err;
- }
-
- printk(KERN_INFO PREFIX "%s\n", ACPI_DOCK_DRIVER_DESCRIPTION);
-
+ dock_station_count++;
+ list_add(&dock_station->sibiling, &dock_stations);
return 0;
-dock_add_err:
- kfree(dd);
dock_add_err_unregister:
+ device_remove_file(&dock_device->dev, &dev_attr_type);
device_remove_file(&dock_device->dev, &dev_attr_docked);
device_remove_file(&dock_device->dev, &dev_attr_undock);
device_remove_file(&dock_device->dev, &dev_attr_uid);
@@ -859,12 +1058,12 @@ dock_add_err_unregister:
/**
* dock_remove - free up resources related to the dock station
*/
-static int dock_remove(void)
+static int dock_remove(struct dock_station *dock_station)
{
struct dock_dependent_device *dd, *tmp;
- acpi_status status;
+ struct platform_device *dock_device = dock_station->dock_device;
- if (!dock_station)
+ if (!dock_station_count)
return 0;
/* remove dependent devices */
@@ -872,14 +1071,8 @@ static int dock_remove(void)
list)
kfree(dd);
- /* remove dock notify handler */
- status = acpi_remove_notify_handler(dock_station->handle,
- ACPI_SYSTEM_NOTIFY,
- dock_notify);
- if (ACPI_FAILURE(status))
- printk(KERN_ERR "Error removing notify handler\n");
-
/* cleanup sysfs */
+ device_remove_file(&dock_device->dev, &dev_attr_type);
device_remove_file(&dock_device->dev, &dev_attr_docked);
device_remove_file(&dock_device->dev, &dev_attr_undock);
device_remove_file(&dock_device->dev, &dev_attr_uid);
@@ -904,41 +1097,60 @@ static int dock_remove(void)
static acpi_status
find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
{
- int *count = context;
acpi_status status = AE_OK;
if (is_dock(handle)) {
if (dock_add(handle) >= 0) {
- (*count)++;
status = AE_CTRL_TERMINATE;
}
}
return status;
}
-static int __init dock_init(void)
+static acpi_status
+find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
{
- int num = 0;
-
- dock_station = NULL;
+ /* If bay is a dock, it's already handled */
+ if (is_ejectable_bay(handle) && !is_dock(handle))
+ dock_add(handle);
+ return AE_OK;
+}
+static int __init dock_init(void)
+{
if (acpi_disabled)
return 0;
/* look for a dock station */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_dock, &num, NULL);
+ ACPI_UINT32_MAX, find_dock, NULL, NULL);
- if (!num)
- printk(KERN_INFO "No dock devices found.\n");
+ /* look for bay */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, find_bay, NULL, NULL);
+ if (!dock_station_count) {
+ printk(KERN_INFO PREFIX "No dock devices found.\n");
+ return 0;
+ }
+ register_acpi_bus_notifier(&dock_acpi_notifier);
+ printk(KERN_INFO PREFIX "%s: %d docks/bays found\n",
+ ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
return 0;
}
static void __exit dock_exit(void)
{
- dock_remove();
+ struct dock_station *dock_station;
+
+ unregister_acpi_bus_notifier(&dock_acpi_notifier);
+ list_for_each_entry(dock_station, &dock_stations, sibiling)
+ dock_remove(dock_station);
}
-postcore_initcall(dock_init);
+/*
+ * Must run before the drivers of devices inside a dock are loaded;
+ * otherwise we cannot know which devices are in a dock
+ */
+subsys_initcall(dock_init);
module_exit(dock_exit);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 13593f9f219..ef42316f89f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1,7 +1,7 @@
/*
- * ec.c - ACPI Embedded Controller Driver (v2.0)
+ * ec.c - ACPI Embedded Controller Driver (v2.1)
*
- * Copyright (C) 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
+ * Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de>
* Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
* Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
@@ -26,7 +26,7 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-/* Uncomment next line to get verbose print outs*/
+/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#include <linux/kernel.h>
@@ -38,6 +38,7 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/spinlock.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -65,22 +66,21 @@ enum ec_command {
ACPI_EC_COMMAND_QUERY = 0x84,
};
-/* EC events */
-enum ec_event {
- ACPI_EC_EVENT_OBF_1 = 1, /* Output buffer full */
- ACPI_EC_EVENT_IBF_0, /* Input buffer empty */
-};
-
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY 100 /* Wait 100us before polling EC again */
+#define ACPI_EC_STORM_THRESHOLD 20 /* number of false interrupts
+ per transaction */
+
enum {
- EC_FLAGS_WAIT_GPE = 0, /* Don't check status until GPE arrives */
EC_FLAGS_QUERY_PENDING, /* Query is pending */
- EC_FLAGS_GPE_MODE, /* Expect GPE to be sent for status change */
+ EC_FLAGS_GPE_MODE, /* Expect GPE to be sent
+ * for status change */
EC_FLAGS_NO_GPE, /* Don't use GPE mode */
- EC_FLAGS_RESCHEDULE_POLL /* Re-schedule poll */
+ EC_FLAGS_GPE_STORM, /* GPE storm detected */
+ EC_FLAGS_HANDLERS_INSTALLED /* Handlers for GPE and
+ * OpReg are installed */
};
/* If we find an EC via the ECDT, we need to keep a ptr to its context */
@@ -95,6 +95,15 @@ struct acpi_ec_query_handler {
u8 query_bit;
};
+struct transaction {
+ const u8 *wdata;
+ u8 *rdata;
+ unsigned short irq_count;
+ u8 command;
+ u8 wlen;
+ u8 rlen;
+};
+
static struct acpi_ec {
acpi_handle handle;
unsigned long gpe;
@@ -105,9 +114,8 @@ static struct acpi_ec {
struct mutex lock;
wait_queue_head_t wait;
struct list_head list;
- struct delayed_work work;
- atomic_t irq_count;
- u8 handlers_installed;
+ struct transaction *curr;
+ spinlock_t curr_lock;
} *boot_ec, *first_ec;
/*
@@ -150,7 +158,7 @@ static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
u8 x = inb(ec->data_addr);
pr_debug(PREFIX "---> data = 0x%2.2x\n", x);
- return inb(ec->data_addr);
+ return x;
}
static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
@@ -165,158 +173,172 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
outb(data, ec->data_addr);
}
-static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
+static int ec_transaction_done(struct acpi_ec *ec)
{
- if (test_bit(EC_FLAGS_WAIT_GPE, &ec->flags))
- return 0;
- if (event == ACPI_EC_EVENT_OBF_1) {
- if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_OBF)
- return 1;
- } else if (event == ACPI_EC_EVENT_IBF_0) {
- if (!(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF))
- return 1;
- }
-
- return 0;
+ unsigned long flags;
+ int ret = 0;
+ spin_lock_irqsave(&ec->curr_lock, flags);
+ if (!ec->curr || (!ec->curr->wlen && !ec->curr->rlen))
+ ret = 1;
+ spin_unlock_irqrestore(&ec->curr_lock, flags);
+ return ret;
}
-static void ec_schedule_ec_poll(struct acpi_ec *ec)
+static void gpe_transaction(struct acpi_ec *ec, u8 status)
{
- if (test_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags))
- schedule_delayed_work(&ec->work,
- msecs_to_jiffies(ACPI_EC_DELAY));
+ unsigned long flags;
+ spin_lock_irqsave(&ec->curr_lock, flags);
+ if (!ec->curr)
+ goto unlock;
+ if (ec->curr->wlen > 0) {
+ if ((status & ACPI_EC_FLAG_IBF) == 0) {
+ acpi_ec_write_data(ec, *(ec->curr->wdata++));
+ --ec->curr->wlen;
+ } else
+ /* false interrupt, state didn't change */
+ ++ec->curr->irq_count;
+
+ } else if (ec->curr->rlen > 0) {
+ if ((status & ACPI_EC_FLAG_OBF) == 1) {
+ *(ec->curr->rdata++) = acpi_ec_read_data(ec);
+ --ec->curr->rlen;
+ } else
+ /* false interrupt, state didn't change */
+ ++ec->curr->irq_count;
+ }
+unlock:
+ spin_unlock_irqrestore(&ec->curr_lock, flags);
}
-static void ec_switch_to_poll_mode(struct acpi_ec *ec)
+static int acpi_ec_wait(struct acpi_ec *ec)
{
+ if (wait_event_timeout(ec->wait, ec_transaction_done(ec),
+ msecs_to_jiffies(ACPI_EC_DELAY)))
+ return 0;
+ /* missing GPEs, switch back to poll mode */
+ if (printk_ratelimit())
+ pr_info(PREFIX "missing confirmations, "
+ "switch off interrupt mode.\n");
set_bit(EC_FLAGS_NO_GPE, &ec->flags);
clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
- acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
- set_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
+ return 1;
}
-static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
+static void acpi_ec_gpe_query(void *ec_cxt);
+
+static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
- atomic_set(&ec->irq_count, 0);
- if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) &&
- likely(!force_poll)) {
- if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event),
- msecs_to_jiffies(ACPI_EC_DELAY)))
- return 0;
- clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
- if (acpi_ec_check_status(ec, event)) {
- /* missing GPEs, switch back to poll mode */
- if (printk_ratelimit())
- pr_info(PREFIX "missing confirmations, "
- "switch off interrupt mode.\n");
- ec_switch_to_poll_mode(ec);
- ec_schedule_ec_poll(ec);
- return 0;
- }
- } else {
- unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
- clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
- while (time_before(jiffies, delay)) {
- if (acpi_ec_check_status(ec, event))
- return 0;
- msleep(1);
- }
- if (acpi_ec_check_status(ec,event))
+ if (state & ACPI_EC_FLAG_SCI) {
+ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
+ return acpi_os_execute(OSL_EC_BURST_HANDLER,
+ acpi_ec_gpe_query, ec);
+ }
+ return 0;
+}
+
+static int ec_poll(struct acpi_ec *ec)
+{
+ unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
+ msleep(1);
+ while (time_before(jiffies, delay)) {
+ gpe_transaction(ec, acpi_ec_read_status(ec));
+ msleep(1);
+ if (ec_transaction_done(ec))
return 0;
}
- pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
- acpi_ec_read_status(ec),
- (event == ACPI_EC_EVENT_OBF_1) ? "\"b0=1\"" : "\"b1=0\"");
return -ETIME;
}
-static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
- const u8 * wdata, unsigned wdata_len,
- u8 * rdata, unsigned rdata_len,
+static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
+ struct transaction *t,
int force_poll)
{
- int result = 0;
- set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
+ unsigned long tmp;
+ int ret = 0;
pr_debug(PREFIX "transaction start\n");
- acpi_ec_write_cmd(ec, command);
- for (; wdata_len > 0; --wdata_len) {
- result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
- if (result) {
- pr_err(PREFIX
- "write_cmd timeout, command = %d\n", command);
- goto end;
- }
- set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
- acpi_ec_write_data(ec, *(wdata++));
+ /* disable GPE during transaction if storm is detected */
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+ acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
}
-
- if (!rdata_len) {
- result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
- if (result) {
- pr_err(PREFIX
- "finish-write timeout, command = %d\n", command);
- goto end;
- }
- } else if (command == ACPI_EC_COMMAND_QUERY)
+ /* start transaction */
+ spin_lock_irqsave(&ec->curr_lock, tmp);
+ /* following two actions should be kept atomic */
+ t->irq_count = 0;
+ ec->curr = t;
+ acpi_ec_write_cmd(ec, ec->curr->command);
+ if (ec->curr->command == ACPI_EC_COMMAND_QUERY)
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
-
- for (; rdata_len > 0; --rdata_len) {
- result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll);
- if (result) {
- pr_err(PREFIX "read timeout, command = %d\n", command);
- goto end;
- }
- /* Don't expect GPE after last read */
- if (rdata_len > 1)
- set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
- *(rdata++) = acpi_ec_read_data(ec);
- }
- end:
+ spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ /* if we selected poll mode or failed in GPE-mode do a poll loop */
+ if (force_poll ||
+ !test_bit(EC_FLAGS_GPE_MODE, &ec->flags) ||
+ acpi_ec_wait(ec))
+ ret = ec_poll(ec);
pr_debug(PREFIX "transaction end\n");
- return result;
+ spin_lock_irqsave(&ec->curr_lock, tmp);
+ ec->curr = NULL;
+ spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ /* check if we received SCI during transaction */
+ ec_check_sci(ec, acpi_ec_read_status(ec));
+ /* it is safe to enable GPE outside of transaction */
+ acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
+ } else if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
+ t->irq_count > ACPI_EC_STORM_THRESHOLD) {
+ pr_debug(PREFIX "GPE storm detected\n");
+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+ }
+ return ret;
+}
+
+static int ec_check_ibf0(struct acpi_ec *ec)
+{
+ u8 status = acpi_ec_read_status(ec);
+ return (status & ACPI_EC_FLAG_IBF) == 0;
}
-static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
- const u8 * wdata, unsigned wdata_len,
- u8 * rdata, unsigned rdata_len,
+static int ec_wait_ibf0(struct acpi_ec *ec)
+{
+ unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
+ /* re-check periodically (every 1 ms) if GPE mode is not active */
+ unsigned long timeout = test_bit(EC_FLAGS_GPE_MODE, &ec->flags) ?
+ msecs_to_jiffies(ACPI_EC_DELAY) : msecs_to_jiffies(1);
+ while (time_before(jiffies, delay))
+ if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), timeout))
+ return 0;
+ return -ETIME;
+}
+
+static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t,
int force_poll)
{
int status;
u32 glk;
-
- if (!ec || (wdata_len && !wdata) || (rdata_len && !rdata))
+ if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
return -EINVAL;
-
- if (rdata)
- memset(rdata, 0, rdata_len);
-
+ if (t->rdata)
+ memset(t->rdata, 0, t->rlen);
mutex_lock(&ec->lock);
if (ec->global_lock) {
status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
if (ACPI_FAILURE(status)) {
- mutex_unlock(&ec->lock);
- return -ENODEV;
+ status = -ENODEV;
+ goto unlock;
}
}
-
- status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0);
- if (status) {
+ if (ec_wait_ibf0(ec)) {
pr_err(PREFIX "input buffer is not empty, "
"aborting transaction\n");
+ status = -ETIME;
goto end;
}
-
- status = acpi_ec_transaction_unlocked(ec, command,
- wdata, wdata_len,
- rdata, rdata_len,
- force_poll);
-
- end:
-
+ status = acpi_ec_transaction_unlocked(ec, t, force_poll);
+end:
if (ec->global_lock)
acpi_release_global_lock(glk);
+unlock:
mutex_unlock(&ec->lock);
-
return status;
}
@@ -327,21 +349,32 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
int acpi_ec_burst_enable(struct acpi_ec *ec)
{
u8 d;
- return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1, 0);
+ struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
+ .wdata = NULL, .rdata = &d,
+ .wlen = 0, .rlen = 1};
+
+ return acpi_ec_transaction(ec, &t, 0);
}
int acpi_ec_burst_disable(struct acpi_ec *ec)
{
- return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0, 0);
+ struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
+ .wdata = NULL, .rdata = NULL,
+ .wlen = 0, .rlen = 0};
+
+ return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
+ acpi_ec_transaction(ec, &t, 0) : 0;
}
static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
{
int result;
u8 d;
+ struct transaction t = {.command = ACPI_EC_COMMAND_READ,
+ .wdata = &address, .rdata = &d,
+ .wlen = 1, .rlen = 1};
- result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_READ,
- &address, 1, &d, 1, 0);
+ result = acpi_ec_transaction(ec, &t, 0);
*data = d;
return result;
}
@@ -349,8 +382,11 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
u8 wdata[2] = { address, data };
- return acpi_ec_transaction(ec, ACPI_EC_COMMAND_WRITE,
- wdata, 2, NULL, 0, 0);
+ struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
+ .wdata = wdata, .rdata = NULL,
+ .wlen = 2, .rlen = 0};
+
+ return acpi_ec_transaction(ec, &t, 0);
}
/*
@@ -412,12 +448,13 @@ int ec_transaction(u8 command,
u8 * rdata, unsigned rdata_len,
int force_poll)
{
+ struct transaction t = {.command = command,
+ .wdata = wdata, .rdata = rdata,
+ .wlen = wdata_len, .rlen = rdata_len};
if (!first_ec)
return -ENODEV;
- return acpi_ec_transaction(first_ec, command, wdata,
- wdata_len, rdata, rdata_len,
- force_poll);
+ return acpi_ec_transaction(first_ec, &t, force_poll);
}
EXPORT_SYMBOL(ec_transaction);
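For context, a hedged sketch of how an external platform driver might call the exported ec_transaction() wrapper after this change. The helper name and the register offset 0x95 are purely illustrative; 0x80 is the RD_EC command value defined by the ACPI specification.

/*
 * Illustrative caller only - reads one byte from a hypothetical
 * vendor-specific EC register via the exported wrapper.
 */
static int example_read_ec_register(u8 *value)
{
	u8 addr = 0x95;		/* hypothetical EC address-space offset */

	/* 0x80 = RD_EC command per the ACPI specification */
	return ec_transaction(0x80, &addr, 1, value, 1, 0);
}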
@@ -426,7 +463,9 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
{
int result;
u8 d;
-
+ struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
+ .wdata = NULL, .rdata = &d,
+ .wlen = 0, .rlen = 1};
if (!ec || !data)
return -EINVAL;
@@ -436,7 +475,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
* bit to be cleared (and thus clearing the interrupt source).
*/
- result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1, 0);
+ result = acpi_ec_transaction(ec, &t, 0);
if (result)
return result;
@@ -513,46 +552,26 @@ static void acpi_ec_gpe_query(void *ec_cxt)
static u32 acpi_ec_gpe_handler(void *data)
{
- acpi_status status = AE_OK;
struct acpi_ec *ec = data;
- u8 state = acpi_ec_read_status(ec);
+ u8 status;
pr_debug(PREFIX "~~~> interrupt\n");
- atomic_inc(&ec->irq_count);
- if (atomic_read(&ec->irq_count) > 5) {
- pr_err(PREFIX "GPE storm detected, disabling EC GPE\n");
- ec_switch_to_poll_mode(ec);
- goto end;
- }
- clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
- if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))
+ status = acpi_ec_read_status(ec);
+
+ gpe_transaction(ec, status);
+ if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
wake_up(&ec->wait);
- if (state & ACPI_EC_FLAG_SCI) {
- if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- status = acpi_os_execute(OSL_EC_BURST_HANDLER,
- acpi_ec_gpe_query, ec);
- } else if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
- !test_bit(EC_FLAGS_NO_GPE, &ec->flags) &&
- in_interrupt()) {
+ ec_check_sci(ec, status);
+ if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
+ !test_bit(EC_FLAGS_NO_GPE, &ec->flags)) {
/* this is non-query, must be confirmation */
if (printk_ratelimit())
pr_info(PREFIX "non-query interrupt received,"
" switching to interrupt mode\n");
set_bit(EC_FLAGS_GPE_MODE, &ec->flags);
- clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
}
-end:
- ec_schedule_ec_poll(ec);
- return ACPI_SUCCESS(status) ?
- ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
-}
-
-static void do_ec_poll(struct work_struct *work)
-{
- struct acpi_ec *ec = container_of(work, struct acpi_ec, work.work);
- atomic_set(&ec->irq_count, 0);
- (void)acpi_ec_gpe_handler(ec);
+ return ACPI_INTERRUPT_HANDLED;
}
/* --------------------------------------------------------------------------
@@ -696,8 +715,7 @@ static struct acpi_ec *make_acpi_ec(void)
mutex_init(&ec->lock);
init_waitqueue_head(&ec->wait);
INIT_LIST_HEAD(&ec->list);
- INIT_DELAYED_WORK_DEFERRABLE(&ec->work, do_ec_poll);
- atomic_set(&ec->irq_count, 0);
+ spin_lock_init(&ec->curr_lock);
return ec;
}
@@ -718,6 +736,7 @@ static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
acpi_status status;
+ unsigned long long tmp;
struct acpi_ec *ec = context;
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
@@ -727,31 +746,26 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
/* Get GPE bit assignment (EC events). */
/* TODO: Add support for _GPE returning a package */
- status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec->gpe);
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
if (ACPI_FAILURE(status))
return status;
+ ec->gpe = tmp;
/* Use the global lock for all EC transactions? */
- acpi_evaluate_integer(handle, "_GLK", NULL, &ec->global_lock);
+ acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
+ ec->global_lock = tmp;
ec->handle = handle;
return AE_CTRL_TERMINATE;
}
-static void ec_poll_stop(struct acpi_ec *ec)
-{
- clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
- cancel_delayed_work(&ec->work);
-}
-
static void ec_remove_handlers(struct acpi_ec *ec)
{
- ec_poll_stop(ec);
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
pr_err(PREFIX "failed to remove space handler\n");
if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler)))
pr_err(PREFIX "failed to remove gpe handler\n");
- ec->handlers_installed = 0;
+ clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
}
static int acpi_ec_add(struct acpi_device *device)
@@ -788,7 +802,7 @@ static int acpi_ec_add(struct acpi_device *device)
if (!first_ec)
first_ec = ec;
- acpi_driver_data(device) = ec;
+ device->driver_data = ec;
acpi_ec_add_fs(device);
pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
@@ -813,7 +827,7 @@ static int acpi_ec_remove(struct acpi_device *device, int type)
}
mutex_unlock(&ec->lock);
acpi_ec_remove_fs(device);
- acpi_driver_data(device) = NULL;
+ device->driver_data = NULL;
if (ec == first_ec)
first_ec = NULL;
kfree(ec);
@@ -846,27 +860,36 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
static int ec_install_handlers(struct acpi_ec *ec)
{
acpi_status status;
- if (ec->handlers_installed)
+ if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
return 0;
status = acpi_install_gpe_handler(NULL, ec->gpe,
- ACPI_GPE_EDGE_TRIGGERED,
- &acpi_ec_gpe_handler, ec);
+ ACPI_GPE_EDGE_TRIGGERED,
+ &acpi_ec_gpe_handler, ec);
if (ACPI_FAILURE(status))
return -ENODEV;
-
acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
-
status = acpi_install_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler,
NULL, ec);
if (ACPI_FAILURE(status)) {
- acpi_remove_gpe_handler(NULL, ec->gpe, &acpi_ec_gpe_handler);
- return -ENODEV;
+ if (status == AE_NOT_FOUND) {
+ /*
+ * The OS may fail to evaluate the _REG object.
+ * The AE_NOT_FOUND error will be ignored and the OS
+ * will continue to initialize the EC.
+ */
+ printk(KERN_ERR "Failed to evaluate the _REG object"
+ " of the EC device. A broken BIOS is suspected.\n");
+ } else {
+ acpi_remove_gpe_handler(NULL, ec->gpe,
+ &acpi_ec_gpe_handler);
+ return -ENODEV;
+ }
}
- ec->handlers_installed = 1;
+ set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
return 0;
}
@@ -887,7 +910,6 @@ static int acpi_ec_start(struct acpi_device *device)
/* EC is fully operational, allow queries */
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
- ec_schedule_ec_poll(ec);
return ret;
}
@@ -906,7 +928,7 @@ static int acpi_ec_stop(struct acpi_device *device, int type)
int __init acpi_boot_ec_enable(void)
{
- if (!boot_ec || boot_ec->handlers_installed)
+ if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
return 0;
if (!ec_install_handlers(boot_ec)) {
first_ec = boot_ec;
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index 8892b9824fa..74da6fa52ef 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -43,7 +43,6 @@
#include <acpi/acpi.h>
#include <acpi/acinterp.h>
-#include <acpi/amlcode.h>
#include <acpi/acnamesp.h>
#include <acpi/actables.h>
#include <acpi/acdispat.h>
@@ -91,13 +90,12 @@ acpi_ex_add_table(u32 table_index,
/* Init the table handle */
- obj_desc->reference.opcode = AML_LOAD_OP;
+ obj_desc->reference.class = ACPI_REFCLASS_TABLE;
*ddb_handle = obj_desc;
/* Install the new table into the local data structures */
- obj_desc->reference.object = ACPI_CAST_PTR(void,
- (unsigned long)table_index);
+ obj_desc->reference.value = table_index;
/* Add the table to the namespace */
@@ -280,6 +278,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
struct acpi_walk_state *walk_state)
{
union acpi_operand_object *ddb_handle;
+ struct acpi_table_header *table;
struct acpi_table_desc table_desc;
u32 table_index;
acpi_status status;
@@ -294,9 +293,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_REGION:
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n",
- obj_desc,
- acpi_ut_get_object_type_name(obj_desc)));
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Load table from Region %p\n", obj_desc));
/* Region must be system_memory (from ACPI spec) */
@@ -316,61 +314,112 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
}
/*
- * We will simply map the memory region for the table. However, the
- * memory region is technically not guaranteed to remain stable and
- * we may eventually have to copy the table to a local buffer.
+ * Map the table header and get the actual table length. The region
+ * length is not guaranteed to be the same as the table length.
+ */
+ table = acpi_os_map_memory(obj_desc->region.address,
+ sizeof(struct acpi_table_header));
+ if (!table) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ length = table->length;
+ acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+
+ /* Must have at least an ACPI table header */
+
+ if (length < sizeof(struct acpi_table_header)) {
+ return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
+ }
+
+ /*
+ * The memory region is not guaranteed to remain stable and we must
+ * copy the table to a local buffer. For example, the memory region
+ * is corrupted after suspend on some machines. Dynamically loaded
+ * tables are usually small, so this overhead is minimal.
*/
+
+ /* Allocate a buffer for the table */
+
+ table_desc.pointer = ACPI_ALLOCATE(length);
+ if (!table_desc.pointer) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Map the entire table and copy it */
+
+ table = acpi_os_map_memory(obj_desc->region.address, length);
+ if (!table) {
+ ACPI_FREE(table_desc.pointer);
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ ACPI_MEMCPY(table_desc.pointer, table, length);
+ acpi_os_unmap_memory(table, length);
+
table_desc.address = obj_desc->region.address;
- table_desc.length = obj_desc->region.length;
- table_desc.flags = ACPI_TABLE_ORIGIN_MAPPED;
break;
case ACPI_TYPE_BUFFER: /* Buffer or resolved region_field */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Load from Buffer or Field %p %s\n", obj_desc,
- acpi_ut_get_object_type_name(obj_desc)));
-
- length = obj_desc->buffer.length;
+ "Load table from Buffer or Field %p\n",
+ obj_desc));
/* Must have at least an ACPI table header */
- if (length < sizeof(struct acpi_table_header)) {
+ if (obj_desc->buffer.length < sizeof(struct acpi_table_header)) {
return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
}
- /* Validate checksum here. It won't get validated in tb_add_table */
+ /* Get the actual table length from the table header */
- status =
- acpi_tb_verify_checksum(ACPI_CAST_PTR
- (struct acpi_table_header,
- obj_desc->buffer.pointer), length);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ table =
+ ACPI_CAST_PTR(struct acpi_table_header,
+ obj_desc->buffer.pointer);
+ length = table->length;
+
+ /* Table cannot extend beyond the buffer */
+
+ if (length > obj_desc->buffer.length) {
+ return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
+ }
+ if (length < sizeof(struct acpi_table_header)) {
+ return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
}
/*
- * We need to copy the buffer since the original buffer could be
- * changed or deleted in the future
+ * Copy the table from the buffer because the buffer could be modified
+ * or even deleted in the future
*/
table_desc.pointer = ACPI_ALLOCATE(length);
if (!table_desc.pointer) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
- ACPI_MEMCPY(table_desc.pointer, obj_desc->buffer.pointer,
- length);
- table_desc.length = length;
- table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
+ ACPI_MEMCPY(table_desc.pointer, table, length);
+ table_desc.address = ACPI_TO_INTEGER(table_desc.pointer);
break;
default:
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
- /*
- * Install the new table into the local data structures
- */
+ /* Validate table checksum (will not get validated in tb_add_table) */
+
+ status = acpi_tb_verify_checksum(table_desc.pointer, length);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(table_desc.pointer);
+ return_ACPI_STATUS(status);
+ }
+
+ /* Complete the table descriptor */
+
+ table_desc.length = length;
+ table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
+
+ /* Install the new table into the local data structures */
+
status = acpi_tb_add_table(&table_desc, &table_index);
if (ACPI_FAILURE(status)) {
goto cleanup;
@@ -379,7 +428,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
/*
* Add the table to the namespace.
*
- * Note: We load the table objects relative to the root of the namespace.
+ * Note: Load the table objects relative to the root of the namespace.
* This appears to go against the ACPI specification, but we do it for
* compatibility with other ACPI implementations.
*/
@@ -415,7 +464,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
cleanup:
if (ACPI_FAILURE(status)) {
- /* Delete allocated buffer or mapping */
+ /* Delete allocated table buffer */
acpi_tb_delete_table(&table_desc);
}
@@ -455,9 +504,9 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Get the table index from the ddb_handle (acpi_size for 64-bit case) */
+ /* Get the table index from the ddb_handle */
- table_index = (u32) (acpi_size) table_desc->reference.object;
+ table_index = table_desc->reference.value;
/* Invoke table handler if present */
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
index 261d97516d9..1d1f35adddd 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/executer/exconvrt.c
@@ -57,7 +57,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
*
* FUNCTION: acpi_ex_convert_to_integer
*
- * PARAMETERS: obj_desc - Object to be converted. Must be an
+ * PARAMETERS: obj_desc - Object to be converted. Must be an
* Integer, Buffer, or String
* result_desc - Where the new Integer object is returned
* Flags - Used for string conversion
@@ -103,7 +103,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
}
/*
- * Convert the buffer/string to an integer. Note that both buffers and
+ * Convert the buffer/string to an integer. Note that both buffers and
* strings are treated as raw data - we don't convert ascii to hex for
* strings.
*
@@ -120,7 +120,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
/*
* Convert string to an integer - for most cases, the string must be
- * hexadecimal as per the ACPI specification. The only exception (as
+ * hexadecimal as per the ACPI specification. The only exception (as
* of ACPI 3.0) is that the to_integer() operator allows both decimal
* and hexadecimal strings (hex prefixed with "0x").
*/
@@ -159,6 +159,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
break;
default:
+
/* No other types can get here */
break;
}
@@ -185,7 +186,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_convert_to_buffer
*
- * PARAMETERS: obj_desc - Object to be converted. Must be an
+ * PARAMETERS: obj_desc - Object to be converted. Must be an
* Integer, Buffer, or String
* result_desc - Where the new buffer object is returned
*
@@ -365,7 +366,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
}
/*
- * Since leading zeros are supressed, we must check for the case where
+ * Since leading zeros are suppressed, we must check for the case where
* the integer equals 0
*
* Finally, null terminate the string and return the length
@@ -383,7 +384,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
*
* FUNCTION: acpi_ex_convert_to_string
*
- * PARAMETERS: obj_desc - Object to be converted. Must be an
+ * PARAMETERS: obj_desc - Object to be converted. Must be an
* Integer, Buffer, or String
* result_desc - Where the string object is returned
* Type - String flags (base and conversion type)
@@ -472,7 +473,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
base = 10;
/*
- * Calculate the final string length. Individual string values
+ * Calculate the final string length. Individual string values
* are variable length (include separator for each)
*/
for (i = 0; i < obj_desc->buffer.length; i++) {
@@ -511,9 +512,14 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
/*
* Create a new string object and string buffer
* (-1 because of extra separator included in string_length from above)
+ * Allow creation of zero-length strings from zero-length buffers.
*/
+ if (string_length) {
+ string_length--;
+ }
+
return_desc = acpi_ut_create_string_object((acpi_size)
- (string_length - 1));
+ string_length);
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -536,7 +542,9 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
* Null terminate the string
* (overwrites final comma/space from above)
*/
- new_buf--;
+ if (obj_desc->buffer.length) {
+ new_buf--;
+ }
*new_buf = 0;
break;
@@ -617,7 +625,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
/*
- * These types require an Integer operand. We can convert
+ * These types require an Integer operand. We can convert
* a Buffer or a String to an Integer if necessary.
*/
status =
@@ -627,7 +635,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
case ACPI_TYPE_STRING:
/*
- * The operand must be a String. We can convert an
+ * The operand must be a String. We can convert an
* Integer or Buffer if necessary
*/
status =
@@ -637,7 +645,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
case ACPI_TYPE_BUFFER:
/*
- * The operand must be a Buffer. We can convert an
+ * The operand must be a Buffer. We can convert an
* Integer or String if necessary
*/
status =
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index 2be2e2bf95b..d087a7d28aa 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -45,7 +45,6 @@
#include <acpi/acinterp.h>
#include <acpi/amlcode.h>
#include <acpi/acnamesp.h>
-#include <acpi/acparser.h>
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exdump")
@@ -214,10 +213,11 @@ static struct acpi_exdump_info acpi_ex_dump_index_field[5] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.data_obj), "Data Object"}
};
-static struct acpi_exdump_info acpi_ex_dump_reference[7] = {
+static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_reference), NULL},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.class), "Class"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"},
- {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.offset), "Offset"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.value), "Value"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.node), "Node"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"},
@@ -413,10 +413,10 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
case ACPI_EXD_REFERENCE:
- acpi_ex_out_string("Opcode",
- (acpi_ps_get_opcode_info
- (obj_desc->reference.opcode))->
- name);
+ acpi_ex_out_string("Class Name",
+ (char *)
+ acpi_ut_get_reference_name
+ (obj_desc));
acpi_ex_dump_reference_obj(obj_desc);
break;
@@ -494,40 +494,41 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_LOCAL_REFERENCE:
- switch (obj_desc->reference.opcode) {
- case AML_DEBUG_OP:
+ acpi_os_printf("Reference: [%s] ",
+ acpi_ut_get_reference_name(obj_desc));
+
+ switch (obj_desc->reference.class) {
+ case ACPI_REFCLASS_DEBUG:
- acpi_os_printf("Reference: Debug\n");
+ acpi_os_printf("\n");
break;
- case AML_INDEX_OP:
+ case ACPI_REFCLASS_INDEX:
- acpi_os_printf("Reference: Index %p\n",
- obj_desc->reference.object);
+ acpi_os_printf("%p\n", obj_desc->reference.object);
break;
- case AML_LOAD_OP:
+ case ACPI_REFCLASS_TABLE:
- acpi_os_printf("Reference: [DdbHandle] TableIndex %p\n",
- obj_desc->reference.object);
+ acpi_os_printf("Table Index %X\n",
+ obj_desc->reference.value);
break;
- case AML_REF_OF_OP:
+ case ACPI_REFCLASS_REFOF:
- acpi_os_printf("Reference: (RefOf) %p [%s]\n",
- obj_desc->reference.object,
+ acpi_os_printf("%p [%s]\n", obj_desc->reference.object,
acpi_ut_get_type_name(((union
acpi_operand_object
- *)obj_desc->
+ *)
+ obj_desc->
reference.
object)->common.
type));
break;
- case AML_ARG_OP:
+ case ACPI_REFCLASS_ARG:
- acpi_os_printf("Reference: Arg%d",
- obj_desc->reference.offset);
+ acpi_os_printf("%X", obj_desc->reference.value);
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
@@ -542,10 +543,9 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
acpi_os_printf("\n");
break;
- case AML_LOCAL_OP:
+ case ACPI_REFCLASS_LOCAL:
- acpi_os_printf("Reference: Local%d",
- obj_desc->reference.offset);
+ acpi_os_printf("%X", obj_desc->reference.value);
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
@@ -560,21 +560,16 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
acpi_os_printf("\n");
break;
- case AML_INT_NAMEPATH_OP:
+ case ACPI_REFCLASS_NAME:
- acpi_os_printf("Reference: Namepath %X [%4.4s]\n",
- obj_desc->reference.node->name.integer,
+ acpi_os_printf("- [%4.4s]\n",
obj_desc->reference.node->name.ascii);
break;
- default:
-
- /* Unknown opcode */
+ default: /* Unknown reference class */
- acpi_os_printf("Unknown Reference opcode=%X\n",
- obj_desc->reference.opcode);
+ acpi_os_printf("%2.2X\n", obj_desc->reference.class);
break;
-
}
break;
@@ -865,8 +860,8 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
ret_buf.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- if (obj_desc->reference.opcode == AML_INT_NAMEPATH_OP) {
- acpi_os_printf(" Named Object %p ", obj_desc->reference.node);
+ if (obj_desc->reference.class == ACPI_REFCLASS_NAME) {
+ acpi_os_printf(" %p ", obj_desc->reference.node);
status =
acpi_ns_handle_to_pathname(obj_desc->reference.node,
@@ -882,14 +877,12 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
ACPI_DESC_TYPE_OPERAND) {
acpi_os_printf(" Target: %p",
obj_desc->reference.object);
- if (obj_desc->reference.opcode == AML_LOAD_OP) {
- /*
- * For DDBHandle reference,
- * obj_desc->Reference.Object is the table index
- */
- acpi_os_printf(" [DDBHandle]\n");
+ if (obj_desc->reference.class == ACPI_REFCLASS_TABLE) {
+ acpi_os_printf(" Table Index: %X\n",
+ obj_desc->reference.value);
} else {
- acpi_os_printf(" [%s]\n",
+ acpi_os_printf(" Target: %p [%s]\n",
+ obj_desc->reference.object,
acpi_ut_get_type_name(((union
acpi_operand_object
*)
@@ -988,9 +981,9 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
case ACPI_TYPE_LOCAL_REFERENCE:
- acpi_os_printf("[Object Reference] %s",
- (acpi_ps_get_opcode_info
- (obj_desc->reference.opcode))->name);
+ acpi_os_printf("[Object Reference] Type [%s] %2.2X",
+ acpi_ut_get_reference_name(obj_desc),
+ obj_desc->reference.class);
acpi_ex_dump_reference_obj(obj_desc);
break;
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c
index 731414a581a..efb19134005 100644
--- a/drivers/acpi/executer/exmisc.c
+++ b/drivers/acpi/executer/exmisc.c
@@ -86,10 +86,10 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
/*
* Must be a reference to a Local or Arg
*/
- switch (obj_desc->reference.opcode) {
- case AML_LOCAL_OP:
- case AML_ARG_OP:
- case AML_DEBUG_OP:
+ switch (obj_desc->reference.class) {
+ case ACPI_REFCLASS_LOCAL:
+ case ACPI_REFCLASS_ARG:
+ case ACPI_REFCLASS_DEBUG:
/* The referenced object is the pseudo-node for the local/arg */
@@ -98,8 +98,8 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Unknown Reference opcode %X",
- obj_desc->reference.opcode));
+ ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+ obj_desc->reference.class));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
break;
@@ -127,7 +127,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_NO_MEMORY);
}
- reference_obj->reference.opcode = AML_REF_OF_OP;
+ reference_obj->reference.class = ACPI_REFCLASS_REFOF;
reference_obj->reference.object = referenced_obj;
*return_desc = reference_obj;
diff --git a/drivers/acpi/executer/exoparg1.c b/drivers/acpi/executer/exoparg1.c
index 7c3bea575e0..f622f9eac8a 100644
--- a/drivers/acpi/executer/exoparg1.c
+++ b/drivers/acpi/executer/exoparg1.c
@@ -825,16 +825,16 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
*
* Must resolve/dereference the local/arg reference first
*/
- switch (operand[0]->reference.opcode) {
- case AML_LOCAL_OP:
- case AML_ARG_OP:
+ switch (operand[0]->reference.class) {
+ case ACPI_REFCLASS_LOCAL:
+ case ACPI_REFCLASS_ARG:
/* Set Operand[0] to the value of the local/arg */
status =
acpi_ds_method_data_get_value
- (operand[0]->reference.opcode,
- operand[0]->reference.offset,
+ (operand[0]->reference.class,
+ operand[0]->reference.value,
walk_state, &temp_desc);
if (ACPI_FAILURE(status)) {
goto cleanup;
@@ -848,7 +848,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
operand[0] = temp_desc;
break;
- case AML_REF_OF_OP:
+ case ACPI_REFCLASS_REFOF:
/* Get the object to which the reference refers */
@@ -928,8 +928,8 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
* This must be a reference object produced by either the
* Index() or ref_of() operator
*/
- switch (operand[0]->reference.opcode) {
- case AML_INDEX_OP:
+ switch (operand[0]->reference.class) {
+ case ACPI_REFCLASS_INDEX:
/*
* The target type for the Index operator must be
@@ -965,7 +965,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
return_desc->integer.value =
temp_desc->buffer.
pointer[operand[0]->reference.
- offset];
+ value];
break;
case ACPI_TYPE_PACKAGE:
@@ -985,7 +985,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unknown Index TargetType %X in obj %p",
+ "Unknown Index TargetType %X in reference object %p",
operand[0]->reference.
target_type, operand[0]));
status = AE_AML_OPERAND_TYPE;
@@ -993,7 +993,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
}
break;
- case AML_REF_OF_OP:
+ case ACPI_REFCLASS_REFOF:
return_desc = operand[0]->reference.object;
@@ -1013,9 +1013,9 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unknown opcode in reference(%p) - %X",
+ "Unknown class in reference(%p) - %2.2X",
operand[0],
- operand[0]->reference.opcode));
+ operand[0]->reference.class));
status = AE_TYPE;
goto cleanup;
diff --git a/drivers/acpi/executer/exoparg2.c b/drivers/acpi/executer/exoparg2.c
index 8e8bbb6cceb..368def5dffc 100644
--- a/drivers/acpi/executer/exoparg2.c
+++ b/drivers/acpi/executer/exoparg2.c
@@ -391,8 +391,8 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
/* Initialize the Index reference object */
index = operand[1]->integer.value;
- return_desc->reference.offset = (u32) index;
- return_desc->reference.opcode = AML_INDEX_OP;
+ return_desc->reference.value = (u32) index;
+ return_desc->reference.class = ACPI_REFCLASS_INDEX;
/*
* At this point, the Source operand is a String, Buffer, or Package.
diff --git a/drivers/acpi/executer/exresnte.c b/drivers/acpi/executer/exresnte.c
index 5596f42c967..423ad3635f3 100644
--- a/drivers/acpi/executer/exresnte.c
+++ b/drivers/acpi/executer/exresnte.c
@@ -46,8 +46,6 @@
#include <acpi/acdispat.h>
#include <acpi/acinterp.h>
#include <acpi/acnamesp.h>
-#include <acpi/acparser.h>
-#include <acpi/amlcode.h>
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exresnte")
@@ -238,10 +236,10 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
case ACPI_TYPE_LOCAL_REFERENCE:
- switch (source_desc->reference.opcode) {
- case AML_LOAD_OP: /* This is a ddb_handle */
- case AML_REF_OF_OP:
- case AML_INDEX_OP:
+ switch (source_desc->reference.class) {
+ case ACPI_REFCLASS_TABLE: /* This is a ddb_handle */
+ case ACPI_REFCLASS_REFOF:
+ case ACPI_REFCLASS_INDEX:
/* Return an additional reference to the object */
@@ -253,10 +251,8 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* No named references are allowed here */
ACPI_ERROR((AE_INFO,
- "Unsupported Reference opcode %X (%s)",
- source_desc->reference.opcode,
- acpi_ps_get_opcode_name(source_desc->
- reference.opcode)));
+ "Unsupported Reference type %X",
+ source_desc->reference.class));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
diff --git a/drivers/acpi/executer/exresolv.c b/drivers/acpi/executer/exresolv.c
index b35f7c817ac..89571b92a52 100644
--- a/drivers/acpi/executer/exresolv.c
+++ b/drivers/acpi/executer/exresolv.c
@@ -47,7 +47,6 @@
#include <acpi/acdispat.h>
#include <acpi/acinterp.h>
#include <acpi/acnamesp.h>
-#include <acpi/acparser.h>
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exresolv")
@@ -141,7 +140,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
acpi_status status = AE_OK;
union acpi_operand_object *stack_desc;
union acpi_operand_object *obj_desc = NULL;
- u16 opcode;
+ u8 ref_type;
ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
@@ -152,19 +151,19 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
switch (ACPI_GET_OBJECT_TYPE(stack_desc)) {
case ACPI_TYPE_LOCAL_REFERENCE:
- opcode = stack_desc->reference.opcode;
+ ref_type = stack_desc->reference.class;
- switch (opcode) {
- case AML_LOCAL_OP:
- case AML_ARG_OP:
+ switch (ref_type) {
+ case ACPI_REFCLASS_LOCAL:
+ case ACPI_REFCLASS_ARG:
/*
* Get the local from the method's state info
* Note: this increments the local's object reference count
*/
- status = acpi_ds_method_data_get_value(opcode,
+ status = acpi_ds_method_data_get_value(ref_type,
stack_desc->
- reference.offset,
+ reference.value,
walk_state,
&obj_desc);
if (ACPI_FAILURE(status)) {
@@ -173,7 +172,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[Arg/Local %X] ValueObj is %p\n",
- stack_desc->reference.offset,
+ stack_desc->reference.value,
obj_desc));
/*
@@ -184,7 +183,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
*stack_ptr = obj_desc;
break;
- case AML_INDEX_OP:
+ case ACPI_REFCLASS_INDEX:
switch (stack_desc->reference.target_type) {
case ACPI_TYPE_BUFFER_FIELD:
@@ -239,15 +238,15 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
}
break;
- case AML_REF_OF_OP:
- case AML_DEBUG_OP:
- case AML_LOAD_OP:
+ case ACPI_REFCLASS_REFOF:
+ case ACPI_REFCLASS_DEBUG:
+ case ACPI_REFCLASS_TABLE:
/* Just leave the object as-is, do not dereference */
break;
- case AML_INT_NAMEPATH_OP: /* Reference to a named object */
+ case ACPI_REFCLASS_NAME: /* Reference to a named object */
/* Dereference the name */
@@ -273,8 +272,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference opcode %X (%s) in %p",
- opcode, acpi_ps_get_opcode_name(opcode),
+ "Unknown Reference type %X in %p", ref_type,
stack_desc));
status = AE_AML_INTERNAL;
break;
@@ -388,13 +386,13 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
* traversing the list of possibly many nested references.
*/
while (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) {
- switch (obj_desc->reference.opcode) {
- case AML_REF_OF_OP:
- case AML_INT_NAMEPATH_OP:
+ switch (obj_desc->reference.class) {
+ case ACPI_REFCLASS_REFOF:
+ case ACPI_REFCLASS_NAME:
/* Dereference the reference pointer */
- if (obj_desc->reference.opcode == AML_REF_OF_OP) {
+ if (obj_desc->reference.class == ACPI_REFCLASS_REFOF) {
node = obj_desc->reference.object;
} else { /* AML_INT_NAMEPATH_OP */
@@ -429,7 +427,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
}
break;
- case AML_INDEX_OP:
+ case ACPI_REFCLASS_INDEX:
/* Get the type of this reference (index into another object) */
@@ -455,22 +453,22 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
}
break;
- case AML_LOAD_OP:
+ case ACPI_REFCLASS_TABLE:
type = ACPI_TYPE_DDB_HANDLE;
goto exit;
- case AML_LOCAL_OP:
- case AML_ARG_OP:
+ case ACPI_REFCLASS_LOCAL:
+ case ACPI_REFCLASS_ARG:
if (return_desc) {
status =
acpi_ds_method_data_get_value(obj_desc->
reference.
- opcode,
+ class,
obj_desc->
reference.
- offset,
+ value,
walk_state,
&obj_desc);
if (ACPI_FAILURE(status)) {
@@ -481,10 +479,10 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
status =
acpi_ds_method_data_get_node(obj_desc->
reference.
- opcode,
+ class,
obj_desc->
reference.
- offset,
+ value,
walk_state,
&node);
if (ACPI_FAILURE(status)) {
@@ -499,7 +497,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
}
break;
- case AML_DEBUG_OP:
+ case ACPI_REFCLASS_DEBUG:
/* The Debug Object is of type "DebugObject" */
@@ -509,8 +507,8 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference subtype %X",
- obj_desc->reference.opcode));
+ "Unknown Reference Class %2.2X",
+ obj_desc->reference.class));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
}
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
index 54085f16ec2..0bb82593da7 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/executer/exresop.c
@@ -225,41 +225,36 @@ acpi_ex_resolve_operands(u16 opcode,
if (object_type == (u8) ACPI_TYPE_LOCAL_REFERENCE) {
- /* Decode the Reference */
+ /* Validate the Reference */
- op_info = acpi_ps_get_opcode_info(opcode);
- if (op_info->class == AML_CLASS_UNKNOWN) {
- return_ACPI_STATUS(AE_AML_BAD_OPCODE);
- }
+ switch (obj_desc->reference.class) {
+ case ACPI_REFCLASS_DEBUG:
- switch (obj_desc->reference.opcode) {
- case AML_DEBUG_OP:
target_op = AML_DEBUG_OP;
/*lint -fallthrough */
- case AML_INDEX_OP:
- case AML_REF_OF_OP:
- case AML_ARG_OP:
- case AML_LOCAL_OP:
- case AML_LOAD_OP: /* ddb_handle from LOAD_OP or LOAD_TABLE_OP */
- case AML_INT_NAMEPATH_OP: /* Reference to a named object */
-
- ACPI_DEBUG_ONLY_MEMBERS(ACPI_DEBUG_PRINT
- ((ACPI_DB_EXEC,
- "Operand is a Reference, RefOpcode [%s]\n",
- (acpi_ps_get_opcode_info
- (obj_desc->
- reference.
- opcode))->
- name)));
+ case ACPI_REFCLASS_ARG:
+ case ACPI_REFCLASS_LOCAL:
+ case ACPI_REFCLASS_INDEX:
+ case ACPI_REFCLASS_REFOF:
+ case ACPI_REFCLASS_TABLE: /* ddb_handle from LOAD_OP or LOAD_TABLE_OP */
+ case ACPI_REFCLASS_NAME: /* Reference to a named object */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Operand is a Reference, Class [%s] %2.2X\n",
+ acpi_ut_get_reference_name
+ (obj_desc),
+ obj_desc->reference.
+ class));
break;
default:
+
ACPI_ERROR((AE_INFO,
- "Operand is a Reference, Unknown Reference Opcode: %X",
- obj_desc->reference.
- opcode));
+ "Unknown Reference Class %2.2X in %p",
+ obj_desc->reference.class,
+ obj_desc));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
@@ -270,8 +265,7 @@ acpi_ex_resolve_operands(u16 opcode,
/* Invalid descriptor */
- ACPI_ERROR((AE_INFO,
- "Invalid descriptor %p [%s]",
+ ACPI_ERROR((AE_INFO, "Invalid descriptor %p [%s]",
obj_desc,
acpi_ut_get_descriptor_name(obj_desc)));
@@ -343,7 +337,7 @@ acpi_ex_resolve_operands(u16 opcode,
if ((opcode == AML_STORE_OP) &&
(ACPI_GET_OBJECT_TYPE(*stack_ptr) ==
ACPI_TYPE_LOCAL_REFERENCE)
- && ((*stack_ptr)->reference.opcode == AML_INDEX_OP)) {
+ && ((*stack_ptr)->reference.class == ACPI_REFCLASS_INDEX)) {
goto next_operand;
}
break;
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c
index 38b55e35249..3318df4cbd9 100644
--- a/drivers/acpi/executer/exstore.c
+++ b/drivers/acpi/executer/exstore.c
@@ -47,7 +47,6 @@
#include <acpi/acinterp.h>
#include <acpi/amlcode.h>
#include <acpi/acnamesp.h>
-#include <acpi/acparser.h>
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exstore")
@@ -179,22 +178,26 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
case ACPI_TYPE_LOCAL_REFERENCE:
- if (source_desc->reference.opcode == AML_INDEX_OP) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "[%s, 0x%X]\n",
- acpi_ps_get_opcode_name
- (source_desc->reference.opcode),
- source_desc->reference.offset));
- } else {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s]",
- acpi_ps_get_opcode_name
- (source_desc->reference.opcode)));
- }
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s] ",
+ acpi_ut_get_reference_name(source_desc)));
+
+ /* Decode the reference */
+
+ switch (source_desc->reference.class) {
+ case ACPI_REFCLASS_INDEX:
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%X\n",
+ source_desc->reference.value));
+ break;
+
+ case ACPI_REFCLASS_TABLE:
- if (source_desc->reference.opcode == AML_LOAD_OP) { /* Load and load_table */
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- " Table OwnerId %p\n",
- source_desc->reference.object));
+ "Table Index 0x%X\n",
+ source_desc->reference.value));
+ break;
+
+ default:
break;
}
@@ -347,15 +350,15 @@ acpi_ex_store(union acpi_operand_object *source_desc,
}
/*
- * Examine the Reference opcode. These cases are handled:
+ * Examine the Reference class. These cases are handled:
*
* 1) Store to Name (Change the object associated with a name)
* 2) Store to an indexed area of a Buffer or Package
* 3) Store to a Method Local or Arg
* 4) Store to the debug object
*/
- switch (ref_desc->reference.opcode) {
- case AML_REF_OF_OP:
+ switch (ref_desc->reference.class) {
+ case ACPI_REFCLASS_REFOF:
/* Storing an object into a Name "container" */
@@ -365,7 +368,7 @@ acpi_ex_store(union acpi_operand_object *source_desc,
ACPI_IMPLICIT_CONVERSION);
break;
- case AML_INDEX_OP:
+ case ACPI_REFCLASS_INDEX:
/* Storing to an Index (pointer into a packager or buffer) */
@@ -374,18 +377,18 @@ acpi_ex_store(union acpi_operand_object *source_desc,
walk_state);
break;
- case AML_LOCAL_OP:
- case AML_ARG_OP:
+ case ACPI_REFCLASS_LOCAL:
+ case ACPI_REFCLASS_ARG:
/* Store to a method local/arg */
status =
- acpi_ds_store_object_to_local(ref_desc->reference.opcode,
- ref_desc->reference.offset,
+ acpi_ds_store_object_to_local(ref_desc->reference.class,
+ ref_desc->reference.value,
source_desc, walk_state);
break;
- case AML_DEBUG_OP:
+ case ACPI_REFCLASS_DEBUG:
/*
* Storing to the Debug object causes the value stored to be
@@ -401,9 +404,9 @@ acpi_ex_store(union acpi_operand_object *source_desc,
default:
- ACPI_ERROR((AE_INFO, "Unknown Reference opcode %X",
- ref_desc->reference.opcode));
- ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_ERROR);
+ ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+ ref_desc->reference.class));
+ ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO);
status = AE_AML_INTERNAL;
break;
@@ -458,7 +461,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
if (ACPI_GET_OBJECT_TYPE(source_desc) ==
ACPI_TYPE_LOCAL_REFERENCE
- && source_desc->reference.opcode == AML_LOAD_OP) {
+ && source_desc->reference.class == ACPI_REFCLASS_TABLE) {
/* This is a DDBHandle, just add a reference to it */
@@ -553,7 +556,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
/* Store the source value into the target buffer byte */
- obj_desc->buffer.pointer[index_desc->reference.offset] = value;
+ obj_desc->buffer.pointer[index_desc->reference.value] = value;
break;
default:
diff --git a/drivers/acpi/executer/exstoren.c b/drivers/acpi/executer/exstoren.c
index a6d2168b81f..eef61a00803 100644
--- a/drivers/acpi/executer/exstoren.c
+++ b/drivers/acpi/executer/exstoren.c
@@ -121,7 +121,8 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
(ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_STRING) &&
!((ACPI_GET_OBJECT_TYPE(source_desc) ==
ACPI_TYPE_LOCAL_REFERENCE)
- && (source_desc->reference.opcode == AML_LOAD_OP))) {
+ && (source_desc->reference.class ==
+ ACPI_REFCLASS_TABLE))) {
/* Conversion successful but still not a valid type */
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 2655bc1b4ee..60d54d1f6b1 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -265,7 +265,7 @@ static int acpi_fan_add(struct acpi_device *device)
dev_info(&device->dev, "registered as cooling_device%d\n", cdev->id);
- acpi_driver_data(device) = cdev;
+ device->driver_data = cdev;
result = sysfs_create_link(&device->dev.kobj,
&cdev->device.kobj,
"thermal_cooling");
@@ -327,8 +327,8 @@ static int acpi_fan_resume(struct acpi_device *device)
result = acpi_bus_get_power(device->handle, &power_state);
if (result) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Error reading fan power state\n"));
+ printk(KERN_ERR PREFIX
+ "Error reading fan power state\n");
return result;
}
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index dba3cfbe8cb..25dccdf179b 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -78,19 +78,17 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
return_ACPI_STATUS(status);
}
- /* Set the vector */
+ /*
+ * According to the ACPI specification 2.0c and later, the 64-bit
+ * waking vector should be cleared and the 32-bit waking vector should
+ * be used, unless we want the wake-up code to be called by the BIOS in
+ * Protected Mode. Some systems (for example HP dv5-1004nr) are known
+ * to fail to resume if the 64-bit vector is used.
+ */
+ if (facs->version >= 1)
+ facs->xfirmware_waking_vector = 0;
- if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) {
- /*
- * ACPI 1.0 FACS or short table or optional X_ field is zero
- */
- facs->firmware_waking_vector = (u32) physical_address;
- } else {
- /*
- * ACPI 2.0 FACS with valid X_ field
- */
- facs->xfirmware_waking_vector = physical_address;
- }
+ facs->firmware_waking_vector = (u32)physical_address;
return_ACPI_STATUS(AE_OK);
}
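As a usage note, a minimal sketch of a caller of acpi_set_firmware_waking_vector() under the new behaviour; the wakeup_paddr parameter name is a placeholder for whatever real-mode trampoline address the platform code has prepared, not a name taken from this patch.

/*
 * Sketch only: arm the FACS waking vector before entering a sleep state.
 * With the change above, only the 32-bit vector is populated and the
 * 64-bit X_ field is cleared on version 1+ FACS tables.
 */
static acpi_status example_arm_waking_vector(acpi_physical_address wakeup_paddr)
{
	return acpi_set_firmware_waking_vector(wakeup_paddr);
}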
@@ -134,20 +132,7 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
}
/* Get the vector */
-
- if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) {
- /*
- * ACPI 1.0 FACS or short table or optional X_ field is zero
- */
- *physical_address =
- (acpi_physical_address) facs->firmware_waking_vector;
- } else {
- /*
- * ACPI 2.0 FACS with valid X_ field
- */
- *physical_address =
- (acpi_physical_address) facs->xfirmware_waking_vector;
- }
+ *physical_address = (acpi_physical_address)facs->firmware_waking_vector;
return_ACPI_STATUS(AE_OK);
}
@@ -627,6 +612,13 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
}
/* TBD: _WAK "sometimes" returns stuff - do we want to look at it? */
+ /*
+ * Some BIOSes assume that WAK_STS will be cleared on resume and use
+ * it to determine whether the system is rebooting or resuming. Clear
+ * it for compatibility.
+ */
+ acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
+
acpi_gbl_system_awake_and_running = TRUE;
/* Enable power button */
diff --git a/drivers/acpi/namespace/Makefile b/drivers/acpi/namespace/Makefile
index 3f63d364069..371a2daf837 100644
--- a/drivers/acpi/namespace/Makefile
+++ b/drivers/acpi/namespace/Makefile
@@ -5,7 +5,7 @@
obj-y := nsaccess.o nsload.o nssearch.o nsxfeval.o \
nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \
nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \
- nsparse.o
+ nsparse.o nspredef.o
obj-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c
index 0ab22004728..cc0ae39440e 100644
--- a/drivers/acpi/namespace/nsdump.c
+++ b/drivers/acpi/namespace/nsdump.c
@@ -43,7 +43,6 @@
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
-#include <acpi/acparser.h>
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsdump")
@@ -334,9 +333,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
case ACPI_TYPE_LOCAL_REFERENCE:
acpi_os_printf("[%s]\n",
- acpi_ps_get_opcode_name(obj_desc->
- reference.
- opcode));
+ acpi_ut_get_reference_name(obj_desc));
break;
case ACPI_TYPE_BUFFER_FIELD:
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index d369164e00b..4cdf03ac2b4 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -78,6 +78,7 @@ ACPI_MODULE_NAME("nseval")
acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
{
acpi_status status;
+ struct acpi_namespace_node *node;
ACPI_FUNCTION_TRACE(ns_evaluate);
@@ -117,6 +118,8 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
info->resolved_node,
acpi_ns_get_attached_object(info->resolved_node)));
+ node = info->resolved_node;
+
/*
* Two major cases here:
*
@@ -148,21 +151,22 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
info->param_count++;
}
- /* Error if too few arguments were passed in */
+ /*
+ * Warning if too few or too many arguments have been passed by the
+ * caller. We don't want to abort here with an error because an
+ * incorrect number of arguments may not cause the method to fail.
+ * However, the method will fail if there are too few arguments passed
+ * and the method attempts to use one of the missing ones.
+ */
if (info->param_count < info->obj_desc->method.param_count) {
- ACPI_ERROR((AE_INFO,
+ ACPI_WARNING((AE_INFO,
"Insufficient arguments - "
"method [%4.4s] needs %d, found %d",
acpi_ut_get_node_name(info->resolved_node),
info->obj_desc->method.param_count,
info->param_count));
- return_ACPI_STATUS(AE_MISSING_ARGUMENTS);
- }
-
- /* Just a warning if too many arguments */
-
- else if (info->param_count >
+ } else if (info->param_count >
info->obj_desc->method.param_count) {
ACPI_WARNING((AE_INFO,
"Excess arguments - "
@@ -195,7 +199,28 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
} else {
/*
* 2) Object is not a method, return its current value
+ *
+ * Disallow certain object types. For these, "evaluation" is undefined.
*/
+ switch (info->resolved_node->type) {
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_EVENT:
+ case ACPI_TYPE_MUTEX:
+ case ACPI_TYPE_REGION:
+ case ACPI_TYPE_THERMAL:
+ case ACPI_TYPE_LOCAL_SCOPE:
+
+ ACPI_ERROR((AE_INFO,
+ "[%4.4s] Evaluation of object type [%s] is not supported",
+ info->resolved_node->name.ascii,
+ acpi_ut_get_type_name(info->resolved_node->
+ type)));
+
+ return_ACPI_STATUS(AE_TYPE);
+
+ default:
+ break;
+ }
/*
* Objects require additional resolution steps (e.g., the Node may be
@@ -239,9 +264,35 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
}
}
- /*
- * Check if there is a return value that must be dealt with
- */
+ /* Validation of return values for ACPI-predefined methods and objects */
+
+ if ((status == AE_OK) || (status == AE_CTRL_RETURN_VALUE)) {
+ /*
+ * If this is the first evaluation, check the return value. This
+ * ensures that any warnings will only be emitted during the very
+ * first evaluation of the object.
+ */
+ if (!(node->flags & ANOBJ_EVALUATED)) {
+ /*
+ * Check for a predefined ACPI name. If found, validate the
+ * returned object.
+ *
+ * Note: Ignore return status for now, emit warnings if there are
+ * problems with the returned object. May change later to abort
+ * the method on invalid return object.
+ */
+ (void)acpi_ns_check_predefined_names(node,
+ info->
+ return_object);
+ }
+
+ /* Mark the node as having been evaluated */
+
+ node->flags |= ANOBJ_EVALUATED;
+ }
+
+ /* Check if there is a return value that must be dealt with */
+
if (status == AE_CTRL_RETURN_VALUE) {
/* If caller does not want the return value, delete it */
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index bd577387800..42a39a7c96e 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -115,7 +115,6 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
return (AE_OK);
}
-#ifdef ACPI_DEBUG_OUTPUT
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_external_pathname
@@ -142,7 +141,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
size = acpi_ns_get_pathname_length(node);
if (!size) {
- return (NULL);
+ return_PTR(NULL);
}
/* Allocate a buffer to be returned to caller */
@@ -157,12 +156,12 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
status = acpi_ns_build_external_path(node, size, name_buffer);
if (ACPI_FAILURE(status)) {
- return (NULL);
+ ACPI_FREE(name_buffer);
+ return_PTR(NULL);
}
return_PTR(name_buffer);
}
-#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/namespace/nspredef.c b/drivers/acpi/namespace/nspredef.c
new file mode 100644
index 00000000000..0f17cf0898c
--- /dev/null
+++ b/drivers/acpi/namespace/nspredef.c
@@ -0,0 +1,900 @@
+/******************************************************************************
+ *
+ * Module Name: nspredef - Validation of ACPI predefined methods and objects
+ * $Revision: 1.1 $
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acpredef.h>
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("nspredef")
+
+/*******************************************************************************
+ *
+ * This module validates predefined ACPI objects that appear in the namespace,
+ * at the time they are evaluated (via acpi_evaluate_object). The purpose of this
+ * validation is to detect problems with BIOS-exposed predefined ACPI objects
+ * before the results are returned to the ACPI-related drivers.
+ *
+ * There are several areas that are validated:
+ *
+ * 1) The number of input arguments as defined by the method/object in the
+ * ASL is validated against the ACPI specification.
+ * 2) The type of the return object (if any) is validated against the ACPI
+ * specification.
+ * 3) For returned package objects, the count of package elements is
+ * validated, as well as the type of each package element. Nested
+ * packages are supported.
+ *
+ * For any problems found, a warning message is issued.
+ *
+ ******************************************************************************/
+/* Local prototypes */
+static acpi_status
+acpi_ns_check_package(char *pathname,
+ union acpi_operand_object *return_object,
+ const union acpi_predefined_info *predefined);
+
+static acpi_status
+acpi_ns_check_package_elements(char *pathname,
+ union acpi_operand_object **elements,
+ u8 type1, u32 count1, u8 type2, u32 count2);
+
+static acpi_status
+acpi_ns_check_object_type(char *pathname,
+ union acpi_operand_object *return_object,
+ u32 expected_btypes, u32 package_index);
+
+static acpi_status
+acpi_ns_check_reference(char *pathname,
+ union acpi_operand_object *return_object);
+
+/*
+ * Names for the types that can be returned by the predefined objects.
+ * Used for warning messages. Must be in the same order as the ACPI_RTYPEs
+ */
+static const char *acpi_rtype_names[] = {
+ "/Integer",
+ "/String",
+ "/Buffer",
+ "/Package",
+ "/Reference",
+};
+
+#define ACPI_NOT_PACKAGE ACPI_UINT32_MAX
+
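The expected_btypes field checked throughout this file is a bitmask whose bit positions correspond to the acpi_rtype_names strings above. A small illustrative sketch of how such a mask could be rendered for a warning message; this is a simplification of what acpi_ns_check_object_type() does, and the helper name and caller-supplied buffer are assumptions for the example.

/*
 * Sketch only (not part of the patch): render an expected_btypes bitmask
 * as a "/Integer/Buffer"-style string using the table above.  The real
 * formatting lives inside acpi_ns_check_object_type().
 */
static void example_format_expected_types(u32 expected_btypes, char *buf,
					  acpi_size buf_size)
{
	u32 i;

	buf[0] = 0;
	for (i = 0; i < 5; i++) {	/* one bit per acpi_rtype_names entry */
		if (expected_btypes & (1u << i))
			strncat(buf, acpi_rtype_names[i],
				buf_size - strlen(buf) - 1);
	}
}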
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_predefined_names
+ *
+ * PARAMETERS: Node - Namespace node for the method/object
+ * return_object - Object returned from the evaluation of this
+ * method/object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check an ACPI name for a match in the predefined name list.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
+ union acpi_operand_object *return_object)
+{
+ acpi_status status = AE_OK;
+ const union acpi_predefined_info *predefined;
+ char *pathname;
+
+ /* Match the name for this method/object against the predefined list */
+
+ predefined = acpi_ns_check_for_predefined_name(node);
+ if (!predefined) {
+
+ /* Name was not one of the predefined names */
+
+ return (AE_OK);
+ }
+
+ /* Get the full pathname to the object, for use in error messages */
+
+ pathname = acpi_ns_get_external_pathname(node);
+ if (!pathname) {
+ pathname = ACPI_CAST_PTR(char, predefined->info.name);
+ }
+
+ /*
+ * Check that the parameter count for this method is in accordance
+ * with the ACPI specification.
+ */
+ acpi_ns_check_parameter_count(pathname, node, predefined);
+
+ /*
+ * If there is no return value, check if we require a return value for
+ * this predefined name. Either one return value is expected, or none,
+ * for both methods and other objects.
+ *
+ * Exit now if there is no return object. Warning if one was expected.
+ */
+ if (!return_object) {
+ if ((predefined->info.expected_btypes) &&
+ (!(predefined->info.expected_btypes & ACPI_RTYPE_NONE))) {
+ ACPI_ERROR((AE_INFO,
+ "%s: Missing expected return value",
+ pathname));
+
+ status = AE_AML_NO_RETURN_VALUE;
+ }
+ goto exit;
+ }
+
+ /*
+ * We have a return value, but if one wasn't expected, just exit; this is
+ * not a problem.
+ *
+ * For example, if "Implicit return value" is enabled, methods will
+ * always return a value
+ */
+ if (!predefined->info.expected_btypes) {
+ goto exit;
+ }
+
+ /*
+ * Check that the type of the return object is what is expected for
+ * this predefined name
+ */
+ status = acpi_ns_check_object_type(pathname, return_object,
+ predefined->info.expected_btypes,
+ ACPI_NOT_PACKAGE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* For returned Package objects, check the type of all sub-objects */
+
+ if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_PACKAGE) {
+ status =
+ acpi_ns_check_package(pathname, return_object, predefined);
+ }
+
+ exit:
+ if (pathname) {
+ ACPI_FREE(pathname);
+ }
+
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_parameter_count
+ *
+ * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * Node - Namespace node for the method/object
+ * Predefined - Pointer to entry in predefined name table
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Check that the declared (in ASL/AML) parameter count for a
+ * predefined name is what is expected (i.e., what is defined in
+ * the ACPI specification for this predefined name.)
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_check_parameter_count(char *pathname,
+ struct acpi_namespace_node *node,
+ const union acpi_predefined_info *predefined)
+{
+ u32 param_count;
+ u32 required_params_current;
+ u32 required_params_old;
+
+ /*
+ * Check that the ASL-defined parameter count is what is expected for
+ * this predefined name.
+ *
+ * Methods have 0-7 parameters. All other types have zero.
+ */
+ param_count = 0;
+ if (node->type == ACPI_TYPE_METHOD) {
+ param_count = node->object->method.param_count;
+ }
+
+ /* Validate parameter count - allow two different legal counts (_SCP) */
+
+ required_params_current = predefined->info.param_count & 0x0F;
+ required_params_old = predefined->info.param_count >> 4;
+
+ if ((param_count != required_params_current) &&
+ (param_count != required_params_old)) {
+ ACPI_WARNING((AE_INFO,
+ "%s: Parameter count mismatch - ASL declared %d, expected %d",
+ pathname, param_count, required_params_current));
+ }
+}
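+
+/*
+ * Illustration only (not part of the original patch): the predefined
+ * parameter count is packed into a single byte. The low nibble holds the
+ * count required by the current ACPI specification and the high nibble
+ * holds an older, still-legal count (used by names such as _SCP whose
+ * argument count changed between specification revisions). A minimal
+ * decoding sketch, assuming a hypothetical packed value of 0x13:
+ *
+ * required_params_current = 0x13 & 0x0F;   => 3 (current spec)
+ * required_params_old     = 0x13 >> 4;     => 1 (older spec)
+ *
+ * acpi_ns_check_parameter_count() accepts either count.
+ */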
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_for_predefined_name
+ *
+ * PARAMETERS: Node - Namespace node for the method/object
+ *
+ * RETURN: Pointer to entry in predefined table. NULL indicates not found.
+ *
+ * DESCRIPTION: Check an object name against the predefined object list.
+ *
+ ******************************************************************************/
+
+const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
+ acpi_namespace_node
+ *node)
+{
+ const union acpi_predefined_info *this_name;
+
+ /* Quick check for a predefined name, first character must be underscore */
+
+ if (node->name.ascii[0] != '_') {
+ return (NULL);
+ }
+
+ /* Search info table for a predefined method/object name */
+
+ this_name = predefined_names;
+ while (this_name->info.name[0]) {
+ if (ACPI_COMPARE_NAME(node->name.ascii, this_name->info.name)) {
+
+ /* Return pointer to this table entry */
+
+ return (this_name);
+ }
+
+ /*
+ * Skip next entry in the table if this name returns a Package
+ * (next entry contains the package info)
+ */
+ if (this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) {
+ this_name++;
+ }
+
+ this_name++;
+ }
+
+ return (NULL);
+}
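+
+/*
+ * Illustration only (a sketch, not part of the original patch): since a
+ * name that returns a Package occupies two consecutive slots in
+ * predefined_names (the second slot holds the package info), any walker
+ * over the table must skip that extra slot, exactly as done above:
+ *
+ * for (this_name = predefined_names; this_name->info.name[0]; this_name++) {
+ *     (use this_name->info here)
+ *     if (this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE)
+ *         this_name++;    (skip the package info entry)
+ * }
+ */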
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_package
+ *
+ * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * return_object - Object returned from the evaluation of a
+ * method or object
+ * Predefined - Pointer to entry in predefined name table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check a returned package object for the correct count and
+ * correct type of all sub-objects.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_package(char *pathname,
+ union acpi_operand_object *return_object,
+ const union acpi_predefined_info *predefined)
+{
+ const union acpi_predefined_info *package;
+ union acpi_operand_object *sub_package;
+ union acpi_operand_object **elements;
+ union acpi_operand_object **sub_elements;
+ acpi_status status;
+ u32 expected_count;
+ u32 count;
+ u32 i;
+ u32 j;
+
+ ACPI_FUNCTION_NAME(ns_check_package);
+
+ /* The package info for this name is in the next table entry */
+
+ package = predefined + 1;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "%s Validating return Package of Type %X, Count %X\n",
+ pathname, package->ret_info.type,
+ return_object->package.count));
+
+ /* Extract package count and elements array */
+
+ elements = return_object->package.elements;
+ count = return_object->package.count;
+
+ /* The package must have at least one element, else invalid */
+
+ if (!count) {
+ ACPI_WARNING((AE_INFO,
+ "%s: Return Package has no elements (empty)",
+ pathname));
+
+ return (AE_AML_OPERAND_VALUE);
+ }
+
+ /*
+ * Decode the type of the expected package contents
+ *
+ * PTYPE1 packages contain no subpackages
+ * PTYPE2 packages contain sub-packages
+ */
+ switch (package->ret_info.type) {
+ case ACPI_PTYPE1_FIXED:
+
+ /*
+ * The package count is fixed and there are no sub-packages
+ *
+ * If package is too small, exit.
+ * If package is larger than expected, issue warning but continue
+ */
+ expected_count =
+ package->ret_info.count1 + package->ret_info.count2;
+ if (count < expected_count) {
+ goto package_too_small;
+ } else if (count > expected_count) {
+ ACPI_WARNING((AE_INFO,
+ "%s: Return Package is larger than needed - "
+ "found %u, expected %u", pathname, count,
+ expected_count));
+ }
+
+ /* Validate all elements of the returned package */
+
+ status = acpi_ns_check_package_elements(pathname, elements,
+ package->ret_info.
+ object_type1,
+ package->ret_info.
+ count1,
+ package->ret_info.
+ object_type2,
+ package->ret_info.
+ count2);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_PTYPE1_VAR:
+
+ /*
+ * The package count is variable, there are no sub-packages, and all
+ * elements must be of the same type
+ */
+ for (i = 0; i < count; i++) {
+ status = acpi_ns_check_object_type(pathname, *elements,
+ package->ret_info.
+ object_type1, i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ elements++;
+ }
+ break;
+
+ case ACPI_PTYPE1_OPTION:
+
+ /*
+ * The package count is variable and there are no sub-packages. There is
+ * a fixed number of required elements, followed by a variable number of
+ * optional elements.
+ *
+ * Check if package is at least as large as the minimum required
+ */
+ expected_count = package->ret_info3.count;
+ if (count < expected_count) {
+ goto package_too_small;
+ }
+
+ /* Variable number of sub-objects */
+
+ for (i = 0; i < count; i++) {
+ if (i < package->ret_info3.count) {
+
+ /* These are the required package elements (0, 1, or 2) */
+
+ status =
+ acpi_ns_check_object_type(pathname,
+ *elements,
+ package->
+ ret_info3.
+ object_type[i],
+ i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ } else {
+ /* These are the optional package elements */
+
+ status =
+ acpi_ns_check_object_type(pathname,
+ *elements,
+ package->
+ ret_info3.
+ tail_object_type,
+ i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ elements++;
+ }
+ break;
+
+ case ACPI_PTYPE2_PKG_COUNT:
+
+ /* First element is the (Integer) count of sub-packages to follow */
+
+ status = acpi_ns_check_object_type(pathname, *elements,
+ ACPI_RTYPE_INTEGER, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * Count cannot be larger than the parent package length, but allow it
+ * to be smaller. The >= accounts for the Integer above.
+ */
+ expected_count = (u32) (*elements)->integer.value;
+ if (expected_count >= count) {
+ goto package_too_small;
+ }
+
+ count = expected_count;
+ elements++;
+
+ /* Now we can walk the sub-packages */
+
+ /*lint -fallthrough */
+
+ case ACPI_PTYPE2:
+ case ACPI_PTYPE2_FIXED:
+ case ACPI_PTYPE2_MIN:
+ case ACPI_PTYPE2_COUNT:
+
+ /*
+ * These types all return a single package that consists of a variable
+ * number of sub-packages
+ */
+ for (i = 0; i < count; i++) {
+ sub_package = *elements;
+ sub_elements = sub_package->package.elements;
+
+ /* Each sub-object must be of type Package */
+
+ status =
+ acpi_ns_check_object_type(pathname, sub_package,
+ ACPI_RTYPE_PACKAGE, i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Examine the different types of sub-packages */
+
+ switch (package->ret_info.type) {
+ case ACPI_PTYPE2:
+ case ACPI_PTYPE2_PKG_COUNT:
+
+ /* Each subpackage has a fixed number of elements */
+
+ expected_count =
+ package->ret_info.count1 +
+ package->ret_info.count2;
+ if (sub_package->package.count !=
+ expected_count) {
+ count = sub_package->package.count;
+ goto package_too_small;
+ }
+
+ status =
+ acpi_ns_check_package_elements(pathname,
+ sub_elements,
+ package->
+ ret_info.
+ object_type1,
+ package->
+ ret_info.
+ count1,
+ package->
+ ret_info.
+ object_type2,
+ package->
+ ret_info.
+ count2);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_PTYPE2_FIXED:
+
+ /* Each sub-package has a fixed length */
+
+ expected_count = package->ret_info2.count;
+ if (sub_package->package.count < expected_count) {
+ count = sub_package->package.count;
+ goto package_too_small;
+ }
+
+ /* Check the type of each sub-package element */
+
+ for (j = 0; j < expected_count; j++) {
+ status =
+ acpi_ns_check_object_type(pathname,
+ sub_elements
+ [j],
+ package->
+ ret_info2.
+ object_type
+ [j], j);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ break;
+
+ case ACPI_PTYPE2_MIN:
+
+ /* Each sub-package has a variable but minimum length */
+
+ expected_count = package->ret_info.count1;
+ if (sub_package->package.count < expected_count) {
+ count = sub_package->package.count;
+ goto package_too_small;
+ }
+
+ /* Check the type of each sub-package element */
+
+ status =
+ acpi_ns_check_package_elements(pathname,
+ sub_elements,
+ package->
+ ret_info.
+ object_type1,
+ sub_package->
+ package.
+ count, 0, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ case ACPI_PTYPE2_COUNT:
+
+ /* First element is the (Integer) count of elements to follow */
+
+ status =
+ acpi_ns_check_object_type(pathname,
+ *sub_elements,
+ ACPI_RTYPE_INTEGER,
+ 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Make sure package is large enough for the Count */
+
+ expected_count =
+ (u32) (*sub_elements)->integer.value;
+ if (sub_package->package.count < expected_count) {
+ count = sub_package->package.count;
+ goto package_too_small;
+ }
+
+ /* Check the type of each sub-package element */
+
+ status =
+ acpi_ns_check_package_elements(pathname,
+ (sub_elements
+ + 1),
+ package->
+ ret_info.
+ object_type1,
+ (expected_count
+ - 1), 0, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ elements++;
+ }
+ break;
+
+ default:
+
+ /* Should not get here if predefined info table is correct */
+
+ ACPI_WARNING((AE_INFO,
+ "%s: Invalid internal return type in table entry: %X",
+ pathname, package->ret_info.type));
+
+ return (AE_AML_INTERNAL);
+ }
+
+ return (AE_OK);
+
+ package_too_small:
+
+ /* Error exit for the case with an incorrect package count */
+
+ ACPI_WARNING((AE_INFO, "%s: Return Package is too small - "
+ "found %u, expected %u", pathname, count,
+ expected_count));
+
+ return (AE_AML_OPERAND_VALUE);
+}
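+
+/*
+ * Illustration only (not part of the original patch): a return value of the
+ * ACPI_PTYPE2_PKG_COUNT form validated above looks like
+ *
+ * Package { 2, Package { ... }, Package { ... } }
+ *
+ * where the leading Integer gives the number of sub-packages that follow
+ * and each sub-package is then checked element by element.
+ */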
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_package_elements
+ *
+ * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * Elements - Pointer to the package elements array
+ * Type1 - Object type for first group
+ * Count1 - Count for first group
+ * Type2 - Object type for second group
+ * Count2 - Count for second group
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check that all elements of a package are of the correct object
+ * type. Supports up to two groups of different object types.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_package_elements(char *pathname,
+ union acpi_operand_object **elements,
+ u8 type1, u32 count1, u8 type2, u32 count2)
+{
+ union acpi_operand_object **this_element = elements;
+ acpi_status status;
+ u32 i;
+
+ /*
+ * Up to two groups of package elements are supported by the data
+ * structure. All elements in each group must be of the same type.
+ * The second group can have a count of zero.
+ */
+ for (i = 0; i < count1; i++) {
+ status = acpi_ns_check_object_type(pathname, *this_element,
+ type1, i);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ this_element++;
+ }
+
+ for (i = 0; i < count2; i++) {
+ status = acpi_ns_check_object_type(pathname, *this_element,
+ type2, (i + count1));
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ this_element++;
+ }
+
+ return (AE_OK);
+}
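+
+/*
+ * Illustration only (not part of the original patch): with Type1 = Integer,
+ * Count1 = 2, Type2 = Buffer, Count2 = 1, the helper above accepts exactly
+ * a package of the form { Integer, Integer, Buffer } and warns with the
+ * index of the first element whose type does not match.
+ */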
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_object_type
+ *
+ * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * return_object - Object return from the execution of this
+ * method/object
+ * expected_btypes - Bitmap of expected return type(s)
+ * package_index - Index of object within parent package (if
+ * applicable - ACPI_NOT_PACKAGE otherwise)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check the type of the return object against the expected object
+ * type(s). Use of Btype allows multiple expected object types.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_object_type(char *pathname,
+ union acpi_operand_object *return_object,
+ u32 expected_btypes, u32 package_index)
+{
+ acpi_status status = AE_OK;
+ u32 return_btype;
+ char type_buffer[48]; /* Room for 5 types */
+ u32 this_rtype;
+ u32 i;
+ u32 j;
+
+ /*
+ * If we get a NULL return_object here, it is a NULL package element,
+ * and this is always an error.
+ */
+ if (!return_object) {
+ goto type_error_exit;
+ }
+
+ /* A Namespace node should not get here, but make sure */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) {
+ ACPI_WARNING((AE_INFO,
+ "%s: Invalid return type - Found a Namespace node [%4.4s] type %s",
+ pathname, return_object->node.name.ascii,
+ acpi_ut_get_type_name(return_object->node.type)));
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ /*
+ * Convert the object type (ACPI_TYPE_xxx) to a bitmapped object type.
+ * The bitmapped type allows multiple possible return types.
+ *
+ * Note, the cases below must handle all of the possible types returned
+ * from all of the predefined names (including elements of returned
+ * packages)
+ */
+ switch (ACPI_GET_OBJECT_TYPE(return_object)) {
+ case ACPI_TYPE_INTEGER:
+ return_btype = ACPI_RTYPE_INTEGER;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+ return_btype = ACPI_RTYPE_BUFFER;
+ break;
+
+ case ACPI_TYPE_STRING:
+ return_btype = ACPI_RTYPE_STRING;
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ return_btype = ACPI_RTYPE_PACKAGE;
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ return_btype = ACPI_RTYPE_REFERENCE;
+ break;
+
+ default:
+ /* Not one of the supported objects, must be incorrect */
+
+ goto type_error_exit;
+ }
+
+ /* Is the object one of the expected types? */
+
+ if (!(return_btype & expected_btypes)) {
+ goto type_error_exit;
+ }
+
+ /* For reference objects, check that the reference type is correct */
+
+ if (ACPI_GET_OBJECT_TYPE(return_object) == ACPI_TYPE_LOCAL_REFERENCE) {
+ status = acpi_ns_check_reference(pathname, return_object);
+ }
+
+ return (status);
+
+ type_error_exit:
+
+ /* Create a string with all expected types for this predefined object */
+
+ j = 1;
+ type_buffer[0] = 0;
+ this_rtype = ACPI_RTYPE_INTEGER;
+
+ for (i = 0; i < ACPI_NUM_RTYPES; i++) {
+
+ /* If one of the expected types, concatenate the name of this type */
+
+ if (expected_btypes & this_rtype) {
+ ACPI_STRCAT(type_buffer, &acpi_rtype_names[i][j]);
+ j = 0; /* Use name separator from now on */
+ }
+ this_rtype <<= 1; /* Next Rtype */
+ }
+
+ if (package_index == ACPI_NOT_PACKAGE) {
+ ACPI_WARNING((AE_INFO,
+ "%s: Return type mismatch - found %s, expected %s",
+ pathname,
+ acpi_ut_get_object_type_name(return_object),
+ type_buffer));
+ } else {
+ ACPI_WARNING((AE_INFO,
+ "%s: Return Package type mismatch at index %u - "
+ "found %s, expected %s", pathname, package_index,
+ acpi_ut_get_object_type_name(return_object),
+ type_buffer));
+ }
+
+ return (AE_AML_OPERAND_TYPE);
+}
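+
+/*
+ * Illustration only (not part of the original patch): the error path above
+ * converts the expected_btypes bitmap into a readable list by concatenating
+ * entries of acpi_rtype_names, skipping the leading '/' of the first match.
+ * For example, (ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER) produces the string
+ * "Integer/Buffer" in the warning message.
+ */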
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_check_reference
+ *
+ * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
+ * return_object - Object returned from the evaluation of a
+ * method or object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check a returned reference object for the correct reference
+ * type. The only reference type that can be returned from a
+ * predefined method is a named reference. All others are invalid.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_check_reference(char *pathname,
+ union acpi_operand_object *return_object)
+{
+
+ /*
+ * Check the reference object for the correct reference type (opcode).
+ * The only type of reference that can be converted to a union acpi_object is
+ * a reference to a named object (reference class: NAME).
+ */
+ if (return_object->reference.class == ACPI_REFCLASS_NAME) {
+ return (AE_OK);
+ }
+
+ ACPI_WARNING((AE_INFO,
+ "%s: Return type mismatch - unexpected reference object type [%s] %2.2X",
+ pathname, acpi_ut_get_reference_name(return_object),
+ return_object->reference.class));
+
+ return (AE_AML_OPERAND_TYPE);
+}
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/namespace/nssearch.c
index 8399276cba1..a9a80bf811b 100644
--- a/drivers/acpi/namespace/nssearch.c
+++ b/drivers/acpi/namespace/nssearch.c
@@ -331,7 +331,7 @@ acpi_ns_search_and_enter(u32 target_name,
"Found bad character(s) in name, repaired: [%4.4s]\n",
ACPI_CAST_PTR(char, &target_name)));
} else {
- ACPI_DEBUG_PRINT((ACPI_DB_WARN,
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Found bad character(s) in name, repaired: [%4.4s]\n",
ACPI_CAST_PTR(char, &target_name)));
}
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index 38be5865d95..a085cc39c05 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -48,6 +48,10 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsxfeval")
+
+/* Local prototypes */
+static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
+
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
@@ -69,6 +73,7 @@ ACPI_MODULE_NAME("nsxfeval")
* be valid (non-null)
*
******************************************************************************/
+
acpi_status
acpi_evaluate_object_typed(acpi_handle handle,
acpi_string pathname,
@@ -283,6 +288,10 @@ acpi_evaluate_object(acpi_handle handle,
if (ACPI_SUCCESS(status)) {
+ /* Dereference Index and ref_of references */
+
+ acpi_ns_resolve_references(info);
+
/* Get the size of the returned object */
status =
@@ -352,6 +361,74 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
/*******************************************************************************
*
+ * FUNCTION: acpi_ns_resolve_references
+ *
+ * PARAMETERS: Info - Evaluation info block
+ *
+ * RETURN: Info->return_object is replaced with the dereferenced object
+ *
+ * DESCRIPTION: Dereference certain reference objects. Called before an
+ * internal return object is converted to an external union acpi_object.
+ *
+ * Performs an automatic dereference of Index and ref_of reference objects.
+ * These reference objects are not supported by the external union acpi_object,
+ * so this is a last-resort effort to return something useful. It also provides
+ * compatibility with other ACPI implementations.
+ *
+ * NOTE: does not handle references within returned package objects or nested
+ * references, but this support could be added later if found to be necessary.
+ *
+ ******************************************************************************/
+static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
+{
+ union acpi_operand_object *obj_desc = NULL;
+ struct acpi_namespace_node *node;
+
+ /* We are interested in reference objects only */
+
+ if (ACPI_GET_OBJECT_TYPE(info->return_object) !=
+ ACPI_TYPE_LOCAL_REFERENCE) {
+ return;
+ }
+
+ /*
+ * Two types of references are supported - those created by Index and
+ * ref_of operators. A name reference (AML_NAMEPATH_OP) can be converted
+ * to a union acpi_object, so it is not dereferenced here. A ddb_handle
+ * (AML_LOAD_OP) cannot be dereferenced, nor can it be converted to
+ * a union acpi_object.
+ */
+ switch (info->return_object->reference.class) {
+ case ACPI_REFCLASS_INDEX:
+
+ obj_desc = *(info->return_object->reference.where);
+ break;
+
+ case ACPI_REFCLASS_REFOF:
+
+ node = info->return_object->reference.object;
+ if (node) {
+ obj_desc = node->object;
+ }
+ break;
+
+ default:
+ return;
+ }
+
+ /* Replace the existing reference object */
+
+ if (obj_desc) {
+ acpi_ut_add_reference(obj_desc);
+ acpi_ut_remove_reference(info->return_object);
+ info->return_object = obj_desc;
+ }
+
+ return;
+}
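+
+/*
+ * Illustration only (a hypothetical sketch, not part of the original patch):
+ * with the dereference above in place, a caller that evaluates a control
+ * method ending in, say, "Return (RefOf (ABCD))" receives the target object
+ * itself rather than a reference that cannot be represented externally:
+ *
+ * struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ * status = acpi_evaluate_object(handle, "EXMP", NULL, &out);
+ * (EXMP and ABCD are hypothetical names used only for this example)
+ */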
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_walk_namespace
*
* PARAMETERS: Type - acpi_object_type to search for
@@ -379,6 +456,7 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
* function, etc.
*
******************************************************************************/
+
acpi_status
acpi_walk_namespace(acpi_object_type type,
acpi_handle start_object,
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/namespace/nsxfname.c
index a287ed550f5..5efa4e7ddb0 100644
--- a/drivers/acpi/namespace/nsxfname.c
+++ b/drivers/acpi/namespace/nsxfname.c
@@ -253,6 +253,7 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
node = acpi_ns_map_handle_to_node(handle);
if (!node) {
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ status = AE_BAD_PARAMETER;
goto cleanup;
}
@@ -264,6 +265,10 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
info->name = node->name.integer;
info->valid = 0;
+ if (node->type == ACPI_TYPE_METHOD) {
+ info->param_count = node->object->method.param_count;
+ }
+
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto cleanup;
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index cb9864e39ba..25ceae9191e 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -258,7 +258,7 @@ int __init acpi_numa_init(void)
int acpi_get_pxm(acpi_handle h)
{
- unsigned long pxm;
+ unsigned long long pxm;
acpi_status status;
acpi_handle handle;
acpi_handle phandle = h;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 235a1386888..4be252145cb 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -608,7 +608,7 @@ static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
acpi_handle handle;
struct acpi_pci_id *pci_id = *id;
acpi_status status;
- unsigned long temp;
+ unsigned long long temp;
acpi_object_type type;
acpi_get_parent(chandle, &handle);
@@ -620,8 +620,7 @@ static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
return;
- status =
- acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
+ status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
&temp);
if (ACPI_SUCCESS(status)) {
u32 val;
@@ -682,6 +681,22 @@ static void acpi_os_execute_deferred(struct work_struct *work)
return;
}
+static void acpi_os_execute_hp_deferred(struct work_struct *work)
+{
+ struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
+ if (!dpc) {
+ printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
+ return;
+ }
+
+ acpi_os_wait_events_complete(NULL);
+
+ dpc->function(dpc->context);
+ kfree(dpc);
+
+ return;
+}
+
/*******************************************************************************
*
* FUNCTION: acpi_os_execute
@@ -697,12 +712,13 @@ static void acpi_os_execute_deferred(struct work_struct *work)
*
******************************************************************************/
-acpi_status acpi_os_execute(acpi_execute_type type,
- acpi_osd_exec_callback function, void *context)
+static acpi_status __acpi_os_execute(acpi_execute_type type,
+ acpi_osd_exec_callback function, void *context, int hp)
{
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
struct workqueue_struct *queue;
+ int ret;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Scheduling function [%p(%p)] for deferred execution.\n",
function, context));
@@ -726,19 +742,38 @@ acpi_status acpi_os_execute(acpi_execute_type type,
dpc->function = function;
dpc->context = context;
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- queue = (type == OSL_NOTIFY_HANDLER) ? kacpi_notify_wq : kacpid_wq;
- if (!queue_work(queue, &dpc->work)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Call to queue_work() failed.\n"));
+ if (!hp) {
+ INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+ queue = (type == OSL_NOTIFY_HANDLER) ?
+ kacpi_notify_wq : kacpid_wq;
+ ret = queue_work(queue, &dpc->work);
+ } else {
+ INIT_WORK(&dpc->work, acpi_os_execute_hp_deferred);
+ ret = schedule_work(&dpc->work);
+ }
+
+ if (!ret) {
+ printk(KERN_ERR PREFIX
+ "Call to queue_work() failed.\n");
status = AE_ERROR;
kfree(dpc);
}
return_ACPI_STATUS(status);
}
+acpi_status acpi_os_execute(acpi_execute_type type,
+ acpi_osd_exec_callback function, void *context)
+{
+ return __acpi_os_execute(type, function, context, 0);
+}
EXPORT_SYMBOL(acpi_os_execute);
+acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
+ void *context)
+{
+ return __acpi_os_execute(0, function, context, 1);
+}
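+
+/*
+ * Illustration only (a hypothetical sketch, not part of the original patch):
+ * a hotplug handler would defer heavy work through the new helper, which
+ * first waits for pending ACPI events and then runs the callback from the
+ * generic work queue instead of kacpid_wq/kacpi_notify_wq:
+ *
+ * static void example_dock_worker(void *context)
+ * {
+ *     (process the hotplug event here)
+ * }
+ *
+ * acpi_os_hotplug_execute(example_dock_worker, context);
+ */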
+
void acpi_os_wait_events_complete(void *context)
{
flush_workqueue(kacpid_wq);
diff --git a/drivers/acpi/parser/psloop.c b/drivers/acpi/parser/psloop.c
index c06238e55d9..4647039a0d8 100644
--- a/drivers/acpi/parser/psloop.c
+++ b/drivers/acpi/parser/psloop.c
@@ -719,6 +719,8 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
*op = NULL;
}
+ ACPI_PREEMPTION_POINT();
+
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c
index 15e1702e48d..68e932f215e 100644
--- a/drivers/acpi/parser/psparse.c
+++ b/drivers/acpi/parser/psparse.c
@@ -137,6 +137,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
union acpi_parse_object *next;
const struct acpi_opcode_info *parent_info;
union acpi_parse_object *replacement_op = NULL;
+ acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_PTR(ps_complete_this_op, op);
@@ -186,7 +187,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
replacement_op =
acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
if (!replacement_op) {
- goto allocate_error;
+ status = AE_NO_MEMORY;
}
break;
@@ -211,7 +212,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
replacement_op =
acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
if (!replacement_op) {
- goto allocate_error;
+ status = AE_NO_MEMORY;
}
} else
if ((op->common.parent->common.aml_opcode ==
@@ -226,13 +227,13 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
acpi_ps_alloc_op(op->common.
aml_opcode);
if (!replacement_op) {
- goto allocate_error;
+ status = AE_NO_MEMORY;
+ } else {
+ replacement_op->named.data =
+ op->named.data;
+ replacement_op->named.length =
+ op->named.length;
}
-
- replacement_op->named.data =
- op->named.data;
- replacement_op->named.length =
- op->named.length;
}
}
break;
@@ -242,7 +243,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
replacement_op =
acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
if (!replacement_op) {
- goto allocate_error;
+ status = AE_NO_MEMORY;
}
}
@@ -302,14 +303,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
/* Now we can actually delete the subtree rooted at Op */
acpi_ps_delete_parse_tree(op);
- return_ACPI_STATUS(AE_OK);
-
- allocate_error:
-
- /* Always delete the subtree, even on error */
-
- acpi_ps_delete_parse_tree(op);
- return_ACPI_STATUS(AE_NO_MEMORY);
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
@@ -641,10 +635,12 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
ACPI_WALK_METHOD_RESTART;
}
} else {
- /* On error, delete any return object */
+ /* On error, delete any return object or implicit return */
acpi_ut_remove_reference(previous_walk_state->
return_desc);
+ acpi_ds_clear_implicit_return
+ (previous_walk_state);
}
}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index cf47805a744..fcfdef7b4fd 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -709,7 +709,7 @@ int acpi_pci_link_free_irq(acpi_handle handle)
acpi_device_bid(link->device)));
if (link->refcnt == 0) {
- acpi_ut_evaluate_object(link->device->handle, "_DIS", 0, NULL);
+ acpi_evaluate_object(link->device->handle, "_DIS", NULL, NULL);
}
mutex_unlock(&acpi_link_lock);
return (link->irq.active);
@@ -737,7 +737,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
link->device = device;
strcpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
- acpi_driver_data(device) = link;
+ device->driver_data = link;
mutex_lock(&acpi_link_lock);
result = acpi_pci_link_get_possible(link);
@@ -773,7 +773,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
end:
/* disable all links -- to be activated on use */
- acpi_ut_evaluate_object(device->handle, "_DIS", 0, NULL);
+ acpi_evaluate_object(device->handle, "_DIS", NULL, NULL);
mutex_unlock(&acpi_link_lock);
if (result)
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index c3fed31166b..1b8f67d21d5 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -190,7 +190,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
struct acpi_pci_root *root = NULL;
struct acpi_pci_root *tmp;
acpi_status status = AE_OK;
- unsigned long value = 0;
+ unsigned long long value = 0;
acpi_handle handle = NULL;
struct acpi_device *child;
@@ -206,7 +206,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
root->device = device;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
- acpi_driver_data(device) = root;
+ device->driver_data = root;
device->ops.bind = acpi_pci_bind;
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index d5b4ef89887..cd1f4467be7 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -76,10 +76,10 @@ static struct acpi_pci_driver acpi_pci_slot_driver = {
};
static int
-check_slot(acpi_handle handle, unsigned long *sun)
+check_slot(acpi_handle handle, unsigned long long *sun)
{
int device = -1;
- unsigned long adr, sta;
+ unsigned long long adr, sta;
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -132,7 +132,7 @@ static acpi_status
register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int device;
- unsigned long sun;
+ unsigned long long sun;
char name[SLOT_NAME_SIZE];
struct acpi_pci_slot *slot;
struct pci_slot *pci_slot;
@@ -150,7 +150,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
}
snprintf(name, sizeof(name), "%u", (u32)sun);
- pci_slot = pci_create_slot(pci_bus, device, name);
+ pci_slot = pci_create_slot(pci_bus, device, name, NULL);
if (IS_ERR(pci_slot)) {
err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
kfree(slot);
@@ -182,7 +182,7 @@ static acpi_status
walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int device, function;
- unsigned long adr;
+ unsigned long long adr;
acpi_status status;
acpi_handle dummy_handle;
acpi_walk_callback user_function;
@@ -239,7 +239,7 @@ static int
walk_root_bridge(acpi_handle handle, acpi_walk_callback user_function)
{
int seg, bus;
- unsigned long tmp;
+ unsigned long long tmp;
acpi_status status;
acpi_handle dummy_handle;
struct pci_bus *pci_bus;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 4ab21cb1c8c..a1718e56103 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -54,6 +54,14 @@ ACPI_MODULE_NAME("power");
#define ACPI_POWER_RESOURCE_STATE_OFF 0x00
#define ACPI_POWER_RESOURCE_STATE_ON 0x01
#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "acpi."
+int acpi_power_nocheck;
+module_param_named(power_nocheck, acpi_power_nocheck, bool, 000);
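+/*
+ * Illustration (not part of the original patch): with the "acpi." prefix
+ * defined above, this check can be disabled from the kernel command line
+ * via "acpi.power_nocheck=1".
+ */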
+
static int acpi_power_add(struct acpi_device *device);
static int acpi_power_remove(struct acpi_device *device, int type);
static int acpi_power_resume(struct acpi_device *device);
@@ -128,16 +136,16 @@ acpi_power_get_context(acpi_handle handle,
return 0;
}
-static int acpi_power_get_state(struct acpi_power_resource *resource, int *state)
+static int acpi_power_get_state(acpi_handle handle, int *state)
{
acpi_status status = AE_OK;
- unsigned long sta = 0;
+ unsigned long long sta = 0;
- if (!resource || !state)
+ if (!handle || !state)
return -EINVAL;
- status = acpi_evaluate_integer(resource->device->handle, "_STA", NULL, &sta);
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -145,7 +153,7 @@ static int acpi_power_get_state(struct acpi_power_resource *resource, int *state
ACPI_POWER_RESOURCE_STATE_OFF;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] is %s\n",
- resource->name, state ? "on" : "off"));
+ acpi_ut_get_node_name(handle), *state ? "on" : "off"));
return 0;
}
@@ -153,7 +161,6 @@ static int acpi_power_get_state(struct acpi_power_resource *resource, int *state
static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
{
int result = 0, state1;
- struct acpi_power_resource *resource = NULL;
u32 i = 0;
@@ -161,12 +168,15 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
for (i = 0; i < list->count; i++) {
- result = acpi_power_get_context(list->handles[i], &resource);
- if (result)
- return result;
- result = acpi_power_get_state(resource, &state1);
+ /*
+ * The state of the power resource can be obtained directly from
+ * its ACPI handle, so there is no need to look up the power
+ * resource object first and then query its state.
+ */
+ result = acpi_power_get_state(list->handles[i], &state1);
if (result)
return result;
@@ -226,12 +236,18 @@ static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
if (ACPI_FAILURE(status))
return -ENODEV;
- result = acpi_power_get_state(resource, &state);
- if (result)
- return result;
- if (state != ACPI_POWER_RESOURCE_STATE_ON)
- return -ENOEXEC;
-
+ if (!acpi_power_nocheck) {
+ /*
+ * Unless acpi_power_nocheck is set, verify that the resource
+ * actually reached the 'on' state after the transition.
+ */
+ result = acpi_power_get_state(resource->device->handle,
+ &state);
+ if (result)
+ return result;
+ if (state != ACPI_POWER_RESOURCE_STATE_ON)
+ return -ENOEXEC;
+ }
/* Update the power resource's _device_ power state */
resource->device->power.state = ACPI_STATE_D0;
@@ -277,11 +293,17 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
if (ACPI_FAILURE(status))
return -ENODEV;
- result = acpi_power_get_state(resource, &state);
- if (result)
- return result;
- if (state != ACPI_POWER_RESOURCE_STATE_OFF)
- return -ENOEXEC;
+ if (!acpi_power_nocheck) {
+ /*
+ * Unless acpi_power_nocheck is set, verify that the resource
+ * actually reached the 'off' state after the transition.
+ */
+ result = acpi_power_get_state(handle, &state);
+ if (result)
+ return result;
+ if (state != ACPI_POWER_RESOURCE_STATE_OFF)
+ return -ENOEXEC;
+ }
/* Update the power resource's _device_ power state */
resource->device->power.state = ACPI_STATE_D3;
@@ -555,7 +577,7 @@ static int acpi_power_seq_show(struct seq_file *seq, void *offset)
if (!resource)
goto end;
- result = acpi_power_get_state(resource, &state);
+ result = acpi_power_get_state(resource->device->handle, &state);
if (result)
goto end;
@@ -657,7 +679,7 @@ static int acpi_power_add(struct acpi_device *device)
strcpy(resource->name, device->pnp.bus_id);
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
- acpi_driver_data(device) = resource;
+ device->driver_data = resource;
/* Evaluate the object to get the system level and resource order. */
status = acpi_evaluate_object(device->handle, NULL, NULL, &buffer);
@@ -668,7 +690,7 @@ static int acpi_power_add(struct acpi_device *device)
resource->system_level = acpi_object.power_resource.system_level;
resource->order = acpi_object.power_resource.resource_order;
- result = acpi_power_get_state(resource, &state);
+ result = acpi_power_get_state(device->handle, &state);
if (result)
goto end;
@@ -733,9 +755,9 @@ static int acpi_power_resume(struct acpi_device *device)
if (!device || !acpi_driver_data(device))
return -EINVAL;
- resource = (struct acpi_power_resource *)acpi_driver_data(device);
+ resource = acpi_driver_data(device);
- result = acpi_power_get_state(resource, &state);
+ result = acpi_power_get_state(device->handle, &state);
if (result)
return result;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index ee68ac54c0d..24a362f8034 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -563,7 +563,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
/* Check if it is a Device with HID and UID */
if (has_uid) {
- unsigned long value;
+ unsigned long long value;
status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
NULL, &value);
if (ACPI_FAILURE(status)) {
@@ -818,7 +818,7 @@ static int acpi_processor_add(struct acpi_device *device)
pr->handle = device->handle;
strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
- acpi_driver_data(device) = pr;
+ device->driver_data = pr;
return 0;
}
@@ -875,7 +875,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
static int is_processor_present(acpi_handle handle)
{
acpi_status status;
- unsigned long sta = 0;
+ unsigned long long sta = 0;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index cf5b1b7b684..81b40ed5379 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1587,6 +1587,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
if (acpi_idle_bm_check()) {
if (dev->safe_state) {
+ dev->last_state = dev->safe_state;
return dev->safe_state->enter(dev, dev->safe_state);
} else {
local_irq_disable();
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 80c251ec6d2..dc98f7a6f2c 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -38,6 +38,7 @@
#include <asm/uaccess.h>
#endif
+#include <asm/cpufeature.h>
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
@@ -126,7 +127,7 @@ static struct notifier_block acpi_ppc_notifier_block = {
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
acpi_status status = 0;
- unsigned long ppc = 0;
+ unsigned long long ppc = 0;
if (!pr)
@@ -334,7 +335,6 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
acpi_status status = AE_OK;
acpi_handle handle = NULL;
-
if (!pr || !pr->performance || !pr->handle)
return -EINVAL;
@@ -347,13 +347,25 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
result = acpi_processor_get_performance_control(pr);
if (result)
- return result;
+ goto update_bios;
result = acpi_processor_get_performance_states(pr);
if (result)
- return result;
+ goto update_bios;
return 0;
+
+ /*
+ * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
+ * the BIOS is older than the CPU and does not know its frequencies
+ */
+ update_bios:
+ if (ACPI_SUCCESS(acpi_get_handle(pr->handle, "_PPC", &handle))) {
+ if (boot_cpu_has(X86_FEATURE_EST))
+ printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
+ "frequency support\n");
+ }
+ return result;
}
int acpi_processor_notify_smm(struct module *calling_module)
@@ -524,13 +536,13 @@ static int acpi_processor_get_psd(struct acpi_processor *pr)
psd = buffer.pointer;
if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
+ printk(KERN_ERR PREFIX "Invalid _PSD data\n");
result = -EFAULT;
goto end;
}
if (psd->package.count != 1) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
+ printk(KERN_ERR PREFIX "Invalid _PSD data\n");
result = -EFAULT;
goto end;
}
@@ -543,19 +555,19 @@ static int acpi_processor_get_psd(struct acpi_processor *pr)
status = acpi_extract_package(&(psd->package.elements[0]),
&format, &state);
if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
+ printk(KERN_ERR PREFIX "Invalid _PSD data\n");
result = -EFAULT;
goto end;
}
if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n"));
+ printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
result = -EFAULT;
goto end;
}
if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n"));
+ printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
result = -EFAULT;
goto end;
}
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index a56fc6c4394..3da2df93d92 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -274,7 +274,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
acpi_status status = 0;
- unsigned long tpc = 0;
+ unsigned long long tpc = 0;
if (!pr)
return -EINVAL;
@@ -528,13 +528,13 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
tsd = buffer.pointer;
if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
+ printk(KERN_ERR PREFIX "Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
if (tsd->package.count != 1) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
+ printk(KERN_ERR PREFIX "Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
@@ -547,19 +547,19 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
status = acpi_extract_package(&(tsd->package.elements[0]),
&format, &state);
if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
+ printk(KERN_ERR PREFIX "Invalid _TSD data\n");
result = -EFAULT;
goto end;
}
if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
+ printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
result = -EFAULT;
goto end;
}
if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
+ printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
result = -EFAULT;
goto end;
}
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index a6b662c00b6..755baf2ca70 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -15,9 +15,28 @@ void acpi_reboot(void)
rr = &acpi_gbl_FADT.reset_register;
- /* Is the reset register supported? */
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ||
- rr->bit_width != 8 || rr->bit_offset != 0)
+ /*
+ * Is the ACPI reset register supported?
+ *
+ * According to ACPI 3.0, FADT.flags.RESET_REG_SUP indicates
+ * whether the ACPI reset mechanism is supported.
+ *
+ * However, some boxes have this bit clear, yet a valid
+ * ACPI_RESET_REG & RESET_VALUE, and ACPI reboot is the only
+ * mechanism that works for them after S3.
+ *
+ * This suggests that other operating systems may not be checking
+ * the RESET_REG_SUP bit, and are using other means to decide
+ * whether to use the ACPI reboot mechanism or not.
+ *
+ * So when an ACPI reboot is requested, only the reset register itself
+ * is checked. It is considered supported if all of the following hold:
+ * a. the reset_register address is not zero
+ * b. the access width is eight bits
+ * c. the bit_offset is zero
+ */
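+ /*
+ * For example (an illustration, not taken from the patch), many x86
+ * systems describe an 8-bit I/O register at port 0xCF9 with a reset
+ * value of 0x06, which satisfies the checks below.
+ */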
+ if (!(rr->address) || rr->bit_width != 8 || rr->bit_offset != 0)
return;
reset_value = acpi_gbl_FADT.reset_value;
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index d9063ea414e..8eaaecf9200 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -43,7 +43,6 @@
#include <acpi/acpi.h>
#include <acpi/acresrc.h>
-#include <acpi/amlcode.h>
#include <acpi/acnamesp.h>
#define _COMPONENT ACPI_RESOURCES
@@ -560,8 +559,8 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
ACPI_GET_OBJECT_TYPE(*sub_object_list)) ||
((ACPI_TYPE_LOCAL_REFERENCE ==
ACPI_GET_OBJECT_TYPE(*sub_object_list)) &&
- ((*sub_object_list)->reference.opcode ==
- AML_INT_NAMEPATH_OP)))) {
+ ((*sub_object_list)->reference.class ==
+ ACPI_REFCLASS_NAME)))) {
name_found = TRUE;
} else {
/* Look at the next element */
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index 7804a8c40e7..c0bbfa2c419 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -43,7 +43,6 @@
#include <acpi/acpi.h>
#include <acpi/acresrc.h>
-#include <acpi/amlcode.h>
#include <acpi/acnamesp.h>
#define _COMPONENT ACPI_RESOURCES
@@ -310,13 +309,12 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_LOCAL_REFERENCE:
- if (obj_desc->reference.opcode !=
- AML_INT_NAMEPATH_OP) {
+ if (obj_desc->reference.class !=
+ ACPI_REFCLASS_NAME) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Source) Need name, found reference op %X",
+ "(PRT[%X].Source) Need name, found Reference Class %X",
index,
- obj_desc->reference.
- opcode));
+ obj_desc->reference.class));
return_ACPI_STATUS(AE_BAD_DATA);
}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 7b011e7e29f..6050ce48187 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -931,7 +931,7 @@ static int acpi_sbs_add(struct acpi_device *device)
sbs->device = device;
strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_SBS_CLASS);
- acpi_driver_data(device) = sbs;
+ device->driver_data = sbs;
result = acpi_charger_add(sbs);
if (result)
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index a4e3767b8c6..e53e590252c 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -258,7 +258,7 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
static int acpi_smbus_hc_add(struct acpi_device *device)
{
int status;
- unsigned long val;
+ unsigned long long val;
struct acpi_smb_hc *hc;
if (!device)
@@ -282,7 +282,7 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
hc->ec = acpi_driver_data(device->parent);
hc->offset = (val >> 8) & 0xff;
hc->query_bit = val & 0xff;
- acpi_driver_data(device) = hc;
+ device->driver_data = hc;
acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc);
printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n",
@@ -303,7 +303,7 @@ static int acpi_smbus_hc_remove(struct acpi_device *device, int type)
hc = acpi_driver_data(device);
acpi_ec_remove_query_handler(hc->ec, hc->query_bit);
kfree(hc);
- acpi_driver_data(device) = NULL;
+ device->driver_data = NULL;
return 0;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index f6f52c1a2ab..a9dda8e0f9f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -113,16 +113,16 @@ static int acpi_bus_hot_remove_device(void *context)
if (acpi_bus_trim(device, 1)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Removing device failed\n"));
+ printk(KERN_ERR PREFIX
+ "Removing device failed\n");
return -1;
}
/* power off device */
status = acpi_evaluate_object(handle, "_PS3", NULL, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
- ACPI_DEBUG_PRINT((ACPI_DB_WARN,
- "Power-off device failed\n"));
+ printk(KERN_WARNING PREFIX
+ "Power-off device failed\n");
if (device->flags.lockable) {
arg_list.count = 1;
@@ -276,6 +276,13 @@ int acpi_match_device_ids(struct acpi_device *device,
{
const struct acpi_device_id *id;
+ /*
+ * If the device is not present, there is no need to load a device
+ * driver for it.
+ */
+ if (!device->status.present)
+ return -ENODEV;
+
if (device->flags.hardware_id) {
for (id = ids; id->id[0]; id++) {
if (!strcmp((char*)id->id, device->pnp.hardware_id))
@@ -384,7 +391,7 @@ static int acpi_device_remove(struct device * dev)
acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type);
}
acpi_dev->driver = NULL;
- acpi_driver_data(dev) = NULL;
+ acpi_dev->driver_data = NULL;
put_device(dev);
return 0;
@@ -477,7 +484,7 @@ static int acpi_device_register(struct acpi_device *device,
result = acpi_device_setup_files(device);
if(result)
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error creating sysfs interface for device %s\n", device->dev.bus_id));
+ printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n", device->dev.bus_id);
device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
return 0;
@@ -537,7 +544,7 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
result = driver->ops.add(device);
if (result) {
device->driver = NULL;
- acpi_driver_data(device) = NULL;
+ device->driver_data = NULL;
return result;
}
@@ -744,6 +751,16 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
if (!acpi_match_device_ids(device, button_device_ids))
device->wakeup.flags.run_wake = 1;
+ /*
+ * Don't set Power button GPE as run_wake
+ * if Fixed Power button is used
+ */
+ if (!strcmp(device->pnp.hardware_id, "PNP0C0C") &&
+ !(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) {
+ device->wakeup.flags.run_wake = 0;
+ device->wakeup.flags.valid = 0;
+ }
+
end:
if (ACPI_FAILURE(status))
device->flags.wake_capable = 0;
@@ -807,6 +824,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
/* TBD: System wake support and resource requirements. */
device->power.state = ACPI_STATE_UNKNOWN;
+ acpi_bus_get_power(device->handle, &(device->power.state));
return 0;
}
@@ -1153,20 +1171,6 @@ static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
}
static int
-acpi_is_child_device(struct acpi_device *device,
- int (*matcher)(struct acpi_device *))
-{
- int result = -ENODEV;
-
- do {
- if (ACPI_SUCCESS(matcher(device)))
- return AE_OK;
- } while ((device = device->parent));
-
- return result;
-}
-
-static int
acpi_add_single_object(struct acpi_device **child,
struct acpi_device *parent, acpi_handle handle, int type,
struct acpi_bus_ops *ops)
@@ -1221,15 +1225,18 @@ acpi_add_single_object(struct acpi_device **child,
result = -ENODEV;
goto end;
}
- if (!device->status.present) {
- /* Bay and dock should be handled even if absent */
- if (!ACPI_SUCCESS(
- acpi_is_child_device(device, acpi_bay_match)) &&
- !ACPI_SUCCESS(
- acpi_is_child_device(device, acpi_dock_match))) {
- result = -ENODEV;
- goto end;
- }
+ /*
+ * When the device is neither present nor functional, it should
+ * not be added to the Linux ACPI device tree. When the device is
+ * not present but is functional, it should still be added (for
+ * example, bay and dock devices), so there is no need to check
+ * specifically for bay or dock devices here.
+ */
+ if (!device->status.present && !device->status.functional) {
+ result = -ENODEV;
+ goto end;
}
break;
default:
@@ -1252,6 +1259,16 @@ acpi_add_single_object(struct acpi_device **child,
acpi_device_set_id(device, parent, handle, type);
/*
+ * Attach the ACPI device to its acpi_handle before retrieving the
+ * power/wakeup/performance flags. Otherwise the OS cannot look up
+ * the corresponding ACPI device by handle while those flags are
+ * being gathered.
+ */
+ result = acpi_device_set_context(device, type);
+ if (result)
+ goto end;
+
+ /*
* Power Management
* ----------------
*/
@@ -1281,8 +1298,6 @@ acpi_add_single_object(struct acpi_device **child,
goto end;
}
- if ((result = acpi_device_set_context(device, type)))
- goto end;
result = acpi_device_register(device, parent);
@@ -1402,7 +1417,12 @@ static int acpi_bus_scan(struct acpi_device *start, struct acpi_bus_ops *ops)
* TBD: Need notifications and other detection mechanisms
* in place before we can fully implement this.
*/
- if (child->status.present) {
+ /*
+ * When the device is not present but functional, it is also
+ * necessary to scan the children of this device.
+ */
+ if (child->status.present || (!child->status.present &&
+ child->status.functional)) {
status = acpi_get_next_object(ACPI_TYPE_ANY, chandle,
NULL, NULL);
if (ACPI_SUCCESS(status)) {
@@ -1545,7 +1565,6 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
return result;
}
-int __init acpi_boot_ec_enable(void);
static int __init acpi_scan_init(void)
{
@@ -1579,9 +1598,6 @@ static int __init acpi_scan_init(void)
*/
result = acpi_bus_scan_fixed(acpi_root);
- /* EC region might be needed at bus_scan, so enable it now */
- acpi_boot_ec_enable();
-
if (!result)
result = acpi_bus_scan(acpi_root, &ops);
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index d13194a031b..26571bafb15 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -15,6 +15,7 @@
#include <linux/dmi.h>
#include <linux/device.h>
#include <linux/suspend.h>
+#include <linux/reboot.h>
#include <asm/io.h>
@@ -24,6 +25,36 @@
u8 sleep_states[ACPI_S_STATE_COUNT];
+static void acpi_sleep_tts_switch(u32 acpi_state)
+{
+ union acpi_object in_arg = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list arg_list = { 1, &in_arg };
+ acpi_status status = AE_OK;
+
+ in_arg.integer.value = acpi_state;
+ status = acpi_evaluate_object(NULL, "\\_TTS", &arg_list, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ /*
+ * The OS could not evaluate the _TTS object. Print a notice,
+ * but this does not break anything.
+ */
+ printk(KERN_NOTICE "Failure in evaluating _TTS object\n");
+ }
+}
+
+static int tts_notify_reboot(struct notifier_block *this,
+ unsigned long code, void *x)
+{
+ acpi_sleep_tts_switch(ACPI_STATE_S5);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tts_notifier = {
+ .notifier_call = tts_notify_reboot,
+ .next = NULL,
+ .priority = 0,
+};
+
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
@@ -45,9 +76,8 @@ static int acpi_sleep_prepare(u32 acpi_state)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
-
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering'
@@ -131,8 +161,9 @@ static void acpi_pm_end(void)
* failing transition to a sleep state.
*/
acpi_target_sleep_state = ACPI_STATE_S0;
+ acpi_sleep_tts_switch(acpi_target_sleep_state);
}
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_ACPI_SLEEP */
#ifdef CONFIG_SUSPEND
extern void do_suspend_lowlevel(void);
@@ -155,6 +186,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
if (sleep_states[acpi_state]) {
acpi_target_sleep_state = acpi_state;
+ acpi_sleep_tts_switch(acpi_target_sleep_state);
} else {
printk(KERN_ERR "ACPI does not support this state: %d\n",
pm_state);
@@ -200,6 +232,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
break;
}
+ /* If ACPI is not enabled by the BIOS, we need to enable it here. */
+ acpi_enable();
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(acpi_state);
@@ -296,6 +330,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
},
},
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "HP xw4600 Workstation",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
@@ -313,6 +355,7 @@ void __init acpi_no_s4_hw_signature(void)
static int acpi_hibernation_begin(void)
{
acpi_target_sleep_state = ACPI_STATE_S4;
+ acpi_sleep_tts_switch(acpi_target_sleep_state);
return 0;
}
@@ -376,7 +419,15 @@ static struct platform_hibernation_ops acpi_hibernation_ops = {
*/
static int acpi_hibernation_begin_old(void)
{
- int error = acpi_sleep_prepare(ACPI_STATE_S4);
+ int error;
+ /*
+ * The _TTS object should always be evaluated before the _PTS object.
+ * When old_suspend_ordering is true, the _PTS object is
+ * evaluated in acpi_sleep_prepare().
+ */
+ acpi_sleep_tts_switch(ACPI_STATE_S4);
+
+ error = acpi_sleep_prepare(ACPI_STATE_S4);
if (!error)
acpi_target_sleep_state = ACPI_STATE_S4;
@@ -444,7 +495,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
struct acpi_device *adev;
char acpi_method[] = "_SxD";
- unsigned long d_min, d_max;
+ unsigned long long d_min, d_max;
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
printk(KERN_DEBUG "ACPI handle has no context!\n");
@@ -596,5 +647,10 @@ int __init acpi_sleep_init(void)
pm_power_off = acpi_power_off;
}
printk(")\n");
+ /*
+ * Register tts_notifier with the reboot notifier list so that the _TTS
+ * object can also be evaluated when the system enters S5.
+ */
+ register_reboot_notifier(&tts_notifier);
return 0;
}
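The sleep/main.c changes above add a _TTS (Transition To State) hook that runs on every suspend/hibernate entry and, through the reboot notifier, on the way down to S5. A minimal standalone sketch of the same pattern, assuming only the standard acpi_evaluate_object() and register_reboot_notifier() interfaces (names prefixed with sketch_ are illustrative):

#include <linux/reboot.h>
#include <acpi/acpi_bus.h>

/* Evaluate a one-integer control method such as \_TTS from the root. */
static acpi_status sketch_tts(u32 acpi_state)
{
	union acpi_object arg = { ACPI_TYPE_INTEGER };
	struct acpi_object_list args = { 1, &arg };

	arg.integer.value = acpi_state;
	/* NULL handle: the path is resolved from the namespace root. */
	return acpi_evaluate_object(NULL, "\\_TTS", &args, NULL);
}

static int sketch_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	sketch_tts(ACPI_STATE_S5);	/* heading for soft-off */
	return NOTIFY_DONE;
}

static struct notifier_block sketch_nb = {
	.notifier_call = sketch_reboot_notify,
};

/* In an init path: register_reboot_notifier(&sketch_nb); */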
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 24e80fd927e..1d74171b794 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -386,8 +386,8 @@ static ssize_t counter_set(struct kobject *kobj,
goto end;
if (!(all_counters[index].flags & ACPI_EVENT_VALID)) {
- ACPI_DEBUG_PRINT((ACPI_DB_WARN,
- "Can not change Invalid GPE/Fixed Event status\n"));
+ printk(KERN_WARNING PREFIX
+ "Can not change Invalid GPE/Fixed Event status\n");
return -EINVAL;
}
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
index a4a41ba2484..2c7885e7ffb 100644
--- a/drivers/acpi/tables/tbfadt.c
+++ b/drivers/acpi/tables/tbfadt.c
@@ -50,7 +50,7 @@ ACPI_MODULE_NAME("tbfadt")
/* Local prototypes */
static void inline
acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
- u8 bit_width, u64 address);
+ u8 byte_width, u64 address);
static void acpi_tb_convert_fadt(void);
@@ -111,7 +111,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
* FUNCTION: acpi_tb_init_generic_address
*
* PARAMETERS: generic_address - GAS struct to be initialized
- * bit_width - Width of this register
+ * byte_width - Width of this register
* Address - Address of the register
*
* RETURN: None
@@ -124,7 +124,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
static void inline
acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
- u8 bit_width, u64 address)
+ u8 byte_width, u64 address)
{
/*
@@ -136,7 +136,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
/* All other fields are byte-wide */
generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO;
- generic_address->bit_width = bit_width;
+ generic_address->bit_width = byte_width << 3;
generic_address->bit_offset = 0;
generic_address->access_width = 0;
}
@@ -342,9 +342,20 @@ static void acpi_tb_convert_fadt(void)
* useful to calculate them once, here.
*
* The PM event blocks are split into two register blocks, first is the
- * PM Status Register block, followed immediately by the PM Enable Register
- * block. Each is of length (pm1_event_length/2)
+ * PM Status Register block, followed immediately by the PM Enable
+ * Register block. Each is of length (xpm1x_event_block.bit_width/2).
+ *
+ * On various systems the v2 fields (and particularly the bit widths)
+ * cannot be relied upon, though. Hence resort to using the v1 length
+ * here (and warn about the inconsistency).
*/
+ if (acpi_gbl_FADT.xpm1a_event_block.bit_width
+ != acpi_gbl_FADT.pm1_event_length * 8)
+ printk(KERN_WARNING "FADT: "
+ "X_PM1a_EVT_BLK.bit_width (%u) does not match"
+ " PM1_EVT_LEN (%u)\n",
+ acpi_gbl_FADT.xpm1a_event_block.bit_width,
+ acpi_gbl_FADT.pm1_event_length);
pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length);
/* The PM1A register block is required */
@@ -360,13 +371,20 @@ static void acpi_tb_convert_fadt(void)
/* The PM1B register block is optional, ignore if not present */
if (acpi_gbl_FADT.xpm1b_event_block.address) {
+ if (acpi_gbl_FADT.xpm1b_event_block.bit_width
+ != acpi_gbl_FADT.pm1_event_length * 8)
+ printk(KERN_WARNING "FADT: "
+ "X_PM1b_EVT_BLK.bit_width (%u) does not match"
+ " PM1_EVT_LEN (%u)\n",
+ acpi_gbl_FADT.xpm1b_event_block.bit_width,
+ acpi_gbl_FADT.pm1_event_length);
acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
pm1_register_length,
(acpi_gbl_FADT.xpm1b_event_block.
address + pm1_register_length));
/* Don't forget to copy space_id of the GAS */
acpi_gbl_xpm1b_enable.space_id =
- acpi_gbl_FADT.xpm1a_event_block.space_id;
+ acpi_gbl_FADT.xpm1b_event_block.space_id;
}
}
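The fix above is that acpi_tb_init_generic_address() is now handed a byte width and converts it to bits when filling the Generic Address Structure. A small sketch of that conversion in isolation, using the struct acpi_generic_address fields touched above (the helper name is illustrative):

static void sketch_init_gas(struct acpi_generic_address *gas,
			    u8 byte_width, u64 address)
{
	gas->space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	gas->bit_width = byte_width << 3;	/* bytes -> bits, e.g. 2 -> 16 */
	gas->bit_offset = 0;
	gas->access_width = 0;
	gas->address = address;
}

/* For the PM1a/PM1b enable blocks: byte_width = pm1_event_length / 2. */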
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
index b22185f55a1..18747ce8dd2 100644
--- a/drivers/acpi/tables/tbinstal.c
+++ b/drivers/acpi/tables/tbinstal.c
@@ -110,7 +110,6 @@ acpi_status
acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
{
u32 i;
- u32 length;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(tb_add_table);
@@ -145,25 +144,64 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
}
}
- length = ACPI_MIN(table_desc->length,
- acpi_gbl_root_table_list.tables[i].length);
+ /*
+ * Check for a table match on the entire table length,
+ * not just the header.
+ */
+ if (table_desc->length !=
+ acpi_gbl_root_table_list.tables[i].length) {
+ continue;
+ }
+
if (ACPI_MEMCMP(table_desc->pointer,
acpi_gbl_root_table_list.tables[i].pointer,
- length)) {
+ acpi_gbl_root_table_list.tables[i].length)) {
continue;
}
- /* Table is already registered */
-
+ /*
+ * Note: the current mechanism does not unregister a table if it is
+ * dynamically unloaded. The related namespace entries are deleted,
+ * but the table remains in the root table list.
+ *
+ * The assumption here is that the number of different tables that
+ * will be loaded is actually small, and there is minimal overhead
+ * in just keeping the table in case it is needed again.
+ *
+ * If this assumption changes in the future (perhaps on large
+ * machines with many table load/unload operations), tables will
+ * need to be unregistered when they are unloaded, and slots in the
+ * root table list should be reused when empty.
+ */
+
+ /*
+ * Table is already registered.
+ * We can delete the table that was passed as a parameter.
+ */
acpi_tb_delete_table(table_desc);
*table_index = i;
- status = AE_ALREADY_EXISTS;
- goto release;
+
+ if (acpi_gbl_root_table_list.tables[i].
+ flags & ACPI_TABLE_IS_LOADED) {
+
+ /* Table is still loaded, this is an error */
+
+ status = AE_ALREADY_EXISTS;
+ goto release;
+ } else {
+ /* Table was unloaded, allow it to be reloaded */
+
+ table_desc->pointer =
+ acpi_gbl_root_table_list.tables[i].pointer;
+ table_desc->address =
+ acpi_gbl_root_table_list.tables[i].address;
+ status = AE_OK;
+ goto print_header;
+ }
}
- /*
- * Add the table to the global table list
- */
+ /* Add the table to the global root table list */
+
status = acpi_tb_store_table(table_desc->address, table_desc->pointer,
table_desc->length, table_desc->flags,
table_index);
@@ -171,6 +209,7 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
goto release;
}
+ print_header:
acpi_tb_print_table_header(table_desc->address, table_desc->pointer);
release:
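The duplicate-table check now insists on identical length before comparing contents, and only reports AE_ALREADY_EXISTS if the matching table is still loaded. A plain-C sketch of just the match test, with a hypothetical slot type standing in for struct acpi_table_desc:

#include <string.h>

struct sketch_slot {
	void *pointer;
	unsigned int length;
};

/* Match only when both the full length and the full contents agree. */
static int sketch_tables_identical(const struct sketch_slot *a,
				   const struct sketch_slot *b)
{
	if (a->length != b->length)
		return 0;
	return memcmp(a->pointer, b->pointer, a->length) == 0;
}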
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 912703691d3..ad6cae938f0 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -246,18 +246,18 @@ static const struct file_operations acpi_thermal_polling_fops = {
static int acpi_thermal_get_temperature(struct acpi_thermal *tz)
{
acpi_status status = AE_OK;
-
+ unsigned long long tmp;
if (!tz)
return -EINVAL;
tz->last_temperature = tz->temperature;
- status =
- acpi_evaluate_integer(tz->device->handle, "_TMP", NULL, &tz->temperature);
+ status = acpi_evaluate_integer(tz->device->handle, "_TMP", NULL, &tmp);
if (ACPI_FAILURE(status))
return -ENODEV;
+ tz->temperature = tmp;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Temperature is %lu dK\n",
tz->temperature));
@@ -267,17 +267,16 @@ static int acpi_thermal_get_temperature(struct acpi_thermal *tz)
static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
{
acpi_status status = AE_OK;
-
+ unsigned long long tmp;
if (!tz)
return -EINVAL;
- status =
- acpi_evaluate_integer(tz->device->handle, "_TZP", NULL,
- &tz->polling_frequency);
+ status = acpi_evaluate_integer(tz->device->handle, "_TZP", NULL, &tmp);
if (ACPI_FAILURE(status))
return -ENODEV;
+ tz->polling_frequency = tmp;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Polling frequency is %lu dS\n",
tz->polling_frequency));
@@ -356,6 +355,7 @@ do { \
static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
{
acpi_status status = AE_OK;
+ unsigned long long tmp;
struct acpi_handle_list devices;
int valid = 0;
int i;
@@ -363,7 +363,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
/* Critical Shutdown (required) */
if (flag & ACPI_TRIPS_CRITICAL) {
status = acpi_evaluate_integer(tz->device->handle,
- "_CRT", NULL, &tz->trips.critical.temperature);
+ "_CRT", NULL, &tmp);
+ tz->trips.critical.temperature = tmp;
/*
* Treat freezing temperatures as invalid as well; some
* BIOSes return really low values and cause reboots at startup.
@@ -388,10 +389,12 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
} else if (crt > 0) {
unsigned long crt_k = CELSIUS_TO_KELVIN(crt);
/*
- * Allow override to lower critical threshold
+ * Allow the critical threshold to be overridden
*/
- if (crt_k < tz->trips.critical.temperature)
- tz->trips.critical.temperature = crt_k;
+ if (crt_k > tz->trips.critical.temperature)
+ printk(KERN_WARNING PREFIX
+ "Critical threshold %d C\n", crt);
+ tz->trips.critical.temperature = crt_k;
}
}
}
@@ -399,12 +402,13 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
/* Critical Sleep (optional) */
if (flag & ACPI_TRIPS_HOT) {
status = acpi_evaluate_integer(tz->device->handle,
- "_HOT", NULL, &tz->trips.hot.temperature);
+ "_HOT", NULL, &tmp);
if (ACPI_FAILURE(status)) {
tz->trips.hot.flags.valid = 0;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"No hot threshold\n"));
} else {
+ tz->trips.hot.temperature = tmp;
tz->trips.hot.flags.valid = 1;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Found hot threshold [%lu]\n",
@@ -418,33 +422,40 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (psv == -1) {
status = AE_SUPPORT;
} else if (psv > 0) {
- tz->trips.passive.temperature = CELSIUS_TO_KELVIN(psv);
+ tmp = CELSIUS_TO_KELVIN(psv);
status = AE_OK;
} else {
status = acpi_evaluate_integer(tz->device->handle,
- "_PSV", NULL, &tz->trips.passive.temperature);
+ "_PSV", NULL, &tmp);
}
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
else {
+ tz->trips.passive.temperature = tmp;
tz->trips.passive.flags.valid = 1;
if (flag == ACPI_TRIPS_INIT) {
status = acpi_evaluate_integer(
tz->device->handle, "_TC1",
- NULL, &tz->trips.passive.tc1);
+ NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
+ else
+ tz->trips.passive.tc1 = tmp;
status = acpi_evaluate_integer(
tz->device->handle, "_TC2",
- NULL, &tz->trips.passive.tc2);
+ NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
+ else
+ tz->trips.passive.tc2 = tmp;
status = acpi_evaluate_integer(
tz->device->handle, "_TSP",
- NULL, &tz->trips.passive.tsp);
+ NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
+ else
+ tz->trips.passive.tsp = tmp;
}
}
}
@@ -479,7 +490,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (flag & ACPI_TRIPS_ACTIVE) {
status = acpi_evaluate_integer(tz->device->handle,
- name, NULL, &tz->trips.active[i].temperature);
+ name, NULL, &tmp);
if (ACPI_FAILURE(status)) {
tz->trips.active[i].flags.valid = 0;
if (i == 0)
@@ -500,8 +511,10 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
tz->trips.active[i - 2].temperature :
CELSIUS_TO_KELVIN(act));
break;
- } else
+ } else {
+ tz->trips.active[i].temperature = tmp;
tz->trips.active[i].flags.valid = 1;
+ }
}
name[2] = 'L';
@@ -1213,8 +1226,8 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
acpi_bus_private_data_handler,
tz->thermal_zone);
if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Error attaching device data\n"));
+ printk(KERN_ERR PREFIX
+ "Error attaching device data\n");
return -ENODEV;
}
@@ -1647,7 +1660,7 @@ static int acpi_thermal_add(struct acpi_device *device)
strcpy(tz->name, device->pnp.bus_id);
strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
- acpi_driver_data(device) = tz;
+ device->driver_data = tz;
mutex_init(&tz->lock);
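Because acpi_evaluate_integer() now fills an unsigned long long (see the utils.c hunk later in this patch), the thermal driver reads into a local tmp and only then assigns to its narrower trip-point fields. A hedged sketch of that caller pattern, using _TMP as the example method:

static int sketch_read_temp(acpi_handle handle, unsigned long *temp_dk)
{
	unsigned long long tmp;
	acpi_status status;

	status = acpi_evaluate_integer(handle, "_TMP", NULL, &tmp);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	*temp_dk = tmp;		/* deliberate narrowing into the driver field */
	return 0;
}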
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
index 8a649f40d16..2a632f8b7a0 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/acpi/toshiba_acpi.c
@@ -548,7 +548,7 @@ static unsigned long write_video(const char *buffer, unsigned long count)
hci_read1(HCI_VIDEO_OUT, &video_out, &hci_result);
if (hci_result == HCI_SUCCESS) {
- int new_video_out = video_out;
+ unsigned int new_video_out = video_out;
if (lcd_out != -1)
_set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out);
if (crt_out != -1)
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index 7dcb67e0b21..241c535c175 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -232,7 +232,7 @@ acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
* RETURN: Status
*
* DESCRIPTION: Validate that the buffer is of the required length or
- * allocate a new buffer. Returned buffer is always zeroed.
+ * allocate a new buffer. Returned buffer is always zeroed.
*
******************************************************************************/
@@ -240,7 +240,7 @@ acpi_status
acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
acpi_size required_length)
{
- acpi_status status = AE_OK;
+ acpi_size input_buffer_length;
/* Parameter validation */
@@ -248,55 +248,58 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
return (AE_BAD_PARAMETER);
}
- switch (buffer->length) {
+ /*
+ * Buffer->Length is used as both an input and output parameter. Get the
+ * input actual length and set the output required buffer length.
+ */
+ input_buffer_length = buffer->length;
+ buffer->length = required_length;
+
+ /*
+ * The input buffer length contains the actual buffer length, or the type
+ * of buffer to be allocated by this routine.
+ */
+ switch (input_buffer_length) {
case ACPI_NO_BUFFER:
- /* Set the exception and returned the required length */
+ /* Return the exception (and the required buffer length) */
- status = AE_BUFFER_OVERFLOW;
- break;
+ return (AE_BUFFER_OVERFLOW);
case ACPI_ALLOCATE_BUFFER:
/* Allocate a new buffer */
buffer->pointer = acpi_os_allocate(required_length);
- if (!buffer->pointer) {
- return (AE_NO_MEMORY);
- }
-
- /* Clear the buffer */
-
- ACPI_MEMSET(buffer->pointer, 0, required_length);
break;
case ACPI_ALLOCATE_LOCAL_BUFFER:
/* Allocate a new buffer with local interface to allow tracking */
- buffer->pointer = ACPI_ALLOCATE_ZEROED(required_length);
- if (!buffer->pointer) {
- return (AE_NO_MEMORY);
- }
+ buffer->pointer = ACPI_ALLOCATE(required_length);
break;
default:
/* Existing buffer: Validate the size of the buffer */
- if (buffer->length < required_length) {
- status = AE_BUFFER_OVERFLOW;
- break;
+ if (input_buffer_length < required_length) {
+ return (AE_BUFFER_OVERFLOW);
}
+ break;
+ }
- /* Clear the buffer */
+ /* Validate allocation from above or input buffer pointer */
- ACPI_MEMSET(buffer->pointer, 0, required_length);
- break;
+ if (!buffer->pointer) {
+ return (AE_NO_MEMORY);
}
- buffer->length = required_length;
- return (status);
+ /* Have a valid buffer, clear it */
+
+ ACPI_MEMSET(buffer->pointer, 0, required_length);
+ return (AE_OK);
}
#ifdef NOT_USED_BY_LINUX
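After the rewrite, acpi_ut_initialize_buffer() always returns the required size in Buffer->Length (even on AE_BUFFER_OVERFLOW) and zeroes whichever buffer ends up being used. A typical Linux-side caller, assuming the usual ACPI_ALLOCATE_BUFFER convention where the result is freed with kfree() (the method name is just an example):

static void sketch_eval_to_buffer(acpi_handle handle)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	/* The interpreter sizes, allocates and zeroes buf.pointer for us. */
	status = acpi_evaluate_object(handle, "_BIF", NULL, &buf);
	if (ACPI_FAILURE(status))
		return;

	/* ... consume buf.pointer / buf.length ... */
	kfree(buf.pointer);
}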
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
index 53499ac9098..5b2f7c27b70 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/utilities/utcopy.c
@@ -42,7 +42,6 @@
*/
#include <acpi/acpi.h>
-#include <acpi/amlcode.h>
#include <acpi/acnamesp.h>
@@ -176,20 +175,24 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
/* This is an object reference. */
- switch (internal_object->reference.opcode) {
- case AML_INT_NAMEPATH_OP:
-
- /* For namepath, return the object handle ("reference") */
-
- default:
-
- /* We are referring to the namespace node */
+ switch (internal_object->reference.class) {
+ case ACPI_REFCLASS_NAME:
+ /*
+ * For namepath, return the object handle ("reference")
+ * We are referring to the namespace node
+ */
external_object->reference.handle =
internal_object->reference.node;
external_object->reference.actual_type =
acpi_ns_get_type(internal_object->reference.node);
break;
+
+ default:
+
+ /* All other reference types are unsupported */
+
+ return_ACPI_STATUS(AE_TYPE);
}
break;
@@ -533,7 +536,7 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
/* TBD: should validate incoming handle */
- internal_object->reference.opcode = AML_INT_NAMEPATH_OP;
+ internal_object->reference.class = ACPI_REFCLASS_NAME;
internal_object->reference.node =
external_object->reference.handle;
break;
@@ -743,11 +746,11 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
* We copied the reference object, so we now must add a reference
* to the object pointed to by the reference
*
- * DDBHandle reference (from Load/load_table is a special reference,
- * it's Reference.Object is the table index, so does not need to
+ * DDBHandle reference (from Load/load_table) is a special reference,
+ * it does not have a Reference.Object, so does not need to
* increase the reference count
*/
- if (source_desc->reference.opcode == AML_LOAD_OP) {
+ if (source_desc->reference.class == ACPI_REFCLASS_TABLE) {
break;
}
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index 42609d3a8aa..d197c6b29e1 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -45,7 +45,6 @@
#include <acpi/acinterp.h>
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
-#include <acpi/amlcode.h>
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utdelete")
@@ -548,8 +547,8 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
* reference must track changes to the ref count of the index or
* target object.
*/
- if ((object->reference.opcode == AML_INDEX_OP) ||
- (object->reference.opcode == AML_INT_NAMEPATH_OP)) {
+ if ((object->reference.class == ACPI_REFCLASS_INDEX) ||
+ (object->reference.class == ACPI_REFCLASS_NAME)) {
next_object = object->reference.object;
}
break;
@@ -586,6 +585,13 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
ACPI_EXCEPTION((AE_INFO, status,
"Could not update object reference count"));
+ /* Free any stacked Update State objects */
+
+ while (state_list) {
+ state = acpi_ut_pop_generic_state(&state_list);
+ acpi_ut_delete_generic_state(state);
+ }
+
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index a6e71b801d2..670551b95e5 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -281,7 +281,6 @@ struct acpi_bit_register_info acpi_gbl_bit_register_info[ACPI_NUM_BITREG] = {
/* ACPI_BITREG_RT_CLOCK_ENABLE */ {ACPI_REGISTER_PM1_ENABLE,
ACPI_BITPOSITION_RT_CLOCK_ENABLE,
ACPI_BITMASK_RT_CLOCK_ENABLE},
- /* ACPI_BITREG_WAKE_ENABLE */ {ACPI_REGISTER_PM1_ENABLE, 0, 0},
/* ACPI_BITREG_PCIEXP_WAKE_DISABLE */ {ACPI_REGISTER_PM1_ENABLE,
ACPI_BITPOSITION_PCIEXP_WAKE_DISABLE,
ACPI_BITMASK_PCIEXP_WAKE_DISABLE},
@@ -575,6 +574,47 @@ char *acpi_ut_get_descriptor_name(void *object)
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_reference_name
+ *
+ * PARAMETERS: Object - An ACPI reference object
+ *
+ * RETURN: Pointer to a string
+ *
+ * DESCRIPTION: Decode a reference object sub-type to a string.
+ *
+ ******************************************************************************/
+
+/* Printable names of reference object sub-types */
+
+static const char *acpi_gbl_ref_class_names[] = {
+ /* 00 */ "Local",
+ /* 01 */ "Argument",
+ /* 02 */ "RefOf",
+ /* 03 */ "Index",
+ /* 04 */ "DdbHandle",
+ /* 05 */ "Named Object",
+ /* 06 */ "Debug"
+};
+
+const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
+{
+ if (!object)
+ return "NULL Object";
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND)
+ return "Not an Operand object";
+
+ if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE)
+ return "Not a Reference object";
+
+ if (object->reference.class > ACPI_REFCLASS_MAX)
+ return "Unknown Reference class";
+
+ return acpi_gbl_ref_class_names[object->reference.class];
+}
+
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
/*
* Strings and procedures used for debug only
@@ -677,14 +717,14 @@ u8 acpi_ut_valid_object_type(acpi_object_type type)
*
* PARAMETERS: None
*
- * RETURN: None
+ * RETURN: Status
*
* DESCRIPTION: Init library globals. All globals that require specific
* initialization should be initialized here!
*
******************************************************************************/
-void acpi_ut_init_globals(void)
+acpi_status acpi_ut_init_globals(void)
{
acpi_status status;
u32 i;
@@ -695,7 +735,7 @@ void acpi_ut_init_globals(void)
status = acpi_ut_create_caches();
if (ACPI_FAILURE(status)) {
- return;
+ return_ACPI_STATUS(status);
}
/* Mutex locked flags */
@@ -772,8 +812,8 @@ void acpi_ut_init_globals(void)
acpi_gbl_display_final_mem_stats = FALSE;
#endif
- return_VOID;
+ return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
- ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
+ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index f34be677355..9089a158a87 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -995,6 +995,15 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
state->pkg.
this_target_obj, 0);
if (!state) {
+
+ /* Free any stacked Update State objects */
+
+ while (state_list) {
+ state =
+ acpi_ut_pop_generic_state
+ (&state_list);
+ acpi_ut_delete_generic_state(state);
+ }
return_ACPI_STATUS(AE_NO_MEMORY);
}
}
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index 916eff399eb..c354e7a42bc 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -43,7 +43,6 @@
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
-#include <acpi/amlcode.h>
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utobject")
@@ -478,8 +477,8 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
case ACPI_TYPE_LOCAL_REFERENCE:
- switch (internal_object->reference.opcode) {
- case AML_INT_NAMEPATH_OP:
+ switch (internal_object->reference.class) {
+ case ACPI_REFCLASS_NAME:
/*
* Get the actual length of the full pathname to this object.
@@ -503,8 +502,10 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
* required eventually.
*/
ACPI_ERROR((AE_INFO,
- "Unsupported Reference opcode=%X in object %p",
- internal_object->reference.opcode,
+ "Cannot convert to external object - "
+ "unsupported Reference Class [%s] %X in object %p",
+ acpi_ut_get_reference_name(internal_object),
+ internal_object->reference.class,
internal_object));
status = AE_TYPE;
break;
@@ -513,7 +514,9 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
default:
- ACPI_ERROR((AE_INFO, "Unsupported type=%X in object %p",
+ ACPI_ERROR((AE_INFO, "Cannot convert to external object - "
+ "unsupported type [%s] %X in object %p",
+ acpi_ut_get_object_type_name(internal_object),
ACPI_GET_OBJECT_TYPE(internal_object),
internal_object));
status = AE_TYPE;
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
index f8bdadf3c32..c198a4d4058 100644
--- a/drivers/acpi/utilities/utxface.c
+++ b/drivers/acpi/utilities/utxface.c
@@ -81,7 +81,12 @@ acpi_status __init acpi_initialize_subsystem(void)
/* Initialize all globals used by the subsystem */
- acpi_ut_init_globals();
+ status = acpi_ut_init_globals();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During initialization of globals"));
+ return_ACPI_STATUS(status);
+ }
/* Create the default mutex objects */
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 10092614381..e827be36ee8 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -256,7 +256,7 @@ EXPORT_SYMBOL(acpi_extract_package);
acpi_status
acpi_evaluate_integer(acpi_handle handle,
acpi_string pathname,
- struct acpi_object_list *arguments, unsigned long *data)
+ struct acpi_object_list *arguments, unsigned long long *data)
{
acpi_status status = AE_OK;
union acpi_object *element;
@@ -288,7 +288,7 @@ acpi_evaluate_integer(acpi_handle handle,
*data = element->integer.value;
kfree(element);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%lu]\n", *data));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%llu]\n", *data));
return AE_OK;
}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index e8a51a1700f..a29b0ccac65 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -291,20 +291,20 @@ static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
int level);
static int acpi_video_device_lcd_get_level_current(
struct acpi_video_device *device,
- unsigned long *level);
+ unsigned long long *level);
static int acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event);
static void acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
static int acpi_video_device_get_state(struct acpi_video_device *device,
- unsigned long *state);
+ unsigned long long *state);
static int acpi_video_output_get(struct output_device *od);
static int acpi_video_device_set_state(struct acpi_video_device *device, int state);
/*backlight device sysfs support*/
static int acpi_video_get_brightness(struct backlight_device *bd)
{
- unsigned long cur_level;
+ unsigned long long cur_level;
int i;
struct acpi_video_device *vd =
(struct acpi_video_device *)bl_get_data(bd);
@@ -336,7 +336,7 @@ static struct backlight_ops acpi_backlight_ops = {
/*video output device sysfs support*/
static int acpi_video_output_get(struct output_device *od)
{
- unsigned long state;
+ unsigned long long state;
struct acpi_video_device *vd =
(struct acpi_video_device *)dev_get_drvdata(&od->dev);
acpi_video_device_get_state(vd, &state);
@@ -370,7 +370,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
{
struct acpi_device *device = cdev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
- unsigned long level;
+ unsigned long long level;
int state;
acpi_video_device_lcd_get_level_current(video, &level);
@@ -410,7 +410,7 @@ static struct thermal_cooling_device_ops video_cooling_ops = {
/* device */
static int
-acpi_video_device_query(struct acpi_video_device *device, unsigned long *state)
+acpi_video_device_query(struct acpi_video_device *device, unsigned long long *state)
{
int status;
@@ -421,7 +421,7 @@ acpi_video_device_query(struct acpi_video_device *device, unsigned long *state)
static int
acpi_video_device_get_state(struct acpi_video_device *device,
- unsigned long *state)
+ unsigned long long *state)
{
int status;
@@ -436,7 +436,7 @@ acpi_video_device_set_state(struct acpi_video_device *device, int state)
int status;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
- unsigned long ret;
+ unsigned long long ret;
arg0.integer.value = state;
@@ -495,7 +495,7 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
static int
acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
- unsigned long *level)
+ unsigned long long *level)
{
if (device->cap._BQC)
return acpi_evaluate_integer(device->dev->handle, "_BQC", NULL,
@@ -549,7 +549,7 @@ static int
acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option)
{
int status;
- unsigned long tmp;
+ unsigned long long tmp;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
@@ -564,7 +564,7 @@ acpi_video_bus_set_POST(struct acpi_video_bus *video, unsigned long option)
}
static int
-acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long *id)
+acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long long *id)
{
int status;
@@ -575,7 +575,7 @@ acpi_video_bus_get_POST(struct acpi_video_bus *video, unsigned long *id)
static int
acpi_video_bus_POST_options(struct acpi_video_bus *video,
- unsigned long *options)
+ unsigned long long *options)
{
int status;
@@ -918,7 +918,7 @@ static int acpi_video_device_state_seq_show(struct seq_file *seq, void *offset)
{
int status;
struct acpi_video_device *dev = seq->private;
- unsigned long state;
+ unsigned long long state;
if (!dev)
@@ -927,14 +927,14 @@ static int acpi_video_device_state_seq_show(struct seq_file *seq, void *offset)
status = acpi_video_device_get_state(dev, &state);
seq_printf(seq, "state: ");
if (ACPI_SUCCESS(status))
- seq_printf(seq, "0x%02lx\n", state);
+ seq_printf(seq, "0x%02llx\n", state);
else
seq_printf(seq, "<not supported>\n");
status = acpi_video_device_query(dev, &state);
seq_printf(seq, "query: ");
if (ACPI_SUCCESS(status))
- seq_printf(seq, "0x%02lx\n", state);
+ seq_printf(seq, "0x%02llx\n", state);
else
seq_printf(seq, "<not supported>\n");
@@ -1217,7 +1217,7 @@ static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file)
static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_video_bus *video = seq->private;
- unsigned long options;
+ unsigned long long options;
int status;
@@ -1232,7 +1232,7 @@ static int acpi_video_bus_POST_info_seq_show(struct seq_file *seq, void *offset)
printk(KERN_WARNING PREFIX
"This indicates a BIOS bug. Please contact the manufacturer.\n");
}
- printk("%lx\n", options);
+ printk("%llx\n", options);
seq_printf(seq, "can POST: <integrated video>");
if (options & 2)
seq_printf(seq, " <PCI video>");
@@ -1256,7 +1256,7 @@ static int acpi_video_bus_POST_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_video_bus *video = seq->private;
int status;
- unsigned long id;
+ unsigned long long id;
if (!video)
@@ -1303,7 +1303,7 @@ acpi_video_bus_write_POST(struct file *file,
struct seq_file *m = file->private_data;
struct acpi_video_bus *video = m->private;
char str[12] = { 0 };
- unsigned long opt, options;
+ unsigned long long opt, options;
if (!video || count + 1 > sizeof str)
@@ -1473,7 +1473,7 @@ static int
acpi_video_bus_get_one_device(struct acpi_device *device,
struct acpi_video_bus *video)
{
- unsigned long device_id;
+ unsigned long long device_id;
int status;
struct acpi_video_device *data;
struct acpi_video_device_attrib* attribute;
@@ -1491,7 +1491,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
- acpi_driver_data(device) = data;
+ device->driver_data = data;
data->device_id = device_id;
data->video = video;
@@ -1530,8 +1530,8 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
acpi_video_device_notify,
data);
if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Error installing notify handler\n"));
+ printk(KERN_ERR PREFIX
+ "Error installing notify handler\n");
if(data->brightness)
kfree(data->brightness->levels);
kfree(data->brightness);
@@ -1724,7 +1724,7 @@ acpi_video_get_next_level(struct acpi_video_device *device,
static void
acpi_video_switch_brightness(struct acpi_video_device *device, int event)
{
- unsigned long level_current, level_next;
+ unsigned long long level_current, level_next;
if (!device->brightness)
return;
acpi_video_device_lcd_get_level_current(device, &level_current);
@@ -1745,8 +1745,8 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
status = acpi_video_bus_get_one_device(dev, video);
if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_WARN,
- "Cant attach device"));
+ printk(KERN_WARNING PREFIX
+ "Cant attach device");
continue;
}
}
@@ -1982,7 +1982,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
video->device = device;
strcpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
- acpi_driver_data(device) = video;
+ device->driver_data = video;
acpi_video_bus_find_cap(video);
error = acpi_video_bus_check(video);
@@ -2003,8 +2003,8 @@ static int acpi_video_bus_add(struct acpi_device *device)
ACPI_DEVICE_NOTIFY,
acpi_video_bus_notify, video);
if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Error installing notify handler\n"));
+ printk(KERN_ERR PREFIX
+ "Error installing notify handler\n");
error = -ENODEV;
goto err_stop_video;
}
@@ -2058,7 +2058,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
acpi_video_bus_remove_fs(device);
err_free_video:
kfree(video);
- acpi_driver_data(device) = NULL;
+ device->driver_data = NULL;
return error;
}
diff --git a/drivers/acpi/wmi.c b/drivers/acpi/wmi.c
index cfe2c833474..47cd7baf9b1 100644
--- a/drivers/acpi/wmi.c
+++ b/drivers/acpi/wmi.c
@@ -217,6 +217,35 @@ static bool find_guid(const char *guid_string, struct wmi_block **out)
return 0;
}
+static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
+{
+ struct guid_block *block = NULL;
+ char method[5];
+ struct acpi_object_list input;
+ union acpi_object params[1];
+ acpi_status status;
+ acpi_handle handle;
+
+ block = &wblock->gblock;
+ handle = wblock->handle;
+
+ if (!block)
+ return AE_NOT_EXIST;
+
+ input.count = 1;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = enable;
+
+ snprintf(method, 5, "WE%02X", block->notify_id);
+ status = acpi_evaluate_object(handle, method, &input, NULL);
+
+ if (status != AE_OK && status != AE_NOT_FOUND)
+ return status;
+ else
+ return AE_OK;
+}
+
/*
* Exported WMI functions
*/
@@ -242,7 +271,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
char method[4] = "WM";
if (!find_guid(guid_string, &wblock))
- return AE_BAD_ADDRESS;
+ return AE_ERROR;
block = &wblock->gblock;
handle = wblock->handle;
@@ -304,7 +333,7 @@ struct acpi_buffer *out)
return AE_BAD_PARAMETER;
if (!find_guid(guid_string, &wblock))
- return AE_BAD_ADDRESS;
+ return AE_ERROR;
block = &wblock->gblock;
handle = wblock->handle;
@@ -314,7 +343,7 @@ struct acpi_buffer *out)
/* Check GUID is a data block */
if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
- return AE_BAD_ADDRESS;
+ return AE_ERROR;
input.count = 1;
input.pointer = wq_params;
@@ -385,7 +414,7 @@ const struct acpi_buffer *in)
return AE_BAD_DATA;
if (!find_guid(guid_string, &wblock))
- return AE_BAD_ADDRESS;
+ return AE_ERROR;
block = &wblock->gblock;
handle = wblock->handle;
@@ -395,7 +424,7 @@ const struct acpi_buffer *in)
/* Check GUID is a data block */
if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
- return AE_BAD_ADDRESS;
+ return AE_ERROR;
input.count = 2;
input.pointer = params;
@@ -427,6 +456,7 @@ acpi_status wmi_install_notify_handler(const char *guid,
wmi_notify_handler handler, void *data)
{
struct wmi_block *block;
+ acpi_status status;
if (!guid || !handler)
return AE_BAD_PARAMETER;
@@ -441,7 +471,9 @@ wmi_notify_handler handler, void *data)
block->handler = handler;
block->handler_data = data;
- return AE_OK;
+ status = wmi_method_enable(block, 1);
+
+ return status;
}
EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
@@ -453,6 +485,7 @@ EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
acpi_status wmi_remove_notify_handler(const char *guid)
{
struct wmi_block *block;
+ acpi_status status;
if (!guid)
return AE_BAD_PARAMETER;
@@ -464,10 +497,12 @@ acpi_status wmi_remove_notify_handler(const char *guid)
if (!block->handler)
return AE_NULL_ENTRY;
+ status = wmi_method_enable(block, 0);
+
block->handler = NULL;
block->handler_data = NULL;
- return AE_OK;
+ return status;
}
EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
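wmi_method_enable() builds the per-block control method name WExx from the notify id and evaluates it with 1 or 0, so event delivery is now armed on handler installation and disarmed on removal. A hedged driver-side sketch, with a placeholder GUID and handler (the header providing the WMI prototypes is assumed to be <linux/acpi.h> here):

#include <linux/acpi.h>

#define SKETCH_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"	/* placeholder */

static void sketch_wmi_handler(u32 value, void *context)
{
	printk(KERN_INFO "WMI event 0x%x\n", value);
}

static int sketch_attach(void)
{
	/* Since this change, a successful install also evaluates WExx(1). */
	acpi_status status = wmi_install_notify_handler(SKETCH_GUID,
							sketch_wmi_handler,
							NULL);
	return ACPI_SUCCESS(status) ? 0 : -ENODEV;
}

static void sketch_detach(void)
{
	/* ... and removal evaluates WExx(0) before clearing the handler. */
	wmi_remove_notify_handler(SKETCH_GUID);
}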
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 9330b7922f6..c012307d0ba 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -120,21 +120,6 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap)
ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
}
-static void ata_acpi_eject_device(acpi_handle handle)
-{
- struct acpi_object_list arg_list;
- union acpi_object arg;
-
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = 1;
-
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_EJ0",
- &arg_list, NULL)))
- printk(KERN_ERR "Failed to evaluate _EJ0!\n");
-}
-
/* @ap and @dev are the same as ata_acpi_handle_hotplug() */
static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
{
@@ -157,7 +142,6 @@ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
* @ap: ATA port ACPI event occurred
* @dev: ATA device ACPI event occurred (can be NULL)
* @event: ACPI event which occurred
- * @is_dock_event: boolean indicating whether the event was a dock one
*
 * All ACPI bay / device related events end up in this function. If
* the event is port-wide @dev is NULL. If the event is specific to a
@@ -171,117 +155,100 @@ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
* ACPI notify handler context. May sleep.
*/
static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
- u32 event, int is_dock_event)
+ u32 event)
{
- char event_string[12];
- char *envp[] = { event_string, NULL };
struct ata_eh_info *ehi = &ap->link.eh_info;
- struct kobject *kobj = NULL;
int wait = 0;
unsigned long flags;
- acpi_handle handle, tmphandle;
- unsigned long sta;
- acpi_status status;
+ acpi_handle handle;
- if (dev) {
- if (dev->sdev)
- kobj = &dev->sdev->sdev_gendev.kobj;
+ if (dev)
handle = dev->acpi_handle;
- } else {
- kobj = &ap->dev->kobj;
+ else
handle = ap->acpi_handle;
- }
-
- status = acpi_get_handle(handle, "_EJ0", &tmphandle);
- if (ACPI_FAILURE(status))
- /* This device does not support hotplug */
- return;
-
- if (event == ACPI_NOTIFY_BUS_CHECK ||
- event == ACPI_NOTIFY_DEVICE_CHECK)
- status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
spin_lock_irqsave(ap->lock, flags);
-
+ /*
+ * When the dock driver calls into this routine, it always uses
+ * ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and
+ * ACPI_NOTIFY_EJECT_REQUEST for remove
+ */
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
ata_ehi_push_desc(ehi, "ACPI event");
- if (ACPI_FAILURE(status)) {
- ata_port_printk(ap, KERN_ERR,
- "acpi: failed to determine bay status (0x%x)\n",
- status);
- break;
- }
-
- if (sta) {
- ata_ehi_hotplugged(ehi);
- ata_port_freeze(ap);
- } else {
- /* The device has gone - unplug it */
- ata_acpi_detach_device(ap, dev);
- wait = 1;
- }
+ ata_ehi_hotplugged(ehi);
+ ata_port_freeze(ap);
break;
case ACPI_NOTIFY_EJECT_REQUEST:
ata_ehi_push_desc(ehi, "ACPI event");
- if (!is_dock_event)
- break;
-
- /* undock event - immediate unplug */
ata_acpi_detach_device(ap, dev);
wait = 1;
break;
}
- /* make sure kobj doesn't go away while ap->lock is released */
- kobject_get(kobj);
-
spin_unlock_irqrestore(ap->lock, flags);
- if (wait) {
+ if (wait)
ata_port_wait_eh(ap);
- ata_acpi_eject_device(handle);
- }
-
- if (kobj && !is_dock_event) {
- sprintf(event_string, "BAY_EVENT=%d", event);
- kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
- }
-
- kobject_put(kobj);
}
static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
{
struct ata_device *dev = data;
- ata_acpi_handle_hotplug(dev->link->ap, dev, event, 1);
+ ata_acpi_handle_hotplug(dev->link->ap, dev, event);
}
static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data)
{
struct ata_port *ap = data;
- ata_acpi_handle_hotplug(ap, NULL, event, 1);
+ ata_acpi_handle_hotplug(ap, NULL, event);
}
-static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data)
+static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
+ u32 event)
{
- struct ata_device *dev = data;
+ struct kobject *kobj = NULL;
+ char event_string[20];
+ char *envp[] = { event_string, NULL };
+
+ if (dev) {
+ if (dev->sdev)
+ kobj = &dev->sdev->sdev_gendev.kobj;
+ } else
+ kobj = &ap->dev->kobj;
- ata_acpi_handle_hotplug(dev->link->ap, dev, event, 0);
+ if (kobj) {
+ snprintf(event_string, 20, "BAY_EVENT=%d", event);
+ kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
+ }
}
-static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data)
+static void ata_acpi_ap_uevent(acpi_handle handle, u32 event, void *data)
{
- struct ata_port *ap = data;
+ ata_acpi_uevent(data, NULL, event);
+}
- ata_acpi_handle_hotplug(ap, NULL, event, 0);
+static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
+{
+ struct ata_device *dev = data;
+ ata_acpi_uevent(dev->link->ap, dev, event);
}
+static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
+ .handler = ata_acpi_dev_notify_dock,
+ .uevent = ata_acpi_dev_uevent,
+};
+
+static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
+ .handler = ata_acpi_ap_notify_dock,
+ .uevent = ata_acpi_ap_uevent,
+};
+
/**
* ata_acpi_associate - associate ATA host with ACPI objects
* @host: target ATA host
@@ -315,24 +282,18 @@ void ata_acpi_associate(struct ata_host *host)
ata_acpi_associate_ide_port(ap);
if (ap->acpi_handle) {
- acpi_install_notify_handler(ap->acpi_handle,
- ACPI_SYSTEM_NOTIFY,
- ata_acpi_ap_notify, ap);
/* we might be on a docking station */
register_hotplug_dock_device(ap->acpi_handle,
- ata_acpi_ap_notify_dock, ap);
+ &ata_acpi_ap_dock_ops, ap);
}
for (j = 0; j < ata_link_max_devices(&ap->link); j++) {
struct ata_device *dev = &ap->link.device[j];
if (dev->acpi_handle) {
- acpi_install_notify_handler(dev->acpi_handle,
- ACPI_SYSTEM_NOTIFY,
- ata_acpi_dev_notify, dev);
/* we might be on a docking station */
register_hotplug_dock_device(dev->acpi_handle,
- ata_acpi_dev_notify_dock, dev);
+ &ata_acpi_dev_dock_ops, dev);
}
}
}
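libata's ACPI hotplug handling is split in two here: the dock driver's .handler callback does the eject/attach work, while .uevent only emits BAY_EVENT to user space. A condensed sketch of the registration side, mirroring the acpi_dock_ops usage above with the callback bodies elided:

static void sketch_dock_notify(acpi_handle handle, u32 event, void *data)
{
	/* hotplug/eject handling for the port or device passed in 'data' */
}

static void sketch_dock_uevent(acpi_handle handle, u32 event, void *data)
{
	/* emit BAY_EVENT=<event> for the corresponding kobject */
}

static struct acpi_dock_ops sketch_dock_ops = {
	.handler = sketch_dock_notify,
	.uevent  = sketch_dock_uevent,
};

/* During association, for a handle that may sit on a docking station:
 *	register_hotplug_dock_device(handle, &sketch_dock_ops, my_data);
 */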
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1ee9499bd34..bbb3cae5749 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5373,6 +5373,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
#ifdef CONFIG_ATA_SFF
INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
+#else
+ INIT_DELAYED_WORK(&ap->port_task, NULL);
#endif
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index a93247cc395..5d687d7cffa 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1206,7 +1206,10 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
ata_eh_clear_action(link, dev, ehi, action);
- if (!(ehc->i.flags & ATA_EHI_QUIET))
+ /* About to take EH action, set RECOVERED. Ignore actions on
+ * slave links as master will do them again.
+ */
+ if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
ap->pflags |= ATA_PFLAG_RECOVERED;
spin_unlock_irqrestore(ap->lock, flags);
@@ -2010,8 +2013,13 @@ void ata_eh_autopsy(struct ata_port *ap)
struct ata_eh_context *mehc = &ap->link.eh_context;
struct ata_eh_context *sehc = &ap->slave_link->eh_context;
+ /* transfer control flags from master to slave */
+ sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
+
+ /* perform autopsy on the slave link */
ata_eh_link_autopsy(ap->slave_link);
+ /* transfer actions from slave to master and clear slave */
ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
mehc->i.action |= sehc->i.action;
mehc->i.dev_action[1] |= sehc->i.dev_action[1];
@@ -2447,14 +2455,14 @@ int ata_eh_reset(struct ata_link *link, int classify,
dev->pio_mode = XFER_PIO_0;
dev->flags &= ~ATA_DFLAG_SLEEPING;
- if (ata_phys_link_offline(ata_dev_phys_link(dev)))
- continue;
-
- /* apply class override */
- if (lflags & ATA_LFLAG_ASSUME_ATA)
- classes[dev->devno] = ATA_DEV_ATA;
- else if (lflags & ATA_LFLAG_ASSUME_SEMB)
- classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */
+ if (!ata_phys_link_offline(ata_dev_phys_link(dev))) {
+ /* apply class override */
+ if (lflags & ATA_LFLAG_ASSUME_ATA)
+ classes[dev->devno] = ATA_DEV_ATA;
+ else if (lflags & ATA_LFLAG_ASSUME_SEMB)
+ classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
+ } else
+ classes[dev->devno] = ATA_DEV_NONE;
}
/* record current link speed */
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2a4c516894f..4b473948632 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2153,8 +2153,17 @@ void ata_sff_error_handler(struct ata_port *ap)
*/
void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
{
- if (qc->ap->ioaddr.bmdma_addr)
+ struct ata_port *ap = qc->ap;
+ unsigned long flags;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ ap->hsm_task_state = HSM_ST_IDLE;
+
+ if (ap->ioaddr.bmdma_addr)
ata_bmdma_stop(qc);
+
+ spin_unlock_irqrestore(ap->lock, flags);
}
/**
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 1cfa74535d9..5b72e734300 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -70,6 +70,7 @@ enum {
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static int vt6421_pata_cable_detect(struct ata_port *ap);
@@ -103,21 +104,26 @@ static struct scsi_host_template svia_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
-static struct ata_port_operations vt6420_sata_ops = {
+static struct ata_port_operations svia_base_ops = {
.inherits = &ata_bmdma_port_ops,
+ .sff_tf_load = svia_tf_load,
+};
+
+static struct ata_port_operations vt6420_sata_ops = {
+ .inherits = &svia_base_ops,
.freeze = svia_noop_freeze,
.prereset = vt6420_prereset,
};
static struct ata_port_operations vt6421_pata_ops = {
- .inherits = &ata_bmdma_port_ops,
+ .inherits = &svia_base_ops,
.cable_detect = vt6421_pata_cable_detect,
.set_piomode = vt6421_set_pio_mode,
.set_dmamode = vt6421_set_dma_mode,
};
static struct ata_port_operations vt6421_sata_ops = {
- .inherits = &ata_bmdma_port_ops,
+ .inherits = &svia_base_ops,
.scr_read = svia_scr_read,
.scr_write = svia_scr_write,
};
@@ -168,6 +174,29 @@ static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
return 0;
}
+/**
+ * svia_tf_load - send taskfile registers to host controller
+ * @ap: Port to which output is sent
+ * @tf: ATA taskfile register set
+ *
+ * Outputs ATA taskfile to standard ATA host controller.
+ *
+ * This works around a bug in VIA chipsets: the device register is
+ * reset after the IEN bit in the ctl register is changed.
+ */
+static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ struct ata_taskfile ttf;
+
+ if (tf->ctl != ap->last_ctl) {
+ ttf = *tf;
+ ttf.flags |= ATA_TFLAG_DEVICE;
+ tf = &ttf;
+ }
+ ata_sff_tf_load(ap, tf);
+}
+
static void svia_noop_freeze(struct ata_port *ap)
{
/* Some VIA controllers choke if ATA_NIEN is manipulated in
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index a002a381df9..f6a337c34ac 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -72,9 +72,9 @@ static long disk_size(DAC960_Controller_T *p, int drive_nr)
}
}
-static int DAC960_open(struct inode *inode, struct file *file)
+static int DAC960_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
+ struct gendisk *disk = bdev->bd_disk;
DAC960_Controller_T *p = disk->queue->queuedata;
int drive_nr = (long)disk->private_data;
@@ -89,7 +89,7 @@ static int DAC960_open(struct inode *inode, struct file *file)
return -ENXIO;
}
- check_disk_change(inode->i_bdev);
+ check_disk_change(bdev);
if (!get_capacity(p->disks[drive_nr]))
return -ENXIO;
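The DAC960 hunk just above and the block-driver hunks that follow all make the same interface conversion: .open takes (struct block_device *, fmode_t), .release takes (struct gendisk *, fmode_t), and inode-based ioctls become .locked_ioctl with a (bdev, mode) prototype. A skeletal sketch of the new method signatures, with hypothetical names:

#include <linux/module.h>
#include <linux/blkdev.h>

static int sketch_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;	/* was inode->i_bdev->bd_disk */

	if (!disk->private_data)		/* per-device sanity check */
		return -ENXIO;
	return 0;
}

static int sketch_release(struct gendisk *disk, fmode_t mode)
{
	/* ... drop the reference taken in open() ... */
	return 0;
}

static int sketch_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static struct block_device_operations sketch_fops = {
	.owner		= THIS_MODULE,
	.open		= sketch_open,
	.release	= sketch_release,
	.locked_ioctl	= sketch_ioctl,	/* invoked with the BKL held */
};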
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 7516baff3bb..4b1d4ac960f 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1437,10 +1437,11 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static int fd_ioctl(struct inode *inode, struct file *filp,
+static int fd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
- int drive = iminor(inode) & 3;
+ struct amiga_floppy_struct *p = bdev->bd_disk->private_data;
+ int drive = p - unit;
static struct floppy_struct getprm;
void __user *argp = (void __user *)param;
@@ -1451,7 +1452,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
rel_fdc();
return -EBUSY;
}
- fsync_bdev(inode->i_bdev);
+ fsync_bdev(bdev);
if (fd_motor_on(drive) == 0) {
rel_fdc();
return -ENODEV;
@@ -1464,12 +1465,12 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
rel_fdc();
break;
case FDFMTTRK:
- if (param < unit[drive].type->tracks * unit[drive].type->heads)
+ if (param < p->type->tracks * p->type->heads)
{
get_fdc(drive);
if (fd_seek(drive,param) != 0){
- memset(unit[drive].trackbuf, FD_FILL_BYTE,
- unit[drive].dtype->sects * unit[drive].type->sect_mult * 512);
+ memset(p->trackbuf, FD_FILL_BYTE,
+ p->dtype->sects * p->type->sect_mult * 512);
non_int_flush_track(drive);
}
floppy_off(drive);
@@ -1480,14 +1481,14 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
break;
case FDFMTEND:
floppy_off(drive);
- invalidate_bdev(inode->i_bdev);
+ invalidate_bdev(bdev);
break;
case FDGETPRM:
memset((void *)&getprm, 0, sizeof (getprm));
- getprm.track=unit[drive].type->tracks;
- getprm.head=unit[drive].type->heads;
- getprm.sect=unit[drive].dtype->sects * unit[drive].type->sect_mult;
- getprm.size=unit[drive].blocks;
+ getprm.track=p->type->tracks;
+ getprm.head=p->type->heads;
+ getprm.sect=p->dtype->sects * p->type->sect_mult;
+ getprm.size=p->blocks;
if (copy_to_user(argp, &getprm, sizeof(struct floppy_struct)))
return -EFAULT;
break;
@@ -1500,10 +1501,10 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
break;
#ifdef RAW_IOCTL
case IOCTL_RAW_TRACK:
- if (copy_to_user(argp, raw_buf, unit[drive].type->read_size))
+ if (copy_to_user(argp, raw_buf, p->type->read_size))
return -EFAULT;
else
- return unit[drive].type->read_size;
+ return p->type->read_size;
#endif
default:
printk(KERN_DEBUG "fd_ioctl: unknown cmd %d for drive %d.",
@@ -1548,10 +1549,10 @@ static void fd_probe(int dev)
* /dev/PS0 etc), and disallows simultaneous access to the same
* drive with different device numbers.
*/
-static int floppy_open(struct inode *inode, struct file *filp)
+static int floppy_open(struct block_device *bdev, fmode_t mode)
{
- int drive = iminor(inode) & 3;
- int system = (iminor(inode) & 4) >> 2;
+ int drive = MINOR(bdev->bd_dev) & 3;
+ int system = (MINOR(bdev->bd_dev) & 4) >> 2;
int old_dev;
unsigned long flags;
@@ -1560,9 +1561,9 @@ static int floppy_open(struct inode *inode, struct file *filp)
if (fd_ref[drive] && old_dev != system)
return -EBUSY;
- if (filp && filp->f_mode & 3) {
- check_disk_change(inode->i_bdev);
- if (filp->f_mode & 2 ) {
+ if (mode & (FMODE_READ|FMODE_WRITE)) {
+ check_disk_change(bdev);
+ if (mode & FMODE_WRITE) {
int wrprot;
get_fdc(drive);
@@ -1592,9 +1593,10 @@ static int floppy_open(struct inode *inode, struct file *filp)
return 0;
}
-static int floppy_release(struct inode * inode, struct file * filp)
+static int floppy_release(struct gendisk *disk, fmode_t mode)
{
- int drive = iminor(inode) & 3;
+ struct amiga_floppy_struct *p = disk->private_data;
+ int drive = p - unit;
if (unit[drive].dirty == 1) {
del_timer (flush_track_timer + drive);
@@ -1650,7 +1652,7 @@ static struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
- .ioctl = fd_ioctl,
+ .locked_ioctl = fd_ioctl,
.getgeo = fd_getgeo,
.media_changed = amiga_floppy_change,
};
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index d876ad86123..1747dd272cd 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -118,13 +118,11 @@ aoedisk_rm_sysfs(struct aoedev *d)
}
static int
-aoeblk_open(struct inode *inode, struct file *filp)
+aoeblk_open(struct block_device *bdev, fmode_t mode)
{
- struct aoedev *d;
+ struct aoedev *d = bdev->bd_disk->private_data;
ulong flags;
- d = inode->i_bdev->bd_disk->private_data;
-
spin_lock_irqsave(&d->lock, flags);
if (d->flags & DEVFL_UP) {
d->nopen++;
@@ -136,13 +134,11 @@ aoeblk_open(struct inode *inode, struct file *filp)
}
static int
-aoeblk_release(struct inode *inode, struct file *filp)
+aoeblk_release(struct gendisk *disk, fmode_t mode)
{
- struct aoedev *d;
+ struct aoedev *d = disk->private_data;
ulong flags;
- d = inode->i_bdev->bd_disk->private_data;
-
spin_lock_irqsave(&d->lock, flags);
if (--d->nopen == 0) {
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 432cf401829..69e1df7dfa1 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -361,13 +361,13 @@ static void finish_fdc( void );
static void finish_fdc_done( int dummy );
static void setup_req_params( int drive );
static void redo_fd_request( void);
-static int fd_ioctl( struct inode *inode, struct file *filp, unsigned int
+static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
cmd, unsigned long param);
static void fd_probe( int drive );
static int fd_test_drive_present( int drive );
static void config_types( void );
-static int floppy_open( struct inode *inode, struct file *filp );
-static int floppy_release( struct inode * inode, struct file * filp );
+static int floppy_open(struct block_device *bdev, fmode_t mode);
+static int floppy_release(struct gendisk *disk, fmode_t mode);
/************************* End of Prototypes **************************/
@@ -1483,10 +1483,10 @@ void do_fd_request(struct request_queue * q)
atari_enable_irq( IRQ_MFP_FDC );
}
-static int fd_ioctl(struct inode *inode, struct file *filp,
+static int fd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
+ struct gendisk *disk = bdev->bd_disk;
struct atari_floppy_struct *floppy = disk->private_data;
int drive = floppy - unit;
int type = floppy->type;
@@ -1661,7 +1661,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
/* invalidate the buffer track to force a reread */
BufferDrive = -1;
set_bit(drive, &fake_change);
- check_disk_change(inode->i_bdev);
+ check_disk_change(bdev);
return 0;
default:
return -EINVAL;
@@ -1804,37 +1804,36 @@ static void __init config_types( void )
* drive with different device numbers.
*/
-static int floppy_open( struct inode *inode, struct file *filp )
+static int floppy_open(struct block_device *bdev, fmode_t mode)
{
- struct atari_floppy_struct *p = inode->i_bdev->bd_disk->private_data;
- int type = iminor(inode) >> 2;
+ struct atari_floppy_struct *p = bdev->bd_disk->private_data;
+ int type = MINOR(bdev->bd_dev) >> 2;
DPRINT(("fd_open: type=%d\n",type));
if (p->ref && p->type != type)
return -EBUSY;
- if (p->ref == -1 || (p->ref && filp->f_flags & O_EXCL))
+ if (p->ref == -1 || (p->ref && mode & FMODE_EXCL))
return -EBUSY;
- if (filp->f_flags & O_EXCL)
+ if (mode & FMODE_EXCL)
p->ref = -1;
else
p->ref++;
p->type = type;
- if (filp->f_flags & O_NDELAY)
+ if (mode & FMODE_NDELAY)
return 0;
- if (filp->f_mode & 3) {
- check_disk_change(inode->i_bdev);
- if (filp->f_mode & 2) {
+ if (mode & (FMODE_READ|FMODE_WRITE)) {
+ check_disk_change(bdev);
+ if (mode & FMODE_WRITE) {
if (p->wpstat) {
if (p->ref < 0)
p->ref = 0;
else
p->ref--;
- floppy_release(inode, filp);
return -EROFS;
}
}
@@ -1843,9 +1842,9 @@ static int floppy_open( struct inode *inode, struct file *filp )
}
-static int floppy_release( struct inode * inode, struct file * filp )
+static int floppy_release(struct gendisk *disk, fmode_t mode)
{
- struct atari_floppy_struct *p = inode->i_bdev->bd_disk->private_data;
+ struct atari_floppy_struct *p = disk->private_data;
if (p->ref < 0)
p->ref = 0;
else if (!p->ref--) {
@@ -1859,7 +1858,7 @@ static struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
- .ioctl = fd_ioctl,
+ .locked_ioctl = fd_ioctl,
.media_changed = check_floppy_change,
.revalidate_disk= floppy_revalidate,
};
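
The hunks above all apply the same mechanical conversion: block_device_operations callbacks lose their struct inode / struct file arguments, ->open() and ->ioctl() receive the block_device plus an fmode_t, ->release() receives the gendisk, and the old f_flags/f_mode tests (O_EXCL, O_NDELAY, f_mode & 2) become FMODE_EXCL, FMODE_NDELAY and FMODE_WRITE tests on that mode. A minimal stand-alone C sketch of the resulting open-time checks follows; the FMODE_* values and the demo_floppy structure are simplified stand-ins for illustration, not the kernel definitions.

#include <stdio.h>

/* simplified stand-ins for the kernel's fmode_t bits */
typedef unsigned int fmode_t;
#define FMODE_READ   0x1u
#define FMODE_WRITE  0x2u
#define FMODE_NDELAY 0x40u
#define FMODE_EXCL   0x80u

struct demo_floppy { int ref; int wp; };   /* ref < 0 means exclusively open */

/* mirrors the shape of the converted floppy_open(): all policy comes from mode */
static int demo_open(struct demo_floppy *p, fmode_t mode)
{
	if (p->ref == -1 || (p->ref && (mode & FMODE_EXCL)))
		return -1;                          /* -EBUSY in the kernel */
	if (mode & FMODE_EXCL)
		p->ref = -1;
	else
		p->ref++;
	if (mode & FMODE_NDELAY)
		return 0;                           /* no media checks on O_NDELAY opens */
	if ((mode & FMODE_WRITE) && p->wp) {
		p->ref = (p->ref == -1) ? 0 : p->ref - 1;  /* roll the open count back */
		return -2;                          /* -EROFS: write open of a protected disk */
	}
	return 0;
}

int main(void)
{
	struct demo_floppy fd = { .ref = 0, .wp = 1 };
	printf("read open:       %d\n", demo_open(&fd, FMODE_READ));
	printf("read-write open: %d\n", demo_open(&fd, FMODE_READ | FMODE_WRITE));
	return 0;
}

The same mode-based checks recur, with driver-specific details, in the ataflop, floppy and swim3 hunks of this series.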
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d070d492e38..bdd4f5f4557 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -340,11 +340,10 @@ static int brd_direct_access (struct block_device *bdev, sector_t sector,
}
#endif
-static int brd_ioctl(struct inode *inode, struct file *file,
+static int brd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int error;
- struct block_device *bdev = inode->i_bdev;
struct brd_device *brd = bdev->bd_disk->private_data;
if (cmd != BLKFLSBUF)
@@ -376,7 +375,7 @@ static int brd_ioctl(struct inode *inode, struct file *file,
static struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
- .ioctl = brd_ioctl,
+ .locked_ioctl = brd_ioctl,
#ifdef CONFIG_BLK_DEV_XIP
.direct_access = brd_direct_access,
#endif
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 1e1f9153000..4023885353e 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -152,9 +152,9 @@ static ctlr_info_t *hba[MAX_CTLR];
static void do_cciss_request(struct request_queue *q);
static irqreturn_t do_cciss_intr(int irq, void *dev_id);
-static int cciss_open(struct inode *inode, struct file *filep);
-static int cciss_release(struct inode *inode, struct file *filep);
-static int cciss_ioctl(struct inode *inode, struct file *filep,
+static int cciss_open(struct block_device *bdev, fmode_t mode);
+static int cciss_release(struct gendisk *disk, fmode_t mode);
+static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -192,14 +192,15 @@ static void cciss_procinit(int i)
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_COMPAT
-static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
+static int cciss_compat_ioctl(struct block_device *, fmode_t,
+ unsigned, unsigned long);
#endif
static struct block_device_operations cciss_fops = {
.owner = THIS_MODULE,
.open = cciss_open,
.release = cciss_release,
- .ioctl = cciss_ioctl,
+ .locked_ioctl = cciss_ioctl,
.getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
.compat_ioctl = cciss_compat_ioctl,
@@ -547,13 +548,13 @@ static inline drive_info_struct *get_drv(struct gendisk *disk)
/*
* Open. Make sure the device is really there.
*/
-static int cciss_open(struct inode *inode, struct file *filep)
+static int cciss_open(struct block_device *bdev, fmode_t mode)
{
- ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
- drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
+ ctlr_info_t *host = get_host(bdev->bd_disk);
+ drive_info_struct *drv = get_drv(bdev->bd_disk);
#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
+ printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
#endif /* CCISS_DEBUG */
if (host->busy_initializing || drv->busy_configuring)
@@ -567,9 +568,9 @@ static int cciss_open(struct inode *inode, struct file *filep)
* for "raw controller".
*/
if (drv->heads == 0) {
- if (iminor(inode) != 0) { /* not node 0? */
+ if (MINOR(bdev->bd_dev) != 0) { /* not node 0? */
/* if not node 0 make sure it is a partition = 0 */
- if (iminor(inode) & 0x0f) {
+ if (MINOR(bdev->bd_dev) & 0x0f) {
return -ENXIO;
/* if it is, make sure we have a LUN ID */
} else if (drv->LunID == 0) {
@@ -587,14 +588,13 @@ static int cciss_open(struct inode *inode, struct file *filep)
/*
* Close. Sync first.
*/
-static int cciss_release(struct inode *inode, struct file *filep)
+static int cciss_release(struct gendisk *disk, fmode_t mode)
{
- ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
- drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
+ ctlr_info_t *host = get_host(disk);
+ drive_info_struct *drv = get_drv(disk);
#ifdef CCISS_DEBUG
- printk(KERN_DEBUG "cciss_release %s\n",
- inode->i_bdev->bd_disk->disk_name);
+ printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name);
#endif /* CCISS_DEBUG */
drv->usage_count--;
@@ -604,21 +604,23 @@ static int cciss_release(struct inode *inode, struct file *filep)
#ifdef CONFIG_COMPAT
-static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
+static int do_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg)
{
int ret;
lock_kernel();
- ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
+ ret = cciss_ioctl(bdev, mode, cmd, arg);
unlock_kernel();
return ret;
}
-static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
- unsigned long arg);
-static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
- unsigned long arg);
+static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg);
+static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg);
-static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
+static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg)
{
switch (cmd) {
case CCISS_GETPCIINFO:
@@ -636,20 +638,20 @@ static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
case CCISS_REGNEWD:
case CCISS_RESCANDISK:
case CCISS_GETLUNINFO:
- return do_ioctl(f, cmd, arg);
+ return do_ioctl(bdev, mode, cmd, arg);
case CCISS_PASSTHRU32:
- return cciss_ioctl32_passthru(f, cmd, arg);
+ return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
case CCISS_BIG_PASSTHRU32:
- return cciss_ioctl32_big_passthru(f, cmd, arg);
+ return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg);
default:
return -ENOIOCTLCMD;
}
}
-static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
- unsigned long arg)
+static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg)
{
IOCTL32_Command_struct __user *arg32 =
(IOCTL32_Command_struct __user *) arg;
@@ -676,7 +678,7 @@ static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
if (err)
return -EFAULT;
- err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
+ err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
if (err)
return err;
err |=
@@ -687,8 +689,8 @@ static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
return err;
}
-static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
- unsigned long arg)
+static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg)
{
BIG_IOCTL32_Command_struct __user *arg32 =
(BIG_IOCTL32_Command_struct __user *) arg;
@@ -717,7 +719,7 @@ static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
if (err)
return -EFAULT;
- err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
+ err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
if (err)
return err;
err |=
@@ -745,10 +747,9 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
/*
* ioctl
*/
-static int cciss_ioctl(struct inode *inode, struct file *filep,
+static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
struct gendisk *disk = bdev->bd_disk;
ctlr_info_t *host = get_host(disk);
drive_info_struct *drv = get_drv(disk);
@@ -1232,7 +1233,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
case SG_EMULATED_HOST:
case SG_IO:
case SCSI_IOCTL_SEND_COMMAND:
- return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);
+ return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
/* scsi_cmd_ioctl would normally handle these, below, but */
/* they aren't a good fit for cciss, as CD-ROMs are */
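
In cciss the compat entry point keeps its own dispatch but now funnels everything through a small do_ioctl() helper that takes the big kernel lock around the bdev/mode-based handler, matching the move of the native path to .locked_ioctl. A stand-alone sketch of that wrapper shape is below; lock_kernel()/unlock_kernel() and the handler body are stubs for illustration only, not the kernel's implementations.

#include <stdio.h>

typedef unsigned int fmode_t;
struct block_device { int minor; };

/* stand-ins for lock_kernel()/unlock_kernel(), i.e. the BKL */
static void lock_kernel(void)   { puts("BKL taken");   }
static void unlock_kernel(void) { puts("BKL dropped"); }

/* stands in for cciss_ioctl(), the .locked_ioctl handler */
static int native_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned cmd, unsigned long arg)
{
	printf("ioctl 0x%x on minor %d\n", cmd, bdev->minor);
	return 0;
}

/* compat entry points funnel through one BKL-wrapped helper,
 * mirroring the converted do_ioctl() in cciss */
static int do_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned cmd, unsigned long arg)
{
	int ret;

	lock_kernel();
	ret = native_ioctl(bdev, mode, cmd, arg);
	unlock_kernel();
	return ret;
}

int main(void)
{
	struct block_device bdev = { .minor = 0 };
	return do_ioctl(&bdev, 0, 0x1234, 0);
}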
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 3d967525e9a..47d233c6d0b 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -156,9 +156,9 @@ static int sendcmd(
unsigned int blkcnt,
unsigned int log_unit );
-static int ida_open(struct inode *inode, struct file *filep);
-static int ida_release(struct inode *inode, struct file *filep);
-static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
+static int ida_open(struct block_device *bdev, fmode_t mode);
+static int ida_release(struct gendisk *disk, fmode_t mode);
+static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
@@ -197,7 +197,7 @@ static struct block_device_operations ida_fops = {
.owner = THIS_MODULE,
.open = ida_open,
.release = ida_release,
- .ioctl = ida_ioctl,
+ .locked_ioctl = ida_ioctl,
.getgeo = ida_getgeo,
.revalidate_disk= ida_revalidate,
};
@@ -818,12 +818,12 @@ DBGINFO(
/*
* Open. Make sure the device is really there.
*/
-static int ida_open(struct inode *inode, struct file *filep)
+static int ida_open(struct block_device *bdev, fmode_t mode)
{
- drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
- ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
+ drv_info_t *drv = get_drv(bdev->bd_disk);
+ ctlr_info_t *host = get_host(bdev->bd_disk);
- DBGINFO(printk("ida_open %s\n", inode->i_bdev->bd_disk->disk_name));
+ DBGINFO(printk("ida_open %s\n", bdev->bd_disk->disk_name));
/*
* Root is allowed to open raw volume zero even if it's not configured
* so array config can still work. I don't think I really like this,
@@ -843,9 +843,9 @@ static int ida_open(struct inode *inode, struct file *filep)
/*
* Close. Sync first.
*/
-static int ida_release(struct inode *inode, struct file *filep)
+static int ida_release(struct gendisk *disk, fmode_t mode)
{
- ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
+ ctlr_info_t *host = get_host(disk);
host->usage_count--;
return 0;
}
@@ -1128,10 +1128,10 @@ static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
* ida_ioctl does some miscellaneous stuff like reporting drive geometry,
* setting readahead and submitting commands from userspace to the controller.
*/
-static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
+static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
- drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
- ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
+ drv_info_t *drv = get_drv(bdev->bd_disk);
+ ctlr_info_t *host = get_host(bdev->bd_disk);
int error;
ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
ida_ioctl_t *my_io;
@@ -1165,7 +1165,7 @@ out_passthru:
put_user(host->ctlr_sig, (int __user *)arg);
return 0;
case IDAREVALIDATEVOLS:
- if (iminor(inode) != 0)
+ if (MINOR(bdev->bd_dev) != 0)
return -ENXIO;
return revalidate_allvol(host);
case IDADRIVERVERSION:
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 2cea27aba9a..14db747a636 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3450,14 +3450,14 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
unsigned long param)
{
-#define FD_IOCTL_ALLOWED ((filp) && (filp)->private_data)
+#define FD_IOCTL_ALLOWED (mode & (FMODE_WRITE|FMODE_WRITE_IOCTL))
#define OUT(c,x) case c: outparam = (const char *) (x); break
#define IN(c,x,tag) case c: *(x) = inparam. tag ; return 0
- int drive = (long)inode->i_bdev->bd_disk->private_data;
+ int drive = (long)bdev->bd_disk->private_data;
int type = ITYPE(UDRS->fd_device);
int i;
int ret;
@@ -3516,11 +3516,11 @@ static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
current_type[drive] = NULL;
floppy_sizes[drive] = MAX_DISK_SIZE << 1;
UDRS->keep_data = 0;
- return invalidate_drive(inode->i_bdev);
+ return invalidate_drive(bdev);
case FDSETPRM:
case FDDEFPRM:
return set_geometry(cmd, &inparam.g,
- drive, type, inode->i_bdev);
+ drive, type, bdev);
case FDGETPRM:
ECALL(get_floppy_geometry(drive, type,
(struct floppy_struct **)
@@ -3551,7 +3551,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
case FDFMTEND:
case FDFLUSH:
LOCK_FDC(drive, 1);
- return invalidate_drive(inode->i_bdev);
+ return invalidate_drive(bdev);
case FDSETEMSGTRESH:
UDP->max_errors.reporting =
@@ -3659,9 +3659,9 @@ static void __init config_types(void)
printk("\n");
}
-static int floppy_release(struct inode *inode, struct file *filp)
+static int floppy_release(struct gendisk *disk, fmode_t mode)
{
- int drive = (long)inode->i_bdev->bd_disk->private_data;
+ int drive = (long)disk->private_data;
mutex_lock(&open_lock);
if (UDRS->fd_ref < 0)
@@ -3682,18 +3682,17 @@ static int floppy_release(struct inode *inode, struct file *filp)
* /dev/PS0 etc), and disallows simultaneous access to the same
* drive with different device numbers.
*/
-static int floppy_open(struct inode *inode, struct file *filp)
+static int floppy_open(struct block_device *bdev, fmode_t mode)
{
- int drive = (long)inode->i_bdev->bd_disk->private_data;
- int old_dev;
+ int drive = (long)bdev->bd_disk->private_data;
+ int old_dev, new_dev;
int try;
int res = -EBUSY;
char *tmp;
- filp->private_data = (void *)0;
mutex_lock(&open_lock);
old_dev = UDRS->fd_device;
- if (opened_bdev[drive] && opened_bdev[drive] != inode->i_bdev)
+ if (opened_bdev[drive] && opened_bdev[drive] != bdev)
goto out2;
if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)) {
@@ -3701,15 +3700,15 @@ static int floppy_open(struct inode *inode, struct file *filp)
USETF(FD_VERIFY);
}
- if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (filp->f_flags & O_EXCL)))
+ if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (mode & FMODE_EXCL)))
goto out2;
- if (filp->f_flags & O_EXCL)
+ if (mode & FMODE_EXCL)
UDRS->fd_ref = -1;
else
UDRS->fd_ref++;
- opened_bdev[drive] = inode->i_bdev;
+ opened_bdev[drive] = bdev;
res = -ENXIO;
@@ -3744,31 +3743,26 @@ static int floppy_open(struct inode *inode, struct file *filp)
}
}
- UDRS->fd_device = iminor(inode);
- set_capacity(disks[drive], floppy_sizes[iminor(inode)]);
- if (old_dev != -1 && old_dev != iminor(inode)) {
+ new_dev = MINOR(bdev->bd_dev);
+ UDRS->fd_device = new_dev;
+ set_capacity(disks[drive], floppy_sizes[new_dev]);
+ if (old_dev != -1 && old_dev != new_dev) {
if (buffer_drive == drive)
buffer_track = -1;
}
- /* Allow ioctls if we have write-permissions even if read-only open.
- * Needed so that programs such as fdrawcmd still can work on write
- * protected disks */
- if ((filp->f_mode & FMODE_WRITE) || !file_permission(filp, MAY_WRITE))
- filp->private_data = (void *)8;
-
if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2;
- if (!(filp->f_flags & O_NDELAY)) {
- if (filp->f_mode & 3) {
+ if (!(mode & FMODE_NDELAY)) {
+ if (mode & (FMODE_READ|FMODE_WRITE)) {
UDRS->last_checked = 0;
- check_disk_change(inode->i_bdev);
+ check_disk_change(bdev);
if (UTESTF(FD_DISK_CHANGED))
goto out;
}
res = -EROFS;
- if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE)))
+ if ((mode & FMODE_WRITE) && !(UTESTF(FD_DISK_WRITABLE)))
goto out;
}
mutex_unlock(&open_lock);
@@ -3911,7 +3905,7 @@ static struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_open,
.release = floppy_release,
- .ioctl = fd_ioctl,
+ .locked_ioctl = fd_ioctl,
.getgeo = fd_getgeo,
.media_changed = check_floppy_change,
.revalidate_disk = floppy_revalidate,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d3a25b027ff..3f09cd8bcc3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -210,7 +210,7 @@ lo_do_transfer(struct loop_device *lo, int cmd,
* space operations write_begin and write_end.
*/
static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
- int bsize, loff_t pos, struct page *unused)
+ loff_t pos, struct page *unused)
{
struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
struct address_space *mapping = file->f_mapping;
@@ -302,7 +302,7 @@ static int __do_lo_send_write(struct file *file,
* filesystems.
*/
static int do_lo_send_direct_write(struct loop_device *lo,
- struct bio_vec *bvec, int bsize, loff_t pos, struct page *page)
+ struct bio_vec *bvec, loff_t pos, struct page *page)
{
ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
kmap(bvec->bv_page) + bvec->bv_offset,
@@ -326,7 +326,7 @@ static int do_lo_send_direct_write(struct loop_device *lo,
* destination pages of the backing file.
*/
static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
- int bsize, loff_t pos, struct page *page)
+ loff_t pos, struct page *page)
{
int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
bvec->bv_offset, bvec->bv_len, pos >> 9);
@@ -341,10 +341,9 @@ static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
return ret;
}
-static int lo_send(struct loop_device *lo, struct bio *bio, int bsize,
- loff_t pos)
+static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
{
- int (*do_lo_send)(struct loop_device *, struct bio_vec *, int, loff_t,
+ int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
struct page *page);
struct bio_vec *bvec;
struct page *page = NULL;
@@ -362,7 +361,7 @@ static int lo_send(struct loop_device *lo, struct bio *bio, int bsize,
}
}
bio_for_each_segment(bvec, bio, i) {
- ret = do_lo_send(lo, bvec, bsize, pos, page);
+ ret = do_lo_send(lo, bvec, pos, page);
if (ret < 0)
break;
pos += bvec->bv_len;
@@ -478,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
if (bio_rw(bio) == WRITE)
- ret = lo_send(lo, bio, lo->lo_blocksize, pos);
+ ret = lo_send(lo, bio, pos);
else
ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
return ret;
@@ -652,8 +651,8 @@ static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
* This can only work if the loop device is used read-only, and if the
* new backing store is the same size and type as the old backing store.
*/
-static int loop_change_fd(struct loop_device *lo, struct file *lo_file,
- struct block_device *bdev, unsigned int arg)
+static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
+ unsigned int arg)
{
struct file *file, *old_file;
struct inode *inode;
@@ -712,7 +711,7 @@ static inline int is_loop_device(struct file *file)
return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}
-static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
+static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct block_device *bdev, unsigned int arg)
{
struct file *file, *f;
@@ -740,7 +739,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
while (is_loop_device(f)) {
struct loop_device *l;
- if (f->f_mapping->host->i_rdev == lo_file->f_mapping->host->i_rdev)
+ if (f->f_mapping->host->i_bdev == bdev)
goto out_putf;
l = f->f_mapping->host->i_bdev->bd_disk->private_data;
@@ -786,7 +785,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
goto out_putf;
}
- if (!(lo_file->f_mode & FMODE_WRITE))
+ if (!(mode & FMODE_WRITE))
lo_flags |= LO_FLAGS_READ_ONLY;
set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
@@ -918,9 +917,11 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- invalidate_bdev(bdev);
+ if (bdev)
+ invalidate_bdev(bdev);
set_capacity(lo->lo_disk, 0);
- bd_set_size(bdev, 0);
+ if (bdev)
+ bd_set_size(bdev, 0);
mapping_set_gfp_mask(filp->f_mapping, gfp);
lo->lo_state = Lo_unbound;
fput(filp);
@@ -1137,22 +1138,22 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
return err;
}
-static int lo_ioctl(struct inode * inode, struct file * file,
+static int lo_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
+ struct loop_device *lo = bdev->bd_disk->private_data;
int err;
mutex_lock(&lo->lo_ctl_mutex);
switch (cmd) {
case LOOP_SET_FD:
- err = loop_set_fd(lo, file, inode->i_bdev, arg);
+ err = loop_set_fd(lo, mode, bdev, arg);
break;
case LOOP_CHANGE_FD:
- err = loop_change_fd(lo, file, inode->i_bdev, arg);
+ err = loop_change_fd(lo, bdev, arg);
break;
case LOOP_CLR_FD:
- err = loop_clr_fd(lo, inode->i_bdev);
+ err = loop_clr_fd(lo, bdev);
break;
case LOOP_SET_STATUS:
err = loop_set_status_old(lo, (struct loop_info __user *) arg);
@@ -1292,10 +1293,10 @@ loop_get_status_compat(struct loop_device *lo,
return err;
}
-static long lo_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
- struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
+ struct loop_device *lo = bdev->bd_disk->private_data;
int err;
switch(cmd) {
@@ -1317,7 +1318,7 @@ static long lo_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a
arg = (unsigned long) compat_ptr(arg);
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
- err = lo_ioctl(inode, file, cmd, arg);
+ err = lo_ioctl(bdev, mode, cmd, arg);
break;
default:
err = -ENOIOCTLCMD;
@@ -1327,9 +1328,9 @@ static long lo_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a
}
#endif
-static int lo_open(struct inode *inode, struct file *file)
+static int lo_open(struct block_device *bdev, fmode_t mode)
{
- struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
+ struct loop_device *lo = bdev->bd_disk->private_data;
mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
@@ -1338,15 +1339,15 @@ static int lo_open(struct inode *inode, struct file *file)
return 0;
}
-static int lo_release(struct inode *inode, struct file *file)
+static int lo_release(struct gendisk *disk, fmode_t mode)
{
- struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
+ struct loop_device *lo = disk->private_data;
mutex_lock(&lo->lo_ctl_mutex);
--lo->lo_refcnt;
if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) && !lo->lo_refcnt)
- loop_clr_fd(lo, inode->i_bdev);
+ loop_clr_fd(lo, NULL);
mutex_unlock(&lo->lo_ctl_mutex);
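
With the loop conversion, loop_clr_fd() can be reached from lo_release() with no block_device in hand (the LO_FLAGS_AUTOCLEAR path now passes NULL), so the bdev-specific cleanup is guarded. A minimal sketch of that guard, with stub types and helpers standing in for the kernel ones:

#include <stdio.h>
#include <stddef.h>

struct block_device { const char *name; };

/* stand-ins for invalidate_bdev(), set_capacity() and bd_set_size() */
static void invalidate_bdev(struct block_device *bdev)
{
	printf("invalidate %s\n", bdev->name);
}
static void set_capacity_zero(void)
{
	puts("gendisk capacity set to 0");
}
static void bd_set_size(struct block_device *bdev, long long size)
{
	printf("resize %s to %lld\n", bdev->name, size);
}

/* mirrors the shape of the converted loop_clr_fd(): bdev may be NULL
 * when the unbind comes from the last release (autoclear) */
static int demo_clr_fd(struct block_device *bdev)
{
	if (bdev)
		invalidate_bdev(bdev);
	set_capacity_zero();
	if (bdev)
		bd_set_size(bdev, 0);
	puts("loop device unbound");
	return 0;
}

int main(void)
{
	struct block_device bd = { .name = "loop0" };
	demo_clr_fd(&bd);   /* ioctl path: block device known  */
	demo_clr_fd(NULL);  /* autoclear from release: no bdev */
	return 0;
}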
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9034ca585af..d3a91cacee8 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -557,10 +557,11 @@ static void do_nbd_request(struct request_queue * q)
}
}
-static int nbd_ioctl(struct inode *inode, struct file *file,
+static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct nbd_device *lo = inode->i_bdev->bd_disk->private_data;
+ struct nbd_device *lo = bdev->bd_disk->private_data;
+ struct file *file;
int error;
struct request sreq ;
struct task_struct *thread;
@@ -612,8 +613,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
error = -EINVAL;
file = fget(arg);
if (file) {
- struct block_device *bdev = inode->i_bdev;
- inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file->f_path.dentry->d_inode;
if (S_ISSOCK(inode->i_mode)) {
lo->file = file;
lo->sock = SOCKET_I(inode);
@@ -628,14 +628,14 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
case NBD_SET_BLKSIZE:
lo->blksize = arg;
lo->bytesize &= ~(lo->blksize-1);
- inode->i_bdev->bd_inode->i_size = lo->bytesize;
- set_blocksize(inode->i_bdev, lo->blksize);
+ bdev->bd_inode->i_size = lo->bytesize;
+ set_blocksize(bdev, lo->blksize);
set_capacity(lo->disk, lo->bytesize >> 9);
return 0;
case NBD_SET_SIZE:
lo->bytesize = arg & ~(lo->blksize-1);
- inode->i_bdev->bd_inode->i_size = lo->bytesize;
- set_blocksize(inode->i_bdev, lo->blksize);
+ bdev->bd_inode->i_size = lo->bytesize;
+ set_blocksize(bdev, lo->blksize);
set_capacity(lo->disk, lo->bytesize >> 9);
return 0;
case NBD_SET_TIMEOUT:
@@ -643,8 +643,8 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
return 0;
case NBD_SET_SIZE_BLOCKS:
lo->bytesize = ((u64) arg) * lo->blksize;
- inode->i_bdev->bd_inode->i_size = lo->bytesize;
- set_blocksize(inode->i_bdev, lo->blksize);
+ bdev->bd_inode->i_size = lo->bytesize;
+ set_blocksize(bdev, lo->blksize);
set_capacity(lo->disk, lo->bytesize >> 9);
return 0;
case NBD_DO_IT:
@@ -666,10 +666,10 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
if (file)
fput(file);
lo->bytesize = 0;
- inode->i_bdev->bd_inode->i_size = 0;
+ bdev->bd_inode->i_size = 0;
set_capacity(lo->disk, 0);
if (max_part > 0)
- ioctl_by_bdev(inode->i_bdev, BLKRRPART, 0);
+ ioctl_by_bdev(bdev, BLKRRPART, 0);
return lo->harderror;
case NBD_CLEAR_QUE:
/*
@@ -680,7 +680,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
return 0;
case NBD_PRINT_DEBUG:
printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
- inode->i_bdev->bd_disk->disk_name,
+ bdev->bd_disk->disk_name,
lo->queue_head.next, lo->queue_head.prev,
&lo->queue_head);
return 0;
@@ -691,7 +691,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
static struct block_device_operations nbd_fops =
{
.owner = THIS_MODULE,
- .ioctl = nbd_ioctl,
+ .locked_ioctl = nbd_ioctl,
};
/*
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index b8a994a2b01..e91d4b4b014 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -223,23 +223,24 @@ static int pcd_warned; /* Have we logged a phase warning ? */
/* kernel glue structures */
-static int pcd_block_open(struct inode *inode, struct file *file)
+static int pcd_block_open(struct block_device *bdev, fmode_t mode)
{
- struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data;
- return cdrom_open(&cd->info, inode, file);
+ struct pcd_unit *cd = bdev->bd_disk->private_data;
+ return cdrom_open(&cd->info, bdev, mode);
}
-static int pcd_block_release(struct inode *inode, struct file *file)
+static int pcd_block_release(struct gendisk *disk, fmode_t mode)
{
- struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data;
- return cdrom_release(&cd->info, file);
+ struct pcd_unit *cd = disk->private_data;
+ cdrom_release(&cd->info, mode);
+ return 0;
}
-static int pcd_block_ioctl(struct inode *inode, struct file *file,
+static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
- struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data;
- return cdrom_ioctl(file, &cd->info, inode, cmd, arg);
+ struct pcd_unit *cd = bdev->bd_disk->private_data;
+ return cdrom_ioctl(&cd->info, bdev, mode, cmd, arg);
}
static int pcd_block_media_changed(struct gendisk *disk)
@@ -252,7 +253,7 @@ static struct block_device_operations pcd_bdops = {
.owner = THIS_MODULE,
.open = pcd_block_open,
.release = pcd_block_release,
- .ioctl = pcd_block_ioctl,
+ .locked_ioctl = pcd_block_ioctl,
.media_changed = pcd_block_media_changed,
};
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 5fdfa7c888c..9299455b0af 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -728,9 +728,9 @@ static int pd_special_command(struct pd_unit *disk,
/* kernel glue structures */
-static int pd_open(struct inode *inode, struct file *file)
+static int pd_open(struct block_device *bdev, fmode_t mode)
{
- struct pd_unit *disk = inode->i_bdev->bd_disk->private_data;
+ struct pd_unit *disk = bdev->bd_disk->private_data;
disk->access++;
@@ -758,10 +758,10 @@ static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static int pd_ioctl(struct inode *inode, struct file *file,
+static int pd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct pd_unit *disk = inode->i_bdev->bd_disk->private_data;
+ struct pd_unit *disk = bdev->bd_disk->private_data;
switch (cmd) {
case CDROMEJECT:
@@ -773,9 +773,9 @@ static int pd_ioctl(struct inode *inode, struct file *file,
}
}
-static int pd_release(struct inode *inode, struct file *file)
+static int pd_release(struct gendisk *p, fmode_t mode)
{
- struct pd_unit *disk = inode->i_bdev->bd_disk->private_data;
+ struct pd_unit *disk = p->private_data;
if (!--disk->access && disk->removable)
pd_special_command(disk, pd_door_unlock);
@@ -809,7 +809,7 @@ static struct block_device_operations pd_fops = {
.owner = THIS_MODULE,
.open = pd_open,
.release = pd_release,
- .ioctl = pd_ioctl,
+ .locked_ioctl = pd_ioctl,
.getgeo = pd_getgeo,
.media_changed = pd_check_media,
.revalidate_disk= pd_revalidate
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index e7fe6ca97dd..bef3b997ba3 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -201,13 +201,13 @@ module_param_array(drive3, int, NULL, 0);
#define ATAPI_READ_10 0x28
#define ATAPI_WRITE_10 0x2a
-static int pf_open(struct inode *inode, struct file *file);
+static int pf_open(struct block_device *bdev, fmode_t mode);
static void do_pf_request(struct request_queue * q);
-static int pf_ioctl(struct inode *inode, struct file *file,
+static int pf_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-static int pf_release(struct inode *inode, struct file *file);
+static int pf_release(struct gendisk *disk, fmode_t mode);
static int pf_detect(void);
static void do_pf_read(void);
@@ -266,7 +266,7 @@ static struct block_device_operations pf_fops = {
.owner = THIS_MODULE,
.open = pf_open,
.release = pf_release,
- .ioctl = pf_ioctl,
+ .locked_ioctl = pf_ioctl,
.getgeo = pf_getgeo,
.media_changed = pf_check_media,
};
@@ -296,16 +296,16 @@ static void __init pf_init_units(void)
}
}
-static int pf_open(struct inode *inode, struct file *file)
+static int pf_open(struct block_device *bdev, fmode_t mode)
{
- struct pf_unit *pf = inode->i_bdev->bd_disk->private_data;
+ struct pf_unit *pf = bdev->bd_disk->private_data;
pf_identify(pf);
if (pf->media_status == PF_NM)
return -ENODEV;
- if ((pf->media_status == PF_RO) && (file->f_mode & 2))
+ if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE))
return -EROFS;
pf->access++;
@@ -333,9 +333,9 @@ static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static int pf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
- struct pf_unit *pf = inode->i_bdev->bd_disk->private_data;
+ struct pf_unit *pf = bdev->bd_disk->private_data;
if (cmd != CDROMEJECT)
return -EINVAL;
@@ -346,9 +346,9 @@ static int pf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
return 0;
}
-static int pf_release(struct inode *inode, struct file *file)
+static int pf_release(struct gendisk *disk, fmode_t mode)
{
- struct pf_unit *pf = inode->i_bdev->bd_disk->private_data;
+ struct pf_unit *pf = disk->private_data;
if (pf->access <= 0)
return -EINVAL;
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 5ae229656ea..1e4006e18f0 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -667,7 +667,7 @@ static int pt_open(struct inode *inode, struct file *file)
goto out;
err = -EROFS;
- if ((!(tape->flags & PT_WRITE_OK)) && (file->f_mode & 2))
+ if ((!(tape->flags & PT_WRITE_OK)) && (file->f_mode & FMODE_WRITE))
goto out;
if (!(iminor(inode) & 128))
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 195ca7c720f..f20bf359b84 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2320,7 +2320,7 @@ static int pkt_open_write(struct pktcdvd_device *pd)
/*
* called at open time.
*/
-static int pkt_open_dev(struct pktcdvd_device *pd, int write)
+static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
int ret;
long lba;
@@ -2332,7 +2332,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
* so bdget() can't fail.
*/
bdget(pd->bdev->bd_dev);
- if ((ret = blkdev_get(pd->bdev, FMODE_READ, O_RDONLY)))
+ if ((ret = blkdev_get(pd->bdev, FMODE_READ)))
goto out;
if ((ret = bd_claim(pd->bdev, pd)))
@@ -2381,7 +2381,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
out_unclaim:
bd_release(pd->bdev);
out_putdev:
- blkdev_put(pd->bdev);
+ blkdev_put(pd->bdev, FMODE_READ);
out:
return ret;
}
@@ -2399,7 +2399,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
bd_release(pd->bdev);
- blkdev_put(pd->bdev);
+ blkdev_put(pd->bdev, FMODE_READ);
pkt_shrink_pktlist(pd);
}
@@ -2411,7 +2411,7 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
return pkt_devs[dev_minor];
}
-static int pkt_open(struct inode *inode, struct file *file)
+static int pkt_open(struct block_device *bdev, fmode_t mode)
{
struct pktcdvd_device *pd = NULL;
int ret;
@@ -2419,7 +2419,7 @@ static int pkt_open(struct inode *inode, struct file *file)
VPRINTK(DRIVER_NAME": entering open\n");
mutex_lock(&ctl_mutex);
- pd = pkt_find_dev_from_minor(iminor(inode));
+ pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
if (!pd) {
ret = -ENODEV;
goto out;
@@ -2428,20 +2428,20 @@ static int pkt_open(struct inode *inode, struct file *file)
pd->refcnt++;
if (pd->refcnt > 1) {
- if ((file->f_mode & FMODE_WRITE) &&
+ if ((mode & FMODE_WRITE) &&
!test_bit(PACKET_WRITABLE, &pd->flags)) {
ret = -EBUSY;
goto out_dec;
}
} else {
- ret = pkt_open_dev(pd, file->f_mode & FMODE_WRITE);
+ ret = pkt_open_dev(pd, mode & FMODE_WRITE);
if (ret)
goto out_dec;
/*
* needed here as well, since ext2 (among others) may change
* the blocksize at mount time
*/
- set_blocksize(inode->i_bdev, CD_FRAMESIZE);
+ set_blocksize(bdev, CD_FRAMESIZE);
}
mutex_unlock(&ctl_mutex);
@@ -2455,9 +2455,9 @@ out:
return ret;
}
-static int pkt_close(struct inode *inode, struct file *file)
+static int pkt_close(struct gendisk *disk, fmode_t mode)
{
- struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
+ struct pktcdvd_device *pd = disk->private_data;
int ret = 0;
mutex_lock(&ctl_mutex);
@@ -2765,7 +2765,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
bdev = bdget(dev);
if (!bdev)
return -ENOMEM;
- ret = blkdev_get(bdev, FMODE_READ, O_RDONLY | O_NONBLOCK);
+ ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY);
if (ret)
return ret;
@@ -2790,19 +2790,28 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
return 0;
out_mem:
- blkdev_put(bdev);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return ret;
}
-static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
- struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
+ struct pktcdvd_device *pd = bdev->bd_disk->private_data;
- VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
+ VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
+ MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
switch (cmd) {
+ case CDROMEJECT:
+ /*
+ * The door gets locked when the device is opened, so we
+ * have to unlock it or else the eject command fails.
+ */
+ if (pd->refcnt == 1)
+ pkt_lock_door(pd, 0);
+ /* fallthru */
/*
* forward selected CDROM ioctls to CD-ROM, for UDF
*/
@@ -2811,16 +2820,7 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
case CDROM_LAST_WRITTEN:
case CDROM_SEND_PACKET:
case SCSI_IOCTL_SEND_COMMAND:
- return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
-
- case CDROMEJECT:
- /*
- * The door gets locked when the device is opened, so we
- * have to unlock it or else the eject command fails.
- */
- if (pd->refcnt == 1)
- pkt_lock_door(pd, 0);
- return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
+ return __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
default:
VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
@@ -2849,7 +2849,7 @@ static struct block_device_operations pktcdvd_ops = {
.owner = THIS_MODULE,
.open = pkt_open,
.release = pkt_close,
- .ioctl = pkt_ioctl,
+ .locked_ioctl = pkt_ioctl,
.media_changed = pkt_media_changed,
};
@@ -2975,7 +2975,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
- blkdev_put(pd->bdev);
+ blkdev_put(pd->bdev, FMODE_READ|FMODE_WRITE);
remove_proc_entry(pd->name, pkt_proc);
DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
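
pktcdvd's ioctl handler is also reordered so that CDROMEJECT unlocks the door and then falls through to the same forwarding path as the other CD-ROM commands, which now goes through __blkdev_driver_ioctl() on the backing device instead of blkdev_ioctl() on its inode. A stand-alone sketch of that switch shape; the command numbers, pd structure and forwarding stub are illustrative stand-ins, not the real definitions.

#include <stdio.h>

typedef unsigned int fmode_t;

/* hypothetical command numbers, not the real CDROM ioctl values */
enum { DEMO_EJECT = 1, DEMO_READ_TOC = 2, DEMO_OTHER = 99 };

struct demo_pd { int refcnt; };

static void unlock_door(struct demo_pd *pd)
{
	printf("door unlocked (refcnt=%d)\n", pd->refcnt);
}

/* stands in for __blkdev_driver_ioctl() on the underlying CD-ROM device */
static int forward_to_cdrom(unsigned cmd)
{
	printf("forwarded cmd %u to the CD-ROM driver\n", cmd);
	return 0;
}

/* mirrors the reordered switch in pkt_ioctl(): the eject case unlocks the
 * door first and then falls through to the common forwarding path */
static int demo_ioctl(struct demo_pd *pd, fmode_t mode, unsigned cmd)
{
	(void)mode;
	switch (cmd) {
	case DEMO_EJECT:
		if (pd->refcnt == 1)
			unlock_door(pd);
		/* fall through */
	case DEMO_READ_TOC:
		return forward_to_cdrom(cmd);
	default:
		return -1;   /* -ENOTTY in the kernel */
	}
}

int main(void)
{
	struct demo_pd pd = { .refcnt = 1 };
	demo_ioctl(&pd, 0, DEMO_EJECT);
	demo_ioctl(&pd, 0, DEMO_OTHER);
	return 0;
}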
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 730ccea78e4..612965307ba 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -244,10 +244,10 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible);
static void release_drive(struct floppy_state *fs);
static int fd_eject(struct floppy_state *fs);
-static int floppy_ioctl(struct inode *inode, struct file *filp,
+static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param);
-static int floppy_open(struct inode *inode, struct file *filp);
-static int floppy_release(struct inode *inode, struct file *filp);
+static int floppy_open(struct block_device *bdev, fmode_t mode);
+static int floppy_release(struct gendisk *disk, fmode_t mode);
static int floppy_check_change(struct gendisk *disk);
static int floppy_revalidate(struct gendisk *disk);
@@ -839,10 +839,10 @@ static int fd_eject(struct floppy_state *fs)
static struct floppy_struct floppy_type =
{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */
-static int floppy_ioctl(struct inode *inode, struct file *filp,
+static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long param)
{
- struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
+ struct floppy_state *fs = bdev->bd_disk->private_data;
int err;
if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
@@ -868,9 +868,9 @@ static int floppy_ioctl(struct inode *inode, struct file *filp,
return -ENOTTY;
}
-static int floppy_open(struct inode *inode, struct file *filp)
+static int floppy_open(struct block_device *bdev, fmode_t mode)
{
- struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
+ struct floppy_state *fs = bdev->bd_disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
int n, err = 0;
@@ -904,17 +904,17 @@ static int floppy_open(struct inode *inode, struct file *filp)
swim3_action(fs, SETMFM);
swim3_select(fs, RELAX);
- } else if (fs->ref_count == -1 || filp->f_flags & O_EXCL)
+ } else if (fs->ref_count == -1 || mode & FMODE_EXCL)
return -EBUSY;
- if (err == 0 && (filp->f_flags & O_NDELAY) == 0
- && (filp->f_mode & 3)) {
- check_disk_change(inode->i_bdev);
+ if (err == 0 && (mode & FMODE_NDELAY) == 0
+ && (mode & (FMODE_READ|FMODE_WRITE))) {
+ check_disk_change(bdev);
if (fs->ejected)
err = -ENXIO;
}
- if (err == 0 && (filp->f_mode & 2)) {
+ if (err == 0 && (mode & FMODE_WRITE)) {
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot)
@@ -930,7 +930,7 @@ static int floppy_open(struct inode *inode, struct file *filp)
return err;
}
- if (filp->f_flags & O_EXCL)
+ if (mode & FMODE_EXCL)
fs->ref_count = -1;
else
++fs->ref_count;
@@ -938,9 +938,9 @@ static int floppy_open(struct inode *inode, struct file *filp)
return 0;
}
-static int floppy_release(struct inode *inode, struct file *filp)
+static int floppy_release(struct gendisk *disk, fmode_t mode)
{
- struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
+ struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
if (fs->ref_count > 0 && --fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
@@ -1000,7 +1000,7 @@ static int floppy_revalidate(struct gendisk *disk)
static struct block_device_operations floppy_fops = {
.open = floppy_open,
.release = floppy_release,
- .ioctl = floppy_ioctl,
+ .locked_ioctl = floppy_ioctl,
.media_changed = floppy_check_change,
.revalidate_disk= floppy_revalidate,
};
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index f60e41833f6..fccac18d311 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -1667,10 +1667,9 @@ static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
* This is mostly needed to keep refcounting, but also to support
* media checks on removable media drives.
*/
-static int ub_bd_open(struct inode *inode, struct file *filp)
+static int ub_bd_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct ub_lun *lun = disk->private_data;
+ struct ub_lun *lun = bdev->bd_disk->private_data;
struct ub_dev *sc = lun->udev;
unsigned long flags;
int rc;
@@ -1684,19 +1683,19 @@ static int ub_bd_open(struct inode *inode, struct file *filp)
spin_unlock_irqrestore(&ub_lock, flags);
if (lun->removable || lun->readonly)
- check_disk_change(inode->i_bdev);
+ check_disk_change(bdev);
/*
* The sd.c considers ->media_present and ->changed not equivalent,
* under some pretty murky conditions (a failure of READ CAPACITY).
* We may need it one day.
*/
- if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
+ if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
rc = -ENOMEDIUM;
goto err_open;
}
- if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
+ if (lun->readonly && (mode & FMODE_WRITE)) {
rc = -EROFS;
goto err_open;
}
@@ -1710,9 +1709,8 @@ err_open:
/*
*/
-static int ub_bd_release(struct inode *inode, struct file *filp)
+static int ub_bd_release(struct gendisk *disk, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
struct ub_lun *lun = disk->private_data;
struct ub_dev *sc = lun->udev;
@@ -1723,13 +1721,13 @@ static int ub_bd_release(struct inode *inode, struct file *filp)
/*
* The ioctl interface.
*/
-static int ub_bd_ioctl(struct inode *inode, struct file *filp,
+static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
+ struct gendisk *disk = bdev->bd_disk;
void __user *usermem = (void __user *) arg;
- return scsi_cmd_ioctl(filp, disk->queue, disk, cmd, usermem);
+ return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
}
/*
@@ -1793,7 +1791,7 @@ static struct block_device_operations ub_bd_fops = {
.owner = THIS_MODULE,
.open = ub_bd_open,
.release = ub_bd_release,
- .ioctl = ub_bd_ioctl,
+ .locked_ioctl = ub_bd_ioctl,
.media_changed = ub_bd_media_changed,
.revalidate_disk = ub_bd_revalidate,
};
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 1730d29e604..ecccf65dce2 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -130,15 +130,15 @@ struct viodasd_device {
/*
* External open entry point.
*/
-static int viodasd_open(struct inode *ino, struct file *fil)
+static int viodasd_open(struct block_device *bdev, fmode_t mode)
{
- struct viodasd_device *d = ino->i_bdev->bd_disk->private_data;
+ struct viodasd_device *d = bdev->bd_disk->private_data;
HvLpEvent_Rc hvrc;
struct viodasd_waitevent we;
u16 flags = 0;
if (d->read_only) {
- if ((fil != NULL) && (fil->f_mode & FMODE_WRITE))
+ if (mode & FMODE_WRITE)
return -EROFS;
flags = vioblockflags_ro;
}
@@ -179,9 +179,9 @@ static int viodasd_open(struct inode *ino, struct file *fil)
/*
* External release entry point.
*/
-static int viodasd_release(struct inode *ino, struct file *fil)
+static int viodasd_release(struct gendisk *disk, fmode_t mode)
{
- struct viodasd_device *d = ino->i_bdev->bd_disk->private_data;
+ struct viodasd_device *d = disk->private_data;
HvLpEvent_Rc hvrc;
/* Send the event to OS/400. We DON'T expect a response */
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ec5fc05278..85d79a02d48 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -146,11 +146,11 @@ static void do_virtblk_request(struct request_queue *q)
vblk->vq->vq_ops->kick(vblk->vq);
}
-static int virtblk_ioctl(struct inode *inode, struct file *filp,
+static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
- return scsi_cmd_ioctl(filp, inode->i_bdev->bd_disk->queue,
- inode->i_bdev->bd_disk, cmd,
+ return scsi_cmd_ioctl(bdev->bd_disk->queue,
+ bdev->bd_disk, mode, cmd,
(void __user *)data);
}
@@ -180,7 +180,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
}
static struct block_device_operations virtblk_fops = {
- .ioctl = virtblk_ioctl,
+ .locked_ioctl = virtblk_ioctl,
.owner = THIS_MODULE,
.getgeo = virtblk_getgeo,
};
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 624d30f7da3..64b496fce98 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -132,7 +132,7 @@ static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static struct block_device_operations xd_fops = {
.owner = THIS_MODULE,
- .ioctl = xd_ioctl,
+ .locked_ioctl = xd_ioctl,
.getgeo = xd_getgeo,
};
static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
@@ -343,7 +343,7 @@ static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
/* xd_ioctl: handle device ioctl's */
-static int xd_ioctl (struct inode *inode,struct file *file,u_int cmd,u_long arg)
+static int xd_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg)
{
switch (cmd) {
case HDIO_SET_DMA:
diff --git a/drivers/block/xd.h b/drivers/block/xd.h
index cffd44a2038..37cacef16e9 100644
--- a/drivers/block/xd.h
+++ b/drivers/block/xd.h
@@ -105,7 +105,7 @@ static u_char xd_detect (u_char *controller, unsigned int *address);
static u_char xd_initdrives (void (*init_drive)(u_char drive));
static void do_xd_request (struct request_queue * q);
-static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg);
+static int xd_ioctl (struct block_device *bdev,fmode_t mode,unsigned int cmd,unsigned long arg);
static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count);
static void xd_recalibrate (u_char drive);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 1a50ae70f71..b220c686089 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -156,11 +156,10 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
return 0;
}
-static int blkif_ioctl(struct inode *inode, struct file *filep,
+static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
unsigned command, unsigned long argument)
{
- struct blkfront_info *info =
- inode->i_bdev->bd_disk->private_data;
+ struct blkfront_info *info = bdev->bd_disk->private_data;
int i;
dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
@@ -1014,16 +1013,16 @@ static int blkfront_is_ready(struct xenbus_device *dev)
return info->is_ready;
}
-static int blkif_open(struct inode *inode, struct file *filep)
+static int blkif_open(struct block_device *bdev, fmode_t mode)
{
- struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
+ struct blkfront_info *info = bdev->bd_disk->private_data;
info->users++;
return 0;
}
-static int blkif_release(struct inode *inode, struct file *filep)
+static int blkif_release(struct gendisk *disk, fmode_t mode)
{
- struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
+ struct blkfront_info *info = disk->private_data;
info->users--;
if (info->users == 0) {
/* Check whether we have been instructed to close. We will
@@ -1044,7 +1043,7 @@ static struct block_device_operations xlvbd_block_fops =
.open = blkif_open,
.release = blkif_release,
.getgeo = blkif_getgeo,
- .ioctl = blkif_ioctl,
+ .locked_ioctl = blkif_ioctl,
};
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 4a7a059ebaf..ecab9e67d47 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -870,25 +870,24 @@ static int ace_revalidate_disk(struct gendisk *gd)
return ace->id_result;
}
-static int ace_open(struct inode *inode, struct file *filp)
+static int ace_open(struct block_device *bdev, fmode_t mode)
{
- struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+ struct ace_device *ace = bdev->bd_disk->private_data;
unsigned long flags;
dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);
- filp->private_data = ace;
spin_lock_irqsave(&ace->lock, flags);
ace->users++;
spin_unlock_irqrestore(&ace->lock, flags);
- check_disk_change(inode->i_bdev);
+ check_disk_change(bdev);
return 0;
}
-static int ace_release(struct inode *inode, struct file *filp)
+static int ace_release(struct gendisk *disk, fmode_t mode)
{
- struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+ struct ace_device *ace = disk->private_data;
unsigned long flags;
u16 val;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index be20a67f1fa..80754cdd311 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -137,8 +137,7 @@ get_chipram( void )
return;
}
-static int
-z2_open( struct inode *inode, struct file *filp )
+static int z2_open(struct block_device *bdev, fmode_t mode)
{
int device;
int max_z2_map = ( Z2RAM_SIZE / Z2RAM_CHUNKSIZE ) *
@@ -147,7 +146,7 @@ z2_open( struct inode *inode, struct file *filp )
sizeof( z2ram_map[0] );
int rc = -ENOMEM;
- device = iminor(inode);
+ device = MINOR(bdev->bd_dev);
if ( current_device != -1 && current_device != device )
{
@@ -299,7 +298,7 @@ err_out:
}
static int
-z2_release( struct inode *inode, struct file *filp )
+z2_release(struct gendisk *disk, fmode_t mode)
{
if ( current_device == -1 )
return 0;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index d47f2f80acc..d16b02423d6 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -973,7 +973,7 @@ static int cdrom_close_write(struct cdrom_device_info *cdi)
* is in their own interest: device control becomes a lot easier
* this way.
*/
-int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
+int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode)
{
int ret;
@@ -982,14 +982,14 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
/* if this was a O_NONBLOCK open and we should honor the flags,
* do a quick open without drive/disc integrity checks. */
cdi->use_count++;
- if ((fp->f_flags & O_NONBLOCK) && (cdi->options & CDO_USE_FFLAGS)) {
+ if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) {
ret = cdi->ops->open(cdi, 1);
} else {
ret = open_for_data(cdi);
if (ret)
goto err;
cdrom_mmc3_profile(cdi);
- if (fp->f_mode & FMODE_WRITE) {
+ if (mode & FMODE_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
goto err_release;
@@ -1007,7 +1007,7 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
cdi->name, cdi->use_count);
/* Do this on open. Don't wait for mount, because they might
not be mounting, but opening with O_NONBLOCK */
- check_disk_change(ip->i_bdev);
+ check_disk_change(bdev);
return 0;
err_release:
if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
@@ -1184,7 +1184,7 @@ static int check_for_audio_disc(struct cdrom_device_info * cdi,
return 0;
}
-int cdrom_release(struct cdrom_device_info *cdi, struct file *fp)
+void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
{
struct cdrom_device_ops *cdo = cdi->ops;
int opened_for_data;
@@ -1205,7 +1205,7 @@ int cdrom_release(struct cdrom_device_info *cdi, struct file *fp)
}
opened_for_data = !(cdi->options & CDO_USE_FFLAGS) ||
- !(fp && fp->f_flags & O_NONBLOCK);
+ !(mode & FMODE_NDELAY);
/*
* flush cache on last write release
@@ -1219,7 +1219,6 @@ int cdrom_release(struct cdrom_device_info *cdi, struct file *fp)
cdi->options & CDO_AUTO_EJECT && CDROM_CAN(CDC_OPEN_TRAY))
cdo->tray_move(cdi, 1);
}
- return 0;
}
static int cdrom_read_mech_status(struct cdrom_device_info *cdi,
@@ -2662,17 +2661,17 @@ static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi,
* these days.
* ATAPI / SCSI specific code now mainly resides in mmc_ioctl().
*/
-int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi,
- struct inode *ip, unsigned int cmd, unsigned long arg)
+int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
+ fmode_t mode, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
int ret;
- struct gendisk *disk = ip->i_bdev->bd_disk;
+ struct gendisk *disk = bdev->bd_disk;
/*
* Try the generic SCSI command ioctl's first.
*/
- ret = scsi_cmd_ioctl(file, disk->queue, disk, cmd, argp);
+ ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
if (ret != -ENOTTY)
return ret;
@@ -2696,7 +2695,7 @@ int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi,
case CDROM_SELECT_DISC:
return cdrom_ioctl_select_disc(cdi, arg);
case CDROMRESET:
- return cdrom_ioctl_reset(cdi, ip->i_bdev);
+ return cdrom_ioctl_reset(cdi, bdev);
case CDROM_LOCKDOOR:
return cdrom_ioctl_lock_door(cdi, arg);
case CDROM_DEBUG:
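
cdrom_open() no longer looks at file->f_flags; the O_NONBLOCK quick-open test becomes a check for FMODE_NDELAY in the mode the block layer hands down. Roughly how that translation from open flags to fmode_t bits works is sketched below; the flag values and the translation function are simplified stand-ins for illustration, not the block layer's actual code.

#include <stdio.h>

typedef unsigned int fmode_t;

/* simplified stand-ins for the open(2) flags and fmode_t bits involved */
#define DEMO_O_WRONLY   0x1u
#define DEMO_O_NONBLOCK 0x2u
#define DEMO_O_EXCL     0x4u

#define FMODE_READ      0x1u
#define FMODE_WRITE     0x2u
#define FMODE_NDELAY    0x40u
#define FMODE_EXCL      0x80u

/* sketch of the idea: the block layer folds the relevant file flags into
 * fmode_t once, so ->open()/->ioctl() never need the struct file */
static fmode_t demo_flags_to_mode(unsigned flags)
{
	fmode_t mode = FMODE_READ;

	if (flags & DEMO_O_WRONLY)
		mode |= FMODE_WRITE;
	if (flags & DEMO_O_NONBLOCK)
		mode |= FMODE_NDELAY;
	if (flags & DEMO_O_EXCL)
		mode |= FMODE_EXCL;
	return mode;
}

int main(void)
{
	fmode_t m = demo_flags_to_mode(DEMO_O_NONBLOCK);

	/* this is the test the converted cdrom_open() performs on its mode argument */
	printf("quick open path: %s\n", (m & FMODE_NDELAY) ? "yes" : "no");
	return 0;
}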
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index d6ba77a2dd7..9aaa86b232b 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -490,14 +490,14 @@ static struct cdrom_device_ops gdrom_ops = {
.n_minors = 1,
};
-static int gdrom_bdops_open(struct inode *inode, struct file *file)
+static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
{
- return cdrom_open(gd.cd_info, inode, file);
+ return cdrom_open(gd.cd_info, bdev, mode);
}
-static int gdrom_bdops_release(struct inode *inode, struct file *file)
+static int gdrom_bdops_release(struct block_device *bdev, fmode_t mode)
{
- return cdrom_release(gd.cd_info, file);
+ return cdrom_release(gd.cd_info, mode);
}
static int gdrom_bdops_mediachanged(struct gendisk *disk)
@@ -505,10 +505,10 @@ static int gdrom_bdops_mediachanged(struct gendisk *disk)
return cdrom_media_changed(gd.cd_info);
}
-static int gdrom_bdops_ioctl(struct inode *inode, struct file *file,
+static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
- return cdrom_ioctl(file, gd.cd_info, inode, cmd, arg);
+ return cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
}
static struct block_device_operations gdrom_bdops = {
@@ -516,7 +516,7 @@ static struct block_device_operations gdrom_bdops = {
.open = gdrom_bdops_open,
.release = gdrom_bdops_release,
.media_changed = gdrom_bdops_mediachanged,
- .ioctl = gdrom_bdops_ioctl,
+ .locked_ioctl = gdrom_bdops_ioctl,
};
static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 031e0e1a1a3..13929356135 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -151,23 +151,24 @@ static const struct file_operations proc_viocd_operations = {
.release = single_release,
};
-static int viocd_blk_open(struct inode *inode, struct file *file)
+static int viocd_blk_open(struct block_device *bdev, fmode_t mode)
{
- struct disk_info *di = inode->i_bdev->bd_disk->private_data;
- return cdrom_open(&di->viocd_info, inode, file);
+ struct disk_info *di = bdev->bd_disk->private_data;
+ return cdrom_open(&di->viocd_info, bdev, mode);
}
-static int viocd_blk_release(struct inode *inode, struct file *file)
+static int viocd_blk_release(struct gendisk *disk, fmode_t mode)
{
- struct disk_info *di = inode->i_bdev->bd_disk->private_data;
- return cdrom_release(&di->viocd_info, file);
+ struct disk_info *di = disk->private_data;
+ cdrom_release(&di->viocd_info, mode);
+ return 0;
}
-static int viocd_blk_ioctl(struct inode *inode, struct file *file,
+static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
- struct disk_info *di = inode->i_bdev->bd_disk->private_data;
- return cdrom_ioctl(file, &di->viocd_info, inode, cmd, arg);
+ struct disk_info *di = bdev->bd_disk->private_data;
+ return cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg);
}
static int viocd_blk_media_changed(struct gendisk *disk)
@@ -180,7 +181,7 @@ struct block_device_operations viocd_fops = {
.owner = THIS_MODULE,
.open = viocd_blk_open,
.release = viocd_blk_release,
- .ioctl = viocd_blk_ioctl,
+ .locked_ioctl = viocd_blk_ioctl,
.media_changed = viocd_blk_media_changed,
};
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index bf70450a49c..5b819b12675 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -161,7 +161,7 @@ static void hvc_console_print(struct console *co, const char *b,
}
} else {
r = cons_ops[index]->put_chars(vtermnos[index], c, i);
- if (r < 0) {
+ if (r <= 0) {
/* throw away chars on error */
i = 0;
} else if (r > 0) {
@@ -374,6 +374,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
if (hp->ops->notifier_del)
hp->ops->notifier_del(hp, hp->data);
+ /* cancel pending tty resize work */
+ cancel_work_sync(&hp->tty_resize);
+
/*
* Chain calls chars_in_buffer() and returns immediately if
* there is no buffered data otherwise sleeps on a wait queue
@@ -399,6 +402,9 @@ static void hvc_hangup(struct tty_struct *tty)
if (!hp)
return;
+ /* cancel pending tty resize work */
+ cancel_work_sync(&hp->tty_resize);
+
spin_lock_irqsave(&hp->lock, flags);
/*
@@ -418,8 +424,8 @@ static void hvc_hangup(struct tty_struct *tty)
spin_unlock_irqrestore(&hp->lock, flags);
- if (hp->ops->notifier_del)
- hp->ops->notifier_del(hp, hp->data);
+ if (hp->ops->notifier_hangup)
+ hp->ops->notifier_hangup(hp, hp->data);
while(temp_open_count) {
--temp_open_count;
@@ -431,7 +437,7 @@ static void hvc_hangup(struct tty_struct *tty)
* Push buffered characters whether they were just recently buffered or waiting
* on a blocked hypervisor. Call this function with hp->lock held.
*/
-static void hvc_push(struct hvc_struct *hp)
+static int hvc_push(struct hvc_struct *hp)
{
int n;
@@ -439,7 +445,7 @@ static void hvc_push(struct hvc_struct *hp)
if (n <= 0) {
if (n == 0) {
hp->do_wakeup = 1;
- return;
+ return 0;
}
/* throw away output on error; this happens when
there is no session connected to the vterm. */
@@ -450,6 +456,8 @@ static void hvc_push(struct hvc_struct *hp)
memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf);
else
hp->do_wakeup = 1;
+
+ return n;
}
static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -492,6 +500,39 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
return written;
}
+/**
+ * hvc_set_winsz() - Resize the hvc tty terminal window.
+ * @work: work structure.
+ *
+ * The routine shall not be called within an atomic context because it
+ * might sleep.
+ *
+ * Locking: hp->lock
+ */
+static void hvc_set_winsz(struct work_struct *work)
+{
+ struct hvc_struct *hp;
+ unsigned long hvc_flags;
+ struct tty_struct *tty;
+ struct winsize ws;
+
+ hp = container_of(work, struct hvc_struct, tty_resize);
+ if (!hp)
+ return;
+
+ spin_lock_irqsave(&hp->lock, hvc_flags);
+ if (!hp->tty) {
+ spin_unlock_irqrestore(&hp->lock, hvc_flags);
+ return;
+ }
+ ws = hp->ws;
+ tty = tty_kref_get(hp->tty);
+ spin_unlock_irqrestore(&hp->lock, hvc_flags);
+
+ tty_do_resize(tty, tty, &ws);
+ tty_kref_put(tty);
+}
+
/*
* This is actually a contract between the driver and the tty layer outlining
* how much write room the driver can guarantee will be sent OR BUFFERED. This
@@ -538,16 +579,20 @@ int hvc_poll(struct hvc_struct *hp)
char buf[N_INBUF] __ALIGNED__;
unsigned long flags;
int read_total = 0;
+ int written_total = 0;
spin_lock_irqsave(&hp->lock, flags);
/* Push pending writes */
if (hp->n_outbuf > 0)
- hvc_push(hp);
+ written_total = hvc_push(hp);
/* Reschedule us if still some write pending */
- if (hp->n_outbuf > 0)
+ if (hp->n_outbuf > 0) {
poll_mask |= HVC_POLL_WRITE;
+ /* If hvc_push() was not able to write, sleep a few msecs */
+ timeout = (written_total) ? 0 : MIN_TIMEOUT;
+ }
/* No tty attached, just skip */
tty = hp->tty;
@@ -632,6 +677,24 @@ int hvc_poll(struct hvc_struct *hp)
}
EXPORT_SYMBOL_GPL(hvc_poll);
+/**
+ * hvc_resize() - Update terminal window size information.
+ * @hp: HVC console pointer
+ * @ws: Terminal window size structure
+ *
+ * Stores the specified window size information in the hvc structure of @hp.
+ * The function schedules the tty resize update.
+ *
+ * Locking: Locking free; the function MUST be called holding hp->lock
+ */
+void hvc_resize(struct hvc_struct *hp, struct winsize ws)
+{
+ if ((hp->ws.ws_row != ws.ws_row) || (hp->ws.ws_col != ws.ws_col)) {
+ hp->ws = ws;
+ schedule_work(&hp->tty_resize);
+ }
+}
+
/*
* This kthread is either polling or interrupt driven. This is determined by
* calling hvc_poll() who determines whether a console adapter support
@@ -659,10 +722,6 @@ static int khvcd(void *unused)
poll_mask |= HVC_POLL_READ;
if (hvc_kicked)
continue;
- if (poll_mask & HVC_POLL_WRITE) {
- yield();
- continue;
- }
set_current_state(TASK_INTERRUPTIBLE);
if (!hvc_kicked) {
if (poll_mask == 0)
@@ -718,6 +777,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
kref_init(&hp->kref);
+ INIT_WORK(&hp->tty_resize, hvc_set_winsz);
spin_lock_init(&hp->lock);
spin_lock(&hvc_structs_lock);
@@ -743,7 +803,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
}
EXPORT_SYMBOL_GPL(hvc_alloc);
-int __devexit hvc_remove(struct hvc_struct *hp)
+int hvc_remove(struct hvc_struct *hp)
{
unsigned long flags;
struct tty_struct *tty;
@@ -796,7 +856,7 @@ static int hvc_init(void)
drv->minor_start = HVC_MINOR;
drv->type = TTY_DRIVER_TYPE_SYSTEM;
drv->init_termios = tty_std_termios;
- drv->flags = TTY_DRIVER_REAL_RAW;
+ drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
tty_set_operations(drv, &hvc_ops);
/* Always start the kthread because there can be hotplug vty adapters
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 9790201718a..8297dbc2e6e 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -27,6 +27,7 @@
#ifndef HVC_CONSOLE_H
#define HVC_CONSOLE_H
#include <linux/kref.h>
+#include <linux/tty.h>
/*
* This is the max number of console adapters that can/will be found as
@@ -56,6 +57,8 @@ struct hvc_struct {
struct hv_ops *ops;
int irq_requested;
int data;
+ struct winsize ws;
+ struct work_struct tty_resize;
struct list_head next;
struct kref kref; /* ref count & hvc_struct lifetime */
};
@@ -65,9 +68,10 @@ struct hv_ops {
int (*get_chars)(uint32_t vtermno, char *buf, int count);
int (*put_chars)(uint32_t vtermno, const char *buf, int count);
- /* Callbacks for notification. Called in open and close */
+ /* Callbacks for notification. Called in open, close and hangup */
int (*notifier_add)(struct hvc_struct *hp, int irq);
void (*notifier_del)(struct hvc_struct *hp, int irq);
+ void (*notifier_hangup)(struct hvc_struct *hp, int irq);
};
/* Register a vterm and a slot index for use as a console (console_init) */
@@ -77,15 +81,19 @@ extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
struct hv_ops *ops, int outbuf_size);
/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
-extern int __devexit hvc_remove(struct hvc_struct *hp);
+extern int hvc_remove(struct hvc_struct *hp);
/* data available */
int hvc_poll(struct hvc_struct *hp);
void hvc_kick(void);
+/* Resize hvc tty terminal window */
+extern void hvc_resize(struct hvc_struct *hp, struct winsize ws);
+
/* default notifier for irq based notification */
extern int notifier_add_irq(struct hvc_struct *hp, int data);
extern void notifier_del_irq(struct hvc_struct *hp, int data);
+extern void notifier_hangup_irq(struct hvc_struct *hp, int data);
#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
diff --git a/drivers/char/hvc_irq.c b/drivers/char/hvc_irq.c
index 73a59cdb894..d09e5688d44 100644
--- a/drivers/char/hvc_irq.c
+++ b/drivers/char/hvc_irq.c
@@ -42,3 +42,8 @@ void notifier_del_irq(struct hvc_struct *hp, int irq)
free_irq(irq, hp);
hp->irq_requested = 0;
}
+
+void notifier_hangup_irq(struct hvc_struct *hp, int irq)
+{
+ notifier_del_irq(hp, irq);
+}
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
index b71c610fe5a..b74a2f8ab90 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/char/hvc_iseries.c
@@ -202,6 +202,7 @@ static struct hv_ops hvc_get_put_ops = {
.put_chars = put_chars,
.notifier_add = notifier_add_irq,
.notifier_del = notifier_del_irq,
+ .notifier_hangup = notifier_hangup_irq,
};
static int __devinit hvc_vio_probe(struct vio_dev *vdev,
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 93f3840c168..019e0b58593 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -82,6 +82,7 @@ static struct hv_ops hvc_get_put_ops = {
.put_chars = hvc_put_chars,
.notifier_add = notifier_add_irq,
.notifier_del = notifier_del_irq,
+ .notifier_hangup = notifier_hangup_irq,
};
static int __devinit hvc_vio_probe(struct vio_dev *vdev,
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index 538ceea5e7d..eba999f8598 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -102,6 +102,7 @@ static struct hv_ops hvc_ops = {
.put_chars = write_console,
.notifier_add = notifier_add_irq,
.notifier_del = notifier_del_irq,
+ .notifier_hangup = notifier_hangup_irq,
};
static int __init xen_init(void)
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 39f6357e3b5..8054ee839b3 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -338,7 +338,7 @@ nvram_open(struct inode *inode, struct file *file)
if ((nvram_open_cnt && (file->f_flags & O_EXCL)) ||
(nvram_open_mode & NVRAM_EXCL) ||
- ((file->f_mode & 2) && (nvram_open_mode & NVRAM_WRITE))) {
+ ((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) {
spin_unlock(&nvram_state_lock);
unlock_kernel();
return -EBUSY;
@@ -346,7 +346,7 @@ nvram_open(struct inode *inode, struct file *file)
if (file->f_flags & O_EXCL)
nvram_open_mode |= NVRAM_EXCL;
- if (file->f_mode & 2)
+ if (file->f_mode & FMODE_WRITE)
nvram_open_mode |= NVRAM_WRITE;
nvram_open_cnt++;
@@ -366,7 +366,7 @@ nvram_release(struct inode *inode, struct file *file)
/* if only one instance is open, clear the EXCL bit */
if (nvram_open_mode & NVRAM_EXCL)
nvram_open_mode &= ~NVRAM_EXCL;
- if (file->f_mode & 2)
+ if (file->f_mode & FMODE_WRITE)
nvram_open_mode &= ~NVRAM_WRITE;
spin_unlock(&nvram_state_lock);
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index e139372d0e6..96adf28a17e 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -65,7 +65,7 @@ static int raw_open(struct inode *inode, struct file *filp)
if (!bdev)
goto out;
igrab(bdev->bd_inode);
- err = blkdev_get(bdev, filp->f_mode, 0);
+ err = blkdev_get(bdev, filp->f_mode);
if (err)
goto out;
err = bd_claim(bdev, raw_open);
@@ -87,7 +87,7 @@ static int raw_open(struct inode *inode, struct file *filp)
out2:
bd_release(bdev);
out1:
- blkdev_put(bdev);
+ blkdev_put(bdev, filp->f_mode);
out:
mutex_unlock(&raw_mutex);
return err;
@@ -112,7 +112,7 @@ static int raw_release(struct inode *inode, struct file *filp)
mutex_unlock(&raw_mutex);
bd_release(bdev);
- blkdev_put(bdev);
+ blkdev_put(bdev, filp->f_mode);
return 0;
}
@@ -125,7 +125,7 @@ raw_ioctl(struct inode *inode, struct file *filp,
{
struct block_device *bdev = filp->private_data;
- return blkdev_ioctl(bdev->bd_inode, NULL, command, arg);
+ return blkdev_ioctl(bdev, 0, command, arg);
}
static void bind_device(struct raw_config_request *rq)
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index 553b0e9d8d1..c8f8024cb40 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -90,7 +90,7 @@ void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty)
spin_lock_irqsave(&port->lock, flags);
if (port->tty)
tty_kref_put(port->tty);
- port->tty = tty;
+ port->tty = tty_kref_get(tty);
spin_unlock_irqrestore(&port->lock, flags);
}
EXPORT_SYMBOL(tty_port_tty_set);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d0f4eb6fdb7..3fb0d2c88ba 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -198,6 +198,7 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
virtio_cons.put_chars = put_chars;
virtio_cons.notifier_add = notifier_add_vio;
virtio_cons.notifier_del = notifier_del_vio;
+ virtio_cons.notifier_hangup = notifier_del_vio;
/* The first argument of hvc_alloc() is the virtual console number, so
* we use zero. The second argument is the parameter for the
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2e314849936..5bed73329ef 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -57,7 +57,11 @@ static void cpuidle_idle_call(void)
if (pm_idle_old)
pm_idle_old();
else
+#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
+ default_idle();
+#else
local_irq_enable();
+#endif
return;
}
@@ -74,8 +78,11 @@ static void cpuidle_idle_call(void)
target_state = &dev->states[next_state];
/* enter the state and update stats */
- dev->last_residency = target_state->enter(dev, target_state);
dev->last_state = target_state;
+ dev->last_residency = target_state->enter(dev, target_state);
+ if (dev->last_state)
+ target_state = dev->last_state;
+
target_state->time += (unsigned long long)dev->last_residency;
target_state->usage++;
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 1ef68b31565..43b8cefad2c 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -171,6 +171,9 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
+#if CONFIG_I7300_IDLE_IOAT_CHANNEL
+ device->common.chancnt--;
+#endif
for (i = 0; i < device->common.chancnt; i++) {
ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
if (!ioat_chan) {
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index dbd42d6c93a..7f2ee27fe76 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -127,6 +127,13 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using
platform-neutral GPIO calls.
+config GPIO_TWL4030
+ tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
+ depends on TWL4030_CORE
+ help
+ Say yes here to access the GPIO signals of various multi-function
+ power management chips from Texas Instruments.
+
comment "PCI GPIO expanders:"
config GPIO_BT8XX
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 01b4bbde195..6aafdeb9ad0 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_GPIO_MAX732X) += max732x.o
obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
+obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/twl4030-gpio.c
new file mode 100644
index 00000000000..37d3eec8730
--- /dev/null
+++ b/drivers/gpio/twl4030-gpio.c
@@ -0,0 +1,521 @@
+/*
+ * twl4030_gpio.c -- access to GPIOs on TWL4030/TPS659x0 chips
+ *
+ * Copyright (C) 2006-2007 Texas Instruments, Inc.
+ * Copyright (C) 2006 MontaVista Software, Inc.
+ *
+ * Code re-arranged and cleaned up by:
+ * Syed Mohammed Khasim <x0khasim@ti.com>
+ *
+ * Initial Code:
+ * Andy Lowe / Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/i2c/twl4030.h>
+
+
+/*
+ * The GPIO "subchip" supports 18 GPIOs which can be configured as
+ * inputs or outputs, with pullups or pulldowns on each pin. Each
+ * GPIO can trigger interrupts on either or both edges.
+ *
+ * GPIO interrupts can be fed to either of two IRQ lines; this is
+ * intended to support multiple hosts.
+ *
+ * There are also two LED pins used sometimes as output-only GPIOs.
+ */
+
+
+static struct gpio_chip twl_gpiochip;
+static int twl4030_gpio_irq_base;
+
+/* genirq interfaces are not available to modules */
+#ifdef MODULE
+#define is_module() true
+#else
+#define is_module() false
+#endif
+
+/* GPIO_CTRL Fields */
+#define MASK_GPIO_CTRL_GPIO0CD1 BIT(0)
+#define MASK_GPIO_CTRL_GPIO1CD2 BIT(1)
+#define MASK_GPIO_CTRL_GPIO_ON BIT(2)
+
+/* Mask for GPIO registers when aggregated into a 32-bit integer */
+#define GPIO_32_MASK 0x0003ffff
+
+/* Data structures */
+static DEFINE_MUTEX(gpio_lock);
+
+/* store usage of each GPIO. - each bit represents one GPIO */
+static unsigned int gpio_usage_count;
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * To configure TWL4030 GPIO module registers
+ */
+static inline int gpio_twl4030_write(u8 address, u8 data)
+{
+ return twl4030_i2c_write_u8(TWL4030_MODULE_GPIO, data, address);
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * LED register offsets (use TWL4030_MODULE_{LED,PWMA,PWMB}))
+ * PWMs A and B are dedicated to LEDs A and B, respectively.
+ */
+
+#define TWL4030_LED_LEDEN 0x0
+
+/* LEDEN bits */
+#define LEDEN_LEDAON BIT(0)
+#define LEDEN_LEDBON BIT(1)
+#define LEDEN_LEDAEXT BIT(2)
+#define LEDEN_LEDBEXT BIT(3)
+#define LEDEN_LEDAPWM BIT(4)
+#define LEDEN_LEDBPWM BIT(5)
+#define LEDEN_PWM_LENGTHA BIT(6)
+#define LEDEN_PWM_LENGTHB BIT(7)
+
+#define TWL4030_PWMx_PWMxON 0x0
+#define TWL4030_PWMx_PWMxOFF 0x1
+
+#define PWMxON_LENGTH BIT(7)
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * To read a TWL4030 GPIO module register
+ */
+static inline int gpio_twl4030_read(u8 address)
+{
+ u8 data;
+ int ret = 0;
+
+ ret = twl4030_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address);
+ return (ret < 0) ? ret : data;
+}
+
+/*----------------------------------------------------------------------*/
+
+static u8 cached_leden; /* protected by gpio_lock */
+
+/* The LED lines are open drain outputs ... a FET pulls to GND, so an
+ * external pullup is needed. We could also expose the integrated PWM
+ * as a LED brightness control; we initialize it as "always on".
+ */
+static void twl4030_led_set_value(int led, int value)
+{
+ u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM;
+ int status;
+
+ if (led)
+ mask <<= 1;
+
+ mutex_lock(&gpio_lock);
+ if (value)
+ cached_leden &= ~mask;
+ else
+ cached_leden |= mask;
+ status = twl4030_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
+ TWL4030_LED_LEDEN);
+ mutex_unlock(&gpio_lock);
+}
+
+static int twl4030_set_gpio_direction(int gpio, int is_input)
+{
+ u8 d_bnk = gpio >> 3;
+ u8 d_msk = BIT(gpio & 0x7);
+ u8 reg = 0;
+ u8 base = REG_GPIODATADIR1 + d_bnk;
+ int ret = 0;
+
+ mutex_lock(&gpio_lock);
+ ret = gpio_twl4030_read(base);
+ if (ret >= 0) {
+ if (is_input)
+ reg = ret & ~d_msk;
+ else
+ reg = ret | d_msk;
+
+ ret = gpio_twl4030_write(base, reg);
+ }
+ mutex_unlock(&gpio_lock);
+ return ret;
+}
+
+static int twl4030_set_gpio_dataout(int gpio, int enable)
+{
+ u8 d_bnk = gpio >> 3;
+ u8 d_msk = BIT(gpio & 0x7);
+ u8 base = 0;
+
+ if (enable)
+ base = REG_SETGPIODATAOUT1 + d_bnk;
+ else
+ base = REG_CLEARGPIODATAOUT1 + d_bnk;
+
+ return gpio_twl4030_write(base, d_msk);
+}
+
+static int twl4030_get_gpio_datain(int gpio)
+{
+ u8 d_bnk = gpio >> 3;
+ u8 d_off = gpio & 0x7;
+ u8 base = 0;
+ int ret = 0;
+
+ if (unlikely((gpio >= TWL4030_GPIO_MAX)
+ || !(gpio_usage_count & BIT(gpio))))
+ return -EPERM;
+
+ base = REG_GPIODATAIN1 + d_bnk;
+ ret = gpio_twl4030_read(base);
+ if (ret > 0)
+ ret = (ret >> d_off) & 0x1;
+
+ return ret;
+}
+
+/*
+ * Configure debounce timing value for a GPIO pin on TWL4030
+ */
+int twl4030_set_gpio_debounce(int gpio, int enable)
+{
+ u8 d_bnk = gpio >> 3;
+ u8 d_msk = BIT(gpio & 0x7);
+ u8 reg = 0;
+ u8 base = 0;
+ int ret = 0;
+
+ if (unlikely((gpio >= TWL4030_GPIO_MAX)
+ || !(gpio_usage_count & BIT(gpio))))
+ return -EPERM;
+
+ base = REG_GPIO_DEBEN1 + d_bnk;
+ mutex_lock(&gpio_lock);
+ ret = gpio_twl4030_read(base);
+ if (ret >= 0) {
+ if (enable)
+ reg = ret | d_msk;
+ else
+ reg = ret & ~d_msk;
+
+ ret = gpio_twl4030_write(base, reg);
+ }
+ mutex_unlock(&gpio_lock);
+ return ret;
+}
+EXPORT_SYMBOL(twl4030_set_gpio_debounce);
+
+/*----------------------------------------------------------------------*/
+
+static int twl_request(struct gpio_chip *chip, unsigned offset)
+{
+ int status = 0;
+
+ mutex_lock(&gpio_lock);
+
+ /* Support the two LED outputs as output-only GPIOs. */
+ if (offset >= TWL4030_GPIO_MAX) {
+ u8 ledclr_mask = LEDEN_LEDAON | LEDEN_LEDAEXT
+ | LEDEN_LEDAPWM | LEDEN_PWM_LENGTHA;
+ u8 module = TWL4030_MODULE_PWMA;
+
+ offset -= TWL4030_GPIO_MAX;
+ if (offset) {
+ ledclr_mask <<= 1;
+ module = TWL4030_MODULE_PWMB;
+ }
+
+ /* initialize PWM to always-drive */
+ status = twl4030_i2c_write_u8(module, 0x7f,
+ TWL4030_PWMx_PWMxOFF);
+ if (status < 0)
+ goto done;
+ status = twl4030_i2c_write_u8(module, 0x7f,
+ TWL4030_PWMx_PWMxON);
+ if (status < 0)
+ goto done;
+
+ /* init LED to not-driven (high) */
+ module = TWL4030_MODULE_LED;
+ status = twl4030_i2c_read_u8(module, &cached_leden,
+ TWL4030_LED_LEDEN);
+ if (status < 0)
+ goto done;
+ cached_leden &= ~ledclr_mask;
+ status = twl4030_i2c_write_u8(module, cached_leden,
+ TWL4030_LED_LEDEN);
+ if (status < 0)
+ goto done;
+
+ status = 0;
+ goto done;
+ }
+
+ /* on first use, turn GPIO module "on" */
+ if (!gpio_usage_count) {
+ struct twl4030_gpio_platform_data *pdata;
+ u8 value = MASK_GPIO_CTRL_GPIO_ON;
+
+ /* optionally have the first two GPIOs switch vMMC1
+ * and vMMC2 power supplies based on card presence.
+ */
+ pdata = chip->dev->platform_data;
+ value |= pdata->mmc_cd & 0x03;
+
+ status = gpio_twl4030_write(REG_GPIO_CTRL, value);
+ }
+
+ if (!status)
+ gpio_usage_count |= (0x1 << offset);
+
+done:
+ mutex_unlock(&gpio_lock);
+ return status;
+}
+
+static void twl_free(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset >= TWL4030_GPIO_MAX) {
+ twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1);
+ return;
+ }
+
+ mutex_lock(&gpio_lock);
+
+ gpio_usage_count &= ~BIT(offset);
+
+ /* on last use, switch off GPIO module */
+ if (!gpio_usage_count)
+ gpio_twl4030_write(REG_GPIO_CTRL, 0x0);
+
+ mutex_unlock(&gpio_lock);
+}
+
+static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+ return (offset < TWL4030_GPIO_MAX)
+ ? twl4030_set_gpio_direction(offset, 1)
+ : -EINVAL;
+}
+
+static int twl_get(struct gpio_chip *chip, unsigned offset)
+{
+ int status = 0;
+
+ if (offset < TWL4030_GPIO_MAX)
+ status = twl4030_get_gpio_datain(offset);
+ else if (offset == TWL4030_GPIO_MAX)
+ status = cached_leden & LEDEN_LEDAON;
+ else
+ status = cached_leden & LEDEN_LEDBON;
+ return (status < 0) ? 0 : status;
+}
+
+static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
+{
+ if (offset < TWL4030_GPIO_MAX) {
+ twl4030_set_gpio_dataout(offset, value);
+ return twl4030_set_gpio_direction(offset, 0);
+ } else {
+ twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
+ return 0;
+ }
+}
+
+static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ if (offset < TWL4030_GPIO_MAX)
+ twl4030_set_gpio_dataout(offset, value);
+ else
+ twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
+}
+
+static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return (twl4030_gpio_irq_base && (offset < TWL4030_GPIO_MAX))
+ ? (twl4030_gpio_irq_base + offset)
+ : -EINVAL;
+}
+
+static struct gpio_chip twl_gpiochip = {
+ .label = "twl4030",
+ .owner = THIS_MODULE,
+ .request = twl_request,
+ .free = twl_free,
+ .direction_input = twl_direction_in,
+ .get = twl_get,
+ .direction_output = twl_direction_out,
+ .set = twl_set,
+ .to_irq = twl_to_irq,
+ .can_sleep = 1,
+};
+
+/*----------------------------------------------------------------------*/
+
+static int __devinit gpio_twl4030_pulls(u32 ups, u32 downs)
+{
+ u8 message[6];
+ unsigned i, gpio_bit;
+
+ /* For most pins, a pulldown was enabled by default.
+ * We should have data that's specific to this board.
+ */
+ for (gpio_bit = 1, i = 1; i < 6; i++) {
+ u8 bit_mask;
+ unsigned j;
+
+ for (bit_mask = 0, j = 0; j < 8; j += 2, gpio_bit <<= 1) {
+ if (ups & gpio_bit)
+ bit_mask |= 1 << (j + 1);
+ else if (downs & gpio_bit)
+ bit_mask |= 1 << (j + 0);
+ }
+ message[i] = bit_mask;
+ }
+
+ return twl4030_i2c_write(TWL4030_MODULE_GPIO, message,
+ REG_GPIOPUPDCTR1, 5);
+}
+
+static int gpio_twl4030_remove(struct platform_device *pdev);
+
+static int __devinit gpio_twl4030_probe(struct platform_device *pdev)
+{
+ struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
+ int ret;
+
+ /* maybe setup IRQs */
+ if (pdata->irq_base) {
+ if (is_module()) {
+ dev_err(&pdev->dev,
+ "can't dispatch IRQs from modules\n");
+ goto no_irqs;
+ }
+ ret = twl4030_sih_setup(TWL4030_MODULE_GPIO);
+ if (ret < 0)
+ return ret;
+ WARN_ON(ret != pdata->irq_base);
+ twl4030_gpio_irq_base = ret;
+ }
+
+no_irqs:
+ /*
+ * NOTE: boards may waste power if they don't set pullups
+ * and pulldowns correctly ... default for non-ULPI pins is
+ * pulldown, and some other pins may have external pullups
+ * or pulldowns. Careful!
+ */
+ ret = gpio_twl4030_pulls(pdata->pullups, pdata->pulldowns);
+ if (ret)
+ dev_dbg(&pdev->dev, "pullups %.05x %.05x --> %d\n",
+ pdata->pullups, pdata->pulldowns,
+ ret);
+
+ twl_gpiochip.base = pdata->gpio_base;
+ twl_gpiochip.ngpio = TWL4030_GPIO_MAX;
+ twl_gpiochip.dev = &pdev->dev;
+
+ /* NOTE: we assume VIBRA_CTL.VIBRA_EN, in MODULE_AUDIO_VOICE,
+ * is (still) clear if use_leds is set.
+ */
+ if (pdata->use_leds)
+ twl_gpiochip.ngpio += 2;
+
+ ret = gpiochip_add(&twl_gpiochip);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "could not register gpiochip, %d\n",
+ ret);
+ twl_gpiochip.ngpio = 0;
+ gpio_twl4030_remove(pdev);
+ } else if (pdata->setup) {
+ int status;
+
+ status = pdata->setup(&pdev->dev,
+ pdata->gpio_base, TWL4030_GPIO_MAX);
+ if (status)
+ dev_dbg(&pdev->dev, "setup --> %d\n", status);
+ }
+
+ return ret;
+}
+
+static int __devexit gpio_twl4030_remove(struct platform_device *pdev)
+{
+ struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
+ int status;
+
+ if (pdata->teardown) {
+ status = pdata->teardown(&pdev->dev,
+ pdata->gpio_base, TWL4030_GPIO_MAX);
+ if (status) {
+ dev_dbg(&pdev->dev, "teardown --> %d\n", status);
+ return status;
+ }
+ }
+
+ status = gpiochip_remove(&twl_gpiochip);
+ if (status < 0)
+ return status;
+
+ if (is_module())
+ return 0;
+
+ /* REVISIT no support yet for deregistering all the IRQs */
+ WARN_ON(1);
+ return -EIO;
+}
+
+/* Note: this hardware lives inside an I2C-based multi-function device. */
+MODULE_ALIAS("platform:twl4030_gpio");
+
+static struct platform_driver gpio_twl4030_driver = {
+ .driver.name = "twl4030_gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = gpio_twl4030_probe,
+ .remove = __devexit_p(gpio_twl4030_remove),
+};
+
+static int __init gpio_twl4030_init(void)
+{
+ return platform_driver_register(&gpio_twl4030_driver);
+}
+subsys_initcall(gpio_twl4030_init);
+
+static void __exit gpio_twl4030_exit(void)
+{
+ platform_driver_unregister(&gpio_twl4030_driver);
+}
+module_exit(gpio_twl4030_exit);
+
+MODULE_AUTHOR("Texas Instruments, Inc.");
+MODULE_DESCRIPTION("GPIO interface for TWL4030");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
index 1839c57663c..80be1cab62a 100644
--- a/drivers/gpu/drm/drm_drawable.c
+++ b/drivers/gpu/drm/drm_drawable.c
@@ -76,11 +76,18 @@ int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_draw *draw = data;
unsigned long irqflags;
+ struct drm_drawable_info *info;
spin_lock_irqsave(&dev->drw_lock, irqflags);
- drm_free(drm_get_drawable_info(dev, draw->handle),
- sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
+ info = drm_get_drawable_info(dev, draw->handle);
+ if (info == NULL) {
+ spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ return -EINVAL;
+ }
+ drm_free(info->rects, info->num_rects * sizeof(struct drm_clip_rect),
+ DRM_MEM_BUFS);
+ drm_free(info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
idr_remove(&dev->drw_idr, draw->handle);
@@ -111,7 +118,9 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file
switch (update->type) {
case DRM_DRAWABLE_CLIPRECTS:
- if (update->num != info->num_rects) {
+ if (update->num == 0)
+ rects = NULL;
+ else if (update->num != info->num_rects) {
rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
DRM_MEM_BUFS);
} else
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 90f5a8d9bdc..920b72fbc95 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -64,6 +64,8 @@
#define DRM_IOCTL_SG_ALLOC32 DRM_IOW( 0x38, drm_scatter_gather32_t)
#define DRM_IOCTL_SG_FREE32 DRM_IOW( 0x39, drm_scatter_gather32_t)
+#define DRM_IOCTL_UPDATE_DRAW32 DRM_IOW( 0x3f, drm_update_draw32_t)
+
#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
typedef struct drm_version_32 {
@@ -952,6 +954,37 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
DRM_IOCTL_SG_FREE, (unsigned long)request);
}
+typedef struct drm_update_draw32 {
+ drm_drawable_t handle;
+ unsigned int type;
+ unsigned int num;
+ /* 64-bit version has a 32-bit pad here */
+ u64 data; /**< Pointer */
+} __attribute__((packed)) drm_update_draw32_t;
+
+static int compat_drm_update_draw(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_update_draw32_t update32;
+ struct drm_update_draw __user *request;
+ int err;
+
+ if (copy_from_user(&update32, (void __user *)arg, sizeof(update32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+ __put_user(update32.handle, &request->handle) ||
+ __put_user(update32.type, &request->type) ||
+ __put_user(update32.num, &request->num) ||
+ __put_user(update32.data, &request->data))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_path.dentry->d_inode, file,
+ DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
+ return err;
+}
+
struct drm_wait_vblank_request32 {
enum drm_vblank_seq_type type;
unsigned int sequence;
@@ -1033,6 +1066,7 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
#endif
[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
+ [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
};
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 4091b9e291f..212a94f715b 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -594,11 +594,14 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
goto done;
}
+ /* Get a refcount on the vblank, which will be released by
+ * drm_vbl_send_signals().
+ */
ret = drm_vblank_get(dev, crtc);
if (ret) {
drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
DRM_MEM_DRIVER);
- return ret;
+ goto done;
}
atomic_inc(&dev->vbl_signal_pending);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index a4caf95485d..888159e03d2 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -232,6 +232,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
}
return 0;
}
+EXPORT_SYMBOL(drm_lock_take);
/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
@@ -299,6 +300,7 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
+EXPORT_SYMBOL(drm_lock_free);
/**
* If we get here, it means that the process has called DRM_IOCTL_LOCK
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index db34780edbb..01de536e021 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -844,8 +844,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* correctly in testing on 945G.
* This may be a side effect of MSI having been made available for PEG
* and the registers being closely associated.
+ *
+ * According to chipset errata, on the 965GM, MSI interrupts may
+ * be lost or delayed
*/
- if (!IS_I945G(dev) && !IS_I945GM(dev))
+ if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
if (pci_enable_msi(dev->pdev))
DRM_ERROR("failed to enable MSI\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eae4ed3956e..f20ffe17df7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -90,7 +90,7 @@ struct mem_block {
typedef struct _drm_i915_vbl_swap {
struct list_head head;
drm_drawable_t drw_id;
- unsigned int plane;
+ unsigned int pipe;
unsigned int sequence;
} drm_i915_vbl_swap_t;
@@ -240,6 +240,9 @@ typedef struct drm_i915_private {
u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[37];
+ /** Work task for vblank-related ring access */
+ struct work_struct vblank_work;
+
struct {
struct drm_mm gtt_space;
@@ -285,9 +288,6 @@ typedef struct drm_i915_private {
*/
struct delayed_work retire_work;
- /** Work task for vblank-related ring access */
- struct work_struct vblank_work;
-
uint32_t next_gem_seqno;
/**
@@ -441,7 +441,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
void i915_user_irq_get(struct drm_device *dev);
void i915_user_irq_put(struct drm_device *dev);
-extern void i915_gem_vblank_work_handler(struct work_struct *work);
+extern void i915_vblank_work_handler(struct work_struct *work);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
extern int i915_driver_irq_postinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dc2e6fdb6ca..17ae330ff26 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2564,8 +2564,6 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
- INIT_WORK(&dev_priv->mm.vblank_work,
- i915_gem_vblank_work_handler);
dev_priv->mm.next_gem_seqno = 1;
i915_gem_detect_bit_6_swizzle(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 15d4160415b..93de15b4c9a 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -192,7 +192,12 @@ static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
*start = &buf[offset];
*eof = 0;
- DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
+ if (dev_priv->hw_status_page != NULL) {
+ DRM_PROC_PRINT("Current sequence: %d\n",
+ i915_get_gem_seqno(dev));
+ } else {
+ DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
+ }
DRM_PROC_PRINT("Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
@@ -230,8 +235,12 @@ static int i915_interrupt_info(char *buf, char **start, off_t offset,
I915_READ(PIPEBSTAT));
DRM_PROC_PRINT("Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- DRM_PROC_PRINT("Current sequence: %d\n",
- i915_get_gem_seqno(dev));
+ if (dev_priv->hw_status_page != NULL) {
+ DRM_PROC_PRINT("Current sequence: %d\n",
+ i915_get_gem_seqno(dev));
+ } else {
+ DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
+ }
DRM_PROC_PRINT("Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
DRM_PROC_PRINT("IRQ sequence: %d\n",
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index baae511c785..26f48932a51 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -60,43 +60,6 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
}
/**
- * i915_get_pipe - return the the pipe associated with a given plane
- * @dev: DRM device
- * @plane: plane to look for
- *
- * The Intel Mesa & 2D drivers call the vblank routines with a plane number
- * rather than a pipe number, since they may not always be equal. This routine
- * maps the given @plane back to a pipe number.
- */
-static int
-i915_get_pipe(struct drm_device *dev, int plane)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 dspcntr;
-
- dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
-
- return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
-}
-
-/**
- * i915_get_plane - return the the plane associated with a given pipe
- * @dev: DRM device
- * @pipe: pipe to look for
- *
- * The Intel Mesa & 2D drivers call the vblank routines with a plane number
- * rather than a plane number, since they may not always be equal. This routine
- * maps the given @pipe back to a plane number.
- */
-static int
-i915_get_plane(struct drm_device *dev, int pipe)
-{
- if (i915_get_pipe(dev, 0) == pipe)
- return 0;
- return 1;
-}
-
-/**
* i915_pipe_enabled - check if a pipe is enabled
* @dev: DRM device
* @pipe: pipe to check
@@ -121,6 +84,9 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
* Emit blits for scheduled buffer swaps.
*
* This function will be called with the HW lock held.
+ * Because this function must grab the ring mutex (dev->struct_mutex),
+ * it can no longer run at soft irq time. We'll fix this when we do
+ * the DRI2 swap buffer work.
*/
static void i915_vblank_tasklet(struct drm_device *dev)
{
@@ -141,6 +107,8 @@ static void i915_vblank_tasklet(struct drm_device *dev)
u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
RING_LOCALS;
+ mutex_lock(&dev->struct_mutex);
+
if (IS_I965G(dev) && sarea_priv->front_tiled) {
cmd |= XY_SRC_COPY_BLT_DST_TILED;
dst_pitch >>= 2;
@@ -165,7 +133,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
drm_i915_vbl_swap_t *vbl_swap =
list_entry(list, drm_i915_vbl_swap_t, head);
- int pipe = i915_get_pipe(dev, vbl_swap->plane);
+ int pipe = vbl_swap->pipe;
if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
continue;
@@ -179,20 +147,19 @@ static void i915_vblank_tasklet(struct drm_device *dev)
drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
- if (!drw) {
- spin_unlock(&dev->drw_lock);
- drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
- spin_lock(&dev_priv->swaps_lock);
- continue;
- }
-
list_for_each(hit, &hits) {
drm_i915_vbl_swap_t *swap_cmp =
list_entry(hit, drm_i915_vbl_swap_t, head);
struct drm_drawable_info *drw_cmp =
drm_get_drawable_info(dev, swap_cmp->drw_id);
- if (drw_cmp &&
+ /* Make sure both drawables are still
+ * around and have some rectangles before
+ * we look inside to order them for the
+ * blts below.
+ */
+ if (drw_cmp && drw_cmp->num_rects > 0 &&
+ drw && drw->num_rects > 0 &&
drw_cmp->rects[0].y1 > drw->rects[0].y1) {
list_add_tail(list, hit);
break;
@@ -212,6 +179,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
if (nhits == 0) {
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+ mutex_unlock(&dev->struct_mutex);
return;
}
@@ -265,18 +233,21 @@ static void i915_vblank_tasklet(struct drm_device *dev)
drm_i915_vbl_swap_t *swap_hit =
list_entry(hit, drm_i915_vbl_swap_t, head);
struct drm_clip_rect *rect;
- int num_rects, plane;
+ int num_rects, pipe;
unsigned short top, bottom;
drw = drm_get_drawable_info(dev, swap_hit->drw_id);
+ /* The drawable may have been destroyed since
+ * the vblank swap was queued
+ */
if (!drw)
continue;
rect = drw->rects;
- plane = swap_hit->plane;
- top = upper[plane];
- bottom = lower[plane];
+ pipe = swap_hit->pipe;
+ top = upper[pipe];
+ bottom = lower[pipe];
for (num_rects = drw->num_rects; num_rects--; rect++) {
int y1 = max(rect->y1, top);
@@ -302,6 +273,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
}
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ mutex_unlock(&dev->struct_mutex);
list_for_each_safe(hit, tmp, &hits) {
drm_i915_vbl_swap_t *swap_hit =
@@ -313,15 +285,16 @@ static void i915_vblank_tasklet(struct drm_device *dev)
}
}
-u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
u32 high1, high2, low, count;
- int pipe;
- pipe = i915_get_pipe(dev, plane);
high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
@@ -350,18 +323,37 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
}
void
-i915_gem_vblank_work_handler(struct work_struct *work)
+i915_vblank_work_handler(struct work_struct *work)
{
- drm_i915_private_t *dev_priv;
- struct drm_device *dev;
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ vblank_work);
+ struct drm_device *dev = dev_priv->dev;
+ unsigned long irqflags;
- dev_priv = container_of(work, drm_i915_private_t,
- mm.vblank_work);
- dev = dev_priv->dev;
+ if (dev->lock.hw_lock == NULL) {
+ i915_vblank_tasklet(dev);
+ return;
+ }
+
+ spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+ dev->locked_tasklet_func = i915_vblank_tasklet;
+ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+ /* Try to get the lock now, if this fails, the lock
+ * holder will execute the tasklet during unlock
+ */
+ if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
+ return;
+
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+
+ spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+ dev->locked_tasklet_func = NULL;
+ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
- mutex_lock(&dev->struct_mutex);
i915_vblank_tasklet(dev);
- mutex_unlock(&dev->struct_mutex);
+ drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
@@ -398,7 +390,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
PIPE_VBLANK_INTERRUPT_STATUS)) {
vblank++;
- drm_handle_vblank(dev, i915_get_plane(dev, 0));
+ drm_handle_vblank(dev, 0);
}
I915_WRITE(PIPEASTAT, pipea_stats);
@@ -416,7 +408,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
PIPE_VBLANK_INTERRUPT_STATUS)) {
vblank++;
- drm_handle_vblank(dev, i915_get_plane(dev, 1));
+ drm_handle_vblank(dev, 1);
}
if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
@@ -441,12 +433,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if (iir & I915_ASLE_INTERRUPT)
opregion_asle_intr(dev);
- if (vblank && dev_priv->swaps_pending > 0) {
- if (dev_priv->ring.ring_obj == NULL)
- drm_locked_tasklet(dev, i915_vblank_tasklet);
- else
- schedule_work(&dev_priv->mm.vblank_work);
- }
+ if (vblank && dev_priv->swaps_pending > 0)
+ schedule_work(&dev_priv->vblank_work);
return IRQ_HANDLED;
}
@@ -481,22 +469,24 @@ static int i915_emit_irq(struct drm_device * dev)
void i915_user_irq_get(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
- spin_lock(&dev_priv->user_irq_lock);
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
- spin_unlock(&dev_priv->user_irq_lock);
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
void i915_user_irq_put(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
- spin_lock(&dev_priv->user_irq_lock);
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
- spin_unlock(&dev_priv->user_irq_lock);
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -578,74 +568,95 @@ int i915_irq_wait(struct drm_device *dev, void *data,
return i915_wait_irq(dev, irqwait->irq_seq);
}
-int i915_enable_vblank(struct drm_device *dev, int plane)
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+int i915_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
u32 pipestat;
+ u32 interrupt = 0;
+ unsigned long irqflags;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
- i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
+ interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
- i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+ interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
pipe);
- break;
+ return 0;
}
- if (pipestat_reg) {
- pipestat = I915_READ(pipestat_reg);
- if (IS_I965G(dev))
- pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
- else
- pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
- /* Clear any stale interrupt status */
- pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
- PIPE_VBLANK_INTERRUPT_STATUS);
- I915_WRITE(pipestat_reg, pipestat);
- }
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ /* Enabling vblank events in IMR comes before PIPESTAT write, or
+ * there's a race where the PIPESTAT vblank bit gets set to 1, so
+ * the OR of enabled PIPESTAT bits goes to 1, so the PIPExEVENT in
+ * ISR flashes to 1, but the IIR bit doesn't get set to 1 because
+ * IMR masks it. It doesn't ever get set after we clear the masking
+ * in IMR because the ISR bit is edge, not level-triggered, on the
+ * OR of PIPESTAT bits.
+ */
+ i915_enable_irq(dev_priv, interrupt);
+ pipestat = I915_READ(pipestat_reg);
+ if (IS_I965G(dev))
+ pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
+ else
+ pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
+ /* Clear any stale interrupt status */
+ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+ PIPE_VBLANK_INTERRUPT_STATUS);
+ I915_WRITE(pipestat_reg, pipestat);
+ (void) I915_READ(pipestat_reg); /* Posting read */
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
return 0;
}
-void i915_disable_vblank(struct drm_device *dev, int plane)
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+void i915_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
u32 pipestat;
+ u32 interrupt = 0;
+ unsigned long irqflags;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
- i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
+ interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
- i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+ interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
pipe);
+ return;
break;
}
- if (pipestat_reg) {
- pipestat = I915_READ(pipestat_reg);
- pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
- PIPE_VBLANK_INTERRUPT_ENABLE);
- /* Clear any stale interrupt status */
- pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
- PIPE_VBLANK_INTERRUPT_STATUS);
- I915_WRITE(pipestat_reg, pipestat);
- }
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ i915_disable_irq(dev_priv, interrupt);
+ pipestat = I915_READ(pipestat_reg);
+ pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+ PIPE_VBLANK_INTERRUPT_ENABLE);
+ /* Clear any stale interrupt status */
+ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+ PIPE_VBLANK_INTERRUPT_STATUS);
+ I915_WRITE(pipestat_reg, pipestat);
+ (void) I915_READ(pipestat_reg); /* Posting read */
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
/* Set the vblank monitor pipe
@@ -687,8 +698,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_vblank_swap_t *swap = data;
- drm_i915_vbl_swap_t *vbl_swap;
- unsigned int pipe, seqtype, curseq, plane;
+ drm_i915_vbl_swap_t *vbl_swap, *vbl_old;
+ unsigned int pipe, seqtype, curseq;
unsigned long irqflags;
struct list_head *list;
int ret;
@@ -709,8 +720,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
- plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
- pipe = i915_get_pipe(dev, plane);
+ pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
@@ -751,44 +761,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
}
}
+ vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+
+ if (!vbl_swap) {
+ DRM_ERROR("Failed to allocate memory to queue swap\n");
+ drm_vblank_put(dev, pipe);
+ return -ENOMEM;
+ }
+
+ vbl_swap->drw_id = swap->drawable;
+ vbl_swap->pipe = pipe;
+ vbl_swap->sequence = swap->sequence;
+
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
list_for_each(list, &dev_priv->vbl_swaps.head) {
- vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
+ vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
- if (vbl_swap->drw_id == swap->drawable &&
- vbl_swap->plane == plane &&
- vbl_swap->sequence == swap->sequence) {
+ if (vbl_old->drw_id == swap->drawable &&
+ vbl_old->pipe == pipe &&
+ vbl_old->sequence == swap->sequence) {
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+ drm_vblank_put(dev, pipe);
+ drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
DRM_DEBUG("Already scheduled\n");
return 0;
}
}
- spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-
- if (dev_priv->swaps_pending >= 100) {
+ if (dev_priv->swaps_pending >= 10) {
DRM_DEBUG("Too many swaps queued\n");
+ DRM_DEBUG(" pipe 0: %d pipe 1: %d\n",
+ drm_vblank_count(dev, 0),
+ drm_vblank_count(dev, 1));
+
+ list_for_each(list, &dev_priv->vbl_swaps.head) {
+ vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
+ DRM_DEBUG("\tdrw %x pipe %d seq %x\n",
+ vbl_old->drw_id, vbl_old->pipe,
+ vbl_old->sequence);
+ }
+ spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
drm_vblank_put(dev, pipe);
+ drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
return -EBUSY;
}
- vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
-
- if (!vbl_swap) {
- DRM_ERROR("Failed to allocate memory to queue swap\n");
- drm_vblank_put(dev, pipe);
- return -ENOMEM;
- }
-
- DRM_DEBUG("\n");
-
- vbl_swap->drw_id = swap->drawable;
- vbl_swap->plane = plane;
- vbl_swap->sequence = swap->sequence;
-
- spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
-
list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
dev_priv->swaps_pending++;
@@ -815,6 +833,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
spin_lock_init(&dev_priv->swaps_lock);
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
+ INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler);
dev_priv->swaps_pending = 0;
/* Set initial unmasked IRQs to just the selected vblank pipes. */
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 1e328d19cd6..3e01992230b 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -135,7 +135,7 @@ static int wait_for_pin(struct i2c_algo_pcf_data *adap, int *status) {
*status = get_pcf(adap, 1);
#ifndef STUB_I2C
while (timeout-- && (*status & I2C_PCF_PIN)) {
- adap->waitforpin();
+ adap->waitforpin(adap->data);
*status = get_pcf(adap, 1);
}
if (*status & I2C_PCF_LAB) {
@@ -208,7 +208,7 @@ static int pcf_init_8584 (struct i2c_algo_pcf_data *adap)
return -ENXIO;
}
- printk(KERN_DEBUG "i2c-algo-pcf.o: deteted and initialized PCF8584.\n");
+ printk(KERN_DEBUG "i2c-algo-pcf.o: detected and initialized PCF8584.\n");
return 0;
}
@@ -331,13 +331,16 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
int i;
int ret=0, timeout, status;
+ if (adap->xfer_begin)
+ adap->xfer_begin(adap->data);
/* Check for bus busy */
timeout = wait_for_bb(adap);
if (timeout) {
DEB2(printk(KERN_ERR "i2c-algo-pcf.o: "
"Timeout waiting for BB in pcf_xfer\n");)
- return -EIO;
+ i = -EIO;
+ goto out;
}
for (i = 0;ret >= 0 && i < num; i++) {
@@ -359,12 +362,14 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
if (timeout) {
if (timeout == -EINTR) {
/* arbitration lost */
- return (-EINTR);
+ i = -EINTR;
+ goto out;
}
i2c_stop(adap);
DEB2(printk(KERN_ERR "i2c-algo-pcf.o: Timeout waiting "
"for PIN(1) in pcf_xfer\n");)
- return (-EREMOTEIO);
+ i = -EREMOTEIO;
+ goto out;
}
#ifndef STUB_I2C
@@ -372,7 +377,8 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
if (status & I2C_PCF_LRB) {
i2c_stop(adap);
DEB2(printk(KERN_ERR "i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");)
- return (-EREMOTEIO);
+ i = -EREMOTEIO;
+ goto out;
}
#endif
@@ -404,6 +410,9 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
}
}
+out:
+ if (adap->xfer_end)
+ adap->xfer_end(adap->data);
return (i);
}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index acadbc51fc0..7f95905bbb9 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -97,6 +97,7 @@ config I2C_I801
ICH9
Tolapai
ICH10
+ PCH
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 8164de1f4d7..228f7572306 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -423,7 +423,6 @@ static const struct i2c_adapter cpm_ops = {
.owner = THIS_MODULE,
.name = "i2c-cpm",
.algo = &cpm_i2c_algo,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
};
static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 7f38c01fb3a..0ed3ccb81b6 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -104,7 +104,8 @@ static int pcf_isa_getclock(void *data)
return (clock);
}
-static void pcf_isa_waitforpin(void) {
+static void pcf_isa_waitforpin(void *data)
+{
DEFINE_WAIT(wait);
int timeout = 2;
unsigned long flags;
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 1098f21ace1..648aa7baff8 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -123,7 +123,7 @@ static int __devinit hydra_probe(struct pci_dev *dev,
hydra_adap.name))
return -EBUSY;
- hydra_bit_data.data = ioremap(base, pci_resource_len(dev, 0));
+ hydra_bit_data.data = pci_ioremap_bar(dev, 0);
if (hydra_bit_data.data == NULL) {
release_mem_region(base+offsetof(struct Hydra, CachePD), 4);
return -ENODEV;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index dc7ea32b69a..5123eb69a97 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -41,6 +41,7 @@
Tolapai 0x5032 32 hard yes yes yes
ICH10 0x3a30 32 hard yes yes yes
ICH10 0x3a60 32 hard yes yes yes
+ PCH 0x3b30 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -576,6 +577,7 @@ static struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
{ 0, }
};
@@ -599,6 +601,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
case PCI_DEVICE_ID_INTEL_ICH10_4:
case PCI_DEVICE_ID_INTEL_ICH10_5:
+ case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
i801_features |= FEATURE_I2C_BLOCK_READ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 17356827b93..4c35702830c 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -1,6 +1,8 @@
#
# Miscellaneous I2C chip drivers configuration
#
+# *** DEPRECATED! Do not add new entries! See Makefile ***
+#
menu "Miscellaneous I2C Chip support"
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index ca520fa143d..23d2a31b0a6 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -1,7 +1,8 @@
#
# Makefile for miscellaneous I2C chip drivers.
#
-# Think twice before you add a new driver to this directory.
+# Do not add new drivers to this directory! It is DEPRECATED.
+#
# Device drivers are better grouped according to the functionality they
# implement rather than to the bus they are connected to. In particular:
# * Hardware monitoring chip drivers go to drivers/hwmon
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 42e852d79ff..5a485c22660 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -266,6 +266,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.platform_data = info->platform_data;
+ if (info->archdata)
+ client->dev.archdata = *info->archdata;
+
client->flags = info->flags;
client->addr = info->addr;
client->irq = info->irq;
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 093d3248ca8..9cf92ac939d 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -18,22 +18,66 @@ ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o
obj-$(CONFIG_IDE) += ide-core.o
-ifeq ($(CONFIG_IDE_ARM), y)
- ide-arm-core-y += arm/ide_arm.o
- obj-y += ide-arm-core.o
-endif
-
-obj-$(CONFIG_IDE) += legacy/ pci/
+obj-$(CONFIG_IDE_ARM) += ide_arm.o
+
+obj-$(CONFIG_BLK_DEV_ALI14XX) += ali14xx.o
+obj-$(CONFIG_BLK_DEV_UMC8672) += umc8672.o
+obj-$(CONFIG_BLK_DEV_DTC2278) += dtc2278.o
+obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o
+obj-$(CONFIG_BLK_DEV_QD65XX) += qd65xx.o
+obj-$(CONFIG_BLK_DEV_4DRIVES) += ide-4drives.o
+
+obj-$(CONFIG_BLK_DEV_GAYLE) += gayle.o
+obj-$(CONFIG_BLK_DEV_FALCON_IDE) += falconide.o
+obj-$(CONFIG_BLK_DEV_MAC_IDE) += macide.o
+obj-$(CONFIG_BLK_DEV_Q40IDE) += q40ide.o
+obj-$(CONFIG_BLK_DEV_BUDDHA) += buddha.o
+
+obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o
+obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o
+obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o
+obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o
+obj-$(CONFIG_BLK_DEV_CELLEB) += scc_pata.o
+obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o
+obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o
+obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
+obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o
+obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o
+obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o
+obj-$(CONFIG_BLK_DEV_DELKIN) += delkin_cb.o
+obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
+obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o
+obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o
+obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o
+obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o
+obj-$(CONFIG_BLK_DEV_OPTI621) += opti621.o
+obj-$(CONFIG_BLK_DEV_PDC202XX_OLD) += pdc202xx_old.o
+obj-$(CONFIG_BLK_DEV_PDC202XX_NEW) += pdc202xx_new.o
+obj-$(CONFIG_BLK_DEV_PIIX) += piix.o
+obj-$(CONFIG_BLK_DEV_RZ1000) += rz1000.o
+obj-$(CONFIG_BLK_DEV_SVWKS) += serverworks.o
+obj-$(CONFIG_BLK_DEV_SGIIOC4) += sgiioc4.o
+obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o
+obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o
+obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o
+obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o
+obj-$(CONFIG_BLK_DEV_TC86C001) += tc86c001.o
+obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o
+obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o
+obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o
+
+# Must appear at the end of the block
+obj-$(CONFIG_BLK_DEV_GENERIC) += ide-pci-generic.o
+ide-pci-generic-y += generic.o
obj-$(CONFIG_IDEPCI_PCIBUS_ORDER) += ide-scan-pci.o
-ifeq ($(CONFIG_BLK_DEV_CMD640), y)
- cmd640-core-y += pci/cmd640.o
- obj-y += cmd640-core.o
-endif
+obj-$(CONFIG_BLK_DEV_CMD640) += cmd640.o
+
+obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o
+
+obj-$(CONFIG_IDE_H8300) += ide-h8300.o
-obj-$(CONFIG_IDE) += ppc/
-obj-$(CONFIG_IDE_H8300) += h8300/
obj-$(CONFIG_IDE_GENERIC) += ide-generic.o
obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
@@ -58,14 +102,12 @@ obj-$(CONFIG_IDE_GD) += ide-gd_mod.o
obj-$(CONFIG_BLK_DEV_IDECD) += ide-cd_mod.o
obj-$(CONFIG_BLK_DEV_IDETAPE) += ide-tape.o
-ifeq ($(CONFIG_BLK_DEV_IDECS), y)
- ide-cs-core-y += legacy/ide-cs.o
- obj-y += ide-cs-core.o
-endif
+obj-$(CONFIG_BLK_DEV_IDECS) += ide-cs.o
-ifeq ($(CONFIG_BLK_DEV_PLATFORM), y)
- ide-platform-core-y += legacy/ide_platform.o
- obj-y += ide-platform-core.o
-endif
+obj-$(CONFIG_BLK_DEV_PLATFORM) += ide_platform.o
+
+obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
+obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
+obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
-obj-$(CONFIG_IDE) += arm/ mips/
+obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/aec62xx.c
index 4142c698e0d..4142c698e0d 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/aec62xx.c
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/ali14xx.c
index 90da1f953ed..90da1f953ed 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/ali14xx.c
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/alim15x3.c
index daf9dce39e5..daf9dce39e5 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/alim15x3.c
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/amd74xx.c
index 81ec73134ed..81ec73134ed 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/amd74xx.c
diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile
deleted file mode 100644
index 5bc26053afa..00000000000
--- a/drivers/ide/arm/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-
-obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
-obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
-obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
-
-ifeq ($(CONFIG_IDE_ARM), m)
- obj-m += ide_arm.o
-endif
-
-EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/atiixp.c
index b2735d28f5c..b2735d28f5c 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/atiixp.c
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 0ec8fd1e4dc..0ec8fd1e4dc 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/buddha.c
index c5a3c9ef6a5..c5a3c9ef6a5 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/buddha.c
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/cmd640.c
index e4306647d00..e4306647d00 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/cmd640.c
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/cmd64x.c
index 935385c77e0..935385c77e0 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/cmd64x.c
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/cs5520.c
index 5efb467f8fa..5efb467f8fa 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/cs5520.c
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/cs5530.c
index 53f079cc00a..53f079cc00a 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/cs5530.c
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/cs5535.c
index 983d957a018..983d957a018 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/cs5535.c
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/cy82c693.c
index 5297f07d293..5297f07d293 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/cy82c693.c
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/delkin_cb.c
index 8f1b2d9f051..8f1b2d9f051 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/delkin_cb.c
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/dtc2278.c
index 689b2e49341..689b2e49341 100644
--- a/drivers/ide/legacy/dtc2278.c
+++ b/drivers/ide/dtc2278.c
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/falconide.c
index 39d500d84b0..39d500d84b0 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/falconide.c
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/gayle.c
index 69150688656..69150688656 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/gayle.c
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/generic.c
index 474f96a7c07..474f96a7c07 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/generic.c
diff --git a/drivers/ide/h8300/Makefile b/drivers/ide/h8300/Makefile
deleted file mode 100644
index 5eba16f423f..00000000000
--- a/drivers/ide/h8300/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-
-obj-$(CONFIG_IDE_H8300) += ide-h8300.o
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/hpt366.c
index a7909e9c720..a7909e9c720 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/hpt366.c
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/ht6560b.c
index c7e5c2246b7..c7e5c2246b7 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/ht6560b.c
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/icside.c
index 76bdc9a27f6..76bdc9a27f6 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/icside.c
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/ide-4drives.c
index 9e85b1ec960..9e85b1ec960 100644
--- a/drivers/ide/legacy/ide-4drives.c
+++ b/drivers/ide/ide-4drives.c
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 13265a8827d..133afd09843 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -2089,17 +2089,15 @@ static ide_driver_t ide_cdrom_driver = {
#endif
};
-static int idecd_open(struct inode *inode, struct file *file)
+static int idecd_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct cdrom_info *info;
+ struct cdrom_info *info = ide_cd_get(bdev->bd_disk);
int rc = -ENOMEM;
- info = ide_cd_get(disk);
if (!info)
return -ENXIO;
- rc = cdrom_open(&info->devinfo, inode, file);
+ rc = cdrom_open(&info->devinfo, bdev, mode);
if (rc < 0)
ide_cd_put(info);
@@ -2107,12 +2105,11 @@ static int idecd_open(struct inode *inode, struct file *file)
return rc;
}
-static int idecd_release(struct inode *inode, struct file *file)
+static int idecd_release(struct gendisk *disk, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
- cdrom_release(&info->devinfo, file);
+ cdrom_release(&info->devinfo, mode);
ide_cd_put(info);
@@ -2158,10 +2155,9 @@ static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
return 0;
}
-static int idecd_ioctl(struct inode *inode, struct file *file,
+static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
int err;
@@ -2174,9 +2170,9 @@ static int idecd_ioctl(struct inode *inode, struct file *file,
break;
}
- err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg);
+ err = generic_ide_ioctl(info->drive, bdev, cmd, arg);
if (err == -EINVAL)
- err = cdrom_ioctl(file, &info->devinfo, inode, cmd, arg);
+ err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd, arg);
return err;
}
@@ -2201,7 +2197,7 @@ static struct block_device_operations idecd_ops = {
.owner = THIS_MODULE,
.open = idecd_open,
.release = idecd_release,
- .ioctl = idecd_ioctl,
+ .locked_ioctl = idecd_ioctl,
.media_changed = idecd_media_changed,
.revalidate_disk = idecd_revalidate_disk
};
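Note: the idecd conversion above is one instance of the tree-wide switch from (inode, file) parameters to (block_device, fmode_t) in struct block_device_operations, with BKL-covered ioctls moving to .locked_ioctl. A minimal sketch of the new-style methods, using illustrative example_ names:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>

static int example_open(struct block_device *bdev, fmode_t mode)
{
        /* write access is now checked via fmode_t, not file->f_mode */
        if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
                return -EROFS;
        return 0;
}

static int example_release(struct gendisk *disk, fmode_t mode)
{
        return 0;
}

static int example_ioctl(struct block_device *bdev, fmode_t mode,
                         unsigned int cmd, unsigned long arg)
{
        return -ENOTTY;
}

static struct block_device_operations example_ops = {
        .owner        = THIS_MODULE,
        .open         = example_open,
        .release      = example_release,
        .locked_ioctl = example_ioctl,  /* still called under the BKL */
};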
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/ide-cs.c
index cb199c815b5..cb199c815b5 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/ide-cs.c
diff --git a/drivers/ide/ide-disk.h b/drivers/ide/ide-disk.h
index b234b0feaf7..d511dab7c4a 100644
--- a/drivers/ide/ide-disk.h
+++ b/drivers/ide/ide-disk.h
@@ -13,7 +13,7 @@ ide_decl_devset(wcache);
ide_decl_devset(acoustic);
/* ide-disk_ioctl.c */
-int ide_disk_ioctl(ide_drive_t *, struct inode *, struct file *, unsigned int,
+int ide_disk_ioctl(ide_drive_t *, struct block_device *, fmode_t, unsigned int,
unsigned long);
#ifdef CONFIG_IDE_PROC_FS
diff --git a/drivers/ide/ide-disk_ioctl.c b/drivers/ide/ide-disk_ioctl.c
index a49698bcf96..7b783dd7c0b 100644
--- a/drivers/ide/ide-disk_ioctl.c
+++ b/drivers/ide/ide-disk_ioctl.c
@@ -13,15 +13,14 @@ static const struct ide_ioctl_devset ide_disk_ioctl_settings[] = {
{ 0 }
};
-int ide_disk_ioctl(ide_drive_t *drive, struct inode *inode, struct file *file,
+int ide_disk_ioctl(ide_drive_t *drive, struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
int err;
err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings);
if (err != -EOPNOTSUPP)
return err;
- return generic_ide_ioctl(drive, file, bdev, cmd, arg);
+ return generic_ide_ioctl(drive, bdev, cmd, arg);
}
diff --git a/drivers/ide/ide-floppy.h b/drivers/ide/ide-floppy.h
index c17124dd607..6dd2beb4843 100644
--- a/drivers/ide/ide-floppy.h
+++ b/drivers/ide/ide-floppy.h
@@ -23,8 +23,8 @@ void ide_floppy_create_mode_sense_cmd(struct ide_atapi_pc *, u8);
void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *);
/* ide-floppy_ioctl.c */
-int ide_floppy_ioctl(ide_drive_t *, struct inode *, struct file *, unsigned int,
- unsigned long);
+int ide_floppy_ioctl(ide_drive_t *, struct block_device *, fmode_t,
+ unsigned int, unsigned long);
#ifdef CONFIG_IDE_PROC_FS
/* ide-floppy_proc.c */
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
index 409e4c15f9b..2bc51ff73fe 100644
--- a/drivers/ide/ide-floppy_ioctl.c
+++ b/drivers/ide/ide-floppy_ioctl.c
@@ -241,7 +241,7 @@ static int ide_floppy_lockdoor(ide_drive_t *drive, struct ide_atapi_pc *pc,
return 0;
}
-static int ide_floppy_format_ioctl(ide_drive_t *drive, struct file *file,
+static int ide_floppy_format_ioctl(ide_drive_t *drive, fmode_t mode,
unsigned int cmd, void __user *argp)
{
switch (cmd) {
@@ -250,7 +250,7 @@ static int ide_floppy_format_ioctl(ide_drive_t *drive, struct file *file,
case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY:
return ide_floppy_get_format_capacities(drive, argp);
case IDEFLOPPY_IOCTL_FORMAT_START:
- if (!(file->f_mode & 2))
+ if (!(mode & FMODE_WRITE))
return -EPERM;
return ide_floppy_format_unit(drive, (int __user *)argp);
case IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS:
@@ -260,10 +260,9 @@ static int ide_floppy_format_ioctl(ide_drive_t *drive, struct file *file,
}
}
-int ide_floppy_ioctl(ide_drive_t *drive, struct inode *inode,
- struct file *file, unsigned int cmd, unsigned long arg)
+int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
+ fmode_t mode, unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
struct ide_atapi_pc pc;
void __user *argp = (void __user *)arg;
int err;
@@ -271,7 +270,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct inode *inode,
if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR)
return ide_floppy_lockdoor(drive, &pc, arg, cmd);
- err = ide_floppy_format_ioctl(drive, file, cmd, argp);
+ err = ide_floppy_format_ioctl(drive, mode, cmd, argp);
if (err != -ENOTTY)
return err;
@@ -280,11 +279,11 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct inode *inode,
* and CDROM_SEND_PACKET (legacy) ioctls
*/
if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
- err = scsi_cmd_ioctl(file, bdev->bd_disk->queue,
- bdev->bd_disk, cmd, argp);
+ err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
+ mode, cmd, argp);
if (err == -ENOTTY)
- err = generic_ide_ioctl(drive, file, bdev, cmd, arg);
+ err = generic_ide_ioctl(drive, bdev, cmd, arg);
return err;
}
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index d44898f46c3..7b666285437 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -169,9 +169,9 @@ static ide_driver_t ide_gd_driver = {
#endif
};
-static int ide_gd_open(struct inode *inode, struct file *filp)
+static int ide_gd_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
+ struct gendisk *disk = bdev->bd_disk;
struct ide_disk_obj *idkp;
ide_drive_t *drive;
int ret = 0;
@@ -197,12 +197,12 @@ static int ide_gd_open(struct inode *inode, struct file *filp)
* unreadable disk, so that we can get the format capacity
* of the drive or begin the format - Sam
*/
- if (ret && (filp->f_flags & O_NDELAY) == 0) {
+ if (ret && (mode & FMODE_NDELAY) == 0) {
ret = -EIO;
goto out_put_idkp;
}
- if ((drive->dev_flags & IDE_DFLAG_WP) && (filp->f_mode & 2)) {
+ if ((drive->dev_flags & IDE_DFLAG_WP) && (mode & FMODE_WRITE)) {
ret = -EROFS;
goto out_put_idkp;
}
@@ -214,7 +214,7 @@ static int ide_gd_open(struct inode *inode, struct file *filp)
*/
drive->disk_ops->set_doorlock(drive, disk, 1);
drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
- check_disk_change(inode->i_bdev);
+ check_disk_change(bdev);
} else if (drive->dev_flags & IDE_DFLAG_FORMAT_IN_PROGRESS) {
ret = -EBUSY;
goto out_put_idkp;
@@ -227,9 +227,8 @@ out_put_idkp:
return ret;
}
-static int ide_gd_release(struct inode *inode, struct file *filp)
+static int ide_gd_release(struct gendisk *disk, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
ide_drive_t *drive = idkp->drive;
@@ -286,21 +285,20 @@ static int ide_gd_revalidate_disk(struct gendisk *disk)
return 0;
}
-static int ide_gd_ioctl(struct inode *inode, struct file *file,
+static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
ide_drive_t *drive = idkp->drive;
- return drive->disk_ops->ioctl(drive, inode, file, cmd, arg);
+ return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg);
}
static struct block_device_operations ide_gd_ops = {
.owner = THIS_MODULE,
.open = ide_gd_open,
.release = ide_gd_release,
- .ioctl = ide_gd_ioctl,
+ .locked_ioctl = ide_gd_ioctl,
.getgeo = ide_gd_getgeo,
.media_changed = ide_gd_media_changed,
.revalidate_disk = ide_gd_revalidate_disk
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/ide-h8300.c
index e2cdd2e9cde..e2cdd2e9cde 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/ide-h8300.c
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index a90945f4979..fcde16bb53a 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -240,8 +240,7 @@ static int generic_drive_reset(ide_drive_t *drive)
return ret;
}
-int generic_ide_ioctl(ide_drive_t *drive, struct file *file,
- struct block_device *bdev,
+int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
unsigned int cmd, unsigned long arg)
{
int err;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index b2b2e5e8d38..a2d470eb2b5 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2340,35 +2340,30 @@ static const struct file_operations idetape_fops = {
.release = idetape_chrdev_release,
};
-static int idetape_open(struct inode *inode, struct file *filp)
+static int idetape_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct ide_tape_obj *tape;
+ struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk);
- tape = ide_tape_get(disk);
if (!tape)
return -ENXIO;
return 0;
}
-static int idetape_release(struct inode *inode, struct file *filp)
+static int idetape_release(struct gendisk *disk, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj);
ide_tape_put(tape);
-
return 0;
}
-static int idetape_ioctl(struct inode *inode, struct file *file,
+static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
struct ide_tape_obj *tape = ide_drv_g(bdev->bd_disk, ide_tape_obj);
ide_drive_t *drive = tape->drive;
- int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);
+ int err = generic_ide_ioctl(drive, bdev, cmd, arg);
if (err == -EINVAL)
err = idetape_blkdev_ioctl(drive, cmd, arg);
return err;
@@ -2378,7 +2373,7 @@ static struct block_device_operations idetape_block_ops = {
.owner = THIS_MODULE,
.open = idetape_open,
.release = idetape_release,
- .ioctl = idetape_ioctl,
+ .locked_ioctl = idetape_ioctl,
};
static int ide_tape_probe(ide_drive_t *drive)
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/ide_arm.c
index f728f2927b5..f728f2927b5 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/ide_arm.c
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/ide_platform.c
index 051b4ab0f35..051b4ab0f35 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/ide_platform.c
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/it8213.c
index 7c2feeb3c5e..7c2feeb3c5e 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/it8213.c
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/it821x.c
index 995e18bb313..995e18bb313 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/it821x.c
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/jmicron.c
index 9a68433cf46..9a68433cf46 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/jmicron.c
diff --git a/drivers/ide/legacy/Makefile b/drivers/ide/legacy/Makefile
deleted file mode 100644
index 6939329f89e..00000000000
--- a/drivers/ide/legacy/Makefile
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# link order is important here
-
-obj-$(CONFIG_BLK_DEV_ALI14XX) += ali14xx.o
-obj-$(CONFIG_BLK_DEV_UMC8672) += umc8672.o
-obj-$(CONFIG_BLK_DEV_DTC2278) += dtc2278.o
-obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o
-obj-$(CONFIG_BLK_DEV_QD65XX) += qd65xx.o
-obj-$(CONFIG_BLK_DEV_4DRIVES) += ide-4drives.o
-
-obj-$(CONFIG_BLK_DEV_GAYLE) += gayle.o
-obj-$(CONFIG_BLK_DEV_FALCON_IDE) += falconide.o
-obj-$(CONFIG_BLK_DEV_MAC_IDE) += macide.o
-obj-$(CONFIG_BLK_DEV_Q40IDE) += q40ide.o
-obj-$(CONFIG_BLK_DEV_BUDDHA) += buddha.o
-
-ifeq ($(CONFIG_BLK_DEV_IDECS), m)
- obj-m += ide-cs.o
-endif
-
-ifeq ($(CONFIG_BLK_DEV_PLATFORM), m)
- obj-m += ide_platform.o
-endif
-
-EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/macide.c
index 43f97cc1d30..43f97cc1d30 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/macide.c
diff --git a/drivers/ide/mips/Makefile b/drivers/ide/mips/Makefile
deleted file mode 100644
index 5873fa0b876..00000000000
--- a/drivers/ide/mips/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o
-
-EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/ns87415.c
index 13789060f40..13789060f40 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/ns87415.c
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/opti621.c
index 6048eda3cd6..6048eda3cd6 100644
--- a/drivers/ide/pci/opti621.c
+++ b/drivers/ide/opti621.c
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 122ed3c072f..122ed3c072f 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
deleted file mode 100644
index ab44a1f5f5a..00000000000
--- a/drivers/ide/pci/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-
-obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o
-obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o
-obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o
-obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o
-obj-$(CONFIG_BLK_DEV_CELLEB) += scc_pata.o
-obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o
-obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o
-obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
-obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o
-obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o
-obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o
-obj-$(CONFIG_BLK_DEV_DELKIN) += delkin_cb.o
-obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
-obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o
-obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o
-obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o
-obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o
-obj-$(CONFIG_BLK_DEV_OPTI621) += opti621.o
-obj-$(CONFIG_BLK_DEV_PDC202XX_OLD) += pdc202xx_old.o
-obj-$(CONFIG_BLK_DEV_PDC202XX_NEW) += pdc202xx_new.o
-obj-$(CONFIG_BLK_DEV_PIIX) += piix.o
-obj-$(CONFIG_BLK_DEV_RZ1000) += rz1000.o
-obj-$(CONFIG_BLK_DEV_SVWKS) += serverworks.o
-obj-$(CONFIG_BLK_DEV_SGIIOC4) += sgiioc4.o
-obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o
-obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o
-obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o
-obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o
-obj-$(CONFIG_BLK_DEV_TC86C001) += tc86c001.o
-obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o
-obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o
-obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o
-
-# Must appear at the end of the block
-obj-$(CONFIG_BLK_DEV_GENERIC) += ide-pci-generic.o
-ide-pci-generic-y += generic.o
-
-ifeq ($(CONFIG_BLK_DEV_CMD640), m)
- obj-m += cmd640.o
-endif
-
-EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index 211ae46e3e0..211ae46e3e0 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 799557c25ee..799557c25ee 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/piix.c
index d63f9fdca76..d63f9fdca76 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/piix.c
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/pmac.c
index 2e19d629853..2e19d629853 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/pmac.c
diff --git a/drivers/ide/ppc/Makefile b/drivers/ide/ppc/Makefile
deleted file mode 100644
index 74e52adcdf4..00000000000
--- a/drivers/ide/ppc/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-
-obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/q40ide.c
index 4af4a8ce4cd..4af4a8ce4cd 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/q40ide.c
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/qd65xx.c
index bc27c7aba93..bc27c7aba93 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/qd65xx.c
diff --git a/drivers/ide/legacy/qd65xx.h b/drivers/ide/qd65xx.h
index c83dea85e62..c83dea85e62 100644
--- a/drivers/ide/legacy/qd65xx.h
+++ b/drivers/ide/qd65xx.h
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/rapide.c
index 78d27d9ae43..78d27d9ae43 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/rapide.c
diff --git a/drivers/ide/pci/rz1000.c b/drivers/ide/rz1000.c
index 7daf0135cba..7daf0135cba 100644
--- a/drivers/ide/pci/rz1000.c
+++ b/drivers/ide/rz1000.c
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/sc1200.c
index f1a8758e3a9..f1a8758e3a9 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/sc1200.c
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/scc_pata.c
index 49f163aa51e..49f163aa51e 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/scc_pata.c
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/serverworks.c
index 437bc919daf..437bc919daf 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/serverworks.c
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/sgiioc4.c
index 8af9b23499f..8af9b23499f 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/siimage.c
index eb4faf92c57..eb4faf92c57 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/siimage.c
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/sis5513.c
index ad32e18c5ba..ad32e18c5ba 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/sis5513.c
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/sl82c105.c
index 84dc33602ff..84dc33602ff 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/sl82c105.c
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/slc90e66.c
index 0f759e4ed77..0f759e4ed77 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/slc90e66.c
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/tc86c001.c
index 93e2cce4b29..93e2cce4b29 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/tc86c001.c
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/triflex.c
index b6ff40336aa..b6ff40336aa 100644
--- a/drivers/ide/pci/triflex.c
+++ b/drivers/ide/triflex.c
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/trm290.c
index 75ea6152656..75ea6152656 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/trm290.c
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/umc8672.c
index 1da076e0c91..1da076e0c91 100644
--- a/drivers/ide/legacy/umc8672.c
+++ b/drivers/ide/umc8672.c
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/via82cxxx.c
index 2a812d3207e..2a812d3207e 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
new file mode 100644
index 00000000000..f5b26dd579e
--- /dev/null
+++ b/drivers/idle/Kconfig
@@ -0,0 +1,16 @@
+
+menu "Memory power savings"
+
+config I7300_IDLE_IOAT_CHANNEL
+ bool
+
+config I7300_IDLE
+ tristate "Intel chipset idle power saving driver"
+ select I7300_IDLE_IOAT_CHANNEL
+ depends on X86_64
+ help
+ Enable idle power savings with certain Intel server chipsets.
+ The chipset must have I/O AT support, such as the Intel 7300.
+ The power savings depends on the type and quantity of DRAM devices.
+
+endmenu
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
new file mode 100644
index 00000000000..5f68fc377e2
--- /dev/null
+++ b/drivers/idle/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_I7300_IDLE) += i7300_idle.o
+
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
new file mode 100644
index 00000000000..59d1bbc3cd3
--- /dev/null
+++ b/drivers/idle/i7300_idle.c
@@ -0,0 +1,674 @@
+/*
+ * (C) Copyright 2008 Intel Corporation
+ * Authors:
+ * Andy Henroid <andrew.d.henroid@intel.com>
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ */
+
+/*
+ * Save DIMM power on Intel 7300-based platforms when all CPUs/cores
+ * are idle, using the DIMM thermal throttling capability.
+ *
+ * This driver depends on the Intel integrated DMA controller (I/O AT).
+ * If the driver for I/O AT (drivers/dma/ioatdma*) is also enabled,
+ * this driver should work cooperatively.
+ */
+
+/* #define DEBUG */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/cpumask.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/stop_machine.h>
+
+#include <asm/idle.h>
+
+#include "../dma/ioatdma_hw.h"
+#include "../dma/ioatdma_registers.h"
+
+#define I7300_IDLE_DRIVER_VERSION "1.55"
+#define I7300_PRINT "i7300_idle:"
+
+static int debug;
+module_param_named(debug, debug, uint, 0644);
+MODULE_PARM_DESC(debug, "Enable debug printks in this driver");
+
+#define dprintk(fmt, arg...) \
+ do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0)
+
+/*
+ * Value to set THRTLOW to when initiating throttling
+ * 0 = No throttling
+ * 1 = Throttle when > 4 activations per eval window (Maximum throttling)
+ * 2 = Throttle when > 8 activations
+ * 168 = Throttle when > 168 activations (Minimum throttling)
+ */
+#define MAX_THRTLWLIMIT 168
+static uint i7300_idle_thrtlowlm = 1;
+module_param_named(thrtlwlimit, i7300_idle_thrtlowlm, uint, 0644);
+MODULE_PARM_DESC(thrtlwlimit,
+ "Value for THRTLOWLM activation field "
+ "(0 = disable throttle, 1 = Max throttle, 168 = Min throttle)");
+
+/*
+ * simple invocation and duration statistics
+ */
+static unsigned long total_starts;
+static unsigned long total_us;
+
+#ifdef DEBUG
+static unsigned long past_skip;
+#endif
+
+static struct pci_dev *fbd_dev;
+
+static spinlock_t i7300_idle_lock;
+static int i7300_idle_active;
+
+static u8 i7300_idle_thrtctl_saved;
+static u8 i7300_idle_thrtlow_saved;
+static u32 i7300_idle_mc_saved;
+
+static cpumask_t idle_cpumask;
+static ktime_t start_ktime;
+static unsigned long avg_idle_us;
+
+static struct dentry *debugfs_dir;
+
+/* Begin: I/O AT Helper routines */
+
+#define IOAT_CHANBASE(ioat_ctl, chan) (ioat_ctl + 0x80 + 0x80 * chan)
+/* Snoop control (disable snoops when coherency is not important) */
+#define IOAT_DESC_SADDR_SNP_CTL (1UL << 1)
+#define IOAT_DESC_DADDR_SNP_CTL (1UL << 2)
+
+static struct pci_dev *ioat_dev;
+static struct ioat_dma_descriptor *ioat_desc; /* I/O AT desc & data (1 page) */
+static unsigned long ioat_desc_phys;
+static u8 *ioat_iomap; /* I/O AT memory-mapped control regs (aka CB_BAR) */
+static u8 *ioat_chanbase;
+
+/* Start I/O AT memory copy */
+static int i7300_idle_ioat_start(void)
+{
+ u32 err;
+ /* Clear error (due to circular descriptor pointer) */
+ err = readl(ioat_chanbase + IOAT_CHANERR_OFFSET);
+ if (err)
+ writel(err, ioat_chanbase + IOAT_CHANERR_OFFSET);
+
+ writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
+ return 0;
+}
+
+/* Stop I/O AT memory copy */
+static void i7300_idle_ioat_stop(void)
+{
+ int i;
+ u8 sts;
+
+ for (i = 0; i < 5; i++) {
+ writeb(IOAT_CHANCMD_RESET,
+ ioat_chanbase + IOAT1_CHANCMD_OFFSET);
+
+ udelay(10);
+
+ sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
+ IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+
+ if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE)
+ break;
+
+ }
+
+ if (i == 5)
+ dprintk("failed to suspend+reset I/O AT after 5 retries\n");
+
+}
+
+/* Test I/O AT by copying 1024 byte from 2k to 1k */
+static int __init i7300_idle_ioat_selftest(u8 *ctl,
+ struct ioat_dma_descriptor *desc, unsigned long desc_phys)
+{
+ u64 chan_sts;
+
+ memset(desc, 0, 2048);
+ memset((u8 *) desc + 2048, 0xab, 1024);
+
+ desc[0].size = 1024;
+ desc[0].ctl = 0;
+ desc[0].src_addr = desc_phys + 2048;
+ desc[0].dst_addr = desc_phys + 1024;
+ desc[0].next = 0;
+
+ writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
+ writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
+
+ udelay(1000);
+
+ chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
+ IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+
+ if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE) {
+ /* Not complete, reset the channel */
+ writeb(IOAT_CHANCMD_RESET,
+ ioat_chanbase + IOAT1_CHANCMD_OFFSET);
+ return -1;
+ }
+
+ if (*(u32 *) ((u8 *) desc + 3068) != 0xabababab ||
+ *(u32 *) ((u8 *) desc + 2044) != 0xabababab) {
+ dprintk("Data values src 0x%x, dest 0x%x, memset 0x%x\n",
+ *(u32 *) ((u8 *) desc + 2048),
+ *(u32 *) ((u8 *) desc + 1024),
+ *(u32 *) ((u8 *) desc + 3072));
+ return -1;
+ }
+ return 0;
+}
+
+static struct device dummy_dma_dev = {
+ .bus_id = "fallback device",
+ .coherent_dma_mask = DMA_64BIT_MASK,
+ .dma_mask = &dummy_dma_dev.coherent_dma_mask,
+};
+
+/* Setup and initialize I/O AT */
+/* This driver needs I/O AT as the throttling takes effect only when there is
+ * some memory activity. We use I/O AT to set up a dummy copy, while all CPUs
+ * go idle and memory is throttled.
+ */
+static int __init i7300_idle_ioat_init(void)
+{
+ u8 ver, chan_count, ioat_chan;
+ u16 chan_ctl;
+
+ ioat_iomap = (u8 *) ioremap_nocache(pci_resource_start(ioat_dev, 0),
+ pci_resource_len(ioat_dev, 0));
+
+ if (!ioat_iomap) {
+ printk(KERN_ERR I7300_PRINT "failed to map I/O AT registers\n");
+ goto err_ret;
+ }
+
+ ver = readb(ioat_iomap + IOAT_VER_OFFSET);
+ if (ver != IOAT_VER_1_2) {
+ printk(KERN_ERR I7300_PRINT "unknown I/O AT version (%u.%u)\n",
+ ver >> 4, ver & 0xf);
+ goto err_unmap;
+ }
+
+ chan_count = readb(ioat_iomap + IOAT_CHANCNT_OFFSET);
+ if (!chan_count) {
+ printk(KERN_ERR I7300_PRINT "unexpected # of I/O AT channels "
+ "(%u)\n",
+ chan_count);
+ goto err_unmap;
+ }
+
+ ioat_chan = chan_count - 1;
+ ioat_chanbase = IOAT_CHANBASE(ioat_iomap, ioat_chan);
+
+ chan_ctl = readw(ioat_chanbase + IOAT_CHANCTRL_OFFSET);
+ if (chan_ctl & IOAT_CHANCTRL_CHANNEL_IN_USE) {
+ printk(KERN_ERR I7300_PRINT "channel %d in use\n", ioat_chan);
+ goto err_unmap;
+ }
+
+ writew(IOAT_CHANCTRL_CHANNEL_IN_USE,
+ ioat_chanbase + IOAT_CHANCTRL_OFFSET);
+
+ ioat_desc = (struct ioat_dma_descriptor *)dma_alloc_coherent(
+ &dummy_dma_dev, 4096,
+ (dma_addr_t *)&ioat_desc_phys, GFP_KERNEL);
+ if (!ioat_desc) {
+ printk(KERN_ERR I7300_PRINT "failed to allocate I/O AT desc\n");
+ goto err_mark_unused;
+ }
+
+ writel(ioat_desc_phys & 0xffffffffUL,
+ ioat_chanbase + IOAT1_CHAINADDR_OFFSET_LOW);
+ writel(ioat_desc_phys >> 32,
+ ioat_chanbase + IOAT1_CHAINADDR_OFFSET_HIGH);
+
+ if (i7300_idle_ioat_selftest(ioat_iomap, ioat_desc, ioat_desc_phys)) {
+ printk(KERN_ERR I7300_PRINT "I/O AT self-test failed\n");
+ goto err_free;
+ }
+
+ /* Setup circular I/O AT descriptor chain */
+ ioat_desc[0].ctl = IOAT_DESC_SADDR_SNP_CTL | IOAT_DESC_DADDR_SNP_CTL;
+ ioat_desc[0].src_addr = ioat_desc_phys + 2048;
+ ioat_desc[0].dst_addr = ioat_desc_phys + 3072;
+ ioat_desc[0].size = 128;
+ ioat_desc[0].next = ioat_desc_phys + sizeof(struct ioat_dma_descriptor);
+
+ ioat_desc[1].ctl = ioat_desc[0].ctl;
+ ioat_desc[1].src_addr = ioat_desc[0].src_addr;
+ ioat_desc[1].dst_addr = ioat_desc[0].dst_addr;
+ ioat_desc[1].size = ioat_desc[0].size;
+ ioat_desc[1].next = ioat_desc_phys;
+
+ return 0;
+
+err_free:
+ dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
+err_mark_unused:
+ writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
+err_unmap:
+ iounmap(ioat_iomap);
+err_ret:
+ return -ENODEV;
+}
+
+/* Cleanup I/O AT */
+static void __exit i7300_idle_ioat_exit(void)
+{
+ int i;
+ u64 chan_sts;
+
+ i7300_idle_ioat_stop();
+
+ /* Wait for a while for the channel to halt before releasing */
+ for (i = 0; i < 10; i++) {
+ writeb(IOAT_CHANCMD_RESET,
+ ioat_chanbase + IOAT1_CHANCMD_OFFSET);
+
+ chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
+ IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+
+ if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
+ writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
+ break;
+ }
+ udelay(1000);
+ }
+
+ chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
+ IOAT_CHANSTS_DMA_TRANSFER_STATUS;
+
+ /*
+ * We tried to reset multiple times. If IO A/T channel is still active
+ * flag an error and return without cleanup. Memory leak is better
+ * than random corruption in that extreme error situation.
+ */
+ if (chan_sts == IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
+ printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels."
+ " Not freeing resources\n");
+ return;
+ }
+
+ dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
+ iounmap(ioat_iomap);
+}
+
+/* End: I/O AT Helper routines */
+
+#define DIMM_THRTLOW 0x64
+#define DIMM_THRTCTL 0x67
+#define DIMM_THRTCTL_THRMHUNT (1UL << 0)
+#define DIMM_MC 0x40
+#define DIMM_GTW_MODE (1UL << 17)
+#define DIMM_GBLACT 0x60
+
+/*
+ * Keep track of an exponential-decaying average of recent idle durations.
+ * The latest duration gets DURATION_WEIGHT_PCT percentage weight
+ * in this average, with the old average getting the remaining weight.
+ *
+ * High weights emphasize recent history, low weights include long history.
+ */
+#define DURATION_WEIGHT_PCT 55
+
+/*
+ * When the decaying average of recent durations or the predicted duration
+ * of the next timer interrupt is shorter than duration_threshold, the
+ * driver will decline to throttle.
+ */
+#define DURATION_THRESHOLD_US 100
+
+
+/* Store DIMM thermal throttle configuration */
+static int i7300_idle_thrt_save(void)
+{
+ u32 new_mc_val;
+ u8 gblactlm;
+
+ pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &i7300_idle_thrtctl_saved);
+ pci_read_config_byte(fbd_dev, DIMM_THRTLOW, &i7300_idle_thrtlow_saved);
+ pci_read_config_dword(fbd_dev, DIMM_MC, &i7300_idle_mc_saved);
+ /*
+ * Make sure we have Global Throttling Window Mode set to have a
+ * "short" window. This (mostly) works around an issue where
+ * throttling persists until the end of the global throttling window
+ * size. On the tested system, this was resulting in a maximum of
+ * 64 ms to exit throttling (average 32 ms). The actual numbers
+ * depends on system frequencies. Setting the short window reduces
+ * this by a factor of 4096.
+ *
+ * We will only do this only if the system is set for
+ * unlimited-activations while in open-loop throttling (i.e., when
+ * Global Activation Throttle Limit is zero).
+ */
+ pci_read_config_byte(fbd_dev, DIMM_GBLACT, &gblactlm);
+ dprintk("thrtctl_saved = 0x%02x, thrtlow_saved = 0x%02x\n",
+ i7300_idle_thrtctl_saved,
+ i7300_idle_thrtlow_saved);
+ dprintk("mc_saved = 0x%08x, gblactlm = 0x%02x\n",
+ i7300_idle_mc_saved,
+ gblactlm);
+ if (gblactlm == 0) {
+ new_mc_val = i7300_idle_mc_saved | DIMM_GTW_MODE;
+ pci_write_config_dword(fbd_dev, DIMM_MC, new_mc_val);
+ return 0;
+ } else {
+ dprintk("could not set GTW_MODE = 1 (OLTT enabled)\n");
+ return -ENODEV;
+ }
+}
+
+/* Restore DIMM thermal throttle configuration */
+static void i7300_idle_thrt_restore(void)
+{
+ pci_write_config_dword(fbd_dev, DIMM_MC, i7300_idle_mc_saved);
+ pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
+ pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
+}
+
+/* Enable DIMM thermal throttling */
+static void i7300_idle_start(void)
+{
+ u8 new_ctl;
+ u8 limit;
+
+ new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
+ pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
+
+ limit = i7300_idle_thrtlowlm;
+ if (unlikely(limit > MAX_THRTLWLIMIT))
+ limit = MAX_THRTLWLIMIT;
+
+ pci_write_config_byte(fbd_dev, DIMM_THRTLOW, limit);
+
+ new_ctl = i7300_idle_thrtctl_saved | DIMM_THRTCTL_THRMHUNT;
+ pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
+}
+
+/* Disable DIMM thermal throttling */
+static void i7300_idle_stop(void)
+{
+ u8 new_ctl;
+ u8 got_ctl;
+
+ new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
+ pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
+
+ pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
+ pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
+ pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &got_ctl);
+ WARN_ON_ONCE(got_ctl != i7300_idle_thrtctl_saved);
+}
+
+
+/*
+ * i7300_avg_duration_check()
+ * return 0 if the decaying average of recent idle durations is
+ * more than DURATION_THRESHOLD_US
+ */
+static int i7300_avg_duration_check(void)
+{
+ if (avg_idle_us >= DURATION_THRESHOLD_US)
+ return 0;
+
+#ifdef DEBUG
+ past_skip++;
+#endif
+ return 1;
+}
+
+/* Idle notifier to look at idle CPUs */
+static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ unsigned long flags;
+ ktime_t now_ktime;
+ static ktime_t idle_begin_time;
+ static int time_init = 1;
+
+ if (!i7300_idle_thrtlowlm)
+ return 0;
+
+ if (unlikely(time_init)) {
+ time_init = 0;
+ idle_begin_time = ktime_get();
+ }
+
+ spin_lock_irqsave(&i7300_idle_lock, flags);
+ if (val == IDLE_START) {
+
+ cpu_set(smp_processor_id(), idle_cpumask);
+
+ if (cpus_weight(idle_cpumask) != num_online_cpus())
+ goto end;
+
+ now_ktime = ktime_get();
+ idle_begin_time = now_ktime;
+
+ if (i7300_avg_duration_check())
+ goto end;
+
+ i7300_idle_active = 1;
+ total_starts++;
+ start_ktime = now_ktime;
+
+ i7300_idle_start();
+ i7300_idle_ioat_start();
+
+ } else if (val == IDLE_END) {
+ cpu_clear(smp_processor_id(), idle_cpumask);
+ if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
+ /* First CPU coming out of idle */
+ u64 idle_duration_us;
+
+ now_ktime = ktime_get();
+
+ idle_duration_us = ktime_to_us(ktime_sub
+ (now_ktime, idle_begin_time));
+
+ avg_idle_us =
+ ((100 - DURATION_WEIGHT_PCT) * avg_idle_us +
+ DURATION_WEIGHT_PCT * idle_duration_us) / 100;
+
+ if (i7300_idle_active) {
+ ktime_t idle_ktime;
+
+ idle_ktime = ktime_sub(now_ktime, start_ktime);
+ total_us += ktime_to_us(idle_ktime);
+
+ i7300_idle_ioat_stop();
+ i7300_idle_stop();
+ i7300_idle_active = 0;
+ }
+ }
+ }
+end:
+ spin_unlock_irqrestore(&i7300_idle_lock, flags);
+ return 0;
+}
+
+static struct notifier_block i7300_idle_nb = {
+ .notifier_call = i7300_idle_notifier,
+};
+
+/*
+ * I/O AT controls (PCI bus 0 device 8 function 0)
+ * DIMM controls (PCI bus 0 device 16 function 1)
+ */
+#define IOAT_BUS 0
+#define IOAT_DEVFN PCI_DEVFN(8, 0)
+#define MEMCTL_BUS 0
+#define MEMCTL_DEVFN PCI_DEVFN(16, 1)
+
+struct fbd_ioat {
+ unsigned int vendor;
+ unsigned int ioat_dev;
+};
+
+/*
+ * The i5000 chip-set has the same hooks as the i7300
+ * but support is disabled by default because this driver
+ * has not been validated on that platform.
+ */
+#define SUPPORT_I5000 0
+
+static const struct fbd_ioat fbd_ioat_list[] = {
+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB},
+#if SUPPORT_I5000
+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT},
+#endif
+ {0, 0}
+};
+
+/* table of devices that work with this driver */
+static const struct pci_device_id pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) },
+#if SUPPORT_I5000
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
+#endif
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+/* Check for known platforms with I/O-AT */
+static int __init i7300_idle_platform_probe(void)
+{
+ int i;
+
+ fbd_dev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN);
+ if (!fbd_dev)
+ return -ENODEV;
+
+ for (i = 0; pci_tbl[i].vendor != 0; i++) {
+ if (fbd_dev->vendor == pci_tbl[i].vendor &&
+ fbd_dev->device == pci_tbl[i].device) {
+ break;
+ }
+ }
+ if (pci_tbl[i].vendor == 0)
+ return -ENODEV;
+
+ ioat_dev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN);
+ if (!ioat_dev)
+ return -ENODEV;
+
+ for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
+ if (ioat_dev->vendor == fbd_ioat_list[i].vendor &&
+ ioat_dev->device == fbd_ioat_list[i].ioat_dev) {
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+int stats_open_generic(struct inode *inode, struct file *fp)
+{
+ fp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t stats_read_ul(struct file *fp, char __user *ubuf, size_t count,
+ loff_t *off)
+{
+ unsigned long *p = fp->private_data;
+ char buf[32];
+ int len;
+
+ len = snprintf(buf, 32, "%lu\n", *p);
+ return simple_read_from_buffer(ubuf, count, off, buf, len);
+}
+
+static const struct file_operations idle_fops = {
+ .open = stats_open_generic,
+ .read = stats_read_ul,
+};
+
+struct debugfs_file_info {
+ void *ptr;
+ char name[32];
+ struct dentry *file;
+} debugfs_file_list[] = {
+ {&total_starts, "total_starts", NULL},
+ {&total_us, "total_us", NULL},
+#ifdef DEBUG
+ {&past_skip, "past_skip", NULL},
+#endif
+ {NULL, "", NULL}
+ };
+
+static int __init i7300_idle_init(void)
+{
+ spin_lock_init(&i7300_idle_lock);
+ cpus_clear(idle_cpumask);
+ total_us = 0;
+
+ if (i7300_idle_platform_probe())
+ return -ENODEV;
+
+ if (i7300_idle_thrt_save())
+ return -ENODEV;
+
+ if (i7300_idle_ioat_init())
+ return -ENODEV;
+
+ debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
+ if (debugfs_dir) {
+ int i = 0;
+
+ while (debugfs_file_list[i].ptr != NULL) {
+ debugfs_file_list[i].file = debugfs_create_file(
+ debugfs_file_list[i].name,
+ S_IRUSR,
+ debugfs_dir,
+ debugfs_file_list[i].ptr,
+ &idle_fops);
+ i++;
+ }
+ }
+
+ idle_notifier_register(&i7300_idle_nb);
+
+ printk(KERN_INFO "i7300_idle: loaded v%s\n", I7300_IDLE_DRIVER_VERSION);
+ return 0;
+}
+
+static void __exit i7300_idle_exit(void)
+{
+ idle_notifier_unregister(&i7300_idle_nb);
+
+ if (debugfs_dir) {
+ int i = 0;
+
+ while (debugfs_file_list[i].file != NULL) {
+ debugfs_remove(debugfs_file_list[i].file);
+ i++;
+ }
+
+ debugfs_remove(debugfs_dir);
+ }
+ i7300_idle_thrt_restore();
+ i7300_idle_ioat_exit();
+}
+
+module_init(i7300_idle_init);
+module_exit(i7300_idle_exit);
+
+MODULE_AUTHOR("Andy Henroid <andrew.d.henroid@intel.com>");
+MODULE_DESCRIPTION("Intel Chipset DIMM Idle Power Saving Driver v"
+ I7300_IDLE_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
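Note: the throttling decision in i7300_idle hinges on the DURATION_WEIGHT_PCT decaying average and the 100 us DURATION_THRESHOLD_US. The stand-alone C sketch below is not part of the driver; the sample durations are made up. It shows how the average evolves when the newest sample gets 55% weight:

#include <stdio.h>

int main(void)
{
        const unsigned long weight = 55;               /* DURATION_WEIGHT_PCT */
        unsigned long avg = 0;
        unsigned long samples[] = { 40, 200, 50 };     /* idle durations in us */
        unsigned int i;

        for (i = 0; i < 3; i++) {
                /* same update as the driver: new sample gets 55% weight */
                avg = ((100 - weight) * avg + weight * samples[i]) / 100;
                printf("after %3lu us idle: avg = %lu us\n", samples[i], avg);
        }
        /* the driver declines to throttle while avg stays below 100 us */
        return 0;
}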
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 49c45feccd5..5c54fc2350b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -406,19 +406,15 @@ static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
if (i == qp_info->snoop_table_size) {
/* Grow table. */
- new_snoop_table = kmalloc(sizeof mad_snoop_priv *
- qp_info->snoop_table_size + 1,
- GFP_ATOMIC);
+ new_snoop_table = krealloc(qp_info->snoop_table,
+ sizeof mad_snoop_priv *
+ (qp_info->snoop_table_size + 1),
+ GFP_ATOMIC);
if (!new_snoop_table) {
i = -ENOMEM;
goto out;
}
- if (qp_info->snoop_table) {
- memcpy(new_snoop_table, qp_info->snoop_table,
- sizeof mad_snoop_priv *
- qp_info->snoop_table_size);
- kfree(qp_info->snoop_table);
- }
+
qp_info->snoop_table = new_snoop_table;
qp_info->snoop_table_size++;
}
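Note: the mad.c hunk above replaces the kmalloc/memcpy/kfree grow sequence with krealloc(), which leaves the original buffer intact when the allocation fails. A hedged sketch of that pattern (example_ names are illustrative):

#include <linux/slab.h>
#include <linux/errno.h>

static int example_grow_table(int **table, unsigned int *nr, gfp_t gfp)
{
        int *bigger = krealloc(*table, sizeof(**table) * (*nr + 1), gfp);

        if (!bigger)
                return -ENOMEM;         /* *table is still valid here */

        *table = bigger;
        (*nr)++;
        return 0;
}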
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 3ddacf39b7b..4346a24568f 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -904,8 +904,8 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
mutex_lock(&file->mut);
mc = ucma_alloc_multicast(ctx);
- if (IS_ERR(mc)) {
- ret = PTR_ERR(mc);
+ if (!mc) {
+ ret = -ENOMEM;
goto err1;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index c325c44807e..44e936e48a3 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1942,6 +1942,7 @@ fail4:
fail3:
cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
+ cm_id->rem_ref(cm_id);
put_ep(&ep->com);
out:
return err;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 5d7b7855afb..4df887af66a 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -128,6 +128,8 @@ struct ehca_shca {
/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
u32 hca_cap_mr_pgsize;
int max_mtu;
+ int max_num_qps;
+ int max_num_cqs;
atomic_t num_cqs;
atomic_t num_qps;
};
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 33647a95eb9..2f4c28a3027 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -132,9 +132,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
return ERR_PTR(-EINVAL);
- if (!atomic_add_unless(&shca->num_cqs, 1, ehca_max_cq)) {
+ if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
ehca_err(device, "Unable to create CQ, max number of %i "
- "CQs reached.", ehca_max_cq);
+ "CQs reached.", shca->max_num_cqs);
ehca_err(device, "To increase the maximum number of CQs "
"use the number_of_cqs module parameter.\n");
return ERR_PTR(-ENOSPC);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 598844d2edc..bb02a86aa52 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -44,6 +44,8 @@
#include <linux/slab.h>
#endif
+#include <linux/notifier.h>
+#include <linux/memory.h>
#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
@@ -366,22 +368,23 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
/* Set maximum number of CQs and QPs to calculate EQ size */
- if (ehca_max_qp == -1)
- ehca_max_qp = min_t(int, rblock->max_qp, EHCA_MAX_NUM_QUEUES);
- else if (ehca_max_qp < 1 || ehca_max_qp > rblock->max_qp) {
- ehca_gen_err("Requested number of QPs is out of range (1 - %i) "
- "specified by HW", rblock->max_qp);
- ret = -EINVAL;
- goto sense_attributes1;
+ if (shca->max_num_qps == -1)
+ shca->max_num_qps = min_t(int, rblock->max_qp,
+ EHCA_MAX_NUM_QUEUES);
+ else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
+ ehca_gen_warn("The requested number of QPs is out of range "
+ "(1 - %i) specified by HW. Value is set to %i",
+ rblock->max_qp, rblock->max_qp);
+ shca->max_num_qps = rblock->max_qp;
}
- if (ehca_max_cq == -1)
- ehca_max_cq = min_t(int, rblock->max_cq, EHCA_MAX_NUM_QUEUES);
- else if (ehca_max_cq < 1 || ehca_max_cq > rblock->max_cq) {
- ehca_gen_err("Requested number of CQs is out of range (1 - %i) "
- "specified by HW", rblock->max_cq);
- ret = -EINVAL;
- goto sense_attributes1;
+ if (shca->max_num_cqs == -1)
+ shca->max_num_cqs = min_t(int, rblock->max_cq,
+ EHCA_MAX_NUM_QUEUES);
+ else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
+ ehca_gen_warn("The requested number of CQs is out of range "
+ "(1 - %i) specified by HW. Value is set to %i",
+ rblock->max_cq, rblock->max_cq);
+ shca->max_num_cqs = rblock->max_cq;
	}
/* query max MTU from first port -- it's the same for all ports */
@@ -733,9 +736,13 @@ static int __devinit ehca_probe(struct of_device *dev,
ehca_gen_err("Cannot allocate shca memory.");
return -ENOMEM;
}
+
mutex_init(&shca->modify_mutex);
atomic_set(&shca->num_cqs, 0);
atomic_set(&shca->num_qps, 0);
+ shca->max_num_qps = ehca_max_qp;
+ shca->max_num_cqs = ehca_max_cq;
+
for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
spin_lock_init(&shca->sport[i].mod_sqp_lock);
@@ -755,7 +762,7 @@ static int __devinit ehca_probe(struct of_device *dev,
goto probe1;
}
- eq_size = 2 * ehca_max_cq + 4 * ehca_max_qp;
+ eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
/* create event queues */
ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
if (ret) {
@@ -964,6 +971,41 @@ void ehca_poll_eqs(unsigned long data)
spin_unlock(&shca_list_lock);
}
+static int ehca_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ static unsigned long ehca_dmem_warn_time;
+
+ switch (action) {
+ case MEM_CANCEL_OFFLINE:
+ case MEM_CANCEL_ONLINE:
+ case MEM_ONLINE:
+ case MEM_OFFLINE:
+ return NOTIFY_OK;
+ case MEM_GOING_ONLINE:
+ case MEM_GOING_OFFLINE:
+ /* only ok if no hca is attached to the lpar */
+ spin_lock(&shca_list_lock);
+ if (list_empty(&shca_list)) {
+ spin_unlock(&shca_list_lock);
+ return NOTIFY_OK;
+ } else {
+ spin_unlock(&shca_list_lock);
+ if (printk_timed_ratelimit(&ehca_dmem_warn_time,
+ 30 * 1000))
+ ehca_gen_err("DMEM operations are not allowed"
+ "as long as an ehca adapter is"
+ "attached to the LPAR");
+ return NOTIFY_BAD;
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ehca_mem_nb = {
+ .notifier_call = ehca_mem_notifier,
+};
+
static int __init ehca_module_init(void)
{
int ret;
@@ -991,6 +1033,12 @@ static int __init ehca_module_init(void)
goto module_init2;
}
+ ret = register_memory_notifier(&ehca_mem_nb);
+ if (ret) {
+ ehca_gen_err("Failed registering memory add/remove notifier");
+ goto module_init3;
+ }
+
if (ehca_poll_all_eqs != 1) {
ehca_gen_err("WARNING!!!");
ehca_gen_err("It is possible to lose interrupts.");
@@ -1003,6 +1051,9 @@ static int __init ehca_module_init(void)
return 0;
+module_init3:
+ ibmebus_unregister_driver(&ehca_driver);
+
module_init2:
ehca_destroy_slab_caches();
@@ -1018,6 +1069,8 @@ static void __exit ehca_module_exit(void)
ibmebus_unregister_driver(&ehca_driver);
+ unregister_memory_notifier(&ehca_mem_nb);
+
ehca_destroy_slab_caches();
ehca_destroy_comp_pool();
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 4dbe2870e01..4d54b9f6456 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -465,9 +465,9 @@ static struct ehca_qp *internal_create_qp(
u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
unsigned long flags;
- if (!atomic_add_unless(&shca->num_qps, 1, ehca_max_qp)) {
+ if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
ehca_err(pd->device, "Unable to create QP, max number of %i "
- "QPs reached.", ehca_max_qp);
+ "QPs reached.", shca->max_num_qps);
ehca_err(pd->device, "To increase the maximum number of QPs "
"use the number_of_qps module parameter.\n");
return ERR_PTR(-ENOSPC);
@@ -502,6 +502,12 @@ static struct ehca_qp *internal_create_qp(
if (init_attr->srq) {
my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
+ if (qp_type == IB_QPT_UC) {
+ ehca_err(pd->device, "UC with SRQ not supported");
+ atomic_dec(&shca->num_qps);
+ return ERR_PTR(-EINVAL);
+ }
+
has_srq = 1;
parms.ext_type = EQPT_SRQBASE;
parms.srq_qpn = my_srq->real_qp_num;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index cdca3a511e1..606f1e2ef28 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -298,7 +298,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
int p, q;
int ret;
- for (p = 0; p < dev->dev->caps.num_ports; ++p)
+ for (p = 0; p < dev->num_ports; ++p)
for (q = 0; q <= 1; ++q) {
agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
q ? IB_QPT_GSI : IB_QPT_SMI,
@@ -314,7 +314,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
return 0;
err:
- for (p = 0; p < dev->dev->caps.num_ports; ++p)
+ for (p = 0; p < dev->num_ports; ++p)
for (q = 0; q <= 1; ++q)
if (dev->send_agent[p][q])
ib_unregister_mad_agent(dev->send_agent[p][q]);
@@ -327,7 +327,7 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
struct ib_mad_agent *agent;
int p, q;
- for (p = 0; p < dev->dev->caps.num_ports; ++p) {
+ for (p = 0; p < dev->num_ports; ++p) {
for (q = 0; q <= 1; ++q) {
agent = dev->send_agent[p][q];
dev->send_agent[p][q] = NULL;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index a3c2851c054..2e80f8f47b0 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -574,7 +574,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.owner = THIS_MODULE;
ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
- ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports;
+ ibdev->num_ports = 0;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ibdev->num_ports++;
+ ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
ibdev->ib_dev.num_comp_vectors = 1;
ibdev->ib_dev.dma_device = &dev->pdev->dev;
@@ -691,7 +694,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
struct mlx4_ib_dev *ibdev = ibdev_ptr;
int p;
- for (p = 1; p <= dev->caps.num_ports; ++p)
+ for (p = 1; p <= ibdev->num_ports; ++p)
mlx4_CLOSE_PORT(dev, p);
mlx4_ib_mad_cleanup(ibdev);
@@ -706,6 +709,10 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
enum mlx4_dev_event event, int port)
{
struct ib_event ibev;
+ struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
+
+ if (port > ibdev->num_ports)
+ return;
switch (event) {
case MLX4_DEV_EVENT_PORT_UP:
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6e2b0dc21b6..9974e886b8d 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -162,6 +162,7 @@ struct mlx4_ib_ah {
struct mlx4_ib_dev {
struct ib_device ib_dev;
struct mlx4_dev *dev;
+ int num_ports;
void __iomem *uar_map;
struct mlx4_uar priv_uar;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index baa01deb243..39167a797f9 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -451,6 +451,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
{
+ int qpn;
int err;
mutex_init(&qp->mutex);
@@ -545,9 +546,17 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
}
}
- err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp);
+ if (sqpn) {
+ qpn = sqpn;
+ } else {
+ err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+ if (err)
+ goto err_wrid;
+ }
+
+ err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
if (err)
- goto err_wrid;
+ goto err_qpn;
/*
* Hardware wants QPN written in big-endian order (after
@@ -560,6 +569,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
return 0;
+err_qpn:
+ if (!sqpn)
+ mlx4_qp_release_range(dev->dev, qpn, 1);
+
err_wrid:
if (pd->uobject) {
if (!init_attr->srq)
@@ -655,6 +668,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
mlx4_ib_unlock_cqs(send_cq, recv_cq);
mlx4_qp_free(dev->dev, &qp->mqp);
+
+ if (!is_sqp(dev, qp))
+ mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
if (is_user) {
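
In the qp.c hunks above, a QP number is now reserved from a range allocator before the QP context is allocated, and the reservation is released on every failure path as well as when a non-special QP is destroyed (hence the !sqpn / !is_sqp() guards). Condensed into one function, the new life cycle looks like this sketch (error handling trimmed; signatures taken from the hunks above):

/* Reserve one QPN, bind a QP to it, release the reservation on failure. */
static int example_alloc_qp(struct mlx4_dev *mdev, struct mlx4_qp *mqp)
{
	int qpn, err;

	err = mlx4_qp_reserve_range(mdev, 1, 1, &qpn);	/* count 1, align 1 */
	if (err)
		return err;

	err = mlx4_qp_alloc(mdev, qpn, mqp);
	if (err)
		mlx4_qp_release_range(mdev, qpn, 1);	/* also done at destroy time */

	return err;
}
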
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 68ba5c3482e..e0c7dfabf2b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -507,6 +507,7 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev);
void ipoib_drain_cq(struct net_device *dev);
void ipoib_set_ethtool_ops(struct net_device *dev);
+int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 66af5c1a76e..e9795f60e5d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -42,6 +42,13 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
}
+static u32 ipoib_get_rx_csum(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ return test_bit(IPOIB_FLAG_CSUM, &priv->flags) &&
+ !test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+}
+
static int ipoib_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
@@ -129,7 +136,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
static const struct ethtool_ops ipoib_ethtool_ops = {
.get_drvinfo = ipoib_get_drvinfo,
- .get_tso = ethtool_op_get_tso,
+ .get_rx_csum = ipoib_get_rx_csum,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
.get_flags = ethtool_op_get_flags,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0e748aeeae9..28eb6f03c58 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -685,10 +685,6 @@ int ipoib_ib_dev_open(struct net_device *dev)
queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
round_jiffies_relative(HZ));
- init_timer(&priv->poll_timer);
- priv->poll_timer.function = ipoib_ib_tx_timer_func;
- priv->poll_timer.data = (unsigned long)dev;
-
set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
return 0;
@@ -906,6 +902,9 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
return -ENODEV;
}
+ setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
+ (unsigned long) dev);
+
if (dev->flags & IFF_UP) {
if (ipoib_ib_dev_open(dev)) {
ipoib_transport_dev_cleanup(dev);
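
Two things happen in the ipoib_ib.c hunks above: the TX poll timer setup moves from ipoib_ib_dev_open() to ipoib_ib_dev_init(), so it runs once per device rather than on every open, and the three-line init_timer() sequence is collapsed into setup_timer(). The two spellings are equivalent:

/* Old form: */
init_timer(&priv->poll_timer);
priv->poll_timer.function = ipoib_ib_tx_timer_func;
priv->poll_timer.data = (unsigned long) dev;

/* New form, same effect in a single call: */
setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func, (unsigned long) dev);
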
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c0ee514396d..fddded7900d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1173,11 +1173,48 @@ int ipoib_add_pkey_attr(struct net_device *dev)
return device_create_file(&dev->dev, &dev_attr_pkey);
}
+int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
+{
+ struct ib_device_attr *device_attr;
+ int result = -ENOMEM;
+
+ device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
+ if (!device_attr) {
+ printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
+ hca->name, sizeof *device_attr);
+ return result;
+ }
+
+ result = ib_query_device(hca, device_attr);
+ if (result) {
+ printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
+ hca->name, result);
+ kfree(device_attr);
+ return result;
+ }
+ priv->hca_caps = device_attr->device_cap_flags;
+
+ kfree(device_attr);
+
+ if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
+ set_bit(IPOIB_FLAG_CSUM, &priv->flags);
+ priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ }
+
+ if (lro)
+ priv->dev->features |= NETIF_F_LRO;
+
+ if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
+ priv->dev->features |= NETIF_F_TSO;
+
+ return 0;
+}
+
+
static struct net_device *ipoib_add_port(const char *format,
struct ib_device *hca, u8 port)
{
struct ipoib_dev_priv *priv;
- struct ib_device_attr *device_attr;
struct ib_port_attr attr;
int result = -ENOMEM;
@@ -1206,31 +1243,8 @@ static struct net_device *ipoib_add_port(const char *format,
goto device_init_failed;
}
- device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
- if (!device_attr) {
- printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
- hca->name, sizeof *device_attr);
+ if (ipoib_set_dev_features(priv, hca))
goto device_init_failed;
- }
-
- result = ib_query_device(hca, device_attr);
- if (result) {
- printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
- hca->name, result);
- kfree(device_attr);
- goto device_init_failed;
- }
- priv->hca_caps = device_attr->device_cap_flags;
-
- kfree(device_attr);
-
- if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
- set_bit(IPOIB_FLAG_CSUM, &priv->flags);
- priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- }
-
- if (lro)
- priv->dev->features |= NETIF_F_LRO;
/*
* Set the full membership bit, so that we join the right
@@ -1266,9 +1280,6 @@ static struct net_device *ipoib_add_port(const char *format,
goto event_failed;
}
- if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
- priv->dev->features |= NETIF_F_TSO;
-
result = register_netdev(priv->dev);
if (result) {
printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index b08eb56196d..2cf1a408871 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -93,6 +93,10 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
+ result = ipoib_set_dev_features(priv, ppriv->ca);
+ if (result)
+ goto device_init_failed;
+
priv->pkey = pkey;
memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
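
Factoring the capability probe out into ipoib_set_dev_features() lets ipoib_vlan_add() run the same probe for child interfaces; previously it ran only in ipoib_add_port(), so VLAN children never picked up checksum, LRO or TSO offloads. Condensed, the mapping it applies is the following sketch (types simplified; the real helper also sets IPOIB_FLAG_CSUM and obtains the capability flags via ib_query_device()):

static void example_apply_hca_caps(struct net_device *dev, u32 hca_caps, int lro)
{
	if (hca_caps & IB_DEVICE_UD_IP_CSUM)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	if (lro)
		dev->features |= NETIF_F_LRO;

	if ((dev->features & NETIF_F_SG) && (hca_caps & IB_DEVICE_UD_TSO))
		dev->features |= NETIF_F_TSO;
}
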
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f1ef33dfd8c..1c615804ea7 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
-obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o
+obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
quiet_cmd_unroll = UNROLL $@
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 682ef9e6acd..ce26c84af06 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -23,7 +23,7 @@
#include <asm/page.h>
#include <asm/unaligned.h>
-#include "dm.h"
+#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)
@@ -56,6 +56,7 @@ struct dm_crypt_io {
atomic_t pending;
int error;
sector_t sector;
+ struct dm_crypt_io *base_io;
};
struct dm_crypt_request {
@@ -93,7 +94,6 @@ struct crypt_config {
struct workqueue_struct *io_queue;
struct workqueue_struct *crypt_queue;
- wait_queue_head_t writeq;
/*
* crypto related data
@@ -534,6 +534,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
io->base_bio = bio;
io->sector = sector;
io->error = 0;
+ io->base_io = NULL;
atomic_set(&io->pending, 0);
return io;
@@ -547,6 +548,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
+ * If base_io is set, wait for the last fragment to complete.
*/
static void crypt_dec_pending(struct dm_crypt_io *io)
{
@@ -555,7 +557,14 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
if (!atomic_dec_and_test(&io->pending))
return;
- bio_endio(io->base_bio, io->error);
+ if (likely(!io->base_io))
+ bio_endio(io->base_bio, io->error);
+ else {
+ if (io->error && !io->base_io->error)
+ io->base_io->error = io->error;
+ crypt_dec_pending(io->base_io);
+ }
+
mempool_free(io, cc->io_pool);
}
@@ -646,10 +655,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
static void kcryptd_io_write(struct dm_crypt_io *io)
{
struct bio *clone = io->ctx.bio_out;
- struct crypt_config *cc = io->target->private;
-
generic_make_request(clone);
- wake_up(&cc->writeq);
}
static void kcryptd_io(struct work_struct *work)
@@ -688,7 +694,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
clone->bi_sector = cc->start + io->sector;
- io->sector += bio_sectors(clone);
if (async)
kcryptd_queue_io(io);
@@ -700,16 +705,18 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct bio *clone;
+ struct dm_crypt_io *new_io;
int crypt_finished;
unsigned out_of_pages = 0;
unsigned remaining = io->base_bio->bi_size;
+ sector_t sector = io->sector;
int r;
/*
* Prevent io from disappearing until this function completes.
*/
crypt_inc_pending(io);
- crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
+ crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
/*
* The allocated buffers can be smaller than the whole bio,
@@ -726,6 +733,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
io->ctx.idx_out = 0;
remaining -= clone->bi_size;
+ sector += bio_sectors(clone);
crypt_inc_pending(io);
r = crypt_convert(cc, &io->ctx);
@@ -741,6 +749,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
*/
if (unlikely(r < 0))
break;
+
+ io->sector = sector;
}
/*
@@ -750,8 +760,33 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
if (unlikely(out_of_pages))
congestion_wait(WRITE, HZ/100);
- if (unlikely(remaining))
- wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+ /*
+ * With async crypto it is unsafe to share the crypto context
+ * between fragments, so switch to a new dm_crypt_io structure.
+ */
+ if (unlikely(!crypt_finished && remaining)) {
+ new_io = crypt_io_alloc(io->target, io->base_bio,
+ sector);
+ crypt_inc_pending(new_io);
+ crypt_convert_init(cc, &new_io->ctx, NULL,
+ io->base_bio, sector);
+ new_io->ctx.idx_in = io->ctx.idx_in;
+ new_io->ctx.offset_in = io->ctx.offset_in;
+
+ /*
+ * Fragments after the first use the base_io
+ * pending count.
+ */
+ if (!io->base_io)
+ new_io->base_io = io;
+ else {
+ new_io->base_io = io->base_io;
+ crypt_inc_pending(io->base_io);
+ crypt_dec_pending(io);
+ }
+
+ io = new_io;
+ }
}
crypt_dec_pending(io);
@@ -1078,7 +1113,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_crypt_queue;
}
- init_waitqueue_head(&cc->writeq);
ti->private = cc;
return 0;
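
The dm-crypt hunks above drop the writeq/wait_event pair that stalled the worker until all in-flight fragments of a large write had drained. Because the crypto context cannot safely be shared between fragments with async crypto, each further fragment now gets its own dm_crypt_io chained to a base_io, and completion is reference counted so bio_endio() on the original bio fires exactly once. Stripped of the dm-crypt specifics, the chaining reduces to this illustration (not dm-crypt code):

struct frag {
	atomic_t pending;	/* outstanding work for this fragment */
	int error;
	struct frag *base;	/* NULL for the first fragment */
};

static void frag_done(struct frag *f)
{
	if (!atomic_dec_and_test(&f->pending))
		return;

	if (f->base) {
		/* fold the error into the base and drop our reference on it */
		if (f->error && !f->base->error)
			f->base->error = f->error;
		frag_done(f->base);	/* recursion depth is at most one */
	}
	/* else: whole request finished -> end the original bio here */
}
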
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index bdd37f881c4..848b381f117 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -13,7 +13,8 @@
#include <linux/bio.h>
#include <linux/slab.h>
-#include "dm.h"
+#include <linux/device-mapper.h>
+
#include "dm-bio-list.h"
#define DM_MSG_PREFIX "delay"
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 769ab677f8e..01590f3e000 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -7,7 +7,6 @@
* This file is released under the GPL.
*/
-#include "dm.h"
#include "dm-snap.h"
#include <linux/mm.h>
@@ -105,6 +104,11 @@ struct pstore {
void *area;
/*
+ * An area of zeros used to clear the next area.
+ */
+ void *zero_area;
+
+ /*
* Used to keep track of which metadata area the data in
* 'chunk' refers to.
*/
@@ -149,6 +153,13 @@ static int alloc_area(struct pstore *ps)
if (!ps->area)
return r;
+ ps->zero_area = vmalloc(len);
+ if (!ps->zero_area) {
+ vfree(ps->area);
+ return r;
+ }
+ memset(ps->zero_area, 0, len);
+
return 0;
}
@@ -156,6 +167,8 @@ static void free_area(struct pstore *ps)
{
vfree(ps->area);
ps->area = NULL;
+ vfree(ps->zero_area);
+ ps->zero_area = NULL;
}
struct mdata_req {
@@ -220,25 +233,41 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
* Read or write a metadata area. Remembering to skip the first
* chunk which holds the header.
*/
-static int area_io(struct pstore *ps, chunk_t area, int rw)
+static int area_io(struct pstore *ps, int rw)
{
int r;
chunk_t chunk;
- chunk = area_location(ps, area);
+ chunk = area_location(ps, ps->current_area);
r = chunk_io(ps, chunk, rw, 0);
if (r)
return r;
- ps->current_area = area;
return 0;
}
-static int zero_area(struct pstore *ps, chunk_t area)
+static void zero_memory_area(struct pstore *ps)
{
memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
- return area_io(ps, area, WRITE);
+}
+
+static int zero_disk_area(struct pstore *ps, chunk_t area)
+{
+ struct dm_io_region where = {
+ .bdev = ps->snap->cow->bdev,
+ .sector = ps->snap->chunk_size * area_location(ps, area),
+ .count = ps->snap->chunk_size,
+ };
+ struct dm_io_request io_req = {
+ .bi_rw = WRITE,
+ .mem.type = DM_IO_VMA,
+ .mem.ptr.vma = ps->zero_area,
+ .client = ps->io_client,
+ .notify.fn = NULL,
+ };
+
+ return dm_io(&io_req, 1, &where, NULL);
}
static int read_header(struct pstore *ps, int *new_snapshot)
@@ -411,15 +440,14 @@ static int insert_exceptions(struct pstore *ps, int *full)
static int read_exceptions(struct pstore *ps)
{
- chunk_t area;
int r, full = 1;
/*
 * Keep reading chunks and inserting exceptions until
* we find a partially full area.
*/
- for (area = 0; full; area++) {
- r = area_io(ps, area, READ);
+ for (ps->current_area = 0; full; ps->current_area++) {
+ r = area_io(ps, READ);
if (r)
return r;
@@ -428,6 +456,8 @@ static int read_exceptions(struct pstore *ps)
return r;
}
+ ps->current_area--;
+
return 0;
}
@@ -486,12 +516,13 @@ static int persistent_read_metadata(struct exception_store *store)
return r;
}
- r = zero_area(ps, 0);
+ ps->current_area = 0;
+ zero_memory_area(ps);
+ r = zero_disk_area(ps, 0);
if (r) {
- DMWARN("zero_area(0) failed");
+ DMWARN("zero_disk_area(0) failed");
return r;
}
-
} else {
/*
* Sanity checks.
@@ -551,7 +582,6 @@ static void persistent_commit(struct exception_store *store,
void (*callback) (void *, int success),
void *callback_context)
{
- int r;
unsigned int i;
struct pstore *ps = get_info(store);
struct disk_exception de;
@@ -572,33 +602,41 @@ static void persistent_commit(struct exception_store *store,
cb->context = callback_context;
/*
- * If there are no more exceptions in flight, or we have
- * filled this metadata area we commit the exceptions to
- * disk.
+ * If there are exceptions in flight and we have not yet
+ * filled this metadata area there's nothing more to do.
*/
- if (atomic_dec_and_test(&ps->pending_count) ||
- (ps->current_committed == ps->exceptions_per_area)) {
- r = area_io(ps, ps->current_area, WRITE);
- if (r)
- ps->valid = 0;
+ if (!atomic_dec_and_test(&ps->pending_count) &&
+ (ps->current_committed != ps->exceptions_per_area))
+ return;
- /*
- * Have we completely filled the current area ?
- */
- if (ps->current_committed == ps->exceptions_per_area) {
- ps->current_committed = 0;
- r = zero_area(ps, ps->current_area + 1);
- if (r)
- ps->valid = 0;
- }
+ /*
+ * If we completely filled the current area, then wipe the next one.
+ */
+ if ((ps->current_committed == ps->exceptions_per_area) &&
+ zero_disk_area(ps, ps->current_area + 1))
+ ps->valid = 0;
- for (i = 0; i < ps->callback_count; i++) {
- cb = ps->callbacks + i;
- cb->callback(cb->context, r == 0 ? 1 : 0);
- }
+ /*
+ * Commit exceptions to disk.
+ */
+ if (ps->valid && area_io(ps, WRITE))
+ ps->valid = 0;
- ps->callback_count = 0;
+ /*
+ * Advance to the next area if this one is full.
+ */
+ if (ps->current_committed == ps->exceptions_per_area) {
+ ps->current_committed = 0;
+ ps->current_area++;
+ zero_memory_area(ps);
}
+
+ for (i = 0; i < ps->callback_count; i++) {
+ cb = ps->callbacks + i;
+ cb->callback(cb->context, ps->valid);
+ }
+
+ ps->callback_count = 0;
}
static void persistent_drop(struct exception_store *store)
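
zero_disk_area() above wipes the next on-disk metadata area from the dedicated, preallocated zero_area buffer, decoupling on-disk zeroing from the in-core ps->area buffer (which zero_memory_area() now resets separately). The pattern is a plain synchronous dm_io write from vmalloc'ed memory, sketched below; the io_client is assumed to have been set up elsewhere, e.g. with dm_io_client_create():

static int example_write_vma_sync(struct dm_io_client *client,
				  struct block_device *bdev,
				  sector_t sector, sector_t count, void *vma)
{
	struct dm_io_region where = {
		.bdev = bdev,
		.sector = sector,
		.count = count,
	};
	struct dm_io_request req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = vma,
		.client = client,
		.notify.fn = NULL,	/* no callback => dm_io() is synchronous */
	};

	return dm_io(&req, 1, &where, NULL);
}
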
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 4789c42d9a3..2fd6d445063 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -5,7 +5,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
#include <linux/bio.h>
#include <linux/mempool.h>
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index dca401dc70a..777c948180f 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -988,9 +988,9 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
return r;
}
-static inline int get_mode(struct dm_ioctl *param)
+static inline fmode_t get_mode(struct dm_ioctl *param)
{
- int mode = FMODE_READ | FMODE_WRITE;
+ fmode_t mode = FMODE_READ | FMODE_WRITE;
if (param->flags & DM_READONLY_FLAG)
mode = FMODE_READ;
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 996802b8a45..3073618269e 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -22,6 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>
#include "dm.h"
@@ -268,6 +269,17 @@ static void push(struct list_head *jobs, struct kcopyd_job *job)
spin_unlock_irqrestore(&kc->job_lock, flags);
}
+
+static void push_head(struct list_head *jobs, struct kcopyd_job *job)
+{
+ unsigned long flags;
+ struct dm_kcopyd_client *kc = job->kc;
+
+ spin_lock_irqsave(&kc->job_lock, flags);
+ list_add(&job->list, jobs);
+ spin_unlock_irqrestore(&kc->job_lock, flags);
+}
+
/*
* These three functions process 1 item from the corresponding
* job list.
@@ -398,7 +410,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
* We couldn't service this job ATM, so
* push this job back onto the list.
*/
- push(jobs, job);
+ push_head(jobs, job);
break;
}
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 6449bcdf84c..44042becad8 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -5,12 +5,12 @@
*/
#include "dm.h"
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
+#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "linear"
@@ -110,20 +110,11 @@ static int linear_status(struct dm_target *ti, status_type_t type,
return 0;
}
-static int linear_ioctl(struct dm_target *ti, struct inode *inode,
- struct file *filp, unsigned int cmd,
+static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
unsigned long arg)
{
struct linear_c *lc = (struct linear_c *) ti->private;
- struct block_device *bdev = lc->dev->bdev;
- struct file fake_file = {};
- struct dentry fake_dentry = {};
-
- fake_file.f_mode = lc->dev->mode;
- fake_file.f_path.dentry = &fake_dentry;
- fake_dentry.d_inode = bdev->bd_inode;
-
- return blkdev_driver_ioctl(bdev->bd_inode, &fake_file, bdev->bd_disk, cmd, arg);
+ return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
}
static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 5b48478c79f..a8c0fc79ca7 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -12,7 +12,7 @@
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
-#include "dm.h"
+#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "dirty region log"
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 9bf3460c554..4840733cd90 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -5,7 +5,8 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
+
#include "dm-path-selector.h"
#include "dm-bio-list.h"
#include "dm-bio-record.h"
@@ -1395,19 +1396,15 @@ error:
return -EINVAL;
}
-static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
- struct file *filp, unsigned int cmd,
+static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
unsigned long arg)
{
struct multipath *m = (struct multipath *) ti->private;
struct block_device *bdev = NULL;
+ fmode_t mode = 0;
unsigned long flags;
- struct file fake_file = {};
- struct dentry fake_dentry = {};
int r = 0;
- fake_file.f_path.dentry = &fake_dentry;
-
spin_lock_irqsave(&m->lock, flags);
if (!m->current_pgpath)
@@ -1415,8 +1412,7 @@ static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
if (m->current_pgpath) {
bdev = m->current_pgpath->path.dev->bdev;
- fake_dentry.d_inode = bdev->bd_inode;
- fake_file.f_mode = m->current_pgpath->path.dev->mode;
+ mode = m->current_pgpath->path.dev->mode;
}
if (m->queue_io)
@@ -1426,8 +1422,7 @@ static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
spin_unlock_irqrestore(&m->lock, flags);
- return r ? : blkdev_driver_ioctl(bdev->bd_inode, &fake_file,
- bdev->bd_disk, cmd, arg);
+ return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
/*-----------------------------------------------------------------
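
Both dm-linear and dm-multipath above used to fabricate a struct file / struct dentry pair just so they could call blkdev_driver_ioctl(); the new __blkdev_driver_ioctl() takes the block device and an fmode_t directly, and the target ioctl method loses its inode/file arguments. The general shape of a target ioctl after this change is the following sketch, with a hypothetical target context:

/* Hypothetical context; 'dev' would be a struct dm_dev * taken with
 * dm_get_device() in the target constructor. */
struct example_ctx {
	struct dm_dev *dev;
};

static int example_ioctl(struct dm_target *ti, unsigned int cmd,
			 unsigned long arg)
{
	struct example_ctx *ctx = ti->private;

	return __blkdev_driver_ioctl(ctx->dev->bdev, ctx->dev->mode, cmd, arg);
}
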
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index ca1bb636a3e..96ea226155b 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -9,7 +9,8 @@
* Path selector registration.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
+
#include "dm-path-selector.h"
#include <linux/slab.h>
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 29913e42c4a..92dcc06832a 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1,30 +1,30 @@
/*
* Copyright (C) 2003 Sistina Software Limited.
+ * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
-#include "dm.h"
#include "dm-bio-list.h"
#include "dm-bio-record.h"
-#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/vmalloc.h>
#include <linux/workqueue.h>
-#include <linux/log2.h>
-#include <linux/hardirq.h>
+#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
+#include <linux/dm-region-hash.h>
#define DM_MSG_PREFIX "raid1"
+
+#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
+#define DM_KCOPYD_PAGES 64
#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -32,87 +32,6 @@
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
/*-----------------------------------------------------------------
- * Region hash
- *
- * The mirror splits itself up into discrete regions. Each
- * region can be in one of three states: clean, dirty,
- * nosync. There is no need to put clean regions in the hash.
- *
- * In addition to being present in the hash table a region _may_
- * be present on one of three lists.
- *
- * clean_regions: Regions on this list have no io pending to
- * them, they are in sync, we are no longer interested in them,
- * they are dull. rh_update_states() will remove them from the
- * hash table.
- *
- * quiesced_regions: These regions have been spun down, ready
- * for recovery. rh_recovery_start() will remove regions from
- * this list and hand them to kmirrord, which will schedule the
- * recovery io with kcopyd.
- *
- * recovered_regions: Regions that kcopyd has successfully
- * recovered. rh_update_states() will now schedule any delayed
- * io, up the recovery_count, and remove the region from the
- * hash.
- *
- * There are 2 locks:
- * A rw spin lock 'hash_lock' protects just the hash table,
- * this is never held in write mode from interrupt context,
- * which I believe means that we only have to disable irqs when
- * doing a write lock.
- *
- * An ordinary spin lock 'region_lock' that protects the three
- * lists in the region_hash, with the 'state', 'list' and
- * 'bhs_delayed' fields of the regions. This is used from irq
- * context, so all other uses will have to suspend local irqs.
- *---------------------------------------------------------------*/
-struct mirror_set;
-struct region_hash {
- struct mirror_set *ms;
- uint32_t region_size;
- unsigned region_shift;
-
- /* holds persistent region state */
- struct dm_dirty_log *log;
-
- /* hash table */
- rwlock_t hash_lock;
- mempool_t *region_pool;
- unsigned int mask;
- unsigned int nr_buckets;
- struct list_head *buckets;
-
- spinlock_t region_lock;
- atomic_t recovery_in_flight;
- struct semaphore recovery_count;
- struct list_head clean_regions;
- struct list_head quiesced_regions;
- struct list_head recovered_regions;
- struct list_head failed_recovered_regions;
-};
-
-enum {
- RH_CLEAN,
- RH_DIRTY,
- RH_NOSYNC,
- RH_RECOVERING
-};
-
-struct region {
- struct region_hash *rh; /* FIXME: can we get rid of this ? */
- region_t key;
- int state;
-
- struct list_head hash_list;
- struct list_head list;
-
- atomic_t pending;
- struct bio_list delayed_bios;
-};
-
-
-/*-----------------------------------------------------------------
* Mirror set structures.
*---------------------------------------------------------------*/
enum dm_raid1_error {
@@ -132,8 +51,7 @@ struct mirror {
struct mirror_set {
struct dm_target *ti;
struct list_head list;
- struct region_hash rh;
- struct dm_kcopyd_client *kcopyd_client;
+
uint64_t features;
spinlock_t lock; /* protects the lists */
@@ -141,6 +59,8 @@ struct mirror_set {
struct bio_list writes;
struct bio_list failures;
+ struct dm_region_hash *rh;
+ struct dm_kcopyd_client *kcopyd_client;
struct dm_io_client *io_client;
mempool_t *read_record_pool;
@@ -159,25 +79,14 @@ struct mirror_set {
struct work_struct trigger_event;
- unsigned int nr_mirrors;
+ unsigned nr_mirrors;
struct mirror mirror[0];
};
-/*
- * Conversion fns
- */
-static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
-{
- return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
-}
-
-static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
+static void wakeup_mirrord(void *context)
{
- return region << rh->region_shift;
-}
+ struct mirror_set *ms = context;
-static void wake(struct mirror_set *ms)
-{
queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}
@@ -186,7 +95,7 @@ static void delayed_wake_fn(unsigned long data)
struct mirror_set *ms = (struct mirror_set *) data;
clear_bit(0, &ms->timer_pending);
- wake(ms);
+ wakeup_mirrord(ms);
}
static void delayed_wake(struct mirror_set *ms)
@@ -200,473 +109,34 @@ static void delayed_wake(struct mirror_set *ms)
add_timer(&ms->timer);
}
-/* FIXME move this */
-static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
-
-#define MIN_REGIONS 64
-#define MAX_RECOVERY 1
-static int rh_init(struct region_hash *rh, struct mirror_set *ms,
- struct dm_dirty_log *log, uint32_t region_size,
- region_t nr_regions)
+static void wakeup_all_recovery_waiters(void *context)
{
- unsigned int nr_buckets, max_buckets;
- size_t i;
-
- /*
- * Calculate a suitable number of buckets for our hash
- * table.
- */
- max_buckets = nr_regions >> 6;
- for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
- ;
- nr_buckets >>= 1;
-
- rh->ms = ms;
- rh->log = log;
- rh->region_size = region_size;
- rh->region_shift = ffs(region_size) - 1;
- rwlock_init(&rh->hash_lock);
- rh->mask = nr_buckets - 1;
- rh->nr_buckets = nr_buckets;
-
- rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
- if (!rh->buckets) {
- DMERR("unable to allocate region hash memory");
- return -ENOMEM;
- }
-
- for (i = 0; i < nr_buckets; i++)
- INIT_LIST_HEAD(rh->buckets + i);
-
- spin_lock_init(&rh->region_lock);
- sema_init(&rh->recovery_count, 0);
- atomic_set(&rh->recovery_in_flight, 0);
- INIT_LIST_HEAD(&rh->clean_regions);
- INIT_LIST_HEAD(&rh->quiesced_regions);
- INIT_LIST_HEAD(&rh->recovered_regions);
- INIT_LIST_HEAD(&rh->failed_recovered_regions);
-
- rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
- sizeof(struct region));
- if (!rh->region_pool) {
- vfree(rh->buckets);
- rh->buckets = NULL;
- return -ENOMEM;
- }
-
- return 0;
+ wake_up_all(&_kmirrord_recovery_stopped);
}
-static void rh_exit(struct region_hash *rh)
-{
- unsigned int h;
- struct region *reg, *nreg;
-
- BUG_ON(!list_empty(&rh->quiesced_regions));
- for (h = 0; h < rh->nr_buckets; h++) {
- list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
- BUG_ON(atomic_read(&reg->pending));
- mempool_free(reg, rh->region_pool);
- }
- }
-
- if (rh->log)
- dm_dirty_log_destroy(rh->log);
- if (rh->region_pool)
- mempool_destroy(rh->region_pool);
- vfree(rh->buckets);
-}
-
-#define RH_HASH_MULT 2654435387U
-
-static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
-{
- return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
-}
-
-static struct region *__rh_lookup(struct region_hash *rh, region_t region)
-{
- struct region *reg;
-
- list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
- if (reg->key == region)
- return reg;
-
- return NULL;
-}
-
-static void __rh_insert(struct region_hash *rh, struct region *reg)
-{
- unsigned int h = rh_hash(rh, reg->key);
- list_add(&reg->hash_list, rh->buckets + h);
-}
-
-static struct region *__rh_alloc(struct region_hash *rh, region_t region)
-{
- struct region *reg, *nreg;
-
- read_unlock(&rh->hash_lock);
- nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
- if (unlikely(!nreg))
- nreg = kmalloc(sizeof(struct region), GFP_NOIO);
- nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
- RH_CLEAN : RH_NOSYNC;
- nreg->rh = rh;
- nreg->key = region;
-
- INIT_LIST_HEAD(&nreg->list);
-
- atomic_set(&nreg->pending, 0);
- bio_list_init(&nreg->delayed_bios);
- write_lock_irq(&rh->hash_lock);
-
- reg = __rh_lookup(rh, region);
- if (reg)
- /* we lost the race */
- mempool_free(nreg, rh->region_pool);
-
- else {
- __rh_insert(rh, nreg);
- if (nreg->state == RH_CLEAN) {
- spin_lock(&rh->region_lock);
- list_add(&nreg->list, &rh->clean_regions);
- spin_unlock(&rh->region_lock);
- }
- reg = nreg;
- }
- write_unlock_irq(&rh->hash_lock);
- read_lock(&rh->hash_lock);
-
- return reg;
-}
-
-static inline struct region *__rh_find(struct region_hash *rh, region_t region)
-{
- struct region *reg;
-
- reg = __rh_lookup(rh, region);
- if (!reg)
- reg = __rh_alloc(rh, region);
-
- return reg;
-}
-
-static int rh_state(struct region_hash *rh, region_t region, int may_block)
-{
- int r;
- struct region *reg;
-
- read_lock(&rh->hash_lock);
- reg = __rh_lookup(rh, region);
- read_unlock(&rh->hash_lock);
-
- if (reg)
- return reg->state;
-
- /*
- * The region wasn't in the hash, so we fall back to the
- * dirty log.
- */
- r = rh->log->type->in_sync(rh->log, region, may_block);
-
- /*
- * Any error from the dirty log (eg. -EWOULDBLOCK) gets
- * taken as a RH_NOSYNC
- */
- return r == 1 ? RH_CLEAN : RH_NOSYNC;
-}
-
-static inline int rh_in_sync(struct region_hash *rh,
- region_t region, int may_block)
-{
- int state = rh_state(rh, region, may_block);
- return state == RH_CLEAN || state == RH_DIRTY;
-}
-
-static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
-{
- struct bio *bio;
-
- while ((bio = bio_list_pop(bio_list))) {
- queue_bio(ms, bio, WRITE);
- }
-}
-
-static void complete_resync_work(struct region *reg, int success)
-{
- struct region_hash *rh = reg->rh;
-
- rh->log->type->set_region_sync(rh->log, reg->key, success);
-
- /*
- * Dispatch the bios before we call 'wake_up_all'.
- * This is important because if we are suspending,
- * we want to know that recovery is complete and
- * the work queue is flushed. If we wake_up_all
- * before we dispatch_bios (queue bios and call wake()),
- * then we risk suspending before the work queue
- * has been properly flushed.
- */
- dispatch_bios(rh->ms, &reg->delayed_bios);
- if (atomic_dec_and_test(&rh->recovery_in_flight))
- wake_up_all(&_kmirrord_recovery_stopped);
- up(&rh->recovery_count);
-}
-
-static void rh_update_states(struct region_hash *rh)
-{
- struct region *reg, *next;
-
- LIST_HEAD(clean);
- LIST_HEAD(recovered);
- LIST_HEAD(failed_recovered);
-
- /*
- * Quickly grab the lists.
- */
- write_lock_irq(&rh->hash_lock);
- spin_lock(&rh->region_lock);
- if (!list_empty(&rh->clean_regions)) {
- list_splice_init(&rh->clean_regions, &clean);
-
- list_for_each_entry(reg, &clean, list)
- list_del(&reg->hash_list);
- }
-
- if (!list_empty(&rh->recovered_regions)) {
- list_splice_init(&rh->recovered_regions, &recovered);
-
- list_for_each_entry (reg, &recovered, list)
- list_del(&reg->hash_list);
- }
-
- if (!list_empty(&rh->failed_recovered_regions)) {
- list_splice_init(&rh->failed_recovered_regions,
- &failed_recovered);
-
- list_for_each_entry(reg, &failed_recovered, list)
- list_del(&reg->hash_list);
- }
-
- spin_unlock(&rh->region_lock);
- write_unlock_irq(&rh->hash_lock);
-
- /*
- * All the regions on the recovered and clean lists have
- * now been pulled out of the system, so no need to do
- * any more locking.
- */
- list_for_each_entry_safe (reg, next, &recovered, list) {
- rh->log->type->clear_region(rh->log, reg->key);
- complete_resync_work(reg, 1);
- mempool_free(reg, rh->region_pool);
- }
-
- list_for_each_entry_safe(reg, next, &failed_recovered, list) {
- complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
- mempool_free(reg, rh->region_pool);
- }
-
- list_for_each_entry_safe(reg, next, &clean, list) {
- rh->log->type->clear_region(rh->log, reg->key);
- mempool_free(reg, rh->region_pool);
- }
-
- rh->log->type->flush(rh->log);
-}
-
-static void rh_inc(struct region_hash *rh, region_t region)
-{
- struct region *reg;
-
- read_lock(&rh->hash_lock);
- reg = __rh_find(rh, region);
-
- spin_lock_irq(&rh->region_lock);
- atomic_inc(&reg->pending);
-
- if (reg->state == RH_CLEAN) {
- reg->state = RH_DIRTY;
- list_del_init(&reg->list); /* take off the clean list */
- spin_unlock_irq(&rh->region_lock);
-
- rh->log->type->mark_region(rh->log, reg->key);
- } else
- spin_unlock_irq(&rh->region_lock);
-
-
- read_unlock(&rh->hash_lock);
-}
-
-static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
-{
- struct bio *bio;
-
- for (bio = bios->head; bio; bio = bio->bi_next)
- rh_inc(rh, bio_to_region(rh, bio));
-}
-
-static void rh_dec(struct region_hash *rh, region_t region)
+static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
unsigned long flags;
- struct region *reg;
int should_wake = 0;
+ struct bio_list *bl;
- read_lock(&rh->hash_lock);
- reg = __rh_lookup(rh, region);
- read_unlock(&rh->hash_lock);
-
- spin_lock_irqsave(&rh->region_lock, flags);
- if (atomic_dec_and_test(&reg->pending)) {
- /*
- * There is no pending I/O for this region.
- * We can move the region to corresponding list for next action.
- * At this point, the region is not yet connected to any list.
- *
- * If the state is RH_NOSYNC, the region should be kept off
- * from clean list.
- * The hash entry for RH_NOSYNC will remain in memory
- * until the region is recovered or the map is reloaded.
- */
-
- /* do nothing for RH_NOSYNC */
- if (reg->state == RH_RECOVERING) {
- list_add_tail(&reg->list, &rh->quiesced_regions);
- } else if (reg->state == RH_DIRTY) {
- reg->state = RH_CLEAN;
- list_add(&reg->list, &rh->clean_regions);
- }
- should_wake = 1;
- }
- spin_unlock_irqrestore(&rh->region_lock, flags);
+ bl = (rw == WRITE) ? &ms->writes : &ms->reads;
+ spin_lock_irqsave(&ms->lock, flags);
+ should_wake = !(bl->head);
+ bio_list_add(bl, bio);
+ spin_unlock_irqrestore(&ms->lock, flags);
if (should_wake)
- wake(rh->ms);
-}
-
-/*
- * Starts quiescing a region in preparation for recovery.
- */
-static int __rh_recovery_prepare(struct region_hash *rh)
-{
- int r;
- struct region *reg;
- region_t region;
-
- /*
- * Ask the dirty log what's next.
- */
- r = rh->log->type->get_resync_work(rh->log, &region);
- if (r <= 0)
- return r;
-
- /*
- * Get this region, and start it quiescing by setting the
- * recovering flag.
- */
- read_lock(&rh->hash_lock);
- reg = __rh_find(rh, region);
- read_unlock(&rh->hash_lock);
-
- spin_lock_irq(&rh->region_lock);
- reg->state = RH_RECOVERING;
-
- /* Already quiesced ? */
- if (atomic_read(&reg->pending))
- list_del_init(&reg->list);
- else
- list_move(&reg->list, &rh->quiesced_regions);
-
- spin_unlock_irq(&rh->region_lock);
-
- return 1;
-}
-
-static void rh_recovery_prepare(struct region_hash *rh)
-{
- /* Extra reference to avoid race with rh_stop_recovery */
- atomic_inc(&rh->recovery_in_flight);
-
- while (!down_trylock(&rh->recovery_count)) {
- atomic_inc(&rh->recovery_in_flight);
- if (__rh_recovery_prepare(rh) <= 0) {
- atomic_dec(&rh->recovery_in_flight);
- up(&rh->recovery_count);
- break;
- }
- }
-
- /* Drop the extra reference */
- if (atomic_dec_and_test(&rh->recovery_in_flight))
- wake_up_all(&_kmirrord_recovery_stopped);
-}
-
-/*
- * Returns any quiesced regions.
- */
-static struct region *rh_recovery_start(struct region_hash *rh)
-{
- struct region *reg = NULL;
-
- spin_lock_irq(&rh->region_lock);
- if (!list_empty(&rh->quiesced_regions)) {
- reg = list_entry(rh->quiesced_regions.next,
- struct region, list);
- list_del_init(&reg->list); /* remove from the quiesced list */
- }
- spin_unlock_irq(&rh->region_lock);
-
- return reg;
-}
-
-static void rh_recovery_end(struct region *reg, int success)
-{
- struct region_hash *rh = reg->rh;
-
- spin_lock_irq(&rh->region_lock);
- if (success)
- list_add(&reg->list, &reg->rh->recovered_regions);
- else {
- reg->state = RH_NOSYNC;
- list_add(&reg->list, &reg->rh->failed_recovered_regions);
- }
- spin_unlock_irq(&rh->region_lock);
-
- wake(rh->ms);
+ wakeup_mirrord(ms);
}
-static int rh_flush(struct region_hash *rh)
+static void dispatch_bios(void *context, struct bio_list *bio_list)
{
- return rh->log->type->flush(rh->log);
-}
-
-static void rh_delay(struct region_hash *rh, struct bio *bio)
-{
- struct region *reg;
-
- read_lock(&rh->hash_lock);
- reg = __rh_find(rh, bio_to_region(rh, bio));
- bio_list_add(&reg->delayed_bios, bio);
- read_unlock(&rh->hash_lock);
-}
-
-static void rh_stop_recovery(struct region_hash *rh)
-{
- int i;
-
- /* wait for any recovering regions */
- for (i = 0; i < MAX_RECOVERY; i++)
- down(&rh->recovery_count);
-}
-
-static void rh_start_recovery(struct region_hash *rh)
-{
- int i;
-
- for (i = 0; i < MAX_RECOVERY; i++)
- up(&rh->recovery_count);
+ struct mirror_set *ms = context;
+ struct bio *bio;
- wake(rh->ms);
+ while ((bio = bio_list_pop(bio_list)))
+ queue_bio(ms, bio, WRITE);
}
#define MIN_READ_RECORDS 20
@@ -776,8 +246,8 @@ out:
static void recovery_complete(int read_err, unsigned long write_err,
void *context)
{
- struct region *reg = (struct region *)context;
- struct mirror_set *ms = reg->rh->ms;
+ struct dm_region *reg = context;
+ struct mirror_set *ms = dm_rh_region_context(reg);
int m, bit = 0;
if (read_err) {
@@ -803,31 +273,33 @@ static void recovery_complete(int read_err, unsigned long write_err,
}
}
- rh_recovery_end(reg, !(read_err || write_err));
+ dm_rh_recovery_end(reg, !(read_err || write_err));
}
-static int recover(struct mirror_set *ms, struct region *reg)
+static int recover(struct mirror_set *ms, struct dm_region *reg)
{
int r;
- unsigned int i;
+ unsigned i;
struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
struct mirror *m;
unsigned long flags = 0;
+ region_t key = dm_rh_get_region_key(reg);
+ sector_t region_size = dm_rh_get_region_size(ms->rh);
/* fill in the source */
m = get_default_mirror(ms);
from.bdev = m->dev->bdev;
- from.sector = m->offset + region_to_sector(reg->rh, reg->key);
- if (reg->key == (ms->nr_regions - 1)) {
+ from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
+ if (key == (ms->nr_regions - 1)) {
/*
* The final region may be smaller than
* region_size.
*/
- from.count = ms->ti->len & (reg->rh->region_size - 1);
+ from.count = ms->ti->len & (region_size - 1);
if (!from.count)
- from.count = reg->rh->region_size;
+ from.count = region_size;
} else
- from.count = reg->rh->region_size;
+ from.count = region_size;
/* fill in the destinations */
for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
@@ -836,7 +308,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
m = ms->mirror + i;
dest->bdev = m->dev->bdev;
- dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
+ dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
dest->count = from.count;
dest++;
}
@@ -853,22 +325,22 @@ static int recover(struct mirror_set *ms, struct region *reg)
static void do_recovery(struct mirror_set *ms)
{
+ struct dm_region *reg;
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
int r;
- struct region *reg;
- struct dm_dirty_log *log = ms->rh.log;
/*
* Start quiescing some regions.
*/
- rh_recovery_prepare(&ms->rh);
+ dm_rh_recovery_prepare(ms->rh);
/*
* Copy any already quiesced regions.
*/
- while ((reg = rh_recovery_start(&ms->rh))) {
+ while ((reg = dm_rh_recovery_start(ms->rh))) {
r = recover(ms, reg);
if (r)
- rh_recovery_end(reg, 0);
+ dm_rh_recovery_end(reg, 0);
}
/*
@@ -909,9 +381,10 @@ static int default_ok(struct mirror *m)
static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
- region_t region = bio_to_region(&ms->rh, bio);
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
+ region_t region = dm_rh_bio_to_region(ms->rh, bio);
- if (ms->rh.log->type->in_sync(ms->rh.log, region, 0))
+ if (log->type->in_sync(log, region, 0))
return choose_mirror(ms, bio->bi_sector) ? 1 : 0;
return 0;
@@ -985,7 +458,14 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
map_region(&io, m, bio);
bio_set_m(bio, m);
- (void) dm_io(&io_req, 1, &io, NULL);
+ BUG_ON(dm_io(&io_req, 1, &io, NULL));
+}
+
+static inline int region_in_sync(struct mirror_set *ms, region_t region,
+ int may_block)
+{
+ int state = dm_rh_get_state(ms->rh, region, may_block);
+ return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}
static void do_reads(struct mirror_set *ms, struct bio_list *reads)
@@ -995,13 +475,13 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
struct mirror *m;
while ((bio = bio_list_pop(reads))) {
- region = bio_to_region(&ms->rh, bio);
+ region = dm_rh_bio_to_region(ms->rh, bio);
m = get_default_mirror(ms);
/*
* We can only read balance if the region is in sync.
*/
- if (likely(rh_in_sync(&ms->rh, region, 1)))
+ if (likely(region_in_sync(ms, region, 1)))
m = choose_mirror(ms, bio->bi_sector);
else if (m && atomic_read(&m->error_count))
m = NULL;
@@ -1024,57 +504,6 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
* NOSYNC: increment pending, just write to the default mirror
*---------------------------------------------------------------*/
-/* __bio_mark_nosync
- * @ms
- * @bio
- * @done
- * @error
- *
- * The bio was written on some mirror(s) but failed on other mirror(s).
- * We can successfully endio the bio but should avoid the region being
- * marked clean by setting the state RH_NOSYNC.
- *
- * This function is _not_ safe in interrupt context!
- */
-static void __bio_mark_nosync(struct mirror_set *ms,
- struct bio *bio, unsigned done, int error)
-{
- unsigned long flags;
- struct region_hash *rh = &ms->rh;
- struct dm_dirty_log *log = ms->rh.log;
- struct region *reg;
- region_t region = bio_to_region(rh, bio);
- int recovering = 0;
-
- /* We must inform the log that the sync count has changed. */
- log->type->set_region_sync(log, region, 0);
- ms->in_sync = 0;
-
- read_lock(&rh->hash_lock);
- reg = __rh_find(rh, region);
- read_unlock(&rh->hash_lock);
-
- /* region hash entry should exist because write was in-flight */
- BUG_ON(!reg);
- BUG_ON(!list_empty(&reg->list));
-
- spin_lock_irqsave(&rh->region_lock, flags);
- /*
- * Possible cases:
- * 1) RH_DIRTY
- * 2) RH_NOSYNC: was dirty, other preceeding writes failed
- * 3) RH_RECOVERING: flushing pending writes
- * Either case, the region should have not been connected to list.
- */
- recovering = (reg->state == RH_RECOVERING);
- reg->state = RH_NOSYNC;
- BUG_ON(!list_empty(&reg->list));
- spin_unlock_irqrestore(&rh->region_lock, flags);
-
- bio_endio(bio, error);
- if (recovering)
- complete_resync_work(reg, 0);
-}
static void write_callback(unsigned long error, void *context)
{
@@ -1119,7 +548,7 @@ static void write_callback(unsigned long error, void *context)
bio_list_add(&ms->failures, bio);
spin_unlock_irqrestore(&ms->lock, flags);
if (should_wake)
- wake(ms);
+ wakeup_mirrord(ms);
return;
}
out:
@@ -1149,7 +578,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
*/
bio_set_m(bio, get_default_mirror(ms));
- (void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
+ BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
@@ -1169,18 +598,19 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
bio_list_init(&recover);
while ((bio = bio_list_pop(writes))) {
- state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
+ state = dm_rh_get_state(ms->rh,
+ dm_rh_bio_to_region(ms->rh, bio), 1);
switch (state) {
- case RH_CLEAN:
- case RH_DIRTY:
+ case DM_RH_CLEAN:
+ case DM_RH_DIRTY:
this_list = &sync;
break;
- case RH_NOSYNC:
+ case DM_RH_NOSYNC:
this_list = &nosync;
break;
- case RH_RECOVERING:
+ case DM_RH_RECOVERING:
this_list = &recover;
break;
}
@@ -1193,9 +623,9 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
* be written to (writes to recover regions are going to
* be delayed).
*/
- rh_inc_pending(&ms->rh, &sync);
- rh_inc_pending(&ms->rh, &nosync);
- ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;
+ dm_rh_inc_pending(ms->rh, &sync);
+ dm_rh_inc_pending(ms->rh, &nosync);
+ ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;
/*
* Dispatch io.
@@ -1204,13 +634,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
spin_lock_irq(&ms->lock);
bio_list_merge(&ms->failures, &sync);
spin_unlock_irq(&ms->lock);
- wake(ms);
+ wakeup_mirrord(ms);
} else
while ((bio = bio_list_pop(&sync)))
do_write(ms, bio);
while ((bio = bio_list_pop(&recover)))
- rh_delay(&ms->rh, bio);
+ dm_rh_delay(ms->rh, bio);
while ((bio = bio_list_pop(&nosync))) {
map_bio(get_default_mirror(ms), bio);
@@ -1227,7 +657,8 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
if (!ms->log_failure) {
while ((bio = bio_list_pop(failures)))
- __bio_mark_nosync(ms, bio, bio->bi_size, 0);
+ ms->in_sync = 0;
+ dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
return;
}
@@ -1280,8 +711,8 @@ static void trigger_event(struct work_struct *work)
*---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
- struct mirror_set *ms =container_of(work, struct mirror_set,
- kmirrord_work);
+ struct mirror_set *ms = container_of(work, struct mirror_set,
+ kmirrord_work);
struct bio_list reads, writes, failures;
unsigned long flags;
@@ -1294,7 +725,7 @@ static void do_mirror(struct work_struct *work)
bio_list_init(&ms->failures);
spin_unlock_irqrestore(&ms->lock, flags);
- rh_update_states(&ms->rh);
+ dm_rh_update_states(ms->rh, errors_handled(ms));
do_recovery(ms);
do_reads(ms, &reads);
do_writes(ms, &writes);
@@ -1303,7 +734,6 @@ static void do_mirror(struct work_struct *work)
dm_table_unplug_all(ms->ti->table);
}
-
/*-----------------------------------------------------------------
* Target functions
*---------------------------------------------------------------*/
@@ -1315,9 +745,6 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
size_t len;
struct mirror_set *ms = NULL;
- if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
- return NULL;
-
len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
ms = kzalloc(len, GFP_KERNEL);
@@ -1353,7 +780,11 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
return NULL;
}
- if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
+ ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
+ wakeup_all_recovery_waiters,
+ ms->ti->begin, MAX_RECOVERY,
+ dl, region_size, ms->nr_regions);
+ if (IS_ERR(ms->rh)) {
ti->error = "Error creating dirty region hash";
dm_io_client_destroy(ms->io_client);
mempool_destroy(ms->read_record_pool);
@@ -1371,7 +802,7 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
dm_put_device(ti, ms->mirror[m].dev);
dm_io_client_destroy(ms->io_client);
- rh_exit(&ms->rh);
+ dm_region_hash_destroy(ms->rh);
mempool_destroy(ms->read_record_pool);
kfree(ms);
}
@@ -1411,10 +842,10 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
* Create dirty log: log_type #log_params <log_params>
*/
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
- unsigned int argc, char **argv,
- unsigned int *args_used)
+ unsigned argc, char **argv,
+ unsigned *args_used)
{
- unsigned int param_count;
+ unsigned param_count;
struct dm_dirty_log *dl;
if (argc < 2) {
@@ -1545,7 +976,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->private = ms;
- ti->split_io = ms->rh.region_size;
+ ti->split_io = dm_rh_get_region_size(ms->rh);
ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
if (!ms->kmirrord_wq) {
@@ -1580,11 +1011,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err_destroy_wq;
}
- r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
+ r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
if (r)
goto err_destroy_wq;
- wake(ms);
+ wakeup_mirrord(ms);
return 0;
err_destroy_wq:
@@ -1605,22 +1036,6 @@ static void mirror_dtr(struct dm_target *ti)
free_context(ms, ti, ms->nr_mirrors);
}
-static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
-{
- unsigned long flags;
- int should_wake = 0;
- struct bio_list *bl;
-
- bl = (rw == WRITE) ? &ms->writes : &ms->reads;
- spin_lock_irqsave(&ms->lock, flags);
- should_wake = !(bl->head);
- bio_list_add(bl, bio);
- spin_unlock_irqrestore(&ms->lock, flags);
-
- if (should_wake)
- wake(ms);
-}
-
/*
* Mirror mapping function
*/
@@ -1631,16 +1046,16 @@ static int mirror_map(struct dm_target *ti, struct bio *bio,
struct mirror *m;
struct mirror_set *ms = ti->private;
struct dm_raid1_read_record *read_record = NULL;
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
- map_context->ll = bio_to_region(&ms->rh, bio);
+ map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
queue_bio(ms, bio, rw);
return DM_MAPIO_SUBMITTED;
}
- r = ms->rh.log->type->in_sync(ms->rh.log,
- bio_to_region(&ms->rh, bio), 0);
+ r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
if (r < 0 && r != -EWOULDBLOCK)
return r;
@@ -1688,7 +1103,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
- rh_dec(&ms->rh, map_context->ll);
+ dm_rh_dec(ms->rh, map_context->ll);
return error;
}
@@ -1744,7 +1159,7 @@ out:
static void mirror_presuspend(struct dm_target *ti)
{
struct mirror_set *ms = (struct mirror_set *) ti->private;
- struct dm_dirty_log *log = ms->rh.log;
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
atomic_set(&ms->suspend, 1);
@@ -1752,10 +1167,10 @@ static void mirror_presuspend(struct dm_target *ti)
* We must finish up all the work that we've
* generated (i.e. recovery work).
*/
- rh_stop_recovery(&ms->rh);
+ dm_rh_stop_recovery(ms->rh);
wait_event(_kmirrord_recovery_stopped,
- !atomic_read(&ms->rh.recovery_in_flight));
+ !dm_rh_recovery_in_flight(ms->rh));
if (log->type->presuspend && log->type->presuspend(log))
/* FIXME: need better error handling */
@@ -1773,7 +1188,7 @@ static void mirror_presuspend(struct dm_target *ti)
static void mirror_postsuspend(struct dm_target *ti)
{
struct mirror_set *ms = ti->private;
- struct dm_dirty_log *log = ms->rh.log;
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
if (log->type->postsuspend && log->type->postsuspend(log))
/* FIXME: need better error handling */
@@ -1783,13 +1198,13 @@ static void mirror_postsuspend(struct dm_target *ti)
static void mirror_resume(struct dm_target *ti)
{
struct mirror_set *ms = ti->private;
- struct dm_dirty_log *log = ms->rh.log;
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
atomic_set(&ms->suspend, 0);
if (log->type->resume && log->type->resume(log))
/* FIXME: need better error handling */
DMWARN("log resume failed");
- rh_start_recovery(&ms->rh);
+ dm_rh_start_recovery(ms->rh);
}
/*
@@ -1821,7 +1236,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
{
unsigned int m, sz = 0;
struct mirror_set *ms = (struct mirror_set *) ti->private;
- struct dm_dirty_log *log = ms->rh.log;
+ struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
char buffer[ms->nr_mirrors + 1];
switch (type) {
@@ -1834,15 +1249,15 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
buffer[m] = '\0';
DMEMIT("%llu/%llu 1 %s ",
- (unsigned long long)log->type->get_sync_count(ms->rh.log),
+ (unsigned long long)log->type->get_sync_count(log),
(unsigned long long)ms->nr_regions, buffer);
- sz += log->type->status(ms->rh.log, type, result+sz, maxlen-sz);
+ sz += log->type->status(log, type, result+sz, maxlen-sz);
break;
case STATUSTYPE_TABLE:
- sz = log->type->status(ms->rh.log, type, result, maxlen);
+ sz = log->type->status(log, type, result, maxlen);
DMEMIT("%d", ms->nr_mirrors);
for (m = 0; m < ms->nr_mirrors; m++)
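
Everything the mirror target used to do through its private struct region_hash now goes through the shared dm-region-hash module added below; the generic code calls back into its client through three function pointers instead of reaching into struct mirror_set. The constructor wiring, condensed from the mirror_ctr()/alloc_context() hunk above (sketch, error handling trimmed):

static int example_create_region_hash(struct mirror_set *ms,
				      struct dm_target *ti,
				      struct dm_dirty_log *dl,
				      uint32_t region_size)
{
	ms->rh = dm_region_hash_create(ms,		/* handed back as 'context' */
				       dispatch_bios,	/* flush a region's delayed writes */
				       wakeup_mirrord,	/* kick the kmirrord worker */
				       wakeup_all_recovery_waiters,
				       ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);

	return IS_ERR(ms->rh) ? PTR_ERR(ms->rh) : 0;
}
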
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
new file mode 100644
index 00000000000..59f8d9df9e1
--- /dev/null
+++ b/drivers/md/dm-region-hash.c
@@ -0,0 +1,704 @@
+/*
+ * Copyright (C) 2003 Sistina Software Limited.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/dm-dirty-log.h>
+#include <linux/dm-region-hash.h>
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include "dm.h"
+#include "dm-bio-list.h"
+
+#define DM_MSG_PREFIX "region hash"
+
+/*-----------------------------------------------------------------
+ * Region hash
+ *
+ * The mirror splits itself up into discrete regions. Each
+ * region can be in one of three states: clean, dirty,
+ * nosync. There is no need to put clean regions in the hash.
+ *
+ * In addition to being present in the hash table a region _may_
+ * be present on one of three lists.
+ *
+ * clean_regions: Regions on this list have no io pending to
+ * them, they are in sync, we are no longer interested in them,
+ * they are dull. dm_rh_update_states() will remove them from the
+ * hash table.
+ *
+ * quiesced_regions: These regions have been spun down, ready
+ * for recovery. rh_recovery_start() will remove regions from
+ * this list and hand them to kmirrord, which will schedule the
+ * recovery io with kcopyd.
+ *
+ * recovered_regions: Regions that kcopyd has successfully
+ * recovered. dm_rh_update_states() will now schedule any delayed
+ * io, up the recovery_count, and remove the region from the
+ * hash.
+ *
+ * There are 2 locks:
+ * A rw spin lock 'hash_lock' protects just the hash table,
+ * this is never held in write mode from interrupt context,
+ * which I believe means that we only have to disable irqs when
+ * doing a write lock.
+ *
+ * An ordinary spin lock 'region_lock' that protects the three
+ * lists in the region_hash, with the 'state', 'list' and
+ * 'delayed_bios' fields of the regions. This is used from irq
+ * context, so all other uses will have to suspend local irqs.
+ *---------------------------------------------------------------*/
+struct dm_region_hash {
+ uint32_t region_size;
+ unsigned region_shift;
+
+ /* holds persistent region state */
+ struct dm_dirty_log *log;
+
+ /* hash table */
+ rwlock_t hash_lock;
+ mempool_t *region_pool;
+ unsigned mask;
+ unsigned nr_buckets;
+ unsigned prime;
+ unsigned shift;
+ struct list_head *buckets;
+
+ unsigned max_recovery; /* Max # of regions to recover in parallel */
+
+ spinlock_t region_lock;
+ atomic_t recovery_in_flight;
+ struct semaphore recovery_count;
+ struct list_head clean_regions;
+ struct list_head quiesced_regions;
+ struct list_head recovered_regions;
+ struct list_head failed_recovered_regions;
+
+ void *context;
+ sector_t target_begin;
+
+	/* Callback function to schedule writes of bios */
+ void (*dispatch_bios)(void *context, struct bio_list *bios);
+
+	/* Callback function to wake up the caller's worker thread. */
+ void (*wakeup_workers)(void *context);
+
+	/* Callback function to wake up the caller's recovery waiters. */
+ void (*wakeup_all_recovery_waiters)(void *context);
+};
+
+struct dm_region {
+ struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */
+ region_t key;
+ int state;
+
+ struct list_head hash_list;
+ struct list_head list;
+
+ atomic_t pending;
+ struct bio_list delayed_bios;
+};
+
+/*
+ * Conversion fns
+ */
+static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
+{
+ return sector >> rh->region_shift;
+}
+
+sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
+{
+ return region << rh->region_shift;
+}
+EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
+
+region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
+{
+ return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+}
+EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
+
+void *dm_rh_region_context(struct dm_region *reg)
+{
+ return reg->rh->context;
+}
+EXPORT_SYMBOL_GPL(dm_rh_region_context);
+
+region_t dm_rh_get_region_key(struct dm_region *reg)
+{
+ return reg->key;
+}
+EXPORT_SYMBOL_GPL(dm_rh_get_region_key);
+
+sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
+{
+ return rh->region_size;
+}
+EXPORT_SYMBOL_GPL(dm_rh_get_region_size);
+
+/*
+ * FIXME: shall we pass in a structure instead of all these args to
+ * dm_region_hash_create()????
+ */
+#define RH_HASH_MULT 2654435387U
+#define RH_HASH_SHIFT 12
+
+#define MIN_REGIONS 64
+struct dm_region_hash *dm_region_hash_create(
+ void *context, void (*dispatch_bios)(void *context,
+ struct bio_list *bios),
+ void (*wakeup_workers)(void *context),
+ void (*wakeup_all_recovery_waiters)(void *context),
+ sector_t target_begin, unsigned max_recovery,
+ struct dm_dirty_log *log, uint32_t region_size,
+ region_t nr_regions)
+{
+ struct dm_region_hash *rh;
+ unsigned nr_buckets, max_buckets;
+ size_t i;
+
+ /*
+ * Calculate a suitable number of buckets for our hash
+ * table.
+ */
+ max_buckets = nr_regions >> 6;
+ for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
+ ;
+ nr_buckets >>= 1;
+
+ rh = kmalloc(sizeof(*rh), GFP_KERNEL);
+ if (!rh) {
+ DMERR("unable to allocate region hash memory");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rh->context = context;
+ rh->dispatch_bios = dispatch_bios;
+ rh->wakeup_workers = wakeup_workers;
+ rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
+ rh->target_begin = target_begin;
+ rh->max_recovery = max_recovery;
+ rh->log = log;
+ rh->region_size = region_size;
+ rh->region_shift = ffs(region_size) - 1;
+ rwlock_init(&rh->hash_lock);
+ rh->mask = nr_buckets - 1;
+ rh->nr_buckets = nr_buckets;
+
+ rh->shift = RH_HASH_SHIFT;
+ rh->prime = RH_HASH_MULT;
+
+ rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
+ if (!rh->buckets) {
+ DMERR("unable to allocate region hash bucket memory");
+ kfree(rh);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < nr_buckets; i++)
+ INIT_LIST_HEAD(rh->buckets + i);
+
+ spin_lock_init(&rh->region_lock);
+ sema_init(&rh->recovery_count, 0);
+ atomic_set(&rh->recovery_in_flight, 0);
+ INIT_LIST_HEAD(&rh->clean_regions);
+ INIT_LIST_HEAD(&rh->quiesced_regions);
+ INIT_LIST_HEAD(&rh->recovered_regions);
+ INIT_LIST_HEAD(&rh->failed_recovered_regions);
+
+ rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
+ sizeof(struct dm_region));
+ if (!rh->region_pool) {
+ vfree(rh->buckets);
+ kfree(rh);
+ rh = ERR_PTR(-ENOMEM);
+ }
+
+ return rh;
+}
+EXPORT_SYMBOL_GPL(dm_region_hash_create);
+
+void dm_region_hash_destroy(struct dm_region_hash *rh)
+{
+ unsigned h;
+ struct dm_region *reg, *nreg;
+
+ BUG_ON(!list_empty(&rh->quiesced_regions));
+ for (h = 0; h < rh->nr_buckets; h++) {
+ list_for_each_entry_safe(reg, nreg, rh->buckets + h,
+ hash_list) {
+ BUG_ON(atomic_read(&reg->pending));
+ mempool_free(reg, rh->region_pool);
+ }
+ }
+
+ if (rh->log)
+ dm_dirty_log_destroy(rh->log);
+
+ if (rh->region_pool)
+ mempool_destroy(rh->region_pool);
+
+ vfree(rh->buckets);
+ kfree(rh);
+}
+EXPORT_SYMBOL_GPL(dm_region_hash_destroy);
+
+struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
+{
+ return rh->log;
+}
+EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
+
+static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
+{
+ return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
+}
+
+static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
+{
+ struct dm_region *reg;
+ struct list_head *bucket = rh->buckets + rh_hash(rh, region);
+
+ list_for_each_entry(reg, bucket, hash_list)
+ if (reg->key == region)
+ return reg;
+
+ return NULL;
+}
+
+static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
+{
+ list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
+}
+
+static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
+{
+ struct dm_region *reg, *nreg;
+
+ nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
+ if (unlikely(!nreg))
+ nreg = kmalloc(sizeof(*nreg), GFP_NOIO);
+
+ nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
+ DM_RH_CLEAN : DM_RH_NOSYNC;
+ nreg->rh = rh;
+ nreg->key = region;
+ INIT_LIST_HEAD(&nreg->list);
+ atomic_set(&nreg->pending, 0);
+ bio_list_init(&nreg->delayed_bios);
+
+ write_lock_irq(&rh->hash_lock);
+ reg = __rh_lookup(rh, region);
+ if (reg)
+ /* We lost the race. */
+ mempool_free(nreg, rh->region_pool);
+ else {
+ __rh_insert(rh, nreg);
+ if (nreg->state == DM_RH_CLEAN) {
+ spin_lock(&rh->region_lock);
+ list_add(&nreg->list, &rh->clean_regions);
+ spin_unlock(&rh->region_lock);
+ }
+
+ reg = nreg;
+ }
+ write_unlock_irq(&rh->hash_lock);
+
+ return reg;
+}
+
+static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
+{
+ struct dm_region *reg;
+
+ reg = __rh_lookup(rh, region);
+ if (!reg) {
+ read_unlock(&rh->hash_lock);
+ reg = __rh_alloc(rh, region);
+ read_lock(&rh->hash_lock);
+ }
+
+ return reg;
+}
+
+int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
+{
+ int r;
+ struct dm_region *reg;
+
+ read_lock(&rh->hash_lock);
+ reg = __rh_lookup(rh, region);
+ read_unlock(&rh->hash_lock);
+
+ if (reg)
+ return reg->state;
+
+ /*
+ * The region wasn't in the hash, so we fall back to the
+ * dirty log.
+ */
+ r = rh->log->type->in_sync(rh->log, region, may_block);
+
+ /*
+	 * Any error from the dirty log (e.g. -EWOULDBLOCK) is
+	 * treated as DM_RH_NOSYNC.
+ */
+ return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
+}
+EXPORT_SYMBOL_GPL(dm_rh_get_state);
+
+static void complete_resync_work(struct dm_region *reg, int success)
+{
+ struct dm_region_hash *rh = reg->rh;
+
+ rh->log->type->set_region_sync(rh->log, reg->key, success);
+
+ /*
+ * Dispatch the bios before we call 'wake_up_all'.
+ * This is important because if we are suspending,
+ * we want to know that recovery is complete and
+ * the work queue is flushed. If we wake_up_all
+ * before we dispatch_bios (queue bios and call wake()),
+ * then we risk suspending before the work queue
+ * has been properly flushed.
+ */
+ rh->dispatch_bios(rh->context, &reg->delayed_bios);
+ if (atomic_dec_and_test(&rh->recovery_in_flight))
+ rh->wakeup_all_recovery_waiters(rh->context);
+ up(&rh->recovery_count);
+}
+
+/* dm_rh_mark_nosync
+ * @rh
+ * @bio
+ * @done
+ * @error
+ *
+ * The bio was written on some mirror(s) but failed on other mirror(s).
+ * We can successfully endio the bio but must prevent the region from
+ * being marked clean, so we set its state to DM_RH_NOSYNC.
+ *
+ * This function is _not_ safe in interrupt context!
+ */
+void dm_rh_mark_nosync(struct dm_region_hash *rh,
+ struct bio *bio, unsigned done, int error)
+{
+ unsigned long flags;
+ struct dm_dirty_log *log = rh->log;
+ struct dm_region *reg;
+ region_t region = dm_rh_bio_to_region(rh, bio);
+ int recovering = 0;
+
+ /* We must inform the log that the sync count has changed. */
+ log->type->set_region_sync(log, region, 0);
+
+ read_lock(&rh->hash_lock);
+ reg = __rh_find(rh, region);
+ read_unlock(&rh->hash_lock);
+
+ /* region hash entry should exist because write was in-flight */
+ BUG_ON(!reg);
+ BUG_ON(!list_empty(&reg->list));
+
+ spin_lock_irqsave(&rh->region_lock, flags);
+ /*
+ * Possible cases:
+ * 1) DM_RH_DIRTY
+	 * 2) DM_RH_NOSYNC: was dirty, other preceding writes failed
+	 * 3) DM_RH_RECOVERING: flushing pending writes
+	 * In any case, the region should not have been connected to any list.
+ */
+ recovering = (reg->state == DM_RH_RECOVERING);
+ reg->state = DM_RH_NOSYNC;
+ BUG_ON(!list_empty(&reg->list));
+ spin_unlock_irqrestore(&rh->region_lock, flags);
+
+ bio_endio(bio, error);
+ if (recovering)
+ complete_resync_work(reg, 0);
+}
+EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);
+
+void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
+{
+ struct dm_region *reg, *next;
+
+ LIST_HEAD(clean);
+ LIST_HEAD(recovered);
+ LIST_HEAD(failed_recovered);
+
+ /*
+ * Quickly grab the lists.
+ */
+ write_lock_irq(&rh->hash_lock);
+ spin_lock(&rh->region_lock);
+ if (!list_empty(&rh->clean_regions)) {
+ list_splice_init(&rh->clean_regions, &clean);
+
+ list_for_each_entry(reg, &clean, list)
+ list_del(&reg->hash_list);
+ }
+
+ if (!list_empty(&rh->recovered_regions)) {
+ list_splice_init(&rh->recovered_regions, &recovered);
+
+ list_for_each_entry(reg, &recovered, list)
+ list_del(&reg->hash_list);
+ }
+
+ if (!list_empty(&rh->failed_recovered_regions)) {
+ list_splice_init(&rh->failed_recovered_regions,
+ &failed_recovered);
+
+ list_for_each_entry(reg, &failed_recovered, list)
+ list_del(&reg->hash_list);
+ }
+
+ spin_unlock(&rh->region_lock);
+ write_unlock_irq(&rh->hash_lock);
+
+ /*
+ * All the regions on the recovered and clean lists have
+ * now been pulled out of the system, so no need to do
+ * any more locking.
+ */
+ list_for_each_entry_safe(reg, next, &recovered, list) {
+ rh->log->type->clear_region(rh->log, reg->key);
+ complete_resync_work(reg, 1);
+ mempool_free(reg, rh->region_pool);
+ }
+
+ list_for_each_entry_safe(reg, next, &failed_recovered, list) {
+ complete_resync_work(reg, errors_handled ? 0 : 1);
+ mempool_free(reg, rh->region_pool);
+ }
+
+ list_for_each_entry_safe(reg, next, &clean, list) {
+ rh->log->type->clear_region(rh->log, reg->key);
+ mempool_free(reg, rh->region_pool);
+ }
+
+ rh->log->type->flush(rh->log);
+}
+EXPORT_SYMBOL_GPL(dm_rh_update_states);
+
+static void rh_inc(struct dm_region_hash *rh, region_t region)
+{
+ struct dm_region *reg;
+
+ read_lock(&rh->hash_lock);
+ reg = __rh_find(rh, region);
+
+ spin_lock_irq(&rh->region_lock);
+ atomic_inc(&reg->pending);
+
+ if (reg->state == DM_RH_CLEAN) {
+ reg->state = DM_RH_DIRTY;
+ list_del_init(&reg->list); /* take off the clean list */
+ spin_unlock_irq(&rh->region_lock);
+
+ rh->log->type->mark_region(rh->log, reg->key);
+ } else
+ spin_unlock_irq(&rh->region_lock);
+
+
+ read_unlock(&rh->hash_lock);
+}
+
+void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
+{
+ struct bio *bio;
+
+ for (bio = bios->head; bio; bio = bio->bi_next)
+ rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+}
+EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
+
+void dm_rh_dec(struct dm_region_hash *rh, region_t region)
+{
+ unsigned long flags;
+ struct dm_region *reg;
+ int should_wake = 0;
+
+ read_lock(&rh->hash_lock);
+ reg = __rh_lookup(rh, region);
+ read_unlock(&rh->hash_lock);
+
+ spin_lock_irqsave(&rh->region_lock, flags);
+ if (atomic_dec_and_test(&reg->pending)) {
+ /*
+ * There is no pending I/O for this region.
+		 * We can move the region to the corresponding list for the
+		 * next action. At this point, the region is not yet
+		 * connected to any list.
+		 *
+		 * If the state is DM_RH_NOSYNC, the region should be kept
+		 * off the clean list.
+ * The hash entry for DM_RH_NOSYNC will remain in memory
+ * until the region is recovered or the map is reloaded.
+ */
+
+ /* do nothing for DM_RH_NOSYNC */
+ if (reg->state == DM_RH_RECOVERING) {
+ list_add_tail(&reg->list, &rh->quiesced_regions);
+ } else if (reg->state == DM_RH_DIRTY) {
+ reg->state = DM_RH_CLEAN;
+ list_add(&reg->list, &rh->clean_regions);
+ }
+ should_wake = 1;
+ }
+ spin_unlock_irqrestore(&rh->region_lock, flags);
+
+ if (should_wake)
+ rh->wakeup_workers(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_dec);
+
+/*
+ * Starts quiescing a region in preparation for recovery.
+ */
+static int __rh_recovery_prepare(struct dm_region_hash *rh)
+{
+ int r;
+ region_t region;
+ struct dm_region *reg;
+
+ /*
+ * Ask the dirty log what's next.
+ */
+ r = rh->log->type->get_resync_work(rh->log, &region);
+ if (r <= 0)
+ return r;
+
+ /*
+	 * Get this region and start quiescing it by setting the
+	 * recovering flag.
+ */
+ read_lock(&rh->hash_lock);
+ reg = __rh_find(rh, region);
+ read_unlock(&rh->hash_lock);
+
+ spin_lock_irq(&rh->region_lock);
+ reg->state = DM_RH_RECOVERING;
+
+ /* Already quiesced ? */
+ if (atomic_read(&reg->pending))
+ list_del_init(&reg->list);
+ else
+ list_move(&reg->list, &rh->quiesced_regions);
+
+ spin_unlock_irq(&rh->region_lock);
+
+ return 1;
+}
+
+void dm_rh_recovery_prepare(struct dm_region_hash *rh)
+{
+ /* Extra reference to avoid race with dm_rh_stop_recovery */
+ atomic_inc(&rh->recovery_in_flight);
+
+ while (!down_trylock(&rh->recovery_count)) {
+ atomic_inc(&rh->recovery_in_flight);
+ if (__rh_recovery_prepare(rh) <= 0) {
+ atomic_dec(&rh->recovery_in_flight);
+ up(&rh->recovery_count);
+ break;
+ }
+ }
+
+ /* Drop the extra reference */
+ if (atomic_dec_and_test(&rh->recovery_in_flight))
+ rh->wakeup_all_recovery_waiters(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);
+
+/*
+ * Return a quiesced region ready for recovery, or NULL if there is none.
+ */
+struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
+{
+ struct dm_region *reg = NULL;
+
+ spin_lock_irq(&rh->region_lock);
+ if (!list_empty(&rh->quiesced_regions)) {
+ reg = list_entry(rh->quiesced_regions.next,
+ struct dm_region, list);
+ list_del_init(&reg->list); /* remove from the quiesced list */
+ }
+ spin_unlock_irq(&rh->region_lock);
+
+ return reg;
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_start);
+
+void dm_rh_recovery_end(struct dm_region *reg, int success)
+{
+ struct dm_region_hash *rh = reg->rh;
+
+ spin_lock_irq(&rh->region_lock);
+ if (success)
+ list_add(&reg->list, &reg->rh->recovered_regions);
+ else {
+ reg->state = DM_RH_NOSYNC;
+ list_add(&reg->list, &reg->rh->failed_recovered_regions);
+ }
+ spin_unlock_irq(&rh->region_lock);
+
+ rh->wakeup_workers(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_end);
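+
+/*
+ * Recovery cycle, sketched for a hypothetical caller (recover() and
+ * errors_handled are illustrative names only, not part of this file):
+ *
+ *	dm_rh_recovery_prepare(rh);		quiesce regions needing resync
+ *	while ((reg = dm_rh_recovery_start(rh)))
+ *		recover(reg);			hand the region to kcopyd
+ *	...
+ *	dm_rh_recovery_end(reg, success);	from the kcopyd callback
+ *	dm_rh_update_states(rh, errors_handled);	later, from the worker
+ */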
+
+/* Return recovery in flight count. */
+int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
+{
+ return atomic_read(&rh->recovery_in_flight);
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);
+
+int dm_rh_flush(struct dm_region_hash *rh)
+{
+ return rh->log->type->flush(rh->log);
+}
+EXPORT_SYMBOL_GPL(dm_rh_flush);
+
+void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
+{
+ struct dm_region *reg;
+
+ read_lock(&rh->hash_lock);
+ reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
+ bio_list_add(&reg->delayed_bios, bio);
+ read_unlock(&rh->hash_lock);
+}
+EXPORT_SYMBOL_GPL(dm_rh_delay);
+
+void dm_rh_stop_recovery(struct dm_region_hash *rh)
+{
+ int i;
+
+ /* wait for any recovering regions */
+ for (i = 0; i < rh->max_recovery; i++)
+ down(&rh->recovery_count);
+}
+EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);
+
+void dm_rh_start_recovery(struct dm_region_hash *rh)
+{
+ int i;
+
+ for (i = 0; i < rh->max_recovery; i++)
+ up(&rh->recovery_count);
+
+ rh->wakeup_workers(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
+
+MODULE_DESCRIPTION(DM_NAME " region hash");
+MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 391dfa2ad43..cdfbf65b28c 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -9,7 +9,8 @@
* Round-robin path selector.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
+
#include "dm-path-selector.h"
#include <linux/slab.h>
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 6e5528aecc9..b2d9d1ac28a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -600,7 +600,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->valid = 1;
s->active = 0;
- s->last_percent = 0;
init_rwsem(&s->lock);
spin_lock_init(&s->pe_lock);
s->ti = ti;
@@ -824,8 +823,10 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
* the bios for the original write to the origin.
*/
if (primary_pe &&
- atomic_dec_and_test(&primary_pe->ref_count))
+ atomic_dec_and_test(&primary_pe->ref_count)) {
origin_bios = bio_list_get(&primary_pe->origin_bios);
+ free_pending_exception(primary_pe);
+ }
/*
* Free the pe if it's not linked to an origin write or if
@@ -834,12 +835,6 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
if (!primary_pe || primary_pe != pe)
free_pending_exception(pe);
- /*
- * Free the primary pe if nothing references it.
- */
- if (primary_pe && !atomic_read(&primary_pe->ref_count))
- free_pending_exception(primary_pe);
-
return origin_bios;
}
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 292c15609ae..f07315fe236 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -9,7 +9,7 @@
#ifndef DM_SNAPSHOT_H
#define DM_SNAPSHOT_H
-#include "dm.h"
+#include <linux/device-mapper.h>
#include "dm-bio-list.h"
#include <linux/blkdev.h>
#include <linux/workqueue.h>
@@ -158,9 +158,6 @@ struct dm_snapshot {
/* Used for display of table */
char type;
- /* The last percentage we notified */
- int last_percent;
-
mempool_t *pending_pool;
struct exception_table pending;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index b745d8ac625..a2d068dbe9e 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -4,7 +4,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -60,8 +60,8 @@ static inline struct stripe_c *alloc_context(unsigned int stripes)
{
size_t len;
- if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
- stripes))
+ if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
+ stripes))
return NULL;
len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a740a6950f5..a63161aec48 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -43,7 +43,7 @@ struct dm_table {
* device. This should be a combination of FMODE_READ
* and FMODE_WRITE.
*/
- int mode;
+ fmode_t mode;
/* a list of devices used by this table */
struct list_head devices;
@@ -217,7 +217,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
return 0;
}
-int dm_table_create(struct dm_table **result, int mode,
+int dm_table_create(struct dm_table **result, fmode_t mode,
unsigned num_targets, struct mapped_device *md)
{
struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
@@ -313,19 +313,6 @@ static inline int check_space(struct dm_table *t)
}
/*
- * Convert a device path to a dev_t.
- */
-static int lookup_device(const char *path, dev_t *dev)
-{
- struct block_device *bdev = lookup_bdev(path);
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
- *dev = bdev->bd_dev;
- bdput(bdev);
- return 0;
-}
-
-/*
* See if we've already got a device in the list.
*/
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
@@ -357,7 +344,7 @@ static int open_dev(struct dm_dev_internal *d, dev_t dev,
return PTR_ERR(bdev);
r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
if (r)
- blkdev_put(bdev);
+ blkdev_put(bdev, d->dm_dev.mode);
else
d->dm_dev.bdev = bdev;
return r;
@@ -372,7 +359,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
return;
bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
- blkdev_put(d->dm_dev.bdev);
+ blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
d->dm_dev.bdev = NULL;
}
@@ -395,7 +382,7 @@ static int check_device_area(struct dm_dev_internal *dd, sector_t start,
* careful to leave things as they were if we fail to reopen the
* device.
*/
-static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
+static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
struct mapped_device *md)
{
int r;
@@ -421,7 +408,7 @@ static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
*/
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
const char *path, sector_t start, sector_t len,
- int mode, struct dm_dev **result)
+ fmode_t mode, struct dm_dev **result)
{
int r;
dev_t uninitialized_var(dev);
@@ -437,8 +424,12 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
return -EOVERFLOW;
} else {
/* convert the path to a device */
- if ((r = lookup_device(path, &dev)))
- return r;
+ struct block_device *bdev = lookup_bdev(path);
+
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
+ dev = bdev->bd_dev;
+ bdput(bdev);
}
dd = find_device(&t->devices, dev);
@@ -537,7 +528,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
EXPORT_SYMBOL_GPL(dm_set_device_limits);
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
- sector_t len, int mode, struct dm_dev **result)
+ sector_t len, fmode_t mode, struct dm_dev **result)
{
int r = __table_get_device(ti->table, ti, path,
start, len, mode, result);
@@ -887,7 +878,7 @@ struct list_head *dm_table_get_devices(struct dm_table *t)
return &t->devices;
}
-int dm_table_get_mode(struct dm_table *t)
+fmode_t dm_table_get_mode(struct dm_table *t)
{
return t->mode;
}
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index bdec206c404..cdbf126ec10 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -4,7 +4,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 327de03a5bd..6963ad14840 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -21,7 +21,6 @@
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
-#include <linux/smp_lock.h>
#define DM_MSG_PREFIX "core"
@@ -76,7 +75,6 @@ union map_info *dm_get_mapinfo(struct bio *bio)
*/
struct dm_wq_req {
enum {
- DM_WQ_FLUSH_ALL,
DM_WQ_FLUSH_DEFERRED,
} type;
struct work_struct work;
@@ -151,40 +149,40 @@ static struct kmem_cache *_tio_cache;
static int __init local_init(void)
{
- int r;
+ int r = -ENOMEM;
/* allocate a slab for the dm_ios */
_io_cache = KMEM_CACHE(dm_io, 0);
if (!_io_cache)
- return -ENOMEM;
+ return r;
/* allocate a slab for the target ios */
_tio_cache = KMEM_CACHE(dm_target_io, 0);
- if (!_tio_cache) {
- kmem_cache_destroy(_io_cache);
- return -ENOMEM;
- }
+ if (!_tio_cache)
+ goto out_free_io_cache;
r = dm_uevent_init();
- if (r) {
- kmem_cache_destroy(_tio_cache);
- kmem_cache_destroy(_io_cache);
- return r;
- }
+ if (r)
+ goto out_free_tio_cache;
_major = major;
r = register_blkdev(_major, _name);
- if (r < 0) {
- kmem_cache_destroy(_tio_cache);
- kmem_cache_destroy(_io_cache);
- dm_uevent_exit();
- return r;
- }
+ if (r < 0)
+ goto out_uevent_exit;
if (!_major)
_major = r;
return 0;
+
+out_uevent_exit:
+ dm_uevent_exit();
+out_free_tio_cache:
+ kmem_cache_destroy(_tio_cache);
+out_free_io_cache:
+ kmem_cache_destroy(_io_cache);
+
+ return r;
}
static void local_exit(void)
@@ -249,13 +247,13 @@ static void __exit dm_exit(void)
/*
* Block device functions
*/
-static int dm_blk_open(struct inode *inode, struct file *file)
+static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mapped_device *md;
spin_lock(&_minor_lock);
- md = inode->i_bdev->bd_disk->private_data;
+ md = bdev->bd_disk->private_data;
if (!md)
goto out;
@@ -274,11 +272,9 @@ out:
return md ? 0 : -ENXIO;
}
-static int dm_blk_close(struct inode *inode, struct file *file)
+static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
- struct mapped_device *md;
-
- md = inode->i_bdev->bd_disk->private_data;
+ struct mapped_device *md = disk->private_data;
atomic_dec(&md->open_count);
dm_put(md);
return 0;
@@ -315,21 +311,14 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
-static int dm_blk_ioctl(struct inode *inode, struct file *file,
+static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct mapped_device *md;
- struct dm_table *map;
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ struct dm_table *map = dm_get_table(md);
struct dm_target *tgt;
int r = -ENOTTY;
- /* We don't really need this lock, but we do need 'inode'. */
- unlock_kernel();
-
- md = inode->i_bdev->bd_disk->private_data;
-
- map = dm_get_table(md);
-
if (!map || !dm_table_get_size(map))
goto out;
@@ -345,12 +334,11 @@ static int dm_blk_ioctl(struct inode *inode, struct file *file,
}
if (tgt->type->ioctl)
- r = tgt->type->ioctl(tgt, inode, file, cmd, arg);
+ r = tgt->type->ioctl(tgt, cmd, arg);
out:
dm_table_put(map);
- lock_kernel();
return r;
}
@@ -669,6 +657,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
clone->bi_io_vec->bv_len = clone->bi_size;
+ clone->bi_flags |= 1 << BIO_CLONED;
return clone;
}
@@ -1394,9 +1383,6 @@ static void dm_wq_work(struct work_struct *work)
down_write(&md->io_lock);
switch (req->type) {
- case DM_WQ_FLUSH_ALL:
- __merge_pushback_list(md);
- /* pass through */
case DM_WQ_FLUSH_DEFERRED:
__flush_deferred_io(md);
break;
@@ -1526,7 +1512,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
if (!md->suspended_bdev) {
DMWARN("bdget failed in dm_suspend");
r = -ENOMEM;
- goto flush_and_out;
+ goto out;
}
/*
@@ -1577,14 +1563,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
set_bit(DMF_SUSPENDED, &md->flags);
-flush_and_out:
- if (r && noflush)
- /*
- * Because there may be already I/Os in the pushback list,
- * flush them before return.
- */
- dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);
-
out:
if (r && md->suspended_bdev) {
bdput(md->suspended_bdev);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index cd189da2b2f..0ade60cdef4 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -62,15 +62,6 @@ void dm_put_target_type(struct target_type *t);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
void *param), void *param);
-/*-----------------------------------------------------------------
- * Useful inlines.
- *---------------------------------------------------------------*/
-static inline int array_too_big(unsigned long fixed, unsigned long obj,
- unsigned long num)
-{
- return (num > (ULONG_MAX - fixed) / obj);
-}
-
int dm_split_args(int *argc, char ***argvp, char *input);
/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aaa3d465de4..c1a837ca193 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1520,7 +1520,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
if (err) {
printk(KERN_ERR "md: could not bd_claim %s.\n",
bdevname(bdev, b));
- blkdev_put(bdev);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
return err;
}
if (!shared)
@@ -1536,7 +1536,7 @@ static void unlock_rdev(mdk_rdev_t *rdev)
if (!bdev)
MD_BUG();
bd_release(bdev);
- blkdev_put(bdev);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}
void md_autodetect_dev(dev_t dev);
@@ -4785,7 +4785,7 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static int md_ioctl(struct inode *inode, struct file *file,
+static int md_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int err = 0;
@@ -4823,7 +4823,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
* Commands creating/starting a new array:
*/
- mddev = inode->i_bdev->bd_disk->private_data;
+ mddev = bdev->bd_disk->private_data;
if (!mddev) {
BUG();
@@ -4996,13 +4996,13 @@ abort:
return err;
}
-static int md_open(struct inode *inode, struct file *file)
+static int md_open(struct block_device *bdev, fmode_t mode)
{
/*
* Succeed if we can lock the mddev, which confirms that
* it isn't being stopped right now.
*/
- mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
+ mddev_t *mddev = bdev->bd_disk->private_data;
int err;
if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
@@ -5013,14 +5013,14 @@ static int md_open(struct inode *inode, struct file *file)
atomic_inc(&mddev->openers);
mddev_unlock(mddev);
- check_disk_change(inode->i_bdev);
+ check_disk_change(bdev);
out:
return err;
}
-static int md_release(struct inode *inode, struct file * file)
+static int md_release(struct gendisk *disk, fmode_t mode)
{
- mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
+ mddev_t *mddev = disk->private_data;
BUG_ON(!mddev);
atomic_dec(&mddev->openers);
@@ -5048,7 +5048,7 @@ static struct block_device_operations md_fops =
.owner = THIS_MODULE,
.open = md_open,
.release = md_release,
- .ioctl = md_ioctl,
+ .locked_ioctl = md_ioctl,
.getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 5b34c134aa2..127b0526a72 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -545,11 +545,11 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
if( VFL_TYPE_GRABBER == type ) {
vv->video_minor = vfd->minor;
INFO(("%s: registered device video%d [v4l2]\n",
- dev->name, vfd->minor & 0x1f));
+ dev->name, vfd->num));
} else {
vv->vbi_minor = vfd->minor;
INFO(("%s: registered device vbi%d [v4l2]\n",
- dev->name, vfd->minor & 0x1f));
+ dev->name, vfd->num));
}
*vid = vfd;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 99be9e5c85f..fe0bd55977e 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -834,7 +834,7 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
* copying is done already, arg is a kernel pointer.
*/
-int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int cmd, void *arg)
+static int __saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
struct saa7146_fh *fh = file->private_data;
struct saa7146_dev *dev = fh->dev;
@@ -1215,12 +1215,18 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
}
#endif
default:
- return v4l_compat_translate_ioctl(inode,file,cmd,arg,
- saa7146_video_do_ioctl);
+ return v4l_compat_translate_ioctl(file, cmd, arg,
+ __saa7146_video_do_ioctl);
}
return 0;
}
+int saa7146_video_do_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, void *arg)
+{
+ return __saa7146_video_do_ioctl(file, cmd, arg);
+}
+
/*********************************************************************************/
/* buffer handling functions */
diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c
index 2febfb5a846..40644aacffc 100644
--- a/drivers/media/dvb/frontends/s5h1411.c
+++ b/drivers/media/dvb/frontends/s5h1411.c
@@ -38,6 +38,7 @@ struct s5h1411_state {
struct dvb_frontend frontend;
fe_modulation_t current_modulation;
+ unsigned int first_tune:1;
u32 current_frequency;
int if_freq;
@@ -62,7 +63,7 @@ static struct init_tab {
{ S5H1411_I2C_TOP_ADDR, 0x08, 0x0047, },
{ S5H1411_I2C_TOP_ADDR, 0x1c, 0x0400, },
{ S5H1411_I2C_TOP_ADDR, 0x1e, 0x0370, },
- { S5H1411_I2C_TOP_ADDR, 0x1f, 0x342a, },
+ { S5H1411_I2C_TOP_ADDR, 0x1f, 0x342c, },
{ S5H1411_I2C_TOP_ADDR, 0x24, 0x0231, },
{ S5H1411_I2C_TOP_ADDR, 0x25, 0x1011, },
{ S5H1411_I2C_TOP_ADDR, 0x26, 0x0f07, },
@@ -100,7 +101,6 @@ static struct init_tab {
{ S5H1411_I2C_TOP_ADDR, 0x78, 0x3141, },
{ S5H1411_I2C_TOP_ADDR, 0x7a, 0x3141, },
{ S5H1411_I2C_TOP_ADDR, 0xb3, 0x8003, },
- { S5H1411_I2C_TOP_ADDR, 0xb5, 0xafbb, },
{ S5H1411_I2C_TOP_ADDR, 0xb5, 0xa6bb, },
{ S5H1411_I2C_TOP_ADDR, 0xb6, 0x0609, },
{ S5H1411_I2C_TOP_ADDR, 0xb7, 0x2f06, },
@@ -393,7 +393,7 @@ static int s5h1411_set_if_freq(struct dvb_frontend *fe, int KHz)
switch (KHz) {
case 3250:
- s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x10d9);
+ s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x10d5);
s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x5342);
s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x10d9);
break;
@@ -464,13 +464,25 @@ static int s5h1411_set_spectralinversion(struct dvb_frontend *fe, int inversion)
if (inversion == 1)
val |= 0x1000; /* Inverted */
- else
- val |= 0x0000;
state->inversion = inversion;
return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x24, val);
}
+static int s5h1411_set_serialmode(struct dvb_frontend *fe, int serial)
+{
+ struct s5h1411_state *state = fe->demodulator_priv;
+ u16 val;
+
+ dprintk("%s(%d)\n", __func__, serial);
+ val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xbd) & ~0x100;
+
+ if (serial == 1)
+ val |= 0x100;
+
+ return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, val);
+}
+
static int s5h1411_enable_modulation(struct dvb_frontend *fe,
fe_modulation_t m)
{
@@ -478,6 +490,12 @@ static int s5h1411_enable_modulation(struct dvb_frontend *fe,
dprintk("%s(0x%08x)\n", __func__, m);
+ if ((state->first_tune == 0) && (m == state->current_modulation)) {
+ dprintk("%s() Already at desired modulation. Skipping...\n",
+ __func__);
+ return 0;
+ }
+
switch (m) {
case VSB_8:
dprintk("%s() VSB_8\n", __func__);
@@ -502,6 +520,7 @@ static int s5h1411_enable_modulation(struct dvb_frontend *fe,
}
state->current_modulation = m;
+ state->first_tune = 0;
s5h1411_softreset(fe);
return 0;
@@ -535,7 +554,7 @@ static int s5h1411_set_gpio(struct dvb_frontend *fe, int enable)
return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0, val);
}
-static int s5h1411_sleep(struct dvb_frontend *fe, int enable)
+static int s5h1411_set_powerstate(struct dvb_frontend *fe, int enable)
{
struct s5h1411_state *state = fe->demodulator_priv;
@@ -551,6 +570,11 @@ static int s5h1411_sleep(struct dvb_frontend *fe, int enable)
return 0;
}
+static int s5h1411_sleep(struct dvb_frontend *fe)
+{
+ return s5h1411_set_powerstate(fe, 1);
+}
+
static int s5h1411_register_reset(struct dvb_frontend *fe)
{
struct s5h1411_state *state = fe->demodulator_priv;
@@ -574,9 +598,6 @@ static int s5h1411_set_frontend(struct dvb_frontend *fe,
s5h1411_enable_modulation(fe, p->u.vsb.modulation);
- /* Allow the demod to settle */
- msleep(100);
-
if (fe->ops.tuner_ops.set_params) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
@@ -587,6 +608,10 @@ static int s5h1411_set_frontend(struct dvb_frontend *fe,
fe->ops.i2c_gate_ctrl(fe, 0);
}
+ /* Issue a reset to the demod so it knows to resync against the
+ newly tuned frequency */
+ s5h1411_softreset(fe);
+
return 0;
}
@@ -599,7 +624,7 @@ static int s5h1411_init(struct dvb_frontend *fe)
dprintk("%s()\n", __func__);
- s5h1411_sleep(fe, 0);
+ s5h1411_set_powerstate(fe, 0);
s5h1411_register_reset(fe);
for (i = 0; i < ARRAY_SIZE(init_tab); i++)
@@ -610,12 +635,17 @@ static int s5h1411_init(struct dvb_frontend *fe)
/* The datasheet says that after initialisation, VSB is default */
state->current_modulation = VSB_8;
+ /* Although the datasheet says it's in VSB, empirical evidence
+ shows problems getting lock on the first tuning request. Make
+ sure we call enable_modulation the first time around */
+ state->first_tune = 1;
+
if (state->config->output_mode == S5H1411_SERIAL_OUTPUT)
/* Serial */
- s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1101);
+ s5h1411_set_serialmode(fe, 1);
else
/* Parallel */
- s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1001);
+ s5h1411_set_serialmode(fe, 0);
s5h1411_set_spectralinversion(fe, state->config->inversion);
s5h1411_set_if_freq(fe, state->config->vsb_if);
@@ -637,28 +667,29 @@ static int s5h1411_read_status(struct dvb_frontend *fe, fe_status_t *status)
*status = 0;
- /* Get the demodulator status */
- reg = (s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2) >> 15)
- & 0x0001;
- if (reg)
- *status |= FE_HAS_LOCK | FE_HAS_CARRIER | FE_HAS_SIGNAL;
+ /* Register F2 bit 15 = Master Lock, removed */
switch (state->current_modulation) {
case QAM_64:
case QAM_256:
reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf0);
- if (reg & 0x100)
- *status |= FE_HAS_VITERBI;
- if (reg & 0x10)
- *status |= FE_HAS_SYNC;
+ if (reg & 0x10) /* QAM FEC Lock */
+ *status |= FE_HAS_SYNC | FE_HAS_LOCK;
+ if (reg & 0x100) /* QAM EQ Lock */
+ *status |= FE_HAS_VITERBI | FE_HAS_CARRIER | FE_HAS_SIGNAL;
+
break;
case VSB_8:
- reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x5e);
- if (reg & 0x0001)
- *status |= FE_HAS_SYNC;
reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2);
- if (reg & 0x1000)
- *status |= FE_HAS_VITERBI;
+ if (reg & 0x1000) /* FEC Lock */
+ *status |= FE_HAS_SYNC | FE_HAS_LOCK;
+ if (reg & 0x2000) /* EQ Lock */
+ *status |= FE_HAS_VITERBI | FE_HAS_CARRIER | FE_HAS_SIGNAL;
+
+ reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x53);
+ if (reg & 0x1) /* AFC Lock */
+ *status |= FE_HAS_SIGNAL;
+
break;
default:
return -EINVAL;
@@ -863,6 +894,7 @@ static struct dvb_frontend_ops s5h1411_ops = {
},
.init = s5h1411_init,
+ .sleep = s5h1411_sleep,
.i2c_gate_ctrl = s5h1411_i2c_gate_ctrl,
.set_frontend = s5h1411_set_frontend,
.get_frontend = s5h1411_get_frontend,
diff --git a/drivers/media/dvb/frontends/s5h1411.h b/drivers/media/dvb/frontends/s5h1411.h
index 7d542bc00c4..45ec0f82989 100644
--- a/drivers/media/dvb/frontends/s5h1411.h
+++ b/drivers/media/dvb/frontends/s5h1411.h
@@ -47,7 +47,7 @@ struct s5h1411_config {
u16 mpeg_timing;
/* IF Freq for QAM and VSB in KHz */
-#define S5H1411_IF_2500 2500
+#define S5H1411_IF_3250 3250
#define S5H1411_IF_3500 3500
#define S5H1411_IF_4000 4000
#define S5H1411_IF_5380 5380
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 78f56944e64..a5ca176a7b0 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -171,11 +171,11 @@ static int dsbr100_start(struct dsbr100_device *radio)
if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
USB_REQ_GET_STATUS,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x00, 0xC7, radio->transfer_buffer, 8, 300)<0 ||
+ 0x00, 0xC7, radio->transfer_buffer, 8, 300) < 0 ||
usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
DSB100_ONOFF,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x01, 0x00, radio->transfer_buffer, 8, 300)<0)
+ 0x01, 0x00, radio->transfer_buffer, 8, 300) < 0)
return -1;
radio->muted=0;
return (radio->transfer_buffer)[0];
@@ -188,11 +188,11 @@ static int dsbr100_stop(struct dsbr100_device *radio)
if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
USB_REQ_GET_STATUS,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x16, 0x1C, radio->transfer_buffer, 8, 300)<0 ||
+ 0x16, 0x1C, radio->transfer_buffer, 8, 300) < 0 ||
usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
DSB100_ONOFF,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x00, 0x00, radio->transfer_buffer, 8, 300)<0)
+ 0x00, 0x00, radio->transfer_buffer, 8, 300) < 0)
return -1;
radio->muted=1;
return (radio->transfer_buffer)[0];
@@ -201,24 +201,24 @@ static int dsbr100_stop(struct dsbr100_device *radio)
/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
static int dsbr100_setfreq(struct dsbr100_device *radio, int freq)
{
- freq = (freq/16*80)/1000+856;
+ freq = (freq / 16 * 80) / 1000 + 856;
if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
DSB100_TUNE,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- (freq>>8)&0x00ff, freq&0xff,
- radio->transfer_buffer, 8, 300)<0 ||
+ (freq >> 8) & 0x00ff, freq & 0xff,
+ radio->transfer_buffer, 8, 300) < 0 ||
usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
USB_REQ_GET_STATUS,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x96, 0xB7, radio->transfer_buffer, 8, 300)<0 ||
+ 0x96, 0xB7, radio->transfer_buffer, 8, 300) < 0 ||
usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
USB_REQ_GET_STATUS,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x00, 0x24, radio->transfer_buffer, 8, 300)<0) {
+ 0x00, 0x24, radio->transfer_buffer, 8, 300) < 0) {
radio->stereo = -1;
return -1;
}
- radio->stereo = ! ((radio->transfer_buffer)[0]&0x01);
+ radio->stereo = !((radio->transfer_buffer)[0] & 0x01);
return (radio->transfer_buffer)[0];
}
@@ -229,10 +229,10 @@ static void dsbr100_getstat(struct dsbr100_device *radio)
if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
USB_REQ_GET_STATUS,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x00 , 0x24, radio->transfer_buffer, 8, 300)<0)
+ 0x00 , 0x24, radio->transfer_buffer, 8, 300) < 0)
radio->stereo = -1;
else
- radio->stereo = ! (radio->transfer_buffer[0]&0x01);
+ radio->stereo = !(radio->transfer_buffer[0] & 0x01);
}
@@ -265,7 +265,7 @@ static int vidioc_querycap(struct file *file, void *priv,
{
strlcpy(v->driver, "dsbr100", sizeof(v->driver));
strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));
- sprintf(v->bus_info, "ISA");
+ sprintf(v->bus_info, "USB");
v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER;
return 0;
@@ -282,9 +282,9 @@ static int vidioc_g_tuner(struct file *file, void *priv,
dsbr100_getstat(radio);
strcpy(v->name, "FM");
v->type = V4L2_TUNER_RADIO;
- v->rangelow = FREQ_MIN*FREQ_MUL;
- v->rangehigh = FREQ_MAX*FREQ_MUL;
- v->rxsubchans = V4L2_TUNER_SUB_MONO|V4L2_TUNER_SUB_STEREO;
+ v->rangelow = FREQ_MIN * FREQ_MUL;
+ v->rangehigh = FREQ_MAX * FREQ_MUL;
+ v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
v->capability = V4L2_TUNER_CAP_LOW;
if(radio->stereo)
v->audmode = V4L2_TUNER_MODE_STEREO;
@@ -309,7 +309,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
struct dsbr100_device *radio = video_drvdata(file);
radio->curfreq = f->frequency;
- if (dsbr100_setfreq(radio, radio->curfreq)==-1)
+ if (dsbr100_setfreq(radio, radio->curfreq) == -1)
dev_warn(&radio->usbdev->dev, "Set frequency failed\n");
return 0;
}
@@ -331,8 +331,7 @@ static int vidioc_queryctrl(struct file *file, void *priv,
for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
if (qc->id && qc->id == radio_qctrl[i].id) {
- memcpy(qc, &(radio_qctrl[i]),
- sizeof(*qc));
+ memcpy(qc, &(radio_qctrl[i]), sizeof(*qc));
return 0;
}
}
@@ -412,19 +411,25 @@ static int vidioc_s_audio(struct file *file, void *priv,
static int usb_dsbr100_open(struct inode *inode, struct file *file)
{
struct dsbr100_device *radio = video_drvdata(file);
+ int retval;
lock_kernel();
radio->users = 1;
radio->muted = 1;
- if (dsbr100_start(radio)<0) {
+ if (dsbr100_start(radio) < 0) {
dev_warn(&radio->usbdev->dev,
"Radio did not start up properly\n");
radio->users = 0;
unlock_kernel();
return -EIO;
}
- dsbr100_setfreq(radio, radio->curfreq);
+
+ retval = dsbr100_setfreq(radio, radio->curfreq);
+
+ if (retval == -1)
+ printk(KERN_WARNING KBUILD_MODNAME ": Set frequency failed\n");
+
unlock_kernel();
return 0;
}
@@ -485,13 +490,20 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
{
struct dsbr100_device *radio;
- if (!(radio = kmalloc(sizeof(struct dsbr100_device), GFP_KERNEL)))
+ radio = kmalloc(sizeof(struct dsbr100_device), GFP_KERNEL);
+
+ if (!radio)
return -ENOMEM;
- if (!(radio->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL))) {
+
+ radio->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL);
+
+ if (!(radio->transfer_buffer)) {
kfree(radio);
return -ENOMEM;
}
- if (!(radio->videodev = video_device_alloc())) {
+ radio->videodev = video_device_alloc();
+
+ if (!(radio->videodev)) {
kfree(radio->transfer_buffer);
kfree(radio);
return -ENOMEM;
@@ -501,7 +513,7 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
radio->removed = 0;
radio->users = 0;
radio->usbdev = interface_to_usbdev(intf);
- radio->curfreq = FREQ_MIN*FREQ_MUL;
+ radio->curfreq = FREQ_MIN * FREQ_MUL;
video_set_drvdata(radio->videodev, radio);
if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr) < 0) {
dev_warn(&intf->dev, "Could not register video device\n");
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index a33717c4800..256cbeffdcb 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -469,16 +469,21 @@ static int usb_amradio_open(struct inode *inode, struct file *file)
{
struct amradio_device *radio = video_get_drvdata(video_devdata(file));
+ lock_kernel();
+
radio->users = 1;
radio->muted = 1;
if (amradio_start(radio) < 0) {
warn("Radio did not start up properly");
radio->users = 0;
+ unlock_kernel();
return -EIO;
}
if (amradio_setfreq(radio, radio->curfreq) < 0)
warn("Set frequency failed");
+
+ unlock_kernel();
return 0;
}
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index 218754b4906..e09b0069323 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -866,7 +866,7 @@ static int __init ar_init(void)
}
printk("video%d: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
- ar->vdev->minor, M32R_IRQ_INT3, freq);
+ ar->vdev->num, M32R_IRQ_INT3, freq);
return 0;
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 5858bf5ff41..9ec4cec2e52 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -4246,7 +4246,7 @@ static int __devinit bttv_register_video(struct bttv *btv)
video_nr[btv->c.nr]) < 0)
goto err;
printk(KERN_INFO "bttv%d: registered device video%d\n",
- btv->c.nr,btv->video_dev->minor & 0x1f);
+ btv->c.nr, btv->video_dev->num);
if (device_create_file(&btv->video_dev->dev,
&dev_attr_card)<0) {
printk(KERN_ERR "bttv%d: device_create_file 'card' "
@@ -4263,7 +4263,7 @@ static int __devinit bttv_register_video(struct bttv *btv)
vbi_nr[btv->c.nr]) < 0)
goto err;
printk(KERN_INFO "bttv%d: registered device vbi%d\n",
- btv->c.nr,btv->vbi_dev->minor & 0x1f);
+ btv->c.nr, btv->vbi_dev->num);
if (!btv->has_radio)
return 0;
@@ -4275,7 +4275,7 @@ static int __devinit bttv_register_video(struct bttv *btv)
radio_nr[btv->c.nr]) < 0)
goto err;
printk(KERN_INFO "bttv%d: registered device radio%d\n",
- btv->c.nr,btv->radio_dev->minor & 0x1f);
+ btv->c.nr, btv->radio_dev->num);
/* all done */
return 0;
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 17aa0adb346..0f930d35146 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -815,7 +815,7 @@ static int init_cqcam(struct parport *port)
}
printk(KERN_INFO "video%d: Colour QuickCam found on %s\n",
- qcam->vdev.minor, qcam->pport->name);
+ qcam->vdev.num, qcam->pport->name);
qcams[num_cams++] = qcam;
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index fc9497bdd32..a8c068e1de1 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -2059,10 +2059,10 @@ static void cafe_dfs_cam_setup(struct cafe_camera *cam)
if (!cafe_dfs_root)
return;
- sprintf(fname, "regs-%d", cam->v4ldev.minor);
+ sprintf(fname, "regs-%d", cam->v4ldev.num);
cam->dfs_regs = debugfs_create_file(fname, 0444, cafe_dfs_root,
cam, &cafe_dfs_reg_ops);
- sprintf(fname, "cam-%d", cam->v4ldev.minor);
+ sprintf(fname, "cam-%d", cam->v4ldev.num);
cam->dfs_cam_regs = debugfs_create_file(fname, 0444, cafe_dfs_root,
cam, &cafe_dfs_cam_ops);
}
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 1798b779a25..16c094f7785 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -1347,7 +1347,7 @@ static void create_proc_cpia_cam(struct cam_data *cam)
if (!cpia_proc_root || !cam)
return;
- snprintf(name, sizeof(name), "video%d", cam->vdev.minor);
+ snprintf(name, sizeof(name), "video%d", cam->vdev.num);
ent = create_proc_entry(name, S_IFREG|S_IRUGO|S_IWUSR, cpia_proc_root);
if (!ent)
@@ -1372,7 +1372,7 @@ static void destroy_proc_cpia_cam(struct cam_data *cam)
if (!cam || !cam->proc_entry)
return;
- snprintf(name, sizeof(name), "video%d", cam->vdev.minor);
+ snprintf(name, sizeof(name), "video%d", cam->vdev.num);
remove_proc_entry(name, cpia_proc_root);
cam->proc_entry = NULL;
}
@@ -4005,7 +4005,7 @@ void cpia_unregister_camera(struct cam_data *cam)
}
#ifdef CONFIG_PROC_FS
- DBG("destroying /proc/cpia/video%d\n", cam->vdev.minor);
+ DBG("destroying /proc/cpia/video%d\n", cam->vdev.num);
destroy_proc_cpia_cam(cam);
#endif
if (!cam->open_count) {
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 897e8d1a5c3..1c6bd633f19 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -1973,7 +1973,7 @@ void cpia2_unregister_camera(struct camera_data *cam)
} else {
LOG("/dev/video%d removed while open, "
"deferring video_unregister_device\n",
- cam->vdev->minor);
+ cam->vdev->num);
}
}
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 085121c2b47..7a1a7830a6b 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -613,6 +613,7 @@ static int __devinit cx18_probe(struct pci_dev *dev,
const struct pci_device_id *pci_id)
{
int retval = 0;
+ int i;
int vbi_buf_size;
u32 devtype;
struct cx18 *cx;
@@ -698,7 +699,8 @@ static int __devinit cx18_probe(struct pci_dev *dev,
/* active i2c */
CX18_DEBUG_INFO("activating i2c...\n");
- if (init_cx18_i2c(cx)) {
+ retval = init_cx18_i2c(cx);
+ if (retval) {
CX18_ERR("Could not initialize i2c\n");
goto free_map;
}
@@ -836,8 +838,11 @@ err:
CX18_ERR("Error %d on initialization\n", retval);
cx18_log_statistics(cx);
- kfree(cx18_cards[cx18_cards_active]);
- cx18_cards[cx18_cards_active] = NULL;
+ i = cx->num;
+ spin_lock(&cx18_cards_lock);
+ kfree(cx18_cards[i]);
+ cx18_cards[i] = NULL;
+ spin_unlock(&cx18_cards_lock);
return retval;
}
diff --git a/drivers/media/video/cx18/cx18-io.h b/drivers/media/video/cx18/cx18-io.h
index 197d4fbd9f9..287a5e8bf67 100644
--- a/drivers/media/video/cx18/cx18-io.h
+++ b/drivers/media/video/cx18/cx18-io.h
@@ -39,7 +39,7 @@ static inline void cx18_io_delay(struct cx18 *cx)
/* Statistics gathering */
static inline
-void cx18_log_write_retries(struct cx18 *cx, int i, const void *addr)
+void cx18_log_write_retries(struct cx18 *cx, int i, const void __iomem *addr)
{
if (i > CX18_MAX_MMIO_RETRIES)
i = CX18_MAX_MMIO_RETRIES;
@@ -48,7 +48,7 @@ void cx18_log_write_retries(struct cx18 *cx, int i, const void *addr)
}
static inline
-void cx18_log_read_retries(struct cx18 *cx, int i, const void *addr)
+void cx18_log_read_retries(struct cx18 *cx, int i, const void __iomem *addr)
{
if (i > CX18_MAX_MMIO_RETRIES)
i = CX18_MAX_MMIO_RETRIES;
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 0c8e7542cf6..e5ff7705b7a 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -200,16 +200,18 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
/* Initialize v4l2 variables and register v4l2 devices */
int cx18_streams_setup(struct cx18 *cx)
{
- int type;
+ int type, ret;
/* Setup V4L2 Devices */
for (type = 0; type < CX18_MAX_STREAMS; type++) {
/* Prepare device */
- if (cx18_prep_dev(cx, type))
+ ret = cx18_prep_dev(cx, type);
+ if (ret < 0)
break;
/* Allocate Stream */
- if (cx18_stream_alloc(&cx->streams[type]))
+ ret = cx18_stream_alloc(&cx->streams[type]);
+ if (ret < 0)
break;
}
if (type == CX18_MAX_STREAMS)
@@ -217,14 +219,14 @@ int cx18_streams_setup(struct cx18 *cx)
/* One or more streams could not be initialized. Clean 'em all up. */
cx18_streams_cleanup(cx, 0);
- return -ENOMEM;
+ return ret;
}
static int cx18_reg_dev(struct cx18 *cx, int type)
{
struct cx18_stream *s = &cx->streams[type];
int vfl_type = cx18_stream_info[type].vfl_type;
- int num;
+ int num, ret;
/* TODO: Shouldn't this be a VFL_TYPE_TRANSPORT or something?
* We need a VFL_TYPE_TS defined.
@@ -233,9 +235,10 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
/* just return if no DVB is supported */
if ((cx->card->hw_all & CX18_HW_DVB) == 0)
return 0;
- if (cx18_dvb_register(s) < 0) {
+ ret = cx18_dvb_register(s);
+ if (ret < 0) {
CX18_ERR("DVB failed to register\n");
- return -EINVAL;
+ return ret;
}
}
@@ -252,12 +255,13 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
}
/* Register device. First try the desired minor, then any free one. */
- if (video_register_device(s->v4l2dev, vfl_type, num)) {
+ ret = video_register_device(s->v4l2dev, vfl_type, num);
+ if (ret < 0) {
CX18_ERR("Couldn't register v4l2 device for %s kernel number %d\n",
s->name, num);
video_device_release(s->v4l2dev);
s->v4l2dev = NULL;
- return -ENOMEM;
+ return ret;
}
num = s->v4l2dev->num;
@@ -290,18 +294,22 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
int cx18_streams_register(struct cx18 *cx)
{
int type;
- int err = 0;
+ int err;
+ int ret = 0;
/* Register V4L2 devices */
- for (type = 0; type < CX18_MAX_STREAMS; type++)
- err |= cx18_reg_dev(cx, type);
+ for (type = 0; type < CX18_MAX_STREAMS; type++) {
+ err = cx18_reg_dev(cx, type);
+ if (err && ret == 0)
+ ret = err;
+ }
- if (err == 0)
+ if (ret == 0)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
cx18_streams_cleanup(cx, 1);
- return -ENOMEM;
+ return ret;
}
/* Unregister v4l2 devices */
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 395c11fa47c..00831f3ef8f 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1815,7 +1815,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
cx23885_mc417_init(dev);
printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
- dev->name, dev->v4l_device->minor & 0x1f);
+ dev->name, dev->v4l_device->num);
return 0;
}
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index ab3110d6046..c742a10be5c 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -1543,7 +1543,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
goto fail_unreg;
}
printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n",
- dev->name, dev->video_dev->minor & 0x1f);
+ dev->name, dev->video_dev->num);
/* initial device configuration */
mutex_lock(&dev->lock);
cx23885_set_tvnorm(dev, dev->tvnorm);
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index e7136975430..078be631955 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -1285,7 +1285,7 @@ static int blackbird_register_video(struct cx8802_dev *dev)
return err;
}
printk(KERN_INFO "%s/2: registered device video%d [mpeg]\n",
- dev->core->name,dev->mpeg_dev->minor & 0x1f);
+ dev->core->name, dev->mpeg_dev->num);
return 0;
}
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index fbc224f46e0..5bcbb4cc7c2 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -3044,8 +3044,8 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
memcpy(&core->board, &cx88_boards[core->boardnr], sizeof(core->board));
- if (!core->board.num_frontends)
- core->board.num_frontends=1;
+ if (!core->board.num_frontends && (core->board.mpeg & CX88_MPEG_DVB))
+ core->board.num_frontends = 1;
info_printk(core, "subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n",
pci->subsystem_vendor, pci->subsystem_device, core->board.name,
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 6968ab0181a..cf6c30d4e54 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -789,7 +789,7 @@ static int dvb_register(struct cx8802_dev *dev)
if (fe0->dvb.frontend)
fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
if (attach_xc3028(0x61, dev) < 0)
- return -EINVAL;
+ goto frontend_detach;
break;
case CX88_BOARD_PCHDTV_HD3000:
fe0->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000,
@@ -1058,7 +1058,6 @@ static int dvb_register(struct cx8802_dev *dev)
goto frontend_detach;
core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
-
}
}
break;
@@ -1110,10 +1109,7 @@ static int dvb_register(struct cx8802_dev *dev)
&dev->pci->dev, adapter_nr, mfe_shared);
frontend_detach:
- if (fe0->dvb.frontend) {
- dvb_frontend_detach(fe0->dvb.frontend);
- fe0->dvb.frontend = NULL;
- }
+ videobuf_dvb_dealloc_frontends(&dev->frontends);
return -EINVAL;
}
@@ -1246,8 +1242,11 @@ fail_core:
static int cx8802_dvb_remove(struct cx8802_driver *drv)
{
+ struct cx88_core *core = drv->core;
struct cx8802_dev *dev = drv->core->dvbdev;
+ dprintk( 1, "%s\n", __func__);
+
videobuf_dvb_unregister_bus(&dev->frontends);
vp3054_i2c_remove(dev);
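
In cx88-dvb the failure path of dvb_register() now tears down every frontend that was allocated on dev->frontends instead of detaching only fe0, and the xc3028 attach failure jumps to that same label rather than returning early. A condensed sketch of the resulting shape (attach details elided; only the helper and the frontends member come from the hunk, the rest is illustrative):

static int dvb_register_sketch(struct cx8802_dev *dev)
{
        /* ... board-specific frontend attach code ... */
        if (attach_xc3028(0x61, dev) < 0)
                goto frontend_detach;   /* was: return -EINVAL */

        /* ... register the bus on success ... */
        return 0;

frontend_detach:
        /* Frees every frontend queued on dev->frontends.felist, so
         * multi-frontend boards do not leak on a partial failure. */
        videobuf_dvb_dealloc_frontends(&dev->frontends);
        return -EINVAL;
}
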
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 01de2300709..1ab691d2069 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -116,8 +116,10 @@ static int detach_inform(struct i2c_client *client)
void cx88_call_i2c_clients(struct cx88_core *core, unsigned int cmd, void *arg)
{
+#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
struct videobuf_dvb_frontends *f = &core->dvbdev->frontends;
struct videobuf_dvb_frontend *fe = NULL;
+#endif
if (0 != core->i2c_rc)
return;
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 6df5cf31418..a1c435b4b1c 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -768,8 +768,11 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
{
struct cx8802_dev *dev;
struct cx88_core *core;
+ int err;
+#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
struct videobuf_dvb_frontend *demod;
- int err,i;
+ int i;
+#endif
/* general setup */
core = cx88_core_get(pci_dev);
@@ -782,11 +785,6 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
if (!core->board.mpeg)
goto fail_core;
- if (!core->board.num_frontends) {
- printk(KERN_ERR "%s() .num_frontends should be non-zero, err = %d\n", __func__, err);
- goto fail_core;
- }
-
err = -ENOMEM;
dev = kzalloc(sizeof(*dev),GFP_KERNEL);
if (NULL == dev)
@@ -801,10 +799,12 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
INIT_LIST_HEAD(&dev->drvlist);
list_add_tail(&dev->devlist,&cx8802_devlist);
+#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
mutex_init(&dev->frontends.lock);
INIT_LIST_HEAD(&dev->frontends.felist);
- printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__, core->board.num_frontends);
+ if (core->board.num_frontends)
+ printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__, core->board.num_frontends);
for (i = 1; i <= core->board.num_frontends; i++) {
demod = videobuf_dvb_alloc_frontend(&dev->frontends, i);
@@ -814,6 +814,7 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
goto fail_free;
}
}
+#endif
/* Maintain a reference so cx88-video can query the 8802 device. */
core->dvbdev = dev;
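
cx8802_probe() now initializes and populates the videobuf-dvb frontend list only when cx88 DVB support is built in or as a module; blackbird-only configurations skip the allocation entirely. The guard, condensed from the hunk above (error handling trimmed):

#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
        struct videobuf_dvb_frontend *demod;
        int i;

        mutex_init(&dev->frontends.lock);
        INIT_LIST_HEAD(&dev->frontends.felist);

        for (i = 1; i <= core->board.num_frontends; i++) {
                demod = videobuf_dvb_alloc_frontend(&dev->frontends, i);
                if (demod == NULL)
                        goto fail_free;         /* as in the hunk */
        }
#endif
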
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 3904b73f52e..61265fd04d5 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1911,7 +1911,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
goto fail_unreg;
}
printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n",
- core->name,dev->video_dev->minor & 0x1f);
+ core->name, dev->video_dev->num);
dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi");
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
@@ -1922,7 +1922,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
goto fail_unreg;
}
printk(KERN_INFO "%s/0: registered device vbi%d\n",
- core->name,dev->vbi_dev->minor & 0x1f);
+ core->name, dev->vbi_dev->num);
if (core->board.radio.type == CX88_RADIO) {
dev->radio_dev = cx88_vdev_init(core,dev->pci,
@@ -1935,7 +1935,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
goto fail_unreg;
}
printk(KERN_INFO "%s/0: registered device radio%d\n",
- core->name,dev->radio_dev->minor & 0x1f);
+ core->name, dev->radio_dev->num);
}
/* everything worked */
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index c53649e5315..a1ab2ef4557 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -2042,7 +2042,7 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
goto fail_unreg;
}
em28xx_info("Registered radio device as /dev/radio%d\n",
- dev->radio_dev->minor & 0x1f);
+ dev->radio_dev->num);
}
/* init video dma queues */
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 7a85c41b0ee..9d0ef96c23f 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -588,7 +588,7 @@ static int et61x251_stream_interrupt(struct et61x251_device* cam)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "URB timeout reached. The camera is misconfigured. To "
"use it, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -EIO;
}
@@ -1195,7 +1195,7 @@ static void et61x251_release_resources(struct kref *kref)
cam = container_of(kref, struct et61x251_device, kref);
- DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor);
+ DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
video_set_drvdata(cam->v4ldev, NULL);
video_unregister_device(cam->v4ldev);
usb_put_dev(cam->usbdev);
@@ -1237,7 +1237,7 @@ static int et61x251_open(struct inode* inode, struct file* filp)
if (cam->users) {
DBG(2, "Device /dev/video%d is already in use",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
DBG(3, "Simultaneous opens are not supported");
if ((filp->f_flags & O_NONBLOCK) ||
(filp->f_flags & O_NDELAY)) {
@@ -1280,7 +1280,7 @@ static int et61x251_open(struct inode* inode, struct file* filp)
cam->frame_count = 0;
et61x251_empty_framequeues(cam);
- DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor);
+ DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
out:
mutex_unlock(&cam->open_mutex);
@@ -1304,7 +1304,7 @@ static int et61x251_release(struct inode* inode, struct file* filp)
cam->users--;
wake_up_interruptible_nr(&cam->wait_open, 1);
- DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor);
+ DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
kref_put(&cam->kref, et61x251_release_resources);
@@ -1845,7 +1845,7 @@ et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
"use the camera, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -EIO;
}
@@ -1858,7 +1858,7 @@ et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
"use the camera, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -ENOMEM;
}
@@ -2068,7 +2068,7 @@ et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
"use the camera, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -EIO;
}
@@ -2080,7 +2080,7 @@ et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
"use the camera, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -ENOMEM;
}
@@ -2128,7 +2128,7 @@ et61x251_vidioc_s_jpegcomp(struct et61x251_device* cam, void __user * arg)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
"problems. To use the camera, close and open "
- "/dev/video%d again.", cam->v4ldev->minor);
+ "/dev/video%d again.", cam->v4ldev->num);
return -EIO;
}
@@ -2605,7 +2605,7 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor);
+ DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
cam->module_param.force_munmap = force_munmap[dev_nr];
cam->module_param.frame_timeout = frame_timeout[dev_nr];
@@ -2658,7 +2658,7 @@ static void et61x251_usb_disconnect(struct usb_interface* intf)
if (cam->users) {
DBG(2, "Device /dev/video%d is open! Deregistration and "
"memory deallocation are deferred.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
cam->state |= DEV_MISCONFIGURED;
et61x251_stop_transfer(cam);
cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index aeaa13f6cb3..d36485023b6 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1211,6 +1211,10 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std);
+ /* Turn off the output signal. The mpeg decoder is not yet
+ active so without this you would get a green image until the
+ mpeg decoder becomes active. */
+ ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL);
}
/* clear interrupt mask, effectively disabling interrupts */
@@ -1330,6 +1334,10 @@ int ivtv_init_on_first_open(struct ivtv *itv)
ivtv_s_frequency(NULL, &fh, &vf);
if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) {
+ /* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes
+ the mpeg decoder so now the saa7127 receives a proper
+ signal. */
+ ivtv_saa7127(itv, VIDIOC_STREAMON, NULL);
ivtv_init_mpeg_decoder(itv);
}
ivtv_s_std(NULL, &fh, &itv->tuner_std);
@@ -1366,6 +1374,10 @@ static void ivtv_remove(struct pci_dev *pci_dev)
/* Stop all decoding */
IVTV_DEBUG_INFO("Stopping decoding\n");
+
+ /* Turn off the TV-out */
+ if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
+ ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL);
if (atomic_read(&itv->decoding) > 0) {
int type;
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 24700c211d5..41dbbe9621a 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -726,6 +726,7 @@ int ivtv_saa7127(struct ivtv *itv, unsigned int cmd, void *arg)
{
return ivtv_call_i2c_client(itv, IVTV_SAA7127_I2C_ADDR, cmd, arg);
}
+EXPORT_SYMBOL(ivtv_saa7127);
int ivtv_saa717x(struct ivtv *itv, unsigned int cmd, void *arg)
{
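
Exporting ivtv_saa7127() lets ivtvfb, which is built as a separate module, drive the saa7127 TV encoder itself; the ivtvfb hunk further down includes ivtv-i2c.h and uses the helper to stream the encoder on and off while blanking. A short sketch of that use, with the wrapper name being illustrative:

/* In ivtvfb (separate module): toggle the TV encoder via the newly
 * exported helper. */
static void tvout_blank_sketch(struct ivtv *itv, int blank)
{
        ivtv_saa7127(itv, blank ? VIDIOC_STREAMOFF : VIDIOC_STREAMON, NULL);
}
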
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 208fb54842f..4bae38d21ef 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1756,12 +1756,12 @@ static int ivtv_default(struct file *file, void *fh, int cmd, void *arg)
return 0;
}
-static int ivtv_serialized_ioctl(struct ivtv *itv, struct inode *inode, struct file *filp,
+static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct video_device *vfd = video_devdata(filp);
struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
- int ret;
+ long ret;
/* Filter dvb ioctls that cannot be handled by the v4l ioctl framework */
switch (cmd) {
@@ -1830,20 +1830,19 @@ static int ivtv_serialized_ioctl(struct ivtv *itv, struct inode *inode, struct f
if (ivtv_debug & IVTV_DBGFLG_IOCTL)
vfd->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG;
- ret = video_ioctl2(inode, filp, cmd, arg);
+ ret = __video_ioctl2(filp, cmd, arg);
vfd->debug = 0;
return ret;
}
-int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
struct ivtv *itv = id->itv;
- int res;
+ long res;
mutex_lock(&itv->serialize_lock);
- res = ivtv_serialized_ioctl(itv, inode, filp, cmd, arg);
+ res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
mutex_unlock(&itv->serialize_lock);
return res;
}
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.h b/drivers/media/video/ivtv/ivtv-ioctl.h
index 70188588b4f..58f003412af 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.h
+++ b/drivers/media/video/ivtv/ivtv-ioctl.h
@@ -30,7 +30,6 @@ void ivtv_set_funcs(struct video_device *vdev);
int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std);
int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
int ivtv_s_input(struct file *file, void *fh, unsigned int inp);
-int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg);
+long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#endif
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 5bbf31e3930..9b7aa79eb26 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -48,7 +48,7 @@ static const struct file_operations ivtv_v4l2_enc_fops = {
.read = ivtv_v4l2_read,
.write = ivtv_v4l2_write,
.open = ivtv_v4l2_open,
- .ioctl = ivtv_v4l2_ioctl,
+ .unlocked_ioctl = ivtv_v4l2_ioctl,
.compat_ioctl = v4l_compat_ioctl32,
.release = ivtv_v4l2_close,
.poll = ivtv_v4l2_enc_poll,
@@ -59,7 +59,7 @@ static const struct file_operations ivtv_v4l2_dec_fops = {
.read = ivtv_v4l2_read,
.write = ivtv_v4l2_write,
.open = ivtv_v4l2_open,
- .ioctl = ivtv_v4l2_ioctl,
+ .unlocked_ioctl = ivtv_v4l2_ioctl,
.compat_ioctl = v4l_compat_ioctl32,
.release = ivtv_v4l2_close,
.poll = ivtv_v4l2_dec_poll,
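
Moving ivtv from .ioctl to .unlocked_ioctl drops the implicit big-kernel-lock serialization, so the handler now returns long and takes only (file, cmd, arg), with the driver's own serialize_lock providing mutual exclusion before handing off to __video_ioctl2(). Condensed from the hunks above (the other file_operations members are unchanged):

long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct ivtv_open_id *id = filp->private_data;
        struct ivtv *itv = id->itv;
        long res;

        /* Driver-private lock replaces the BKL that .ioctl implied. */
        mutex_lock(&itv->serialize_lock);
        res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
        mutex_unlock(&itv->serialize_lock);
        return res;
}

static const struct file_operations ivtv_v4l2_enc_fops_sketch = {
        /* .read/.write/.open/.release/.poll/.compat_ioctl as before */
        .unlocked_ioctl = ivtv_v4l2_ioctl,
};
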
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 8a4a150b12f..921e281876f 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -48,6 +48,7 @@
#endif
#include "ivtv-driver.h"
+#include "ivtv-i2c.h"
#include "ivtv-udma.h"
#include "ivtv-mailbox.h"
@@ -894,11 +895,16 @@ static int ivtvfb_blank(int blank_mode, struct fb_info *info)
switch (blank_mode) {
case FB_BLANK_UNBLANK:
ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 1);
+ ivtv_saa7127(itv, VIDIOC_STREAMON, NULL);
break;
case FB_BLANK_NORMAL:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_VSYNC_SUSPEND:
+ ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
+ ivtv_saa7127(itv, VIDIOC_STREAMON, NULL);
+ break;
case FB_BLANK_POWERDOWN:
+ ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL);
ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
break;
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.c b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
index a1252d673b4..273d2a1aa22 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
@@ -402,6 +402,10 @@ static int pvr2_encoder_prep_config(struct pvr2_hdw *hdw)
ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 0,3,0,0);
ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4,15,0,0,0);
+ /* prevent the PTSs from slowly drifting away in the generated
+ MPEG stream */
+ ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC, 2, 4, 1);
+
return ret;
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 94265bd3d92..5b81ba46964 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -60,7 +60,6 @@ static struct pvr2_hdw *unit_pointers[PVR_NUM] = {[ 0 ... PVR_NUM-1 ] = NULL};
static DEFINE_MUTEX(pvr2_unit_mtx);
static int ctlchg;
-static int initusbreset = 1;
static int procreload;
static int tuner[PVR_NUM] = { [0 ... PVR_NUM-1] = -1 };
static int tolerance[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 };
@@ -71,8 +70,6 @@ module_param(ctlchg, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ctlchg, "0=optimize ctl change 1=always accept new ctl value");
module_param(init_pause_msec, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(init_pause_msec, "hardware initialization settling delay");
-module_param(initusbreset, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(initusbreset, "Do USB reset device on probe");
module_param(procreload, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(procreload,
"Attempt init failure recovery with firmware reload");
@@ -1967,9 +1964,6 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
}
hdw->fw1_state = FW1_STATE_OK;
- if (initusbreset) {
- pvr2_hdw_device_reset(hdw);
- }
if (!pvr2_hdw_dev_ok(hdw)) return;
for (idx = 0; idx < hdw->hdw_desc->client_modules.cnt; idx++) {
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index f048d80b77e..97ed9595799 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -168,7 +168,7 @@ static const char *get_v4l_name(int v4l_type)
* This is part of Video 4 Linux API. The procedure handles ioctl() calls.
*
*/
-static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
+static int __pvr2_v4l2_do_ioctl(struct file *file,
unsigned int cmd, void *arg)
{
struct pvr2_v4l2_fh *fh = file->private_data;
@@ -863,8 +863,8 @@ static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
#endif
default :
- ret = v4l_compat_translate_ioctl(inode,file,cmd,
- arg,pvr2_v4l2_do_ioctl);
+ ret = v4l_compat_translate_ioctl(file, cmd,
+ arg, __pvr2_v4l2_do_ioctl);
}
pvr2_hdw_commit_ctl(hdw);
@@ -890,10 +890,15 @@ static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
return ret;
}
+static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, void *arg)
+{
+ return __pvr2_v4l2_do_ioctl(file, cmd, arg);
+}
static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
{
- int minor_id = dip->devbase.minor;
+ int num = dip->devbase.num;
struct pvr2_hdw *hdw = dip->v4lp->channel.mc_head->hdw;
enum pvr2_config cfg = dip->config;
int v4l_type = dip->v4l_type;
@@ -909,7 +914,7 @@ static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
video_unregister_device(&dip->devbase);
printk(KERN_INFO "pvrusb2: unregistered device %s%u [%s]\n",
- get_v4l_name(v4l_type),minor_id & 0x1f,
+ get_v4l_name(v4l_type), num,
pvr2_config_get_name(cfg));
}
@@ -1310,7 +1315,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
}
printk(KERN_INFO "pvrusb2: registered device %s%u [%s]\n",
- get_v4l_name(dip->v4l_type),dip->devbase.minor & 0x1f,
+ get_v4l_name(dip->v4l_type), dip->devbase.num,
pvr2_config_get_name(dip->config));
pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index ab28389b4cd..f3897a3fdb7 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -1795,7 +1795,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
goto err;
}
else {
- PWC_INFO("Registered as /dev/video%d.\n", pdev->vdev->minor & 0x3F);
+ PWC_INFO("Registered as /dev/video%d.\n", pdev->vdev->num);
}
/* occupy slot */
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index b686bfabbde..24918445294 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -996,7 +996,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
goto fail4;
}
printk(KERN_INFO "%s: registered device video%d [v4l2]\n",
- dev->name,dev->video_dev->minor & 0x1f);
+ dev->name, dev->video_dev->num);
dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi");
@@ -1005,7 +1005,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
if (err < 0)
goto fail4;
printk(KERN_INFO "%s: registered device vbi%d\n",
- dev->name,dev->vbi_dev->minor & 0x1f);
+ dev->name, dev->vbi_dev->num);
if (card_has_radio(dev)) {
dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio");
@@ -1014,7 +1014,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
if (err < 0)
goto fail4;
printk(KERN_INFO "%s: registered device radio%d\n",
- dev->name,dev->radio_dev->minor & 0x1f);
+ dev->name, dev->radio_dev->num);
}
/* everything worked */
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 9a8766a78a0..7f40511bcc0 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -534,7 +534,7 @@ static int empress_init(struct saa7134_dev *dev)
return err;
}
printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
- dev->name,dev->empress_dev->minor & 0x1f);
+ dev->name, dev->empress_dev->num);
videobuf_queue_sg_init(&dev->empress_tsq, &saa7134_ts_qops,
&dev->pci->dev, &dev->slock,
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index ae3949180c4..044a2e94c34 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -1412,7 +1412,7 @@ static int se401_probe(struct usb_interface *intf,
return -EIO;
}
dev_info(&intf->dev, "registered new video device: video%d\n",
- se401->vdev.minor);
+ se401->vdev.num);
usb_set_intfdata (intf, se401);
return 0;
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 20e30bd9364..fcd2b62f92c 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -1008,7 +1008,7 @@ static int sn9c102_stream_interrupt(struct sn9c102_device* cam)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "URB timeout reached. The camera is misconfigured. "
"To use it, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -EIO;
}
@@ -1734,7 +1734,7 @@ static void sn9c102_release_resources(struct kref *kref)
cam = container_of(kref, struct sn9c102_device, kref);
- DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor);
+ DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
video_set_drvdata(cam->v4ldev, NULL);
video_unregister_device(cam->v4ldev);
usb_put_dev(cam->usbdev);
@@ -1792,7 +1792,7 @@ static int sn9c102_open(struct inode* inode, struct file* filp)
if (cam->users) {
DBG(2, "Device /dev/video%d is already in use",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
DBG(3, "Simultaneous opens are not supported");
/*
open() must follow the open flags and should block
@@ -1845,7 +1845,7 @@ static int sn9c102_open(struct inode* inode, struct file* filp)
cam->frame_count = 0;
sn9c102_empty_framequeues(cam);
- DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor);
+ DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
out:
mutex_unlock(&cam->open_mutex);
@@ -1870,7 +1870,7 @@ static int sn9c102_release(struct inode* inode, struct file* filp)
cam->users--;
wake_up_interruptible_nr(&cam->wait_open, 1);
- DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor);
+ DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
kref_put(&cam->kref, sn9c102_release_resources);
@@ -2432,7 +2432,7 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
"use the camera, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -EIO;
}
@@ -2445,7 +2445,7 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
"use the camera, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -ENOMEM;
}
@@ -2689,7 +2689,7 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
"use the camera, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -EIO;
}
@@ -2701,7 +2701,7 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
"use the camera, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -ENOMEM;
}
@@ -2748,7 +2748,7 @@ sn9c102_vidioc_s_jpegcomp(struct sn9c102_device* cam, void __user * arg)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
"problems. To use the camera, close and open "
- "/dev/video%d again.", cam->v4ldev->minor);
+ "/dev/video%d again.", cam->v4ldev->num);
return -EIO;
}
@@ -3348,7 +3348,7 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor);
+ DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
video_set_drvdata(cam->v4ldev, cam);
cam->module_param.force_munmap = force_munmap[dev_nr];
@@ -3402,7 +3402,7 @@ static void sn9c102_usb_disconnect(struct usb_interface* intf)
if (cam->users) {
DBG(2, "Device /dev/video%d is open! Deregistration and "
"memory deallocation are deferred.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
cam->state |= DEV_MISCONFIGURED;
sn9c102_stop_transfer(cam);
cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index edaea496451..e9eb6d754d5 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -1331,7 +1331,7 @@ static int stk_register_video_device(struct stk_camera *dev)
STK_ERROR("v4l registration failed\n");
else
STK_INFO("Syntek USB2.0 Camera is now controlling video device"
- " /dev/video%d\n", dev->vdev.minor);
+ " /dev/video%d\n", dev->vdev.num);
return err;
}
@@ -1426,7 +1426,7 @@ static void stk_camera_disconnect(struct usb_interface *interface)
stk_remove_sysfs_files(&dev->vdev);
STK_INFO("Syntek USB2.0 Camera release resources "
- "video device /dev/video%d\n", dev->vdev.minor);
+ "video device /dev/video%d\n", dev->vdev.num);
video_unregister_device(&dev->vdev);
}
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index 9c549d93599..328c41b1517 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -1470,7 +1470,8 @@ static int stv680_probe (struct usb_interface *intf, const struct usb_device_id
retval = -EIO;
goto error_vdev;
}
- PDEBUG (0, "STV(i): registered new video device: video%d", stv680->vdev->minor);
+ PDEBUG(0, "STV(i): registered new video device: video%d",
+ stv680->vdev->num);
usb_set_intfdata (intf, stv680);
retval = stv680_create_sysfs_files(stv680->vdev);
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index 07cd87d16f6..7c575bb8184 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -1059,7 +1059,7 @@ int usbvideo_RegisterVideoDevice(struct uvd *uvd)
dev_info(&uvd->dev->dev, "%s on /dev/video%d: canvas=%s videosize=%s\n",
(uvd->handle != NULL) ? uvd->handle->drvName : "???",
- uvd->vdev.minor, tmp2, tmp1);
+ uvd->vdev.num, tmp2, tmp1);
usb_get_dev(uvd->dev);
return 0;
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 7a127d6bfde..8e2d58bec48 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -877,7 +877,8 @@ vicam_probe( struct usb_interface *intf, const struct usb_device_id *id)
return -EIO;
}
- printk(KERN_INFO "ViCam webcam driver now controlling video device %d\n",cam->vdev.minor);
+ printk(KERN_INFO "ViCam webcam driver now controlling video device %d\n",
+ cam->vdev.num);
usb_set_intfdata (intf, cam);
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index 92427fdc145..9907b9aff2b 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -236,7 +236,7 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
sizeof(struct i2c_client));
sprintf(usbvision->i2c_adap.name + strlen(usbvision->i2c_adap.name),
- " #%d", usbvision->vdev->minor & 0x1f);
+ " #%d", usbvision->vdev->num);
PDEBUG(DBG_I2C,"Adaptername: %s", usbvision->i2c_adap.name);
usbvision->i2c_adap.dev.parent = &usbvision->dev->dev;
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 77aeb39b275..d185b57fdcd 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1440,7 +1440,7 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
// vbi Device:
if (usbvision->vbi) {
PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]",
- usbvision->vbi->minor & 0x1f);
+ usbvision->vbi->num);
if (usbvision->vbi->minor != -1) {
video_unregister_device(usbvision->vbi);
} else {
@@ -1452,7 +1452,7 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
// Radio Device:
if (usbvision->rdev) {
PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]",
- usbvision->rdev->minor & 0x1f);
+ usbvision->rdev->num);
if (usbvision->rdev->minor != -1) {
video_unregister_device(usbvision->rdev);
} else {
@@ -1464,7 +1464,7 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
// Video Device:
if (usbvision->vdev) {
PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]",
- usbvision->vdev->minor & 0x1f);
+ usbvision->vdev->num);
if (usbvision->vdev->minor != -1) {
video_unregister_device(usbvision->vdev);
} else {
@@ -1490,7 +1490,7 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
goto err_exit;
}
printk(KERN_INFO "USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]\n",
- usbvision->nr,usbvision->vdev->minor & 0x1f);
+ usbvision->nr, usbvision->vdev->num);
// Radio Device:
if (usbvision_device_data[usbvision->DevModel].Radio) {
@@ -1507,7 +1507,7 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
goto err_exit;
}
printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]\n",
- usbvision->nr, usbvision->rdev->minor & 0x1f);
+ usbvision->nr, usbvision->rdev->num);
}
// vbi Device:
if (usbvision_device_data[usbvision->DevModel].vbi) {
@@ -1523,7 +1523,7 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
goto err_exit;
}
printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)\n",
- usbvision->nr,usbvision->vbi->minor & 0x1f);
+ usbvision->nr, usbvision->vbi->num);
}
// all done
return 0;
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 78e4c4e09d8..758dfefaba8 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -464,7 +464,7 @@ static int uvc_v4l2_release(struct inode *inode, struct file *file)
return 0;
}
-static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
+static int __uvc_v4l2_do_ioctl(struct file *file,
unsigned int cmd, void *arg)
{
struct video_device *vdev = video_devdata(file);
@@ -978,8 +978,8 @@ static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
return uvc_xu_ctrl_query(video, arg, 1);
default:
- if ((ret = v4l_compat_translate_ioctl(inode, file, cmd, arg,
- uvc_v4l2_do_ioctl)) == -ENOIOCTLCMD)
+ if ((ret = v4l_compat_translate_ioctl(file, cmd, arg,
+ __uvc_v4l2_do_ioctl)) == -ENOIOCTLCMD)
uvc_trace(UVC_TRACE_IOCTL, "Unknown ioctl 0x%08x\n",
cmd);
return ret;
@@ -988,6 +988,12 @@ static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
return ret;
}
+static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, void *arg)
+{
+ return __uvc_v4l2_do_ioctl(file, cmd, arg);
+}
+
static int uvc_v4l2_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
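
pvrusb2 and uvcvideo make the same transitional split: the real handler loses the inode argument, and the old (inode, file, ...) prototype survives only as a thin wrapper, so v4l_compat_translate_ioctl() can be handed the new inode-less callback while the existing .ioctl plumbing keeps compiling. The pattern, condensed from the uvc hunk:

static int __uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
        /* ... real ioctl handling; the V4L1 compat fallback is now
         * v4l_compat_translate_ioctl(file, cmd, arg, __uvc_v4l2_do_ioctl) ... */
        return 0;
}

/* Old-style entry point kept as a wrapper; the inode is simply dropped. */
static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
                             unsigned int cmd, void *arg)
{
        return __uvc_v4l2_do_ioctl(file, cmd, arg);
}
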
diff --git a/drivers/media/video/v4l1-compat.c b/drivers/media/video/v4l1-compat.c
index 928cb403737..f13c0a9d684 100644
--- a/drivers/media/video/v4l1-compat.c
+++ b/drivers/media/video/v4l1-compat.c
@@ -57,8 +57,7 @@ MODULE_LICENSE("GPL");
*/
static int
-get_v4l_control(struct inode *inode,
- struct file *file,
+get_v4l_control(struct file *file,
int cid,
v4l2_kioctl drv)
{
@@ -67,12 +66,12 @@ get_v4l_control(struct inode *inode,
int err;
qctrl2.id = cid;
- err = drv(inode, file, VIDIOC_QUERYCTRL, &qctrl2);
+ err = drv(file, VIDIOC_QUERYCTRL, &qctrl2);
if (err < 0)
dprintk("VIDIOC_QUERYCTRL: %d\n", err);
if (err == 0 && !(qctrl2.flags & V4L2_CTRL_FLAG_DISABLED)) {
ctrl2.id = qctrl2.id;
- err = drv(inode, file, VIDIOC_G_CTRL, &ctrl2);
+ err = drv(file, VIDIOC_G_CTRL, &ctrl2);
if (err < 0) {
dprintk("VIDIOC_G_CTRL: %d\n", err);
return 0;
@@ -85,8 +84,7 @@ get_v4l_control(struct inode *inode,
}
static int
-set_v4l_control(struct inode *inode,
- struct file *file,
+set_v4l_control(struct file *file,
int cid,
int value,
v4l2_kioctl drv)
@@ -96,7 +94,7 @@ set_v4l_control(struct inode *inode,
int err;
qctrl2.id = cid;
- err = drv(inode, file, VIDIOC_QUERYCTRL, &qctrl2);
+ err = drv(file, VIDIOC_QUERYCTRL, &qctrl2);
if (err < 0)
dprintk("VIDIOC_QUERYCTRL: %d\n", err);
if (err == 0 &&
@@ -114,7 +112,7 @@ set_v4l_control(struct inode *inode,
+ 32767)
/ 65535;
ctrl2.value += qctrl2.minimum;
- err = drv(inode, file, VIDIOC_S_CTRL, &ctrl2);
+ err = drv(file, VIDIOC_S_CTRL, &ctrl2);
if (err < 0)
dprintk("VIDIOC_S_CTRL: %d\n", err);
}
@@ -222,7 +220,6 @@ static int poll_one(struct file *file, struct poll_wqueues *pwq)
}
static int count_inputs(
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -232,14 +229,13 @@ static int count_inputs(
for (i = 0;; i++) {
memset(&input2, 0, sizeof(input2));
input2.index = i;
- if (0 != drv(inode, file, VIDIOC_ENUMINPUT, &input2))
+ if (0 != drv(file, VIDIOC_ENUMINPUT, &input2))
break;
}
return i;
}
static int check_size(
- struct inode *inode,
struct file *file,
v4l2_kioctl drv,
int *maxw,
@@ -252,14 +248,14 @@ static int check_size(
memset(&fmt2, 0, sizeof(fmt2));
desc2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- if (0 != drv(inode, file, VIDIOC_ENUM_FMT, &desc2))
+ if (0 != drv(file, VIDIOC_ENUM_FMT, &desc2))
goto done;
fmt2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fmt2.fmt.pix.width = 10000;
fmt2.fmt.pix.height = 10000;
fmt2.fmt.pix.pixelformat = desc2.pixelformat;
- if (0 != drv(inode, file, VIDIOC_TRY_FMT, &fmt2))
+ if (0 != drv(file, VIDIOC_TRY_FMT, &fmt2))
goto done;
*maxw = fmt2.fmt.pix.width;
@@ -273,7 +269,6 @@ done:
static noinline int v4l1_compat_get_capabilities(
struct video_capability *cap,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -289,13 +284,13 @@ static noinline int v4l1_compat_get_capabilities(
memset(cap, 0, sizeof(*cap));
memset(&fbuf, 0, sizeof(fbuf));
- err = drv(inode, file, VIDIOC_QUERYCAP, cap2);
+ err = drv(file, VIDIOC_QUERYCAP, cap2);
if (err < 0) {
dprintk("VIDIOCGCAP / VIDIOC_QUERYCAP: %d\n", err);
goto done;
}
if (cap2->capabilities & V4L2_CAP_VIDEO_OVERLAY) {
- err = drv(inode, file, VIDIOC_G_FBUF, &fbuf);
+ err = drv(file, VIDIOC_G_FBUF, &fbuf);
if (err < 0) {
dprintk("VIDIOCGCAP / VIDIOC_G_FBUF: %d\n", err);
memset(&fbuf, 0, sizeof(fbuf));
@@ -317,8 +312,8 @@ static noinline int v4l1_compat_get_capabilities(
if (fbuf.capability & V4L2_FBUF_CAP_LIST_CLIPPING)
cap->type |= VID_TYPE_CLIPPING;
- cap->channels = count_inputs(inode, file, drv);
- check_size(inode, file, drv,
+ cap->channels = count_inputs(file, drv);
+ check_size(file, drv,
&cap->maxwidth, &cap->maxheight);
cap->audios = 0; /* FIXME */
cap->minwidth = 48; /* FIXME */
@@ -331,7 +326,6 @@ done:
static noinline int v4l1_compat_get_frame_buffer(
struct video_buffer *buffer,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -341,7 +335,7 @@ static noinline int v4l1_compat_get_frame_buffer(
memset(buffer, 0, sizeof(*buffer));
memset(&fbuf, 0, sizeof(fbuf));
- err = drv(inode, file, VIDIOC_G_FBUF, &fbuf);
+ err = drv(file, VIDIOC_G_FBUF, &fbuf);
if (err < 0) {
dprintk("VIDIOCGFBUF / VIDIOC_G_FBUF: %d\n", err);
goto done;
@@ -386,7 +380,6 @@ done:
static noinline int v4l1_compat_set_frame_buffer(
struct video_buffer *buffer,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -415,7 +408,7 @@ static noinline int v4l1_compat_set_frame_buffer(
break;
}
fbuf.fmt.bytesperline = buffer->bytesperline;
- err = drv(inode, file, VIDIOC_S_FBUF, &fbuf);
+ err = drv(file, VIDIOC_S_FBUF, &fbuf);
if (err < 0)
dprintk("VIDIOCSFBUF / VIDIOC_S_FBUF: %d\n", err);
return err;
@@ -423,7 +416,6 @@ static noinline int v4l1_compat_set_frame_buffer(
static noinline int v4l1_compat_get_win_cap_dimensions(
struct video_window *win,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -438,7 +430,7 @@ static noinline int v4l1_compat_get_win_cap_dimensions(
memset(win, 0, sizeof(*win));
fmt->type = V4L2_BUF_TYPE_VIDEO_OVERLAY;
- err = drv(inode, file, VIDIOC_G_FMT, fmt);
+ err = drv(file, VIDIOC_G_FMT, fmt);
if (err < 0)
dprintk("VIDIOCGWIN / VIDIOC_G_WIN: %d\n", err);
if (err == 0) {
@@ -453,7 +445,7 @@ static noinline int v4l1_compat_get_win_cap_dimensions(
}
fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- err = drv(inode, file, VIDIOC_G_FMT, fmt);
+ err = drv(file, VIDIOC_G_FMT, fmt);
if (err < 0) {
dprintk("VIDIOCGWIN / VIDIOC_G_FMT: %d\n", err);
goto done;
@@ -472,7 +464,6 @@ done:
static noinline int v4l1_compat_set_win_cap_dimensions(
struct video_window *win,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -485,8 +476,8 @@ static noinline int v4l1_compat_set_win_cap_dimensions(
return err;
}
fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- drv(inode, file, VIDIOC_STREAMOFF, &fmt->type);
- err1 = drv(inode, file, VIDIOC_G_FMT, fmt);
+ drv(file, VIDIOC_STREAMOFF, &fmt->type);
+ err1 = drv(file, VIDIOC_G_FMT, fmt);
if (err1 < 0)
dprintk("VIDIOCSWIN / VIDIOC_G_FMT: %d\n", err1);
if (err1 == 0) {
@@ -494,7 +485,7 @@ static noinline int v4l1_compat_set_win_cap_dimensions(
fmt->fmt.pix.height = win->height;
fmt->fmt.pix.field = V4L2_FIELD_ANY;
fmt->fmt.pix.bytesperline = 0;
- err = drv(inode, file, VIDIOC_S_FMT, fmt);
+ err = drv(file, VIDIOC_S_FMT, fmt);
if (err < 0)
dprintk("VIDIOCSWIN / VIDIOC_S_FMT #1: %d\n",
err);
@@ -511,7 +502,7 @@ static noinline int v4l1_compat_set_win_cap_dimensions(
fmt->fmt.win.chromakey = win->chromakey;
fmt->fmt.win.clips = (void __user *)win->clips;
fmt->fmt.win.clipcount = win->clipcount;
- err2 = drv(inode, file, VIDIOC_S_FMT, fmt);
+ err2 = drv(file, VIDIOC_S_FMT, fmt);
if (err2 < 0)
dprintk("VIDIOCSWIN / VIDIOC_S_FMT #2: %d\n", err2);
@@ -525,7 +516,6 @@ static noinline int v4l1_compat_set_win_cap_dimensions(
static noinline int v4l1_compat_turn_preview_on_off(
int *on,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -536,9 +526,9 @@ static noinline int v4l1_compat_turn_preview_on_off(
/* dirty hack time. But v4l1 has no STREAMOFF
* equivalent in the API, and this one at
* least comes close ... */
- drv(inode, file, VIDIOC_STREAMOFF, &captype);
+ drv(file, VIDIOC_STREAMOFF, &captype);
}
- err = drv(inode, file, VIDIOC_OVERLAY, on);
+ err = drv(file, VIDIOC_OVERLAY, on);
if (err < 0)
dprintk("VIDIOCCAPTURE / VIDIOC_PREVIEW: %d\n", err);
return err;
@@ -546,7 +536,6 @@ static noinline int v4l1_compat_turn_preview_on_off(
static noinline int v4l1_compat_get_input_info(
struct video_channel *chan,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -556,7 +545,7 @@ static noinline int v4l1_compat_get_input_info(
memset(&input2, 0, sizeof(input2));
input2.index = chan->channel;
- err = drv(inode, file, VIDIOC_ENUMINPUT, &input2);
+ err = drv(file, VIDIOC_ENUMINPUT, &input2);
if (err < 0) {
dprintk("VIDIOCGCHAN / VIDIOC_ENUMINPUT: "
"channel=%d err=%d\n", chan->channel, err);
@@ -578,7 +567,7 @@ static noinline int v4l1_compat_get_input_info(
break;
}
chan->norm = 0;
- err = drv(inode, file, VIDIOC_G_STD, &sid);
+ err = drv(file, VIDIOC_G_STD, &sid);
if (err < 0)
dprintk("VIDIOCGCHAN / VIDIOC_G_STD: %d\n", err);
if (err == 0) {
@@ -595,14 +584,13 @@ done:
static noinline int v4l1_compat_set_input(
struct video_channel *chan,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
int err;
v4l2_std_id sid = 0;
- err = drv(inode, file, VIDIOC_S_INPUT, &chan->channel);
+ err = drv(file, VIDIOC_S_INPUT, &chan->channel);
if (err < 0)
dprintk("VIDIOCSCHAN / VIDIOC_S_INPUT: %d\n", err);
switch (chan->norm) {
@@ -617,7 +605,7 @@ static noinline int v4l1_compat_set_input(
break;
}
if (0 != sid) {
- err = drv(inode, file, VIDIOC_S_STD, &sid);
+ err = drv(file, VIDIOC_S_STD, &sid);
if (err < 0)
dprintk("VIDIOCSCHAN / VIDIOC_S_STD: %d\n", err);
}
@@ -626,7 +614,6 @@ static noinline int v4l1_compat_set_input(
static noinline int v4l1_compat_get_picture(
struct video_picture *pict,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -639,19 +626,19 @@ static noinline int v4l1_compat_get_picture(
return err;
}
- pict->brightness = get_v4l_control(inode, file,
+ pict->brightness = get_v4l_control(file,
V4L2_CID_BRIGHTNESS, drv);
- pict->hue = get_v4l_control(inode, file,
+ pict->hue = get_v4l_control(file,
V4L2_CID_HUE, drv);
- pict->contrast = get_v4l_control(inode, file,
+ pict->contrast = get_v4l_control(file,
V4L2_CID_CONTRAST, drv);
- pict->colour = get_v4l_control(inode, file,
+ pict->colour = get_v4l_control(file,
V4L2_CID_SATURATION, drv);
- pict->whiteness = get_v4l_control(inode, file,
+ pict->whiteness = get_v4l_control(file,
V4L2_CID_WHITENESS, drv);
fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- err = drv(inode, file, VIDIOC_G_FMT, fmt);
+ err = drv(file, VIDIOC_G_FMT, fmt);
if (err < 0) {
dprintk("VIDIOCGPICT / VIDIOC_G_FMT: %d\n", err);
goto done;
@@ -669,7 +656,6 @@ done:
static noinline int v4l1_compat_set_picture(
struct video_picture *pict,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -685,15 +671,15 @@ static noinline int v4l1_compat_set_picture(
}
memset(&fbuf, 0, sizeof(fbuf));
- set_v4l_control(inode, file,
+ set_v4l_control(file,
V4L2_CID_BRIGHTNESS, pict->brightness, drv);
- set_v4l_control(inode, file,
+ set_v4l_control(file,
V4L2_CID_HUE, pict->hue, drv);
- set_v4l_control(inode, file,
+ set_v4l_control(file,
V4L2_CID_CONTRAST, pict->contrast, drv);
- set_v4l_control(inode, file,
+ set_v4l_control(file,
V4L2_CID_SATURATION, pict->colour, drv);
- set_v4l_control(inode, file,
+ set_v4l_control(file,
V4L2_CID_WHITENESS, pict->whiteness, drv);
/*
* V4L1 uses this ioctl to set both memory capture and overlay
@@ -703,7 +689,7 @@ static noinline int v4l1_compat_set_picture(
*/
fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- err = drv(inode, file, VIDIOC_G_FMT, fmt);
+ err = drv(file, VIDIOC_G_FMT, fmt);
/* If VIDIOC_G_FMT failed, then the driver likely doesn't
support memory capture. Trying to set the memory capture
parameters would be pointless. */
@@ -714,13 +700,13 @@ static noinline int v4l1_compat_set_picture(
palette_to_pixelformat(pict->palette)) {
fmt->fmt.pix.pixelformat = palette_to_pixelformat(
pict->palette);
- mem_err = drv(inode, file, VIDIOC_S_FMT, fmt);
+ mem_err = drv(file, VIDIOC_S_FMT, fmt);
if (mem_err < 0)
dprintk("VIDIOCSPICT / VIDIOC_S_FMT: %d\n",
mem_err);
}
- err = drv(inode, file, VIDIOC_G_FBUF, &fbuf);
+ err = drv(file, VIDIOC_G_FBUF, &fbuf);
/* If VIDIOC_G_FBUF failed, then the driver likely doesn't
support overlay. Trying to set the overlay parameters
would be quite pointless. */
@@ -731,7 +717,7 @@ static noinline int v4l1_compat_set_picture(
palette_to_pixelformat(pict->palette)) {
fbuf.fmt.pixelformat = palette_to_pixelformat(
pict->palette);
- ovl_err = drv(inode, file, VIDIOC_S_FBUF, &fbuf);
+ ovl_err = drv(file, VIDIOC_S_FBUF, &fbuf);
if (ovl_err < 0)
dprintk("VIDIOCSPICT / VIDIOC_S_FBUF: %d\n",
ovl_err);
@@ -752,7 +738,6 @@ static noinline int v4l1_compat_set_picture(
static noinline int v4l1_compat_get_tuner(
struct video_tuner *tun,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -762,7 +747,7 @@ static noinline int v4l1_compat_get_tuner(
v4l2_std_id sid;
memset(&tun2, 0, sizeof(tun2));
- err = drv(inode, file, VIDIOC_G_TUNER, &tun2);
+ err = drv(file, VIDIOC_G_TUNER, &tun2);
if (err < 0) {
dprintk("VIDIOCGTUNER / VIDIOC_G_TUNER: %d\n", err);
goto done;
@@ -778,7 +763,7 @@ static noinline int v4l1_compat_get_tuner(
for (i = 0; i < 64; i++) {
memset(&std2, 0, sizeof(std2));
std2.index = i;
- if (0 != drv(inode, file, VIDIOC_ENUMSTD, &std2))
+ if (0 != drv(file, VIDIOC_ENUMSTD, &std2))
break;
if (std2.id & V4L2_STD_PAL)
tun->flags |= VIDEO_TUNER_PAL;
@@ -788,7 +773,7 @@ static noinline int v4l1_compat_get_tuner(
tun->flags |= VIDEO_TUNER_SECAM;
}
- err = drv(inode, file, VIDIOC_G_STD, &sid);
+ err = drv(file, VIDIOC_G_STD, &sid);
if (err < 0)
dprintk("VIDIOCGTUNER / VIDIOC_G_STD: %d\n", err);
if (err == 0) {
@@ -811,7 +796,6 @@ done:
static noinline int v4l1_compat_select_tuner(
struct video_tuner *tun,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -821,7 +805,7 @@ static noinline int v4l1_compat_select_tuner(
t.index = tun->tuner;
- err = drv(inode, file, VIDIOC_S_INPUT, &t);
+ err = drv(file, VIDIOC_S_INPUT, &t);
if (err < 0)
dprintk("VIDIOCSTUNER / VIDIOC_S_INPUT: %d\n", err);
return err;
@@ -829,7 +813,6 @@ static noinline int v4l1_compat_select_tuner(
static noinline int v4l1_compat_get_frequency(
unsigned long *freq,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -838,7 +821,7 @@ static noinline int v4l1_compat_get_frequency(
memset(&freq2, 0, sizeof(freq2));
freq2.tuner = 0;
- err = drv(inode, file, VIDIOC_G_FREQUENCY, &freq2);
+ err = drv(file, VIDIOC_G_FREQUENCY, &freq2);
if (err < 0)
dprintk("VIDIOCGFREQ / VIDIOC_G_FREQUENCY: %d\n", err);
if (0 == err)
@@ -848,7 +831,6 @@ static noinline int v4l1_compat_get_frequency(
static noinline int v4l1_compat_set_frequency(
unsigned long *freq,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -856,9 +838,9 @@ static noinline int v4l1_compat_set_frequency(
struct v4l2_frequency freq2;
memset(&freq2, 0, sizeof(freq2));
- drv(inode, file, VIDIOC_G_FREQUENCY, &freq2);
+ drv(file, VIDIOC_G_FREQUENCY, &freq2);
freq2.frequency = *freq;
- err = drv(inode, file, VIDIOC_S_FREQUENCY, &freq2);
+ err = drv(file, VIDIOC_S_FREQUENCY, &freq2);
if (err < 0)
dprintk("VIDIOCSFREQ / VIDIOC_S_FREQUENCY: %d\n", err);
return err;
@@ -866,7 +848,6 @@ static noinline int v4l1_compat_set_frequency(
static noinline int v4l1_compat_get_audio(
struct video_audio *aud,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -876,7 +857,7 @@ static noinline int v4l1_compat_get_audio(
struct v4l2_tuner tun2;
memset(&aud2, 0, sizeof(aud2));
- err = drv(inode, file, VIDIOC_G_AUDIO, &aud2);
+ err = drv(file, VIDIOC_G_AUDIO, &aud2);
if (err < 0) {
dprintk("VIDIOCGAUDIO / VIDIOC_G_AUDIO: %d\n", err);
goto done;
@@ -886,27 +867,27 @@ static noinline int v4l1_compat_get_audio(
aud->name[sizeof(aud->name) - 1] = 0;
aud->audio = aud2.index;
aud->flags = 0;
- i = get_v4l_control(inode, file, V4L2_CID_AUDIO_VOLUME, drv);
+ i = get_v4l_control(file, V4L2_CID_AUDIO_VOLUME, drv);
if (i >= 0) {
aud->volume = i;
aud->flags |= VIDEO_AUDIO_VOLUME;
}
- i = get_v4l_control(inode, file, V4L2_CID_AUDIO_BASS, drv);
+ i = get_v4l_control(file, V4L2_CID_AUDIO_BASS, drv);
if (i >= 0) {
aud->bass = i;
aud->flags |= VIDEO_AUDIO_BASS;
}
- i = get_v4l_control(inode, file, V4L2_CID_AUDIO_TREBLE, drv);
+ i = get_v4l_control(file, V4L2_CID_AUDIO_TREBLE, drv);
if (i >= 0) {
aud->treble = i;
aud->flags |= VIDEO_AUDIO_TREBLE;
}
- i = get_v4l_control(inode, file, V4L2_CID_AUDIO_BALANCE, drv);
+ i = get_v4l_control(file, V4L2_CID_AUDIO_BALANCE, drv);
if (i >= 0) {
aud->balance = i;
aud->flags |= VIDEO_AUDIO_BALANCE;
}
- i = get_v4l_control(inode, file, V4L2_CID_AUDIO_MUTE, drv);
+ i = get_v4l_control(file, V4L2_CID_AUDIO_MUTE, drv);
if (i >= 0) {
if (i)
aud->flags |= VIDEO_AUDIO_MUTE;
@@ -914,13 +895,13 @@ static noinline int v4l1_compat_get_audio(
}
aud->step = 1;
qctrl2.id = V4L2_CID_AUDIO_VOLUME;
- if (drv(inode, file, VIDIOC_QUERYCTRL, &qctrl2) == 0 &&
+ if (drv(file, VIDIOC_QUERYCTRL, &qctrl2) == 0 &&
!(qctrl2.flags & V4L2_CTRL_FLAG_DISABLED))
aud->step = qctrl2.step;
aud->mode = 0;
memset(&tun2, 0, sizeof(tun2));
- err = drv(inode, file, VIDIOC_G_TUNER, &tun2);
+ err = drv(file, VIDIOC_G_TUNER, &tun2);
if (err < 0) {
dprintk("VIDIOCGAUDIO / VIDIOC_G_TUNER: %d\n", err);
err = 0;
@@ -939,7 +920,6 @@ done:
static noinline int v4l1_compat_set_audio(
struct video_audio *aud,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -951,24 +931,24 @@ static noinline int v4l1_compat_set_audio(
memset(&tun2, 0, sizeof(tun2));
aud2.index = aud->audio;
- err = drv(inode, file, VIDIOC_S_AUDIO, &aud2);
+ err = drv(file, VIDIOC_S_AUDIO, &aud2);
if (err < 0) {
dprintk("VIDIOCSAUDIO / VIDIOC_S_AUDIO: %d\n", err);
goto done;
}
- set_v4l_control(inode, file, V4L2_CID_AUDIO_VOLUME,
+ set_v4l_control(file, V4L2_CID_AUDIO_VOLUME,
aud->volume, drv);
- set_v4l_control(inode, file, V4L2_CID_AUDIO_BASS,
+ set_v4l_control(file, V4L2_CID_AUDIO_BASS,
aud->bass, drv);
- set_v4l_control(inode, file, V4L2_CID_AUDIO_TREBLE,
+ set_v4l_control(file, V4L2_CID_AUDIO_TREBLE,
aud->treble, drv);
- set_v4l_control(inode, file, V4L2_CID_AUDIO_BALANCE,
+ set_v4l_control(file, V4L2_CID_AUDIO_BALANCE,
aud->balance, drv);
- set_v4l_control(inode, file, V4L2_CID_AUDIO_MUTE,
+ set_v4l_control(file, V4L2_CID_AUDIO_MUTE,
!!(aud->flags & VIDEO_AUDIO_MUTE), drv);
- err = drv(inode, file, VIDIOC_G_TUNER, &tun2);
+ err = drv(file, VIDIOC_G_TUNER, &tun2);
if (err < 0)
dprintk("VIDIOCSAUDIO / VIDIOC_G_TUNER: %d\n", err);
if (err == 0) {
@@ -985,7 +965,7 @@ static noinline int v4l1_compat_set_audio(
tun2.audmode = V4L2_TUNER_MODE_LANG2;
break;
}
- err = drv(inode, file, VIDIOC_S_TUNER, &tun2);
+ err = drv(file, VIDIOC_S_TUNER, &tun2);
if (err < 0)
dprintk("VIDIOCSAUDIO / VIDIOC_S_TUNER: %d\n", err);
}
@@ -996,7 +976,6 @@ done:
static noinline int v4l1_compat_capture_frame(
struct video_mmap *mm,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -1013,7 +992,7 @@ static noinline int v4l1_compat_capture_frame(
memset(&buf, 0, sizeof(buf));
fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- err = drv(inode, file, VIDIOC_G_FMT, fmt);
+ err = drv(file, VIDIOC_G_FMT, fmt);
if (err < 0) {
dprintk("VIDIOCMCAPTURE / VIDIOC_G_FMT: %d\n", err);
goto done;
@@ -1029,7 +1008,7 @@ static noinline int v4l1_compat_capture_frame(
palette_to_pixelformat(mm->format);
fmt->fmt.pix.field = V4L2_FIELD_ANY;
fmt->fmt.pix.bytesperline = 0;
- err = drv(inode, file, VIDIOC_S_FMT, fmt);
+ err = drv(file, VIDIOC_S_FMT, fmt);
if (err < 0) {
dprintk("VIDIOCMCAPTURE / VIDIOC_S_FMT: %d\n", err);
goto done;
@@ -1037,17 +1016,17 @@ static noinline int v4l1_compat_capture_frame(
}
buf.index = mm->frame;
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- err = drv(inode, file, VIDIOC_QUERYBUF, &buf);
+ err = drv(file, VIDIOC_QUERYBUF, &buf);
if (err < 0) {
dprintk("VIDIOCMCAPTURE / VIDIOC_QUERYBUF: %d\n", err);
goto done;
}
- err = drv(inode, file, VIDIOC_QBUF, &buf);
+ err = drv(file, VIDIOC_QBUF, &buf);
if (err < 0) {
dprintk("VIDIOCMCAPTURE / VIDIOC_QBUF: %d\n", err);
goto done;
}
- err = drv(inode, file, VIDIOC_STREAMON, &captype);
+ err = drv(file, VIDIOC_STREAMON, &captype);
if (err < 0)
dprintk("VIDIOCMCAPTURE / VIDIOC_STREAMON: %d\n", err);
done:
@@ -1057,7 +1036,6 @@ done:
static noinline int v4l1_compat_sync(
int *i,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -1069,7 +1047,7 @@ static noinline int v4l1_compat_sync(
memset(&buf, 0, sizeof(buf));
buf.index = *i;
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- err = drv(inode, file, VIDIOC_QUERYBUF, &buf);
+ err = drv(file, VIDIOC_QUERYBUF, &buf);
if (err < 0) {
/* No such buffer */
dprintk("VIDIOCSYNC / VIDIOC_QUERYBUF: %d\n", err);
@@ -1082,7 +1060,7 @@ static noinline int v4l1_compat_sync(
}
/* make sure capture actually runs so we don't block forever */
- err = drv(inode, file, VIDIOC_STREAMON, &captype);
+ err = drv(file, VIDIOC_STREAMON, &captype);
if (err < 0) {
dprintk("VIDIOCSYNC / VIDIOC_STREAMON: %d\n", err);
goto done;
@@ -1096,7 +1074,7 @@ static noinline int v4l1_compat_sync(
if (err < 0 || /* error or sleep was interrupted */
err == 0) /* timeout? Shouldn't occur. */
break;
- err = drv(inode, file, VIDIOC_QUERYBUF, &buf);
+ err = drv(file, VIDIOC_QUERYBUF, &buf);
if (err < 0)
dprintk("VIDIOCSYNC / VIDIOC_QUERYBUF: %d\n", err);
}
@@ -1104,7 +1082,7 @@ static noinline int v4l1_compat_sync(
if (!(buf.flags & V4L2_BUF_FLAG_DONE)) /* not done */
goto done;
do {
- err = drv(inode, file, VIDIOC_DQBUF, &buf);
+ err = drv(file, VIDIOC_DQBUF, &buf);
if (err < 0)
dprintk("VIDIOCSYNC / VIDIOC_DQBUF: %d\n", err);
} while (err == 0 && buf.index != *i);
@@ -1114,7 +1092,6 @@ done:
static noinline int v4l1_compat_get_vbi_format(
struct vbi_format *fmt,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -1128,7 +1105,7 @@ static noinline int v4l1_compat_get_vbi_format(
}
fmt2->type = V4L2_BUF_TYPE_VBI_CAPTURE;
- err = drv(inode, file, VIDIOC_G_FMT, fmt2);
+ err = drv(file, VIDIOC_G_FMT, fmt2);
if (err < 0) {
dprintk("VIDIOCGVBIFMT / VIDIOC_G_FMT: %d\n", err);
goto done;
@@ -1153,7 +1130,6 @@ done:
static noinline int v4l1_compat_set_vbi_format(
struct vbi_format *fmt,
- struct inode *inode,
struct file *file,
v4l2_kioctl drv)
{
@@ -1179,7 +1155,7 @@ static noinline int v4l1_compat_set_vbi_format(
fmt2->fmt.vbi.start[1] = fmt->start[1];
fmt2->fmt.vbi.count[1] = fmt->count[1];
fmt2->fmt.vbi.flags = fmt->flags;
- err = drv(inode, file, VIDIOC_TRY_FMT, fmt2);
+ err = drv(file, VIDIOC_TRY_FMT, fmt2);
if (err < 0) {
dprintk("VIDIOCSVBIFMT / VIDIOC_TRY_FMT: %d\n", err);
goto done;
@@ -1196,7 +1172,7 @@ static noinline int v4l1_compat_set_vbi_format(
err = -EINVAL;
goto done;
}
- err = drv(inode, file, VIDIOC_S_FMT, fmt2);
+ err = drv(file, VIDIOC_S_FMT, fmt2);
if (err < 0)
dprintk("VIDIOCSVBIFMT / VIDIOC_S_FMT: %d\n", err);
done:
@@ -1208,8 +1184,7 @@ done:
* This function is exported.
*/
int
-v4l_compat_translate_ioctl(struct inode *inode,
- struct file *file,
+v4l_compat_translate_ioctl(struct file *file,
int cmd,
void *arg,
v4l2_kioctl drv)
@@ -1218,64 +1193,64 @@ v4l_compat_translate_ioctl(struct inode *inode,
switch (cmd) {
case VIDIOCGCAP: /* capability */
- err = v4l1_compat_get_capabilities(arg, inode, file, drv);
+ err = v4l1_compat_get_capabilities(arg, file, drv);
break;
case VIDIOCGFBUF: /* get frame buffer */
- err = v4l1_compat_get_frame_buffer(arg, inode, file, drv);
+ err = v4l1_compat_get_frame_buffer(arg, file, drv);
break;
case VIDIOCSFBUF: /* set frame buffer */
- err = v4l1_compat_set_frame_buffer(arg, inode, file, drv);
+ err = v4l1_compat_set_frame_buffer(arg, file, drv);
break;
case VIDIOCGWIN: /* get window or capture dimensions */
- err = v4l1_compat_get_win_cap_dimensions(arg, inode, file, drv);
+ err = v4l1_compat_get_win_cap_dimensions(arg, file, drv);
break;
case VIDIOCSWIN: /* set window and/or capture dimensions */
- err = v4l1_compat_set_win_cap_dimensions(arg, inode, file, drv);
+ err = v4l1_compat_set_win_cap_dimensions(arg, file, drv);
break;
case VIDIOCCAPTURE: /* turn on/off preview */
- err = v4l1_compat_turn_preview_on_off(arg, inode, file, drv);
+ err = v4l1_compat_turn_preview_on_off(arg, file, drv);
break;
case VIDIOCGCHAN: /* get input information */
- err = v4l1_compat_get_input_info(arg, inode, file, drv);
+ err = v4l1_compat_get_input_info(arg, file, drv);
break;
case VIDIOCSCHAN: /* set input */
- err = v4l1_compat_set_input(arg, inode, file, drv);
+ err = v4l1_compat_set_input(arg, file, drv);
break;
case VIDIOCGPICT: /* get tone controls & partial capture format */
- err = v4l1_compat_get_picture(arg, inode, file, drv);
+ err = v4l1_compat_get_picture(arg, file, drv);
break;
case VIDIOCSPICT: /* set tone controls & partial capture format */
- err = v4l1_compat_set_picture(arg, inode, file, drv);
+ err = v4l1_compat_set_picture(arg, file, drv);
break;
case VIDIOCGTUNER: /* get tuner information */
- err = v4l1_compat_get_tuner(arg, inode, file, drv);
+ err = v4l1_compat_get_tuner(arg, file, drv);
break;
case VIDIOCSTUNER: /* select a tuner input */
- err = v4l1_compat_select_tuner(arg, inode, file, drv);
+ err = v4l1_compat_select_tuner(arg, file, drv);
break;
case VIDIOCGFREQ: /* get frequency */
- err = v4l1_compat_get_frequency(arg, inode, file, drv);
+ err = v4l1_compat_get_frequency(arg, file, drv);
break;
case VIDIOCSFREQ: /* set frequency */
- err = v4l1_compat_set_frequency(arg, inode, file, drv);
+ err = v4l1_compat_set_frequency(arg, file, drv);
break;
case VIDIOCGAUDIO: /* get audio properties/controls */
- err = v4l1_compat_get_audio(arg, inode, file, drv);
+ err = v4l1_compat_get_audio(arg, file, drv);
break;
case VIDIOCSAUDIO: /* set audio controls */
- err = v4l1_compat_set_audio(arg, inode, file, drv);
+ err = v4l1_compat_set_audio(arg, file, drv);
break;
case VIDIOCMCAPTURE: /* capture a frame */
- err = v4l1_compat_capture_frame(arg, inode, file, drv);
+ err = v4l1_compat_capture_frame(arg, file, drv);
break;
case VIDIOCSYNC: /* wait for a frame */
- err = v4l1_compat_sync(arg, inode, file, drv);
+ err = v4l1_compat_sync(arg, file, drv);
break;
case VIDIOCGVBIFMT: /* query VBI data capture format */
- err = v4l1_compat_get_vbi_format(arg, inode, file, drv);
+ err = v4l1_compat_get_vbi_format(arg, file, drv);
break;
case VIDIOCSVBIFMT:
- err = v4l1_compat_set_vbi_format(arg, inode, file, drv);
+ err = v4l1_compat_set_vbi_format(arg, file, drv);
break;
default:
err = -ENOIOCTLCMD;
diff --git a/drivers/media/video/v4l2-int-device.c b/drivers/media/video/v4l2-int-device.c
index 0e4549922f2..a935bae538e 100644
--- a/drivers/media/video/v4l2-int-device.c
+++ b/drivers/media/video/v4l2-int-device.c
@@ -32,7 +32,7 @@
static DEFINE_MUTEX(mutex);
static LIST_HEAD(int_list);
-static void v4l2_int_device_try_attach_all(void)
+void v4l2_int_device_try_attach_all(void)
{
struct v4l2_int_device *m, *s;
@@ -66,6 +66,7 @@ static void v4l2_int_device_try_attach_all(void)
}
}
}
+EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all);
static int ioctl_sort_cmp(const void *a, const void *b)
{
@@ -144,6 +145,7 @@ int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd)
find_ioctl(d->u.slave, cmd,
(v4l2_int_ioctl_func *)no_such_ioctl_0))(d);
}
+EXPORT_SYMBOL_GPL(v4l2_int_ioctl_0);
static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg)
{
@@ -156,5 +158,6 @@ int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg)
find_ioctl(d->u.slave, cmd,
(v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg);
}
+EXPORT_SYMBOL_GPL(v4l2_int_ioctl_1);
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 155c9d77a46..710e1a40c42 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -625,13 +625,13 @@ static int check_fmt(const struct v4l2_ioctl_ops *ops, enum v4l2_buf_type type)
return -EINVAL;
}
-static int __video_do_ioctl(struct inode *inode, struct file *file,
+static int __video_do_ioctl(struct file *file,
unsigned int cmd, void *arg)
{
struct video_device *vfd = video_devdata(file);
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
- void *fh = file->private_data;
- int ret = -EINVAL;
+ void *fh = file->private_data;
+ int ret = -EINVAL;
if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
!(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) {
@@ -675,7 +675,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
V4L2 ioctls.
********************************************************/
if (_IOC_TYPE(cmd) == 'v' && _IOC_NR(cmd) < BASE_VIDIOCPRIVATE)
- return v4l_compat_translate_ioctl(inode, file, cmd, arg,
+ return v4l_compat_translate_ioctl(file, cmd, arg,
__video_do_ioctl);
#endif
@@ -1768,7 +1768,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
return ret;
}
-int video_ioctl2(struct inode *inode, struct file *file,
+int __video_ioctl2(struct file *file,
unsigned int cmd, unsigned long arg)
{
char sbuf[128];
@@ -1832,7 +1832,7 @@ int video_ioctl2(struct inode *inode, struct file *file,
}
/* Handles IOCTL */
- err = __video_do_ioctl(inode, file, cmd, parg);
+ err = __video_do_ioctl(file, cmd, parg);
if (err == -ENOIOCTLCMD)
err = -EINVAL;
if (is_ext_ctrl) {
@@ -1860,4 +1860,11 @@ out:
kfree(mbuf);
return err;
}
+EXPORT_SYMBOL(__video_ioctl2);
+
+int video_ioctl2(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return __video_ioctl2(file, cmd, arg);
+}
EXPORT_SYMBOL(video_ioctl2);
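
With the inode argument dropped from the internal path, video_ioctl2() remains as a thin wrapper around the new __video_ioctl2(), so drivers whose file_operations still use the inode-taking .ioctl prototype need no changes, while code that only has the struct file can call __video_ioctl2() directly. A sketch of the unchanged driver side; the example_* callbacks are placeholders, not from the patch.

#include <linux/fs.h>
#include <linux/module.h>
#include <media/v4l2-ioctl.h>

/* Sketch only: the driver keeps pointing .ioctl at video_ioctl2(). */
static int example_open(struct inode *inode, struct file *file)
{
	return 0;
}

static int example_release(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.release = example_release,
	.ioctl   = video_ioctl2,	/* still takes (inode, file, cmd, arg) */
};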
diff --git a/drivers/media/video/videobuf-dvb.c b/drivers/media/video/videobuf-dvb.c
index 917277d3660..0e7dcba8e4a 100644
--- a/drivers/media/video/videobuf-dvb.c
+++ b/drivers/media/video/videobuf-dvb.c
@@ -296,29 +296,7 @@ EXPORT_SYMBOL(videobuf_dvb_register_bus);
void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f)
{
- struct list_head *list, *q;
- struct videobuf_dvb_frontend *fe;
-
- mutex_lock(&f->lock);
- list_for_each_safe(list, q, &f->felist) {
- fe = list_entry(list, struct videobuf_dvb_frontend, felist);
- if (fe->dvb.net.dvbdev) {
- dvb_net_release(&fe->dvb.net);
- fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
- &fe->dvb.fe_mem);
- fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
- &fe->dvb.fe_hw);
- dvb_dmxdev_release(&fe->dvb.dmxdev);
- dvb_dmx_release(&fe->dvb.demux);
- dvb_unregister_frontend(fe->dvb.frontend);
- }
- if (fe->dvb.frontend)
- /* always allocated, may have been reset */
- dvb_frontend_detach(fe->dvb.frontend);
- list_del(list);
- kfree(fe);
- }
- mutex_unlock(&f->lock);
+ videobuf_dvb_dealloc_frontends(f);
dvb_unregister_adapter(&f->adapter);
}
@@ -389,3 +367,31 @@ fail_alloc:
return fe;
}
EXPORT_SYMBOL(videobuf_dvb_alloc_frontend);
+
+void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f)
+{
+ struct list_head *list, *q;
+ struct videobuf_dvb_frontend *fe;
+
+ mutex_lock(&f->lock);
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct videobuf_dvb_frontend, felist);
+ if (fe->dvb.net.dvbdev) {
+ dvb_net_release(&fe->dvb.net);
+ fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+ &fe->dvb.fe_mem);
+ fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+ &fe->dvb.fe_hw);
+ dvb_dmxdev_release(&fe->dvb.dmxdev);
+ dvb_dmx_release(&fe->dvb.demux);
+ dvb_unregister_frontend(fe->dvb.frontend);
+ }
+ if (fe->dvb.frontend)
+ /* always allocated, may have been reset */
+ dvb_frontend_detach(fe->dvb.frontend);
+ list_del(list); /* remove list entry */
+ kfree(fe); /* free frontend allocation */
+ }
+ mutex_unlock(&f->lock);
+}
+EXPORT_SYMBOL(videobuf_dvb_dealloc_frontends);
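
The teardown loop is factored out of videobuf_dvb_unregister_bus() into videobuf_dvb_dealloc_frontends(), so a driver can free its frontend list on a failed probe without ever having registered the DVB adapter. A sketch of that error path; "example_dev" and example_attach_tuner() are assumptions for illustration.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <media/videobuf-dvb.h>

struct example_dev {
	struct videobuf_dvb_frontends frontends;
};

static struct dvb_frontend *example_attach_tuner(struct example_dev *dev)
{
	return NULL;	/* placeholder: a real driver attaches a demod here */
}

static int example_setup_dvb(struct example_dev *dev)
{
	struct videobuf_dvb_frontend *fe;

	/* normally done once at probe time */
	mutex_init(&dev->frontends.lock);
	INIT_LIST_HEAD(&dev->frontends.felist);

	fe = videobuf_dvb_alloc_frontend(&dev->frontends, 1);
	if (!fe)
		return -ENOMEM;

	fe->dvb.frontend = example_attach_tuner(dev);
	if (!fe->dvb.frontend) {
		/* frees every entry on dev->frontends.felist */
		videobuf_dvb_dealloc_frontends(&dev->frontends);
		return -ENODEV;
	}

	return 0;
}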
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 7d7e51def46..e15e48f04be 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -1163,11 +1163,11 @@ static int vivi_release(void)
if (-1 != dev->vfd->minor) {
printk(KERN_INFO "%s: unregistering /dev/video%d\n",
- VIVI_MODULE_NAME, dev->vfd->minor);
+ VIVI_MODULE_NAME, dev->vfd->num);
video_unregister_device(dev->vfd);
} else {
printk(KERN_INFO "%s: releasing /dev/video%d\n",
- VIVI_MODULE_NAME, dev->vfd->minor);
+ VIVI_MODULE_NAME, dev->vfd->num);
video_device_release(dev->vfd);
}
@@ -1307,7 +1307,7 @@ static int __init vivi_init(void)
dev->vfd = vfd;
printk(KERN_INFO "%s: V4L2 device registered as /dev/video%d\n",
- VIVI_MODULE_NAME, vfd->minor);
+ VIVI_MODULE_NAME, vfd->num);
}
if (ret < 0) {
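
These printk changes are part of the switch from video_device->minor (the raw character-device minor) to ->num, which is the N in /dev/videoN and need not equal the minor. A trivial sketch of the reporting pattern; the wrapper name is assumed.

#include <linux/kernel.h>
#include <media/v4l2-dev.h>

/* Sketch only: log the device node number, keeping the minor around
 * just for debugging. */
static void example_announce(struct video_device *vfd)
{
	printk(KERN_INFO "example: registered as /dev/video%d (minor %d)\n",
	       vfd->num, vfd->minor);
}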
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index dcd45dbd82d..4dfb43bd184 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -2398,7 +2398,7 @@ error:
cam->sensor = CC_UNKNOWN;
DBG(1, "Image sensor initialization failed for %s (/dev/video%d). "
"Try to detach and attach this device again",
- symbolic(camlist, cam->id), cam->v4ldev->minor)
+ symbolic(camlist, cam->id), cam->v4ldev->num)
return err;
}
@@ -2644,7 +2644,7 @@ static void w9968cf_release_resources(struct w9968cf_device* cam)
{
mutex_lock(&w9968cf_devlist_mutex);
- DBG(2, "V4L device deregistered: /dev/video%d", cam->v4ldev->minor)
+ DBG(2, "V4L device deregistered: /dev/video%d", cam->v4ldev->num)
video_unregister_device(cam->v4ldev);
list_del(&cam->v4llist);
@@ -2679,7 +2679,7 @@ static int w9968cf_open(struct inode* inode, struct file* filp)
DBG(2, "No supported image sensor has been detected by the "
"'ovcamchip' module for the %s (/dev/video%d). Make "
"sure it is loaded *before* (re)connecting the camera.",
- symbolic(camlist, cam->id), cam->v4ldev->minor)
+ symbolic(camlist, cam->id), cam->v4ldev->num)
mutex_unlock(&cam->dev_mutex);
up_read(&w9968cf_disconnect);
return -ENODEV;
@@ -2687,7 +2687,7 @@ static int w9968cf_open(struct inode* inode, struct file* filp)
if (cam->users) {
DBG(2, "%s (/dev/video%d) has been already occupied by '%s'",
- symbolic(camlist, cam->id),cam->v4ldev->minor,cam->command)
+ symbolic(camlist, cam->id), cam->v4ldev->num, cam->command)
if ((filp->f_flags & O_NONBLOCK)||(filp->f_flags & O_NDELAY)) {
mutex_unlock(&cam->dev_mutex);
up_read(&w9968cf_disconnect);
@@ -2709,7 +2709,7 @@ static int w9968cf_open(struct inode* inode, struct file* filp)
}
DBG(5, "Opening '%s', /dev/video%d ...",
- symbolic(camlist, cam->id), cam->v4ldev->minor)
+ symbolic(camlist, cam->id), cam->v4ldev->num)
cam->streaming = 0;
cam->misconfigured = 0;
@@ -2947,7 +2947,7 @@ static int w9968cf_v4l_ioctl(struct inode* inode, struct file* filp,
.minheight = cam->minheight,
};
sprintf(cap.name, "W996[87]CF USB Camera #%d",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
cap.maxwidth = (cam->upscaling && w9968cf_vpp)
? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth)
: cam->maxwidth;
@@ -3567,7 +3567,7 @@ w9968cf_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- DBG(2, "V4L device registered as /dev/video%d", cam->v4ldev->minor)
+ DBG(2, "V4L device registered as /dev/video%d", cam->v4ldev->num)
/* Set some basic constants */
w9968cf_configure_camera(cam, udev, mod_id, dev_nr);
@@ -3618,7 +3618,7 @@ static void w9968cf_usb_disconnect(struct usb_interface* intf)
DBG(2, "The device is open (/dev/video%d)! "
"Process name: %s. Deregistration and memory "
"deallocation are deferred on close.",
- cam->v4ldev->minor, cam->command)
+ cam->v4ldev->num, cam->command)
cam->misconfigured = 1;
w9968cf_stop_transfer(cam);
wake_up_interruptible(&cam->wait_queue);
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index 6a0902bcba6..9fc58170763 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -539,7 +539,7 @@ static int zc0301_stream_interrupt(struct zc0301_device* cam)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "URB timeout reached. The camera is misconfigured. To "
"use it, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -EIO;
}
@@ -640,7 +640,7 @@ static void zc0301_release_resources(struct kref *kref)
{
struct zc0301_device *cam = container_of(kref, struct zc0301_device,
kref);
- DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor);
+ DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
video_set_drvdata(cam->v4ldev, NULL);
video_unregister_device(cam->v4ldev);
usb_put_dev(cam->usbdev);
@@ -679,7 +679,7 @@ static int zc0301_open(struct inode* inode, struct file* filp)
}
if (cam->users) {
- DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->minor);
+ DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->num);
DBG(3, "Simultaneous opens are not supported");
if ((filp->f_flags & O_NONBLOCK) ||
(filp->f_flags & O_NDELAY)) {
@@ -722,7 +722,7 @@ static int zc0301_open(struct inode* inode, struct file* filp)
cam->frame_count = 0;
zc0301_empty_framequeues(cam);
- DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor);
+ DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
out:
mutex_unlock(&cam->open_mutex);
@@ -746,7 +746,7 @@ static int zc0301_release(struct inode* inode, struct file* filp)
cam->users--;
wake_up_interruptible_nr(&cam->wait_open, 1);
- DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor);
+ DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
kref_put(&cam->kref, zc0301_release_resources);
@@ -1275,7 +1275,7 @@ zc0301_vidioc_s_crop(struct zc0301_device* cam, void __user * arg)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
"use the camera, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -EIO;
}
@@ -1288,7 +1288,7 @@ zc0301_vidioc_s_crop(struct zc0301_device* cam, void __user * arg)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
"use the camera, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -ENOMEM;
}
@@ -1470,7 +1470,7 @@ zc0301_vidioc_try_s_fmt(struct zc0301_device* cam, unsigned int cmd,
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
"use the camera, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -EIO;
}
@@ -1482,7 +1482,7 @@ zc0301_vidioc_try_s_fmt(struct zc0301_device* cam, unsigned int cmd,
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
"use the camera, close and open /dev/video%d again.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
return -ENOMEM;
}
@@ -1529,7 +1529,7 @@ zc0301_vidioc_s_jpegcomp(struct zc0301_device* cam, void __user * arg)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
"problems. To use the camera, close and open "
- "/dev/video%d again.", cam->v4ldev->minor);
+ "/dev/video%d again.", cam->v4ldev->num);
return -EIO;
}
@@ -2005,7 +2005,7 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor);
+ DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
cam->module_param.force_munmap = force_munmap[dev_nr];
cam->module_param.frame_timeout = frame_timeout[dev_nr];
@@ -2044,7 +2044,7 @@ static void zc0301_usb_disconnect(struct usb_interface* intf)
if (cam->users) {
DBG(2, "Device /dev/video%d is open! Deregistration and "
"memory deallocation are deferred.",
- cam->v4ldev->minor);
+ cam->v4ldev->num);
cam->state |= DEV_MISCONFIGURED;
zc0301_stop_transfer(cam);
cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 7cdac99deea..a1d81ed44c7 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -885,7 +885,7 @@ static int zr364xx_probe(struct usb_interface *intf,
usb_set_intfdata(intf, cam);
dev_info(&udev->dev, DRIVER_DESC " controlling video device %d\n",
- cam->vdev->minor);
+ cam->vdev->num);
return 0;
}
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 5263913e0c6..7911151e56a 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -172,9 +172,9 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error);
/*** Block device ***/
-static int mspro_block_bd_open(struct inode *inode, struct file *filp)
+static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
+ struct gendisk *disk = bdev->bd_disk;
struct mspro_block_data *msb = disk->private_data;
int rc = -ENXIO;
@@ -182,7 +182,7 @@ static int mspro_block_bd_open(struct inode *inode, struct file *filp)
if (msb && msb->card) {
msb->usage_count++;
- if ((filp->f_mode & FMODE_WRITE) && msb->read_only)
+ if ((mode & FMODE_WRITE) && msb->read_only)
rc = -EROFS;
else
rc = 0;
@@ -218,9 +218,8 @@ static int mspro_block_disk_release(struct gendisk *disk)
return 0;
}
-static int mspro_block_bd_release(struct inode *inode, struct file *filp)
+static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
return mspro_block_disk_release(disk);
}
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 81483de8c0f..11a617ab424 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -575,9 +575,9 @@ static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
*
* Returns 0 on success or negative error code on failure.
*/
-static int i2o_block_open(struct inode *inode, struct file *file)
+static int i2o_block_open(struct block_device *bdev, fmode_t mode)
{
- struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data;
+ struct i2o_block_device *dev = bdev->bd_disk->private_data;
if (!dev->i2o_dev)
return -ENODEV;
@@ -604,9 +604,8 @@ static int i2o_block_open(struct inode *inode, struct file *file)
*
* Returns 0 on success or negative error code on failure.
*/
-static int i2o_block_release(struct inode *inode, struct file *file)
+static int i2o_block_release(struct gendisk *disk, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
struct i2o_block_device *dev = disk->private_data;
u8 operation;
@@ -653,10 +652,10 @@ static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
*
* Return 0 on success or negative error on failure.
*/
-static int i2o_block_ioctl(struct inode *inode, struct file *file,
+static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
+ struct gendisk *disk = bdev->bd_disk;
struct i2o_block_device *dev = disk->private_data;
/* Anyone capable of this syscall can do *real bad* things */
@@ -933,7 +932,7 @@ static struct block_device_operations i2o_block_fops = {
.owner = THIS_MODULE,
.open = i2o_block_open,
.release = i2o_block_release,
- .ioctl = i2o_block_ioctl,
+ .locked_ioctl = i2o_block_ioctl,
.getgeo = i2o_block_getgeo,
.media_changed = i2o_block_media_changed
};
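
These hunks follow the tree-wide block_device_operations conversion: ->open now receives (block_device, fmode_t), ->release receives (gendisk, fmode_t), and ioctl handlers that still rely on the BKL are registered via .locked_ioctl. A sketch of the converted prototypes for a hypothetical driver; the example_* names and the read_only flag are assumptions.

#include <linux/blkdev.h>
#include <linux/module.h>

struct example_blk_dev {
	int read_only;
};

static int example_open(struct block_device *bdev, fmode_t mode)
{
	struct example_blk_dev *dev = bdev->bd_disk->private_data;

	if (!dev)
		return -ENXIO;
	if ((mode & FMODE_WRITE) && dev->read_only)
		return -EROFS;
	return 0;
}

static int example_release(struct gendisk *disk, fmode_t mode)
{
	/* no inode to dig the gendisk out of any more */
	return 0;
}

static int example_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;		/* no driver-private ioctls in this sketch */
}

static struct block_device_operations example_bd_ops = {
	.owner        = THIS_MODULE,
	.open         = example_open,
	.release      = example_release,
	.locked_ioctl = example_ioctl,	/* called with the BKL held */
};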
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 68e237b830a..0acefe8aff8 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -17,7 +17,7 @@ wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o
obj-$(CONFIG_MFD_WM8350) += wm8350.o
obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
-obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o
+obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o twl4030-irq.o
obj-$(CONFIG_MFD_CORE) += mfd-core.o
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 220e4371266..170f9d47c2f 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1374,31 +1374,31 @@ static int sm501_init_dev(struct sm501_devdata *sm)
static int sm501_plat_probe(struct platform_device *dev)
{
struct sm501_devdata *sm;
- int err;
+ int ret;
sm = kzalloc(sizeof(struct sm501_devdata), GFP_KERNEL);
if (sm == NULL) {
dev_err(&dev->dev, "no memory for device data\n");
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err1;
}
sm->dev = &dev->dev;
sm->pdev_id = dev->id;
- sm->irq = platform_get_irq(dev, 0);
- sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
- sm->mem_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
sm->platdata = dev->dev.platform_data;
- if (sm->irq < 0) {
+ ret = platform_get_irq(dev, 0);
+ if (ret < 0) {
dev_err(&dev->dev, "failed to get irq resource\n");
- err = sm->irq;
goto err_res;
}
+ sm->irq = ret;
+ sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ sm->mem_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (sm->io_res == NULL || sm->mem_res == NULL) {
dev_err(&dev->dev, "failed to get IO resource\n");
- err = -ENOENT;
+ ret = -ENOENT;
goto err_res;
}
@@ -1407,7 +1407,7 @@ static int sm501_plat_probe(struct platform_device *dev)
if (sm->regs_claim == NULL) {
dev_err(&dev->dev, "cannot claim registers\n");
- err= -EBUSY;
+ ret = -EBUSY;
goto err_res;
}
@@ -1418,7 +1418,7 @@ static int sm501_plat_probe(struct platform_device *dev)
if (sm->regs == NULL) {
dev_err(&dev->dev, "cannot remap registers\n");
- err = -EIO;
+ ret = -EIO;
goto err_claim;
}
@@ -1430,7 +1430,7 @@ static int sm501_plat_probe(struct platform_device *dev)
err_res:
kfree(sm);
err1:
- return err;
+ return ret;
}
@@ -1625,8 +1625,7 @@ static int sm501_pci_probe(struct pci_dev *dev,
goto err3;
}
- sm->regs = ioremap(pci_resource_start(dev, 1),
- pci_resource_len(dev, 1));
+ sm->regs = pci_ioremap_bar(dev, 1);
if (sm->regs == NULL) {
dev_err(&dev->dev, "cannot remap registers\n");
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c
index fd9a0160202..dd843c4fbcc 100644
--- a/drivers/mfd/twl4030-core.c
+++ b/drivers/mfd/twl4030-core.c
@@ -27,15 +27,11 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/mutex.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/random.h>
-#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/i2c/twl4030.h>
@@ -93,26 +89,6 @@
#define twl_has_usb() false
#endif
-static inline void activate_irq(int irq)
-{
-#ifdef CONFIG_ARM
- /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
- * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
- */
- set_irq_flags(irq, IRQF_VALID);
-#else
- /* same effect on other architectures */
- set_irq_noprobe(irq);
-#endif
-}
-
-/* Primary Interrupt Handler on TWL4030 Registers */
-
-/* Register Definitions */
-
-#define REG_PIH_ISR_P1 (0x1)
-#define REG_PIH_ISR_P2 (0x2)
-#define REG_PIH_SIR (0x3)
/* Triton Core internal information (BEGIN) */
@@ -175,138 +151,6 @@ static inline void activate_irq(int irq)
/*----------------------------------------------------------------------*/
-/**
- * struct twl4030_mod_iregs - TWL module IMR/ISR regs to mask/clear at init
- * @mod_no: TWL4030 module number (e.g., TWL4030_MODULE_GPIO)
- * @sih_ctrl: address of module SIH_CTRL register
- * @reg_cnt: number of IMR/ISR regs
- * @imrs: pointer to array of TWL module interrupt mask register indices
- * @isrs: pointer to array of TWL module interrupt status register indices
- *
- * Ties together TWL4030 modules and lists of IMR/ISR registers to mask/clear
- * during twl_init_irq().
- */
-struct twl4030_mod_iregs {
- const u8 mod_no;
- const u8 sih_ctrl;
- const u8 reg_cnt;
- const u8 *imrs;
- const u8 *isrs;
-};
-
-/* TWL4030 INT module interrupt mask registers */
-static const u8 __initconst twl4030_int_imr_regs[] = {
- TWL4030_INT_PWR_IMR1,
- TWL4030_INT_PWR_IMR2,
-};
-
-/* TWL4030 INT module interrupt status registers */
-static const u8 __initconst twl4030_int_isr_regs[] = {
- TWL4030_INT_PWR_ISR1,
- TWL4030_INT_PWR_ISR2,
-};
-
-/* TWL4030 INTERRUPTS module interrupt mask registers */
-static const u8 __initconst twl4030_interrupts_imr_regs[] = {
- TWL4030_INTERRUPTS_BCIIMR1A,
- TWL4030_INTERRUPTS_BCIIMR1B,
- TWL4030_INTERRUPTS_BCIIMR2A,
- TWL4030_INTERRUPTS_BCIIMR2B,
-};
-
-/* TWL4030 INTERRUPTS module interrupt status registers */
-static const u8 __initconst twl4030_interrupts_isr_regs[] = {
- TWL4030_INTERRUPTS_BCIISR1A,
- TWL4030_INTERRUPTS_BCIISR1B,
- TWL4030_INTERRUPTS_BCIISR2A,
- TWL4030_INTERRUPTS_BCIISR2B,
-};
-
-/* TWL4030 MADC module interrupt mask registers */
-static const u8 __initconst twl4030_madc_imr_regs[] = {
- TWL4030_MADC_IMR1,
- TWL4030_MADC_IMR2,
-};
-
-/* TWL4030 MADC module interrupt status registers */
-static const u8 __initconst twl4030_madc_isr_regs[] = {
- TWL4030_MADC_ISR1,
- TWL4030_MADC_ISR2,
-};
-
-/* TWL4030 keypad module interrupt mask registers */
-static const u8 __initconst twl4030_keypad_imr_regs[] = {
- TWL4030_KEYPAD_KEYP_IMR1,
- TWL4030_KEYPAD_KEYP_IMR2,
-};
-
-/* TWL4030 keypad module interrupt status registers */
-static const u8 __initconst twl4030_keypad_isr_regs[] = {
- TWL4030_KEYPAD_KEYP_ISR1,
- TWL4030_KEYPAD_KEYP_ISR2,
-};
-
-/* TWL4030 GPIO module interrupt mask registers */
-static const u8 __initconst twl4030_gpio_imr_regs[] = {
- REG_GPIO_IMR1A,
- REG_GPIO_IMR1B,
- REG_GPIO_IMR2A,
- REG_GPIO_IMR2B,
- REG_GPIO_IMR3A,
- REG_GPIO_IMR3B,
-};
-
-/* TWL4030 GPIO module interrupt status registers */
-static const u8 __initconst twl4030_gpio_isr_regs[] = {
- REG_GPIO_ISR1A,
- REG_GPIO_ISR1B,
- REG_GPIO_ISR2A,
- REG_GPIO_ISR2B,
- REG_GPIO_ISR3A,
- REG_GPIO_ISR3B,
-};
-
-/* TWL4030 modules that have IMR/ISR registers that must be masked/cleared */
-static const struct twl4030_mod_iregs __initconst twl4030_mod_regs[] = {
- {
- .mod_no = TWL4030_MODULE_INT,
- .sih_ctrl = TWL4030_INT_PWR_SIH_CTRL,
- .reg_cnt = ARRAY_SIZE(twl4030_int_imr_regs),
- .imrs = twl4030_int_imr_regs,
- .isrs = twl4030_int_isr_regs,
- },
- {
- .mod_no = TWL4030_MODULE_INTERRUPTS,
- .sih_ctrl = TWL4030_INTERRUPTS_BCISIHCTRL,
- .reg_cnt = ARRAY_SIZE(twl4030_interrupts_imr_regs),
- .imrs = twl4030_interrupts_imr_regs,
- .isrs = twl4030_interrupts_isr_regs,
- },
- {
- .mod_no = TWL4030_MODULE_MADC,
- .sih_ctrl = TWL4030_MADC_SIH_CTRL,
- .reg_cnt = ARRAY_SIZE(twl4030_madc_imr_regs),
- .imrs = twl4030_madc_imr_regs,
- .isrs = twl4030_madc_isr_regs,
- },
- {
- .mod_no = TWL4030_MODULE_KEYPAD,
- .sih_ctrl = TWL4030_KEYPAD_KEYP_SIH_CTRL,
- .reg_cnt = ARRAY_SIZE(twl4030_keypad_imr_regs),
- .imrs = twl4030_keypad_imr_regs,
- .isrs = twl4030_keypad_isr_regs,
- },
- {
- .mod_no = TWL4030_MODULE_GPIO,
- .sih_ctrl = REG_GPIO_SIH_CTRL,
- .reg_cnt = ARRAY_SIZE(twl4030_gpio_imr_regs),
- .imrs = twl4030_gpio_imr_regs,
- .isrs = twl4030_gpio_isr_regs,
- },
-};
-
-/*----------------------------------------------------------------*/
-
/* is driver active, bound to a chip? */
static bool inuse;
@@ -367,33 +211,6 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
/*----------------------------------------------------------------------*/
-/*
- * TWL4030 doesn't have PIH mask, hence dummy function for mask
- * and unmask of the (eight) interrupts reported at that level ...
- * masking is only available from SIH (secondary) modules.
- */
-
-static void twl4030_i2c_ackirq(unsigned int irq)
-{
-}
-
-static void twl4030_i2c_disableint(unsigned int irq)
-{
-}
-
-static void twl4030_i2c_enableint(unsigned int irq)
-{
-}
-
-static struct irq_chip twl4030_irq_chip = {
- .name = "twl4030",
- .ack = twl4030_i2c_ackirq,
- .mask = twl4030_i2c_disableint,
- .unmask = twl4030_i2c_enableint,
-};
-
-/*----------------------------------------------------------------------*/
-
/* Exported Functions */
/**
@@ -535,108 +352,11 @@ EXPORT_SYMBOL(twl4030_i2c_read_u8);
/*----------------------------------------------------------------------*/
-static unsigned twl4030_irq_base;
-
-static struct completion irq_event;
-
-/*
- * This thread processes interrupts reported by the Primary Interrupt Handler.
- */
-static int twl4030_irq_thread(void *data)
-{
- long irq = (long)data;
- irq_desc_t *desc = irq_desc + irq;
- static unsigned i2c_errors;
- const static unsigned max_i2c_errors = 100;
-
- current->flags |= PF_NOFREEZE;
-
- while (!kthread_should_stop()) {
- int ret;
- int module_irq;
- u8 pih_isr;
-
- /* Wait for IRQ, then read PIH irq status (also blocking) */
- wait_for_completion_interruptible(&irq_event);
-
- ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
- REG_PIH_ISR_P1);
- if (ret) {
- pr_warning("%s: I2C error %d reading PIH ISR\n",
- DRIVER_NAME, ret);
- if (++i2c_errors >= max_i2c_errors) {
- printk(KERN_ERR "Maximum I2C error count"
- " exceeded. Terminating %s.\n",
- __func__);
- break;
- }
- complete(&irq_event);
- continue;
- }
-
- /* these handlers deal with the relevant SIH irq status */
- local_irq_disable();
- for (module_irq = twl4030_irq_base;
- pih_isr;
- pih_isr >>= 1, module_irq++) {
- if (pih_isr & 0x1) {
- irq_desc_t *d = irq_desc + module_irq;
-
- d->handle_irq(module_irq, d);
- }
- }
- local_irq_enable();
-
- desc->chip->unmask(irq);
- }
-
- return 0;
-}
-
/*
- * do_twl4030_irq() is the desc->handle method for the twl4030 interrupt.
- * This is a chained interrupt, so there is no desc->action method for it.
- * Now we need to query the interrupt controller in the twl4030 to determine
- * which module is generating the interrupt request. However, we can't do i2c
- * transactions in interrupt context, so we must defer that work to a kernel
- * thread. All we do here is acknowledge and mask the interrupt and wakeup
- * the kernel thread.
+ * NOTE: We know the first 8 IRQs after pdata->base_irq are
+ * for the PIH, and the next are for the PWR_INT SIH, since
+ * that's how twl_init_irq() sets things up.
*/
-static void do_twl4030_irq(unsigned int irq, irq_desc_t *desc)
-{
- const unsigned int cpu = smp_processor_id();
-
- /*
- * Earlier this was desc->triggered = 1;
- */
- desc->status |= IRQ_LEVEL;
-
- /*
- * Acknowledge, clear _AND_ disable the interrupt.
- */
- desc->chip->ack(irq);
-
- if (!desc->depth) {
- kstat_cpu(cpu).irqs[irq]++;
-
- complete(&irq_event);
- }
-}
-
-static struct task_struct * __init start_twl4030_irq_thread(long irq)
-{
- struct task_struct *thread;
-
- init_completion(&irq_event);
- thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq");
- if (!thread)
- pr_err("%s: could not create twl4030 irq %ld thread!\n",
- DRIVER_NAME, irq);
-
- return thread;
-}
-
-/*----------------------------------------------------------------------*/
static int add_children(struct twl4030_platform_data *pdata)
{
@@ -668,7 +388,7 @@ static int add_children(struct twl4030_platform_data *pdata)
if (status == 0) {
struct resource r = {
- .start = TWL4030_PWRIRQ_CHG_PRES,
+ .start = pdata->irq_base + 8 + 1,
.flags = IORESOURCE_IRQ,
};
@@ -817,8 +537,7 @@ static int add_children(struct twl4030_platform_data *pdata)
/* RTC module IRQ */
if (status == 0) {
struct resource r = {
- /* REVISIT don't hard-wire this stuff */
- .start = TWL4030_PWRIRQ_RTC,
+ .start = pdata->irq_base + 8 + 3,
.flags = IORESOURCE_IRQ,
};
@@ -863,7 +582,7 @@ static int add_children(struct twl4030_platform_data *pdata)
if (status == 0) {
struct resource r = {
- .start = TWL4030_PWRIRQ_USB_PRES,
+ .start = pdata->irq_base + 8 + 2,
.flags = IORESOURCE_IRQ,
};
@@ -965,123 +684,17 @@ static void __init clocks_init(void)
/*----------------------------------------------------------------------*/
-/**
- * twl4030_i2c_clear_isr - clear TWL4030 SIH ISR regs via read + write
- * @mod_no: TWL4030 module number
- * @reg: register index to clear
- * @cor: value of the <module>_SIH_CTRL.COR bit (1 or 0)
- *
- * Either reads (cor == 1) or writes (cor == 0) to a TWL4030 interrupt
- * status register to ensure that any prior interrupts are cleared.
- * Returns the status from the I2C read operation.
- */
-static int __init twl4030_i2c_clear_isr(u8 mod_no, u8 reg, u8 cor)
-{
- u8 tmp;
-
- return (cor) ? twl4030_i2c_read_u8(mod_no, &tmp, reg) :
- twl4030_i2c_write_u8(mod_no, 0xff, reg);
-}
-
-/**
- * twl4030_read_cor_bit - are TWL module ISRs cleared by reads or writes?
- * @mod_no: TWL4030 module number
- * @reg: register index to clear
- *
- * Returns 1 if the TWL4030 SIH interrupt status registers (ISRs) for
- * the specified TWL module are cleared by reads, or 0 if cleared by
- * writes.
- */
-static int twl4030_read_cor_bit(u8 mod_no, u8 reg)
-{
- u8 tmp = 0;
-
- WARN_ON(twl4030_i2c_read_u8(mod_no, &tmp, reg) < 0);
-
- tmp &= TWL4030_SIH_CTRL_COR_MASK;
- tmp >>= __ffs(TWL4030_SIH_CTRL_COR_MASK);
-
- return tmp;
-}
-
-/**
- * twl4030_mask_clear_intrs - mask and clear all TWL4030 interrupts
- * @t: pointer to twl4030_mod_iregs array
- * @t_sz: ARRAY_SIZE(t) (starting at 1)
- *
- * Mask all TWL4030 interrupt mask registers (IMRs) and clear all
- * interrupt status registers (ISRs). No return value, but will WARN if
- * any I2C operations fail.
- */
-static void __init twl4030_mask_clear_intrs(const struct twl4030_mod_iregs *t,
- const u8 t_sz)
-{
- int i, j;
-
- /*
- * N.B. - further efficiency is possible here. Eight I2C
- * operations on BCI and GPIO modules are avoidable if I2C
- * burst read/write transactions were implemented. Would
- * probably save about 1ms of boot time and a small amount of
- * power.
- */
- for (i = 0; i < t_sz; i++) {
- const struct twl4030_mod_iregs tmr = t[i];
- int cor;
-
- /* Are ISRs cleared by reads or writes? */
- cor = twl4030_read_cor_bit(tmr.mod_no, tmr.sih_ctrl);
-
- for (j = 0; j < tmr.reg_cnt; j++) {
-
- /* Mask interrupts at the TWL4030 */
- WARN_ON(twl4030_i2c_write_u8(tmr.mod_no, 0xff,
- tmr.imrs[j]) < 0);
-
- /* Clear TWL4030 ISRs */
- WARN_ON(twl4030_i2c_clear_isr(tmr.mod_no,
- tmr.isrs[j], cor) < 0);
- }
- }
-}
-
-
-static void twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
-{
- int i;
-
- /*
- * Mask and clear all TWL4030 interrupts since initially we do
- * not have any TWL4030 module interrupt handlers present
- */
- twl4030_mask_clear_intrs(twl4030_mod_regs,
- ARRAY_SIZE(twl4030_mod_regs));
-
- twl4030_irq_base = irq_base;
-
- /* install an irq handler for each of the PIH modules */
- for (i = irq_base; i < irq_end; i++) {
- set_irq_chip_and_handler(i, &twl4030_irq_chip,
- handle_simple_irq);
- activate_irq(i);
- }
-
- /* install an irq handler to demultiplex the TWL4030 interrupt */
- set_irq_data(irq_num, start_twl4030_irq_thread(irq_num));
- set_irq_chained_handler(irq_num, do_twl4030_irq);
-}
-
-/*----------------------------------------------------------------------*/
+int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end);
+int twl_exit_irq(void);
static int twl4030_remove(struct i2c_client *client)
{
unsigned i;
+ int status;
- /* FIXME undo twl_init_irq() */
- if (twl4030_irq_base) {
- dev_err(&client->dev, "can't yet clean up IRQs?\n");
- return -ENOSYS;
- }
+ status = twl_exit_irq();
+ if (status < 0)
+ return status;
for (i = 0; i < TWL4030_NUM_SLAVES; i++) {
struct twl4030_client *twl = &twl4030_modules[i];
@@ -1112,7 +725,7 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -EIO;
}
- if (inuse || twl4030_irq_base) {
+ if (inuse) {
dev_dbg(&client->dev, "driver is already in use\n");
return -EBUSY;
}
@@ -1146,9 +759,9 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (client->irq
&& pdata->irq_base
&& pdata->irq_end > pdata->irq_base) {
- twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end);
- dev_info(&client->dev, "IRQ %d chains IRQs %d..%d\n",
- client->irq, pdata->irq_base, pdata->irq_end - 1);
+ status = twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end);
+ if (status < 0)
+ goto fail;
}
status = add_children(pdata);
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
new file mode 100644
index 00000000000..fae868a8d49
--- /dev/null
+++ b/drivers/mfd/twl4030-irq.c
@@ -0,0 +1,743 @@
+/*
+ * twl4030-irq.c - TWL4030/TPS659x0 irq support
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * Modifications to defer interrupt handling to a kernel thread:
+ * Copyright (C) 2006 MontaVista Software, Inc.
+ *
+ * Based on tlv320aic23.c:
+ * Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
+ *
+ * Code cleanup and modifications to IRQ handler.
+ * by syed khasim <x0khasim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include <linux/i2c/twl4030.h>
+
+
+/*
+ * TWL4030 IRQ handling has two stages in hardware, and thus in software.
+ * The Primary Interrupt Handler (PIH) stage exposes status bits saying
+ * which Secondary Interrupt Handler (SIH) stage is raising an interrupt.
+ * SIH modules are more traditional IRQ components, which support per-IRQ
+ * enable/disable and trigger controls; they do most of the work.
+ *
+ * These chips are designed to support IRQ handling from two different
+ * I2C masters. Each has a dedicated IRQ line, and dedicated IRQ status
+ * and mask registers in the PIH and SIH modules.
+ *
+ * We set up IRQs starting at a platform-specified base, always starting
+ * with PIH and the SIH for PWR_INT and then usually adding GPIO:
+ * base + 0 .. base + 7 PIH
+ * base + 8 .. base + 15 SIH for PWR_INT
+ * base + 16 .. base + 33 SIH for GPIO
+ */
+
+/* PIH register offsets */
+#define REG_PIH_ISR_P1 0x01
+#define REG_PIH_ISR_P2 0x02
+#define REG_PIH_SIR 0x03 /* for testing */
+
+
+/* Linux could (eventually) use either IRQ line */
+static int irq_line;
+
+struct sih {
+ char name[8];
+ u8 module; /* module id */
+ u8 control_offset; /* for SIH_CTRL */
+ bool set_cor;
+
+ u8 bits; /* valid in isr/imr */
+ u8 bytes_ixr; /* bytelen of ISR/IMR/SIR */
+
+ u8 edr_offset;
+ u8 bytes_edr; /* bytelen of EDR */
+
+ /* SIR ignored -- set interrupt, for testing only */
+ struct irq_data {
+ u8 isr_offset;
+ u8 imr_offset;
+ } mask[2];
+ /* + 2 bytes padding */
+};
+
+#define SIH_INITIALIZER(modname, nbits) \
+ .module = TWL4030_MODULE_ ## modname, \
+ .control_offset = TWL4030_ ## modname ## _SIH_CTRL, \
+ .bits = nbits, \
+ .bytes_ixr = DIV_ROUND_UP(nbits, 8), \
+ .edr_offset = TWL4030_ ## modname ## _EDR, \
+ .bytes_edr = DIV_ROUND_UP((2*(nbits)), 8), \
+ .mask = { { \
+ .isr_offset = TWL4030_ ## modname ## _ISR1, \
+ .imr_offset = TWL4030_ ## modname ## _IMR1, \
+ }, \
+ { \
+ .isr_offset = TWL4030_ ## modname ## _ISR2, \
+ .imr_offset = TWL4030_ ## modname ## _IMR2, \
+ }, },
+
+/* register naming policies are inconsistent ... */
+#define TWL4030_INT_PWR_EDR TWL4030_INT_PWR_EDR1
+#define TWL4030_MODULE_KEYPAD_KEYP TWL4030_MODULE_KEYPAD
+#define TWL4030_MODULE_INT_PWR TWL4030_MODULE_INT
+
+
+/* Order in this table matches order in PIH_ISR. That is,
+ * BIT(n) in PIH_ISR is sih_modules[n].
+ */
+static const struct sih sih_modules[6] = {
+ [0] = {
+ .name = "gpio",
+ .module = TWL4030_MODULE_GPIO,
+ .control_offset = REG_GPIO_SIH_CTRL,
+ .set_cor = true,
+ .bits = TWL4030_GPIO_MAX,
+ .bytes_ixr = 3,
+ /* Note: *all* of these IRQs default to no-trigger */
+ .edr_offset = REG_GPIO_EDR1,
+ .bytes_edr = 5,
+ .mask = { {
+ .isr_offset = REG_GPIO_ISR1A,
+ .imr_offset = REG_GPIO_IMR1A,
+ }, {
+ .isr_offset = REG_GPIO_ISR1B,
+ .imr_offset = REG_GPIO_IMR1B,
+ }, },
+ },
+ [1] = {
+ .name = "keypad",
+ .set_cor = true,
+ SIH_INITIALIZER(KEYPAD_KEYP, 4)
+ },
+ [2] = {
+ .name = "bci",
+ .module = TWL4030_MODULE_INTERRUPTS,
+ .control_offset = TWL4030_INTERRUPTS_BCISIHCTRL,
+ .bits = 12,
+ .bytes_ixr = 2,
+ .edr_offset = TWL4030_INTERRUPTS_BCIEDR1,
+ /* Note: most of these IRQs default to no-trigger */
+ .bytes_edr = 3,
+ .mask = { {
+ .isr_offset = TWL4030_INTERRUPTS_BCIISR1A,
+ .imr_offset = TWL4030_INTERRUPTS_BCIIMR1A,
+ }, {
+ .isr_offset = TWL4030_INTERRUPTS_BCIISR1B,
+ .imr_offset = TWL4030_INTERRUPTS_BCIIMR1B,
+ }, },
+ },
+ [3] = {
+ .name = "madc",
+ SIH_INITIALIZER(MADC, 4)
+ },
+ [4] = {
+ /* USB doesn't use the same SIH organization */
+ .name = "usb",
+ },
+ [5] = {
+ .name = "power",
+ .set_cor = true,
+ SIH_INITIALIZER(INT_PWR, 8)
+ },
+ /* there are no SIH modules #6 or #7 ... */
+};
+
+#undef TWL4030_MODULE_KEYPAD_KEYP
+#undef TWL4030_MODULE_INT_PWR
+#undef TWL4030_INT_PWR_EDR
+
+/*----------------------------------------------------------------------*/
+
+static unsigned twl4030_irq_base;
+
+static struct completion irq_event;
+
+/*
+ * This thread processes interrupts reported by the Primary Interrupt Handler.
+ */
+static int twl4030_irq_thread(void *data)
+{
+ long irq = (long)data;
+ irq_desc_t *desc = irq_desc + irq;
+ static unsigned i2c_errors;
+ static const unsigned max_i2c_errors = 100;
+
+ current->flags |= PF_NOFREEZE;
+
+ while (!kthread_should_stop()) {
+ int ret;
+ int module_irq;
+ u8 pih_isr;
+
+ /* Wait for IRQ, then read PIH irq status (also blocking) */
+ wait_for_completion_interruptible(&irq_event);
+
+ ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
+ REG_PIH_ISR_P1);
+ if (ret) {
+ pr_warning("twl4030: I2C error %d reading PIH ISR\n",
+ ret);
+ if (++i2c_errors >= max_i2c_errors) {
+ printk(KERN_ERR "Maximum I2C error count"
+ " exceeded. Terminating %s.\n",
+ __func__);
+ break;
+ }
+ complete(&irq_event);
+ continue;
+ }
+
+ /* these handlers deal with the relevant SIH irq status */
+ local_irq_disable();
+ for (module_irq = twl4030_irq_base;
+ pih_isr;
+ pih_isr >>= 1, module_irq++) {
+ if (pih_isr & 0x1) {
+ irq_desc_t *d = irq_desc + module_irq;
+
+ /* These can't be masked ... always warn
+ * if we get any surprises.
+ */
+ if (d->status & IRQ_DISABLED)
+ note_interrupt(module_irq, d,
+ IRQ_NONE);
+ else
+ d->handle_irq(module_irq, d);
+ }
+ }
+ local_irq_enable();
+
+ desc->chip->unmask(irq);
+ }
+
+ return 0;
+}
+
+/*
+ * handle_twl4030_pih() is the desc->handle method for the twl4030 interrupt.
+ * This is a chained interrupt, so there is no desc->action method for it.
+ * Now we need to query the interrupt controller in the twl4030 to determine
+ * which module is generating the interrupt request. However, we can't do i2c
+ * transactions in interrupt context, so we must defer that work to a kernel
+ * thread. All we do here is acknowledge and mask the interrupt and wakeup
+ * the kernel thread.
+ */
+static void handle_twl4030_pih(unsigned int irq, irq_desc_t *desc)
+{
+ /* Acknowledge, clear *AND* mask the interrupt... */
+ desc->chip->ack(irq);
+ complete(&irq_event);
+}
+
+static struct task_struct *start_twl4030_irq_thread(long irq)
+{
+ struct task_struct *thread;
+
+ init_completion(&irq_event);
+ thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq");
+ if (!thread)
+ pr_err("twl4030: could not create irq %ld thread!\n", irq);
+
+ return thread;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * twl4030_init_sih_modules() ... start from a known state where no
+ * IRQs will be coming in, and where we can quickly enable them then
+ * handle them as they arrive. Mask all IRQs: maybe init SIH_CTRL.
+ *
+ * NOTE: we don't touch EDR registers here; they stay with hardware
+ * defaults or whatever the last value was. Note that when both EDR
+ * bits for an IRQ are clear, that's as if its IMR bit is set...
+ */
+static int twl4030_init_sih_modules(unsigned line)
+{
+ const struct sih *sih;
+ u8 buf[4];
+ int i;
+ int status;
+
+ /* line 0 == int1_n signal; line 1 == int2_n signal */
+ if (line > 1)
+ return -EINVAL;
+
+ irq_line = line;
+
+ /* disable all interrupts on our line */
+ memset(buf, 0xff, sizeof buf);
+ sih = sih_modules;
+ for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) {
+
+ /* skip USB -- it's funky */
+ if (!sih->bytes_ixr)
+ continue;
+
+ status = twl4030_i2c_write(sih->module, buf,
+ sih->mask[line].imr_offset, sih->bytes_ixr);
+ if (status < 0)
+ pr_err("twl4030: err %d initializing %s %s\n",
+ status, sih->name, "IMR");
+
+ /* Maybe disable "exclusive" mode; buffer second pending irq;
+ * set Clear-On-Read (COR) bit.
+ *
+ * NOTE that sometimes COR polarity is documented as being
+ * inverted: for MADC and BCI, COR=1 means "clear on write".
+ * And for PWR_INT it's not documented...
+ */
+ if (sih->set_cor) {
+ status = twl4030_i2c_write_u8(sih->module,
+ TWL4030_SIH_CTRL_COR_MASK,
+ sih->control_offset);
+ if (status < 0)
+ pr_err("twl4030: err %d initializing %s %s\n",
+ status, sih->name, "SIH_CTRL");
+ }
+ }
+
+ sih = sih_modules;
+ for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) {
+ u8 rxbuf[4];
+ int j;
+
+ /* skip USB */
+ if (!sih->bytes_ixr)
+ continue;
+
+ /* Clear pending interrupt status. Either the read was
+ * enough, or we need to write those bits. Repeat, in
+ * case an IRQ is pending (PENDDIS=0) ... that's not
+ * uncommon with PWR_INT.PWRON.
+ */
+ for (j = 0; j < 2; j++) {
+ status = twl4030_i2c_read(sih->module, rxbuf,
+ sih->mask[line].isr_offset, sih->bytes_ixr);
+ if (status < 0)
+ pr_err("twl4030: err %d initializing %s %s\n",
+ status, sih->name, "ISR");
+
+ if (!sih->set_cor)
+ status = twl4030_i2c_write(sih->module, buf,
+ sih->mask[line].isr_offset,
+ sih->bytes_ixr);
+ /* else COR=1 means read sufficed.
+ * (for most SIH modules...)
+ */
+ }
+ }
+
+ return 0;
+}
+
+static inline void activate_irq(int irq)
+{
+#ifdef CONFIG_ARM
+ /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
+ * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
+ */
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ /* same effect on other architectures */
+ set_irq_noprobe(irq);
+#endif
+}
+
+/*----------------------------------------------------------------------*/
+
+static DEFINE_SPINLOCK(sih_agent_lock);
+
+static struct workqueue_struct *wq;
+
+struct sih_agent {
+ int irq_base;
+ const struct sih *sih;
+
+ u32 imr;
+ bool imr_change_pending;
+ struct work_struct mask_work;
+
+ u32 edge_change;
+ struct work_struct edge_work;
+};
+
+static void twl4030_sih_do_mask(struct work_struct *work)
+{
+ struct sih_agent *agent;
+ const struct sih *sih;
+ union {
+ u8 bytes[4];
+ u32 word;
+ } imr;
+ int status;
+
+ agent = container_of(work, struct sih_agent, mask_work);
+
+ /* see what work we have */
+ spin_lock_irq(&sih_agent_lock);
+ if (agent->imr_change_pending) {
+ sih = agent->sih;
+ /* byte[0] gets overwritten as we write ... */
+ imr.word = cpu_to_le32(agent->imr << 8);
+ agent->imr_change_pending = false;
+ } else
+ sih = NULL;
+ spin_unlock_irq(&sih_agent_lock);
+ if (!sih)
+ return;
+
+ /* write the whole mask ... simpler than subsetting it */
+ status = twl4030_i2c_write(sih->module, imr.bytes,
+ sih->mask[irq_line].imr_offset, sih->bytes_ixr);
+ if (status)
+ pr_err("twl4030: %s, %s --> %d\n", __func__,
+ "write", status);
+}
+
+static void twl4030_sih_do_edge(struct work_struct *work)
+{
+ struct sih_agent *agent;
+ const struct sih *sih;
+ u8 bytes[6];
+ u32 edge_change;
+ int status;
+
+ agent = container_of(work, struct sih_agent, edge_work);
+
+ /* see what work we have */
+ spin_lock_irq(&sih_agent_lock);
+ edge_change = agent->edge_change;
+ agent->edge_change = 0;
+ sih = edge_change ? agent->sih : NULL;
+ spin_unlock_irq(&sih_agent_lock);
+ if (!sih)
+ return;
+
+ /* Read, reserving first byte for write scratch. Yes, this
+ * could be cached for some speedup ... but be careful about
+ * any processor on the other IRQ line, EDR registers are
+ * shared.
+ */
+ status = twl4030_i2c_read(sih->module, bytes + 1,
+ sih->edr_offset, sih->bytes_edr);
+ if (status) {
+ pr_err("twl4030: %s, %s --> %d\n", __func__,
+ "read", status);
+ return;
+ }
+
+ /* Modify only the bits we know must change */
+ while (edge_change) {
+ int i = fls(edge_change) - 1;
+ struct irq_desc *d = irq_desc + i + agent->irq_base;
+ int byte = 1 + (i >> 2);
+ int off = (i & 0x3) * 2;
+
+ bytes[byte] &= ~(0x03 << off);
+
+ spin_lock_irq(&d->lock);
+ if (d->status & IRQ_TYPE_EDGE_RISING)
+ bytes[byte] |= BIT(off + 1);
+ if (d->status & IRQ_TYPE_EDGE_FALLING)
+ bytes[byte] |= BIT(off + 0);
+ spin_unlock_irq(&d->lock);
+
+ edge_change &= ~BIT(i);
+ }
+
+ /* Write */
+ status = twl4030_i2c_write(sih->module, bytes,
+ sih->edr_offset, sih->bytes_edr);
+ if (status)
+ pr_err("twl4030: %s, %s --> %d\n", __func__,
+ "write", status);
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * All irq_chip methods get issued from code holding irq_desc[irq].lock,
+ * which can't perform the underlying I2C operations (because they sleep).
+ * So we must hand them off to a thread (workqueue) and cope with asynch
+ * completion, potentially including some re-ordering, of these requests.
+ */
+
+static void twl4030_sih_mask(unsigned irq)
+{
+ struct sih_agent *sih = get_irq_chip_data(irq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sih_agent_lock, flags);
+ sih->imr |= BIT(irq - sih->irq_base);
+ sih->imr_change_pending = true;
+ queue_work(wq, &sih->mask_work);
+ spin_unlock_irqrestore(&sih_agent_lock, flags);
+}
+
+static void twl4030_sih_unmask(unsigned irq)
+{
+ struct sih_agent *sih = get_irq_chip_data(irq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sih_agent_lock, flags);
+ sih->imr &= ~BIT(irq - sih->irq_base);
+ sih->imr_change_pending = true;
+ queue_work(wq, &sih->mask_work);
+ spin_unlock_irqrestore(&sih_agent_lock, flags);
+}
+
+static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
+{
+ struct sih_agent *sih = get_irq_chip_data(irq);
+ struct irq_desc *desc = irq_desc + irq;
+ unsigned long flags;
+
+ if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ return -EINVAL;
+
+ spin_lock_irqsave(&sih_agent_lock, flags);
+ if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) {
+ desc->status &= ~IRQ_TYPE_SENSE_MASK;
+ desc->status |= trigger;
+ sih->edge_change |= BIT(irq - sih->irq_base);
+ queue_work(wq, &sih->edge_work);
+ }
+ spin_unlock_irqrestore(&sih_agent_lock, flags);
+ return 0;
+}
+
+static struct irq_chip twl4030_sih_irq_chip = {
+ .name = "twl4030",
+ .mask = twl4030_sih_mask,
+ .unmask = twl4030_sih_unmask,
+ .set_type = twl4030_sih_set_type,
+};
+
+/*----------------------------------------------------------------------*/
+
+static inline int sih_read_isr(const struct sih *sih)
+{
+ int status;
+ union {
+ u8 bytes[4];
+ u32 word;
+ } isr;
+
+ /* FIXME need retry-on-error ... */
+
+ isr.word = 0;
+ status = twl4030_i2c_read(sih->module, isr.bytes,
+ sih->mask[irq_line].isr_offset, sih->bytes_ixr);
+
+ return (status < 0) ? status : le32_to_cpu(isr.word);
+}
+
+/*
+ * Generic handler for SIH interrupts ... we "know" this is called
+ * in task context, with IRQs enabled.
+ */
+static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+{
+ struct sih_agent *agent = get_irq_data(irq);
+ const struct sih *sih = agent->sih;
+ int isr;
+
+ /* reading ISR acks the IRQs, using clear-on-read mode */
+ local_irq_enable();
+ isr = sih_read_isr(sih);
+ local_irq_disable();
+
+ if (isr < 0) {
+ pr_err("twl4030: %s SIH, read ISR error %d\n",
+ sih->name, isr);
+ /* REVISIT: recover; eventually mask it all, etc */
+ return;
+ }
+
+ while (isr) {
+ irq = fls(isr);
+ irq--;
+ isr &= ~BIT(irq);
+
+ if (irq < sih->bits)
+ generic_handle_irq(agent->irq_base + irq);
+ else
+ pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
+ sih->name, irq);
+ }
+}
+
+static unsigned twl4030_irq_next;
+
+/* returns the first IRQ used by this SIH bank,
+ * or negative errno
+ */
+int twl4030_sih_setup(int module)
+{
+ int sih_mod;
+ const struct sih *sih = NULL;
+ struct sih_agent *agent;
+ int i, irq;
+ int status = -EINVAL;
+ unsigned irq_base = twl4030_irq_next;
+
+ /* only support modules with standard clear-on-read for now */
+ for (sih_mod = 0, sih = sih_modules;
+ sih_mod < ARRAY_SIZE(sih_modules);
+ sih_mod++, sih++) {
+ if (sih->module == module && sih->set_cor) {
+ if (!WARN((irq_base + sih->bits) > NR_IRQS,
+ "irq %d for %s too big\n",
+ irq_base + sih->bits,
+ sih->name))
+ status = 0;
+ break;
+ }
+ }
+ if (status < 0)
+ return status;
+
+ agent = kzalloc(sizeof *agent, GFP_KERNEL);
+ if (!agent)
+ return -ENOMEM;
+
+ status = 0;
+
+ agent->irq_base = irq_base;
+ agent->sih = sih;
+ agent->imr = ~0;
+ INIT_WORK(&agent->mask_work, twl4030_sih_do_mask);
+ INIT_WORK(&agent->edge_work, twl4030_sih_do_edge);
+
+ for (i = 0; i < sih->bits; i++) {
+ irq = irq_base + i;
+
+ set_irq_chip_and_handler(irq, &twl4030_sih_irq_chip,
+ handle_edge_irq);
+ set_irq_chip_data(irq, agent);
+ activate_irq(irq);
+ }
+
+ status = irq_base;
+ twl4030_irq_next += i;
+
+ /* replace generic PIH handler (handle_simple_irq) */
+ irq = sih_mod + twl4030_irq_base;
+ set_irq_data(irq, agent);
+ set_irq_chained_handler(irq, handle_twl4030_sih);
+
+ pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
+ irq, irq_base, twl4030_irq_next - 1);
+
+ return status;
+}
+
+/* FIXME need a call to reverse twl4030_sih_setup() ... */
+
+
+/*----------------------------------------------------------------------*/
+
+/* FIXME pass in which interrupt line we'll use ... */
+#define twl_irq_line 0
+
+int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
+{
+ static struct irq_chip twl4030_irq_chip;
+
+ int status;
+ int i;
+ struct task_struct *task;
+
+ /*
+ * Mask and clear all TWL4030 interrupts since initially we do
+ * not have any TWL4030 module interrupt handlers present
+ */
+ status = twl4030_init_sih_modules(twl_irq_line);
+ if (status < 0)
+ return status;
+
+ wq = create_singlethread_workqueue("twl4030-irqchip");
+ if (!wq) {
+ pr_err("twl4030: workqueue FAIL\n");
+ return -ESRCH;
+ }
+
+ twl4030_irq_base = irq_base;
+
+ /* install an irq handler for each of the SIH modules;
+ * clone dummy irq_chip since PIH can't *do* anything
+ */
+ twl4030_irq_chip = dummy_irq_chip;
+ twl4030_irq_chip.name = "twl4030";
+
+ twl4030_sih_irq_chip.ack = dummy_irq_chip.ack;
+
+ for (i = irq_base; i < irq_end; i++) {
+ set_irq_chip_and_handler(i, &twl4030_irq_chip,
+ handle_simple_irq);
+ activate_irq(i);
+ }
+ twl4030_irq_next = i;
+ pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", "PIH",
+ irq_num, irq_base, twl4030_irq_next - 1);
+
+ /* ... and the PWR_INT module ... */
+ status = twl4030_sih_setup(TWL4030_MODULE_INT);
+ if (status < 0) {
+ pr_err("twl4030: sih_setup PWR INT --> %d\n", status);
+ goto fail;
+ }
+
+ /* install an irq handler to demultiplex the TWL4030 interrupt */
+ task = start_twl4030_irq_thread(irq_num);
+ if (!task) {
+ pr_err("twl4030: irq thread FAIL\n");
+ status = -ESRCH;
+ goto fail;
+ }
+
+ set_irq_data(irq_num, task);
+ set_irq_chained_handler(irq_num, handle_twl4030_pih);
+
+ return status;
+
+fail:
+ for (i = irq_base; i < irq_end; i++)
+ set_irq_chip_and_handler(i, NULL, NULL);
+ destroy_workqueue(wq);
+ wq = NULL;
+ return status;
+}
+
+int twl_exit_irq(void)
+{
+ /* FIXME undo twl_init_irq() */
+ if (twl4030_irq_base) {
+ pr_err("twl4030: can't yet clean up IRQs?\n");
+ return -ENOSYS;
+ }
+ return 0;
+}
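
The new file implements the PIH/SIH scheme described in its header comment; from a child driver's point of view the result is an ordinary IRQ number handed over as a platform resource by add_children() in twl4030-core.c above. A minimal sketch of such a consumer, with all names assumed.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Sketch only: a TWL4030 child driver (RTC, USB, charger, ...) takes
 * its SIH interrupt like any other platform IRQ resource. */
static irqreturn_t example_twl_child_isr(int irq, void *dev_id)
{
	/* The SIH layer dispatches this via generic_handle_irq() once it
	 * has read the status registers over I2C. */
	return IRQ_HANDLED;
}

static int example_twl_child_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	return request_irq(irq, example_twl_child_isr, 0,
			   "example-twl-child", pdev);
}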
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index bf87f675e7f..0d47fb9e4b3 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -183,6 +183,9 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
(wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable)
| src[i - reg];
+ /* Don't store volatile bits */
+ wm8350->reg_cache[i] &= ~wm8350_reg_io_map[i].vol;
+
src[i - reg] = cpu_to_be16(src[i - reg]);
}
@@ -1120,6 +1123,7 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int mode)
}
value = be16_to_cpu(value);
value &= wm8350_reg_io_map[i].readable;
+ value &= ~wm8350_reg_io_map[i].vol;
wm8350->reg_cache[i] = value;
} else
wm8350->reg_cache[i] = reg_map[i];
@@ -1128,7 +1132,6 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int mode)
out:
return ret;
}
-EXPORT_SYMBOL_GPL(wm8350_create_cache);
/*
* Register a client device. This is non-fatal since there is no need to
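
The register cache now drops bits flagged volatile in wm8350_reg_io_map, both when the cache is first populated and on every cached write, so volatile fields are always re-read from the hardware rather than served stale from the cache. A tiny sketch of the masking rule, using a made-up map entry rather than the real wm8350 tables.

#include <linux/types.h>

struct example_reg_io {
	u16 readable;	/* bits that may be read */
	u16 writable;	/* bits that may be written */
	u16 vol;	/* volatile bits, never cached */
};

static u16 example_cache_value(const struct example_reg_io *io, u16 hw_value)
{
	u16 v = hw_value & io->readable;	/* drop unreadable bits */

	v &= ~io->vol;				/* never cache volatile ones */
	return v;
}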
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index efd3aa08b88..9494400e8fd 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -145,6 +145,7 @@ config ACER_WMI
depends on NEW_LEDS
depends on BACKLIGHT_CLASS_DEVICE
depends on SERIO_I8042
+ depends on RFKILL
select ACPI_WMI
---help---
This is a driver for newer Acer (and Wistron) laptops. It adds
@@ -245,6 +246,17 @@ config MSI_LAPTOP
If you have an MSI S270 laptop, say Y or M here.
+config PANASONIC_LAPTOP
+ tristate "Panasonic Laptop Extras"
+ depends on X86 && INPUT && ACPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ ---help---
+ This driver adds support for access to backlight control and hotkeys
+ on Panasonic Let's Note laptops.
+
+ If you have a Panasonic Let's note laptop (such as the R1(N variant),
+ R2, R3, R5, T2, W2 and Y2 series), say Y.
+
config COMPAL_LAPTOP
tristate "Compal Laptop Extras"
depends on X86
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c6c13f60b45..909e2468cdc 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_SGI_IOC4) += ioc4.o
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
+obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
index d8b0d326e45..0532a2de2ce 100644
--- a/drivers/misc/acer-wmi.c
+++ b/drivers/misc/acer-wmi.c
@@ -33,6 +33,8 @@
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/i8042.h>
+#include <linux/rfkill.h>
+#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <acpi/acpi_drivers.h>
@@ -123,21 +125,15 @@ enum interface_flags {
static int max_brightness = 0xF;
-static int wireless = -1;
-static int bluetooth = -1;
static int mailled = -1;
static int brightness = -1;
static int threeg = -1;
static int force_series;
module_param(mailled, int, 0444);
-module_param(wireless, int, 0444);
-module_param(bluetooth, int, 0444);
module_param(brightness, int, 0444);
module_param(threeg, int, 0444);
module_param(force_series, int, 0444);
-MODULE_PARM_DESC(wireless, "Set initial state of Wireless hardware");
-MODULE_PARM_DESC(bluetooth, "Set initial state of Bluetooth hardware");
MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
@@ -145,8 +141,6 @@ MODULE_PARM_DESC(force_series, "Force a different laptop series");
struct acer_data {
int mailled;
- int wireless;
- int bluetooth;
int threeg;
int brightness;
};
@@ -157,6 +151,9 @@ struct acer_debug {
u32 wmid_devices;
};
+static struct rfkill *wireless_rfkill;
+static struct rfkill *bluetooth_rfkill;
+
/* Each low-level interface must define at least some of the following */
struct wmi_interface {
/* The WMI device type */
@@ -476,7 +473,7 @@ struct wmi_interface *iface)
}
break;
default:
- return AE_BAD_ADDRESS;
+ return AE_ERROR;
}
return AE_OK;
}
@@ -514,7 +511,7 @@ static acpi_status AMW0_set_u32(u32 value, u32 cap, struct wmi_interface *iface)
break;
}
default:
- return AE_BAD_ADDRESS;
+ return AE_ERROR;
}
/* Actually do the set */
@@ -689,7 +686,7 @@ struct wmi_interface *iface)
return 0;
}
default:
- return AE_BAD_ADDRESS;
+ return AE_ERROR;
}
status = WMI_execute_u32(method_id, 0, &result);
@@ -735,7 +732,7 @@ static acpi_status WMID_set_u32(u32 value, u32 cap, struct wmi_interface *iface)
}
break;
default:
- return AE_BAD_ADDRESS;
+ return AE_ERROR;
}
return WMI_execute_u32(method_id, (u32)value, NULL);
}
@@ -785,7 +782,7 @@ static struct wmi_interface wmid_interface = {
static acpi_status get_u32(u32 *value, u32 cap)
{
- acpi_status status = AE_BAD_ADDRESS;
+ acpi_status status = AE_ERROR;
switch (interface->type) {
case ACER_AMW0:
@@ -846,8 +843,6 @@ static void __init acer_commandline_init(void)
* capability isn't available on the given interface
*/
set_u32(mailled, ACER_CAP_MAILLED);
- set_u32(wireless, ACER_CAP_WIRELESS);
- set_u32(bluetooth, ACER_CAP_BLUETOOTH);
set_u32(threeg, ACER_CAP_THREEG);
set_u32(brightness, ACER_CAP_BRIGHTNESS);
}
@@ -933,40 +928,135 @@ static void acer_backlight_exit(void)
}
/*
- * Read/ write bool sysfs macro
+ * Rfkill devices
*/
-#define show_set_bool(value, cap) \
-static ssize_t \
-show_bool_##value(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- u32 result; \
- acpi_status status = get_u32(&result, cap); \
- if (ACPI_SUCCESS(status)) \
- return sprintf(buf, "%u\n", result); \
- return sprintf(buf, "Read error\n"); \
-} \
-\
-static ssize_t \
-set_bool_##value(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- u32 tmp = simple_strtoul(buf, NULL, 10); \
- acpi_status status = set_u32(tmp, cap); \
- if (ACPI_FAILURE(status)) \
- return -EINVAL; \
- return count; \
-} \
-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
- show_bool_##value, set_bool_##value);
-
-show_set_bool(wireless, ACER_CAP_WIRELESS);
-show_set_bool(bluetooth, ACER_CAP_BLUETOOTH);
-show_set_bool(threeg, ACER_CAP_THREEG);
+static void acer_rfkill_update(struct work_struct *ignored);
+static DECLARE_DELAYED_WORK(acer_rfkill_work, acer_rfkill_update);
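+/* Poll the firmware radio state about once a second (the work item
+ * re-schedules itself) and feed the result into the rfkill core. */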
+static void acer_rfkill_update(struct work_struct *ignored)
+{
+ u32 state;
+ acpi_status status;
+
+ status = get_u32(&state, ACER_CAP_WIRELESS);
+ if (ACPI_SUCCESS(status))
+ rfkill_force_state(wireless_rfkill, state ?
+ RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED);
+
+ if (has_cap(ACER_CAP_BLUETOOTH)) {
+ status = get_u32(&state, ACER_CAP_BLUETOOTH);
+ if (ACPI_SUCCESS(status))
+ rfkill_force_state(bluetooth_rfkill, state ?
+ RFKILL_STATE_UNBLOCKED :
+ RFKILL_STATE_SOFT_BLOCKED);
+ }
+
+ schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
+}
+
+static int acer_rfkill_set(void *data, enum rfkill_state state)
+{
+ acpi_status status;
+ u32 *cap = data;
+ status = set_u32((u32) (state == RFKILL_STATE_UNBLOCKED), *cap);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ return 0;
+}
+
+static struct rfkill *acer_rfkill_register(struct device *dev,
+		enum rfkill_type type, char *name, u32 cap)
+{
+ int err;
+ u32 state;
+ u32 *data;
+ struct rfkill *rfkill_dev;
+
+ rfkill_dev = rfkill_allocate(dev, type);
+ if (!rfkill_dev)
+ return ERR_PTR(-ENOMEM);
+ rfkill_dev->name = name;
+ get_u32(&state, cap);
+ rfkill_dev->state = state ? RFKILL_STATE_UNBLOCKED :
+ RFKILL_STATE_SOFT_BLOCKED;
+ data = kzalloc(sizeof(u32), GFP_KERNEL);
+ if (!data) {
+ rfkill_free(rfkill_dev);
+ return ERR_PTR(-ENOMEM);
+ }
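+	/* stash the capability bit so acer_rfkill_set() knows which radio to toggle */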
+ *data = cap;
+ rfkill_dev->data = data;
+ rfkill_dev->toggle_radio = acer_rfkill_set;
+ rfkill_dev->user_claim_unsupported = 1;
+
+ err = rfkill_register(rfkill_dev);
+ if (err) {
+ kfree(rfkill_dev->data);
+ rfkill_free(rfkill_dev);
+ return ERR_PTR(err);
+ }
+ return rfkill_dev;
+}
+
+static int acer_rfkill_init(struct device *dev)
+{
+ wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN,
+ "acer-wireless", ACER_CAP_WIRELESS);
+ if (IS_ERR(wireless_rfkill))
+ return PTR_ERR(wireless_rfkill);
+
+ if (has_cap(ACER_CAP_BLUETOOTH)) {
+ bluetooth_rfkill = acer_rfkill_register(dev,
+ RFKILL_TYPE_BLUETOOTH, "acer-bluetooth",
+ ACER_CAP_BLUETOOTH);
+ if (IS_ERR(bluetooth_rfkill)) {
+ kfree(wireless_rfkill->data);
+ rfkill_unregister(wireless_rfkill);
+ return PTR_ERR(bluetooth_rfkill);
+ }
+ }
+
+ schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
+
+ return 0;
+}
+
+static void acer_rfkill_exit(void)
+{
+ cancel_delayed_work_sync(&acer_rfkill_work);
+ kfree(wireless_rfkill->data);
+ rfkill_unregister(wireless_rfkill);
+	if (has_cap(ACER_CAP_BLUETOOTH)) {
+		kfree(bluetooth_rfkill->data);
+		rfkill_unregister(bluetooth_rfkill);
+	}
+}
/*
- * Read interface sysfs macro
+ * sysfs interface
*/
+static ssize_t show_bool_threeg(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+	u32 result;
+ acpi_status status = get_u32(&result, ACER_CAP_THREEG);
+ if (ACPI_SUCCESS(status))
+ return sprintf(buf, "%u\n", result);
+ return sprintf(buf, "Read error\n");
+}
+
+static ssize_t set_bool_threeg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ u32 tmp = simple_strtoul(buf, NULL, 10);
+ acpi_status status = set_u32(tmp, ACER_CAP_THREEG);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ return count;
+}
+static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
+	set_bool_threeg);
+
static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1026,7 +1116,9 @@ static int __devinit acer_platform_probe(struct platform_device *device)
goto error_brightness;
}
- return 0;
+ err = acer_rfkill_init(&device->dev);
+
+ return err;
error_brightness:
acer_led_exit();
@@ -1040,6 +1132,8 @@ static int acer_platform_remove(struct platform_device *device)
acer_led_exit();
if (has_cap(ACER_CAP_BRIGHTNESS))
acer_backlight_exit();
+
+ acer_rfkill_exit();
return 0;
}
@@ -1052,16 +1146,6 @@ pm_message_t state)
if (!data)
return -ENOMEM;
- if (has_cap(ACER_CAP_WIRELESS)) {
- get_u32(&value, ACER_CAP_WIRELESS);
- data->wireless = value;
- }
-
- if (has_cap(ACER_CAP_BLUETOOTH)) {
- get_u32(&value, ACER_CAP_BLUETOOTH);
- data->bluetooth = value;
- }
-
if (has_cap(ACER_CAP_MAILLED)) {
get_u32(&value, ACER_CAP_MAILLED);
data->mailled = value;
@@ -1082,15 +1166,6 @@ static int acer_platform_resume(struct platform_device *device)
if (!data)
return -ENOMEM;
- if (has_cap(ACER_CAP_WIRELESS))
- set_u32(data->wireless, ACER_CAP_WIRELESS);
-
- if (has_cap(ACER_CAP_BLUETOOTH))
- set_u32(data->bluetooth, ACER_CAP_BLUETOOTH);
-
- if (has_cap(ACER_CAP_THREEG))
- set_u32(data->threeg, ACER_CAP_THREEG);
-
if (has_cap(ACER_CAP_MAILLED))
set_u32(data->mailled, ACER_CAP_MAILLED);
@@ -1115,12 +1190,6 @@ static struct platform_device *acer_platform_device;
static int remove_sysfs(struct platform_device *device)
{
- if (has_cap(ACER_CAP_WIRELESS))
- device_remove_file(&device->dev, &dev_attr_wireless);
-
- if (has_cap(ACER_CAP_BLUETOOTH))
- device_remove_file(&device->dev, &dev_attr_bluetooth);
-
if (has_cap(ACER_CAP_THREEG))
device_remove_file(&device->dev, &dev_attr_threeg);
@@ -1133,20 +1202,6 @@ static int create_sysfs(void)
{
int retval = -ENOMEM;
- if (has_cap(ACER_CAP_WIRELESS)) {
- retval = device_create_file(&acer_platform_device->dev,
- &dev_attr_wireless);
- if (retval)
- goto error_sysfs;
- }
-
- if (has_cap(ACER_CAP_BLUETOOTH)) {
- retval = device_create_file(&acer_platform_device->dev,
- &dev_attr_bluetooth);
- if (retval)
- goto error_sysfs;
- }
-
if (has_cap(ACER_CAP_THREEG)) {
retval = device_create_file(&acer_platform_device->dev,
&dev_attr_threeg);
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
index 7c6dfd03de9..a9d5228724a 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/misc/asus-laptop.c
@@ -139,6 +139,7 @@ ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
"\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
"\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
"\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
+ "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */
"\\_SB.PCI0.PX40.Q10", /* S1x */
"\\Q10"); /* A2x, L2D, L3D, M2E */
@@ -280,7 +281,7 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val,
static int read_wireless_status(int mask)
{
- ulong status;
+ unsigned long long status;
acpi_status rv = AE_OK;
if (!wireless_status_handle)
@@ -297,7 +298,7 @@ static int read_wireless_status(int mask)
static int read_gps_status(void)
{
- ulong status;
+ unsigned long long status;
acpi_status rv = AE_OK;
rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
@@ -350,7 +351,7 @@ static void write_status(acpi_handle handle, int out, int mask)
static void object##_led_set(struct led_classdev *led_cdev, \
enum led_brightness value) \
{ \
- object##_led_wk = value; \
+ object##_led_wk = (value > 0) ? 1 : 0; \
queue_work(led_workqueue, &object##_led_work); \
} \
static void object##_led_update(struct work_struct *ignored) \
@@ -404,7 +405,7 @@ static void lcd_blank(int blank)
static int read_brightness(struct backlight_device *bd)
{
- ulong value;
+ unsigned long long value;
acpi_status rv = AE_OK;
rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value);
@@ -455,7 +456,7 @@ static ssize_t show_infos(struct device *dev,
struct device_attribute *attr, char *page)
{
int len = 0;
- ulong temp;
+ unsigned long long temp;
char buf[16]; //enough for all info
acpi_status rv = AE_OK;
@@ -603,7 +604,7 @@ static void set_display(int value)
static int read_display(void)
{
- ulong value = 0;
+ unsigned long long value = 0;
acpi_status rv = AE_OK;
/* In most of the case, we know how to set the display, but sometime
@@ -849,7 +850,7 @@ static int asus_hotk_get_info(void)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *model = NULL;
- ulong bsts_result, hwrs_result;
+ unsigned long long bsts_result, hwrs_result;
char *string = NULL;
acpi_status status;
@@ -996,7 +997,7 @@ static int asus_hotk_add(struct acpi_device *device)
hotk->handle = device->handle;
strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME);
strcpy(acpi_device_class(device), ASUS_HOTK_CLASS);
- acpi_driver_data(device) = hotk;
+ device->driver_data = hotk;
hotk->device = device;
result = asus_hotk_check();
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c
index 1ee8501e90f..9ef98b2d503 100644
--- a/drivers/misc/eeepc-laptop.c
+++ b/drivers/misc/eeepc-laptop.c
@@ -28,6 +28,8 @@
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
#include <linux/uaccess.h>
+#include <linux/input.h>
+#include <linux/rfkill.h>
#define EEEPC_LAPTOP_VERSION "0.1"
@@ -125,6 +127,10 @@ struct eeepc_hotk {
by this BIOS */
uint init_flag; /* Init flags */
u16 event_count[128]; /* count for each event */
+ struct input_dev *inputdev;
+ u16 *keycode_map;
+ struct rfkill *eeepc_wlan_rfkill;
+ struct rfkill *eeepc_bluetooth_rfkill;
};
/* The actual device the driver binds to */
@@ -140,6 +146,27 @@ static struct platform_driver platform_driver = {
static struct platform_device *platform_device;
+struct key_entry {
+ char type;
+ u8 code;
+ u16 keycode;
+};
+
+enum { KE_KEY, KE_END };
+
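+/* Scancodes are the raw ACPI notify event codes reported by the hotkey device */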
+static struct key_entry eeepc_keymap[] = {
+ /* Sleep already handled via generic ACPI code */
+ {KE_KEY, 0x10, KEY_WLAN },
+ {KE_KEY, 0x12, KEY_PROG1 },
+ {KE_KEY, 0x13, KEY_MUTE },
+ {KE_KEY, 0x14, KEY_VOLUMEDOWN },
+ {KE_KEY, 0x15, KEY_VOLUMEUP },
+ {KE_KEY, 0x30, KEY_SWITCHVIDEOMODE },
+ {KE_KEY, 0x31, KEY_SWITCHVIDEOMODE },
+ {KE_KEY, 0x32, KEY_SWITCHVIDEOMODE },
+ {KE_END, 0},
+};
+
/*
* The hotkey driver declaration
*/
@@ -204,7 +231,7 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val,
static int read_acpi_int(acpi_handle handle, const char *method, int *val)
{
acpi_status status;
- ulong result;
+ unsigned long long result;
status = acpi_evaluate_integer(handle, (char *)method, NULL, &result);
if (ACPI_FAILURE(status)) {
@@ -261,6 +288,44 @@ static int update_bl_status(struct backlight_device *bd)
}
/*
+ * Rfkill helpers
+ */
+
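+/* The CM_ASL_WLAN / CM_ASL_BLUETOOTH controls use 1 = radio on, 0 = radio off */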
+static int eeepc_wlan_rfkill_set(void *data, enum rfkill_state state)
+{
+ if (state == RFKILL_STATE_SOFT_BLOCKED)
+ return set_acpi(CM_ASL_WLAN, 0);
+ else
+ return set_acpi(CM_ASL_WLAN, 1);
+}
+
+static int eeepc_wlan_rfkill_state(void *data, enum rfkill_state *state)
+{
+ if (get_acpi(CM_ASL_WLAN) == 1)
+ *state = RFKILL_STATE_UNBLOCKED;
+ else
+ *state = RFKILL_STATE_SOFT_BLOCKED;
+ return 0;
+}
+
+static int eeepc_bluetooth_rfkill_set(void *data, enum rfkill_state state)
+{
+ if (state == RFKILL_STATE_SOFT_BLOCKED)
+ return set_acpi(CM_ASL_BLUETOOTH, 0);
+ else
+ return set_acpi(CM_ASL_BLUETOOTH, 1);
+}
+
+static int eeepc_bluetooth_rfkill_state(void *data, enum rfkill_state *state)
+{
+ if (get_acpi(CM_ASL_BLUETOOTH) == 1)
+ *state = RFKILL_STATE_UNBLOCKED;
+ else
+ *state = RFKILL_STATE_SOFT_BLOCKED;
+ return 0;
+}
+
+/*
* Sys helpers
*/
static int parse_arg(const char *buf, unsigned long count, int *val)
@@ -311,13 +376,11 @@ static ssize_t show_sys_acpi(int cm, char *buf)
EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
-EEEPC_CREATE_DEVICE_ATTR(wlan, CM_ASL_WLAN);
static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_disp.attr,
- &dev_attr_wlan.attr,
NULL
};
@@ -328,8 +391,64 @@ static struct attribute_group platform_attribute_group = {
/*
* Hotkey functions
*/
+static struct key_entry *eepc_get_entry_by_scancode(int code)
+{
+ struct key_entry *key;
+
+ for (key = eeepc_keymap; key->type != KE_END; key++)
+ if (code == key->code)
+ return key;
+
+ return NULL;
+}
+
+static struct key_entry *eepc_get_entry_by_keycode(int code)
+{
+ struct key_entry *key;
+
+ for (key = eeepc_keymap; key->type != KE_END; key++)
+ if (code == key->keycode && key->type == KE_KEY)
+ return key;
+
+ return NULL;
+}
+
+static int eeepc_getkeycode(struct input_dev *dev, int scancode, int *keycode)
+{
+ struct key_entry *key = eepc_get_entry_by_scancode(scancode);
+
+ if (key && key->type == KE_KEY) {
+ *keycode = key->keycode;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
+{
+ struct key_entry *key;
+ int old_keycode;
+
+ if (keycode < 0 || keycode > KEY_MAX)
+ return -EINVAL;
+
+ key = eepc_get_entry_by_scancode(scancode);
+ if (key && key->type == KE_KEY) {
+ old_keycode = key->keycode;
+ key->keycode = keycode;
+ set_bit(keycode, dev->keybit);
+ if (!eepc_get_entry_by_keycode(old_keycode))
+ clear_bit(old_keycode, dev->keybit);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int eeepc_hotk_check(void)
{
+ const struct key_entry *key;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
int result;
@@ -356,6 +475,31 @@ static int eeepc_hotk_check(void)
"Get control methods supported: 0x%x\n",
ehotk->cm_supported);
}
+ ehotk->inputdev = input_allocate_device();
+ if (!ehotk->inputdev) {
+ printk(EEEPC_INFO "Unable to allocate input device\n");
+ return 0;
+ }
+ ehotk->inputdev->name = "Asus EeePC extra buttons";
+ ehotk->inputdev->phys = EEEPC_HOTK_FILE "/input0";
+ ehotk->inputdev->id.bustype = BUS_HOST;
+ ehotk->inputdev->getkeycode = eeepc_getkeycode;
+ ehotk->inputdev->setkeycode = eeepc_setkeycode;
+
+ for (key = eeepc_keymap; key->type != KE_END; key++) {
+ switch (key->type) {
+ case KE_KEY:
+ set_bit(EV_KEY, ehotk->inputdev->evbit);
+ set_bit(key->keycode, ehotk->inputdev->keybit);
+ break;
+ }
+ }
+ result = input_register_device(ehotk->inputdev);
+ if (result) {
+ printk(EEEPC_INFO "Unable to register input device\n");
+ input_free_device(ehotk->inputdev);
+ return 0;
+ }
} else {
printk(EEEPC_ERR "Hotkey device not present, aborting\n");
return -EINVAL;
@@ -363,21 +507,6 @@ static int eeepc_hotk_check(void)
return 0;
}
-static void notify_wlan(u32 *event)
-{
- /* if DISABLE_ASL_WLAN is set, the notify code for fn+f2
- will always be 0x10 */
- if (ehotk->cm_supported & (0x1 << CM_ASL_WLAN)) {
- const char *method = cm_getv[CM_ASL_WLAN];
- int value;
- if (read_acpi_int(ehotk->handle, method, &value))
- printk(EEEPC_WARNING "Error reading %s\n",
- method);
- else if (value == 1)
- *event = 0x11;
- }
-}
-
static void notify_brn(void)
{
struct backlight_device *bd = eeepc_backlight_device;
@@ -386,14 +515,28 @@ static void notify_brn(void)
static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
{
+ static struct key_entry *key;
if (!ehotk)
return;
- if (event == NOTIFY_WLAN_ON && (DISABLE_ASL_WLAN & ehotk->init_flag))
- notify_wlan(&event);
if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
notify_brn();
acpi_bus_generate_proc_event(ehotk->device, event,
ehotk->event_count[event % 128]++);
+ if (ehotk->inputdev) {
+ key = eepc_get_entry_by_scancode(event);
+ if (key) {
+ switch (key->type) {
+ case KE_KEY:
+ input_report_key(ehotk->inputdev, key->keycode,
+ 1);
+ input_sync(ehotk->inputdev);
+ input_report_key(ehotk->inputdev, key->keycode,
+ 0);
+ input_sync(ehotk->inputdev);
+ break;
+ }
+ }
+ }
}
static int eeepc_hotk_add(struct acpi_device *device)
@@ -411,7 +554,7 @@ static int eeepc_hotk_add(struct acpi_device *device)
ehotk->handle = device->handle;
strcpy(acpi_device_name(device), EEEPC_HOTK_DEVICE_NAME);
strcpy(acpi_device_class(device), EEEPC_HOTK_CLASS);
- acpi_driver_data(device) = ehotk;
+ device->driver_data = ehotk;
ehotk->device = device;
result = eeepc_hotk_check();
if (result)
@@ -420,6 +563,47 @@ static int eeepc_hotk_add(struct acpi_device *device)
eeepc_hotk_notify, ehotk);
if (ACPI_FAILURE(status))
printk(EEEPC_ERR "Error installing notify handler\n");
+
+ if (get_acpi(CM_ASL_WLAN) != -1) {
+ ehotk->eeepc_wlan_rfkill = rfkill_allocate(&device->dev,
+ RFKILL_TYPE_WLAN);
+
+ if (!ehotk->eeepc_wlan_rfkill)
+ goto end;
+
+ ehotk->eeepc_wlan_rfkill->name = "eeepc-wlan";
+ ehotk->eeepc_wlan_rfkill->toggle_radio = eeepc_wlan_rfkill_set;
+ ehotk->eeepc_wlan_rfkill->get_state = eeepc_wlan_rfkill_state;
+ if (get_acpi(CM_ASL_WLAN) == 1)
+ ehotk->eeepc_wlan_rfkill->state =
+ RFKILL_STATE_UNBLOCKED;
+ else
+ ehotk->eeepc_wlan_rfkill->state =
+ RFKILL_STATE_SOFT_BLOCKED;
+ rfkill_register(ehotk->eeepc_wlan_rfkill);
+ }
+
+ if (get_acpi(CM_ASL_BLUETOOTH) != -1) {
+ ehotk->eeepc_bluetooth_rfkill =
+ rfkill_allocate(&device->dev, RFKILL_TYPE_BLUETOOTH);
+
+ if (!ehotk->eeepc_bluetooth_rfkill)
+ goto end;
+
+ ehotk->eeepc_bluetooth_rfkill->name = "eeepc-bluetooth";
+ ehotk->eeepc_bluetooth_rfkill->toggle_radio =
+ eeepc_bluetooth_rfkill_set;
+ ehotk->eeepc_bluetooth_rfkill->get_state =
+ eeepc_bluetooth_rfkill_state;
+ if (get_acpi(CM_ASL_BLUETOOTH) == 1)
+ ehotk->eeepc_bluetooth_rfkill->state =
+ RFKILL_STATE_UNBLOCKED;
+ else
+ ehotk->eeepc_bluetooth_rfkill->state =
+ RFKILL_STATE_SOFT_BLOCKED;
+ rfkill_register(ehotk->eeepc_bluetooth_rfkill);
+ }
+
end:
if (result) {
kfree(ehotk);
@@ -553,6 +737,12 @@ static void eeepc_backlight_exit(void)
{
if (eeepc_backlight_device)
backlight_device_unregister(eeepc_backlight_device);
+ if (ehotk->inputdev)
+ input_unregister_device(ehotk->inputdev);
+ if (ehotk->eeepc_wlan_rfkill)
+ rfkill_unregister(ehotk->eeepc_wlan_rfkill);
+ if (ehotk->eeepc_bluetooth_rfkill)
+ rfkill_unregister(ehotk->eeepc_bluetooth_rfkill);
eeepc_backlight_device = NULL;
}
diff --git a/drivers/misc/fujitsu-laptop.c b/drivers/misc/fujitsu-laptop.c
index 3e56203e494..d2cf0bfe316 100644
--- a/drivers/misc/fujitsu-laptop.c
+++ b/drivers/misc/fujitsu-laptop.c
@@ -44,8 +44,9 @@
* Hotkeys present on certain Fujitsu laptops (eg: the S6xxx series) are
* also supported by this driver.
*
- * This driver has been tested on a Fujitsu Lifebook S6410 and S7020. It
- * should work on most P-series and S-series Lifebooks, but YMMV.
+ * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and
+ * P8010. It should work on most P-series and S-series Lifebooks, but
+ * YMMV.
*
* The module parameter use_alt_lcd_levels switches between different ACPI
* brightness controls which are used by different Fujitsu laptops. In most
@@ -65,7 +66,7 @@
#include <linux/video_output.h>
#include <linux/platform_device.h>
-#define FUJITSU_DRIVER_VERSION "0.4.2"
+#define FUJITSU_DRIVER_VERSION "0.4.3"
#define FUJITSU_LCD_N_LEVELS 8
@@ -83,10 +84,10 @@
#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87
/* Hotkey details */
-#define LOCK_KEY 0x410 /* codes for the keys in the GIRB register */
-#define DISPLAY_KEY 0x411 /* keys are mapped to KEY_SCREENLOCK (the key with the key symbol) */
-#define ENERGY_KEY 0x412 /* KEY_MEDIA (the key with the laptop symbol, KEY_EMAIL (E key)) */
-#define REST_KEY 0x413 /* KEY_SUSPEND (R key) */
+#define KEY1_CODE 0x410 /* codes for the keys in the GIRB register */
+#define KEY2_CODE 0x411
+#define KEY3_CODE 0x412
+#define KEY4_CODE 0x413
#define MAX_HOTKEY_RINGBUFFER_SIZE 100
#define RINGBUFFERSIZE 40
@@ -123,6 +124,7 @@ struct fujitsu_t {
char phys[32];
struct backlight_device *bl_device;
struct platform_device *pf_device;
+ int keycode1, keycode2, keycode3, keycode4;
unsigned int max_brightness;
unsigned int brightness_changed;
@@ -224,7 +226,7 @@ static int set_lcd_level_alt(int level)
static int get_lcd_level(void)
{
- unsigned long state = 0;
+ unsigned long long state = 0;
acpi_status status = AE_OK;
vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");
@@ -246,7 +248,7 @@ static int get_lcd_level(void)
static int get_max_brightness(void)
{
- unsigned long state = 0;
+ unsigned long long state = 0;
acpi_status status = AE_OK;
vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");
@@ -263,7 +265,7 @@ static int get_max_brightness(void)
static int get_lcd_level_alt(void)
{
- unsigned long state = 0;
+ unsigned long long state = 0;
acpi_status status = AE_OK;
vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLS\n");
@@ -384,7 +386,7 @@ static ssize_t store_lcd_level(struct device *dev,
static int get_irb(void)
{
- unsigned long state = 0;
+ unsigned long long state = 0;
acpi_status status = AE_OK;
vdbg_printk(FUJLAPTOP_DBG_TRACE, "Get irb\n");
@@ -430,7 +432,7 @@ static struct platform_driver fujitsupf_driver = {
}
};
-static int dmi_check_cb_s6410(const struct dmi_system_id *id)
+static void dmi_check_cb_common(const struct dmi_system_id *id)
{
acpi_handle handle;
int have_blnf;
@@ -452,24 +454,40 @@ static int dmi_check_cb_s6410(const struct dmi_system_id *id)
"auto-detecting disable_adjust\n");
disable_brightness_adjust = have_blnf ? 0 : 1;
}
+}
+
+static int dmi_check_cb_s6410(const struct dmi_system_id *id)
+{
+ dmi_check_cb_common(id);
+ fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */
+ fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */
+ return 0;
+}
+
+static int dmi_check_cb_p8010(const struct dmi_system_id *id)
+{
+ dmi_check_cb_common(id);
+ fujitsu->keycode1 = KEY_HELP; /* "Support" */
+ fujitsu->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */
+ fujitsu->keycode4 = KEY_WWW; /* "Internet" */
return 0;
}
static struct dmi_system_id __initdata fujitsu_dmi_table[] = {
{
- .ident = "Fujitsu Siemens",
+ .ident = "Fujitsu Siemens S6410",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"),
},
.callback = dmi_check_cb_s6410},
{
- .ident = "FUJITSU LifeBook P8010",
+ .ident = "Fujitsu LifeBook P8010",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P8010"),
- },
- .callback = dmi_check_cb_s6410},
+ },
+ .callback = dmi_check_cb_p8010},
{}
};
@@ -490,7 +508,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
fujitsu->acpi_handle = device->handle;
sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
- acpi_driver_data(device) = fujitsu;
+ device->driver_data = fujitsu;
status = acpi_install_notify_handler(device->handle,
ACPI_DEVICE_NOTIFY,
@@ -547,7 +565,6 @@ static int acpi_fujitsu_add(struct acpi_device *device)
}
/* do config (detect defaults) */
- dmi_check_system(fujitsu_dmi_table);
use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
disable_brightness_keys = disable_brightness_keys == 1 ? 1 : 0;
disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
@@ -623,17 +640,17 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data)
keycode = 0;
if (disable_brightness_keys != 1) {
if (oldb == 0) {
- acpi_bus_generate_proc_event(fujitsu->
- dev,
- ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS,
- 0);
+ acpi_bus_generate_proc_event
+ (fujitsu->dev,
+ ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS,
+ 0);
keycode = KEY_BRIGHTNESSDOWN;
} else if (oldb ==
(fujitsu->max_brightness) - 1) {
- acpi_bus_generate_proc_event(fujitsu->
- dev,
- ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS,
- 0);
+ acpi_bus_generate_proc_event
+ (fujitsu->dev,
+ ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS,
+ 0);
keycode = KEY_BRIGHTNESSUP;
}
}
@@ -646,8 +663,7 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data)
}
if (disable_brightness_keys != 1) {
acpi_bus_generate_proc_event(fujitsu->dev,
- ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS,
- 0);
+ ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 0);
keycode = KEY_BRIGHTNESSUP;
}
} else if (oldb > newb) {
@@ -659,8 +675,7 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data)
}
if (disable_brightness_keys != 1) {
acpi_bus_generate_proc_event(fujitsu->dev,
- ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS,
- 0);
+ ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 0);
keycode = KEY_BRIGHTNESSDOWN;
}
} else {
@@ -703,7 +718,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
sprintf(acpi_device_name(device), "%s",
ACPI_FUJITSU_HOTKEY_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
- acpi_driver_data(device) = fujitsu_hotkey;
+ device->driver_data = fujitsu_hotkey;
status = acpi_install_notify_handler(device->handle,
ACPI_DEVICE_NOTIFY,
@@ -742,10 +757,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
input->id.product = 0x06;
input->dev.parent = &device->dev;
input->evbit[0] = BIT(EV_KEY);
- set_bit(KEY_SCREENLOCK, input->keybit);
- set_bit(KEY_MEDIA, input->keybit);
- set_bit(KEY_EMAIL, input->keybit);
- set_bit(KEY_SUSPEND, input->keybit);
+ set_bit(fujitsu->keycode1, input->keybit);
+ set_bit(fujitsu->keycode2, input->keybit);
+ set_bit(fujitsu->keycode3, input->keybit);
+ set_bit(fujitsu->keycode4, input->keybit);
set_bit(KEY_UNKNOWN, input->keybit);
error = input_register_device(input);
@@ -833,24 +848,24 @@ static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
irb);
switch (irb & 0x4ff) {
- case LOCK_KEY:
- keycode = KEY_SCREENLOCK;
+ case KEY1_CODE:
+ keycode = fujitsu->keycode1;
break;
- case DISPLAY_KEY:
- keycode = KEY_MEDIA;
+ case KEY2_CODE:
+ keycode = fujitsu->keycode2;
break;
- case ENERGY_KEY:
- keycode = KEY_EMAIL;
+ case KEY3_CODE:
+ keycode = fujitsu->keycode3;
break;
- case REST_KEY:
- keycode = KEY_SUSPEND;
+ case KEY4_CODE:
+ keycode = fujitsu->keycode4;
break;
case 0:
keycode = 0;
break;
default:
vdbg_printk(FUJLAPTOP_DBG_WARN,
- "Unknown GIRB result [%x]\n", irb);
+ "Unknown GIRB result [%x]\n", irb);
keycode = -1;
break;
}
@@ -859,12 +874,12 @@ static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
"Push keycode into ringbuffer [%d]\n",
keycode);
status = kfifo_put(fujitsu_hotkey->fifo,
- (unsigned char *)&keycode,
- sizeof(keycode));
+ (unsigned char *)&keycode,
+ sizeof(keycode));
if (status != sizeof(keycode)) {
vdbg_printk(FUJLAPTOP_DBG_WARN,
- "Could not push keycode [0x%x]\n",
- keycode);
+ "Could not push keycode [0x%x]\n",
+ keycode);
} else {
input_report_key(input, keycode, 1);
input_sync(input);
@@ -879,8 +894,8 @@ static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event,
input_report_key(input, keycode_r, 0);
input_sync(input);
vdbg_printk(FUJLAPTOP_DBG_TRACE,
- "Pop keycode from ringbuffer [%d]\n",
- keycode_r);
+ "Pop keycode from ringbuffer [%d]\n",
+ keycode_r);
}
}
}
@@ -943,6 +958,11 @@ static int __init fujitsu_init(void)
if (!fujitsu)
return -ENOMEM;
memset(fujitsu, 0, sizeof(struct fujitsu_t));
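+	/* default hotkey mapping; dmi_check_system() below may override these per model */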
+ fujitsu->keycode1 = KEY_PROG1;
+ fujitsu->keycode2 = KEY_PROG2;
+ fujitsu->keycode3 = KEY_PROG3;
+ fujitsu->keycode4 = KEY_PROG4;
+ dmi_check_system(fujitsu_dmi_table);
result = acpi_bus_register_driver(&acpi_fujitsu_driver);
if (result < 0) {
@@ -1076,15 +1096,14 @@ MODULE_DESCRIPTION("Fujitsu laptop extras support");
MODULE_VERSION(FUJITSU_DRIVER_VERSION);
MODULE_LICENSE("GPL");
-MODULE_ALIAS
- ("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
-MODULE_ALIAS
- ("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
+MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
+MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
static struct pnp_device_id pnp_ids[] = {
- { .id = "FUJ02bf" },
- { .id = "FUJ02B1" },
- { .id = "FUJ02E3" },
- { .id = "" }
+ {.id = "FUJ02bf"},
+ {.id = "FUJ02B1"},
+ {.id = "FUJ02E3"},
+ {.id = ""}
};
+
MODULE_DEVICE_TABLE(pnp, pnp_ids);
diff --git a/drivers/misc/intel_menlow.c b/drivers/misc/intel_menlow.c
index 80a13635240..e00a2756e97 100644
--- a/drivers/misc/intel_menlow.c
+++ b/drivers/misc/intel_menlow.c
@@ -57,7 +57,7 @@ static int memory_get_int_max_bandwidth(struct thermal_cooling_device *cdev,
{
struct acpi_device *device = cdev->devdata;
acpi_handle handle = device->handle;
- unsigned long value;
+ unsigned long long value;
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status = AE_OK;
@@ -90,7 +90,7 @@ static int memory_get_cur_bandwidth(struct thermal_cooling_device *cdev,
{
struct acpi_device *device = cdev->devdata;
acpi_handle handle = device->handle;
- unsigned long value;
+ unsigned long long value;
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status = AE_OK;
@@ -104,7 +104,7 @@ static int memory_get_cur_bandwidth(struct thermal_cooling_device *cdev,
if (ACPI_FAILURE(status))
return -EFAULT;
- return sprintf(buf, "%ld\n", value);
+ return sprintf(buf, "%llu\n", value);
}
static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
@@ -115,7 +115,7 @@ static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status;
- int temp;
+ unsigned long long temp;
unsigned long max_state;
if (memory_get_int_max_bandwidth(cdev, &max_state))
@@ -131,7 +131,7 @@ static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
status =
acpi_evaluate_integer(handle, MEMORY_SET_BANDWIDTH, &arg_list,
- (unsigned long *)&temp);
+ &temp);
printk(KERN_INFO
"Bandwidth value was %d: status is %d\n", state, status);
@@ -175,7 +175,7 @@ static int intel_menlow_memory_add(struct acpi_device *device)
goto end;
}
- acpi_driver_data(device) = cdev;
+ device->driver_data = cdev;
result = sysfs_create_link(&device->dev.kobj,
&cdev->device.kobj, "thermal_cooling");
if (result)
@@ -252,7 +252,8 @@ static DEFINE_MUTEX(intel_menlow_attr_lock);
* @auxtype : AUX0/AUX1
* @buf: syfs buffer
*/
-static int sensor_get_auxtrip(acpi_handle handle, int index, int *value)
+static int sensor_get_auxtrip(acpi_handle handle, int index,
+ unsigned long long *value)
{
acpi_status status;
@@ -260,7 +261,7 @@ static int sensor_get_auxtrip(acpi_handle handle, int index, int *value)
return -EINVAL;
status = acpi_evaluate_integer(handle, index ? GET_AUX1 : GET_AUX0,
- NULL, (unsigned long *)value);
+ NULL, value);
if (ACPI_FAILURE(status))
return -EIO;
@@ -282,13 +283,13 @@ static int sensor_set_auxtrip(acpi_handle handle, int index, int value)
struct acpi_object_list args = {
1, &arg
};
- int temp;
+ unsigned long long temp;
if (index != 0 && index != 1)
return -EINVAL;
status = acpi_evaluate_integer(handle, index ? GET_AUX0 : GET_AUX1,
- NULL, (unsigned long *)&temp);
+ NULL, &temp);
if (ACPI_FAILURE(status))
return -EIO;
if ((index && value < temp) || (!index && value > temp))
@@ -296,7 +297,7 @@ static int sensor_set_auxtrip(acpi_handle handle, int index, int value)
arg.integer.value = value;
status = acpi_evaluate_integer(handle, index ? SET_AUX1 : SET_AUX0,
- &args, (unsigned long *)&temp);
+ &args, &temp);
if (ACPI_FAILURE(status))
return -EIO;
@@ -312,7 +313,7 @@ static ssize_t aux0_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
- int value;
+ unsigned long long value;
int result;
result = sensor_get_auxtrip(attr->handle, 0, &value);
@@ -324,7 +325,7 @@ static ssize_t aux1_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
- int value;
+ unsigned long long value;
int result;
result = sensor_get_auxtrip(attr->handle, 1, &value);
@@ -376,7 +377,7 @@ static ssize_t bios_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
acpi_status status;
- unsigned long bios_enabled;
+ unsigned long long bios_enabled;
status = acpi_evaluate_integer(NULL, BIOS_ENABLED, NULL, &bios_enabled);
if (ACPI_FAILURE(status))
@@ -492,7 +493,7 @@ static int __init intel_menlow_module_init(void)
{
int result = -ENODEV;
acpi_status status;
- unsigned long enable;
+ unsigned long long enable;
if (acpi_disabled)
return result;
diff --git a/drivers/misc/panasonic-laptop.c b/drivers/misc/panasonic-laptop.c
new file mode 100644
index 00000000000..a2cb598d8ab
--- /dev/null
+++ b/drivers/misc/panasonic-laptop.c
@@ -0,0 +1,767 @@
+/*
+ * Panasonic HotKey and LCD brightness control driver
+ * (C) 2004 Hiroshi Miura <miura@da-cha.org>
+ * (C) 2004 NTT DATA Intellilink Co. http://www.intellilink.co.jp/
+ * (C) YOKOTA Hiroshi <yokota (at) netlab. is. tsukuba. ac. jp>
+ * (C) 2004 David Bronaugh <dbronaugh>
+ * (C) 2006-2008 Harald Welte <laforge@gnumonks.org>
+ *
+ * derived from toshiba_acpi.c, Copyright (C) 2002-2004 John Belmonte
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *---------------------------------------------------------------------------
+ *
+ * ChangeLog:
+ * Sep.23, 2008 Harald Welte <laforge@gnumonks.org>
+ * -v0.95 rename driver from drivers/acpi/pcc_acpi.c to
+ * drivers/misc/panasonic-laptop.c
+ *
+ * Jul.04, 2008 Harald Welte <laforge@gnumonks.org>
+ * -v0.94 replace /proc interface with device attributes
+ * support {set,get}keycode on th input device
+ *         support {set,get}keycode on the input device
+ * Jun.27, 2008 Harald Welte <laforge@gnumonks.org>
+ * -v0.92 merge with 2.6.26-rc6 input API changes
+ * remove broken <= 2.6.15 kernel support
+ * resolve all compiler warnings
+ * various coding style fixes (checkpatch.pl)
+ * add support for backlight api
+ * major code restructuring
+ *
+ * Dec.28, 2007	Harald Welte <laforge@gnumonks.org>
+ * -v0.91 merge with 2.6.24-rc6 ACPI changes
+ *
+ * Nov.04, 2006 Hiroshi Miura <miura@da-cha.org>
+ * -v0.9 remove warning about section reference.
+ * remove acpi_os_free
+ * add /proc/acpi/pcc/brightness interface for HAL access
+ * merge dbronaugh's enhancement
+ * Aug.17, 2004 David Bronaugh (dbronaugh)
+ * - Added screen brightness setting interface
+ * Thanks to FreeBSD crew (acpi_panasonic.c)
+ * for the ideas I needed to accomplish it
+ *
+ * May.29, 2006 Hiroshi Miura <miura@da-cha.org>
+ * -v0.8.4 follow to change keyinput structure
+ * thanks Fabian Yamaguchi <fabs@cs.tu-berlin.de>,
+ * Jacob Bower <jacob.bower@ic.ac.uk> and
+ * Hiroshi Yokota for providing solutions.
+ *
+ * Oct.02, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.8.2 merge code of YOKOTA Hiroshi
+ * <yokota@netlab.is.tsukuba.ac.jp>.
+ * Add sticky key mode interface.
+ * Refactoring acpi_pcc_generate_keyinput().
+ *
+ * Sep.15, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.8 Generate key input event on input subsystem.
+ * This is based on yet another driver written by
+ * Ryuta Nakanishi.
+ *
+ * Sep.10, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.7 Change proc interface functions using seq_file
+ * facility as same as other ACPI drivers.
+ *
+ * Aug.28, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.6.4 Fix a silly error with status checking
+ *
+ * Aug.25, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.6.3 replace read_acpi_int by standard function
+ * acpi_evaluate_integer
+ * some clean up and make smart copyright notice.
+ * fix return value of pcc_acpi_get_key()
+ * fix checking return value of acpi_bus_register_driver()
+ *
+ * Aug.22, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
+ * -v0.6.2 Add check on ACPI data (num_sifr)
+ * Coding style cleanups, better error messages/handling
+ * Fixed an off-by-one error in memory allocation
+ *
+ * Aug.21, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
+ * -v0.6.1 Fix a silly error with status checking
+ *
+ * Aug.20, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
+ * - v0.6 Correct brightness controls to reflect reality
+ * based on information gleaned by Hiroshi Miura
+ * and discussions with Hiroshi Miura
+ *
+ * Aug.10, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.5 support LCD brightness control
+ * based on the disclosed information by MEI.
+ *
+ * Jul.25, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.4 first post version
+ *         add function to retrieve SIFR
+ *
+ * Jul.24, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.3 get proper status of hotkey
+ *
+ * Jul.22, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.2 add HotKey handler
+ *
+ * Jul.17, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.1 start from toshiba_acpi driver written by John Belmonte
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/backlight.h>
+#include <linux/ctype.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/input.h>
+
+
+#ifndef ACPI_HOTKEY_COMPONENT
+#define ACPI_HOTKEY_COMPONENT 0x10000000
+#endif
+
+#define _COMPONENT ACPI_HOTKEY_COMPONENT
+
+MODULE_AUTHOR("Hiroshi Miura, David Bronaugh and Harald Welte");
+MODULE_DESCRIPTION("ACPI HotKey driver for Panasonic Let's Note laptops");
+MODULE_LICENSE("GPL");
+
+#define LOGPREFIX "pcc_acpi: "
+
+/* Define ACPI PATHs */
+/* Let's Note hotkeys */
+#define METHOD_HKEY_QUERY "HINF"
+#define METHOD_HKEY_SQTY "SQTY"
+#define METHOD_HKEY_SINF "SINF"
+#define METHOD_HKEY_SSET "SSET"
+#define HKEY_NOTIFY 0x80
+
+#define ACPI_PCC_DRIVER_NAME "Panasonic Laptop Support"
+#define ACPI_PCC_DEVICE_NAME "Hotkey"
+#define ACPI_PCC_CLASS "pcc"
+
+#define ACPI_PCC_INPUT_PHYS "panasonic/hkey0"
+
+/* LCD_TYPEs: 0 = Normal, 1 = Semi-transparent
+ ENV_STATEs: Normal temp=0x01, High temp=0x81, N/A=0x00
+*/
+enum SINF_BITS { SINF_NUM_BATTERIES = 0,
+ SINF_LCD_TYPE,
+ SINF_AC_MAX_BRIGHT,
+ SINF_AC_MIN_BRIGHT,
+ SINF_AC_CUR_BRIGHT,
+ SINF_DC_MAX_BRIGHT,
+ SINF_DC_MIN_BRIGHT,
+ SINF_DC_CUR_BRIGHT,
+ SINF_MUTE,
+ SINF_RESERVED,
+ SINF_ENV_STATE,
+ SINF_STICKY_KEY = 0x80,
+ };
+/* R1 handles SINF_AC_CUR_BRIGHT as SINF_CUR_BRIGHT, doesn't know AC state */
+
+static int acpi_pcc_hotkey_add(struct acpi_device *device);
+static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type);
+static int acpi_pcc_hotkey_resume(struct acpi_device *device);
+
+static const struct acpi_device_id pcc_device_ids[] = {
+ { "MAT0012", 0},
+ { "MAT0013", 0},
+ { "MAT0018", 0},
+ { "MAT0019", 0},
+ { "", 0},
+};
+
+static struct acpi_driver acpi_pcc_driver = {
+ .name = ACPI_PCC_DRIVER_NAME,
+ .class = ACPI_PCC_CLASS,
+ .ids = pcc_device_ids,
+ .ops = {
+ .add = acpi_pcc_hotkey_add,
+ .remove = acpi_pcc_hotkey_remove,
+ .resume = acpi_pcc_hotkey_resume,
+ },
+};
+
+#define KEYMAP_SIZE 11
+static const int initial_keymap[KEYMAP_SIZE] = {
+ /* 0 */ KEY_RESERVED,
+ /* 1 */ KEY_BRIGHTNESSDOWN,
+ /* 2 */ KEY_BRIGHTNESSUP,
+ /* 3 */ KEY_DISPLAYTOGGLE,
+ /* 4 */ KEY_MUTE,
+ /* 5 */ KEY_VOLUMEDOWN,
+ /* 6 */ KEY_VOLUMEUP,
+ /* 7 */ KEY_SLEEP,
+ /* 8 */ KEY_PROG1, /* Change CPU boost */
+ /* 9 */ KEY_BATTERY,
+ /* 10 */ KEY_SUSPEND,
+};
+
+struct pcc_acpi {
+ acpi_handle handle;
+ unsigned long num_sifr;
+ int sticky_mode;
+ u32 *sinf;
+ struct acpi_device *device;
+ struct input_dev *input_dev;
+ struct backlight_device *backlight;
+ int keymap[KEYMAP_SIZE];
+};
+
+struct pcc_keyinput {
+ struct acpi_hotkey *hotkey;
+};
+
+/* method access functions */
+static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val)
+{
+ union acpi_object in_objs[] = {
+ { .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = func, },
+ { .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = val, },
+ };
+ struct acpi_object_list params = {
+ .count = ARRAY_SIZE(in_objs),
+ .pointer = in_objs,
+ };
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_write_sset");
+
+ status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SSET,
+ &params, NULL);
+
+ return status == AE_OK;
+}
+
+static inline int acpi_pcc_get_sqty(struct acpi_device *device)
+{
+ unsigned long long s;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_get_sqty");
+
+ status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY,
+ NULL, &s);
+ if (ACPI_SUCCESS(status))
+ return s;
+ else {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "evaluation error HKEY.SQTY\n"));
+ return -EINVAL;
+ }
+}
+
+static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *hkey = NULL;
+ int i;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_retrieve_biosdata");
+
+ status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0,
+ &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "evaluation error HKEY.SINF\n"));
+ return 0;
+ }
+
+ hkey = buffer.pointer;
+	if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF\n"));
+		status = AE_ERROR;
+		goto end;
+	}
+
+ if (pcc->num_sifr < hkey->package.count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "SQTY reports bad SINF length\n"));
+ status = AE_ERROR;
+ goto end;
+ }
+
+ for (i = 0; i < hkey->package.count; i++) {
+ union acpi_object *element = &(hkey->package.elements[i]);
+ if (likely(element->type == ACPI_TYPE_INTEGER)) {
+ sinf[i] = element->integer.value;
+ } else
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Invalid HKEY.SINF data\n"));
+ }
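+	/* terminate the array; pcc->sinf is allocated with one spare slot */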
+ sinf[hkey->package.count] = -1;
+
+end:
+ kfree(buffer.pointer);
+ return status == AE_OK;
+}
+
+/* backlight API interface functions */
+
+/* This driver currently treats AC and DC brightness identical,
+ * since we don't need to invent an interface to the core ACPI
+ * logic to receive events in case a power supply is plugged in
+ * or removed */
+
+static int bl_get(struct backlight_device *bd)
+{
+ struct pcc_acpi *pcc = bl_get_data(bd);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+ return -EIO;
+
+ return pcc->sinf[SINF_AC_CUR_BRIGHT];
+}
+
+static int bl_set_status(struct backlight_device *bd)
+{
+ struct pcc_acpi *pcc = bl_get_data(bd);
+ int bright = bd->props.brightness;
+ int rc;
+
+ if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+ return -EIO;
+
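+	/* clamp to the larger of the AC and DC minimum before range checking */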
+ if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT])
+ bright = pcc->sinf[SINF_AC_MIN_BRIGHT];
+
+ if (bright < pcc->sinf[SINF_DC_MIN_BRIGHT])
+ bright = pcc->sinf[SINF_DC_MIN_BRIGHT];
+
+ if (bright < pcc->sinf[SINF_AC_MIN_BRIGHT] ||
+ bright > pcc->sinf[SINF_AC_MAX_BRIGHT])
+ return -EINVAL;
+
+ rc = acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, bright);
+ if (rc < 0)
+ return rc;
+
+ return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
+}
+
+static struct backlight_ops pcc_backlight_ops = {
+ .get_brightness = bl_get,
+ .update_status = bl_set_status,
+};
+
+
+/* sysfs user interface functions */
+
+static ssize_t show_numbatt(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+ return -EIO;
+
+ return sprintf(buf, "%u\n", pcc->sinf[SINF_NUM_BATTERIES]);
+}
+
+static ssize_t show_lcdtype(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+ return -EIO;
+
+ return sprintf(buf, "%u\n", pcc->sinf[SINF_LCD_TYPE]);
+}
+
+static ssize_t show_mute(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+ return -EIO;
+
+ return sprintf(buf, "%u\n", pcc->sinf[SINF_MUTE]);
+}
+
+static ssize_t show_sticky(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf))
+ return -EIO;
+
+ return sprintf(buf, "%u\n", pcc->sinf[SINF_STICKY_KEY]);
+}
+
+static ssize_t set_sticky(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+ int val;
+
+ if (count && sscanf(buf, "%i", &val) == 1 &&
+ (val == 0 || val == 1)) {
+ acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, val);
+ pcc->sticky_mode = val;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(numbatt, S_IRUGO, show_numbatt, NULL);
+static DEVICE_ATTR(lcdtype, S_IRUGO, show_lcdtype, NULL);
+static DEVICE_ATTR(mute, S_IRUGO, show_mute, NULL);
+static DEVICE_ATTR(sticky_key, S_IRUGO | S_IWUSR, show_sticky, set_sticky);
+
+static struct attribute *pcc_sysfs_entries[] = {
+ &dev_attr_numbatt.attr,
+ &dev_attr_lcdtype.attr,
+ &dev_attr_mute.attr,
+ &dev_attr_sticky_key.attr,
+ NULL,
+};
+
+static struct attribute_group pcc_attr_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = pcc_sysfs_entries,
+};
+
+
+/* hotkey input device driver */
+
+static int pcc_getkeycode(struct input_dev *dev, int scancode, int *keycode)
+{
+ struct pcc_acpi *pcc = input_get_drvdata(dev);
+
+ if (scancode >= ARRAY_SIZE(pcc->keymap))
+ return -EINVAL;
+
+ *keycode = pcc->keymap[scancode];
+
+ return 0;
+}
+
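+/* Return scancode index + 1 for a keycode, or 0 if it is not in the keymap */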
+static int keymap_get_by_keycode(struct pcc_acpi *pcc, int keycode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++) {
+ if (pcc->keymap[i] == keycode)
+ return i+1;
+ }
+
+ return 0;
+}
+
+static int pcc_setkeycode(struct input_dev *dev, int scancode, int keycode)
+{
+ struct pcc_acpi *pcc = input_get_drvdata(dev);
+ int oldkeycode;
+
+ if (scancode >= ARRAY_SIZE(pcc->keymap))
+ return -EINVAL;
+
+ if (keycode < 0 || keycode > KEY_MAX)
+ return -EINVAL;
+
+ oldkeycode = pcc->keymap[scancode];
+ pcc->keymap[scancode] = keycode;
+
+ set_bit(keycode, dev->keybit);
+
+ if (!keymap_get_by_keycode(pcc, oldkeycode))
+ clear_bit(oldkeycode, dev->keybit);
+
+ return 0;
+}
+
+static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
+{
+ struct input_dev *hotk_input_dev = pcc->input_dev;
+ int rc;
+ int key_code, hkey_num;
+ unsigned long long result;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_generate_keyinput");
+
+ rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
+ NULL, &result);
+ if (!ACPI_SUCCESS(rc)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "error getting hotkey status\n"));
+ return;
+ }
+
+ acpi_bus_generate_proc_event(pcc->device, HKEY_NOTIFY, result);
+
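+	/* low nibble: hotkey number; bit 7: 1 = key pressed, 0 = key released */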
+ hkey_num = result & 0xf;
+
+	if (hkey_num < 0 || hkey_num >= ARRAY_SIZE(pcc->keymap)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "hotkey number out of range: %d\n",
+ hkey_num));
+ return;
+ }
+
+ key_code = pcc->keymap[hkey_num];
+
+ if (key_code != KEY_RESERVED) {
+ int pushed = (result & 0x80) ? TRUE : FALSE;
+
+ input_report_key(hotk_input_dev, key_code, pushed);
+ input_sync(hotk_input_dev);
+ }
+
+ return;
+}
+
+static void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct pcc_acpi *pcc = (struct pcc_acpi *) data;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_notify");
+
+ switch (event) {
+ case HKEY_NOTIFY:
+ acpi_pcc_generate_keyinput(pcc);
+ break;
+ default:
+ /* nothing to do */
+ break;
+ }
+}
+
+static int acpi_pcc_init_input(struct pcc_acpi *pcc)
+{
+ int i, rc;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_init_input");
+
+ pcc->input_dev = input_allocate_device();
+ if (!pcc->input_dev) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Couldn't allocate input device for hotkey"));
+ return -ENOMEM;
+ }
+
+ pcc->input_dev->evbit[0] = BIT(EV_KEY);
+
+ pcc->input_dev->name = ACPI_PCC_DRIVER_NAME;
+ pcc->input_dev->phys = ACPI_PCC_INPUT_PHYS;
+ pcc->input_dev->id.bustype = BUS_HOST;
+ pcc->input_dev->id.vendor = 0x0001;
+ pcc->input_dev->id.product = 0x0001;
+ pcc->input_dev->id.version = 0x0100;
+ pcc->input_dev->getkeycode = pcc_getkeycode;
+ pcc->input_dev->setkeycode = pcc_setkeycode;
+
+ /* load initial keymap */
+ memcpy(pcc->keymap, initial_keymap, sizeof(pcc->keymap));
+
+ for (i = 0; i < ARRAY_SIZE(pcc->keymap); i++)
+ __set_bit(pcc->keymap[i], pcc->input_dev->keybit);
+ __clear_bit(KEY_RESERVED, pcc->input_dev->keybit);
+
+ input_set_drvdata(pcc->input_dev, pcc);
+
+ rc = input_register_device(pcc->input_dev);
+ if (rc < 0)
+ input_free_device(pcc->input_dev);
+
+ return rc;
+}
+
+/* kernel module interface */
+
+static int acpi_pcc_hotkey_resume(struct acpi_device *device)
+{
+ struct pcc_acpi *pcc = acpi_driver_data(device);
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_resume");
+
+ if (device == NULL || pcc == NULL)
+ return -EINVAL;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Sticky mode restore: %d\n",
+ pcc->sticky_mode));
+
+ status = acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode);
+
+ return status == AE_OK ? 0 : -EINVAL;
+}
+
+static int acpi_pcc_hotkey_add(struct acpi_device *device)
+{
+ acpi_status status;
+ struct pcc_acpi *pcc;
+ int num_sifr, result;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_add");
+
+ if (!device)
+ return -EINVAL;
+
+ num_sifr = acpi_pcc_get_sqty(device);
+
+	if (num_sifr < 0 || num_sifr > 255) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr out of range"));
+		return -ENODEV;
+	}
+
+ pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL);
+ if (!pcc) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Couldn't allocate mem for pcc"));
+ return -ENOMEM;
+ }
+
+ pcc->sinf = kzalloc(sizeof(u32) * (num_sifr + 1), GFP_KERNEL);
+ if (!pcc->sinf) {
+ result = -ENOMEM;
+ goto out_hotkey;
+ }
+
+ pcc->device = device;
+ pcc->handle = device->handle;
+ pcc->num_sifr = num_sifr;
+ device->driver_data = pcc;
+ strcpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_PCC_CLASS);
+
+ result = acpi_pcc_init_input(pcc);
+ if (result) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error installing keyinput handler\n"));
+ goto out_sinf;
+ }
+
+ /* initialize hotkey input device */
+ status = acpi_install_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
+ acpi_pcc_hotkey_notify, pcc);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error installing notify handler\n"));
+ result = -ENODEV;
+ goto out_input;
+ }
+
+ /* initialize backlight */
+ pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
+ &pcc_backlight_ops);
+	if (IS_ERR(pcc->backlight)) {
+		result = PTR_ERR(pcc->backlight);
+		goto out_notify;
+	}
+
+	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "Couldn't retrieve BIOS data\n"));
+		result = -EIO;
+		goto out_backlight;
+	}
+
+ /* read the initial brightness setting from the hardware */
+ pcc->backlight->props.max_brightness =
+ pcc->sinf[SINF_AC_MAX_BRIGHT];
+ pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+
+ /* read the initial sticky key mode from the hardware */
+ pcc->sticky_mode = pcc->sinf[SINF_STICKY_KEY];
+
+ /* add sysfs attributes */
+ result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
+ if (result)
+ goto out_backlight;
+
+ return 0;
+
+out_backlight:
+ backlight_device_unregister(pcc->backlight);
+out_notify:
+ acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
+ acpi_pcc_hotkey_notify);
+out_input:
+ input_unregister_device(pcc->input_dev);
+	/* no need to input_free_device() since the core input API
+	 * refcounts and frees the device */
+out_sinf:
+ kfree(pcc->sinf);
+out_hotkey:
+ kfree(pcc);
+
+ return result;
+}
+
+static int __init acpi_pcc_init(void)
+{
+ int result = 0;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_init");
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ result = acpi_bus_register_driver(&acpi_pcc_driver);
+ if (result < 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error registering hotkey driver\n"));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type)
+{
+ struct pcc_acpi *pcc = acpi_driver_data(device);
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_remove");
+
+ if (!device || !pcc)
+ return -EINVAL;
+
+ sysfs_remove_group(&device->dev.kobj, &pcc_attr_group);
+
+ backlight_device_unregister(pcc->backlight);
+
+ acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY,
+ acpi_pcc_hotkey_notify);
+
+ input_unregister_device(pcc->input_dev);
+	/* no need to input_free_device() since the core input API
+	 * refcounts and frees the device */
+
+ kfree(pcc->sinf);
+ kfree(pcc);
+
+ return 0;
+}
+
+static void __exit acpi_pcc_exit(void)
+{
+ ACPI_FUNCTION_TRACE("acpi_pcc_exit");
+
+ acpi_bus_unregister_driver(&acpi_pcc_driver);
+}
+
+module_init(acpi_pcc_init);
+module_exit(acpi_pcc_exit);
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index 60775be2282..5a97d3a9d74 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -970,7 +970,7 @@ static int sony_nc_resume(struct acpi_device *device)
/* set the last requested brightness level */
if (sony_backlight_device &&
!sony_backlight_update_status(sony_backlight_device))
- printk(KERN_WARNING DRV_PFX "unable to restore brightness level");
+ printk(KERN_WARNING DRV_PFX "unable to restore brightness level\n");
/* re-initialize models with specific requirements */
dmi_check_system(sony_nc_ids);
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index 6b9300779a4..4db1cf9078d 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -159,7 +159,6 @@ enum {
#define TPACPI_DEBUG KERN_DEBUG TPACPI_LOG
#define TPACPI_DBG_ALL 0xffff
-#define TPACPI_DBG_ALL 0xffff
#define TPACPI_DBG_INIT 0x0001
#define TPACPI_DBG_EXIT 0x0002
#define dbg_printk(a_dbg_level, format, arg...) \
@@ -543,7 +542,7 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
return -ENODEV;
}
- acpi_driver_data(ibm->acpi->device) = ibm;
+ ibm->acpi->device->driver_data = ibm;
sprintf(acpi_device_class(ibm->acpi->device), "%s/%s",
TPACPI_ACPI_EVENT_PREFIX,
ibm->name);
@@ -582,7 +581,8 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL);
if (!ibm->acpi->driver) {
- printk(TPACPI_ERR "kzalloc(ibm->driver) failed\n");
+ printk(TPACPI_ERR
+ "failed to allocate memory for ibm->acpi->driver\n");
return -ENOMEM;
}
@@ -838,6 +838,13 @@ static int parse_strtoul(const char *buf,
return 0;
}
+static void tpacpi_disable_brightness_delay(void)
+{
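+	/* evaluate HKEY.PWMS with argument 0 to turn off the firmware's
+	 * backlight transition delay */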
+ if (acpi_evalf(hkey_handle, NULL, "PWMS", "qvd", 0))
+ printk(TPACPI_NOTICE
+ "ACPI backlight control delay disabled\n");
+}
+
static int __init tpacpi_query_bcl_levels(acpi_handle handle)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -2139,6 +2146,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (!tp_features.hotkey)
return 1;
+ tpacpi_disable_brightness_delay();
+
hotkey_dev_attributes = create_attr_set(13, NULL);
if (!hotkey_dev_attributes)
return -ENOMEM;
@@ -2512,6 +2521,8 @@ static void hotkey_suspend(pm_message_t state)
static void hotkey_resume(void)
{
+ tpacpi_disable_brightness_delay();
+
if (hotkey_mask_get())
printk(TPACPI_ERR
"error while trying to read hot key mask "
@@ -5983,6 +5994,52 @@ static void fan_exit(void)
flush_workqueue(tpacpi_wq);
}
+static void fan_suspend(pm_message_t state)
+{
+ if (!fan_control_allowed)
+ return;
+
+ /* Store fan status in cache */
+ fan_get_status_safe(NULL);
+ if (tp_features.fan_ctrl_status_undef)
+ fan_control_desired_level = TP_EC_FAN_AUTO;
+}
+
+static void fan_resume(void)
+{
+ u8 saved_fan_level;
+ u8 current_level = 7;
+ bool do_set = false;
+
+ /* DSDT *always* updates status on resume */
+ tp_features.fan_ctrl_status_undef = 0;
+
+ saved_fan_level = fan_control_desired_level;
+ if (!fan_control_allowed ||
+ (fan_get_status_safe(&current_level) < 0))
+ return;
+
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ do_set = (saved_fan_level > current_level);
+ break;
+ case TPACPI_FAN_WR_ACPI_FANS:
+ case TPACPI_FAN_WR_TPEC:
+ do_set = ((saved_fan_level & TP_EC_FAN_FULLSPEED) ||
+ (saved_fan_level == 7 &&
+ !(current_level & TP_EC_FAN_FULLSPEED)));
+ break;
+ default:
+ return;
+ }
+ if (do_set) {
+ printk(TPACPI_NOTICE
+ "restoring fan level to 0x%02x\n",
+ saved_fan_level);
+ fan_set_level_safe(saved_fan_level);
+ }
+}
+
static int fan_read(char *p)
{
int len = 0;
@@ -6174,6 +6231,8 @@ static struct ibm_struct fan_driver_data = {
.read = fan_read,
.write = fan_write,
.exit = fan_exit,
+ .suspend = fan_suspend,
+ .resume = fan_resume,
};
/****************************************************************************
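A note on the fan_resume() logic added above: it only rewrites the fan level when the level cached by fan_suspend() is more aggressive than what the EC reports after resume. A short worked example as a comment (the concrete levels are illustrative, not from the patch):

/*
 * Example (TPACPI_FAN_WR_TPEC mode): if fan_suspend() cached
 * fan_control_desired_level == 7 and the EC reports level 3 without
 * TP_EC_FAN_FULLSPEED after resume, do_set evaluates true and
 * fan_resume() restores level 0x07; if the EC already reports
 * full-speed, nothing is written back.
 */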
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 24c97d3d16b..3d067c35185 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -92,18 +92,17 @@ static void mmc_blk_put(struct mmc_blk_data *md)
mutex_unlock(&open_lock);
}
-static int mmc_blk_open(struct inode *inode, struct file *filp)
+static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
- struct mmc_blk_data *md;
+ struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
int ret = -ENXIO;
- md = mmc_blk_get(inode->i_bdev->bd_disk);
if (md) {
if (md->usage == 2)
- check_disk_change(inode->i_bdev);
+ check_disk_change(bdev);
ret = 0;
- if ((filp->f_mode & FMODE_WRITE) && md->read_only) {
+ if ((mode & FMODE_WRITE) && md->read_only) {
mmc_blk_put(md);
ret = -EROFS;
}
@@ -112,9 +111,9 @@ static int mmc_blk_open(struct inode *inode, struct file *filp)
return ret;
}
-static int mmc_blk_release(struct inode *inode, struct file *filp)
+static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
- struct mmc_blk_data *md = inode->i_bdev->bd_disk->private_data;
+ struct mmc_blk_data *md = disk->private_data;
mmc_blk_put(md);
return 0;
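The mmc_blk hunks above (and the mtd_blkdevs hunks below) follow the tree-wide switch of struct block_device_operations from inode/file based open/release prototypes to block_device/gendisk plus fmode_t. A minimal sketch of the new wiring for a hypothetical driver "foo" (all "foo" names and struct foo_dev are illustrative, not from this patch):

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

struct foo_dev {
	int read_only;
};

static int foo_open(struct block_device *bdev, fmode_t mode)
{
	struct foo_dev *dev = bdev->bd_disk->private_data;

	/* refuse writable opens of a read-only device */
	if ((mode & FMODE_WRITE) && dev->read_only)
		return -EROFS;
	return 0;
}

static int foo_release(struct gendisk *disk, fmode_t mode)
{
	/* per-open cleanup would go here; nothing to do in this sketch */
	return 0;
}

static struct block_device_operations foo_fops = {
	.owner   = THIS_MODULE,
	.open    = foo_open,
	.release = foo_release,
};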
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 91fbba76763..8c295f40d2a 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -224,7 +224,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
if (dev->blkdev) {
invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
0, -1);
- close_bdev_excl(dev->blkdev);
+ close_bdev_exclusive(dev->blkdev, FMODE_READ|FMODE_WRITE);
}
kfree(dev);
@@ -246,7 +246,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
return NULL;
/* Get a handle on the device */
- bdev = open_bdev_excl(devname, O_RDWR, NULL);
+ bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, NULL);
#ifndef MODULE
if (IS_ERR(bdev)) {
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 681d5aca2af..1409f01406f 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -133,15 +133,12 @@ static void mtd_blktrans_request(struct request_queue *rq)
}
-static int blktrans_open(struct inode *i, struct file *f)
+static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
- struct mtd_blktrans_dev *dev;
- struct mtd_blktrans_ops *tr;
+ struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+ struct mtd_blktrans_ops *tr = dev->tr;
int ret = -ENODEV;
- dev = i->i_bdev->bd_disk->private_data;
- tr = dev->tr;
-
if (!try_module_get(dev->mtd->owner))
goto out;
@@ -164,15 +161,12 @@ static int blktrans_open(struct inode *i, struct file *f)
return ret;
}
-static int blktrans_release(struct inode *i, struct file *f)
+static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
- struct mtd_blktrans_dev *dev;
- struct mtd_blktrans_ops *tr;
+ struct mtd_blktrans_dev *dev = disk->private_data;
+ struct mtd_blktrans_ops *tr = dev->tr;
int ret = 0;
- dev = i->i_bdev->bd_disk->private_data;
- tr = dev->tr;
-
if (tr->release)
ret = tr->release(dev);
@@ -194,10 +188,10 @@ static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return -ENOTTY;
}
-static int blktrans_ioctl(struct inode *inode, struct file *file,
+static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
+ struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
struct mtd_blktrans_ops *tr = dev->tr;
switch (cmd) {
@@ -215,7 +209,7 @@ static struct block_device_operations mtd_blktrans_ops = {
.owner = THIS_MODULE,
.open = blktrans_open,
.release = blktrans_release,
- .ioctl = blktrans_ioctl,
+ .locked_ioctl = blktrans_ioctl,
.getgeo = blktrans_getgeo,
};
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 963840e9b5b..bcffeda2df3 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -96,7 +96,7 @@ static int mtd_open(struct inode *inode, struct file *file)
return -ENODEV;
/* You can't open the RO devices RW */
- if ((file->f_mode & 2) && (minor & 1))
+ if ((file->f_mode & FMODE_WRITE) && (minor & 1))
return -EACCES;
lock_kernel();
@@ -114,7 +114,7 @@ static int mtd_open(struct inode *inode, struct file *file)
}
/* You can't open it RW if it's not a writeable device */
- if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
+ if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
put_mtd_device(mtd);
ret = -EACCES;
goto out;
@@ -144,7 +144,7 @@ static int mtd_close(struct inode *inode, struct file *file)
DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
/* Only sync if opened RW */
- if ((file->f_mode & 2) && mtd->sync)
+ if ((file->f_mode & FMODE_WRITE) && mtd->sync)
mtd->sync(mtd);
put_mtd_device(mtd);
@@ -443,7 +443,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
{
struct erase_info *erase;
- if(!(file->f_mode & 2))
+ if(!(file->f_mode & FMODE_WRITE))
return -EPERM;
erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
@@ -497,7 +497,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
struct mtd_oob_buf __user *user_buf = argp;
uint32_t retlen;
- if(!(file->f_mode & 2))
+ if(!(file->f_mode & FMODE_WRITE))
return -EPERM;
if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ad301ace608..0b71ebc074b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -464,6 +464,12 @@ config MIPS_JAZZ_SONIC
This is the driver for the onboard card of MIPS Magnum 4000,
Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
+config XTENSA_XT2000_SONIC
+ tristate "Xtensa XT2000 onboard SONIC Ethernet support"
+ depends on XTENSA_PLATFORM_XT2000
+ help
+ This is the driver for the onboard card of the Xtensa XT2000 board.
+
config MIPS_AU1X00_ENET
bool "MIPS AU1000 Ethernet support"
depends on SOC_AU1X00
@@ -2504,6 +2510,15 @@ config PASEMI_MAC
This driver supports the on-chip 1/10Gbit Ethernet controller on
PA Semi's PWRficient line of chips.
+config MLX4_EN
+ tristate "Mellanox Technologies 10Gbit Ethernet support"
+ depends on PCI && INET
+ select MLX4_CORE
+ select INET_LRO
+ help
+ This driver supports Mellanox Technologies ConnectX Ethernet
+ devices.
+
config MLX4_CORE
tristate
depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa2510b2e60..f19acf8b922 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -227,6 +227,8 @@ pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_ENC28J60) += enc28j60.o
+obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
+
obj-$(CONFIG_MACB) += macb.o
obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 08e18bcb970..45dd9bdc5d6 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -2,6 +2,7 @@
* Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver
*
* Copyright (C) 2007 Domen Puncer, Telargo, Inc.
+ * Copyright (C) 2008 Wolfram Sang, Pengutronix
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
@@ -21,58 +22,45 @@ struct mpc52xx_fec_mdio_priv {
struct mpc52xx_fec __iomem *regs;
};
-static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
+ int reg, u32 value)
{
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
struct mpc52xx_fec __iomem *fec;
int tries = 100;
- u32 request = FEC_MII_READ_FRAME;
+
+ value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
+ value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
fec = priv->regs;
out_be32(&fec->ievent, FEC_IEVENT_MII);
-
- request |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
- request |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
-
- out_be32(&priv->regs->mii_data, request);
+ out_be32(&priv->regs->mii_data, value);
/* wait for it to finish, this takes about 23 us on lite5200b */
while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
udelay(5);
- if (tries == 0)
+ if (!tries)
return -ETIMEDOUT;
- return in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK;
+ return value & FEC_MII_DATA_OP_RD ?
+ in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0;
}
-static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data)
+static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
- struct mpc52xx_fec_mdio_priv *priv = bus->priv;
- struct mpc52xx_fec __iomem *fec;
- u32 value = data;
- int tries = 100;
-
- fec = priv->regs;
- out_be32(&fec->ievent, FEC_IEVENT_MII);
-
- value |= FEC_MII_WRITE_FRAME;
- value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
- value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
-
- out_be32(&priv->regs->mii_data, value);
-
- /* wait for request to finish */
- while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
- udelay(5);
-
- if (tries == 0)
- return -ETIMEDOUT;
+ return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME);
+}
- return 0;
+static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
+ u16 data)
+{
+ return mpc52xx_fec_mdio_transfer(bus, phy_id, reg,
+ data | FEC_MII_WRITE_FRAME);
}
-static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_id *match)
+static int mpc52xx_fec_mdio_probe(struct of_device *of,
+ const struct of_device_id *match)
{
struct device *dev = &of->dev;
struct device_node *np = of->node;
@@ -131,7 +119,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i
dev_set_drvdata(dev, bus);
/* set MII speed */
- out_be32(&priv->regs->mii_speed, ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1);
+ out_be32(&priv->regs->mii_speed,
+ ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1);
/* enable MII interrupt */
out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII);
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index efcf21c9f5c..2ee2622258f 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2604,8 +2604,16 @@ static int __devinit emac_init_config(struct emac_instance *dev)
if (of_device_is_compatible(np, "ibm,emac-440ep") ||
of_device_is_compatible(np, "ibm,emac-440gr"))
dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
- if (of_device_is_compatible(np, "ibm,emac-405ez"))
+ if (of_device_is_compatible(np, "ibm,emac-405ez")) {
+#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CONTROL
dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
+#else
+ printk(KERN_ERR "%s: Flow control not disabled!\n",
+ np->full_name);
+ return -ENXIO;
+#endif
+ }
+
}
/* Fixup some feature bits based on the device tree */
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 1839d3f154a..ecf9798987f 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -280,9 +280,11 @@ static irqreturn_t mal_txeob(int irq, void *dev_instance)
mal_schedule_poll(mal);
set_mal_dcrn(mal, MAL_TXEOBISR, r);
+#ifdef CONFIG_PPC_DCR_NATIVE
if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
+#endif
return IRQ_HANDLED;
}
@@ -298,9 +300,11 @@ static irqreturn_t mal_rxeob(int irq, void *dev_instance)
mal_schedule_poll(mal);
set_mal_dcrn(mal, MAL_RXEOBISR, r);
+#ifdef CONFIG_PPC_DCR_NATIVE
if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
+#endif
return IRQ_HANDLED;
}
@@ -572,9 +576,18 @@ static int __devinit mal_probe(struct of_device *ofdev,
goto fail;
}
- if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez"))
+ if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) {
+#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
+ defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
MAL_FTR_COMMON_ERR_INT);
+#else
+ printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
+ ofdev->node->full_name);
+ err = -ENODEV;
+ goto fail;
+#endif
+ }
mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0);
mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1);
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 0952a6528f5..a7a97bf998f 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -1,4 +1,9 @@
obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
- mr.o pd.o profile.o qp.o reset.o srq.o
+ mr.o pd.o port.o profile.o qp.o reset.o srq.o
+
+obj-$(CONFIG_MLX4_EN) += mlx4_en.o
+
+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
+ en_resources.o en_netdev.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index b411b79d72a..ad95d5f7b63 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -48,13 +48,16 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
if (obj >= bitmap->max) {
- bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
obj = find_first_zero_bit(bitmap->table, bitmap->max);
}
if (obj < bitmap->max) {
set_bit(obj, bitmap->table);
- bitmap->last = (obj + 1) & (bitmap->max - 1);
+ bitmap->last = (obj + 1);
+ if (bitmap->last == bitmap->max)
+ bitmap->last = 0;
obj |= bitmap->top;
} else
obj = -1;
@@ -66,16 +69,90 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
- obj &= bitmap->max - 1;
+ mlx4_bitmap_free_range(bitmap, obj, 1);
+}
+
+static unsigned long find_aligned_range(unsigned long *bitmap,
+ u32 start, u32 nbits,
+ int len, int align)
+{
+ unsigned long end, i;
+
+again:
+ start = ALIGN(start, align);
+
+ while ((start < nbits) && test_bit(start, bitmap))
+ start += align;
+
+ if (start >= nbits)
+ return -1;
+
+ end = start+len;
+ if (end > nbits)
+ return -1;
+
+ for (i = start + 1; i < end; i++) {
+ if (test_bit(i, bitmap)) {
+ start = i + 1;
+ goto again;
+ }
+ }
+
+ return start;
+}
+
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
+{
+ u32 obj, i;
+
+ if (likely(cnt == 1 && align == 1))
+ return mlx4_bitmap_alloc(bitmap);
+
+ spin_lock(&bitmap->lock);
+
+ obj = find_aligned_range(bitmap->table, bitmap->last,
+ bitmap->max, cnt, align);
+ if (obj >= bitmap->max) {
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ obj = find_aligned_range(bitmap->table, 0, bitmap->max,
+ cnt, align);
+ }
+
+ if (obj < bitmap->max) {
+ for (i = 0; i < cnt; i++)
+ set_bit(obj + i, bitmap->table);
+ if (obj == bitmap->last) {
+ bitmap->last = (obj + cnt);
+ if (bitmap->last >= bitmap->max)
+ bitmap->last = 0;
+ }
+ obj |= bitmap->top;
+ } else
+ obj = -1;
+
+ spin_unlock(&bitmap->lock);
+
+ return obj;
+}
+
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
+{
+ u32 i;
+
+ obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
- clear_bit(obj, bitmap->table);
+ for (i = 0; i < cnt; i++)
+ clear_bit(obj + i, bitmap->table);
bitmap->last = min(bitmap->last, obj);
- bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
spin_unlock(&bitmap->lock);
}
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved)
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+ u32 reserved_bot, u32 reserved_top)
{
int i;
@@ -85,14 +162,16 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved
bitmap->last = 0;
bitmap->top = 0;
- bitmap->max = num;
+ bitmap->max = num - reserved_top;
bitmap->mask = mask;
+ bitmap->reserved_top = reserved_top;
spin_lock_init(&bitmap->lock);
- bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL);
+ bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
+ sizeof (long), GFP_KERNEL);
if (!bitmap->table)
return -ENOMEM;
- for (i = 0; i < reserved; ++i)
+ for (i = 0; i < reserved_bot; ++i)
set_bit(i, bitmap->table);
return 0;
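mlx4_bitmap_init() now takes separate reserved_bot and reserved_top counts, and mlx4_bitmap_alloc_range()/mlx4_bitmap_free_range() add aligned multi-object allocation on top of it (the cq.c hunk below shows the new init signature in use). A hedged usage sketch; the sizes and alignment are made up, and it assumes the driver-private mlx4.h declarations and the existing mlx4_bitmap_cleanup() helper:

#include <linux/errno.h>
#include "mlx4.h"	/* mlx4_bitmap_* declarations (driver-private header) */

static int example_bitmap_usage(void)
{
	struct mlx4_bitmap bitmap;
	u32 obj;

	/* 256 entries, bottom 8 and top 16 reserved, power-of-two mask */
	if (mlx4_bitmap_init(&bitmap, 256, 256 - 1, 8, 16))
		return -ENOMEM;

	/* grab 4 contiguous objects whose first index is a multiple of 4 */
	obj = mlx4_bitmap_alloc_range(&bitmap, 4, 4);
	if (obj != (u32) -1)
		mlx4_bitmap_free_range(&bitmap, obj, 4);

	mlx4_bitmap_cleanup(&bitmap);
	return 0;
}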
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 9bb50e3f897..b7ad2829d67 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -300,7 +300,7 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
- dev->caps.num_cqs - 1, dev->caps.reserved_cqs);
+ dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
if (err)
return err;
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
new file mode 100644
index 00000000000..1368a8010af
--- /dev/null
+++ b/drivers/net/mlx4/en_cq.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_en.h"
+
+static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
+{
+ return;
+}
+
+
+int mlx4_en_create_cq(struct mlx4_en_priv *priv,
+ struct mlx4_en_cq *cq,
+ int entries, int ring, enum cq_type mode)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ cq->size = entries;
+ if (mode == RX)
+ cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
+ else
+ cq->buf_size = sizeof(struct mlx4_cqe);
+
+ cq->ring = ring;
+ cq->is_tx = mode;
+ spin_lock_init(&cq->lock);
+
+ err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
+ cq->buf_size, 2 * PAGE_SIZE);
+ if (err)
+ return err;
+
+ err = mlx4_en_map_buffer(&cq->wqres.buf);
+ if (err)
+ mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+
+ return err;
+}
+
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ cq->dev = mdev->pndev[priv->port];
+ cq->mcq.set_ci_db = cq->wqres.db.db;
+ cq->mcq.arm_db = cq->wqres.db.db + 1;
+ *cq->mcq.set_ci_db = 0;
+ *cq->mcq.arm_db = 0;
+ cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+ memset(cq->buf, 0, cq->buf_size);
+
+ err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
+ cq->wqres.db.dma, &cq->mcq, cq->is_tx);
+ if (err)
+ return err;
+
+ cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
+ cq->mcq.event = mlx4_en_cq_event;
+
+ if (cq->is_tx) {
+ init_timer(&cq->timer);
+ cq->timer.function = mlx4_en_poll_tx_cq;
+ cq->timer.data = (unsigned long) cq;
+ } else {
+ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+ napi_enable(&cq->napi);
+ }
+
+ return 0;
+}
+
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ mlx4_en_unmap_buffer(&cq->wqres.buf);
+ mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+ cq->buf_size = 0;
+ cq->buf = NULL;
+}
+
+void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (cq->is_tx)
+ del_timer(&cq->timer);
+ else
+ napi_disable(&cq->napi);
+
+ mlx4_cq_free(mdev->dev, &cq->mcq);
+}
+
+/* Set rx cq moderation parameters */
+int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
+ cq->moder_cnt, cq->moder_time);
+}
+
+int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ cq->armed = 1;
+ mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
+ &priv->mdev->uar_lock);
+
+ return 0;
+}
+
+
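Taken together, the helpers in en_cq.c define a per-ring CQ lifecycle. A rough sketch of the intended call order, as the netdev code later in this patch uses it for an RX ring (the wrapper function itself is illustrative and error handling is trimmed):

#include "mlx4_en.h"

/* Sketch: expected call order for one RX ring's CQ. */
static int example_cq_lifecycle(struct mlx4_en_priv *priv,
				struct mlx4_en_cq *cq, int entries, int ring)
{
	int err;

	err = mlx4_en_create_cq(priv, cq, entries, ring, RX);	/* allocate HW queue */
	if (err)
		return err;
	err = mlx4_en_activate_cq(priv, cq);	/* program doorbells, enable NAPI */
	if (err)
		goto destroy;
	mlx4_en_set_cq_moder(priv, cq);		/* apply moderation parameters */
	mlx4_en_arm_cq(priv, cq);		/* request the next completion event */

	mlx4_en_deactivate_cq(priv, cq);	/* stop NAPI, free the HW CQ */
destroy:
	mlx4_en_destroy_cq(priv, cq);		/* unmap and free buffers */
	return err;
}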
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
new file mode 100644
index 00000000000..1b0eebf84f7
--- /dev/null
+++ b/drivers/net/mlx4/en_main.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/cpumask.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_en.h"
+
+MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
+MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
+
+static const char mlx4_en_version[] =
+ DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
+ DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
+ enum mlx4_dev_event event, int port)
+{
+ struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
+ struct mlx4_en_priv *priv;
+
+ if (!mdev->pndev[port])
+ return;
+
+ priv = netdev_priv(mdev->pndev[port]);
+ switch (event) {
+ case MLX4_DEV_EVENT_PORT_UP:
+ case MLX4_DEV_EVENT_PORT_DOWN:
+ /* To prevent races, we poll the link state in a separate
+ task rather than changing it here */
+ priv->link_state = event;
+ queue_work(mdev->workqueue, &priv->linkstate_task);
+ break;
+
+ case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+ mlx4_err(mdev, "Internal error detected, restarting device\n");
+ break;
+
+ default:
+ mlx4_warn(mdev, "Unhandled event: %d\n", event);
+ }
+}
+
+static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
+{
+ struct mlx4_en_dev *mdev = endev_ptr;
+ int i;
+
+ mutex_lock(&mdev->state_lock);
+ mdev->device_up = false;
+ mutex_unlock(&mdev->state_lock);
+
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+ if (mdev->pndev[i])
+ mlx4_en_destroy_netdev(mdev->pndev[i]);
+
+ flush_workqueue(mdev->workqueue);
+ destroy_workqueue(mdev->workqueue);
+ mlx4_mr_free(dev, &mdev->mr);
+ mlx4_uar_free(dev, &mdev->priv_uar);
+ mlx4_pd_free(dev, mdev->priv_pdn);
+ kfree(mdev);
+}
+
+static void *mlx4_en_add(struct mlx4_dev *dev)
+{
+ static int mlx4_en_version_printed;
+ struct mlx4_en_dev *mdev;
+ int i;
+ int err;
+
+ if (!mlx4_en_version_printed) {
+ printk(KERN_INFO "%s", mlx4_en_version);
+ mlx4_en_version_printed++;
+ }
+
+ mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+ if (!mdev) {
+ dev_err(&dev->pdev->dev, "Device struct alloc failed, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_free_res;
+ }
+
+ if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
+ goto err_free_dev;
+
+ if (mlx4_uar_alloc(dev, &mdev->priv_uar))
+ goto err_pd;
+
+ mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!mdev->uar_map)
+ goto err_uar;
+ spin_lock_init(&mdev->uar_lock);
+
+ mdev->dev = dev;
+ mdev->dma_device = &(dev->pdev->dev);
+ mdev->pdev = dev->pdev;
+ mdev->device_up = false;
+
+ mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
+ if (!mdev->LSO_support)
+ mlx4_warn(mdev, "LSO not supported, please upgrade to later "
+ "FW version to enable LSO\n");
+
+ if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
+ MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
+ 0, 0, &mdev->mr)) {
+ mlx4_err(mdev, "Failed allocating memory region\n");
+ goto err_uar;
+ }
+ if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
+ mlx4_err(mdev, "Failed enabling memory region\n");
+ goto err_mr;
+ }
+
+ /* Build device profile according to supplied module parameters */
+ err = mlx4_en_get_profile(mdev);
+ if (err) {
+ mlx4_err(mdev, "Bad module parameters, aborting.\n");
+ goto err_mr;
+ }
+
+ /* Configure which ports to start according to module parameters */
+ mdev->port_cnt = 0;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+ mdev->port_cnt++;
+
+ /* If we did not receive an explicit number of Rx rings, default to
+ * a single ring per port */
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ mlx4_info(mdev, "Using %d tx rings for port:%d\n",
+ mdev->profile.prof[i].tx_ring_num, i);
+ if (!mdev->profile.prof[i].rx_ring_num) {
+ mdev->profile.prof[i].rx_ring_num = 1;
+ mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
+ 1, i);
+ } else
+ mlx4_info(mdev, "Using %d rx rings for port:%d\n",
+ mdev->profile.prof[i].rx_ring_num, i);
+ }
+
+ /* Create our own workqueue for reset/multicast tasks
+ * Note: we cannot use the shared workqueue because of deadlocks caused
+ * by the rtnl lock */
+ mdev->workqueue = create_singlethread_workqueue("mlx4_en");
+ if (!mdev->workqueue) {
+ err = -ENOMEM;
+ goto err_close_nic;
+ }
+
+ /* At this stage all non-port specific tasks are complete:
+ * mark the card state as up */
+ mutex_init(&mdev->state_lock);
+ mdev->device_up = true;
+
+ /* Setup ports */
+
+ /* Create a netdev for each port */
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ mlx4_info(mdev, "Activating port:%d\n", i);
+ if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
+ mdev->pndev[i] = NULL;
+ goto err_free_netdev;
+ }
+ }
+ return mdev;
+
+
+err_free_netdev:
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ if (mdev->pndev[i])
+ mlx4_en_destroy_netdev(mdev->pndev[i]);
+ }
+
+ mutex_lock(&mdev->state_lock);
+ mdev->device_up = false;
+ mutex_unlock(&mdev->state_lock);
+ flush_workqueue(mdev->workqueue);
+
+ /* Stop event queue before we drop down to release shared SW state */
+
+err_close_nic:
+ destroy_workqueue(mdev->workqueue);
+err_mr:
+ mlx4_mr_free(dev, &mdev->mr);
+err_uar:
+ mlx4_uar_free(dev, &mdev->priv_uar);
+err_pd:
+ mlx4_pd_free(dev, mdev->priv_pdn);
+err_free_dev:
+ kfree(mdev);
+err_free_res:
+ return NULL;
+}
+
+static struct mlx4_interface mlx4_en_interface = {
+ .add = mlx4_en_add,
+ .remove = mlx4_en_remove,
+ .event = mlx4_en_event,
+};
+
+static int __init mlx4_en_init(void)
+{
+ return mlx4_register_interface(&mlx4_en_interface);
+}
+
+static void __exit mlx4_en_cleanup(void)
+{
+ mlx4_unregister_interface(&mlx4_en_interface);
+}
+
+module_init(mlx4_en_init);
+module_exit(mlx4_en_cleanup);
+
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
new file mode 100644
index 00000000000..a339afbeed3
--- /dev/null
+++ b/drivers/net/mlx4/en_netdev.c
@@ -0,0 +1,1088 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/cq.h>
+
+#include "mlx4_en.h"
+#include "en_port.h"
+
+
+static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
+ priv->vlgrp = grp;
+
+ mutex_lock(&mdev->state_lock);
+ if (mdev->device_up && priv->port_up) {
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
+ if (err)
+ mlx4_err(mdev, "Failed configuring VLAN filter\n");
+ }
+ mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ if (!priv->vlgrp)
+ return;
+
+ mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
+ vid, vlan_group_get_device(priv->vlgrp, vid));
+
+ /* Add VID to port VLAN filter */
+ mutex_lock(&mdev->state_lock);
+ if (mdev->device_up && priv->port_up) {
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+ if (err)
+ mlx4_err(mdev, "Failed configuring VLAN filter\n");
+ }
+ mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ if (!priv->vlgrp)
+ return;
+
+ mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
+ "entry:%p)\n", vid, priv->vlgrp,
+ vlan_group_get_device(priv->vlgrp, vid));
+ vlan_group_set_device(priv->vlgrp, vid, NULL);
+
+ /* Remove VID from port VLAN filter */
+ mutex_lock(&mdev->state_lock);
+ if (mdev->device_up && priv->port_up) {
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+ if (err)
+ mlx4_err(mdev, "Failed configuring VLAN filter\n");
+ }
+ mutex_unlock(&mdev->state_lock);
+}
+
+static u64 mlx4_en_mac_to_u64(u8 *addr)
+{
+ u64 mac = 0;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac <<= 8;
+ mac |= addr[i];
+ }
+ return mac;
+}
+
+static int mlx4_en_set_mac(struct net_device *dev, void *addr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
+ queue_work(mdev->workqueue, &priv->mac_task);
+ return 0;
+}
+
+static void mlx4_en_do_set_mac(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ mac_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ /* Remove old MAC and insert the new one */
+ mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+ err = mlx4_register_mac(mdev->dev, priv->port,
+ priv->mac, &priv->mac_index);
+ if (err)
+ mlx4_err(mdev, "Failed changing HW MAC address\n");
+ } else
+ mlx4_dbg(HW, priv, "Port is down, exiting...\n");
+
+ mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_clear_list(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct dev_mc_list *plist = priv->mc_list;
+ struct dev_mc_list *next;
+
+ while (plist) {
+ next = plist->next;
+ kfree(plist);
+ plist = next;
+ }
+ priv->mc_list = NULL;
+}
+
+static void mlx4_en_cache_mclist(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct dev_mc_list *mclist;
+ struct dev_mc_list *tmp;
+ struct dev_mc_list *plist = NULL;
+
+ for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
+ tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
+ if (!tmp) {
+ mlx4_err(mdev, "failed to allocate multicast list\n");
+ mlx4_en_clear_list(dev);
+ return;
+ }
+ memcpy(tmp, mclist, sizeof(struct dev_mc_list));
+ tmp->next = NULL;
+ if (plist)
+ plist->next = tmp;
+ else
+ priv->mc_list = tmp;
+ plist = tmp;
+ }
+}
+
+
+static void mlx4_en_set_multicast(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (!priv->port_up)
+ return;
+
+ queue_work(priv->mdev->workqueue, &priv->mcast_task);
+}
+
+static void mlx4_en_do_set_multicast(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ mcast_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+ struct dev_mc_list *mclist;
+ u64 mcast_addr = 0;
+ int err;
+
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up) {
+ mlx4_dbg(HW, priv, "Card is not up, ignoring "
+ "multicast change.\n");
+ goto out;
+ }
+ if (!priv->port_up) {
+ mlx4_dbg(HW, priv, "Port is down, ignoring "
+ "multicast change.\n");
+ goto out;
+ }
+
+ /*
+ * Promiscuous mode: disable all filters
+ */
+
+ if (dev->flags & IFF_PROMISC) {
+ if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
+ if (netif_msg_rx_status(priv))
+ mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
+ priv->port);
+ priv->flags |= MLX4_EN_FLAG_PROMISC;
+
+ /* Enable promiscuous mode */
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+ priv->base_qpn, 1);
+ if (err)
+ mlx4_err(mdev, "Failed enabling "
+ "promiscous mode\n");
+
+ /* Disable port multicast filter (unconditionally) */
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ mlx4_err(mdev, "Failed disabling "
+ "multicast filter\n");
+
+ /* Disable port VLAN filter */
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
+ if (err)
+ mlx4_err(mdev, "Failed disabling "
+ "VLAN filter\n");
+ }
+ goto out;
+ }
+
+ /*
+ * Not in promiscuous mode
+ */
+
+ if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+ if (netif_msg_rx_status(priv))
+ mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
+ priv->port);
+ priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+ /* Disable promiscuous mode */
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+ priv->base_qpn, 0);
+ if (err)
+ mlx4_err(mdev, "Failed disabling promiscous mode\n");
+
+ /* Enable port VLAN filter */
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+ if (err)
+ mlx4_err(mdev, "Failed enabling VLAN filter\n");
+ }
+
+ /* Enable/disable the multicast filter according to IFF_ALLMULTI */
+ if (dev->flags & IFF_ALLMULTI) {
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ mlx4_err(mdev, "Failed disabling multicast filter\n");
+ } else {
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ mlx4_err(mdev, "Failed disabling multicast filter\n");
+
+ /* Flush mcast filter and init it with broadcast address */
+ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
+ 1, MLX4_MCAST_CONFIG);
+
+ /* Update multicast list - we cache all addresses so they won't
+ * change while the HW is updated holding the command semaphore */
+ netif_tx_lock_bh(dev);
+ mlx4_en_cache_mclist(dev);
+ netif_tx_unlock_bh(dev);
+ for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
+ mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
+ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
+ mcast_addr, 0, MLX4_MCAST_CONFIG);
+ }
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_ENABLE);
+ if (err)
+ mlx4_err(mdev, "Failed enabling multicast filter\n");
+
+ mlx4_en_clear_list(dev);
+ }
+out:
+ mutex_unlock(&mdev->state_lock);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mlx4_en_netpoll(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_cq *cq;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+ spin_lock_irqsave(&cq->lock, flags);
+ napi_synchronize(&cq->napi);
+ mlx4_en_process_rx_cq(dev, cq, 0);
+ spin_unlock_irqrestore(&cq->lock, flags);
+ }
+}
+#endif
+
+static void mlx4_en_tx_timeout(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (netif_msg_timer(priv))
+ mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
+
+ if (netif_carrier_ok(dev)) {
+ priv->port_stats.tx_timeout++;
+ mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
+ queue_work(mdev->workqueue, &priv->watchdog_task);
+ }
+}
+
+
+static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ spin_lock_bh(&priv->stats_lock);
+ memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+ spin_unlock_bh(&priv->stats_lock);
+
+ return &priv->ret_stats;
+}
+
+static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq;
+ int i;
+
+ /* If we haven't received a specific coalescing setting
+ * (module param), we set the moderation parameters as follows:
+ * - moder_cnt is set to the number of mtu-sized packets needed to
+ * satisfy our coalescing target.
+ * - moder_time is set to a fixed value.
+ */
+ priv->rx_frames = (mdev->profile.rx_moder_cnt ==
+ MLX4_EN_AUTO_CONF) ?
+ MLX4_EN_RX_COAL_TARGET /
+ priv->dev->mtu + 1 :
+ mdev->profile.rx_moder_cnt;
+ priv->rx_usecs = (mdev->profile.rx_moder_time ==
+ MLX4_EN_AUTO_CONF) ?
+ MLX4_EN_RX_COAL_TIME :
+ mdev->profile.rx_moder_time;
+ mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
+ "rx_frames:%d rx_usecs:%d\n",
+ priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
+
+ /* Setup cq moderation params */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+ cq->moder_cnt = priv->rx_frames;
+ cq->moder_time = priv->rx_usecs;
+ }
+
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ cq = &priv->tx_cq[i];
+ cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
+ cq->moder_time = MLX4_EN_TX_COAL_TIME;
+ }
+
+ /* Reset auto-moderation params */
+ priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
+ priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
+ priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
+ priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
+ priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
+ priv->adaptive_rx_coal = mdev->profile.auto_moder;
+ priv->last_moder_time = MLX4_EN_AUTO_CONF;
+ priv->last_moder_jiffies = 0;
+ priv->last_moder_packets = 0;
+ priv->last_moder_tx_packets = 0;
+ priv->last_moder_bytes = 0;
+}
+
+static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
+{
+ unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq;
+ unsigned long packets;
+ unsigned long rate;
+ unsigned long avg_pkt_size;
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_pkt_diff;
+ unsigned long rx_pkt_diff;
+ int moder_time;
+ int i, err;
+
+ if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
+ return;
+
+ spin_lock_bh(&priv->stats_lock);
+ rx_packets = priv->stats.rx_packets;
+ rx_bytes = priv->stats.rx_bytes;
+ tx_packets = priv->stats.tx_packets;
+ spin_unlock_bh(&priv->stats_lock);
+
+ if (!priv->last_moder_jiffies || !period)
+ goto out;
+
+ tx_pkt_diff = ((unsigned long) (tx_packets -
+ priv->last_moder_tx_packets));
+ rx_pkt_diff = ((unsigned long) (rx_packets -
+ priv->last_moder_packets));
+ packets = max(tx_pkt_diff, rx_pkt_diff);
+ rate = packets * HZ / period;
+ avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
+ priv->last_moder_bytes)) / packets : 0;
+
+ /* Apply auto-moderation only when the packet rate is high enough
+ * for it to matter */
+ if (rate > MLX4_EN_RX_RATE_THRESH) {
+ /* If tx and rx packet rates are not balanced, assume that
+ * traffic is mainly BW bound and apply maximum moderation.
+ * Otherwise, moderate according to packet rate */
+ if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
+ 2 * rx_pkt_diff > 3 * tx_pkt_diff) {
+ moder_time = priv->rx_usecs_high;
+ } else {
+ if (rate < priv->pkt_rate_low)
+ moder_time = priv->rx_usecs_low;
+ else if (rate > priv->pkt_rate_high)
+ moder_time = priv->rx_usecs_high;
+ else
+ moder_time = (rate - priv->pkt_rate_low) *
+ (priv->rx_usecs_high - priv->rx_usecs_low) /
+ (priv->pkt_rate_high - priv->pkt_rate_low) +
+ priv->rx_usecs_low;
+ }
+ } else {
+ /* When packet rate is low, use default moderation rather than
+ * 0 to prevent interrupt storms if traffic suddenly increases */
+ moder_time = priv->rx_usecs;
+ }
+
+ mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
+ tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
+
+ mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
+ "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
+ priv->last_moder_time, moder_time, period, packets,
+ avg_pkt_size, rate);
+
+ if (moder_time != priv->last_moder_time) {
+ priv->last_moder_time = moder_time;
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+ cq->moder_time = moder_time;
+ err = mlx4_en_set_cq_moder(priv, cq);
+ if (err) {
+ mlx4_err(mdev, "Failed modifying moderation for cq:%d "
+ "on port:%d\n", i, priv->port);
+ break;
+ }
+ }
+ }
+
+out:
+ priv->last_moder_packets = rx_packets;
+ priv->last_moder_tx_packets = tx_packets;
+ priv->last_moder_bytes = rx_bytes;
+ priv->last_moder_jiffies = jiffies;
+}
+
+static void mlx4_en_do_get_stats(struct work_struct *work)
+{
+ struct delayed_work *delay = container_of(work, struct delayed_work, work);
+ struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
+ stats_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
+ if (err)
+ mlx4_dbg(HW, priv, "Could not update stats for "
+ "port:%d\n", priv->port);
+
+ mutex_lock(&mdev->state_lock);
+ if (mdev->device_up) {
+ if (priv->port_up)
+ mlx4_en_auto_moderation(priv);
+
+ queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+ }
+ mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_linkstate(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ linkstate_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int linkstate = priv->link_state;
+
+ mutex_lock(&mdev->state_lock);
+ /* If the observable port state changed, set the carrier state and
+ * report it to the system log */
+ if (priv->last_link_state != linkstate) {
+ if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
+ if (netif_msg_link(priv))
+ mlx4_info(mdev, "Port %d - link down\n", priv->port);
+ netif_carrier_off(priv->dev);
+ } else {
+ if (netif_msg_link(priv))
+ mlx4_info(mdev, "Port %d - link up\n", priv->port);
+ netif_carrier_on(priv->dev);
+ }
+ }
+ priv->last_link_state = linkstate;
+ mutex_unlock(&mdev->state_lock);
+}
+
+
+static int mlx4_en_start_port(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq;
+ struct mlx4_en_tx_ring *tx_ring;
+ struct mlx4_en_rx_ring *rx_ring;
+ int rx_index = 0;
+ int tx_index = 0;
+ u16 stride;
+ int err = 0;
+ int i;
+ int j;
+
+ if (priv->port_up) {
+ mlx4_dbg(DRV, priv, "start port called while port already up\n");
+ return 0;
+ }
+
+ /* Calculate Rx buf size */
+ dev->mtu = min(dev->mtu, priv->max_mtu);
+ mlx4_en_calc_rx_buf(dev);
+ mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
+ stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * priv->num_frags);
+ /* Configure rx cq's and rings */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+ rx_ring = &priv->rx_ring[i];
+
+ err = mlx4_en_activate_cq(priv, cq);
+ if (err) {
+ mlx4_err(mdev, "Failed activating Rx CQ\n");
+ goto rx_err;
+ }
+ for (j = 0; j < cq->size; j++)
+ cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
+ err = mlx4_en_set_cq_moder(priv, cq);
+ if (err) {
+ mlx4_err(mdev, "Failed setting cq moderation parameters");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto cq_err;
+ }
+ mlx4_en_arm_cq(priv, cq);
+
+ ++rx_index;
+ }
+
+ err = mlx4_en_activate_rx_rings(priv);
+ if (err) {
+ mlx4_err(mdev, "Failed to activate RX rings\n");
+ goto cq_err;
+ }
+
+ err = mlx4_en_config_rss_steer(priv);
+ if (err) {
+ mlx4_err(mdev, "Failed configuring rss steering\n");
+ goto rx_err;
+ }
+
+ /* Configure tx cq's and rings */
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ /* Configure cq */
+ cq = &priv->tx_cq[i];
+ err = mlx4_en_activate_cq(priv, cq);
+ if (err) {
+ mlx4_err(mdev, "Failed allocating Tx CQ\n");
+ goto tx_err;
+ }
+ err = mlx4_en_set_cq_moder(priv, cq);
+ if (err) {
+ mlx4_err(mdev, "Failed setting cq moderation parameters");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto tx_err;
+ }
+ mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
+ cq->buf->wqe_index = cpu_to_be16(0xffff);
+
+ /* Configure ring */
+ tx_ring = &priv->tx_ring[i];
+ err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
+ priv->rx_ring[0].srq.srqn);
+ if (err) {
+ mlx4_err(mdev, "Failed allocating Tx ring\n");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto tx_err;
+ }
+ /* Set initial ownership of all Tx TXBBs to SW (1) */
+ for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
+ *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
+ ++tx_index;
+ }
+
+ /* Configure port */
+ err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+ priv->rx_skb_size + ETH_FCS_LEN,
+ mdev->profile.tx_pause,
+ mdev->profile.tx_ppp,
+ mdev->profile.rx_pause,
+ mdev->profile.rx_ppp);
+ if (err) {
+ mlx4_err(mdev, "Failed setting port general configurations"
+ " for port %d, with error %d\n", priv->port, err);
+ goto tx_err;
+ }
+ /* Set default qp number */
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
+ if (err) {
+ mlx4_err(mdev, "Failed setting default qp numbers\n");
+ goto tx_err;
+ }
+ /* Set port mac number */
+ mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+ err = mlx4_register_mac(mdev->dev, priv->port,
+ priv->mac, &priv->mac_index);
+ if (err) {
+ mlx4_err(mdev, "Failed setting port mac\n");
+ goto tx_err;
+ }
+
+ /* Init port */
+ mlx4_dbg(HW, priv, "Initializing port\n");
+ err = mlx4_INIT_PORT(mdev->dev, priv->port);
+ if (err) {
+ mlx4_err(mdev, "Failed Initializing port\n");
+ goto mac_err;
+ }
+
+ /* Schedule multicast task to populate multicast list */
+ queue_work(mdev->workqueue, &priv->mcast_task);
+
+ priv->port_up = true;
+ netif_start_queue(dev);
+ return 0;
+
+mac_err:
+ mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+tx_err:
+ while (tx_index--) {
+ mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
+ mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+ }
+
+ mlx4_en_release_rss_steer(priv);
+rx_err:
+ for (i = 0; i < priv->rx_ring_num; i++)
+ mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[rx_index]);
+cq_err:
+ while (rx_index--)
+ mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+
+ return err; /* need to close devices */
+}
+
+
+static void mlx4_en_stop_port(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int i;
+
+ if (!priv->port_up) {
+ mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
+ priv->port);
+ return;
+ }
+ netif_stop_queue(dev);
+
+ /* Synchronize with tx routine */
+ netif_tx_lock_bh(dev);
+ priv->port_up = false;
+ netif_tx_unlock_bh(dev);
+
+ /* close port */
+ mlx4_CLOSE_PORT(mdev->dev, priv->port);
+
+ /* Unregister Mac address for the port */
+ mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+
+ /* Free TX Rings */
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
+ mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+ }
+ msleep(10);
+
+ for (i = 0; i < priv->tx_ring_num; i++)
+ mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+
+ /* Free RSS qps */
+ mlx4_en_release_rss_steer(priv);
+
+ /* Free RX Rings */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+ while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
+ msleep(1);
+ mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
+ }
+}
+
+static void mlx4_en_restart(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ watchdog_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+
+ mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+ mlx4_en_stop_port(dev);
+ if (mlx4_en_start_port(dev))
+ mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
+}
+
+
+static int mlx4_en_open(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int i;
+ int err = 0;
+
+ mutex_lock(&mdev->state_lock);
+
+ if (!mdev->device_up) {
+ mlx4_err(mdev, "Cannot open - device down/disabled\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ /* Reset HW statistics and performance counters */
+ if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
+ mlx4_dbg(HW, priv, "Failed dumping statistics\n");
+
+ memset(&priv->stats, 0, sizeof(priv->stats));
+ memset(&priv->pstats, 0, sizeof(priv->pstats));
+
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ priv->tx_ring[i].bytes = 0;
+ priv->tx_ring[i].packets = 0;
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ priv->rx_ring[i].bytes = 0;
+ priv->rx_ring[i].packets = 0;
+ }
+
+ mlx4_en_set_default_moderation(priv);
+ err = mlx4_en_start_port(dev);
+ if (err)
+ mlx4_err(mdev, "Failed starting port:%d\n", priv->port);
+
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
+}
+
+
+static int mlx4_en_close(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (netif_msg_ifdown(priv))
+ mlx4_info(mdev, "Close called for port:%d\n", priv->port);
+
+ mutex_lock(&mdev->state_lock);
+
+ mlx4_en_stop_port(dev);
+ netif_carrier_off(dev);
+
+ mutex_unlock(&mdev->state_lock);
+ return 0;
+}
+
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ if (priv->tx_ring[i].tx_info)
+ mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+ if (priv->tx_cq[i].buf)
+ mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+ }
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ if (priv->rx_ring[i].rx_info)
+ mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
+ if (priv->rx_cq[i].buf)
+ mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+ }
+}
+
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile *prof = priv->prof;
+ int i;
+
+ /* Create tx Rings */
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
+ prof->tx_ring_size, i, TX))
+ goto err;
+
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+ prof->tx_ring_size, TXBB_SIZE))
+ goto err;
+ }
+
+ /* Create rx Rings */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
+ prof->rx_ring_size, i, RX))
+ goto err;
+
+ if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
+ prof->rx_ring_size, priv->stride))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ mlx4_err(mdev, "Failed to allocate NIC resources\n");
+ return -ENOMEM;
+}
+
+
+void mlx4_en_destroy_netdev(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
+
+ /* Unregister device - this will close the port if it was up */
+ if (priv->registered)
+ unregister_netdev(dev);
+
+ if (priv->allocated)
+ mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
+
+ cancel_delayed_work(&priv->stats_task);
+ cancel_delayed_work(&priv->refill_task);
+ /* flush any pending task for this netdev */
+ flush_workqueue(mdev->workqueue);
+
+ /* Detach the netdev so tasks would not attempt to access it */
+ mutex_lock(&mdev->state_lock);
+ mdev->pndev[priv->port] = NULL;
+ mutex_unlock(&mdev->state_lock);
+
+ mlx4_en_free_resources(priv);
+ free_netdev(dev);
+}
+
+static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+
+ mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
+ dev->mtu, new_mtu);
+
+ if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
+ mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu);
+ return -EPERM;
+ }
+ dev->mtu = new_mtu;
+
+ if (netif_running(dev)) {
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up) {
+ /* NIC is probably restarting - let watchdog task reset
+ * the port */
+ mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n");
+ } else {
+ mlx4_en_stop_port(dev);
+ mlx4_en_set_default_moderation(priv);
+ err = mlx4_en_start_port(dev);
+ if (err) {
+ mlx4_err(mdev, "Failed restarting port:%d\n",
+ priv->port);
+ queue_work(mdev->workqueue, &priv->watchdog_task);
+ }
+ }
+ mutex_unlock(&mdev->state_lock);
+ }
+ return 0;
+}
+
+int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ struct mlx4_en_port_profile *prof)
+{
+ struct net_device *dev;
+ struct mlx4_en_priv *priv;
+ int i;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ if (dev == NULL) {
+ mlx4_err(mdev, "Net device allocation failed\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
+
+ /*
+ * Initialize driver private data
+ */
+
+ priv = netdev_priv(dev);
+ memset(priv, 0, sizeof(struct mlx4_en_priv));
+ priv->dev = dev;
+ priv->mdev = mdev;
+ priv->prof = prof;
+ priv->port = port;
+ priv->port_up = false;
+ priv->rx_csum = 1;
+ priv->flags = prof->flags;
+ priv->tx_ring_num = prof->tx_ring_num;
+ priv->rx_ring_num = prof->rx_ring_num;
+ priv->mc_list = NULL;
+ priv->mac_index = -1;
+ priv->msg_enable = MLX4_EN_MSG_LEVEL;
+ spin_lock_init(&priv->stats_lock);
+ INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
+ INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
+ INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
+ INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+ INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+
+ /* Query for default mac and max mtu */
+ priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
+ priv->mac = mdev->dev->caps.def_mac[priv->port];
+ if (ILLEGAL_MAC(priv->mac)) {
+ mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
+ priv->port, priv->mac);
+ err = -EINVAL;
+ goto out;
+ }
+
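+ /* The Rx stride below covers one mlx4_en_rx_desc plus the maximum
+ * number of DS_SIZE data segments, rounded up to a power of two so
+ * the rings can index descriptors with log_stride shifts. */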
+ priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+ err = mlx4_en_alloc_resources(priv);
+ if (err)
+ goto out;
+
+ /* Populate Rx default RSS mappings */
+ mlx4_en_set_default_rss_map(priv, &priv->rss_map, priv->rx_ring_num *
+ RSS_FACTOR, priv->rx_ring_num);
+ /* Allocate page for receive rings */
+ err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
+ MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
+ if (err) {
+ mlx4_err(mdev, "Failed to allocate page for rx qps\n");
+ goto out;
+ }
+ priv->allocated = 1;
+
+ /* Populate Tx priority mappings */
+ mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);
+
+ /*
+ * Initialize netdev entry points
+ */
+
+ dev->open = &mlx4_en_open;
+ dev->stop = &mlx4_en_close;
+ dev->hard_start_xmit = &mlx4_en_xmit;
+ dev->get_stats = &mlx4_en_get_stats;
+ dev->set_multicast_list = &mlx4_en_set_multicast;
+ dev->set_mac_address = &mlx4_en_set_mac;
+ dev->change_mtu = &mlx4_en_change_mtu;
+ dev->tx_timeout = &mlx4_en_tx_timeout;
+ dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
+ dev->vlan_rx_register = mlx4_en_vlan_rx_register;
+ dev->vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid;
+ dev->vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = mlx4_en_netpoll;
+#endif
+ SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
+
+ /* Set default MAC */
+ dev->addr_len = ETH_ALEN;
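+ /* The burned-in MAC is held in a u64 with the last address octet in
+ * the least significant byte, so copy it out in reverse order. */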
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[ETH_ALEN - 1 - i] =
+ (u8) (priv->mac >> (8 * i));
+
+ /*
+ * Set driver features
+ */
+ dev->features |= NETIF_F_SG;
+ dev->features |= NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+ if (mdev->profile.num_lro)
+ dev->features |= NETIF_F_LRO;
+ if (mdev->LSO_support) {
+ dev->features |= NETIF_F_TSO;
+ dev->features |= NETIF_F_TSO6;
+ }
+
+ mdev->pndev[port] = dev;
+
+ netif_carrier_off(dev);
+ err = register_netdev(dev);
+ if (err) {
+ mlx4_err(mdev, "Netdev registration failed\n");
+ goto out;
+ }
+ priv->registered = 1;
+ queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+ return 0;
+
+out:
+ mlx4_en_destroy_netdev(dev);
+ return err;
+}
+
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
new file mode 100644
index 00000000000..c2e69b1bcd0
--- /dev/null
+++ b/drivers/net/mlx4/en_params.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "mlx4_en.h"
+#include "en_port.h"
+
+#define MLX4_EN_PARM_INT(X, def_val, desc) \
+ static unsigned int X = def_val;\
+ module_param(X, uint, 0444); \
+ MODULE_PARM_DESC(X, desc);
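+/* For example, MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS")
+ * expands to a static unsigned int rss_xor = 0 exposed as a read-only (0444)
+ * module parameter carrying that description. */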
+
+
+/*
+ * Device scope module parameters
+ */
+
+
+/* Use an XOR rather than a Toeplitz hash function for RSS */
+MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
+
+/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
+MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
+
+/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
+MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
+ "Number of LRO sessions per ring or disabled (0)");
+
+/* Priority pausing */
+MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE,
+ "Pause policy on TX: 0 never generate pause frames "
+ "1 generate pause frames according to RX buffer threshold");
+MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE,
+ "Pause policy on RX: 0 ignore received pause frames "
+ "1 respect received pause frames");
+MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
+ " Per priority bit mask");
+MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
+ " Per priority bit mask");
+
+/* Interrupt moderation tuning */
+MLX4_EN_PARM_INT(rx_moder_cnt, MLX4_EN_AUTO_CONF,
+ "Max coalesced descriptors for Rx interrupt moderation");
+MLX4_EN_PARM_INT(rx_moder_time, MLX4_EN_AUTO_CONF,
+ "Timeout following last packet for Rx interrupt moderation");
+MLX4_EN_PARM_INT(auto_moder, 1, "Enable dynamic interrupt moderation");
+
+MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number of Rx rings for port 1 (0 = #cores)");
+MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number of Rx rings for port 2 (0 = #cores)");
+
+MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
+MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
+MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
+MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
+
+
+int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
+{
+ struct mlx4_en_profile *params = &mdev->profile;
+
+ params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF);
+ params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF);
+ params->auto_moder = auto_moder;
+ params->rss_xor = (rss_xor != 0);
+ params->rss_mask = rss_mask & 0x1f;
+ params->num_lro = min_t(int, num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS);
+ params->rx_pause = pprx;
+ params->rx_ppp = pfcrx;
+ params->tx_pause = pptx;
+ params->tx_ppp = pfctx;
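+ /* When per-priority flow control is requested, use MLX4_EN_TX_RING_NUM
+ * Tx rings (presumably one per priority class); otherwise a single Tx
+ * ring per port is used. */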
+ if (params->rx_ppp || params->tx_ppp) {
+ params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
+ params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
+ } else {
+ params->prof[1].tx_ring_num = 1;
+ params->prof[2].tx_ring_num = 1;
+ }
+ params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
+ params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
+
+ if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
+ tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
+ params->prof[1].tx_ring_size =
+ (tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
+ MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
+
+ if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
+ tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
+ params->prof[2].tx_ring_size =
+ (tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
+ MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
+
+ if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
+ rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
+ params->prof[1].rx_ring_size =
+ (rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
+ MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
+
+ if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
+ rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
+ params->prof[2].rx_ring_size =
+ (rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
+ MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
+ return 0;
+}
+
+
+/*
+ * Ethtool support
+ */
+
+static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
+{
+ int i;
+
+ priv->port_stats.lro_aggregated = 0;
+ priv->port_stats.lro_flushed = 0;
+ priv->port_stats.lro_no_desc = 0;
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
+ priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
+ priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
+ }
+}
+
+static void
+mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
+ strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
+ sprintf(drvinfo->fw_version, "%d.%d.%d",
+ (u16) (mdev->dev->caps.fw_ver >> 32),
+ (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
+ (u16) (mdev->dev->caps.fw_ver & 0xffff));
+ strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+ drvinfo->n_stats = 0;
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+static u32 mlx4_en_get_tso(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_TSO) != 0;
+}
+
+static int mlx4_en_set_tso(struct net_device *dev, u32 data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (data) {
+ if (!priv->mdev->LSO_support)
+ return -EPERM;
+ dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ } else
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ return 0;
+}
+
+static u32 mlx4_en_get_rx_csum(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ return priv->rx_csum;
+}
+
+static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ priv->rx_csum = (data != 0);
+ return 0;
+}
+
+static const char main_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+ "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+ "rx_length_errors", "rx_over_errors", "rx_crc_errors",
+ "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+ "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+ "tx_heartbeat_errors", "tx_window_errors",
+
+ /* port statistics */
+ "lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
+ "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
+ "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
+
+ /* packet statistics */
+ "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
+ "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
+ "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
+ "tx_prio_6", "tx_prio_7",
+};
+#define NUM_MAIN_STATS 21
+#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
+
+static u32 mlx4_en_get_msglevel(struct net_device *dev)
+{
+ return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
+}
+
+static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
+{
+ ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
+}
+
+static void mlx4_en_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ return;
+}
+
+static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
+}
+
+static void mlx4_en_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int index = 0;
+ int i;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ mlx4_en_update_lro_stats(priv);
+
+ for (i = 0; i < NUM_MAIN_STATS; i++)
+ data[index++] = ((unsigned long *) &priv->stats)[i];
+ for (i = 0; i < NUM_PORT_STATS; i++)
+ data[index++] = ((unsigned long *) &priv->port_stats)[i];
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ data[index++] = priv->tx_ring[i].packets;
+ data[index++] = priv->tx_ring[i].bytes;
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ data[index++] = priv->rx_ring[i].packets;
+ data[index++] = priv->rx_ring[i].bytes;
+ }
+ for (i = 0; i < NUM_PKT_STATS; i++)
+ data[index++] = ((unsigned long *) &priv->pkstats)[i];
+ spin_unlock_bh(&priv->stats_lock);
+
+}
+
+static void mlx4_en_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int index = 0;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ /* Add main counters */
+ for (i = 0; i < NUM_MAIN_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
+ for (i = 0; i < NUM_PORT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[i + NUM_MAIN_STATS]);
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "tx%d_packets", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "tx%d_bytes", i);
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_packets", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_bytes", i);
+ }
+ for (i = 0; i < NUM_PKT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
+}
+
+static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->supported = SUPPORTED_10000baseT_Full;
+ cmd->advertising = ADVERTISED_10000baseT_Full;
+ if (netif_carrier_ok(dev)) {
+ cmd->speed = SPEED_10000;
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ cmd->speed = -1;
+ cmd->duplex = -1;
+ }
+ return 0;
+}
+
+static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ if ((cmd->autoneg == AUTONEG_ENABLE) ||
+ (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL))
+ return -EINVAL;
+
+ /* Nothing to change */
+ return 0;
+}
+
+static int mlx4_en_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ coal->tx_coalesce_usecs = 0;
+ coal->tx_max_coalesced_frames = 0;
+ coal->rx_coalesce_usecs = priv->rx_usecs;
+ coal->rx_max_coalesced_frames = priv->rx_frames;
+
+ coal->pkt_rate_low = priv->pkt_rate_low;
+ coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
+ coal->pkt_rate_high = priv->pkt_rate_high;
+ coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
+ coal->rate_sample_interval = priv->sample_interval;
+ coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+ return 0;
+}
+
+static int mlx4_en_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int err, i;
+
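+ /* MLX4_EN_AUTO_CONF selects defaults: the frame budget is derived from
+ * the byte target divided by the MTU, the time budget from a fixed
+ * default. */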
+ priv->rx_frames = (coal->rx_max_coalesced_frames ==
+ MLX4_EN_AUTO_CONF) ?
+ MLX4_EN_RX_COAL_TARGET /
+ priv->dev->mtu + 1 :
+ coal->rx_max_coalesced_frames;
+ priv->rx_usecs = (coal->rx_coalesce_usecs ==
+ MLX4_EN_AUTO_CONF) ?
+ MLX4_EN_RX_COAL_TIME :
+ coal->rx_coalesce_usecs;
+
+ /* Set adaptive coalescing params */
+ priv->pkt_rate_low = coal->pkt_rate_low;
+ priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
+ priv->pkt_rate_high = coal->pkt_rate_high;
+ priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
+ priv->sample_interval = coal->rate_sample_interval;
+ priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+ priv->last_moder_time = MLX4_EN_AUTO_CONF;
+ if (priv->adaptive_rx_coal)
+ return 0;
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ priv->rx_cq[i].moder_cnt = priv->rx_frames;
+ priv->rx_cq[i].moder_time = priv->rx_usecs;
+ err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int mlx4_en_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ mdev->profile.tx_pause = pause->tx_pause != 0;
+ mdev->profile.rx_pause = pause->rx_pause != 0;
+ err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+ priv->rx_skb_size + ETH_FCS_LEN,
+ mdev->profile.tx_pause,
+ mdev->profile.tx_ppp,
+ mdev->profile.rx_pause,
+ mdev->profile.rx_ppp);
+ if (err)
+ mlx4_err(mdev, "Failed setting pause params to\n");
+
+ return err;
+}
+
+static void mlx4_en_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ pause->tx_pause = mdev->profile.tx_pause;
+ pause->rx_pause = mdev->profile.rx_pause;
+}
+
+static void mlx4_en_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ memset(param, 0, sizeof(*param));
+ param->rx_max_pending = mdev->dev->caps.max_rq_sg;
+ param->tx_max_pending = mdev->dev->caps.max_sq_sg;
+ param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
+ param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
+}
+
+const struct ethtool_ops mlx4_en_ethtool_ops = {
+ .get_drvinfo = mlx4_en_get_drvinfo,
+ .get_settings = mlx4_en_get_settings,
+ .set_settings = mlx4_en_set_settings,
+#ifdef NETIF_F_TSO
+ .get_tso = mlx4_en_get_tso,
+ .set_tso = mlx4_en_set_tso,
+#endif
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_link = ethtool_op_get_link,
+ .get_rx_csum = mlx4_en_get_rx_csum,
+ .set_rx_csum = mlx4_en_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
+ .get_strings = mlx4_en_get_strings,
+ .get_sset_count = mlx4_en_get_sset_count,
+ .get_ethtool_stats = mlx4_en_get_ethtool_stats,
+ .get_wol = mlx4_en_get_wol,
+ .get_msglevel = mlx4_en_get_msglevel,
+ .set_msglevel = mlx4_en_set_msglevel,
+ .get_coalesce = mlx4_en_get_coalesce,
+ .set_coalesce = mlx4_en_set_coalesce,
+ .get_pauseparam = mlx4_en_get_pauseparam,
+ .set_pauseparam = mlx4_en_set_pauseparam,
+ .get_ringparam = mlx4_en_get_ringparam,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = ethtool_op_set_flags,
+};
+
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
new file mode 100644
index 00000000000..c5a4c038975
--- /dev/null
+++ b/drivers/net/mlx4/en_port.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+
+#include <linux/if_vlan.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+
+#include "en_port.h"
+#include "mlx4_en.h"
+
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+ u64 mac, u64 clear, u8 mode)
+{
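+ /* The 64-bit input parameter carries the MAC in the low bits and the
+ * "clear filter" flag in bit 63. */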
+ return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+ MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_vlan_fltr_mbox *filter;
+ int i;
+ int j;
+ int index = 0;
+ u32 entry;
+ int err = 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ filter = mailbox->buf;
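+ /* Each 32-bit filter entry carries enable bits for 32 consecutive
+ * VIDs; the table is filled from the highest-index entry down, so
+ * VID 0 lands in bit 0 of the last entry. */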
+ if (grp) {
+ memset(filter, 0, sizeof *filter);
+ for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
+ entry = 0;
+ for (j = 0; j < 32; j++)
+ if (vlan_group_get_device(grp, index++))
+ entry |= 1 << j;
+ filter->entry[i] = cpu_to_be32(entry);
+ }
+ } else {
+ /* When no vlans are configured we block all vlans */
+ memset(filter, 0, sizeof(*filter));
+ }
+ err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
+ MLX4_CMD_TIME_CLASS_B);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+ u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ int err;
+ u32 in_mod;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->flags = SET_PORT_GEN_ALL_VALID;
+ context->mtu = cpu_to_be16(mtu);
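+ /* Global pause and per-priority flow control appear to be mutually
+ * exclusive: the pause enable bit (bit 7) is only set when the matching
+ * PFC mask is zero. */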
+ context->pptx = (pptx * (!pfctx)) << 7;
+ context->pfctx = pfctx;
+ context->pprx = (pprx * (!pfcrx)) << 7;
+ context->pfcrx = pfcrx;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+ u8 promisc)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_rqp_calc_context *context;
+ int err;
+ u32 in_mod;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->base_qpn = cpu_to_be32(base_qpn);
+ context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
+ context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
+ context->intra_no_vlan = 0;
+ context->no_vlan = MLX4_NO_VLAN_IDX;
+ context->intra_vlan_miss = 0;
+ context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+ in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+
+int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
+{
+ struct mlx4_en_stat_out_mbox *mlx4_en_stats;
+ struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
+ struct net_device_stats *stats = &priv->stats;
+ struct mlx4_cmd_mailbox *mailbox;
+ u64 in_mod = reset << 8 | port;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
+ err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ goto out;
+
+ mlx4_en_stats = mailbox->buf;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
+ be32_to_cpu(mlx4_en_stats->RDROP);
+ stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
+ be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
+ be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
+ stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
+ be64_to_cpu(mlx4_en_stats->ROCT_novlan);
+
+ stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);
+
+ stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
+ be32_to_cpu(mlx4_en_stats->RdropLength) +
+ be32_to_cpu(mlx4_en_stats->RJBBR) +
+ be32_to_cpu(mlx4_en_stats->RCRC) +
+ be32_to_cpu(mlx4_en_stats->RRUNT);
+ stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
+ stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
+ be64_to_cpu(mlx4_en_stats->MCAST_novlan);
+ stats->collisions = 0;
+ stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
+ stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
+ stats->rx_frame_errors = 0;
+ stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->tx_aborted_errors = 0;
+ stats->tx_carrier_errors = 0;
+ stats->tx_fifo_errors = 0;
+ stats->tx_heartbeat_errors = 0;
+ stats->tx_window_errors = 0;
+
+ priv->pkstats.broadcast =
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
+ priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
+ priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
+ priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
+ priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
+ priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
+ priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
+ priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
+ priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
+ priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
+ priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
+ priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
+ priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
+ priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
+ priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
+ priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
+ priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
+ spin_unlock_bh(&priv->stats_lock);
+
+out:
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return err;
+}
+
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
new file mode 100644
index 00000000000..e6477f12beb
--- /dev/null
+++ b/drivers/net/mlx4/en_port.h
@@ -0,0 +1,570 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _MLX4_EN_PORT_H_
+#define _MLX4_EN_PORT_H_
+
+
+#define SET_PORT_GEN_ALL_VALID 0x7
+#define SET_PORT_PROMISC_SHIFT 31
+
+enum {
+ MLX4_CMD_SET_VLAN_FLTR = 0x47,
+ MLX4_CMD_SET_MCAST_FLTR = 0x48,
+ MLX4_CMD_DUMP_ETH_STATS = 0x49,
+};
+
+struct mlx4_set_port_general_context {
+ u8 reserved[3];
+ u8 flags;
+ u16 reserved2;
+ __be16 mtu;
+ u8 pptx;
+ u8 pfctx;
+ u16 reserved3;
+ u8 pprx;
+ u8 pfcrx;
+ u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+ __be32 base_qpn;
+ __be32 flags;
+ u8 reserved[3];
+ u8 mac_miss;
+ u8 intra_no_vlan;
+ u8 no_vlan;
+ u8 intra_vlan_miss;
+ u8 vlan_miss;
+ u8 reserved2[3];
+ u8 no_vlan_prio;
+ __be32 promisc;
+ __be32 mcast;
+};
+
+#define VLAN_FLTR_SIZE 128
+struct mlx4_set_vlan_fltr_mbox {
+ __be32 entry[VLAN_FLTR_SIZE];
+};
+
+
+enum {
+ MLX4_MCAST_CONFIG = 0,
+ MLX4_MCAST_DISABLE = 1,
+ MLX4_MCAST_ENABLE = 2,
+};
+
+
+struct mlx4_en_stat_out_mbox {
+ /* Received frames with a length of 64 octets */
+ __be64 R64_prio_0;
+ __be64 R64_prio_1;
+ __be64 R64_prio_2;
+ __be64 R64_prio_3;
+ __be64 R64_prio_4;
+ __be64 R64_prio_5;
+ __be64 R64_prio_6;
+ __be64 R64_prio_7;
+ __be64 R64_novlan;
+ /* Received frames with a length of 65 to 127 octets */
+ __be64 R127_prio_0;
+ __be64 R127_prio_1;
+ __be64 R127_prio_2;
+ __be64 R127_prio_3;
+ __be64 R127_prio_4;
+ __be64 R127_prio_5;
+ __be64 R127_prio_6;
+ __be64 R127_prio_7;
+ __be64 R127_novlan;
+ /* Received frames with a length of 128 to 255 octets */
+ __be64 R255_prio_0;
+ __be64 R255_prio_1;
+ __be64 R255_prio_2;
+ __be64 R255_prio_3;
+ __be64 R255_prio_4;
+ __be64 R255_prio_5;
+ __be64 R255_prio_6;
+ __be64 R255_prio_7;
+ __be64 R255_novlan;
+ /* Received frames with a length of 256 to 511 octets */
+ __be64 R511_prio_0;
+ __be64 R511_prio_1;
+ __be64 R511_prio_2;
+ __be64 R511_prio_3;
+ __be64 R511_prio_4;
+ __be64 R511_prio_5;
+ __be64 R511_prio_6;
+ __be64 R511_prio_7;
+ __be64 R511_novlan;
+ /* Received frames with a length of 512 to 1023 octets */
+ __be64 R1023_prio_0;
+ __be64 R1023_prio_1;
+ __be64 R1023_prio_2;
+ __be64 R1023_prio_3;
+ __be64 R1023_prio_4;
+ __be64 R1023_prio_5;
+ __be64 R1023_prio_6;
+ __be64 R1023_prio_7;
+ __be64 R1023_novlan;
+ /* Received frames with a length of 1024 to 1518 octets */
+ __be64 R1518_prio_0;
+ __be64 R1518_prio_1;
+ __be64 R1518_prio_2;
+ __be64 R1518_prio_3;
+ __be64 R1518_prio_4;
+ __be64 R1518_prio_5;
+ __be64 R1518_prio_6;
+ __be64 R1518_prio_7;
+ __be64 R1518_novlan;
+ /* Received frames with a length of 1519 to 1522 octets */
+ __be64 R1522_prio_0;
+ __be64 R1522_prio_1;
+ __be64 R1522_prio_2;
+ __be64 R1522_prio_3;
+ __be64 R1522_prio_4;
+ __be64 R1522_prio_5;
+ __be64 R1522_prio_6;
+ __be64 R1522_prio_7;
+ __be64 R1522_novlan;
+ /* Received frames with a length of 1523 to 1548 octets */
+ __be64 R1548_prio_0;
+ __be64 R1548_prio_1;
+ __be64 R1548_prio_2;
+ __be64 R1548_prio_3;
+ __be64 R1548_prio_4;
+ __be64 R1548_prio_5;
+ __be64 R1548_prio_6;
+ __be64 R1548_prio_7;
+ __be64 R1548_novlan;
+ /* Received frames with a length of 1548 < octets < MTU */
+ __be64 R2MTU_prio_0;
+ __be64 R2MTU_prio_1;
+ __be64 R2MTU_prio_2;
+ __be64 R2MTU_prio_3;
+ __be64 R2MTU_prio_4;
+ __be64 R2MTU_prio_5;
+ __be64 R2MTU_prio_6;
+ __be64 R2MTU_prio_7;
+ __be64 R2MTU_novlan;
+ /* Received frames with a length greater than MTU octets and a good CRC */
+ __be64 RGIANT_prio_0;
+ __be64 RGIANT_prio_1;
+ __be64 RGIANT_prio_2;
+ __be64 RGIANT_prio_3;
+ __be64 RGIANT_prio_4;
+ __be64 RGIANT_prio_5;
+ __be64 RGIANT_prio_6;
+ __be64 RGIANT_prio_7;
+ __be64 RGIANT_novlan;
+ /* Received broadcast frames with good CRC */
+ __be64 RBCAST_prio_0;
+ __be64 RBCAST_prio_1;
+ __be64 RBCAST_prio_2;
+ __be64 RBCAST_prio_3;
+ __be64 RBCAST_prio_4;
+ __be64 RBCAST_prio_5;
+ __be64 RBCAST_prio_6;
+ __be64 RBCAST_prio_7;
+ __be64 RBCAST_novlan;
+ /* Received multicast frames with good CRC */
+ __be64 MCAST_prio_0;
+ __be64 MCAST_prio_1;
+ __be64 MCAST_prio_2;
+ __be64 MCAST_prio_3;
+ __be64 MCAST_prio_4;
+ __be64 MCAST_prio_5;
+ __be64 MCAST_prio_6;
+ __be64 MCAST_prio_7;
+ __be64 MCAST_novlan;
+ /* Received unicast not short or GIANT frames with good CRC */
+ __be64 RTOTG_prio_0;
+ __be64 RTOTG_prio_1;
+ __be64 RTOTG_prio_2;
+ __be64 RTOTG_prio_3;
+ __be64 RTOTG_prio_4;
+ __be64 RTOTG_prio_5;
+ __be64 RTOTG_prio_6;
+ __be64 RTOTG_prio_7;
+ __be64 RTOTG_novlan;
+
+ /* Count of total octets of received frames, includes framing characters */
+ __be64 RTTLOCT_prio_0;
+ /* Count of total octets of received frames, not including framing
+ characters */
+ __be64 RTTLOCT_NOFRM_prio_0;
+ /* Count of Total number of octets received
+ (only for frames without errors) */
+ __be64 ROCT_prio_0;
+
+ __be64 RTTLOCT_prio_1;
+ __be64 RTTLOCT_NOFRM_prio_1;
+ __be64 ROCT_prio_1;
+
+ __be64 RTTLOCT_prio_2;
+ __be64 RTTLOCT_NOFRM_prio_2;
+ __be64 ROCT_prio_2;
+
+ __be64 RTTLOCT_prio_3;
+ __be64 RTTLOCT_NOFRM_prio_3;
+ __be64 ROCT_prio_3;
+
+ __be64 RTTLOCT_prio_4;
+ __be64 RTTLOCT_NOFRM_prio_4;
+ __be64 ROCT_prio_4;
+
+ __be64 RTTLOCT_prio_5;
+ __be64 RTTLOCT_NOFRM_prio_5;
+ __be64 ROCT_prio_5;
+
+ __be64 RTTLOCT_prio_6;
+ __be64 RTTLOCT_NOFRM_prio_6;
+ __be64 ROCT_prio_6;
+
+ __be64 RTTLOCT_prio_7;
+ __be64 RTTLOCT_NOFRM_prio_7;
+ __be64 ROCT_prio_7;
+
+ __be64 RTTLOCT_novlan;
+ __be64 RTTLOCT_NOFRM_novlan;
+ __be64 ROCT_novlan;
+
+ /* Count of Total received frames including bad frames */
+ __be64 RTOT_prio_0;
+ /* Count of Total number of received frames with 802.1Q encapsulation */
+ __be64 R1Q_prio_0;
+ __be64 reserved1;
+
+ __be64 RTOT_prio_1;
+ __be64 R1Q_prio_1;
+ __be64 reserved2;
+
+ __be64 RTOT_prio_2;
+ __be64 R1Q_prio_2;
+ __be64 reserved3;
+
+ __be64 RTOT_prio_3;
+ __be64 R1Q_prio_3;
+ __be64 reserved4;
+
+ __be64 RTOT_prio_4;
+ __be64 R1Q_prio_4;
+ __be64 reserved5;
+
+ __be64 RTOT_prio_5;
+ __be64 R1Q_prio_5;
+ __be64 reserved6;
+
+ __be64 RTOT_prio_6;
+ __be64 R1Q_prio_6;
+ __be64 reserved7;
+
+ __be64 RTOT_prio_7;
+ __be64 R1Q_prio_7;
+ __be64 reserved8;
+
+ __be64 RTOT_novlan;
+ __be64 R1Q_novlan;
+ __be64 reserved9;
+
+ /* Total number of Successfully Received Control Frames */
+ __be64 RCNTL;
+ __be64 reserved10;
+ __be64 reserved11;
+ __be64 reserved12;
+ /* Count of received frames with a length/type field value between 46
+ (42 for VLAN-tagged frames) and 1500 (also 1500 for VLAN-tagged frames),
+ inclusive */
+ __be64 RInRangeLengthErr;
+ /* Count of received frames with length/type field between 1501 and 1535
+ decimal, inclusive */
+ __be64 ROutRangeLengthErr;
+ /* Count of received frames that are longer than max allowed size for
+ 802.3 frames (1518/1522) */
+ __be64 RFrmTooLong;
+ /* Count frames received with PCS error */
+ __be64 PCS;
+
+ /* Transmit frames with a length of 64 octets */
+ __be64 T64_prio_0;
+ __be64 T64_prio_1;
+ __be64 T64_prio_2;
+ __be64 T64_prio_3;
+ __be64 T64_prio_4;
+ __be64 T64_prio_5;
+ __be64 T64_prio_6;
+ __be64 T64_prio_7;
+ __be64 T64_novlan;
+ __be64 T64_loopbk;
+ /* Transmit frames with a length of 65 to 127 octets. */
+ __be64 T127_prio_0;
+ __be64 T127_prio_1;
+ __be64 T127_prio_2;
+ __be64 T127_prio_3;
+ __be64 T127_prio_4;
+ __be64 T127_prio_5;
+ __be64 T127_prio_6;
+ __be64 T127_prio_7;
+ __be64 T127_novlan;
+ __be64 T127_loopbk;
+ /* Transmit frames with a length of 128 to 255 octets */
+ __be64 T255_prio_0;
+ __be64 T255_prio_1;
+ __be64 T255_prio_2;
+ __be64 T255_prio_3;
+ __be64 T255_prio_4;
+ __be64 T255_prio_5;
+ __be64 T255_prio_6;
+ __be64 T255_prio_7;
+ __be64 T255_novlan;
+ __be64 T255_loopbk;
+ /* Transmit frames with a length of 256 to 511 octets */
+ __be64 T511_prio_0;
+ __be64 T511_prio_1;
+ __be64 T511_prio_2;
+ __be64 T511_prio_3;
+ __be64 T511_prio_4;
+ __be64 T511_prio_5;
+ __be64 T511_prio_6;
+ __be64 T511_prio_7;
+ __be64 T511_novlan;
+ __be64 T511_loopbk;
+ /* Transmit frames with a length of 512 to 1023 octets */
+ __be64 T1023_prio_0;
+ __be64 T1023_prio_1;
+ __be64 T1023_prio_2;
+ __be64 T1023_prio_3;
+ __be64 T1023_prio_4;
+ __be64 T1023_prio_5;
+ __be64 T1023_prio_6;
+ __be64 T1023_prio_7;
+ __be64 T1023_novlan;
+ __be64 T1023_loopbk;
+ /* Transmit frames with a length of 1024 to 1518 octets */
+ __be64 T1518_prio_0;
+ __be64 T1518_prio_1;
+ __be64 T1518_prio_2;
+ __be64 T1518_prio_3;
+ __be64 T1518_prio_4;
+ __be64 T1518_prio_5;
+ __be64 T1518_prio_6;
+ __be64 T1518_prio_7;
+ __be64 T1518_novlan;
+ __be64 T1518_loopbk;
+ /* Counts transmit frames with a length of 1519 to 1522 bytes */
+ __be64 T1522_prio_0;
+ __be64 T1522_prio_1;
+ __be64 T1522_prio_2;
+ __be64 T1522_prio_3;
+ __be64 T1522_prio_4;
+ __be64 T1522_prio_5;
+ __be64 T1522_prio_6;
+ __be64 T1522_prio_7;
+ __be64 T1522_novlan;
+ __be64 T1522_loopbk;
+ /* Transmit frames with a length of 1523 to 1548 octets */
+ __be64 T1548_prio_0;
+ __be64 T1548_prio_1;
+ __be64 T1548_prio_2;
+ __be64 T1548_prio_3;
+ __be64 T1548_prio_4;
+ __be64 T1548_prio_5;
+ __be64 T1548_prio_6;
+ __be64 T1548_prio_7;
+ __be64 T1548_novlan;
+ __be64 T1548_loopbk;
+ /* Counts transmit frames with a length of 1549 to MTU bytes */
+ __be64 T2MTU_prio_0;
+ __be64 T2MTU_prio_1;
+ __be64 T2MTU_prio_2;
+ __be64 T2MTU_prio_3;
+ __be64 T2MTU_prio_4;
+ __be64 T2MTU_prio_5;
+ __be64 T2MTU_prio_6;
+ __be64 T2MTU_prio_7;
+ __be64 T2MTU_novlan;
+ __be64 T2MTU_loopbk;
+ /* Transmit frames with a length greater than MTU octets and a good CRC. */
+ __be64 TGIANT_prio_0;
+ __be64 TGIANT_prio_1;
+ __be64 TGIANT_prio_2;
+ __be64 TGIANT_prio_3;
+ __be64 TGIANT_prio_4;
+ __be64 TGIANT_prio_5;
+ __be64 TGIANT_prio_6;
+ __be64 TGIANT_prio_7;
+ __be64 TGIANT_novlan;
+ __be64 TGIANT_loopbk;
+ /* Transmit broadcast frames with a good CRC */
+ __be64 TBCAST_prio_0;
+ __be64 TBCAST_prio_1;
+ __be64 TBCAST_prio_2;
+ __be64 TBCAST_prio_3;
+ __be64 TBCAST_prio_4;
+ __be64 TBCAST_prio_5;
+ __be64 TBCAST_prio_6;
+ __be64 TBCAST_prio_7;
+ __be64 TBCAST_novlan;
+ __be64 TBCAST_loopbk;
+ /* Transmit multicast frames with a good CRC */
+ __be64 TMCAST_prio_0;
+ __be64 TMCAST_prio_1;
+ __be64 TMCAST_prio_2;
+ __be64 TMCAST_prio_3;
+ __be64 TMCAST_prio_4;
+ __be64 TMCAST_prio_5;
+ __be64 TMCAST_prio_6;
+ __be64 TMCAST_prio_7;
+ __be64 TMCAST_novlan;
+ __be64 TMCAST_loopbk;
+ /* Transmit good frames that are neither broadcast nor multicast */
+ __be64 TTOTG_prio_0;
+ __be64 TTOTG_prio_1;
+ __be64 TTOTG_prio_2;
+ __be64 TTOTG_prio_3;
+ __be64 TTOTG_prio_4;
+ __be64 TTOTG_prio_5;
+ __be64 TTOTG_prio_6;
+ __be64 TTOTG_prio_7;
+ __be64 TTOTG_novlan;
+ __be64 TTOTG_loopbk;
+
+ /* total octets of transmitted frames, including framing characters */
+ __be64 TTTLOCT_prio_0;
+ /* total octets of transmitted frames, not including framing characters */
+ __be64 TTTLOCT_NOFRM_prio_0;
+ /* ifOutOctets */
+ __be64 TOCT_prio_0;
+
+ __be64 TTTLOCT_prio_1;
+ __be64 TTTLOCT_NOFRM_prio_1;
+ __be64 TOCT_prio_1;
+
+ __be64 TTTLOCT_prio_2;
+ __be64 TTTLOCT_NOFRM_prio_2;
+ __be64 TOCT_prio_2;
+
+ __be64 TTTLOCT_prio_3;
+ __be64 TTTLOCT_NOFRM_prio_3;
+ __be64 TOCT_prio_3;
+
+ __be64 TTTLOCT_prio_4;
+ __be64 TTTLOCT_NOFRM_prio_4;
+ __be64 TOCT_prio_4;
+
+ __be64 TTTLOCT_prio_5;
+ __be64 TTTLOCT_NOFRM_prio_5;
+ __be64 TOCT_prio_5;
+
+ __be64 TTTLOCT_prio_6;
+ __be64 TTTLOCT_NOFRM_prio_6;
+ __be64 TOCT_prio_6;
+
+ __be64 TTTLOCT_prio_7;
+ __be64 TTTLOCT_NOFRM_prio_7;
+ __be64 TOCT_prio_7;
+
+ __be64 TTTLOCT_novlan;
+ __be64 TTTLOCT_NOFRM_novlan;
+ __be64 TOCT_novlan;
+
+ __be64 TTTLOCT_loopbk;
+ __be64 TTTLOCT_NOFRM_loopbk;
+ __be64 TOCT_loopbk;
+
+ /* Total frames transmitted with a good CRC that are not aborted */
+ __be64 TTOT_prio_0;
+ /* Total number of frames transmitted with 802.1Q encapsulation */
+ __be64 T1Q_prio_0;
+ __be64 reserved13;
+
+ __be64 TTOT_prio_1;
+ __be64 T1Q_prio_1;
+ __be64 reserved14;
+
+ __be64 TTOT_prio_2;
+ __be64 T1Q_prio_2;
+ __be64 reserved15;
+
+ __be64 TTOT_prio_3;
+ __be64 T1Q_prio_3;
+ __be64 reserved16;
+
+ __be64 TTOT_prio_4;
+ __be64 T1Q_prio_4;
+ __be64 reserved17;
+
+ __be64 TTOT_prio_5;
+ __be64 T1Q_prio_5;
+ __be64 reserved18;
+
+ __be64 TTOT_prio_6;
+ __be64 T1Q_prio_6;
+ __be64 reserved19;
+
+ __be64 TTOT_prio_7;
+ __be64 T1Q_prio_7;
+ __be64 reserved20;
+
+ __be64 TTOT_novlan;
+ __be64 T1Q_novlan;
+ __be64 reserved21;
+
+ __be64 TTOT_loopbk;
+ __be64 T1Q_loopbk;
+ __be64 reserved22;
+
+ /* Received frames with a length greater than MTU octets and a bad CRC */
+ __be32 RJBBR;
+ /* Received frames with a bad CRC that are not runts, jabbers,
+ or alignment errors */
+ __be32 RCRC;
+ /* Received frames with SFD with a length of less than 64 octets and a
+ bad CRC */
+ __be32 RRUNT;
+ /* Received frames with a length less than 64 octets and a good CRC */
+ __be32 RSHORT;
+ /* Total Number of Received Packets Dropped */
+ __be32 RDROP;
+ /* Drop due to overflow */
+ __be32 RdropOvflw;
+ /* Drop due to length error */
+ __be32 RdropLength;
+ /* Total of good frames. Does not include frames received with
+ frame-too-long, FCS, or length errors */
+ __be32 RTOTFRMS;
+ /* Total dropped Xmited packets */
+ __be32 TDROP;
+};
+
+
+#endif
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c
new file mode 100644
index 00000000000..a0545209e50
--- /dev/null
+++ b/drivers/net/mlx4/en_resources.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4_en.h"
+
+void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
+ int is_tx, int rss, int qpn, int cqn, int srqn,
+ struct mlx4_qp_context *context)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ memset(context, 0, sizeof *context);
+ context->flags = cpu_to_be32(7 << 16 | rss << 13);
+ context->pd = cpu_to_be32(mdev->priv_pdn);
+ context->mtu_msgmax = 0xff;
+ context->rq_size_stride = 0;
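+ /* For Tx, sq_size_stride appears to pack log2(queue size) with
+ * log2(stride) in 16-byte units; Rx QPs receive through an SRQ instead
+ * (see the srqn assignment below). */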
+ if (is_tx)
+ context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
+ else
+ context->sq_size_stride = 1;
+ context->usr_page = cpu_to_be32(mdev->priv_uar.index);
+ context->local_qpn = cpu_to_be32(qpn);
+ context->pri_path.ackto = 1 & 0x07;
+ context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
+ context->pri_path.counter_index = 0xff;
+ context->cqn_send = cpu_to_be32(cqn);
+ context->cqn_recv = cpu_to_be32(cqn);
+ context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
+ if (!rss)
+ context->srqn = cpu_to_be32(MLX4_EN_USE_SRQ | srqn);
+}
+
+
+int mlx4_en_map_buffer(struct mlx4_buf *buf)
+{
+ struct page **pages;
+ int i;
+
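+ /* Only 32-bit multi-chunk buffers need their page list vmapped here;
+ * in the other cases buf->direct.buf is assumed to already be usable. */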
+ if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+ return 0;
+
+ pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->nbufs; ++i)
+ pages[i] = virt_to_page(buf->page_list[i].buf);
+
+ buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+ kfree(pages);
+ if (!buf->direct.buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
+{
+ if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+ return;
+
+ vunmap(buf->direct.buf);
+}
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
new file mode 100644
index 00000000000..6232227f56c
--- /dev/null
+++ b/drivers/net/mlx4/en_rx.c
@@ -0,0 +1,1080 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+
+#include "mlx4_en.h"
+
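+/* Each SRQ WQE occupies 1 << wqe_shift bytes, so the n-th WQE starts at
+ * byte offset n << wqe_shift within the ring buffer. */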
+static void *get_wqe(struct mlx4_en_rx_ring *ring, int n)
+{
+ int offset = n << ring->srq.wqe_shift;
+ return ring->buf + offset;
+}
+
+static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
+{
+ return;
+}
+
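+/* inet_lro callback: locate the Ethernet, IP, and TCP headers within the
+ * first fragment so the LRO manager can aggregate IPv4/TCP packets. */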
+static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
+ void **ip_hdr, void **tcpudp_hdr,
+ u64 *hdr_flags, void *priv)
+{
+ *mac_hdr = page_address(frags->page) + frags->page_offset;
+ *ip_hdr = *mac_hdr + ETH_HLEN;
+ *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
+ *hdr_flags = LRO_IPV4 | LRO_TCP;
+
+ return 0;
+}
+
+static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+ struct skb_frag_struct *skb_frags,
+ struct mlx4_en_rx_alloc *ring_alloc,
+ int i)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+ struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
+ struct page *page;
+ dma_addr_t dma;
+
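+ /* Fragments are carved out of a shared higher-order page: hand the
+ * current page (with its offset) to the descriptor and either bump the
+ * offset or, if the page is used up, allocate a fresh one. */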
+ if (page_alloc->offset == frag_info->last_offset) {
+ /* Allocate new page */
+ page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
+ if (!page)
+ return -ENOMEM;
+
+ skb_frags[i].page = page_alloc->page;
+ skb_frags[i].page_offset = page_alloc->offset;
+ page_alloc->page = page;
+ page_alloc->offset = frag_info->frag_align;
+ } else {
+ page = page_alloc->page;
+ get_page(page);
+
+ skb_frags[i].page = page;
+ skb_frags[i].page_offset = page_alloc->offset;
+ page_alloc->offset += frag_info->frag_stride;
+ }
+ dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
+ skb_frags[i].page_offset, frag_info->frag_size,
+ PCI_DMA_FROMDEVICE);
+ rx_desc->data[i].addr = cpu_to_be64(dma);
+ return 0;
+}
+
+static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_rx_alloc *page_alloc;
+ int i;
+
+ for (i = 0; i < priv->num_frags; i++) {
+ page_alloc = &ring->page_alloc[i];
+ page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
+ MLX4_EN_ALLOC_ORDER);
+ if (!page_alloc->page)
+ goto out;
+
+ page_alloc->offset = priv->frag_info[i].frag_align;
+ mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
+ i, page_alloc->page);
+ }
+ return 0;
+
+out:
+ while (i--) {
+ page_alloc = &ring->page_alloc[i];
+ put_page(page_alloc->page);
+ page_alloc->page = NULL;
+ }
+ return -ENOMEM;
+}
+
+static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_rx_alloc *page_alloc;
+ int i;
+
+ for (i = 0; i < priv->num_frags; i++) {
+ page_alloc = &ring->page_alloc[i];
+ mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
+ i, page_count(page_alloc->page));
+
+ put_page(page_alloc->page);
+ page_alloc->page = NULL;
+ }
+}
+
+
+static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring, int index)
+{
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
+ struct skb_frag_struct *skb_frags = ring->rx_info +
+ (index << priv->log_rx_info);
+ int possible_frags;
+ int i;
+
+ /* Pre-link descriptor */
+ rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);
+
+ /* Set size and memtype fields */
+ for (i = 0; i < priv->num_frags; i++) {
+ skb_frags[i].size = priv->frag_info[i].frag_size;
+ rx_desc->data[i].byte_count =
+ cpu_to_be32(priv->frag_info[i].frag_size);
+ rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
+ }
+
+ /* If the number of used fragments does not fill up the ring stride,
+ * remaining (unused) fragments must be padded with null address/size
+ * and a special memory key */
+ possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
+ for (i = priv->num_frags; i < possible_frags; i++) {
+ rx_desc->data[i].byte_count = 0;
+ rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
+ rx_desc->data[i].addr = 0;
+ }
+}
+
+
+static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring, int index)
+{
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
+ struct skb_frag_struct *skb_frags = ring->rx_info +
+ (index << priv->log_rx_info);
+ int i;
+
+ for (i = 0; i < priv->num_frags; i++)
+ if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
+ goto err;
+
+ return 0;
+
+err:
+ while (i--)
+ put_page(skb_frags[i].page);
+ return -ENOMEM;
+}
+
+static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
+{
+ *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
+}
+
+static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rx_ring *ring;
+ int ring_ind;
+ int buf_ind;
+
+ for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+ ring = &priv->rx_ring[ring_ind];
+
+ if (mlx4_en_prepare_rx_desc(priv, ring,
+ ring->actual_size)) {
+ if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
+ mlx4_err(mdev, "Failed to allocate "
+ "enough rx buffers\n");
+ return -ENOMEM;
+ } else {
+ if (netif_msg_rx_err(priv))
+ mlx4_warn(mdev,
+ "Only %d buffers allocated\n",
+ ring->actual_size);
+ goto out;
+ }
+ }
+ ring->actual_size++;
+ ring->prod++;
+ }
+ }
+out:
+ return 0;
+}
+
+static int mlx4_en_fill_rx_buf(struct net_device *dev,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int num = 0;
+ int err;
+
+ while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
+ err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
+ ring->size_mask);
+ if (err) {
+ if (netif_msg_rx_err(priv))
+ mlx4_warn(priv->mdev,
+ "Failed preparing rx descriptor\n");
+ priv->port_stats.rx_alloc_failed++;
+ break;
+ }
+ ++num;
+ ++ring->prod;
+ }
+ if ((u32) (ring->prod - ring->cons) == ring->size)
+ ring->full = 1;
+
+ return num;
+}
+
+static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct skb_frag_struct *skb_frags;
+ struct mlx4_en_rx_desc *rx_desc;
+ dma_addr_t dma;
+ int index;
+ int nr;
+
+ mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
+ ring->cons, ring->prod);
+
+ /* Unmap and free Rx buffers */
+ BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
+ while (ring->cons != ring->prod) {
+ index = ring->cons & ring->size_mask;
+ rx_desc = ring->buf + (index << ring->log_stride);
+ skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+
+ for (nr = 0; nr < priv->num_frags; nr++) {
+ mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+ mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
+ pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+ PCI_DMA_FROMDEVICE);
+ put_page(skb_frags[nr].page);
+ }
+ ++ring->cons;
+ }
+}
+
+
+void mlx4_en_rx_refill(struct work_struct *work)
+{
+ struct delayed_work *delay = container_of(work, struct delayed_work, work);
+ struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
+ refill_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+ struct mlx4_en_rx_ring *ring;
+ int need_refill = 0;
+ int i;
+
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up || !priv->port_up)
+ goto out;
+
+ /* We only get here if there are no receive buffers, so we can't race
+ * with Rx interrupts while filling buffers */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ ring = &priv->rx_ring[i];
+ if (ring->need_refill) {
+ if (mlx4_en_fill_rx_buf(dev, ring)) {
+ ring->need_refill = 0;
+ mlx4_en_update_rx_prod_db(ring);
+ } else
+ need_refill = 1;
+ }
+ }
+ if (need_refill)
+ queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
+
+out:
+ mutex_unlock(&mdev->state_lock);
+}
+
+
+int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+ int tmp;
+
+ /* Sanity check SRQ size before proceeding */
+ if (size >= mdev->dev->caps.max_srq_wqes)
+ return -EINVAL;
+
+ ring->prod = 0;
+ ring->cons = 0;
+ ring->size = size;
+ ring->size_mask = size - 1;
+ ring->stride = stride;
+ ring->log_stride = ffs(ring->stride) - 1;
+ ring->buf_size = ring->size * ring->stride;
+
+ tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
+ sizeof(struct skb_frag_struct));
+ ring->rx_info = vmalloc(tmp);
+ if (!ring->rx_info) {
+ mlx4_err(mdev, "Failed allocating rx_info ring\n");
+ return -ENOMEM;
+ }
+ mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
+ ring->rx_info, tmp);
+
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
+ ring->buf_size, 2 * PAGE_SIZE);
+ if (err)
+ goto err_ring;
+
+ err = mlx4_en_map_buffer(&ring->wqres.buf);
+ if (err) {
+ mlx4_err(mdev, "Failed to map RX buffer\n");
+ goto err_hwq;
+ }
+ ring->buf = ring->wqres.buf.direct.buf;
+
+ /* Configure the LRO manager */
+ memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
+ ring->lro.dev = priv->dev;
+ ring->lro.features = LRO_F_NAPI;
+ ring->lro.frag_align_pad = NET_IP_ALIGN;
+ ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
+ ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+ ring->lro.max_desc = mdev->profile.num_lro;
+ ring->lro.max_aggr = MAX_SKB_FRAGS;
+ ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
+ sizeof(struct net_lro_desc),
+ GFP_KERNEL);
+ if (!ring->lro.lro_arr) {
+ mlx4_err(mdev, "Failed to allocate lro array\n");
+ goto err_map;
+ }
+ ring->lro.get_frag_header = mlx4_en_get_frag_header;
+
+ return 0;
+
+err_map:
+ mlx4_en_unmap_buffer(&ring->wqres.buf);
+err_hwq:
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+err_ring:
+ vfree(ring->rx_info);
+ ring->rx_info = NULL;
+ return err;
+}
+
+int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_wqe_srq_next_seg *next;
+ struct mlx4_en_rx_ring *ring;
+ int i;
+ int ring_ind;
+ int err;
+ int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * priv->num_frags);
+ int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE;
+
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+ ring = &priv->rx_ring[ring_ind];
+
+ ring->prod = 0;
+ ring->cons = 0;
+ ring->actual_size = 0;
+ ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+
+ ring->stride = stride;
+ ring->log_stride = ffs(ring->stride) - 1;
+ ring->buf_size = ring->size * ring->stride;
+
+ memset(ring->buf, 0, ring->buf_size);
+ mlx4_en_update_rx_prod_db(ring);
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->size; i++)
+ mlx4_en_init_rx_desc(priv, ring, i);
+
+ /* Initialize page allocators */
+ err = mlx4_en_init_allocator(priv, ring);
+ if (err) {
+ mlx4_err(mdev, "Failed initializing ring allocator\n");
+ goto err_allocator;
+ }
+
+ /* Fill Rx buffers */
+ ring->full = 0;
+ }
+ if (mlx4_en_fill_rx_buffers(priv))
+ goto err_buffers;
+
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+ ring = &priv->rx_ring[ring_ind];
+
+ mlx4_en_update_rx_prod_db(ring);
+
+ /* Configure SRQ representing the ring */
+ ring->srq.max = ring->size;
+ ring->srq.max_gs = max_gs;
+ ring->srq.wqe_shift = ilog2(ring->stride);
+
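+ /* Chain the SRQ WQEs into a circular list: each entry points at the
+ * next one (wrapping at srq.max), giving the SRQ its free list of
+ * receive WQEs before buffers are posted. */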
+ for (i = 0; i < ring->srq.max; ++i) {
+ next = get_wqe(ring, i);
+ next->next_wqe_index =
+ cpu_to_be16((i + 1) & (ring->srq.max - 1));
+ }
+
+ err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
+ ring->wqres.db.dma, &ring->srq);
+ if (err){
+ mlx4_err(mdev, "Failed to allocate srq\n");
+ goto err_srq;
+ }
+ ring->srq.event = mlx4_en_srq_event;
+ }
+
+ return 0;
+
+err_srq:
+ while (ring_ind >= 0) {
+ ring = &priv->rx_ring[ring_ind];
+ mlx4_srq_free(mdev->dev, &ring->srq);
+ ring_ind--;
+ }
+
+err_buffers:
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
+ mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+
+ ring_ind = priv->rx_ring_num - 1;
+err_allocator:
+ while (ring_ind >= 0) {
+ mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+ ring_ind--;
+ }
+ return err;
+}
+
+void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ kfree(ring->lro.lro_arr);
+ mlx4_en_unmap_buffer(&ring->wqres.buf);
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ vfree(ring->rx_info);
+ ring->rx_info = NULL;
+}
+
+void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ mlx4_srq_free(mdev->dev, &ring->srq);
+ mlx4_en_free_rx_buf(priv, ring);
+ mlx4_en_destroy_allocator(priv, ring);
+}
+
+
+/* Unmap a completed descriptor and free unused pages */
+static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+ struct skb_frag_struct *skb_frags,
+ struct skb_frag_struct *skb_frags_rx,
+ struct mlx4_en_rx_alloc *page_alloc,
+ int length)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_frag_info *frag_info;
+ int nr;
+ dma_addr_t dma;
+
+ /* Collect used fragments while replacing them in the HW descriptors */
+ for (nr = 0; nr < priv->num_frags; nr++) {
+ frag_info = &priv->frag_info[nr];
+ if (length <= frag_info->frag_prefix_size)
+ break;
+
+ /* Save page reference in skb */
+ skb_frags_rx[nr].page = skb_frags[nr].page;
+ skb_frags_rx[nr].size = skb_frags[nr].size;
+ skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+ /* Allocate a replacement page */
+ if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
+ goto fail;
+
+ /* Unmap buffer */
+ pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+ PCI_DMA_FROMDEVICE);
+ }
+ /* Adjust size of last fragment to match actual length */
+ skb_frags_rx[nr - 1].size = length -
+ priv->frag_info[nr - 1].frag_prefix_size;
+ return nr;
+
+fail:
+ /* Drop all accumulated fragments (which have already been replaced in
+ * the descriptor) of this packet; remaining fragments are reused... */
+ while (nr > 0) {
+ nr--;
+ put_page(skb_frags_rx[nr].page);
+ }
+ return 0;
+}
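+
+ /* Note on the recycling scheme above: fragments handed to the stack keep
+ * their page references, and mlx4_en_alloc_frag() installs a fresh page
+ * in the same descriptor slot, so the Rx descriptor can be reposted
+ * right away. If a replacement allocation fails, the fragments already
+ * collected for this packet are released and the packet is dropped,
+ * while the untouched fragments stay mapped in the descriptor. */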
+
+
+static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+ struct skb_frag_struct *skb_frags,
+ struct mlx4_en_rx_alloc *page_alloc,
+ unsigned int length)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct sk_buff *skb;
+ void *va;
+ int used_frags;
+ dma_addr_t dma;
+
+ skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
+ if (!skb) {
+ mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
+ return NULL;
+ }
+ skb->dev = priv->dev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb->len = length;
+ skb->truesize = length + sizeof(struct sk_buff);
+
+ /* Get a pointer to the first fragment so we can copy the headers into
+ * the (linear part of the) skb */
+ va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+
+ if (length <= SMALL_PACKET_SIZE) {
+ /* We are copying all relevant data to the skb - temporarily
+ * sync buffers for the copy */
+ dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
+ length, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, va, length);
+ dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
+ length, DMA_FROM_DEVICE);
+ skb->tail += length;
+ } else {
+
+ /* Move relevant fragments to skb */
+ used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
+ skb_shinfo(skb)->frags,
+ page_alloc, length);
+ skb_shinfo(skb)->nr_frags = used_frags;
+
+ /* Copy headers into the skb linear buffer */
+ memcpy(skb->data, va, HEADER_COPY_SIZE);
+ skb->tail += HEADER_COPY_SIZE;
+
+ /* Skip headers in first fragment */
+ skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
+
+ /* Adjust size of first fragment */
+ skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
+ skb->data_len = length - HEADER_COPY_SIZE;
+ }
+ return skb;
+}
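+
+ /* Copy-break behaviour of mlx4_en_rx_skb(): frames up to
+ * SMALL_PACKET_SIZE are copied whole into the skb's linear area (the
+ * page stays with the ring), while larger frames only get
+ * HEADER_COPY_SIZE bytes pulled into the linear area and the rest
+ * attached as page fragments; both thresholds are defined in the driver
+ * header (mlx4_en.h). */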
+
+static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
+ int from, int to, int num)
+{
+ struct skb_frag_struct *skb_frags_from;
+ struct skb_frag_struct *skb_frags_to;
+ struct mlx4_en_rx_desc *rx_desc_from;
+ struct mlx4_en_rx_desc *rx_desc_to;
+ int from_index, to_index;
+ int nr, i;
+
+ for (i = 0; i < num; i++) {
+ from_index = (from + i) & ring->size_mask;
+ to_index = (to + i) & ring->size_mask;
+ skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
+ skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
+ rx_desc_from = ring->buf + (from_index << ring->log_stride);
+ rx_desc_to = ring->buf + (to_index << ring->log_stride);
+
+ for (nr = 0; nr < priv->num_frags; nr++) {
+ skb_frags_to[nr].page = skb_frags_from[nr].page;
+ skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
+ rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
+ }
+ }
+}
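+
+ /* Used by mlx4_en_process_rx_cq() when the ring is not marked full: the
+ * page pointers and DMA addresses of the descriptors just polled at the
+ * consumer side are duplicated into the slots about to be produced, so
+ * the pages already attached to those descriptors can be reused when
+ * the ring is refilled. */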
+
+
+int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_cqe *cqe;
+ struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+ struct skb_frag_struct *skb_frags;
+ struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
+ struct mlx4_en_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ int index;
+ int nr;
+ unsigned int length;
+ int polled = 0;
+ int ip_summed;
+
+ if (!priv->port_up)
+ return 0;
+
+ /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
+ * descriptor offset can be deduced from the CQE index instead of
+ * reading 'cqe->index' */
+ index = cq->mcq.cons_index & ring->size_mask;
+ cqe = &cq->buf[index];
+
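+ /* CQE ownership: the hardware flips the owner bit each time it wraps
+ * the CQ, and software tracks its own lap via (cons_index & cq->size);
+ * a CQE is ready for software while the two agree, hence the XNOR test
+ * below - once they differ there are no more new completions. */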
+ /* Process all completed CQEs */
+ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+ cq->mcq.cons_index & cq->size)) {
+
+ skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ rx_desc = ring->buf + (index << ring->log_stride);
+
+ /*
+ * make sure we read the CQE after we read the ownership bit
+ */
+ rmb();
+
+ /* Drop packet on bad receive or bad checksum */
+ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+ MLX4_CQE_OPCODE_ERROR)) {
+ mlx4_err(mdev, "CQE completed in error - vendor "
+ "syndrome:%d syndrome:%d\n",
+ ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
+ ((struct mlx4_err_cqe *) cqe)->syndrome);
+ goto next;
+ }
+ if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
+ mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
+ goto next;
+ }
+
+ /*
+ * Packet is OK - process it.
+ */
+ length = be32_to_cpu(cqe->byte_cnt);
+ ring->bytes += length;
+ ring->packets++;
+
+ if (likely(priv->rx_csum)) {
+ if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+ (cqe->checksum == cpu_to_be16(0xffff))) {
+ priv->port_stats.rx_chksum_good++;
+ /* This packet is eligible for LRO if it is:
+ * - DIX Ethernet (type interpretation)
+ * - TCP/IP (v4)
+ * - without IP options
+ * - not an IP fragment */
+ if (mlx4_en_can_lro(cqe->status) &&
+ dev->features & NETIF_F_LRO) {
+
+ nr = mlx4_en_complete_rx_desc(
+ priv, rx_desc,
+ skb_frags, lro_frags,
+ ring->page_alloc, length);
+ if (!nr)
+ goto next;
+
+ if (priv->vlgrp && (cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
+ lro_vlan_hwaccel_receive_frags(
+ &ring->lro, lro_frags,
+ length, length,
+ priv->vlgrp,
+ be16_to_cpu(cqe->sl_vid),
+ NULL, 0);
+ } else
+ lro_receive_frags(&ring->lro,
+ lro_frags,
+ length,
+ length,
+ NULL, 0);
+
+ goto next;
+ }
+
+ /* LRO not possible, complete processing here */
+ ip_summed = CHECKSUM_UNNECESSARY;
+ INC_PERF_COUNTER(priv->pstats.lro_misses);
+ } else {
+ ip_summed = CHECKSUM_NONE;
+ priv->port_stats.rx_chksum_none++;
+ }
+ } else {
+ ip_summed = CHECKSUM_NONE;
+ priv->port_stats.rx_chksum_none++;
+ }
+
+ skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
+ ring->page_alloc, length);
+ if (!skb) {
+ priv->stats.rx_dropped++;
+ goto next;
+ }
+
+ skb->ip_summed = ip_summed;
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Push it up the stack */
+ if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_VLAN_PRESENT_MASK)) {
+ vlan_hwaccel_receive_skb(skb, priv->vlgrp,
+ be16_to_cpu(cqe->sl_vid));
+ } else
+ netif_receive_skb(skb);
+
+ dev->last_rx = jiffies;
+
+next:
+ ++cq->mcq.cons_index;
+ index = (cq->mcq.cons_index) & ring->size_mask;
+ cqe = &cq->buf[index];
+ if (++polled == budget) {
+ /* We are here because we reached the NAPI budget -
+ * flush only pending LRO sessions */
+ lro_flush_all(&ring->lro);
+ goto out;
+ }
+ }
+
+ /* If CQ is empty flush all LRO sessions unconditionally */
+ lro_flush_all(&ring->lro);
+
+out:
+ AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
+ mlx4_cq_set_ci(&cq->mcq);
+ wmb(); /* ensure HW sees CQ consumer before we post new buffers */
+ ring->cons = cq->mcq.cons_index;
+ ring->prod += polled; /* Polled descriptors were reallocated in place */
+ if (unlikely(!ring->full)) {
+ mlx4_en_copy_desc(priv, ring, ring->cons - polled,
+ ring->prod - polled, polled);
+ mlx4_en_fill_rx_buf(dev, ring);
+ }
+ mlx4_en_update_rx_prod_db(ring);
+ return polled;
+}
+
+
+void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+{
+ struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
+ struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+
+ if (priv->port_up)
+ netif_rx_schedule(cq->dev, &cq->napi);
+ else
+ mlx4_en_arm_cq(priv, cq);
+}
+
+/* Rx CQ polling - called by NAPI */
+int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+{
+ struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+ struct net_device *dev = cq->dev;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int done;
+
+ done = mlx4_en_process_rx_cq(dev, cq, budget);
+
+ /* If we used up all the quota - we're probably not done yet... */
+ if (done == budget)
+ INC_PERF_COUNTER(priv->pstats.napi_quota);
+ else {
+ /* Done for now */
+ netif_rx_complete(dev, napi);
+ mlx4_en_arm_cq(priv, cq);
+ }
+ return done;
+}
+
+
+/* Calculate the last offset position that accommodates a full fragment
+ * (assuming fragment size = stride - align) */
+static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
+{
+ u16 res = MLX4_EN_ALLOC_SIZE % stride;
+ u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
+
+ mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
+ "res:%d offset:%d\n", stride, align, res, offset);
+ return offset;
+}
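+
+/* Worked example (MLX4_EN_ALLOC_SIZE taken as 4096 here purely for
+ * illustration; the real value is set in mlx4_en.h): with stride = 1536
+ * and align = 0, res = 4096 % 1536 = 1024 and offset = 4096 - 1536 - 1024
+ * + 0 = 1536, i.e. the last complete fragment in an allocation starts at
+ * offset 1536 and the trailing 1024 bytes are never used. */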
+
+
+static int frag_sizes[] = {
+ FRAG_SZ0,
+ FRAG_SZ1,
+ FRAG_SZ2,
+ FRAG_SZ3
+};
+
+void mlx4_en_calc_rx_buf(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
+ int buf_size = 0;
+ int i = 0;
+
+ while (buf_size < eff_mtu) {
+ priv->frag_info[i].frag_size =
+ (eff_mtu > buf_size + frag_sizes[i]) ?
+ frag_sizes[i] : eff_mtu - buf_size;
+ priv->frag_info[i].frag_prefix_size = buf_size;
+ if (!i) {
+ priv->frag_info[i].frag_align = NET_IP_ALIGN;
+ priv->frag_info[i].frag_stride =
+ ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
+ } else {
+ priv->frag_info[i].frag_align = 0;
+ priv->frag_info[i].frag_stride =
+ ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
+ }
+ priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
+ priv, priv->frag_info[i].frag_stride,
+ priv->frag_info[i].frag_align);
+ buf_size += priv->frag_info[i].frag_size;
+ i++;
+ }
+
+ priv->num_frags = i;
+ priv->rx_skb_size = eff_mtu;
+ priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
+
+ mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
+ "num_frags:%d):\n", eff_mtu, priv->num_frags);
+ for (i = 0; i < priv->num_frags; i++) {
+ mlx4_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d "
+ "stride:%d last_offset:%d\n", i,
+ priv->frag_info[i].frag_size,
+ priv->frag_info[i].frag_prefix_size,
+ priv->frag_info[i].frag_align,
+ priv->frag_info[i].frag_stride,
+ priv->frag_info[i].last_offset);
+ }
+}
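+
+/* Worked example (FRAG_SZ0 = 512, FRAG_SZ1 = 1024 and ETH_LLC_SNAP_SIZE = 8
+ * are assumed here for illustration; the real constants live in mlx4_en.h):
+ * a 1500 byte MTU gives eff_mtu = 1500 + 14 + 4 + 8 = 1526, which the loop
+ * above splits into frag 0 of 512 bytes and frag 1 of 1014 bytes
+ * (prefix 512), so num_frags = 2 and rx_skb_size = 1526. */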
+
+/* RSS related functions */
+
+/* Calculate rss size and map each entry in rss table to rx ring */
+void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
+ struct mlx4_en_rss_map *rss_map,
+ int num_entries, int num_rings)
+{
+ int i;
+
+ rss_map->size = roundup_pow_of_two(num_entries);
+ mlx4_dbg(DRV, priv, "Setting default RSS map of %d entries\n",
+ rss_map->size);
+
+ for (i = 0; i < rss_map->size; i++) {
+ rss_map->map[i] = i % num_rings;
+ mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
+ }
+}
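+
+/* Example straight from the loop above: num_entries = 10 and num_rings = 4
+ * give an RSS table of roundup_pow_of_two(10) = 16 entries mapped
+ * round-robin as 0,1,2,3,0,1,2,3,... so each ring serves four entries. */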
+
+static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
+{
+ return;
+}
+
+
+static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
+ int qpn, int srqn, int cqn,
+ enum mlx4_qp_state *state,
+ struct mlx4_qp *qp)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_qp_context *context;
+ int err = 0;
+
+ context = kmalloc(sizeof *context, GFP_KERNEL);
+ if (!context) {
+ mlx4_err(mdev, "Failed to allocate qp context\n");
+ return -ENOMEM;
+ }
+
+ err = mlx4_qp_alloc(mdev->dev, qpn, qp);
+ if (err) {
+ mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
+ goto out;
+ }
+ qp->event = mlx4_en_sqp_event;
+
+ memset(context, 0, sizeof *context);
+ mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context);
+
+ err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state);
+ if (err) {
+ mlx4_qp_remove(mdev->dev, qp);
+ mlx4_qp_free(mdev->dev, qp);
+ }
+out:
+ kfree(context);
+ return err;
+}
+
+/* Allocate rx qp's and configure them according to rss map */
+int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+ struct mlx4_qp_context context;
+ struct mlx4_en_rss_context *rss_context;
+ void *ptr;
+ int rss_xor = mdev->profile.rss_xor;
+ u8 rss_mask = mdev->profile.rss_mask;
+ int i, srqn, qpn, cqn;
+ int err = 0;
+ int good_qps = 0;
+
+ mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
+ err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
+ rss_map->size, &rss_map->base_qpn);
+ if (err) {
+ mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
+ rss_map->size, priv->port);
+ return err;
+ }
+
+ for (i = 0; i < rss_map->size; i++) {
+ cqn = priv->rx_ring[rss_map->map[i]].cqn;
+ srqn = priv->rx_ring[rss_map->map[i]].srq.srqn;
+ qpn = rss_map->base_qpn + i;
+ err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
+ &rss_map->state[i],
+ &rss_map->qps[i]);
+ if (err)
+ goto rss_err;
+
+ ++good_qps;
+ }
+
+ /* Configure RSS indirection qp */
+ err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
+ if (err) {
+ mlx4_err(mdev, "Failed to reserve range for RSS "
+ "indirection qp\n");
+ goto rss_err;
+ }
+ err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
+ if (err) {
+ mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
+ goto reserve_err;
+ }
+ rss_map->indir_qp.event = mlx4_en_sqp_event;
+ mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
+ priv->rx_ring[0].cqn, 0, &context);
+
+ ptr = ((void *) &context) + 0x3c;
+ rss_context = (struct mlx4_en_rss_context *) ptr;
+ rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size) << 24 |
+ (rss_map->base_qpn));
+ rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
+ rss_context->hash_fn = rss_xor & 0x3;
+ rss_context->flags = rss_mask << 2;
+
+ err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
+ &rss_map->indir_qp, &rss_map->indir_state);
+ if (err)
+ goto indir_err;
+
+ return 0;
+
+indir_err:
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
+ mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
+ mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+reserve_err:
+ mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
+rss_err:
+ for (i = 0; i < good_qps; i++) {
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
+ MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
+ mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
+ mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
+ }
+ mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
+ return err;
+}
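+
+/* Layout set up above, in short: one QP per RSS table entry, each tied
+ * through its context to the CQ and SRQ of the ring that entry maps to,
+ * plus a single indirection QP whose context carries the RSS parameters
+ * (table base QPN, log2 of the table size, hash function and field mask)
+ * used to spread incoming flows across those QPs. */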
+
+void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+ int i;
+
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
+ mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
+ mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+ mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
+
+ for (i = 0; i < rss_map->size; i++) {
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
+ MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
+ mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
+ mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
+ }
+ mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
+}
+
+
+
+
+
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
new file mode 100644
index 00000000000..8592f8fb847
--- /dev/null
+++ b/drivers/net/mlx4/en_tx.c
@@ -0,0 +1,820 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+
+#include "mlx4_en.h"
+
+enum {
+ MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+};
+
+static int inline_thold __read_mostly = MAX_INLINE;
+
+module_param_named(inline_thold, inline_thold, int, 0444);
+MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
+
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring, u32 size,
+ u16 stride)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int tmp;
+ int err;
+
+ ring->size = size;
+ ring->size_mask = size - 1;
+ ring->stride = stride;
+
+ inline_thold = min(inline_thold, MAX_INLINE);
+
+ spin_lock_init(&ring->comp_lock);
+
+ tmp = size * sizeof(struct mlx4_en_tx_info);
+ ring->tx_info = vmalloc(tmp);
+ if (!ring->tx_info) {
+ mlx4_err(mdev, "Failed allocating tx_info ring\n");
+ return -ENOMEM;
+ }
+ mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+ ring->tx_info, tmp);
+
+ ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+ if (!ring->bounce_buf) {
+ mlx4_err(mdev, "Failed allocating bounce buffer\n");
+ err = -ENOMEM;
+ goto err_tx;
+ }
+ ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
+
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
+ 2 * PAGE_SIZE);
+ if (err) {
+ mlx4_err(mdev, "Failed allocating hwq resources\n");
+ goto err_bounce;
+ }
+
+ err = mlx4_en_map_buffer(&ring->wqres.buf);
+ if (err) {
+ mlx4_err(mdev, "Failed to map TX buffer\n");
+ goto err_hwq_res;
+ }
+
+ ring->buf = ring->wqres.buf.direct.buf;
+
+ mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
+ "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
+ ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+
+ err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
+ if (err) {
+ mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
+ goto err_map;
+ }
+
+ err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
+ if (err) {
+ mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
+ goto err_reserve;
+ }
+
+ return 0;
+
+err_reserve:
+ mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
+err_map:
+ mlx4_en_unmap_buffer(&ring->wqres.buf);
+err_hwq_res:
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+err_bounce:
+ kfree(ring->bounce_buf);
+ ring->bounce_buf = NULL;
+err_tx:
+ vfree(ring->tx_info);
+ ring->tx_info = NULL;
+ return err;
+}
+
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
+
+ mlx4_qp_remove(mdev->dev, &ring->qp);
+ mlx4_qp_free(mdev->dev, &ring->qp);
+ mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
+ mlx4_en_unmap_buffer(&ring->wqres.buf);
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ kfree(ring->bounce_buf);
+ ring->bounce_buf = NULL;
+ vfree(ring->tx_info);
+ ring->tx_info = NULL;
+}
+
+int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int cq, int srqn)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ ring->cqn = cq;
+ ring->prod = 0;
+ ring->cons = 0xffffffff;
+ ring->last_nr_txbb = 1;
+ ring->poll_cnt = 0;
+ ring->blocked = 0;
+ memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
+ memset(ring->buf, 0, ring->buf_size);
+
+ ring->qp_state = MLX4_QP_STATE_RST;
+ ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
+
+ mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
+ ring->cqn, srqn, &ring->context);
+
+ err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
+ &ring->qp, &ring->qp_state);
+
+ return err;
+}
+
+void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
+}
+
+
+static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+ struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+ struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
+ struct sk_buff *skb = tx_info->skb;
+ struct skb_frag_struct *frag;
+ void *end = ring->buf + ring->buf_size;
+ int frags = skb_shinfo(skb)->nr_frags;
+ int i;
+ __be32 *ptr = (__be32 *)tx_desc;
+ __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+
+ /* Optimize the common case when there are no wraparounds */
+ if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+ if (tx_info->linear) {
+ pci_unmap_single(mdev->pdev,
+ (dma_addr_t) be64_to_cpu(data->addr),
+ be32_to_cpu(data->byte_count),
+ PCI_DMA_TODEVICE);
+ ++data;
+ }
+
+ for (i = 0; i < frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ pci_unmap_page(mdev->pdev,
+ (dma_addr_t) be64_to_cpu(data[i].addr),
+ frag->size, PCI_DMA_TODEVICE);
+ }
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ }
+
+ } else {
+ if ((void *) data >= end) {
+ data = (struct mlx4_wqe_data_seg *)
+ (ring->buf + ((void *) data - end));
+ }
+
+ if (tx_info->linear) {
+ pci_unmap_single(mdev->pdev,
+ (dma_addr_t) be64_to_cpu(data->addr),
+ be32_to_cpu(data->byte_count),
+ PCI_DMA_TODEVICE);
+ ++data;
+ }
+
+ for (i = 0; i < frags; i++) {
+ /* Check for wraparound before unmapping */
+ if ((void *) data >= end)
+ data = (struct mlx4_wqe_data_seg *) ring->buf;
+ frag = &skb_shinfo(skb)->frags[i];
+ pci_unmap_page(mdev->pdev,
+ (dma_addr_t) be64_to_cpu(data->addr),
+ frag->size, PCI_DMA_TODEVICE);
+ }
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ if ((void *) ptr >= end) {
+ ptr = ring->buf;
+ stamp ^= cpu_to_be32(0x80000000);
+ }
+ }
+
+ }
+ dev_kfree_skb_any(skb);
+ return tx_info->nr_txbb;
+}
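+
+/* Notes on the free path above: the else branch handles descriptors that
+ * wrap past the end of the ring buffer, re-aiming the data and stamp
+ * pointers back at ring->buf and flipping the stamp's owner bit once the
+ * wrap is crossed. The STAMP_VAL pattern written over every freed TXBB
+ * marks it as reclaimed; the owner bit folded into the stamp follows the
+ * ring pass, presumably so a stale TXBB is never mistaken for a valid
+ * one. */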
+
+
+int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int cnt = 0;
+
+ /* Skip last polled descriptor */
+ ring->cons += ring->last_nr_txbb;
+ mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
+ ring->cons, ring->prod);
+
+ if ((u32) (ring->prod - ring->cons) > ring->size) {
+ if (netif_msg_tx_err(priv))
+ mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
+ return 0;
+ }
+
+ while (ring->cons != ring->prod) {
+ ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
+ ring->cons & ring->size_mask,
+ !!(ring->cons & ring->size));
+ ring->cons += ring->last_nr_txbb;
+ cnt++;
+ }
+
+ if (cnt)
+ mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
+
+ return cnt;
+}
+
+void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
+{
+ int block = 8 / ring_num;
+ int extra = 8 - (block * ring_num);
+ int num = 0;
+ u16 ring = 1;
+ int prio;
+
+ if (ring_num == 1) {
+ for (prio = 0; prio < 8; prio++)
+ prio_map[prio] = 0;
+ return;
+ }
+
+ for (prio = 0; prio < 8; prio++) {
+ if (extra && (num == block + 1)) {
+ ring++;
+ num = 0;
+ extra--;
+ } else if (!extra && (num == block)) {
+ ring++;
+ num = 0;
+ }
+ prio_map[prio] = ring;
+ mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
+ num++;
+ }
+}
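+
+/* Worked example of the distribution above: ring_num = 3 gives block = 2
+ * and extra = 2, so priorities 0-2 map to ring 1, 3-5 to ring 2 and 6-7 to
+ * ring 3. Ring 0 is left out of the map (except in the single-ring case)
+ * and is used for untagged traffic, see get_vlan_info() below. */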
+
+static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_cq *mcq = &cq->mcq;
+ struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+ struct mlx4_cqe *cqe = cq->buf;
+ u16 index;
+ u16 new_index;
+ u32 txbbs_skipped = 0;
+ u32 cq_last_sav;
+
+ /* index always points to the first TXBB of the last polled descriptor */
+ index = ring->cons & ring->size_mask;
+ new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
+ if (index == new_index)
+ return;
+
+ if (!priv->port_up)
+ return;
+
+ /*
+ * We use a two-stage loop:
+ * - the first samples the HW-updated CQE
+ * - the second frees TXBBs until the last sample
+ * This lets us amortize CQE cache misses, while still polling the CQ
+ * until it is quiescent.
+ */
+ cq_last_sav = mcq->cons_index;
+ do {
+ do {
+ /* Skip over last polled CQE */
+ index = (index + ring->last_nr_txbb) & ring->size_mask;
+ txbbs_skipped += ring->last_nr_txbb;
+
+ /* Poll next CQE */
+ ring->last_nr_txbb = mlx4_en_free_tx_desc(
+ priv, ring, index,
+ !!((ring->cons + txbbs_skipped) &
+ ring->size));
+ ++mcq->cons_index;
+
+ } while (index != new_index);
+
+ new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
+ } while (index != new_index);
+ AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
+ (u32) (mcq->cons_index - cq_last_sav));
+
+ /*
+ * To prevent CQ overflow we first update CQ consumer and only then
+ * the ring consumer.
+ */
+ mlx4_cq_set_ci(mcq);
+ wmb();
+ ring->cons += txbbs_skipped;
+
+ /* Wakeup Tx queue if this ring stopped it */
+ if (unlikely(ring->blocked)) {
+ if (((u32) (ring->prod - ring->cons) <=
+ ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) {
+
+ /* TODO: support multiqueue netdevs. Currently, we block
+ * when *any* ring is full. Note that:
+ * - 2 Tx rings can unblock at the same time and call
+ * netif_wake_queue(), which is OK since this
+ * operation is idempotent.
+ * - We might wake the queue just after another ring
+ * stopped it. This is no big deal because the next
+ * transmission on that ring would stop the queue.
+ */
+ ring->blocked = 0;
+ netif_wake_queue(dev);
+ priv->port_stats.wake_queue++;
+ }
+ }
+}
+
+void mlx4_en_tx_irq(struct mlx4_cq *mcq)
+{
+ struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
+ struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+ struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+
+ spin_lock_irq(&ring->comp_lock);
+ cq->armed = 0;
+ mlx4_en_process_tx_cq(cq->dev, cq);
+ if (ring->blocked)
+ mlx4_en_arm_cq(priv, cq);
+ else
+ mod_timer(&cq->timer, jiffies + 1);
+ spin_unlock_irq(&ring->comp_lock);
+}
+
+
+void mlx4_en_poll_tx_cq(unsigned long data)
+{
+ struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
+ struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+ struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+ u32 inflight;
+
+ INC_PERF_COUNTER(priv->pstats.tx_poll);
+
+ netif_tx_lock(priv->dev);
+ spin_lock_irq(&ring->comp_lock);
+ mlx4_en_process_tx_cq(cq->dev, cq);
+ inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
+
+ /* If there are still packets in flight and the timer has not already
+ * been scheduled by the Tx routine then schedule it here to guarantee
+ * completion processing of these packets */
+ if (inflight && priv->port_up)
+ mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+
+ spin_unlock_irq(&ring->comp_lock);
+ netif_tx_unlock(priv->dev);
+}
+
+static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ u32 index,
+ unsigned int desc_size)
+{
+ u32 copy = (ring->size - index) * TXBB_SIZE;
+ int i;
+
+ for (i = desc_size - copy - 4; i >= 0; i -= 4) {
+ if ((i & (TXBB_SIZE - 1)) == 0)
+ wmb();
+
+ *((u32 *) (ring->buf + i)) =
+ *((u32 *) (ring->bounce_buf + copy + i));
+ }
+
+ for (i = copy - 4; i >= 4 ; i -= 4) {
+ if ((i & (TXBB_SIZE - 1)) == 0)
+ wmb();
+
+ *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
+ *((u32 *) (ring->bounce_buf + i));
+ }
+
+ /* Return real descriptor location */
+ return ring->buf + index * TXBB_SIZE;
+}
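+
+/* The copy-back above runs highest-address-first within each region and
+ * issues a wmb() whenever it steps into a new TXBB; the second loop also
+ * stops at offset 4, leaving the first dword (owner_opcode) untouched so
+ * that mlx4_en_xmit() can write it last - the hardware never observes a
+ * partially copied descriptor as valid. */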
+
+static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
+{
+ struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
+ struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+
+ /* If we don't have a pending timer, set one up to catch our recent
+ post in case the interface becomes idle */
+ if (!timer_pending(&cq->timer))
+ mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+
+ /* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
+ if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
+ mlx4_en_process_tx_cq(priv->dev, cq);
+}
+
+static void *get_frag_ptr(struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ struct page *page = frag->page;
+ void *ptr;
+
+ ptr = page_address(page);
+ if (unlikely(!ptr))
+ return NULL;
+
+ return ptr + frag->page_offset;
+}
+
+static int is_inline(struct sk_buff *skb, void **pfrag)
+{
+ void *ptr;
+
+ if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
+ if (skb_shinfo(skb)->nr_frags == 1) {
+ ptr = get_frag_ptr(skb);
+ if (unlikely(!ptr))
+ return 0;
+
+ if (pfrag)
+ *pfrag = ptr;
+
+ return 1;
+ } else if (unlikely(skb_shinfo(skb)->nr_frags))
+ return 0;
+ else
+ return 1;
+ }
+
+ return 0;
+}
+
+static int inline_size(struct sk_buff *skb)
+{
+ if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
+ <= MLX4_INLINE_ALIGN)
+ return ALIGN(skb->len + CTRL_SIZE +
+ sizeof(struct mlx4_wqe_inline_seg), 16);
+ else
+ return ALIGN(skb->len + CTRL_SIZE + 2 *
+ sizeof(struct mlx4_wqe_inline_seg), 16);
+}
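+
+/* The size above reserves a second mlx4_wqe_inline_seg header whenever the
+ * payload cannot fit below MLX4_INLINE_ALIGN: build_inline_wqe() then
+ * splits the data into two inline segments (spc bytes plus the remainder),
+ * each carrying its own byte_count word. */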
+
+static int get_real_size(struct sk_buff *skb, struct net_device *dev,
+ int *lso_header_size)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int real_size;
+
+ if (skb_is_gso(skb)) {
+ *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
+ ALIGN(*lso_header_size + 4, DS_SIZE);
+ if (unlikely(*lso_header_size != skb_headlen(skb))) {
+ /* We add a segment for the skb linear buffer only if
+ * it contains data */
+ if (*lso_header_size < skb_headlen(skb))
+ real_size += DS_SIZE;
+ else {
+ if (netif_msg_tx_err(priv))
+ mlx4_warn(mdev, "Non-linear headers\n");
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+ }
+ if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
+ if (netif_msg_tx_err(priv))
+ mlx4_warn(mdev, "LSO header size too big\n");
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+ } else {
+ *lso_header_size = 0;
+ if (!is_inline(skb, NULL))
+ real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
+ else
+ real_size = inline_size(skb);
+ }
+
+ return real_size;
+}
+
+static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
+ int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
+{
+ struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
+ int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
+
+ if (skb->len <= spc) {
+ inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+ skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
+ if (skb_shinfo(skb)->nr_frags)
+ memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
+ skb_shinfo(skb)->frags[0].size);
+
+ } else {
+ inl->byte_count = cpu_to_be32(1 << 31 | spc);
+ if (skb_headlen(skb) <= spc) {
+ skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
+ if (skb_headlen(skb) < spc) {
+ memcpy(((void *)(inl + 1)) + skb_headlen(skb),
+ fragptr, spc - skb_headlen(skb));
+ fragptr += spc - skb_headlen(skb);
+ }
+ inl = (void *) (inl + 1) + spc;
+ memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
+ } else {
+ skb_copy_from_linear_data(skb, inl + 1, spc);
+ inl = (void *) (inl + 1) + spc;
+ skb_copy_from_linear_data_offset(skb, spc, inl + 1,
+ skb_headlen(skb) - spc);
+ if (skb_shinfo(skb)->nr_frags)
+ memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
+ fragptr, skb_shinfo(skb)->frags[0].size);
+ }
+
+ wmb();
+ inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
+ }
+ tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+ tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+}
+
+static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
+ u16 *vlan_tag)
+{
+ int tx_ind;
+
+ /* Obtain VLAN information if present */
+ if (priv->vlgrp && vlan_tx_tag_present(skb)) {
+ *vlan_tag = vlan_tx_tag_get(skb);
+ /* Set the Tx ring to use according to vlan priority */
+ tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
+ } else {
+ *vlan_tag = 0;
+ tx_ind = 0;
+ }
+ return tx_ind;
+}
+
+int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring *ring;
+ struct mlx4_en_cq *cq;
+ struct mlx4_en_tx_desc *tx_desc;
+ struct mlx4_wqe_data_seg *data;
+ struct skb_frag_struct *frag;
+ struct mlx4_en_tx_info *tx_info;
+ int tx_ind = 0;
+ int nr_txbb;
+ int desc_size;
+ int real_size;
+ dma_addr_t dma;
+ u32 index;
+ __be32 op_own;
+ u16 vlan_tag;
+ int i;
+ int lso_header_size;
+ void *fragptr;
+
+ if (unlikely(!skb->len)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ real_size = get_real_size(skb, dev, &lso_header_size);
+ if (unlikely(!real_size))
+ return NETDEV_TX_OK;
+
+ /* Align descriptor to TXBB size */
+ desc_size = ALIGN(real_size, TXBB_SIZE);
+ nr_txbb = desc_size / TXBB_SIZE;
+ if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
+ if (netif_msg_tx_err(priv))
+ mlx4_warn(mdev, "Oversized header or SG list\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ tx_ind = get_vlan_info(priv, skb, &vlan_tag);
+ ring = &priv->tx_ring[tx_ind];
+
+ /* Check available TXBBs and 2K spare for prefetch */
+ if (unlikely(((int)(ring->prod - ring->cons)) >
+ ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ /* Every full Tx ring stops the whole queue.
+ * TODO: implement multi-queue support (per-queue stop) */
+ netif_stop_queue(dev);
+ ring->blocked = 1;
+ priv->port_stats.queue_stopped++;
+
+ /* Use interrupts to find out when queue opened */
+ cq = &priv->tx_cq[tx_ind];
+ mlx4_en_arm_cq(priv, cq);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Now that we know which Tx ring to use, make sure the port is up */
+ if (unlikely(!priv->port_up)) {
+ if (netif_msg_tx_err(priv))
+ mlx4_warn(mdev, "xmit: port down!\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Track current inflight packets for performance analysis */
+ AVG_PERF_COUNTER(priv->pstats.inflight_avg,
+ (u32) (ring->prod - ring->cons - 1));
+
+ /* Packet is good - grab an index and transmit it */
+ index = ring->prod & ring->size_mask;
+
+ /* See if we have enough space for whole descriptor TXBB for setting
+ * SW ownership on next descriptor; if not, use a bounce buffer. */
+ if (likely(index + nr_txbb <= ring->size))
+ tx_desc = ring->buf + index * TXBB_SIZE;
+ else
+ tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
+
+ /* Save skb in tx_info ring */
+ tx_info = &ring->tx_info[index];
+ tx_info->skb = skb;
+ tx_info->nr_txbb = nr_txbb;
+
+ /* Prepare the ctrl segment, apart from opcode+ownership which depends
+ * on whether LSO is used */
+ tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+ tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+ MLX4_WQE_CTRL_SOLICITED);
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+ MLX4_WQE_CTRL_TCP_UDP_CSUM);
+ priv->port_stats.tx_chksum_offload++;
+ }
+
+ /* Handle LSO (TSO) packets */
+ if (lso_header_size) {
+ /* Mark opcode as LSO */
+ op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
+ ((ring->prod & ring->size) ?
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+
+ /* Fill in the LSO prefix */
+ tx_desc->lso.mss_hdr_size = cpu_to_be32(
+ skb_shinfo(skb)->gso_size << 16 | lso_header_size);
+
+ /* Copy headers;
+ * note that we already verified that it is linear */
+ memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+ data = ((void *) &tx_desc->lso +
+ ALIGN(lso_header_size + 4, DS_SIZE));
+
+ priv->port_stats.tso_packets++;
+ i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
+ !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
+ ring->bytes += skb->len + (i - 1) * lso_header_size;
+ ring->packets += i;
+ } else {
+ /* Normal (Non LSO) packet */
+ op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
+ ((ring->prod & ring->size) ?
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+ data = &tx_desc->data;
+ ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
+ ring->packets++;
+
+ }
+ AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
+
+
+ /* Valid only for non-inline segments */
+ tx_info->data_offset = (void *) data - (void *) tx_desc;
+
+ tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
+ data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
+
+ if (!is_inline(skb, &fragptr)) {
+ /* Map fragments */
+ for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+ frag = &skb_shinfo(skb)->frags[i];
+ dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE);
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(frag->size);
+ --data;
+ }
+
+ /* Map linear part */
+ if (tx_info->linear) {
+ dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
+ skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
+ }
+ } else
+ build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
+
+ ring->prod += nr_txbb;
+
+ /* If we used a bounce buffer then copy descriptor back into place */
+ if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
+ tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
+
+ /* Run destructor before passing skb to HW */
+ if (likely(!skb_shared(skb)))
+ skb_orphan(skb);
+
+ /* Ensure the new descriptor hits memory
+ * before setting ownership of this descriptor to HW */
+ wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
+
+ /* Ring doorbell! */
+ wmb();
+ writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
+ dev->trans_start = jiffies;
+
+ /* Poll CQ here */
+ mlx4_en_xmit_poll(priv, tx_ind);
+
+ return 0;
+}
+
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8a8b56135a5..de169338cd9 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -558,7 +558,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
int i;
err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
- dev->caps.num_eqs - 1, dev->caps.reserved_eqs);
+ dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
if (err)
return err;
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7e32955da98..be09fdb79cb 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -88,6 +88,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
[ 8] = "P_Key violation counter",
[ 9] = "Q_Key violation counter",
[10] = "VMM",
+ [12] = "DPDP",
[16] = "MW support",
[17] = "APM support",
[18] = "Atomic ops support",
@@ -346,7 +347,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
dev_cap->max_vl[i] = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
- dev_cap->max_mtu[i] = field >> 4;
+ dev_cap->ib_mtu[i] = field >> 4;
dev_cap->max_port_width[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
dev_cap->max_gids[i] = 1 << (field & 0xf);
@@ -354,9 +355,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_pkeys[i] = 1 << (field & 0xf);
}
} else {
+#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
#define QUERY_PORT_MTU_OFFSET 0x01
+#define QUERY_PORT_ETH_MTU_OFFSET 0x02
#define QUERY_PORT_WIDTH_OFFSET 0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
+#define QUERY_PORT_MAC_OFFSET 0x08
+#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
#define QUERY_PORT_MAX_VL_OFFSET 0x0b
for (i = 1; i <= dev_cap->num_ports; ++i) {
@@ -365,8 +370,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (err)
goto out;
+ MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+ dev_cap->supported_port_types[i] = field & 3;
MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
- dev_cap->max_mtu[i] = field & 0xf;
+ dev_cap->ib_mtu[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
dev_cap->max_port_width[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
@@ -374,6 +381,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_pkeys[i] = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
dev_cap->max_vl[i] = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
+ dev_cap->log_max_macs[i] = field & 0xf;
+ dev_cap->log_max_vlans[i] = field >> 4;
+ MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
+ MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
}
}
@@ -407,7 +419,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
- dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1],
+ dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
dev_cap->max_port_width[1]);
mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
@@ -819,7 +831,7 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
- field = 128 << dev->caps.mtu_cap[port];
+ field = 128 << dev->caps.ib_mtu_cap[port];
MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
field = dev->caps.gid_table_len[port];
MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index decbb5c2ad4..526d7f30c04 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -66,11 +66,13 @@ struct mlx4_dev_cap {
int local_ca_ack_delay;
int num_ports;
u32 max_msg_sz;
- int max_mtu[MLX4_MAX_PORTS + 1];
+ int ib_mtu[MLX4_MAX_PORTS + 1];
int max_port_width[MLX4_MAX_PORTS + 1];
int max_vl[MLX4_MAX_PORTS + 1];
int max_gids[MLX4_MAX_PORTS + 1];
int max_pkeys[MLX4_MAX_PORTS + 1];
+ u64 def_mac[MLX4_MAX_PORTS + 1];
+ u16 eth_mtu[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
u32 flags;
int reserved_uars;
@@ -102,6 +104,9 @@ struct mlx4_dev_cap {
u32 reserved_lkey;
u64 max_icm_sz;
int max_gso_sz;
+ u8 supported_port_types[MLX4_MAX_PORTS + 1];
+ u8 log_max_macs[MLX4_MAX_PORTS + 1];
+ u8 log_max_vlans[MLX4_MAX_PORTS + 1];
};
struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 1252a919de2..468921b8f4b 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -85,6 +85,57 @@ static struct mlx4_profile default_profile = {
.num_mtt = 1 << 20,
};
+static int log_num_mac = 2;
+module_param_named(log_num_mac, log_num_mac, int, 0444);
+MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
+
+static int log_num_vlan;
+module_param_named(log_num_vlan, log_num_vlan, int, 0444);
+MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
+
+static int use_prio;
+module_param_named(use_prio, use_prio, bool, 0444);
+MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
+ "(0/1, default 0)");
+
+static int mlx4_check_port_params(struct mlx4_dev *dev,
+ enum mlx4_port_type *port_type)
+{
+ int i;
+
+ for (i = 0; i < dev->caps.num_ports - 1; i++) {
+ if (port_type[i] != port_type[i+1] &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ mlx4_err(dev, "Only same port types supported "
+ "on this HCA, aborting.\n");
+ return -EINVAL;
+ }
+ }
+ if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
+ (port_type[1] == MLX4_PORT_TYPE_IB)) {
+ mlx4_err(dev, "eth-ib configuration is not supported.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->caps.num_ports; i++) {
+ if (!(port_type[i] & dev->caps.supported_type[i+1])) {
+ mlx4_err(dev, "Requested port type for port %d is not "
+ "supported on this HCA\n", i + 1);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void mlx4_set_port_mask(struct mlx4_dev *dev)
+{
+ int i;
+
+ dev->caps.port_mask = 0;
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
+ dev->caps.port_mask |= 1 << (i - 1);
+}
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
int err;
@@ -120,10 +171,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.num_ports = dev_cap->num_ports;
for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.vl_cap[i] = dev_cap->max_vl[i];
- dev->caps.mtu_cap[i] = dev_cap->max_mtu[i];
+ dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
+ dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
+ dev->caps.def_mac[i] = dev_cap->def_mac[i];
+ dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
}
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
@@ -134,7 +188,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_rq_sg = dev_cap->max_rq_sg;
dev->caps.max_wqes = dev_cap->max_qp_sz;
dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
- dev->caps.reserved_qps = dev_cap->reserved_qps;
dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
@@ -163,9 +216,138 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
+ dev->caps.log_num_macs = log_num_mac;
+ dev->caps.log_num_vlans = log_num_vlan;
+ dev->caps.log_num_prios = use_prio ? 3 : 0;
+
+ for (i = 1; i <= dev->caps.num_ports; ++i) {
+ if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
+ else
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+
+ if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
+ dev->caps.log_num_macs = dev_cap->log_max_macs[i];
+ mlx4_warn(dev, "Requested number of MACs is too high "
+ "for port %d, reducing to %d.\n",
+ i, 1 << dev->caps.log_num_macs);
+ }
+ if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
+ dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
+ mlx4_warn(dev, "Requested number of VLANs is too high "
+ "for port %d, reducing to %d.\n",
+ i, 1 << dev->caps.log_num_vlans);
+ }
+ }
+
+ mlx4_set_port_mask(dev);
+
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
+ (1 << dev->caps.log_num_macs) *
+ (1 << dev->caps.log_num_vlans) *
+ (1 << dev->caps.log_num_prios) *
+ dev->caps.num_ports;
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
+
+ dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
+
return 0;
}
+/*
+ * Change the port configuration of the device.
+ * Every user of this function must hold the port mutex.
+ */
+static int mlx4_change_port_types(struct mlx4_dev *dev,
+ enum mlx4_port_type *port_types)
+{
+ int err = 0;
+ int change = 0;
+ int port;
+
+ for (port = 0; port < dev->caps.num_ports; port++) {
+ if (port_types[port] != dev->caps.port_type[port + 1]) {
+ change = 1;
+ dev->caps.port_type[port + 1] = port_types[port];
+ }
+ }
+ if (change) {
+ mlx4_unregister_device(dev);
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ mlx4_CLOSE_PORT(dev, port);
+ err = mlx4_SET_PORT(dev, port);
+ if (err) {
+ mlx4_err(dev, "Failed to set port %d, "
+ "aborting\n", port);
+ goto out;
+ }
+ }
+ mlx4_set_port_mask(dev);
+ err = mlx4_register_device(dev);
+ }
+
+out:
+ return err;
+}
+
+static ssize_t show_port_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+ port_attr);
+ struct mlx4_dev *mdev = info->dev;
+
+ return sprintf(buf, "%s\n",
+ mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ?
+ "ib" : "eth");
+}
+
+static ssize_t set_port_type(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+ port_attr);
+ struct mlx4_dev *mdev = info->dev;
+ struct mlx4_priv *priv = mlx4_priv(mdev);
+ enum mlx4_port_type types[MLX4_MAX_PORTS];
+ int i;
+ int err = 0;
+
+ if (!strcmp(buf, "ib\n"))
+ info->tmp_type = MLX4_PORT_TYPE_IB;
+ else if (!strcmp(buf, "eth\n"))
+ info->tmp_type = MLX4_PORT_TYPE_ETH;
+ else {
+ mlx4_err(mdev, "%s is not a supported port type\n", buf);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->port_mutex);
+ for (i = 0; i < mdev->caps.num_ports; i++)
+ types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
+ mdev->caps.port_type[i+1];
+
+ err = mlx4_check_port_params(mdev, types);
+ if (err)
+ goto out;
+
+ for (i = 1; i <= mdev->caps.num_ports; i++)
+ priv->port[i].tmp_type = 0;
+
+ err = mlx4_change_port_types(mdev, types);
+
+out:
+ mutex_unlock(&priv->port_mutex);
+ return err ? err : count;
+}
+
static int mlx4_load_fw(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -211,7 +393,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
((u64) (MLX4_CMPT_TYPE_QP *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
cmpt_entry_sz, dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err)
goto err;
@@ -336,7 +519,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
init_hca->qpc_base,
dev_cap->qpc_entry_sz,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
goto err_unmap_dmpt;
@@ -346,7 +530,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
init_hca->auxc_base,
dev_cap->aux_entry_sz,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
goto err_unmap_qp;
@@ -356,7 +541,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
init_hca->altc_base,
dev_cap->altc_entry_sz,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
goto err_unmap_auxc;
@@ -366,7 +552,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
init_hca->rdmarc_base,
dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
goto err_unmap_altc;
@@ -565,6 +752,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
+ int port;
err = mlx4_init_uar_table(dev);
if (err) {
@@ -663,8 +851,20 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_qp_table_free;
}
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ err = mlx4_SET_PORT(dev, port);
+ if (err) {
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
+ goto err_mcg_table_free;
+ }
+ }
+
return 0;
+err_mcg_table_free:
+ mlx4_cleanup_mcg_table(dev);
+
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -728,11 +928,45 @@ no_msi:
priv->eq_table.eq[i].irq = dev->pdev->irq;
}
+static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ int err = 0;
+
+ info->dev = dev;
+ info->port = port;
+ mlx4_init_mac_table(dev, &info->mac_table);
+ mlx4_init_vlan_table(dev, &info->vlan_table);
+
+ sprintf(info->dev_name, "mlx4_port%d", port);
+ info->port_attr.attr.name = info->dev_name;
+ info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ info->port_attr.show = show_port_type;
+ info->port_attr.store = set_port_type;
+
+ err = device_create_file(&dev->pdev->dev, &info->port_attr);
+ if (err) {
+ mlx4_err(dev, "Failed to create file for port %d\n", port);
+ info->port = -1;
+ }
+
+ return err;
+}
+
+static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
+{
+ if (info->port < 0)
+ return;
+
+ device_remove_file(&info->dev->pdev->dev, &info->port_attr);
+}
+
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mlx4_priv *priv;
struct mlx4_dev *dev;
int err;
+ int port;
printk(KERN_INFO PFX "Initializing %s\n",
pci_name(pdev));
@@ -807,6 +1041,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
+ mutex_init(&priv->port_mutex);
+
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
@@ -842,15 +1078,24 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_close;
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ err = mlx4_init_port_info(dev, port);
+ if (err)
+ goto err_port;
+ }
+
err = mlx4_register_device(dev);
if (err)
- goto err_cleanup;
+ goto err_port;
pci_set_drvdata(pdev, dev);
return 0;
-err_cleanup:
+err_port:
+ for (port = 1; port <= dev->caps.num_ports; port++)
+ mlx4_cleanup_port_info(&priv->port[port]);
+
mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
@@ -907,8 +1152,10 @@ static void mlx4_remove_one(struct pci_dev *pdev)
if (dev) {
mlx4_unregister_device(dev);
- for (p = 1; p <= dev->caps.num_ports; ++p)
+ for (p = 1; p <= dev->caps.num_ports; p++) {
+ mlx4_cleanup_port_info(&priv->port[p]);
mlx4_CLOSE_PORT(dev, p);
+ }
mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
@@ -948,6 +1195,8 @@ static struct pci_device_id mlx4_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
{ 0, }
};
@@ -960,10 +1209,28 @@ static struct pci_driver mlx4_driver = {
.remove = __devexit_p(mlx4_remove_one)
};
+static int __init mlx4_verify_params(void)
+{
+ if ((log_num_mac < 0) || (log_num_mac > 7)) {
+ printk(KERN_WARNING "mlx4_core: bad num_mac: %d\n", log_num_mac);
+ return -1;
+ }
+
+ if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
+ printk(KERN_WARNING "mlx4_core: bad num_vlan: %d\n", log_num_vlan);
+ return -1;
+ }
+
+ return 0;
+}
+
static int __init mlx4_init(void)
{
int ret;
+ if (mlx4_verify_params())
+ return -EINVAL;
+
ret = mlx4_catas_init();
if (ret)
return ret;
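
As an aside, the mlx4_port%d attribute created in main.c above is a plain sysfs file on the PCI device, so the port type can be flipped from user space. A minimal sketch of such a caller follows; the sysfs path and PCI address are assumptions for illustration, not part of this patch (set_port_type() itself only accepts "ib\n" or "eth\n").

/* Illustrative user-space caller; the device path is a placeholder. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:00:00.0/mlx4_port1";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* set_port_type() parses "ib\n" or "eth\n" and rejects anything else */
	if (write(fd, "eth\n", 4) != 4)
		perror("write");
	close(fd);
	return 0;
}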
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c83f88ce073..592c01ae2c5 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -368,8 +368,8 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
- err = mlx4_bitmap_init(&priv->mcg_table.bitmap,
- dev->caps.num_amgms, dev->caps.num_amgms - 1, 0);
+ err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
+ dev->caps.num_amgms - 1, 0, 0);
if (err)
return err;
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 5337e3ac3e7..fa431fad0ee 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -111,6 +111,7 @@ struct mlx4_bitmap {
u32 last;
u32 top;
u32 max;
+ u32 reserved_top;
u32 mask;
spinlock_t lock;
unsigned long *table;
@@ -251,6 +252,38 @@ struct mlx4_catas_err {
struct list_head list;
};
+#define MLX4_MAX_MAC_NUM 128
+#define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3)
+
+struct mlx4_mac_table {
+ __be64 entries[MLX4_MAX_MAC_NUM];
+ int refs[MLX4_MAX_MAC_NUM];
+ struct mutex mutex;
+ int total;
+ int max;
+};
+
+#define MLX4_MAX_VLAN_NUM 128
+#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
+
+struct mlx4_vlan_table {
+ __be32 entries[MLX4_MAX_VLAN_NUM];
+ int refs[MLX4_MAX_VLAN_NUM];
+ struct mutex mutex;
+ int total;
+ int max;
+};
+
+struct mlx4_port_info {
+ struct mlx4_dev *dev;
+ int port;
+ char dev_name[16];
+ struct device_attribute port_attr;
+ enum mlx4_port_type tmp_type;
+ struct mlx4_mac_table mac_table;
+ struct mlx4_vlan_table vlan_table;
+};
+
struct mlx4_priv {
struct mlx4_dev dev;
@@ -279,6 +312,8 @@ struct mlx4_priv {
struct mlx4_uar driver_uar;
void __iomem *kar;
+ struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
+ struct mutex port_mutex;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -288,7 +323,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved);
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+ u32 reserved_bot, u32 reserved_top);
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
int mlx4_reset(struct mlx4_dev *dev);
@@ -346,4 +384,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_handle_catas_err(struct mlx4_dev *dev);
+void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
+void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
+
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+
#endif /* MLX4_H */
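
For context on the prototype change above: mlx4_bitmap_init() now takes both a reserved-bottom and a reserved-top count, which is why every existing caller in this patch gains a trailing 0. The allocator body is not shown in this hunk, so the following is only a hypothetical helper sketching the intended window, not the actual implementation.

/* Hypothetical illustration of the reserved_bot/reserved_top split:
 * objects [0, reserved_bot) and [num - reserved_top, num) are never
 * handed out dynamically; only the middle window is allocatable.
 */
static inline int mlx4_bitmap_obj_is_allocatable(u32 obj, u32 num,
						 u32 reserved_bot,
						 u32 reserved_top)
{
	return obj >= reserved_bot && obj < num - reserved_top;
}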
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
new file mode 100644
index 00000000000..11fb17c6e97
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _MLX4_EN_H_
+#define _MLX4_EN_H_
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/inet_lro.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/srq.h>
+#include <linux/mlx4/doorbell.h>
+
+#include "en_port.h"
+
+#define DRV_NAME "mlx4_en"
+#define DRV_VERSION "1.4.0"
+#define DRV_RELDATE "Sep 2008"
+
+
+#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
+
+#define mlx4_dbg(mlevel, priv, format, arg...) \
+ if (NETIF_MSG_##mlevel & priv->msg_enable) \
+ printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
+ (&priv->mdev->pdev->dev)->bus_id , ## arg)
+
+#define mlx4_err(mdev, format, arg...) \
+ printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
+ (&mdev->pdev->dev)->bus_id , ## arg)
+#define mlx4_info(mdev, format, arg...) \
+ printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
+ (&mdev->pdev->dev)->bus_id , ## arg)
+#define mlx4_warn(mdev, format, arg...) \
+ printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
+ (&mdev->pdev->dev)->bus_id , ## arg)
+
+/*
+ * Device constants
+ */
+
+
+#define MLX4_EN_PAGE_SHIFT 12
+#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
+#define MAX_TX_RINGS 16
+#define MAX_RX_RINGS 16
+#define MAX_RSS_MAP_SIZE 64
+#define RSS_FACTOR 2
+#define TXBB_SIZE 64
+#define HEADROOM (2048 / TXBB_SIZE + 1)
+#define MAX_LSO_HDR_SIZE 92
+#define STAMP_STRIDE 64
+#define STAMP_DWORDS (STAMP_STRIDE / 4)
+#define STAMP_SHIFT 31
+#define STAMP_VAL 0x7fffffff
+#define STATS_DELAY (HZ / 4)
+
+/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
+#define MAX_DESC_SIZE 512
+#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
+
+/*
+ * OS related constants and tunables
+ */
+
+#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
+
+#define MLX4_EN_ALLOC_ORDER 2
+#define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
+
+#define MLX4_EN_MAX_LRO_DESCRIPTORS 32
+
+/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
+ * and 4K allocations) */
+enum {
+ FRAG_SZ0 = 512 - NET_IP_ALIGN,
+ FRAG_SZ1 = 1024,
+ FRAG_SZ2 = 4096,
+ FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
+};
+#define MLX4_EN_MAX_RX_FRAGS 4
+
+/* Minimum ring size for our page-allocation scheme to work */
+#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
+#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
+
+#define MLX4_EN_TX_RING_NUM 9
+#define MLX4_EN_DEF_TX_RING_SIZE 1024
+#define MLX4_EN_DEF_RX_RING_SIZE 1024
+
+/* Target number of bytes to coalesce with interrupt moderation */
+#define MLX4_EN_RX_COAL_TARGET 0x20000
+#define MLX4_EN_RX_COAL_TIME 0x10
+
+#define MLX4_EN_TX_COAL_PKTS 5
+#define MLX4_EN_TX_COAL_TIME 0x80
+
+#define MLX4_EN_RX_RATE_LOW 400000
+#define MLX4_EN_RX_COAL_TIME_LOW 0
+#define MLX4_EN_RX_RATE_HIGH 450000
+#define MLX4_EN_RX_COAL_TIME_HIGH 128
+#define MLX4_EN_RX_SIZE_THRESH 1024
+#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
+#define MLX4_EN_SAMPLE_INTERVAL 0
+
+#define MLX4_EN_AUTO_CONF 0xffff
+
+#define MLX4_EN_DEF_RX_PAUSE 1
+#define MLX4_EN_DEF_TX_PAUSE 1
+
+/* Interval between successive polls in the Tx routine when polling is used
+ instead of interrupts (in per-core Tx rings) - should be a power of 2 */
+#define MLX4_EN_TX_POLL_MODER 16
+#define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4)
+
+#define ETH_LLC_SNAP_SIZE 8
+
+#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
+#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
+
+#define MLX4_EN_MIN_MTU 46
+#define ETH_BCAST 0xffffffffffffULL
+
+#ifdef MLX4_EN_PERF_STAT
+/* Number of samples to 'average' */
+#define AVG_SIZE 128
+#define AVG_FACTOR 1024
+#define NUM_PERF_STATS NUM_PERF_COUNTERS
+
+#define INC_PERF_COUNTER(cnt) (++(cnt))
+#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
+#define AVG_PERF_COUNTER(cnt, sample) \
+ ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
+#define GET_PERF_COUNTER(cnt) (cnt)
+#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR)
+
+#else
+
+#define NUM_PERF_STATS 0
+#define INC_PERF_COUNTER(cnt) do {} while (0)
+#define ADD_PERF_COUNTER(cnt, add) do {} while (0)
+#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
+#define GET_PERF_COUNTER(cnt) (0)
+#define GET_AVG_PERF_COUNTER(cnt) (0)
+#endif /* MLX4_EN_PERF_STAT */
+
+/*
+ * Configurables
+ */
+
+enum cq_type {
+ RX = 0,
+ TX = 1,
+};
+
+
+/*
+ * Useful macros
+ */
+#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
+#define XNOR(x, y) (!(x) == !(y))
+#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
+
+
+struct mlx4_en_tx_info {
+ struct sk_buff *skb;
+ u32 nr_txbb;
+ u8 linear;
+ u8 data_offset;
+};
+
+
+#define MLX4_EN_BIT_DESC_OWN 0x80000000
+#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
+#define MLX4_EN_MEMTYPE_PAD 0x100
+#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
+
+
+struct mlx4_en_tx_desc {
+ struct mlx4_wqe_ctrl_seg ctrl;
+ union {
+ struct mlx4_wqe_data_seg data; /* at least one data segment */
+ struct mlx4_wqe_lso_seg lso;
+ struct mlx4_wqe_inline_seg inl;
+ };
+};
+
+#define MLX4_EN_USE_SRQ 0x01000000
+
+struct mlx4_en_rx_alloc {
+ struct page *page;
+ u16 offset;
+};
+
+struct mlx4_en_tx_ring {
+ struct mlx4_hwq_resources wqres;
+ u32 size ; /* number of TXBBs */
+ u32 size_mask;
+ u16 stride;
+ u16 cqn; /* index of port CQ associated with this ring */
+ u32 prod;
+ u32 cons;
+ u32 buf_size;
+ u32 doorbell_qpn;
+ void *buf;
+ u16 poll_cnt;
+ int blocked;
+ struct mlx4_en_tx_info *tx_info;
+ u8 *bounce_buf;
+ u32 last_nr_txbb;
+ struct mlx4_qp qp;
+ struct mlx4_qp_context context;
+ int qpn;
+ enum mlx4_qp_state qp_state;
+ struct mlx4_srq dummy;
+ unsigned long bytes;
+ unsigned long packets;
+ spinlock_t comp_lock;
+};
+
+struct mlx4_en_rx_desc {
+ struct mlx4_wqe_srq_next_seg next;
+ /* actual number of entries depends on rx ring stride */
+ struct mlx4_wqe_data_seg data[0];
+};
+
+struct mlx4_en_rx_ring {
+ struct mlx4_srq srq;
+ struct mlx4_hwq_resources wqres;
+ struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
+ struct net_lro_mgr lro;
+ u32 size ; /* number of Rx descs*/
+ u32 actual_size;
+ u32 size_mask;
+ u16 stride;
+ u16 log_stride;
+ u16 cqn; /* index of port CQ associated with this ring */
+ u32 prod;
+ u32 cons;
+ u32 buf_size;
+ int need_refill;
+ int full;
+ void *buf;
+ void *rx_info;
+ unsigned long bytes;
+ unsigned long packets;
+};
+
+
+static inline int mlx4_en_can_lro(__be16 status)
+{
+ return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+ MLX4_CQE_STATUS_IPV4F |
+ MLX4_CQE_STATUS_IPV6 |
+ MLX4_CQE_STATUS_IPV4OPT |
+ MLX4_CQE_STATUS_TCP |
+ MLX4_CQE_STATUS_UDP |
+ MLX4_CQE_STATUS_IPOK)) ==
+ cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+ MLX4_CQE_STATUS_IPOK |
+ MLX4_CQE_STATUS_TCP);
+}
+
+struct mlx4_en_cq {
+ struct mlx4_cq mcq;
+ struct mlx4_hwq_resources wqres;
+ int ring;
+ spinlock_t lock;
+ struct net_device *dev;
+ struct napi_struct napi;
+ /* Per-core Tx cq processing support */
+ struct timer_list timer;
+ int size;
+ int buf_size;
+ unsigned vector;
+ enum cq_type is_tx;
+ u16 moder_time;
+ u16 moder_cnt;
+ int armed;
+ struct mlx4_cqe *buf;
+#define MLX4_EN_OPCODE_ERROR 0x1e
+};
+
+struct mlx4_en_port_profile {
+ u32 flags;
+ u32 tx_ring_num;
+ u32 rx_ring_num;
+ u32 tx_ring_size;
+ u32 rx_ring_size;
+};
+
+struct mlx4_en_profile {
+ int rss_xor;
+ int num_lro;
+ u8 rss_mask;
+ u32 active_ports;
+ u32 small_pkt_int;
+ int rx_moder_cnt;
+ int rx_moder_time;
+ int auto_moder;
+ u8 rx_pause;
+ u8 rx_ppp;
+ u8 tx_pause;
+ u8 tx_ppp;
+ u8 no_reset;
+ struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
+};
+
+struct mlx4_en_dev {
+ struct mlx4_dev *dev;
+ struct pci_dev *pdev;
+ struct mutex state_lock;
+ struct net_device *pndev[MLX4_MAX_PORTS + 1];
+ u32 port_cnt;
+ bool device_up;
+ struct mlx4_en_profile profile;
+ u32 LSO_support;
+ struct workqueue_struct *workqueue;
+ struct device *dma_device;
+ void __iomem *uar_map;
+ struct mlx4_uar priv_uar;
+ struct mlx4_mr mr;
+ u32 priv_pdn;
+ spinlock_t uar_lock;
+};
+
+
+struct mlx4_en_rss_map {
+ int size;
+ int base_qpn;
+ u16 map[MAX_RSS_MAP_SIZE];
+ struct mlx4_qp qps[MAX_RSS_MAP_SIZE];
+ enum mlx4_qp_state state[MAX_RSS_MAP_SIZE];
+ struct mlx4_qp indir_qp;
+ enum mlx4_qp_state indir_state;
+};
+
+struct mlx4_en_rss_context {
+ __be32 base_qpn;
+ __be32 default_qpn;
+ u16 reserved;
+ u8 hash_fn;
+ u8 flags;
+ __be32 rss_key[10];
+};
+
+struct mlx4_en_pkt_stats {
+ unsigned long broadcast;
+ unsigned long rx_prio[8];
+ unsigned long tx_prio[8];
+#define NUM_PKT_STATS 17
+};
+
+struct mlx4_en_port_stats {
+ unsigned long lro_aggregated;
+ unsigned long lro_flushed;
+ unsigned long lro_no_desc;
+ unsigned long tso_packets;
+ unsigned long queue_stopped;
+ unsigned long wake_queue;
+ unsigned long tx_timeout;
+ unsigned long rx_alloc_failed;
+ unsigned long rx_chksum_good;
+ unsigned long rx_chksum_none;
+ unsigned long tx_chksum_offload;
+#define NUM_PORT_STATS 11
+};
+
+struct mlx4_en_perf_stats {
+ u32 tx_poll;
+ u64 tx_pktsz_avg;
+ u32 inflight_avg;
+ u16 tx_coal_avg;
+ u16 rx_coal_avg;
+ u32 napi_quota;
+#define NUM_PERF_COUNTERS 6
+};
+
+struct mlx4_en_frag_info {
+ u16 frag_size;
+ u16 frag_prefix_size;
+ u16 frag_stride;
+ u16 frag_align;
+ u16 last_offset;
+
+};
+
+struct mlx4_en_priv {
+ struct mlx4_en_dev *mdev;
+ struct mlx4_en_port_profile *prof;
+ struct net_device *dev;
+ struct vlan_group *vlgrp;
+ struct net_device_stats stats;
+ struct net_device_stats ret_stats;
+ spinlock_t stats_lock;
+
+ unsigned long last_moder_packets;
+ unsigned long last_moder_tx_packets;
+ unsigned long last_moder_bytes;
+ unsigned long last_moder_jiffies;
+ int last_moder_time;
+ u16 rx_usecs;
+ u16 rx_frames;
+ u16 tx_usecs;
+ u16 tx_frames;
+ u32 pkt_rate_low;
+ u16 rx_usecs_low;
+ u32 pkt_rate_high;
+ u16 rx_usecs_high;
+ u16 sample_interval;
+ u16 adaptive_rx_coal;
+ u32 msg_enable;
+
+ struct mlx4_hwq_resources res;
+ int link_state;
+ int last_link_state;
+ bool port_up;
+ int port;
+ int registered;
+ int allocated;
+ int stride;
+ int rx_csum;
+ u64 mac;
+ int mac_index;
+ unsigned max_mtu;
+ int base_qpn;
+
+ struct mlx4_en_rss_map rss_map;
+ u16 tx_prio_map[8];
+ u32 flags;
+#define MLX4_EN_FLAG_PROMISC 0x1
+ u32 tx_ring_num;
+ u32 rx_ring_num;
+ u32 rx_skb_size;
+ struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
+ u16 num_frags;
+ u16 log_rx_info;
+
+ struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
+ struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
+ struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
+ struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+ struct work_struct mcast_task;
+ struct work_struct mac_task;
+ struct delayed_work refill_task;
+ struct work_struct watchdog_task;
+ struct work_struct linkstate_task;
+ struct delayed_work stats_task;
+ struct mlx4_en_perf_stats pstats;
+ struct mlx4_en_pkt_stats pkstats;
+ struct mlx4_en_port_stats port_stats;
+ struct dev_mc_list *mc_list;
+ struct mlx4_en_stat_out_mbox hw_stats;
+};
+
+
+void mlx4_en_destroy_netdev(struct net_device *dev);
+int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ struct mlx4_en_port_profile *prof);
+
+int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
+
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+ int entries, int ring, enum cq_type mode);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+
+void mlx4_en_poll_tx_cq(unsigned long data);
+void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ u32 size, u16 stride);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int cq, int srqn);
+void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring);
+
+int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
+ u32 size, u16 stride);
+void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring);
+int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
+void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring);
+int mlx4_en_process_rx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq,
+ int budget);
+int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
+ int is_tx, int rss, int qpn, int cqn, int srqn,
+ struct mlx4_qp_context *context);
+int mlx4_en_map_buffer(struct mlx4_buf *buf);
+void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
+
+void mlx4_en_calc_rx_buf(struct net_device *dev);
+void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
+ struct mlx4_en_rss_map *rss_map,
+ int num_entries, int num_rings);
+void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
+int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
+void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
+int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
+void mlx4_en_rx_refill(struct work_struct *work);
+void mlx4_en_rx_irq(struct mlx4_cq *mcq);
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp);
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+ u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+ u8 promisc);
+
+int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
+
+/*
+ * Globals
+ */
+extern const struct ethtool_ops mlx4_en_ethtool_ops;
+#endif
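
A side note on the perf-counter macros defined in this header: AVG_PERF_COUNTER() keeps a scaled exponential moving average, storing the counter multiplied by AVG_FACTOR so the driver can avoid floating point. A minimal stand-alone equivalent in plain C (illustrative only, not taken from the driver):

/* Same arithmetic as AVG_PERF_COUNTER()/GET_AVG_PERF_COUNTER():
 * cnt holds the average scaled by AVG_FACTOR (1024), so integer math
 * still preserves useful precision between samples.
 */
enum { AVG_SIZE = 128, AVG_FACTOR = 1024 };

static void avg_update(unsigned long *cnt, unsigned long sample)
{
	*cnt = (*cnt * (AVG_SIZE - 1) + sample * AVG_FACTOR) / AVG_SIZE;
}

static unsigned long avg_read(unsigned long cnt)
{
	return cnt / AVG_FACTOR;	/* un-scale for reporting */
}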
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index d1dd5b48dbd..0caf74cae8b 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -461,7 +461,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
int err;
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
- ~0, dev->caps.reserved_mrws);
+ ~0, dev->caps.reserved_mrws, 0);
if (err)
return err;
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index aa616892d09..26d1a7a9e37 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -62,7 +62,7 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
- (1 << 24) - 1, dev->caps.reserved_pds);
+ (1 << 24) - 1, dev->caps.reserved_pds, 0);
}
void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -100,7 +100,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
dev->caps.num_uars, dev->caps.num_uars - 1,
- max(128, dev->caps.reserved_uars));
+ max(128, dev->caps.reserved_uars), 0);
}
void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
new file mode 100644
index 00000000000..e2fdab42c4c
--- /dev/null
+++ b/drivers/net/mlx4/port.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+#define MLX4_MAC_VALID (1ull << 63)
+#define MLX4_MAC_MASK 0xffffffffffffULL
+
+#define MLX4_VLAN_VALID (1u << 31)
+#define MLX4_VLAN_MASK 0xfff
+
+void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
+{
+ int i;
+
+ mutex_init(&table->mutex);
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ table->entries[i] = 0;
+ table->refs[i] = 0;
+ }
+ table->max = 1 << dev->caps.log_num_macs;
+ table->total = 0;
+}
+
+void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
+{
+ int i;
+
+ mutex_init(&table->mutex);
+ for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
+ table->entries[i] = 0;
+ table->refs[i] = 0;
+ }
+ table->max = 1 << dev->caps.log_num_vlans;
+ table->total = 0;
+}
+
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+ __be64 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+ in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
+{
+ struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+ int i, err = 0;
+ int free = -1;
+
+ mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+ mutex_lock(&table->mutex);
+ for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
+ if (free < 0 && !table->refs[i]) {
+ free = i;
+ continue;
+ }
+
+ if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+ /* MAC already registered, increase reference count */
+ *index = i;
+ ++table->refs[i];
+ goto out;
+ }
+ }
+ mlx4_dbg(dev, "Free MAC index is %d\n", free);
+
+ if (table->total == table->max) {
+ /* No free mac entries */
+ err = -ENOSPC;
+ goto out;
+ }
+
+ /* Register new MAC */
+ table->refs[free] = 1;
+ table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
+
+ err = mlx4_set_port_mac_table(dev, port, table->entries);
+ if (unlikely(err)) {
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
+ table->refs[free] = 0;
+ table->entries[free] = 0;
+ goto out;
+ }
+
+ *index = free;
+ ++table->total;
+out:
+ mutex_unlock(&table->mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index)
+{
+ struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+
+ mutex_lock(&table->mutex);
+ if (!table->refs[index]) {
+ mlx4_warn(dev, "No MAC entry for index %d\n", index);
+ goto out;
+ }
+ if (--table->refs[index]) {
+ mlx4_warn(dev, "Have more references for index %d,"
+ "no need to modify MAC table\n", index);
+ goto out;
+ }
+ table->entries[index] = 0;
+ mlx4_set_port_mac_table(dev, port, table->entries);
+ --table->total;
+out:
+ mutex_unlock(&table->mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
+
+static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
+ __be32 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
+ in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+ struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+ int i, err = 0;
+ int free = -1;
+
+ mutex_lock(&table->mutex);
+ for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
+ if (free < 0 && (table->refs[i] == 0)) {
+ free = i;
+ continue;
+ }
+
+ if (table->refs[i] &&
+ (vlan == (MLX4_VLAN_MASK &
+ be32_to_cpu(table->entries[i])))) {
+ /* VLAN already registered, increase reference count */
+ *index = i;
+ ++table->refs[i];
+ goto out;
+ }
+ }
+
+ if (table->total == table->max) {
+ /* No free vlan entries */
+ err = -ENOSPC;
+ goto out;
+ }
+
+ /* Register new VLAN */
+ table->refs[free] = 1;
+ table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
+
+ err = mlx4_set_port_vlan_table(dev, port, table->entries);
+ if (unlikely(err)) {
+ mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
+ table->refs[free] = 0;
+ table->entries[free] = 0;
+ goto out;
+ }
+
+ *index = free;
+ ++table->total;
+out:
+ mutex_unlock(&table->mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_vlan);
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+ struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+
+ if (index < MLX4_VLAN_REGULAR) {
+ mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
+ return;
+ }
+
+ mutex_lock(&table->mutex);
+ if (!table->refs[index]) {
+ mlx4_warn(dev, "No vlan entry for index %d\n", index);
+ goto out;
+ }
+ if (--table->refs[index]) {
+ mlx4_dbg(dev, "Have more references for index %d,"
+ "no need to modify vlan table\n", index);
+ goto out;
+ }
+ table->entries[index] = 0;
+ mlx4_set_port_vlan_table(dev, port, table->entries);
+ --table->total;
+out:
+ mutex_unlock(&table->mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
+
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+ u8 is_eth = dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memset(mailbox->buf, 0, 256);
+ if (is_eth) {
+ ((u8 *) mailbox->buf)[3] = 6;
+ ((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15);
+ ((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15);
+ }
+ err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
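
The MAC table introduced in port.c is reference counted: a consumer registers an address and gets back a table index, and the hardware table is only rewritten when the reference count transitions. A hedged usage sketch based only on the mlx4_register_mac()/mlx4_unregister_mac() signatures above; the MAC value and surrounding function are placeholders.

/* Illustrative caller of the exported MAC table helpers. */
static int example_attach_mac(struct mlx4_dev *dev, u8 port)
{
	u64 mac = 0x0002c9000001ULL;	/* placeholder address */
	int index;
	int err;

	err = mlx4_register_mac(dev, port, mac, &index);
	if (err)
		return err;	/* -ENOSPC once table->max entries are in use */

	/* ... use index to program the receive path for this address ... */

	mlx4_unregister_mac(dev, port, index);
	return 0;
}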
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index c49a86044bf..1c565ef8d17 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -147,19 +147,42 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp)
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ int qpn;
+
+ qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+ if (qpn == -1)
+ return -ENOMEM;
+
+ *base = qpn;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ if (base_qpn < dev->caps.sqp_start + 8)
+ return;
+
+ mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
- if (sqpn)
- qp->qpn = sqpn;
- else {
- qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap);
- if (qp->qpn == -1)
- return -ENOMEM;
- }
+ if (!qpn)
+ return -EINVAL;
+
+ qp->qpn = qpn;
err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
if (err)
@@ -208,9 +231,6 @@ err_put_qp:
mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
err_out:
- if (!sqpn)
- mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
-
return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
@@ -239,9 +259,6 @@ void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
-
- if (qp->qpn >= dev->caps.sqp_start + 8)
- mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
@@ -255,6 +272,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
int err;
+ int reserved_from_top = 0;
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
@@ -264,9 +282,40 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
* block of special QPs must be aligned to a multiple of 8, so
* round up.
*/
- dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8);
+ dev->caps.sqp_start =
+ ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
+
+ {
+ int sort[MLX4_NUM_QP_REGION];
+ int i, j, tmp;
+ int last_base = dev->caps.num_qps;
+
+ for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
+ sort[i] = i;
+
+ for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
+ for (j = 2; j < i; ++j) {
+ if (dev->caps.reserved_qps_cnt[sort[j]] >
+ dev->caps.reserved_qps_cnt[sort[j - 1]]) {
+ tmp = sort[j];
+ sort[j] = sort[j - 1];
+ sort[j - 1] = tmp;
+ }
+ }
+ }
+
+ for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
+ last_base -= dev->caps.reserved_qps_cnt[sort[i]];
+ dev->caps.reserved_qps_base[sort[i]] = last_base;
+ reserved_from_top +=
+ dev->caps.reserved_qps_cnt[sort[i]];
+ }
+
+ }
+
err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
- (1 << 24) - 1, dev->caps.sqp_start + 8);
+ (1 << 23) - 1, dev->caps.sqp_start + 8,
+ reserved_from_top);
if (err)
return err;
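
With the qp.c change above, mlx4_qp_alloc() no longer draws QP numbers from the bitmap itself; callers first reserve an aligned QPN range and then allocate each QP explicitly. A rough sketch of the new calling convention, based only on the signatures in this patch (the count/alignment choice is an example, not a requirement stated here):

/* Illustrative consumer of the reserve/alloc split. */
static int example_create_qps(struct mlx4_dev *dev, struct mlx4_qp *qps, int n)
{
	int base, i, err;

	/* using n as its own alignment assumes n is a power of two */
	err = mlx4_qp_reserve_range(dev, n, n, &base);
	if (err)
		return err;

	for (i = 0; i < n; i++) {
		err = mlx4_qp_alloc(dev, base + i, &qps[i]);
		if (err)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0)
		mlx4_qp_free(dev, &qps[i]);
	mlx4_qp_release_range(dev, base, n);
	return err;
}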
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index 533eb6db24b..fe9f218691f 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -245,7 +245,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
- dev->caps.num_srqs - 1, dev->caps.reserved_srqs);
+ dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
if (err)
return err;
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
new file mode 100644
index 00000000000..da42aa06a3b
--- /dev/null
+++ b/drivers/net/xtsonic.c
@@ -0,0 +1,319 @@
+/*
+ * xtsonic.c
+ *
+ * (C) 2001 - 2007 Tensilica Inc.
+ * Kevin Chea <kchea@yahoo.com>
+ * Marc Gauthier <marc@linux-xtensa.org>
+ * Chris Zankel <chris@zankel.net>
+ *
+ * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ *
+ * This driver is based on work from Andreas Busse, but most of
+ * the code is rewritten.
+ *
+ * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
+ *
+ * A driver for the onboard Sonic ethernet controller on the XT2000.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/dma.h>
+
+static char xtsonic_string[] = "xtsonic";
+
+extern unsigned xtboard_nvram_valid(void);
+extern void xtboard_get_ether_addr(unsigned char *buf);
+
+#include "sonic.h"
+
+/*
+ * According to the documentation for the Sonic ethernet controller,
+ * EOBC should be 760 words (1520 bytes) for 32-bit applications, and,
+ * as such, 2 words less than the buffer size. The value for RBSIZE
+ * defined in sonic.h, however, is only 1520.
+ *
+ * (Note that in 16-bit configurations, EOBC is 759 words (1518 bytes) and
+ * RBSIZE 1520 bytes)
+ */
+#undef SONIC_RBSIZE
+#define SONIC_RBSIZE 1524
+
+/*
+ * The chip provides 256 byte register space.
+ */
+#define SONIC_MEM_SIZE 0x100
+
+/*
+ * Macros to access SONIC registers
+ */
+#define SONIC_READ(reg) \
+ (0xffff & *((volatile unsigned int *)dev->base_addr+reg))
+
+#define SONIC_WRITE(reg,val) \
+ *((volatile unsigned int *)dev->base_addr+reg) = val
+
+
+/* Use 0 for production, 1 for verification, and >2 for debug */
+#ifdef SONIC_DEBUG
+static unsigned int sonic_debug = SONIC_DEBUG;
+#else
+static unsigned int sonic_debug = 1;
+#endif
+
+/*
+ * We cannot use station (ethernet) address prefixes to detect the
+ * sonic controller since these are board-manufacturer dependent.
+ * So we check for known Silicon Revision IDs instead.
+ */
+static unsigned short known_revisions[] =
+{
+ 0x101, /* SONIC 83934 */
+ 0xffff /* end of list */
+};
+
+static int xtsonic_open(struct net_device *dev)
+{
+ if (request_irq(dev->irq,&sonic_interrupt,IRQF_DISABLED,"sonic",dev)) {
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n",
+ dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ return sonic_open(dev);
+}
+
+static int xtsonic_close(struct net_device *dev)
+{
+ int err;
+ err = sonic_close(dev);
+ free_irq(dev->irq, dev);
+ return err;
+}
+
+static int __init sonic_probe1(struct net_device *dev)
+{
+ static unsigned version_printed = 0;
+ unsigned int silicon_revision;
+ struct sonic_local *lp = netdev_priv(dev);
+ unsigned int base_addr = dev->base_addr;
+ int i;
+ int err = 0;
+
+ if (!request_mem_region(base_addr, 0x100, xtsonic_string))
+ return -EBUSY;
+
+ /*
+ * get the Silicon Revision ID. If this is one of the known
+ * ones, assume that we found a SONIC ethernet controller at
+ * the expected location.
+ */
+ silicon_revision = SONIC_READ(SONIC_SR);
+ if (sonic_debug > 1)
+ printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
+
+ i = 0;
+ while ((known_revisions[i] != 0xffff) &&
+ (known_revisions[i] != silicon_revision))
+ i++;
+
+ if (known_revisions[i] == 0xffff) {
+ printk("SONIC ethernet controller not found (0x%4x)\n",
+ silicon_revision);
+ return -ENODEV;
+ }
+
+ if (sonic_debug && version_printed++ == 0)
+ printk(version);
+
+ /*
+ * Put the sonic into software reset, then retrieve ethernet address.
+ * Note: we are assuming that the boot-loader has initialized the cam.
+ */
+ SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
+ SONIC_WRITE(SONIC_DCR,
+ SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS);
+ SONIC_WRITE(SONIC_CEP,0);
+ SONIC_WRITE(SONIC_IMR,0);
+
+ SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
+ SONIC_WRITE(SONIC_CEP,0);
+
+ for (i=0; i<3; i++) {
+ unsigned int val = SONIC_READ(SONIC_CAP0-i);
+ dev->dev_addr[i*2] = val;
+ dev->dev_addr[i*2+1] = val >> 8;
+ }
+
+ /* Initialize the device structure. */
+
+ lp->dma_bitmode = SONIC_BITMODE32;
+
+ /*
+ * Allocate local private descriptor areas in uncached space.
+ * The entire structure must be located within the same 64kb segment.
+ * A simple way to ensure this is to allocate twice the
+ * size of the structure -- given that the structure is
+ * much less than 64 kB, at least one of the halves of
+ * the allocated area will be contained entirely in 64 kB.
+ * We also allocate extra space for a pointer to allow freeing
+ * this structure later on (in xtsonic_cleanup_module()).
+ */
+ lp->descriptors =
+ dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr, GFP_KERNEL);
+
+ if (lp->descriptors == NULL) {
+ printk(KERN_ERR "%s: couldn't alloc DMA memory for "
+ " descriptors.\n", lp->device->bus_id);
+ goto out;
+ }
+
+ lp->cda = lp->descriptors;
+ lp->tda = lp->cda + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+ /* get the virtual dma address */
+
+ lp->cda_laddr = lp->descriptors_laddr;
+ lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+ dev->open = xtsonic_open;
+ dev->stop = xtsonic_close;
+ dev->hard_start_xmit = sonic_send_packet;
+ dev->get_stats = sonic_get_stats;
+ dev->set_multicast_list = &sonic_multicast_list;
+ dev->tx_timeout = sonic_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /*
+ * clear tally counter
+ */
+ SONIC_WRITE(SONIC_CRCT,0xffff);
+ SONIC_WRITE(SONIC_FAET,0xffff);
+ SONIC_WRITE(SONIC_MPT,0xffff);
+
+ return 0;
+out:
+ release_region(dev->base_addr, SONIC_MEM_SIZE);
+ return err;
+}
+
+
+/*
+ * Probe for a SONIC ethernet controller on an XT2000 board.
+ * Actually probing is superfluous but we're paranoid.
+ */
+
+int __init xtsonic_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct sonic_local *lp;
+ struct resource *resmem, *resirq;
+ int err = 0;
+
+ DECLARE_MAC_BUF(mac);
+
+ if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL)
+ return -ENODEV;
+
+ if ((resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0)) == NULL)
+ return -ENODEV;
+
+ if ((dev = alloc_etherdev(sizeof(struct sonic_local))) == NULL)
+ return -ENOMEM;
+
+ lp = netdev_priv(dev);
+ lp->device = &pdev->dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ netdev_boot_setup_check(dev);
+
+ dev->base_addr = resmem->start;
+ dev->irq = resirq->start;
+
+ if ((err = sonic_probe1(dev)))
+ goto out;
+ if ((err = register_netdev(dev)))
+ goto out1;
+
+ printk("%s: SONIC ethernet @%08lx, MAC %s, IRQ %d\n", dev->name,
+ dev->base_addr, print_mac(mac, dev->dev_addr), dev->irq);
+
+ return 0;
+
+out1:
+ release_region(dev->base_addr, SONIC_MEM_SIZE);
+out:
+ free_netdev(dev);
+
+ return err;
+}
+
+MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver");
+module_param(sonic_debug, int, 0);
+MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)");
+
+#include "sonic.c"
+
+static int __devexit xtsonic_device_remove (struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sonic_local *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
+ release_region (dev->base_addr, SONIC_MEM_SIZE);
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver xtsonic_driver = {
+ .probe = xtsonic_probe,
+ .remove = __devexit_p(xtsonic_device_remove),
+ .driver = {
+ .name = xtsonic_string,
+ },
+};
+
+static int __init xtsonic_init(void)
+{
+ return platform_driver_register(&xtsonic_driver);
+}
+
+static void __exit xtsonic_cleanup(void)
+{
+ platform_driver_unregister(&xtsonic_driver);
+}
+
+module_init(xtsonic_init);
+module_exit(xtsonic_cleanup);
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index 6a98dc8aa30..24bbef777c1 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -41,7 +41,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
info.addr = *addr;
- request_module(info.type);
+ request_module("%s", info.type);
result = i2c_new_device(adap, &info);
if (result == NULL) {
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
index b01eec026f6..bed0ed6dcdc 100644
--- a/drivers/of/of_spi.c
+++ b/drivers/of/of_spi.c
@@ -61,6 +61,8 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np)
spi->mode |= SPI_CPHA;
if (of_find_property(nc, "spi-cpol", NULL))
spi->mode |= SPI_CPOL;
+ if (of_find_property(nc, "spi-cs-high", NULL))
+ spi->mode |= SPI_CS_HIGH;
/* Device speed */
prop = of_get_property(nc, "spi-max-frequency", &len);
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index ed982273fb8..b55cd23ffde 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -41,7 +41,6 @@ static cpumask_t marked_cpus = CPU_MASK_NONE;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);
-
/* Take ownership of the task struct and place it on the
* list for processing. Only after two full buffer syncs
* does the task eventually get freed, because by then
@@ -341,7 +340,7 @@ static void add_trace_begin(void)
* Add IBS fetch and op entries to event buffer
*/
static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
- int in_kernel, struct mm_struct *mm)
+ struct mm_struct *mm)
{
unsigned long rip;
int i, count;
@@ -565,9 +564,11 @@ void sync_buffer(int cpu)
struct task_struct *new;
unsigned long cookie = 0;
int in_kernel = 1;
- unsigned int i;
sync_buffer_state state = sb_buffer_start;
+#ifndef CONFIG_OPROFILE_IBS
+ unsigned int i;
unsigned long available;
+#endif
mutex_lock(&buffer_mutex);
@@ -575,9 +576,13 @@ void sync_buffer(int cpu)
/* Remember, only we can modify tail_pos */
+#ifndef CONFIG_OPROFILE_IBS
available = get_slots(cpu_buf);
for (i = 0; i < available; ++i) {
+#else
+ while (get_slots(cpu_buf)) {
+#endif
struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
if (is_code(s->eip)) {
@@ -593,12 +598,10 @@ void sync_buffer(int cpu)
#ifdef CONFIG_OPROFILE_IBS
} else if (s->event == IBS_FETCH_BEGIN) {
state = sb_bt_start;
- add_ibs_begin(cpu_buf,
- IBS_FETCH_CODE, in_kernel, mm);
+ add_ibs_begin(cpu_buf, IBS_FETCH_CODE, mm);
} else if (s->event == IBS_OP_BEGIN) {
state = sb_bt_start;
- add_ibs_begin(cpu_buf,
- IBS_OP_CODE, in_kernel, mm);
+ add_ibs_begin(cpu_buf, IBS_OP_CODE, mm);
#endif
} else {
struct mm_struct *oldmm = mm;
@@ -628,3 +631,27 @@ void sync_buffer(int cpu)
mutex_unlock(&buffer_mutex);
}
+
+/* This function can be used to add a buffer worth of data directly to
+ * the kernel buffer. The buffer is assumed to be a circular buffer.
+ * Take the entries from index start, stop at index stop, wrapping
+ * at max.
+ */
+ */
+void oprofile_put_buff(unsigned long *buf, unsigned int start,
+ unsigned int stop, unsigned int max)
+{
+ int i;
+
+ i = start;
+
+ mutex_lock(&buffer_mutex);
+ while (i != stop) {
+ add_event_entry(buf[i++]);
+
+ if (i >= max)
+ i = 0;
+ }
+
+ mutex_unlock(&buffer_mutex);
+}
+
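
The new oprofile_put_buff() above flushes an externally managed circular buffer straight into the event buffer under buffer_mutex. A hedged usage sketch follows; the ring structure and flush helper are assumptions for illustration, and it presumes the prototype is made visible to callers (for example via linux/oprofile.h, which is not shown in this hunk).

/* Illustrative caller: flush a private circular buffer of samples. */
#define EXAMPLE_BUF_SIZE 1024

struct example_ring {
	unsigned long data[EXAMPLE_BUF_SIZE];
	unsigned int head;	/* next write position */
	unsigned int tail;	/* next entry to flush */
};

static void example_flush(struct example_ring *ring)
{
	/* hands entries [tail, head) to the event buffer, wrapping at the end */
	oprofile_put_buff(ring->data, ring->tail, ring->head,
			  EXAMPLE_BUF_SIZE);
	ring->tail = ring->head;
}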
diff --git a/drivers/oprofile/buffer_sync.h b/drivers/oprofile/buffer_sync.h
index 08866f6a96a..3110732c183 100644
--- a/drivers/oprofile/buffer_sync.h
+++ b/drivers/oprofile/buffer_sync.h
@@ -9,13 +9,13 @@
#ifndef OPROFILE_BUFFER_SYNC_H
#define OPROFILE_BUFFER_SYNC_H
-
+
/* add the necessary profiling hooks */
int sync_start(void);
/* remove the hooks */
void sync_stop(void);
-
+
/* sync the given CPU's buffer */
void sync_buffer(int cpu);
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index e1bd5a937f6..01d38e78cde 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -22,7 +22,7 @@
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
-
+
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
@@ -38,27 +38,40 @@ static int work_enabled;
void free_cpu_buffers(void)
{
int i;
-
- for_each_online_cpu(i) {
+
+ for_each_possible_cpu(i) {
vfree(per_cpu(cpu_buffer, i).buffer);
per_cpu(cpu_buffer, i).buffer = NULL;
}
}
+unsigned long oprofile_get_cpu_buffer_size(void)
+{
+ return fs_cpu_buffer_size;
+}
+
+void oprofile_cpu_buffer_inc_smpl_lost(void)
+{
+ struct oprofile_cpu_buffer *cpu_buf
+ = &__get_cpu_var(cpu_buffer);
+
+ cpu_buf->sample_lost_overflow++;
+}
+
int alloc_cpu_buffers(void)
{
int i;
-
+
unsigned long buffer_size = fs_cpu_buffer_size;
-
- for_each_online_cpu(i) {
+
+ for_each_possible_cpu(i) {
struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
-
+
b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
cpu_to_node(i));
if (!b->buffer)
goto fail;
-
+
b->last_task = NULL;
b->last_is_kernel = -1;
b->tracing = 0;
@@ -112,7 +125,7 @@ void end_cpu_work(void)
}
/* Resets the cpu buffer to a sane state. */
-void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
+void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
{
/* reset these to invalid values; the next sample
* collected will populate the buffer with proper
@@ -123,7 +136,7 @@ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
}
/* compute number of available slots in cpu_buffer queue */
-static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
+static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
{
unsigned long head = b->head_pos;
unsigned long tail = b->tail_pos;
@@ -134,7 +147,7 @@ static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
return tail + (b->buffer_size - head) - 1;
}
-static void increment_head(struct oprofile_cpu_buffer * b)
+static void increment_head(struct oprofile_cpu_buffer *b)
{
unsigned long new_head = b->head_pos + 1;
@@ -149,17 +162,17 @@ static void increment_head(struct oprofile_cpu_buffer * b)
}
static inline void
-add_sample(struct oprofile_cpu_buffer * cpu_buf,
- unsigned long pc, unsigned long event)
+add_sample(struct oprofile_cpu_buffer *cpu_buf,
+ unsigned long pc, unsigned long event)
{
- struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos];
+ struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
entry->eip = pc;
entry->event = event;
increment_head(cpu_buf);
}
static inline void
-add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
+add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{
add_sample(buffer, ESCAPE_CODE, value);
}
@@ -173,10 +186,10 @@ add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
* pc. We tag this in the buffer by generating kernel enter/exit
* events whenever is_kernel changes
*/
-static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
+static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
int is_kernel, unsigned long event)
{
- struct task_struct * task;
+ struct task_struct *task;
cpu_buf->sample_received++;
@@ -205,7 +218,7 @@ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
cpu_buf->last_task = task;
add_code(cpu_buf, (unsigned long)task);
}
-
+
add_sample(cpu_buf, pc, event);
return 1;
}
@@ -222,7 +235,7 @@ static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
return 1;
}
-static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
+static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
cpu_buf->tracing = 0;
}
@@ -257,21 +270,23 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
#ifdef CONFIG_OPROFILE_IBS
-#define MAX_IBS_SAMPLE_SIZE 14
-static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
- unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code)
+#define MAX_IBS_SAMPLE_SIZE 14
+
+void oprofile_add_ibs_sample(struct pt_regs *const regs,
+ unsigned int *const ibs_sample, int ibs_code)
{
+ int is_kernel = !user_mode(regs);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
struct task_struct *task;
cpu_buf->sample_received++;
if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
+ /* we can't backtrace since we lost the source of this event */
cpu_buf->sample_lost_overflow++;
- return 0;
+ return;
}
- is_kernel = !!is_kernel;
-
/* notice a switch from user->kernel or vice versa */
if (cpu_buf->last_is_kernel != is_kernel) {
cpu_buf->last_is_kernel = is_kernel;
@@ -281,7 +296,6 @@ static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
/* notice a task switch */
if (!is_kernel) {
task = current;
-
if (cpu_buf->last_task != task) {
cpu_buf->last_task = task;
add_code(cpu_buf, (unsigned long)task);
@@ -289,36 +303,17 @@ static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
}
add_code(cpu_buf, ibs_code);
- add_sample(cpu_buf, ibs[0], ibs[1]);
- add_sample(cpu_buf, ibs[2], ibs[3]);
- add_sample(cpu_buf, ibs[4], ibs[5]);
+ add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
+ add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
+ add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
if (ibs_code == IBS_OP_BEGIN) {
- add_sample(cpu_buf, ibs[6], ibs[7]);
- add_sample(cpu_buf, ibs[8], ibs[9]);
- add_sample(cpu_buf, ibs[10], ibs[11]);
- }
-
- return 1;
-}
-
-void oprofile_add_ibs_sample(struct pt_regs *const regs,
- unsigned int * const ibs_sample, u8 code)
-{
- int is_kernel = !user_mode(regs);
- unsigned long pc = profile_pc(regs);
-
- struct oprofile_cpu_buffer *cpu_buf =
- &per_cpu(cpu_buffer, smp_processor_id());
-
- if (!backtrace_depth) {
- log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
- return;
+ add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
+ add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
+ add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
}
- /* if log_sample() fails we can't backtrace since we lost the source
- * of this event */
- if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
+ if (backtrace_depth)
oprofile_ops.backtrace(regs, backtrace_depth);
}
@@ -363,11 +358,16 @@ void oprofile_add_trace(unsigned long pc)
*/
static void wq_sync_buffer(struct work_struct *work)
{
- struct oprofile_cpu_buffer * b =
+ struct oprofile_cpu_buffer *b =
container_of(work, struct oprofile_cpu_buffer, work.work);
if (b->cpu != smp_processor_id()) {
printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
smp_processor_id(), b->cpu);
+
+ if (!cpu_online(b->cpu)) {
+ cancel_delayed_work(&b->work);
+ return;
+ }
}
sync_buffer(b->cpu);
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 9c44d004da6..d3cc26264db 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -15,9 +15,9 @@
#include <linux/workqueue.h>
#include <linux/cache.h>
#include <linux/sched.h>
-
+
struct task_struct;
-
+
int alloc_cpu_buffers(void);
void free_cpu_buffers(void);
@@ -31,15 +31,15 @@ struct op_sample {
unsigned long eip;
unsigned long event;
};
-
+
struct oprofile_cpu_buffer {
volatile unsigned long head_pos;
volatile unsigned long tail_pos;
unsigned long buffer_size;
- struct task_struct * last_task;
+ struct task_struct *last_task;
int last_is_kernel;
int tracing;
- struct op_sample * buffer;
+ struct op_sample *buffer;
unsigned long sample_received;
unsigned long sample_lost_overflow;
unsigned long backtrace_aborted;
@@ -50,7 +50,7 @@ struct oprofile_cpu_buffer {
DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
-void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
+void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf);
/* transient events for the CPU buffer -> event buffer */
#define CPU_IS_KERNEL 1
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 8d692a5c8e7..d962ba0dd87 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -19,16 +19,16 @@
#include <linux/dcookies.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
-
+
#include "oprof.h"
#include "event_buffer.h"
#include "oprofile_stats.h"
DEFINE_MUTEX(buffer_mutex);
-
+
static unsigned long buffer_opened;
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
-static unsigned long * event_buffer;
+static unsigned long *event_buffer;
static unsigned long buffer_size;
static unsigned long buffer_watershed;
static size_t buffer_pos;
@@ -66,7 +66,7 @@ void wake_up_buffer_waiter(void)
mutex_unlock(&buffer_mutex);
}
-
+
int alloc_event_buffer(void)
{
int err = -ENOMEM;
@@ -76,13 +76,13 @@ int alloc_event_buffer(void)
buffer_size = fs_buffer_size;
buffer_watershed = fs_buffer_watershed;
spin_unlock_irqrestore(&oprofilefs_lock, flags);
-
+
if (buffer_watershed >= buffer_size)
return -EINVAL;
-
+
event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
if (!event_buffer)
- goto out;
+ goto out;
err = 0;
out:
@@ -97,8 +97,8 @@ void free_event_buffer(void)
event_buffer = NULL;
}
-
-static int event_buffer_open(struct inode * inode, struct file * file)
+
+static int event_buffer_open(struct inode *inode, struct file *file)
{
int err = -EPERM;
@@ -116,14 +116,14 @@ static int event_buffer_open(struct inode * inode, struct file * file)
file->private_data = dcookie_register();
if (!file->private_data)
goto out;
-
+
if ((err = oprofile_setup()))
goto fail;
/* NB: the actual start happens from userspace
* echo 1 >/dev/oprofile/enable
*/
-
+
return 0;
fail:
@@ -134,7 +134,7 @@ out:
}
-static int event_buffer_release(struct inode * inode, struct file * file)
+static int event_buffer_release(struct inode *inode, struct file *file)
{
oprofile_stop();
oprofile_shutdown();
@@ -146,8 +146,8 @@ static int event_buffer_release(struct inode * inode, struct file * file)
}
-static ssize_t event_buffer_read(struct file * file, char __user * buf,
- size_t count, loff_t * offset)
+static ssize_t event_buffer_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
{
int retval = -EINVAL;
size_t const max = buffer_size * sizeof(unsigned long);
@@ -172,18 +172,18 @@ static ssize_t event_buffer_read(struct file * file, char __user * buf,
retval = -EFAULT;
count = buffer_pos * sizeof(unsigned long);
-
+
if (copy_to_user(buf, event_buffer, count))
goto out;
retval = count;
buffer_pos = 0;
-
+
out:
mutex_unlock(&buffer_mutex);
return retval;
}
-
+
const struct file_operations event_buffer_fops = {
.open = event_buffer_open,
.release = event_buffer_release,
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 5076ed1ebd8..4e70749f8d1 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -10,13 +10,20 @@
#ifndef EVENT_BUFFER_H
#define EVENT_BUFFER_H
-#include <linux/types.h>
+#include <linux/types.h>
#include <asm/mutex.h>
-
+
int alloc_event_buffer(void);
void free_event_buffer(void);
-
+
+/**
+ * Add data to the event buffer.
+ * The data passed is free-form, but typically consists of
+ * file offsets, dcookies, context information, and ESCAPE codes.
+ */
+void add_event_entry(unsigned long data);
+
/* wake up the process sleeping on the event file */
void wake_up_buffer_waiter(void);
@@ -24,10 +31,10 @@ void wake_up_buffer_waiter(void);
#define NO_COOKIE 0UL
extern const struct file_operations event_buffer_fops;
-
+
/* mutex between sync_cpu_buffers() and the
* file reading code.
*/
extern struct mutex buffer_mutex;
-
+
#endif /* EVENT_BUFFER_H */
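An illustrative sketch of how the add_event_entry() declaration added above is used by the sync code, assuming the ESCAPE_CODE and CPU_SWITCH_CODE record markers that this header defines:

	/* sketch only: emit a CPU-switch record -- escape marker, record type, payload */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(cpu);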
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index 2c645170f06..cd375907f26 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -19,7 +19,7 @@
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprofile_stats.h"
-
+
struct oprofile_operations oprofile_ops;
unsigned long oprofile_started;
@@ -36,7 +36,7 @@ static int timer = 0;
int oprofile_setup(void)
{
int err;
-
+
mutex_lock(&start_mutex);
if ((err = alloc_cpu_buffers()))
@@ -44,10 +44,10 @@ int oprofile_setup(void)
if ((err = alloc_event_buffer()))
goto out1;
-
+
if (oprofile_ops.setup && (err = oprofile_ops.setup()))
goto out2;
-
+
/* Note even though this starts part of the
* profiling overhead, it's necessary to prevent
* us missing task deaths and eventually oopsing
@@ -74,7 +74,7 @@ post_sync:
is_setup = 1;
mutex_unlock(&start_mutex);
return 0;
-
+
out3:
if (oprofile_ops.shutdown)
oprofile_ops.shutdown();
@@ -92,17 +92,17 @@ out:
int oprofile_start(void)
{
int err = -EINVAL;
-
+
mutex_lock(&start_mutex);
-
+
if (!is_setup)
goto out;
- err = 0;
-
+ err = 0;
+
if (oprofile_started)
goto out;
-
+
oprofile_reset_stats();
if ((err = oprofile_ops.start()))
@@ -114,7 +114,7 @@ out:
return err;
}
-
+
/* echo 0>/dev/oprofile/enable */
void oprofile_stop(void)
{
@@ -204,13 +204,13 @@ static void __exit oprofile_exit(void)
oprofile_arch_exit();
}
-
+
module_init(oprofile_init);
module_exit(oprofile_exit);
module_param_named(timer, timer, int, 0644);
MODULE_PARM_DESC(timer, "force use of timer interrupt");
-
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Levon <levon@movementarian.org>");
MODULE_DESCRIPTION("OProfile system profiler");
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 18323650806..5df0c21a608 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -11,7 +11,7 @@
#define OPROF_H
int oprofile_setup(void);
-void oprofile_shutdown(void);
+void oprofile_shutdown(void);
int oprofilefs_register(void);
void oprofilefs_unregister(void);
@@ -20,20 +20,20 @@ int oprofile_start(void);
void oprofile_stop(void);
struct oprofile_operations;
-
+
extern unsigned long fs_buffer_size;
extern unsigned long fs_cpu_buffer_size;
extern unsigned long fs_buffer_watershed;
extern struct oprofile_operations oprofile_ops;
extern unsigned long oprofile_started;
extern unsigned long backtrace_depth;
-
+
struct super_block;
struct dentry;
-void oprofile_create_files(struct super_block * sb, struct dentry * root);
-void oprofile_timer_init(struct oprofile_operations * ops);
+void oprofile_create_files(struct super_block *sb, struct dentry *root);
+void oprofile_timer_init(struct oprofile_operations *ops);
int oprofile_set_backtrace(unsigned long depth);
-
+
#endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index ef953ba5ab6..cc106d503ac 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -13,18 +13,18 @@
#include "event_buffer.h"
#include "oprofile_stats.h"
#include "oprof.h"
-
+
unsigned long fs_buffer_size = 131072;
unsigned long fs_cpu_buffer_size = 8192;
unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
-static ssize_t depth_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
}
-static ssize_t depth_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
{
unsigned long val;
int retval;
@@ -49,8 +49,8 @@ static const struct file_operations depth_fops = {
.write = depth_write
};
-
-static ssize_t pointer_size_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+
+static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
}
@@ -61,24 +61,24 @@ static const struct file_operations pointer_size_fops = {
};
-static ssize_t cpu_type_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
}
-
-
+
+
static const struct file_operations cpu_type_fops = {
.read = cpu_type_read,
};
-
-
-static ssize_t enable_read(struct file * file, char __user * buf, size_t count, loff_t * offset)
+
+
+static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
}
-static ssize_t enable_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
{
unsigned long val;
int retval;
@@ -89,7 +89,7 @@ static ssize_t enable_write(struct file * file, char const __user * buf, size_t
retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval)
return retval;
-
+
if (val)
retval = oprofile_start();
else
@@ -100,14 +100,14 @@ static ssize_t enable_write(struct file * file, char const __user * buf, size_t
return count;
}
-
+
static const struct file_operations enable_fops = {
.read = enable_read,
.write = enable_write,
};
-static ssize_t dump_write(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+static ssize_t dump_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
{
wake_up_buffer_waiter();
return count;
@@ -117,8 +117,8 @@ static ssize_t dump_write(struct file * file, char const __user * buf, size_t co
static const struct file_operations dump_fops = {
.write = dump_write,
};
-
-void oprofile_create_files(struct super_block * sb, struct dentry * root)
+
+void oprofile_create_files(struct super_block *sb, struct dentry *root)
{
oprofilefs_create_file(sb, root, "enable", &enable_fops);
oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
@@ -126,7 +126,7 @@ void oprofile_create_files(struct super_block * sb, struct dentry * root)
oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
- oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
+ oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
oprofile_create_stats_files(sb, root);
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index f99b28e7b79..e1f6ce03705 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -11,17 +11,17 @@
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/threads.h>
-
+
#include "oprofile_stats.h"
#include "cpu_buffer.h"
-
+
struct oprofile_stat_struct oprofile_stats;
-
+
void oprofile_reset_stats(void)
{
- struct oprofile_cpu_buffer * cpu_buf;
+ struct oprofile_cpu_buffer *cpu_buf;
int i;
-
+
for_each_possible_cpu(i) {
cpu_buf = &per_cpu(cpu_buffer, i);
cpu_buf->sample_received = 0;
@@ -29,18 +29,18 @@ void oprofile_reset_stats(void)
cpu_buf->backtrace_aborted = 0;
cpu_buf->sample_invalid_eip = 0;
}
-
+
atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
atomic_set(&oprofile_stats.event_lost_overflow, 0);
}
-void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
+void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
{
- struct oprofile_cpu_buffer * cpu_buf;
- struct dentry * cpudir;
- struct dentry * dir;
+ struct oprofile_cpu_buffer *cpu_buf;
+ struct dentry *cpudir;
+ struct dentry *dir;
char buf[10];
int i;
@@ -52,7 +52,7 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
cpu_buf = &per_cpu(cpu_buffer, i);
snprintf(buf, 10, "cpu%d", i);
cpudir = oprofilefs_mkdir(sb, dir, buf);
-
+
/* Strictly speaking access to these ulongs is racy,
* but we can't simply lock them, and they are
* informational only.
@@ -66,7 +66,7 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip",
&cpu_buf->sample_invalid_eip);
}
-
+
oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
&oprofile_stats.sample_lost_no_mm);
oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 6d755a633f1..3da0d08dc1f 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -11,7 +11,7 @@
#define OPROFILE_STATS_H
#include <asm/atomic.h>
-
+
struct oprofile_stat_struct {
atomic_t sample_lost_no_mm;
atomic_t sample_lost_no_mapping;
@@ -20,14 +20,14 @@ struct oprofile_stat_struct {
};
extern struct oprofile_stat_struct oprofile_stats;
-
+
/* reset all stats to zero */
void oprofile_reset_stats(void);
-
+
struct super_block;
struct dentry;
-
+
/* create the stats/ dir */
-void oprofile_create_stats_files(struct super_block * sb, struct dentry * root);
+void oprofile_create_stats_files(struct super_block *sb, struct dentry *root);
#endif /* OPROFILE_STATS_H */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 8543cb26cf3..ddc4c59f02d 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -23,9 +23,9 @@
DEFINE_SPINLOCK(oprofilefs_lock);
-static struct inode * oprofilefs_get_inode(struct super_block * sb, int mode)
+static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
{
- struct inode * inode = new_inode(sb);
+ struct inode *inode = new_inode(sb);
if (inode) {
inode->i_mode = mode;
@@ -44,7 +44,7 @@ static struct super_operations s_ops = {
};
-ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset)
+ssize_t oprofilefs_str_to_user(char const *str, char __user *buf, size_t count, loff_t *offset)
{
return simple_read_from_buffer(buf, count, offset, str, strlen(str));
}
@@ -52,7 +52,7 @@ ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count
#define TMPBUFSIZE 50
-ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset)
+ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t count, loff_t *offset)
{
char tmpbuf[TMPBUFSIZE];
size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
@@ -62,7 +62,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
}
-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count)
+int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
{
char tmpbuf[TMPBUFSIZE];
unsigned long flags;
@@ -85,16 +85,16 @@ int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, siz
}
-static ssize_t ulong_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
- unsigned long * val = file->private_data;
+ unsigned long *val = file->private_data;
return oprofilefs_ulong_to_user(*val, buf, count, offset);
}
-static ssize_t ulong_write_file(struct file * file, char const __user * buf, size_t count, loff_t * offset)
+static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
{
- unsigned long * value = file->private_data;
+ unsigned long *value = file->private_data;
int retval;
if (*offset)
@@ -108,7 +108,7 @@ static ssize_t ulong_write_file(struct file * file, char const __user * buf, siz
}
-static int default_open(struct inode * inode, struct file * filp)
+static int default_open(struct inode *inode, struct file *filp)
{
if (inode->i_private)
filp->private_data = inode->i_private;
@@ -129,12 +129,12 @@ static const struct file_operations ulong_ro_fops = {
};
-static struct dentry * __oprofilefs_create_file(struct super_block * sb,
- struct dentry * root, char const * name, const struct file_operations * fops,
+static struct dentry *__oprofilefs_create_file(struct super_block *sb,
+ struct dentry *root, char const *name, const struct file_operations *fops,
int perm)
{
- struct dentry * dentry;
- struct inode * inode;
+ struct dentry *dentry;
+ struct inode *inode;
dentry = d_alloc_name(root, name);
if (!dentry)
@@ -150,10 +150,10 @@ static struct dentry * __oprofilefs_create_file(struct super_block * sb,
}
-int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
- char const * name, unsigned long * val)
+int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
+ char const *name, unsigned long *val)
{
- struct dentry * d = __oprofilefs_create_file(sb, root, name,
+ struct dentry *d = __oprofilefs_create_file(sb, root, name,
&ulong_fops, 0644);
if (!d)
return -EFAULT;
@@ -163,10 +163,10 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
}
-int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
- char const * name, unsigned long * val)
+int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
+ char const *name, unsigned long *val)
{
- struct dentry * d = __oprofilefs_create_file(sb, root, name,
+ struct dentry *d = __oprofilefs_create_file(sb, root, name,
&ulong_ro_fops, 0444);
if (!d)
return -EFAULT;
@@ -176,23 +176,23 @@ int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
}
-static ssize_t atomic_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset)
+static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
- atomic_t * val = file->private_data;
+ atomic_t *val = file->private_data;
return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
}
-
+
static const struct file_operations atomic_ro_fops = {
.read = atomic_read_file,
.open = default_open,
};
-
-int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
- char const * name, atomic_t * val)
+
+int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+ char const *name, atomic_t *val)
{
- struct dentry * d = __oprofilefs_create_file(sb, root, name,
+ struct dentry *d = __oprofilefs_create_file(sb, root, name,
&atomic_ro_fops, 0444);
if (!d)
return -EFAULT;
@@ -201,9 +201,9 @@ int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
return 0;
}
-
-int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
- char const * name, const struct file_operations * fops)
+
+int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
+ char const *name, const struct file_operations *fops)
{
if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
return -EFAULT;
@@ -211,8 +211,8 @@ int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
}
-int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
- char const * name, const struct file_operations * fops, int perm)
+int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
+ char const *name, const struct file_operations *fops, int perm)
{
if (!__oprofilefs_create_file(sb, root, name, fops, perm))
return -EFAULT;
@@ -220,11 +220,11 @@ int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
}
-struct dentry * oprofilefs_mkdir(struct super_block * sb,
- struct dentry * root, char const * name)
+struct dentry *oprofilefs_mkdir(struct super_block *sb,
+ struct dentry *root, char const *name)
{
- struct dentry * dentry;
- struct inode * inode;
+ struct dentry *dentry;
+ struct inode *inode;
dentry = d_alloc_name(root, name);
if (!dentry)
@@ -241,10 +241,10 @@ struct dentry * oprofilefs_mkdir(struct super_block * sb,
}
-static int oprofilefs_fill_super(struct super_block * sb, void * data, int silent)
+static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
{
- struct inode * root_inode;
- struct dentry * root_dentry;
+ struct inode *root_inode;
+ struct dentry *root_dentry;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 710a45f0d73..333f915568c 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -19,7 +19,7 @@
static int timer_notify(struct pt_regs *regs)
{
- oprofile_add_sample(regs, 0);
+ oprofile_add_sample(regs, 0);
return 0;
}
@@ -35,7 +35,7 @@ static void timer_stop(void)
}
-void __init oprofile_timer_init(struct oprofile_operations * ops)
+void __init oprofile_timer_init(struct oprofile_operations *ops)
{
ops->create_files = NULL;
ops->setup = NULL;
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 5ac207932fd..685d94e69d4 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -86,7 +86,7 @@ static int eisa_eeprom_open(struct inode *inode, struct file *file)
{
cycle_kernel_lock();
- if (file->f_mode & 2)
+ if (file->f_mode & FMODE_WRITE)
return -EINVAL;
return 0;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 8a846adf1dc..96f3bdf0ec4 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2791,6 +2791,7 @@ enum parport_pc_pci_cards {
oxsemi_952,
oxsemi_954,
oxsemi_840,
+ oxsemi_pcie_pport,
aks_0100,
mobility_pp,
netmos_9705,
@@ -2868,6 +2869,7 @@ static struct parport_pc_pci {
/* oxsemi_952 */ { 1, { { 0, 1 }, } },
/* oxsemi_954 */ { 1, { { 0, -1 }, } },
/* oxsemi_840 */ { 1, { { 0, 1 }, } },
+ /* oxsemi_pcie_pport */ { 1, { { 0, 1 }, } },
/* aks_0100 */ { 1, { { 0, -1 }, } },
/* mobility_pp */ { 1, { { 0, 1 }, } },
/* netmos_9705 */ { 1, { { 0, -1 }, } }, /* untested */
@@ -2928,7 +2930,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
{ 0x1409, 0x7268, 0x1409, 0x0103, 0, 0, timedia_4008a },
{ 0x1409, 0x7268, 0x1409, 0x0104, 0, 0, timedia_4018 },
{ 0x1409, 0x7268, 0x1409, 0x9018, 0, 0, timedia_9018a },
- { 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp },
{ PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_2P_EPP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_2p_epp },
{ PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_1P_ECP,
@@ -2946,8 +2947,25 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_840 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840_G,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0_G,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_G,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_U,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
{ PCI_VENDOR_ID_AKS, PCI_DEVICE_ID_AKS_ALADDINCARD,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, aks_0100 },
+ { 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp },
/* NetMos communication controllers */
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9705,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9705 },
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 8b29c307f1a..691b3adeb87 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -188,8 +188,7 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
return 0;
}
-static int __init
-dmar_parse_dev(struct dmar_drhd_unit *dmaru)
+static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
struct acpi_dmar_hardware_unit *drhd;
static int include_all;
@@ -277,14 +276,15 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
drhd = (struct acpi_dmar_hardware_unit *)header;
printk (KERN_INFO PREFIX
"DRHD (flags: 0x%08x)base: 0x%016Lx\n",
- drhd->flags, drhd->address);
+ drhd->flags, (unsigned long long)drhd->address);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
rmrr = (struct acpi_dmar_reserved_memory *)header;
printk (KERN_INFO PREFIX
"RMRR base: 0x%016Lx end: 0x%016Lx\n",
- rmrr->base_address, rmrr->end_address);
+ (unsigned long long)rmrr->base_address,
+ (unsigned long long)rmrr->end_address);
break;
}
}
@@ -328,7 +328,7 @@ parse_dmar_table(void)
if (!dmar)
return -ENODEV;
- if (dmar->width < PAGE_SHIFT_4K - 1) {
+ if (dmar->width < PAGE_SHIFT - 1) {
printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
return -EINVAL;
}
@@ -460,8 +460,8 @@ void __init detect_intel_iommu(void)
ret = dmar_table_detect();
-#ifdef CONFIG_DMAR
{
+#ifdef CONFIG_INTR_REMAP
struct acpi_table_dmar *dmar;
/*
* for now we will disable dma-remapping when interrupt
@@ -470,29 +470,17 @@ void __init detect_intel_iommu(void)
* is added, we will not need this any more.
*/
dmar = (struct acpi_table_dmar *) dmar_tbl;
- if (ret && cpu_has_x2apic && dmar->flags & 0x1) {
+ if (ret && cpu_has_x2apic && dmar->flags & 0x1)
printk(KERN_INFO
"Queued invalidation will be enabled to support "
"x2apic and Intr-remapping.\n");
- printk(KERN_INFO
- "Disabling IOMMU detection, because of missing "
- "queued invalidation support for IOTLB "
- "invalidation\n");
- printk(KERN_INFO
- "Use \"nox2apic\", if you want to use Intel "
- " IOMMU for DMA-remapping and don't care about "
- " x2apic support\n");
-
- dmar_disabled = 1;
- goto end;
- }
-
+#endif
+#ifdef CONFIG_DMAR
if (ret && !no_iommu && !iommu_detected && !swiotlb &&
!dmar_disabled)
iommu_detected = 1;
- }
-end:
#endif
+ }
dmar_tbl = NULL;
}
@@ -510,7 +498,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->seq_id = iommu_allocated++;
- iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
+ iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
if (!iommu->reg) {
printk(KERN_ERR "IOMMU: can't map the region\n");
goto error;
@@ -521,8 +509,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
cap_max_fault_reg_offset(iommu->cap));
- map_size = PAGE_ALIGN_4K(map_size);
- if (map_size > PAGE_SIZE_4K) {
+ map_size = VTD_PAGE_ALIGN(map_size);
+ if (map_size > VTD_PAGE_SIZE) {
iounmap(iommu->reg);
iommu->reg = ioremap(drhd->reg_base_addr, map_size);
if (!iommu->reg) {
@@ -533,8 +521,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
ver = readl(iommu->reg + DMAR_VER_REG);
pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
- drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
- iommu->cap, iommu->ecap);
+ (unsigned long long)drhd->reg_base_addr,
+ DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
+ (unsigned long long)iommu->cap,
+ (unsigned long long)iommu->ecap);
spin_lock_init(&iommu->register_lock);
@@ -587,11 +577,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
hw = qi->desc;
- spin_lock(&qi->q_lock);
+ spin_lock_irqsave(&qi->q_lock, flags);
while (qi->free_cnt < 3) {
- spin_unlock(&qi->q_lock);
+ spin_unlock_irqrestore(&qi->q_lock, flags);
cpu_relax();
- spin_lock(&qi->q_lock);
+ spin_lock_irqsave(&qi->q_lock, flags);
}
index = qi->free_head;
@@ -612,15 +602,22 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
qi->free_head = (qi->free_head + 2) % QI_LENGTH;
qi->free_cnt -= 2;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ spin_lock(&iommu->register_lock);
/*
* update the HW tail register indicating the presence of
* new descriptors.
*/
writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ spin_unlock(&iommu->register_lock);
while (qi->desc_status[wait_index] != QI_DONE) {
+ /*
+ * We leave interrupts disabled to prevent the interrupt context
+ * from queueing another cmd while a cmd is already submitted
+ * and waiting for completion on this CPU. This is to avoid
+ * a deadlock where the interrupt context can wait indefinitely
+ * for free slots in the queue.
+ */
spin_unlock(&qi->q_lock);
cpu_relax();
spin_lock(&qi->q_lock);
@@ -629,7 +626,7 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
qi->desc_status[index] = QI_DONE;
reclaim_free_desc(qi);
- spin_unlock(&qi->q_lock);
+ spin_unlock_irqrestore(&qi->q_lock, flags);
}
/*
@@ -645,6 +642,62 @@ void qi_global_iec(struct intel_iommu *iommu)
qi_submit_sync(&desc, iommu);
}
+int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+ u64 type, int non_present_entry_flush)
+{
+
+ struct qi_desc desc;
+
+ if (non_present_entry_flush) {
+ if (!cap_caching_mode(iommu->cap))
+ return 1;
+ else
+ did = 0;
+ }
+
+ desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
+ | QI_CC_GRAN(type) | QI_CC_TYPE;
+ desc.high = 0;
+
+ qi_submit_sync(&desc, iommu);
+
+ return 0;
+
+}
+
+int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type,
+ int non_present_entry_flush)
+{
+ u8 dw = 0, dr = 0;
+
+ struct qi_desc desc;
+ int ih = 0;
+
+ if (non_present_entry_flush) {
+ if (!cap_caching_mode(iommu->cap))
+ return 1;
+ else
+ did = 0;
+ }
+
+ if (cap_write_drain(iommu->cap))
+ dw = 1;
+
+ if (cap_read_drain(iommu->cap))
+ dr = 1;
+
+ desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
+ | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
+ desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
+ | QI_IOTLB_AM(size_order);
+
+ qi_submit_sync(&desc, iommu);
+
+ return 0;
+
+}
+
/*
* Enable Queued Invalidation interface. This is a must to support
* interrupt-remapping. Also used by DMA-remapping, which replaces
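The qi_submit_sync() lock changes above follow one pattern: the queue lock is taken with interrupts disabled for the whole submission, so an interrupt handler on this CPU cannot queue a second descriptor and deadlock waiting for free slots; the register lock can then be a plain spin_lock because interrupts are already off. A condensed sketch of that pattern, using the names from the hunks above:

	unsigned long flags;

	spin_lock_irqsave(&qi->q_lock, flags);
	/* ... reserve two slots, fill in the descriptor and its wait descriptor ... */
	spin_lock(&iommu->register_lock);	/* irqs already disabled */
	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
	spin_unlock(&iommu->register_lock);
	/* ... poll for QI_DONE, dropping q_lock around cpu_relax() ... */
	spin_unlock_irqrestore(&qi->q_lock, flags);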
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 5a58b075dd8..f9e244da30a 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -50,9 +50,6 @@
#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
-/* name size which is used for entries in pcihpfs */
-#define SLOT_NAME_SIZE 20 /* {_SUN} */
-
struct acpiphp_bridge;
struct acpiphp_slot;
@@ -63,9 +60,13 @@ struct slot {
struct hotplug_slot *hotplug_slot;
struct acpiphp_slot *acpi_slot;
struct hotplug_slot_info info;
- char name[SLOT_NAME_SIZE];
};
+static inline const char *slot_name(struct slot *slot)
+{
+ return hotplug_slot_name(slot->hotplug_slot);
+}
+
/*
* struct acpiphp_bridge - PCI bridge information
*
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 0e496e866a8..95b536a23d2 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -44,6 +44,9 @@
#define MY_NAME "acpiphp"
+/* name size which is used for entries in pcihpfs */
+#define SLOT_NAME_SIZE 21 /* {_SUN} */
+
static int debug;
int acpiphp_debug;
@@ -84,7 +87,6 @@ static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
.get_adapter_status = get_adapter_status,
};
-
/**
* acpiphp_register_attention - set attention LED callback
* @info: must be completely filled with LED callbacks
@@ -136,7 +138,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
/* enable the specified slot */
return acpiphp_enable_slot(slot->acpi_slot);
@@ -154,7 +156,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
struct slot *slot = hotplug_slot->private;
int retval;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
/* disable the specified slot */
retval = acpiphp_disable_slot(slot->acpi_slot);
@@ -177,7 +179,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
{
int retval = -ENODEV;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
if (attention_info && try_module_get(attention_info->owner)) {
retval = attention_info->set_attn(hotplug_slot, status);
@@ -200,7 +202,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = acpiphp_get_power_status(slot->acpi_slot);
@@ -222,7 +224,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int retval = -EINVAL;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
if (attention_info && try_module_get(attention_info->owner)) {
retval = attention_info->get_attn(hotplug_slot, value);
@@ -245,7 +247,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = acpiphp_get_latch_status(slot->acpi_slot);
@@ -265,7 +267,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = acpiphp_get_adapter_status(slot->acpi_slot);
@@ -299,7 +301,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
kfree(slot->hotplug_slot);
kfree(slot);
@@ -310,6 +312,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
{
struct slot *slot;
int retval = -ENOMEM;
+ char name[SLOT_NAME_SIZE];
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
@@ -321,8 +324,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
slot->hotplug_slot->info = &slot->info;
- slot->hotplug_slot->name = slot->name;
-
slot->hotplug_slot->private = slot;
slot->hotplug_slot->release = &release_slot;
slot->hotplug_slot->ops = &acpi_hotplug_slot_ops;
@@ -336,11 +337,12 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
acpiphp_slot->slot = slot;
- snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun);
+ snprintf(name, SLOT_NAME_SIZE, "%u", slot->acpi_slot->sun);
retval = pci_hp_register(slot->hotplug_slot,
acpiphp_slot->bridge->pci_bus,
- acpiphp_slot->device);
+ acpiphp_slot->device,
+ name);
if (retval == -EBUSY)
goto error_hpslot;
if (retval) {
@@ -348,7 +350,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
goto error_hpslot;
}
- info("Slot [%s] registered\n", slot->hotplug_slot->name);
+ info("Slot [%s] registered\n", slot_name(slot));
return 0;
error_hpslot:
@@ -365,7 +367,7 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
struct slot *slot = acpiphp_slot->slot;
int retval = 0;
- info ("Slot [%s] unregistered\n", slot->hotplug_slot->name);
+ info("Slot [%s] unregistered\n", slot_name(slot));
retval = pci_hp_deregister(slot->hotplug_slot);
if (retval)
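The acpiphp conversion above is representative of the slot-name rework across this series: drivers no longer embed a name buffer in their slot structures; they format the name into a stack buffer, pass it to pci_hp_register(), and read it back through hotplug_slot_name() (wrapped here as slot_name()). A condensed sketch of the new registration flow, using the acpiphp names shown above:

	char name[SLOT_NAME_SIZE];

	snprintf(name, SLOT_NAME_SIZE, "%u", slot->acpi_slot->sun);
	retval = pci_hp_register(slot->hotplug_slot,
				 acpiphp_slot->bridge->pci_bus,
				 acpiphp_slot->device, name);
	if (!retval)
		info("Slot [%s] registered\n", slot_name(slot));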
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a3e4705dd8f..955aae4071f 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -169,7 +169,9 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
}
-
+static struct acpi_dock_ops acpiphp_dock_ops = {
+ .handler = handle_hotplug_event_func,
+};
/* callback routine to register each ACPI PCI slot object */
static acpi_status
@@ -180,7 +182,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
struct acpiphp_func *newfunc;
acpi_handle tmp;
acpi_status status = AE_OK;
- unsigned long adr, sun;
+ unsigned long long adr, sun;
int device, function, retval;
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
@@ -285,7 +287,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
*/
newfunc->flags &= ~FUNC_HAS_EJ0;
if (register_hotplug_dock_device(handle,
- handle_hotplug_event_func, newfunc))
+ &acpiphp_dock_ops, newfunc))
dbg("failed to register dock device\n");
/* we need to be notified when dock events happen
@@ -528,7 +530,7 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
{
acpi_status status;
acpi_handle dummy_handle;
- unsigned long tmp;
+ unsigned long long tmp;
int device, function;
struct pci_dev *dev;
struct pci_bus *pci_bus = context;
@@ -573,7 +575,7 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
static int add_bridge(acpi_handle handle)
{
acpi_status status;
- unsigned long tmp;
+ unsigned long long tmp;
int seg, bus;
acpi_handle dummy_handle;
struct pci_bus *pci_bus;
@@ -767,7 +769,7 @@ static int get_gsi_base(acpi_handle handle, u32 *gsi_base)
{
acpi_status status;
int result = -1;
- unsigned long gsb;
+ unsigned long long gsb;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *obj;
void *table;
@@ -808,7 +810,7 @@ static acpi_status
ioapic_add(acpi_handle handle, u32 lvl, void *context, void **rv)
{
acpi_status status;
- unsigned long sta;
+ unsigned long long sta;
acpi_handle tmp;
struct pci_dev *pdev;
u32 gsi_base;
@@ -872,7 +874,7 @@ static acpi_status
ioapic_remove(acpi_handle handle, u32 lvl, void *context, void **rv)
{
acpi_status status;
- unsigned long sta;
+ unsigned long long sta;
acpi_handle tmp;
u32 gsi_base;
struct acpiphp_ioapic *pos, *n, *ioapic = NULL;
@@ -1264,7 +1266,7 @@ static int disable_device(struct acpiphp_slot *slot)
static unsigned int get_slot_status(struct acpiphp_slot *slot)
{
acpi_status status;
- unsigned long sta = 0;
+ unsigned long long sta = 0;
u32 dvid;
struct list_head *l;
struct acpiphp_func *func;
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 2b7c45e3937..b291ee68b4f 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -183,7 +183,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
union acpi_object args[2];
struct acpi_object_list params = { .pointer = args, .count = 2 };
acpi_status stat;
- unsigned long rc;
+ unsigned long long rc;
union apci_descriptor *ibm_slot;
ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot));
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index d9769b30be9..9fff878cf02 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
/* PICMG 2.1 R2.0 HS CSR bits: */
#define HS_CSR_INS 0x0080
@@ -69,6 +70,11 @@ struct cpci_hp_controller {
struct cpci_hp_controller_ops *ops;
};
+static inline const char *slot_name(struct slot *slot)
+{
+ return hotplug_slot_name(slot->hotplug_slot);
+}
+
extern int cpci_hp_register_controller(struct cpci_hp_controller *controller);
extern int cpci_hp_unregister_controller(struct cpci_hp_controller *controller);
extern int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last);
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 935947991dc..de94f4feef8 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -108,7 +108,7 @@ enable_slot(struct hotplug_slot *hotplug_slot)
struct slot *slot = hotplug_slot->private;
int retval = 0;
- dbg("%s - physical_slot = %s", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s", __func__, slot_name(slot));
if (controller->ops->set_power)
retval = controller->ops->set_power(slot, 1);
@@ -121,25 +121,23 @@ disable_slot(struct hotplug_slot *hotplug_slot)
struct slot *slot = hotplug_slot->private;
int retval = 0;
- dbg("%s - physical_slot = %s", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s", __func__, slot_name(slot));
down_write(&list_rwsem);
/* Unconfigure device */
- dbg("%s - unconfiguring slot %s",
- __func__, slot->hotplug_slot->name);
+ dbg("%s - unconfiguring slot %s", __func__, slot_name(slot));
if ((retval = cpci_unconfigure_slot(slot))) {
err("%s - could not unconfigure slot %s",
- __func__, slot->hotplug_slot->name);
+ __func__, slot_name(slot));
goto disable_error;
}
- dbg("%s - finished unconfiguring slot %s",
- __func__, slot->hotplug_slot->name);
+ dbg("%s - finished unconfiguring slot %s", __func__, slot_name(slot));
/* Clear EXT (by setting it) */
if (cpci_clear_ext(slot)) {
err("%s - could not clear EXT for slot %s",
- __func__, slot->hotplug_slot->name);
+ __func__, slot_name(slot));
retval = -ENODEV;
goto disable_error;
}
@@ -214,7 +212,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
struct slot *slot = hotplug_slot->private;
kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot->name);
kfree(slot->hotplug_slot);
if (slot->dev)
pci_dev_put(slot->dev);
@@ -222,12 +219,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
}
#define SLOT_NAME_SIZE 6
-static void
-make_slot_name(struct slot *slot)
-{
- snprintf(slot->hotplug_slot->name,
- SLOT_NAME_SIZE, "%02x:%02x", slot->bus->number, slot->number);
-}
int
cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
@@ -235,7 +226,7 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
struct slot *slot;
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
- char *name;
+ char name[SLOT_NAME_SIZE];
int status = -ENOMEM;
int i;
@@ -262,34 +253,31 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
goto error_hpslot;
hotplug_slot->info = info;
- name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
- if (!name)
- goto error_info;
- hotplug_slot->name = name;
-
slot->bus = bus;
slot->number = i;
slot->devfn = PCI_DEVFN(i, 0);
+ snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i);
+
hotplug_slot->private = slot;
hotplug_slot->release = &release_slot;
- make_slot_name(slot);
hotplug_slot->ops = &cpci_hotplug_slot_ops;
/*
* Initialize the slot info structure with some known
* good values.
*/
- dbg("initializing slot %s", slot->hotplug_slot->name);
+ dbg("initializing slot %s", name);
info->power_status = cpci_get_power_status(slot);
info->attention_status = cpci_get_attention_status(slot);
- dbg("registering slot %s", slot->hotplug_slot->name);
- status = pci_hp_register(slot->hotplug_slot, bus, i);
+ dbg("registering slot %s", name);
+ status = pci_hp_register(slot->hotplug_slot, bus, i, name);
if (status) {
err("pci_hp_register failed with error %d", status);
- goto error_name;
+ goto error_info;
}
+ dbg("slot registered with name: %s", slot_name(slot));
/* Add slot to our internal list */
down_write(&list_rwsem);
@@ -298,8 +286,6 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
up_write(&list_rwsem);
}
return 0;
-error_name:
- kfree(name);
error_info:
kfree(info);
error_hpslot:
@@ -327,7 +313,7 @@ cpci_hp_unregister_bus(struct pci_bus *bus)
list_del(&slot->slot_list);
slots--;
- dbg("deregistering slot %s", slot->hotplug_slot->name);
+ dbg("deregistering slot %s", slot_name(slot));
status = pci_hp_deregister(slot->hotplug_slot);
if (status) {
err("pci_hp_deregister failed with error %d",
@@ -379,11 +365,10 @@ init_slots(int clear_ins)
return -1;
}
list_for_each_entry(slot, &slot_list, slot_list) {
- dbg("%s - looking at slot %s",
- __func__, slot->hotplug_slot->name);
+ dbg("%s - looking at slot %s", __func__, slot_name(slot));
if (clear_ins && cpci_check_and_clear_ins(slot))
dbg("%s - cleared INS for slot %s",
- __func__, slot->hotplug_slot->name);
+ __func__, slot_name(slot));
dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0));
if (dev) {
if (update_adapter_status(slot->hotplug_slot, 1))
@@ -414,8 +399,7 @@ check_slots(void)
}
extracted = inserted = 0;
list_for_each_entry(slot, &slot_list, slot_list) {
- dbg("%s - looking at slot %s",
- __func__, slot->hotplug_slot->name);
+ dbg("%s - looking at slot %s", __func__, slot_name(slot));
if (cpci_check_and_clear_ins(slot)) {
/*
* Some broken hardware (e.g. PLX 9054AB) asserts
@@ -423,35 +407,34 @@ check_slots(void)
*/
if (slot->dev) {
warn("slot %s already inserted",
- slot->hotplug_slot->name);
+ slot_name(slot));
inserted++;
continue;
}
/* Process insertion */
- dbg("%s - slot %s inserted",
- __func__, slot->hotplug_slot->name);
+ dbg("%s - slot %s inserted", __func__, slot_name(slot));
/* GSM, debug */
hs_csr = cpci_get_hs_csr(slot);
dbg("%s - slot %s HS_CSR (1) = %04x",
- __func__, slot->hotplug_slot->name, hs_csr);
+ __func__, slot_name(slot), hs_csr);
/* Configure device */
dbg("%s - configuring slot %s",
- __func__, slot->hotplug_slot->name);
+ __func__, slot_name(slot));
if (cpci_configure_slot(slot)) {
err("%s - could not configure slot %s",
- __func__, slot->hotplug_slot->name);
+ __func__, slot_name(slot));
continue;
}
dbg("%s - finished configuring slot %s",
- __func__, slot->hotplug_slot->name);
+ __func__, slot_name(slot));
/* GSM, debug */
hs_csr = cpci_get_hs_csr(slot);
dbg("%s - slot %s HS_CSR (2) = %04x",
- __func__, slot->hotplug_slot->name, hs_csr);
+ __func__, slot_name(slot), hs_csr);
if (update_latch_status(slot->hotplug_slot, 1))
warn("failure to update latch file");
@@ -464,18 +447,18 @@ check_slots(void)
/* GSM, debug */
hs_csr = cpci_get_hs_csr(slot);
dbg("%s - slot %s HS_CSR (3) = %04x",
- __func__, slot->hotplug_slot->name, hs_csr);
+ __func__, slot_name(slot), hs_csr);
inserted++;
} else if (cpci_check_ext(slot)) {
/* Process extraction request */
dbg("%s - slot %s extracted",
- __func__, slot->hotplug_slot->name);
+ __func__, slot_name(slot));
/* GSM, debug */
hs_csr = cpci_get_hs_csr(slot);
dbg("%s - slot %s HS_CSR = %04x",
- __func__, slot->hotplug_slot->name, hs_csr);
+ __func__, slot_name(slot), hs_csr);
if (!slot->extracting) {
if (update_latch_status(slot->hotplug_slot, 0)) {
@@ -493,7 +476,7 @@ check_slots(void)
* bother trying to tell the driver or not?
*/
err("card in slot %s was improperly removed",
- slot->hotplug_slot->name);
+ slot_name(slot));
if (update_adapter_status(slot->hotplug_slot, 0))
warn("failure to update adapter file");
slot->extracting = 0;
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index df82b95e287..829c327cfb5 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -209,7 +209,7 @@ int cpci_led_on(struct slot* slot)
hs_cap + 2,
hs_csr)) {
err("Could not set LOO for slot %s",
- slot->hotplug_slot->name);
+ hotplug_slot_name(slot->hotplug_slot));
return -ENODEV;
}
}
@@ -238,7 +238,7 @@ int cpci_led_off(struct slot* slot)
hs_cap + 2,
hs_csr)) {
err("Could not clear LOO for slot %s",
- slot->hotplug_slot->name);
+ hotplug_slot_name(slot->hotplug_slot));
return -ENODEV;
}
}
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index b1decfa88b7..afaf8f69f73 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -449,6 +449,11 @@ extern u8 cpqhp_disk_irq;
/* inline functions */
+static inline char *slot_name(struct slot *slot)
+{
+ return hotplug_slot_name(slot->hotplug_slot);
+}
+
/*
* return_resource
*
@@ -696,14 +701,6 @@ static inline int get_presence_status(struct controller *ctrl, struct slot *slot
return presence_save;
}
-#define SLOT_NAME_SIZE 10
-
-static inline void make_slot_name(char *buffer, int buffer_size, struct slot *slot)
-{
- snprintf(buffer, buffer_size, "%d", slot->number);
-}
-
-
static inline int wait_for_ctrl_irq(struct controller *ctrl)
{
DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 54defec51d0..724d42c4adb 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -315,14 +315,15 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot->name);
kfree(slot->hotplug_slot);
kfree(slot);
}
+#define SLOT_NAME_SIZE 10
+
static int ctrl_slot_setup(struct controller *ctrl,
void __iomem *smbios_start,
void __iomem *smbios_table)
@@ -335,6 +336,7 @@ static int ctrl_slot_setup(struct controller *ctrl,
u8 slot_number;
u8 ctrl_slot;
u32 tempdword;
+ char name[SLOT_NAME_SIZE];
void __iomem *slot_entry= NULL;
int result = -ENOMEM;
@@ -363,16 +365,12 @@ static int ctrl_slot_setup(struct controller *ctrl,
if (!hotplug_slot->info)
goto error_hpslot;
hotplug_slot_info = hotplug_slot->info;
- hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
-
- if (!hotplug_slot->name)
- goto error_info;
slot->ctrl = ctrl;
slot->bus = ctrl->bus;
slot->device = slot_device;
slot->number = slot_number;
- dbg("slot->number = %d\n", slot->number);
+ dbg("slot->number = %u\n", slot->number);
slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
slot_entry);
@@ -418,9 +416,9 @@ static int ctrl_slot_setup(struct controller *ctrl,
/* register this slot with the hotplug pci core */
hotplug_slot->release = &release_slot;
hotplug_slot->private = slot;
- make_slot_name(hotplug_slot->name, SLOT_NAME_SIZE, slot);
+ snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
-
+
hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
hotplug_slot_info->attention_status =
cpq_get_attention_status(ctrl, slot);
@@ -436,10 +434,11 @@ static int ctrl_slot_setup(struct controller *ctrl,
slot_number);
result = pci_hp_register(hotplug_slot,
ctrl->pci_dev->subordinate,
- slot->device);
+ slot->device,
+ name);
if (result) {
err("pci_hp_register failed with error %d\n", result);
- goto error_name;
+ goto error_info;
}
slot->next = ctrl->slot;
@@ -451,8 +450,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
}
return 0;
-error_name:
- kfree(hotplug_slot->name);
error_info:
kfree(hotplug_slot_info);
error_hpslot:
@@ -638,7 +635,7 @@ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status)
u8 device;
u8 function;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
return -ENODEV;
@@ -665,7 +662,7 @@ static int process_SI(struct hotplug_slot *hotplug_slot)
u8 device;
u8 function;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
return -ENODEV;
@@ -697,7 +694,7 @@ static int process_SS(struct hotplug_slot *hotplug_slot)
u8 device;
u8 function;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
return -ENODEV;
@@ -720,7 +717,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
return cpqhp_hardware_test(ctrl, value);
}
@@ -731,7 +728,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = get_slot_enabled(ctrl, slot);
return 0;
@@ -742,7 +739,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = cpq_get_attention_status(ctrl, slot);
return 0;
@@ -753,7 +750,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = cpq_get_latch_status(ctrl, slot);
@@ -765,7 +762,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = get_presence_status(ctrl, slot);
@@ -777,7 +774,7 @@ static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = ctrl->speed_capability;
@@ -789,7 +786,7 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = ctrl->speed;
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index ef041ca91c2..a60a2529099 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1139,7 +1139,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
for(slot = ctrl->slot; slot; slot = slot->next) {
if (slot->device == (hp_slot + ctrl->slot_device_offset))
continue;
- if (!slot->hotplug_slot && !slot->hotplug_slot->info)
+ if (!slot->hotplug_slot || !slot->hotplug_slot->info)
continue;
if (slot->hotplug_slot->info->adapter_status == 0)
continue;
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index 146ca9cd156..3a2637a0093 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -66,10 +66,10 @@ struct dummy_slot {
struct pci_dev *dev;
struct work_struct remove_work;
unsigned long removed;
- char name[8];
};
static int debug;
+static int dup_slots;
static LIST_HEAD(slot_list);
static struct workqueue_struct *dummyphp_wq;
@@ -96,10 +96,13 @@ static void dummy_release(struct hotplug_slot *slot)
kfree(dslot);
}
+#define SLOT_NAME_SIZE 8
+
static int add_slot(struct pci_dev *dev)
{
struct dummy_slot *dslot;
struct hotplug_slot *slot;
+ char name[SLOT_NAME_SIZE];
int retval = -ENOMEM;
static int count = 1;
@@ -119,19 +122,22 @@ static int add_slot(struct pci_dev *dev)
if (!dslot)
goto error_info;
- slot->name = dslot->name;
- snprintf(slot->name, sizeof(dslot->name), "fake%d", count++);
- dbg("slot->name = %s\n", slot->name);
+ if (dup_slots)
+ snprintf(name, SLOT_NAME_SIZE, "fake");
+ else
+ snprintf(name, SLOT_NAME_SIZE, "fake%d", count++);
+ dbg("slot->name = %s\n", name);
slot->ops = &dummy_hotplug_slot_ops;
slot->release = &dummy_release;
slot->private = dslot;
- retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn));
+ retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn), name);
if (retval) {
err("pci_hp_register failed with error %d\n", retval);
goto error_dslot;
}
+ dbg("slot->name = %s\n", hotplug_slot_name(slot));
dslot->slot = slot;
dslot->dev = pci_dev_get(dev);
list_add (&dslot->node, &slot_list);
@@ -167,10 +173,11 @@ static void remove_slot(struct dummy_slot *dslot)
{
int retval;
- dbg("removing slot %s\n", dslot->slot->name);
+ dbg("removing slot %s\n", hotplug_slot_name(dslot->slot));
retval = pci_hp_deregister(dslot->slot);
if (retval)
- err("Problem unregistering a slot %s\n", dslot->slot->name);
+ err("Problem unregistering a slot %s\n",
+ hotplug_slot_name(dslot->slot));
}
/* called from the single-threaded workqueue handler to remove a slot */
@@ -308,7 +315,7 @@ static int disable_slot(struct hotplug_slot *slot)
return -ENODEV;
dslot = slot->private;
- dbg("%s - physical_slot = %s\n", __func__, slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(slot));
for (func = 7; func >= 0; func--) {
dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func);
@@ -373,4 +380,5 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-
+module_param(dup_slots, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dup_slots, "Force duplicate slot names for debugging");
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index 612d9630150..a8d391a4957 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -707,17 +707,16 @@ struct slot {
u8 device;
u8 number;
u8 real_physical_slot_num;
- char name[100];
u32 capabilities;
u8 supported_speed;
u8 supported_bus_mode;
+ u8 flag; /* this is for disable slot and polling */
+ u8 ctlr_index;
struct hotplug_slot *hotplug_slot;
struct controller *ctrl;
struct pci_func *func;
u8 irq[4];
- u8 flag; /* this is for disable slot and polling */
int bit_mode; /* 0 = 32, 1 = 64 */
- u8 ctlr_index;
struct bus_info *bus_on;
struct list_head ibm_slot_list;
u8 status;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 8cfd1c4926c..c1abac8ab5c 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -587,11 +587,14 @@ static u8 calculate_first_slot (u8 slot_num)
return first_slot + 1;
}
+
+#define SLOT_NAME_SIZE 30
+
static char *create_file_name (struct slot * slot_cur)
{
struct opt_rio *opt_vg_ptr = NULL;
struct opt_rio_lo *opt_lo_ptr = NULL;
- static char str[30];
+ static char str[SLOT_NAME_SIZE];
int which = 0; /* rxe = 1, chassis = 0 */
u8 number = 1; /* either chassis or rxe # */
u8 first_slot = 1;
@@ -703,7 +706,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
slot = hotplug_slot->private;
kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot->name);
kfree(slot->hotplug_slot);
slot->ctrl = NULL;
slot->bus_on = NULL;
@@ -734,6 +736,7 @@ static int __init ebda_rsrc_controller (void)
struct bus_info *bus_info_ptr1, *bus_info_ptr2;
int rc;
struct slot *tmp_slot;
+ char name[SLOT_NAME_SIZE];
addr = hpc_list_ptr->phys_addr;
for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) {
@@ -897,12 +900,6 @@ static int __init ebda_rsrc_controller (void)
goto error_no_hp_info;
}
- hp_slot_ptr->name = kmalloc(30, GFP_KERNEL);
- if (!hp_slot_ptr->name) {
- rc = -ENOMEM;
- goto error_no_hp_name;
- }
-
tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL);
if (!tmp_slot) {
rc = -ENOMEM;
@@ -964,9 +961,9 @@ static int __init ebda_rsrc_controller (void)
} /* each hpc */
list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) {
- snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot));
+ snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot));
pci_hp_register(tmp_slot->hotplug_slot,
- pci_find_bus(0, tmp_slot->bus), tmp_slot->device);
+ pci_find_bus(0, tmp_slot->bus), tmp_slot->device, name);
}
print_ebda_hpc ();
@@ -976,8 +973,6 @@ static int __init ebda_rsrc_controller (void)
error:
kfree (hp_slot_ptr->private);
error_no_slot:
- kfree (hp_slot_ptr->name);
-error_no_hp_name:
kfree (hp_slot_ptr->info);
error_no_hp_info:
kfree (hp_slot_ptr);
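The ibmphp conversion above is representative of the whole series: the per-slot kmalloc'ed name goes away and a short-lived stack buffer is handed to pci_hp_register(), which is assumed to copy the string when it names the slot's kobject. A condensed sketch of the resulting pattern (error handling omitted):

char name[SLOT_NAME_SIZE];

snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot));
/* pci_hp_register() duplicates the string into the kobject, so the stack
 * buffer does not need to outlive this call */
pci_hp_register(tmp_slot->hotplug_slot, pci_find_bus(0, tmp_slot->bus),
                tmp_slot->device, name);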
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 2e6c4474644..535fce0f07f 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -37,6 +37,7 @@
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <asm/uaccess.h>
@@ -61,7 +62,7 @@ static int debug;
//////////////////////////////////////////////////////////////////
static LIST_HEAD(pci_hotplug_slot_list);
-static DEFINE_SPINLOCK(pci_hotplug_slot_list_lock);
+static DEFINE_MUTEX(pci_hp_mutex);
/* these strings match up with the values in pci_bus_speed */
static char *pci_bus_speed_strings[] = {
@@ -530,16 +531,12 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
struct hotplug_slot *slot;
struct list_head *tmp;
- spin_lock(&pci_hotplug_slot_list_lock);
list_for_each (tmp, &pci_hotplug_slot_list) {
slot = list_entry (tmp, struct hotplug_slot, slot_list);
- if (strcmp(slot->name, name) == 0)
- goto out;
+ if (strcmp(hotplug_slot_name(slot), name) == 0)
+ return slot;
}
- slot = NULL;
-out:
- spin_unlock(&pci_hotplug_slot_list_lock);
- return slot;
+ return NULL;
}
/**
@@ -547,13 +544,15 @@ out:
* @bus: bus this slot is on
* @slot: pointer to the &struct hotplug_slot to register
* @slot_nr: slot number
+ * @name: name registered with kobject core
*
* Registers a hotplug slot with the pci hotplug subsystem, which will allow
* userspace interaction to the slot.
*
* Returns 0 if successful, anything else for an error.
*/
-int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr)
+int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
+ const char *name)
{
int result;
struct pci_slot *pci_slot;
@@ -568,48 +567,29 @@ int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr)
return -EINVAL;
}
- /* Check if we have already registered a slot with the same name. */
- if (get_slot_from_name(slot->name))
- return -EEXIST;
+ mutex_lock(&pci_hp_mutex);
/*
* No problems if we call this interface from both ACPI_PCI_SLOT
* driver and call it here again. If we've already created the
* pci_slot, the interface will simply bump the refcount.
*/
- pci_slot = pci_create_slot(bus, slot_nr, slot->name);
- if (IS_ERR(pci_slot))
- return PTR_ERR(pci_slot);
-
- if (pci_slot->hotplug) {
- dbg("%s: already claimed\n", __func__);
- pci_destroy_slot(pci_slot);
- return -EBUSY;
+ pci_slot = pci_create_slot(bus, slot_nr, name, slot);
+ if (IS_ERR(pci_slot)) {
+ result = PTR_ERR(pci_slot);
+ goto out;
}
slot->pci_slot = pci_slot;
pci_slot->hotplug = slot;
- /*
- * Allow pcihp drivers to override the ACPI_PCI_SLOT name.
- */
- if (strcmp(kobject_name(&pci_slot->kobj), slot->name)) {
- result = kobject_rename(&pci_slot->kobj, slot->name);
- if (result) {
- pci_destroy_slot(pci_slot);
- return result;
- }
- }
-
- spin_lock(&pci_hotplug_slot_list_lock);
list_add(&slot->slot_list, &pci_hotplug_slot_list);
- spin_unlock(&pci_hotplug_slot_list_lock);
result = fs_add_slot(pci_slot);
kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
- dbg("Added slot %s to the list\n", slot->name);
-
-
+ dbg("Added slot %s to the list\n", name);
+out:
+ mutex_unlock(&pci_hp_mutex);
return result;
}
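pci_hp_register() now defers duplicate detection and renaming to pci_create_slot(), which gains the hotplug pointer. The prototype below is an assumption inferred from the call site in this hunk; the function itself lives in drivers/pci/slot.c and is not part of this excerpt:

/* Assumed prototype: a non-NULL hotplug argument lets the slot core reject a
 * slot that is already claimed and rename one created earlier by the ACPI
 * pci_slot driver, which is why the -EEXIST and kobject_rename() logic above
 * could be removed from this function. */
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
                                 const char *name,
                                 struct hotplug_slot *hotplug);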
@@ -630,21 +610,23 @@ int pci_hp_deregister(struct hotplug_slot *hotplug)
if (!hotplug)
return -ENODEV;
- temp = get_slot_from_name(hotplug->name);
- if (temp != hotplug)
+ mutex_lock(&pci_hp_mutex);
+ temp = get_slot_from_name(hotplug_slot_name(hotplug));
+ if (temp != hotplug) {
+ mutex_unlock(&pci_hp_mutex);
return -ENODEV;
+ }
- spin_lock(&pci_hotplug_slot_list_lock);
list_del(&hotplug->slot_list);
- spin_unlock(&pci_hotplug_slot_list_lock);
slot = hotplug->pci_slot;
fs_remove_slot(slot);
- dbg("Removed slot %s from the list\n", hotplug->name);
+ dbg("Removed slot %s from the list\n", hotplug_slot_name(hotplug));
hotplug->release(hotplug);
slot->hotplug = NULL;
pci_destroy_slot(slot);
+ mutex_unlock(&pci_hp_mutex);
return 0;
}
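Registration and deregistration now serialize on a single mutex rather than the removed list spinlock, presumably because the region they must protect can sleep. A sketch of the resulting critical section:

mutex_lock(&pci_hp_mutex);
/* name lookup, pci_create_slot()/pci_destroy_slot(), sysfs file setup and
 * the driver's release() callback all run in here and may sleep, which the
 * old spinlock could not permit */
mutex_unlock(&pci_hp_mutex);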
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index c367978bd7f..a4817a841fa 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -74,15 +74,13 @@ extern struct workqueue_struct *pciehp_wq;
struct slot {
u8 bus;
u8 device;
- u32 number;
u8 state;
- struct timer_list task_event;
u8 hp_slot;
+ u32 number;
struct controller *ctrl;
struct hpc_ops *hpc_ops;
struct hotplug_slot *hotplug_slot;
struct list_head slot_list;
- char name[SLOT_NAME_SIZE];
unsigned long last_emi_toggle;
struct delayed_work work; /* work for button event */
struct mutex lock;
@@ -112,6 +110,7 @@ struct controller {
struct timer_list poll_timer;
int cmd_busy;
unsigned int no_cmd_complete:1;
+ unsigned int link_active_reporting:1;
};
#define INT_BUTTON_IGNORE 0
@@ -175,6 +174,11 @@ int pciehp_enable_slot(struct slot *p_slot);
int pciehp_disable_slot(struct slot *p_slot);
int pcie_enable_notification(struct controller *ctrl);
+static inline const char *slot_name(struct slot *slot)
+{
+ return hotplug_slot_name(slot->hotplug_slot);
+}
+
static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
{
struct slot *slot;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index c748a19db89..62be1b59c74 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -185,7 +185,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
struct slot *slot = hotplug_slot->private;
ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
- __func__, hotplug_slot->name);
+ __func__, hotplug_slot_name(hotplug_slot));
kfree(hotplug_slot->info);
kfree(hotplug_slot);
@@ -196,7 +196,7 @@ static int init_slots(struct controller *ctrl)
struct slot *slot;
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
- int len, dup = 1;
+ char name[SLOT_NAME_SIZE];
int retval = -ENOMEM;
list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
@@ -210,41 +210,28 @@ static int init_slots(struct controller *ctrl)
/* register this slot with the hotplug pci core */
hotplug_slot->info = info;
- hotplug_slot->name = slot->name;
hotplug_slot->private = slot;
hotplug_slot->release = &release_slot;
hotplug_slot->ops = &pciehp_hotplug_slot_ops;
- get_power_status(hotplug_slot, &info->power_status);
- get_attention_status(hotplug_slot, &info->attention_status);
- get_latch_status(hotplug_slot, &info->latch_status);
- get_adapter_status(hotplug_slot, &info->adapter_status);
slot->hotplug_slot = hotplug_slot;
+ snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
ctrl_dbg(ctrl, "Registering bus=%x dev=%x hp_slot=%x sun=%x "
"slot_device_offset=%x\n", slot->bus, slot->device,
slot->hp_slot, slot->number, ctrl->slot_device_offset);
-duplicate_name:
retval = pci_hp_register(hotplug_slot,
ctrl->pci_dev->subordinate,
- slot->device);
+ slot->device,
+ name);
if (retval) {
- /*
- * If slot N already exists, we'll try to create
- * slot N-1, N-2 ... N-M, until we overflow.
- */
- if (retval == -EEXIST) {
- len = snprintf(slot->name, SLOT_NAME_SIZE,
- "%d-%d", slot->number, dup++);
- if (len < SLOT_NAME_SIZE)
- goto duplicate_name;
- else
- ctrl_err(ctrl, "duplicate slot name "
- "overflow\n");
- }
ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
retval);
goto error_info;
}
+ get_power_status(hotplug_slot, &info->power_status);
+ get_attention_status(hotplug_slot, &info->attention_status);
+ get_latch_status(hotplug_slot, &info->latch_status);
+ get_adapter_status(hotplug_slot, &info->adapter_status);
/* create additional sysfs entries */
if (EMI(ctrl)) {
retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
@@ -287,7 +274,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
struct slot *slot = hotplug_slot->private;
ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
- __func__, hotplug_slot->name);
+ __func__, slot_name(slot));
hotplug_slot->info->attention_status = status;
@@ -303,7 +290,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
struct slot *slot = hotplug_slot->private;
ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
- __func__, hotplug_slot->name);
+ __func__, slot_name(slot));
return pciehp_sysfs_enable_slot(slot);
}
@@ -314,7 +301,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
struct slot *slot = hotplug_slot->private;
ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
- __func__, hotplug_slot->name);
+ __func__, slot_name(slot));
return pciehp_sysfs_disable_slot(slot);
}
@@ -325,7 +312,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
int retval;
ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
- __func__, hotplug_slot->name);
+ __func__, slot_name(slot));
retval = slot->hpc_ops->get_power_status(slot, value);
if (retval < 0)
@@ -340,7 +327,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
int retval;
ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
- __func__, hotplug_slot->name);
+ __func__, slot_name(slot));
retval = slot->hpc_ops->get_attention_status(slot, value);
if (retval < 0)
@@ -355,7 +342,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
int retval;
ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
- __func__, hotplug_slot->name);
+ __func__, slot_name(slot));
retval = slot->hpc_ops->get_latch_status(slot, value);
if (retval < 0)
@@ -370,7 +357,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
int retval;
ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
- __func__, hotplug_slot->name);
+ __func__, slot_name(slot));
retval = slot->hpc_ops->get_adapter_status(slot, value);
if (retval < 0)
@@ -386,7 +373,7 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
int retval;
ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
- __func__, hotplug_slot->name);
+ __func__, slot_name(slot));
retval = slot->hpc_ops->get_max_bus_speed(slot, value);
if (retval < 0)
@@ -401,7 +388,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
int retval;
ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
- __func__, hotplug_slot->name);
+ __func__, slot_name(slot));
retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
if (retval < 0)
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index acb7f9efd18..d6c5eb29775 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -66,7 +66,7 @@ u8 pciehp_handle_attention_button(struct slot *p_slot)
/*
* Button pressed - See if need to TAKE ACTION!!!
*/
- ctrl_info(ctrl, "Button pressed on Slot(%s)\n", p_slot->name);
+ ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot));
event_type = INT_BUTTON_PRESS;
queue_interrupt_event(p_slot, event_type);
@@ -88,13 +88,13 @@ u8 pciehp_handle_switch_change(struct slot *p_slot)
/*
* Switch opened
*/
- ctrl_info(ctrl, "Latch open on Slot(%s)\n", p_slot->name);
+ ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot));
event_type = INT_SWITCH_OPEN;
} else {
/*
* Switch closed
*/
- ctrl_info(ctrl, "Latch close on Slot(%s)\n", p_slot->name);
+ ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot));
event_type = INT_SWITCH_CLOSE;
}
@@ -120,13 +120,14 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
/*
* Card Present
*/
- ctrl_info(ctrl, "Card present on Slot(%s)\n", p_slot->name);
+ ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot));
event_type = INT_PRESENCE_ON;
} else {
/*
* Not Present
*/
- ctrl_info(ctrl, "Card not present on Slot(%s)\n", p_slot->name);
+ ctrl_info(ctrl, "Card not present on Slot(%s)\n",
+ slot_name(p_slot));
event_type = INT_PRESENCE_OFF;
}
@@ -148,13 +149,13 @@ u8 pciehp_handle_power_fault(struct slot *p_slot)
* power fault Cleared
*/
ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n",
- p_slot->name);
+ slot_name(p_slot));
event_type = INT_POWER_FAULT_CLEAR;
} else {
/*
* power fault
*/
- ctrl_info(ctrl, "Power fault on Slot(%s)\n", p_slot->name);
+ ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot));
event_type = INT_POWER_FAULT;
ctrl_info(ctrl, "power fault bit %x set\n", 0);
}
@@ -225,9 +226,6 @@ static int board_added(struct slot *p_slot)
if (PWR_LED(ctrl))
p_slot->hpc_ops->green_led_blink(p_slot);
- /* Wait for ~1 second */
- msleep(1000);
-
/* Check link training status */
retval = p_slot->hpc_ops->check_lnk_status(ctrl);
if (retval) {
@@ -412,12 +410,12 @@ static void handle_button_press_event(struct slot *p_slot)
p_slot->state = BLINKINGOFF_STATE;
ctrl_info(ctrl,
"PCI slot #%s - powering off due to button "
- "press.\n", p_slot->name);
+ "press.\n", slot_name(p_slot));
} else {
p_slot->state = BLINKINGON_STATE;
ctrl_info(ctrl,
"PCI slot #%s - powering on due to button "
- "press.\n", p_slot->name);
+ "press.\n", slot_name(p_slot));
}
/* blink green LED and turn off amber */
if (PWR_LED(ctrl))
@@ -434,7 +432,7 @@ static void handle_button_press_event(struct slot *p_slot)
* press the attention again before the 5 sec. limit
* expires to cancel hot-add or hot-remove
*/
- ctrl_info(ctrl, "Button cancel on Slot(%s)\n", p_slot->name);
+ ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
ctrl_dbg(ctrl, "%s: button cancel\n", __func__);
cancel_delayed_work(&p_slot->work);
if (p_slot->state == BLINKINGOFF_STATE) {
@@ -447,7 +445,7 @@ static void handle_button_press_event(struct slot *p_slot)
if (ATTN_LED(ctrl))
p_slot->hpc_ops->set_attention_status(p_slot, 0);
ctrl_info(ctrl, "PCI slot #%s - action canceled "
- "due to button press\n", p_slot->name);
+ "due to button press\n", slot_name(p_slot));
p_slot->state = STATIC_STATE;
break;
case POWEROFF_STATE:
@@ -457,7 +455,7 @@ static void handle_button_press_event(struct slot *p_slot)
* this means that the previous attention button action
* to hot-add or hot-remove is undergoing
*/
- ctrl_info(ctrl, "Button ignore on Slot(%s)\n", p_slot->name);
+ ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
update_slot_info(p_slot);
break;
default:
@@ -540,7 +538,7 @@ int pciehp_enable_slot(struct slot *p_slot)
rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "%s: no adapter on slot(%s)\n",
- __func__, p_slot->name);
+ __func__, slot_name(p_slot));
mutex_unlock(&p_slot->ctrl->crit_sect);
return -ENODEV;
}
@@ -548,7 +546,7 @@ int pciehp_enable_slot(struct slot *p_slot)
rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "%s: latch open on slot(%s)\n",
- __func__, p_slot->name);
+ __func__, slot_name(p_slot));
mutex_unlock(&p_slot->ctrl->crit_sect);
return -ENODEV;
}
@@ -558,7 +556,7 @@ int pciehp_enable_slot(struct slot *p_slot)
rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "%s: already enabled on slot(%s)\n",
- __func__, p_slot->name);
+ __func__, slot_name(p_slot));
mutex_unlock(&p_slot->ctrl->crit_sect);
return -EINVAL;
}
@@ -594,7 +592,7 @@ int pciehp_disable_slot(struct slot *p_slot)
ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
if (ret || !getstatus) {
ctrl_info(ctrl, "%s: no adapter on slot(%s)\n",
- __func__, p_slot->name);
+ __func__, slot_name(p_slot));
mutex_unlock(&p_slot->ctrl->crit_sect);
return -ENODEV;
}
@@ -604,7 +602,7 @@ int pciehp_disable_slot(struct slot *p_slot)
ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
if (ret || getstatus) {
ctrl_info(ctrl, "%s: latch open on slot(%s)\n",
- __func__, p_slot->name);
+ __func__, slot_name(p_slot));
mutex_unlock(&p_slot->ctrl->crit_sect);
return -ENODEV;
}
@@ -614,7 +612,7 @@ int pciehp_disable_slot(struct slot *p_slot)
ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
if (ret || !getstatus) {
ctrl_info(ctrl, "%s: already disabled slot(%s)\n",
- __func__, p_slot->name);
+ __func__, slot_name(p_slot));
mutex_unlock(&p_slot->ctrl->crit_sect);
return -EINVAL;
}
@@ -645,14 +643,16 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
break;
case POWERON_STATE:
ctrl_info(ctrl, "Slot %s is already in powering on state\n",
- p_slot->name);
+ slot_name(p_slot));
break;
case BLINKINGOFF_STATE:
case POWEROFF_STATE:
- ctrl_info(ctrl, "Already enabled on slot %s\n", p_slot->name);
+ ctrl_info(ctrl, "Already enabled on slot %s\n",
+ slot_name(p_slot));
break;
default:
- ctrl_err(ctrl, "Not a valid state on slot %s\n", p_slot->name);
+ ctrl_err(ctrl, "Not a valid state on slot %s\n",
+ slot_name(p_slot));
break;
}
mutex_unlock(&p_slot->lock);
@@ -678,14 +678,16 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
break;
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot %s is already in powering off state\n",
- p_slot->name);
+ slot_name(p_slot));
break;
case BLINKINGON_STATE:
case POWERON_STATE:
- ctrl_info(ctrl, "Already disabled on slot %s\n", p_slot->name);
+ ctrl_info(ctrl, "Already disabled on slot %s\n",
+ slot_name(p_slot));
break;
default:
- ctrl_err(ctrl, "Not a valid state on slot %s\n", p_slot->name);
+ ctrl_err(ctrl, "Not a valid state on slot %s\n",
+ slot_name(p_slot));
break;
}
mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 8e9530c4c36..58c72d2cc21 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -125,6 +125,7 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
/* Field definitions in Link Capabilities Register */
#define MAX_LNK_SPEED 0x000F
#define MAX_LNK_WIDTH 0x03F0
+#define LINK_ACTIVE_REPORTING 0x00100000
/* Link Width Encoding */
#define LNK_X1 0x01
@@ -141,6 +142,7 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
#define LNK_TRN_ERR 0x0400
#define LNK_TRN 0x0800
#define SLOT_CLK_CONF 0x1000
+#define LINK_ACTIVE 0x2000
/* Field definitions in Slot Capabilities Register */
#define ATTN_BUTTN_PRSN 0x00000001
@@ -368,11 +370,52 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
return retval;
}
+static inline int check_link_active(struct controller *ctrl)
+{
+ u16 link_status;
+
+ if (pciehp_readw(ctrl, LNKSTATUS, &link_status))
+ return 0;
+ return !!(link_status & LINK_ACTIVE);
+}
+
+static void pcie_wait_link_active(struct controller *ctrl)
+{
+ int timeout = 1000;
+
+ if (check_link_active(ctrl))
+ return;
+ while (timeout > 0) {
+ msleep(10);
+ timeout -= 10;
+ if (check_link_active(ctrl))
+ return;
+ }
+ ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n");
+}
+
static int hpc_check_lnk_status(struct controller *ctrl)
{
u16 lnk_status;
int retval = 0;
+ /*
+ * Data Link Layer Link Active Reporting must be supported by a
+ * hot-plug capable downstream port, but an old controller might
+ * not implement it. In that case, wait for 1000 ms.
+ */
+ if (ctrl->link_active_reporting) {
+ /* Wait for Data Link Layer Link Active bit to be set */
+ pcie_wait_link_active(ctrl);
+ /*
+ * We must wait for 100 ms after the Data Link Layer
+ * Link Active bit reads 1b before initiating a
+ * configuration access to the hot added device.
+ */
+ msleep(100);
+ } else
+ msleep(1000);
+
retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
if (retval) {
ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
@@ -1061,7 +1104,6 @@ static int pcie_init_slot(struct controller *ctrl)
slot->device = ctrl->slot_device_offset + slot->hp_slot;
slot->hpc_ops = ctrl->hpc_ops;
slot->number = ctrl->first_slot;
- snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
list_add(&slot->slot_list, &ctrl->slot_list);
@@ -1132,7 +1174,7 @@ static inline void dbg_ctrl(struct controller *ctrl)
struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
- u32 slot_cap;
+ u32 slot_cap, link_cap;
struct pci_dev *pdev = dev->port;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
@@ -1148,11 +1190,11 @@ struct controller *pcie_init(struct pcie_device *dev)
if (!ctrl->cap_base) {
ctrl_err(ctrl, "%s: Cannot find PCI Express capability\n",
__func__);
- goto abort;
+ goto abort_ctrl;
}
if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) {
ctrl_err(ctrl, "%s: Cannot read SLOTCAP register\n", __func__);
- goto abort;
+ goto abort_ctrl;
}
ctrl->slot_cap = slot_cap;
@@ -1174,6 +1216,16 @@ struct controller *pcie_init(struct pcie_device *dev)
!(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
ctrl->no_cmd_complete = 1;
+ /* Check if Data Link Layer Link Active Reporting is implemented */
+ if (pciehp_readl(ctrl, LNKCAP, &link_cap)) {
+ ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
+ goto abort_ctrl;
+ }
+ if (link_cap & LINK_ACTIVE_REPORTING) {
+ ctrl_dbg(ctrl, "Link Active Reporting supported\n");
+ ctrl->link_active_reporting = 1;
+ }
+
/* Clear all remaining event bits in Slot Status register */
if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f))
goto abort_ctrl;
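For reference, the two new masks map onto standard PCIe registers: LINK_ACTIVE_REPORTING is the Data Link Layer Link Active Reporting Capable bit (bit 20) of the Link Capabilities register, and LINK_ACTIVE is the Data Link Layer Link Active bit (bit 13) of the Link Status register. A small sketch using the accessors already defined in this driver:

u32 link_cap;
u16 link_status;

/* capability: can this port report Data Link Layer Link Active at all? */
pciehp_readl(ctrl, LNKCAP, &link_cap);
/* status: is the link actually trained and active right now? */
pciehp_readw(ctrl, LNKSTATUS, &link_status);

if ((link_cap & LINK_ACTIVE_REPORTING) && (link_status & LINK_ACTIVE)) {
        /* link is up and reliably reported; the spec still requires the
         * 100 ms settle time used above before any config access */
}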
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 50884507b8b..2ea9cf1a8d0 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -43,7 +43,7 @@ static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot)
void dealloc_slot_struct(struct slot *slot)
{
kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot->name);
+ kfree(slot->name);
kfree(slot->hotplug_slot);
kfree(slot);
}
@@ -63,11 +63,9 @@ struct slot *alloc_slot_struct(struct device_node *dn,
GFP_KERNEL);
if (!slot->hotplug_slot->info)
goto error_hpslot;
- slot->hotplug_slot->name = kmalloc(strlen(drc_name) + 1, GFP_KERNEL);
- if (!slot->hotplug_slot->name)
+ slot->name = kstrdup(drc_name, GFP_KERNEL);
+ if (!slot->name)
goto error_info;
- slot->name = slot->hotplug_slot->name;
- strcpy(slot->name, drc_name);
slot->dn = dn;
slot->index = drc_index;
slot->power_domain = power_domain;
@@ -137,7 +135,7 @@ int rpaphp_register_slot(struct slot *slot)
slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
else
slotno = -1;
- retval = pci_hp_register(php_slot, slot->bus, slotno);
+ retval = pci_hp_register(php_slot, slot->bus, slotno, slot->name);
if (retval) {
err("pci_hp_register failed with error %d\n", retval);
return retval;
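kstrdup() folds the removed kmalloc()+strcpy() pair into a single call; roughly equivalent open-coded form for comparison (the real helper lives in mm/util.c):

/* what slot->name = kstrdup(drc_name, GFP_KERNEL) boils down to */
slot->name = kmalloc(strlen(drc_name) + 1, GFP_KERNEL);
if (slot->name)
        strcpy(slot->name, drc_name);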
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 410fe0394a8..3eee70928d4 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -161,7 +161,8 @@ static int sn_pci_bus_valid(struct pci_bus *pci_bus)
}
static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
- struct pci_bus *pci_bus, int device)
+ struct pci_bus *pci_bus, int device,
+ char *name)
{
struct pcibus_info *pcibus_info;
struct slot *slot;
@@ -173,15 +174,9 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
return -ENOMEM;
bss_hotplug_slot->private = slot;
- bss_hotplug_slot->name = kmalloc(SN_SLOT_NAME_SIZE, GFP_KERNEL);
- if (!bss_hotplug_slot->name) {
- kfree(bss_hotplug_slot->private);
- return -ENOMEM;
- }
-
slot->device_num = device;
slot->pci_bus = pci_bus;
- sprintf(bss_hotplug_slot->name, "%04x:%02x:%02x",
+ sprintf(name, "%04x:%02x:%02x",
pci_domain_nr(pci_bus),
((u16)pcibus_info->pbi_buscommon.bs_persist_busnum),
device + 1);
@@ -418,7 +413,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
/*
* Add the slot's devices to the ACPI infrastructure */
if (SN_ACPI_BASE_SUPPORT() && ssdt) {
- unsigned long adr;
+ unsigned long long adr;
struct acpi_device *pdevice;
struct acpi_device *device;
acpi_handle phandle;
@@ -510,7 +505,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
/* free the ACPI resources for the slot */
if (SN_ACPI_BASE_SUPPORT() &&
PCI_CONTROLLER(slot->pci_bus)->acpi_handle) {
- unsigned long adr;
+ unsigned long long adr;
struct acpi_device *device;
acpi_handle phandle;
acpi_handle chandle = NULL;
@@ -608,7 +603,6 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
{
kfree(bss_hotplug_slot->info);
- kfree(bss_hotplug_slot->name);
kfree(bss_hotplug_slot->private);
kfree(bss_hotplug_slot);
}
@@ -618,6 +612,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
int device;
struct pci_slot *pci_slot;
struct hotplug_slot *bss_hotplug_slot;
+ char name[SN_SLOT_NAME_SIZE];
int rc = 0;
/*
@@ -645,15 +640,14 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
}
if (sn_hp_slot_private_alloc(bss_hotplug_slot,
- pci_bus, device)) {
+ pci_bus, device, name)) {
rc = -ENOMEM;
goto alloc_err;
}
-
bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
bss_hotplug_slot->release = &sn_release_slot;
- rc = pci_hp_register(bss_hotplug_slot, pci_bus, device);
+ rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name);
if (rc)
goto register_err;
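The unsigned long to unsigned long long changes in this file track the ACPI integer width: _ADR values are evaluated into a 64-bit integer, which a plain unsigned long would truncate on 32-bit builds. A hypothetical call shape (the handle variable is illustrative, not from this hunk):

unsigned long long adr; /* ACPI integers are 64 bit even on 32-bit kernels */
acpi_status status;

/* 'handle' stands for the slot device's ACPI handle */
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr);
if (ACPI_FAILURE(status))
        return -ENODEV;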
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 8a026f750de..4d9fed00e1d 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -69,15 +69,13 @@ struct slot {
u8 state;
u8 presence_save;
u8 pwr_save;
- struct timer_list task_event;
- u8 hp_slot;
struct controller *ctrl;
struct hpc_ops *hpc_ops;
struct hotplug_slot *hotplug_slot;
struct list_head slot_list;
- char name[SLOT_NAME_SIZE];
struct delayed_work work; /* work for button event */
struct mutex lock;
+ u8 hp_slot;
};
struct event_info {
@@ -169,6 +167,11 @@ extern void cleanup_slots(struct controller *ctrl);
extern void shpchp_queue_pushbutton_work(struct work_struct *work);
extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
+static inline const char *slot_name(struct slot *slot)
+{
+ return hotplug_slot_name(slot->hotplug_slot);
+}
+
#ifdef CONFIG_ACPI
#include <linux/pci-acpi.h>
static inline int get_hp_params_from_firmware(struct pci_dev *dev,
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index cc38615395f..7af9191df4d 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -89,7 +89,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
@@ -101,8 +101,9 @@ static int init_slots(struct controller *ctrl)
struct slot *slot;
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
+ char name[SLOT_NAME_SIZE];
int retval = -ENOMEM;
- int i, len, dup = 1;
+ int i;
for (i = 0; i < ctrl->num_slots; i++) {
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
@@ -119,8 +120,6 @@ static int init_slots(struct controller *ctrl)
goto error_hpslot;
hotplug_slot->info = info;
- hotplug_slot->name = slot->name;
-
slot->hp_slot = i;
slot->ctrl = ctrl;
slot->bus = ctrl->pci_dev->subordinate->number;
@@ -133,37 +132,24 @@ static int init_slots(struct controller *ctrl)
/* register this slot with the hotplug pci core */
hotplug_slot->private = slot;
hotplug_slot->release = &release_slot;
- snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
+ snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
hotplug_slot->ops = &shpchp_hotplug_slot_ops;
- get_power_status(hotplug_slot, &info->power_status);
- get_attention_status(hotplug_slot, &info->attention_status);
- get_latch_status(hotplug_slot, &info->latch_status);
- get_adapter_status(hotplug_slot, &info->adapter_status);
-
dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
"slot_device_offset=%x\n", slot->bus, slot->device,
slot->hp_slot, slot->number, ctrl->slot_device_offset);
-duplicate_name:
retval = pci_hp_register(slot->hotplug_slot,
- ctrl->pci_dev->subordinate, slot->device);
+ ctrl->pci_dev->subordinate, slot->device, name);
if (retval) {
- /*
- * If slot N already exists, we'll try to create
- * slot N-1, N-2 ... N-M, until we overflow.
- */
- if (retval == -EEXIST) {
- len = snprintf(slot->name, SLOT_NAME_SIZE,
- "%d-%d", slot->number, dup++);
- if (len < SLOT_NAME_SIZE)
- goto duplicate_name;
- else
- err("duplicate slot name overflow\n");
- }
err("pci_hp_register failed with error %d\n", retval);
goto error_info;
}
+ get_power_status(hotplug_slot, &info->power_status);
+ get_attention_status(hotplug_slot, &info->attention_status);
+ get_latch_status(hotplug_slot, &info->latch_status);
+ get_adapter_status(hotplug_slot, &info->adapter_status);
+
list_add(&slot->slot_list, &ctrl->slot_list);
}
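Both shpchp (here) and pciehp (earlier in this series) move the initial get_*_status() calls below pci_hp_register(). The ordering matters because those hooks log the slot name; a sketch of the dependency, annotating the helper already declared in shpchp.h:

static inline const char *slot_name(struct slot *slot)
{
        /* hotplug_slot->pci_slot is only assigned inside pci_hp_register(),
         * so logging the name from a status hook before registration would
         * dereference a NULL pci_slot */
        return hotplug_slot_name(slot->hotplug_slot);
}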
@@ -201,7 +187,7 @@ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status)
{
struct slot *slot = get_slot(hotplug_slot);
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
hotplug_slot->info->attention_status = status;
slot->hpc_ops->set_attention_status(slot, status);
@@ -213,7 +199,7 @@ static int enable_slot (struct hotplug_slot *hotplug_slot)
{
struct slot *slot = get_slot(hotplug_slot);
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
return shpchp_sysfs_enable_slot(slot);
}
@@ -222,7 +208,7 @@ static int disable_slot (struct hotplug_slot *hotplug_slot)
{
struct slot *slot = get_slot(hotplug_slot);
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
return shpchp_sysfs_disable_slot(slot);
}
@@ -232,7 +218,7 @@ static int get_power_status (struct hotplug_slot *hotplug_slot, u8 *value)
struct slot *slot = get_slot(hotplug_slot);
int retval;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
retval = slot->hpc_ops->get_power_status(slot, value);
if (retval < 0)
@@ -246,7 +232,7 @@ static int get_attention_status (struct hotplug_slot *hotplug_slot, u8 *value)
struct slot *slot = get_slot(hotplug_slot);
int retval;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
retval = slot->hpc_ops->get_attention_status(slot, value);
if (retval < 0)
@@ -260,7 +246,7 @@ static int get_latch_status (struct hotplug_slot *hotplug_slot, u8 *value)
struct slot *slot = get_slot(hotplug_slot);
int retval;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
retval = slot->hpc_ops->get_latch_status(slot, value);
if (retval < 0)
@@ -274,7 +260,7 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
struct slot *slot = get_slot(hotplug_slot);
int retval;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
retval = slot->hpc_ops->get_adapter_status(slot, value);
if (retval < 0)
@@ -289,7 +275,7 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
struct slot *slot = get_slot(hotplug_slot);
int retval;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
retval = slot->hpc_ops->get_max_bus_speed(slot, value);
if (retval < 0)
@@ -303,7 +289,7 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
struct slot *slot = get_slot(hotplug_slot);
int retval;
- dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
if (retval < 0)
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index dfb53932dfb..919b1ee4431 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -70,7 +70,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
/*
* Button pressed - See if need to TAKE ACTION!!!
*/
- info("Button pressed on Slot(%s)\n", p_slot->name);
+ info("Button pressed on Slot(%s)\n", slot_name(p_slot));
event_type = INT_BUTTON_PRESS;
queue_interrupt_event(p_slot, event_type);
@@ -98,7 +98,7 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
/*
* Switch opened
*/
- info("Latch open on Slot(%s)\n", p_slot->name);
+ info("Latch open on Slot(%s)\n", slot_name(p_slot));
event_type = INT_SWITCH_OPEN;
if (p_slot->pwr_save && p_slot->presence_save) {
event_type = INT_POWER_FAULT;
@@ -108,7 +108,7 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
/*
* Switch closed
*/
- info("Latch close on Slot(%s)\n", p_slot->name);
+ info("Latch close on Slot(%s)\n", slot_name(p_slot));
event_type = INT_SWITCH_CLOSE;
}
@@ -135,13 +135,13 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
/*
* Card Present
*/
- info("Card present on Slot(%s)\n", p_slot->name);
+ info("Card present on Slot(%s)\n", slot_name(p_slot));
event_type = INT_PRESENCE_ON;
} else {
/*
* Not Present
*/
- info("Card not present on Slot(%s)\n", p_slot->name);
+ info("Card not present on Slot(%s)\n", slot_name(p_slot));
event_type = INT_PRESENCE_OFF;
}
@@ -164,14 +164,14 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
/*
* Power fault Cleared
*/
- info("Power fault cleared on Slot(%s)\n", p_slot->name);
+ info("Power fault cleared on Slot(%s)\n", slot_name(p_slot));
p_slot->status = 0x00;
event_type = INT_POWER_FAULT_CLEAR;
} else {
/*
* Power fault
*/
- info("Power fault on Slot(%s)\n", p_slot->name);
+ info("Power fault on Slot(%s)\n", slot_name(p_slot));
event_type = INT_POWER_FAULT;
/* set power fault status for this board */
p_slot->status = 0xFF;
@@ -493,11 +493,11 @@ static void handle_button_press_event(struct slot *p_slot)
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
info("PCI slot #%s - powering off due to button "
- "press.\n", p_slot->name);
+ "press.\n", slot_name(p_slot));
} else {
p_slot->state = BLINKINGON_STATE;
info("PCI slot #%s - powering on due to button "
- "press.\n", p_slot->name);
+ "press.\n", slot_name(p_slot));
}
/* blink green LED and turn off amber */
p_slot->hpc_ops->green_led_blink(p_slot);
@@ -512,7 +512,7 @@ static void handle_button_press_event(struct slot *p_slot)
* press the attention again before the 5 sec. limit
* expires to cancel hot-add or hot-remove
*/
- info("Button cancel on Slot(%s)\n", p_slot->name);
+ info("Button cancel on Slot(%s)\n", slot_name(p_slot));
dbg("%s: button cancel\n", __func__);
cancel_delayed_work(&p_slot->work);
if (p_slot->state == BLINKINGOFF_STATE)
@@ -521,7 +521,7 @@ static void handle_button_press_event(struct slot *p_slot)
p_slot->hpc_ops->green_led_off(p_slot);
p_slot->hpc_ops->set_attention_status(p_slot, 0);
info("PCI slot #%s - action canceled due to button press\n",
- p_slot->name);
+ slot_name(p_slot));
p_slot->state = STATIC_STATE;
break;
case POWEROFF_STATE:
@@ -531,7 +531,7 @@ static void handle_button_press_event(struct slot *p_slot)
* this means that the previous attention button action
* to hot-add or hot-remove is undergoing
*/
- info("Button ignore on Slot(%s)\n", p_slot->name);
+ info("Button ignore on Slot(%s)\n", slot_name(p_slot));
update_slot_info(p_slot);
break;
default:
@@ -574,17 +574,17 @@ static int shpchp_enable_slot (struct slot *p_slot)
mutex_lock(&p_slot->ctrl->crit_sect);
rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
- info("No adapter on slot(%s)\n", p_slot->name);
+ info("No adapter on slot(%s)\n", slot_name(p_slot));
goto out;
}
rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
- info("Latch open on slot(%s)\n", p_slot->name);
+ info("Latch open on slot(%s)\n", slot_name(p_slot));
goto out;
}
rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
if (rc || getstatus) {
- info("Already enabled on slot(%s)\n", p_slot->name);
+ info("Already enabled on slot(%s)\n", slot_name(p_slot));
goto out;
}
@@ -633,17 +633,17 @@ static int shpchp_disable_slot (struct slot *p_slot)
rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
- info("No adapter on slot(%s)\n", p_slot->name);
+ info("No adapter on slot(%s)\n", slot_name(p_slot));
goto out;
}
rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
- info("Latch open on slot(%s)\n", p_slot->name);
+ info("Latch open on slot(%s)\n", slot_name(p_slot));
goto out;
}
rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
if (rc || !getstatus) {
- info("Already disabled slot(%s)\n", p_slot->name);
+ info("Already disabled slot(%s)\n", slot_name(p_slot));
goto out;
}
@@ -671,14 +671,14 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot)
break;
case POWERON_STATE:
info("Slot %s is already in powering on state\n",
- p_slot->name);
+ slot_name(p_slot));
break;
case BLINKINGOFF_STATE:
case POWEROFF_STATE:
- info("Already enabled on slot %s\n", p_slot->name);
+ info("Already enabled on slot %s\n", slot_name(p_slot));
break;
default:
- err("Not a valid state on slot %s\n", p_slot->name);
+ err("Not a valid state on slot %s\n", slot_name(p_slot));
break;
}
mutex_unlock(&p_slot->lock);
@@ -703,14 +703,14 @@ int shpchp_sysfs_disable_slot(struct slot *p_slot)
break;
case POWEROFF_STATE:
info("Slot %s is already in powering off state\n",
- p_slot->name);
+ slot_name(p_slot));
break;
case BLINKINGON_STATE:
case POWERON_STATE:
- info("Already disabled on slot %s\n", p_slot->name);
+ info("Already disabled on slot %s\n", slot_name(p_slot));
break;
default:
- err("Not a valid state on slot %s\n", p_slot->name);
+ err("Not a valid state on slot %s\n", slot_name(p_slot));
break;
}
mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 8b51e10b778..a2692724b68 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -18,6 +18,7 @@
* Author: Ashok Raj <ashok.raj@intel.com>
* Author: Shaohua Li <shaohua.li@intel.com>
* Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
*/
#include <linux/init.h>
@@ -35,11 +36,13 @@
#include <linux/timer.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
-#include <asm/proto.h> /* force_iommu in this header in x86-64*/
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"
+#define ROOT_SIZE VTD_PAGE_SIZE
+#define CONTEXT_SIZE VTD_PAGE_SIZE
+
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
@@ -199,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
spin_unlock_irqrestore(&iommu->lock, flags);
return NULL;
}
- __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K);
+ __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
phy_addr = virt_to_phys((void *)context);
set_root_value(root, phy_addr);
set_root_present(root);
@@ -345,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
return NULL;
}
__iommu_flush_cache(domain->iommu, tmp_page,
- PAGE_SIZE_4K);
+ PAGE_SIZE);
dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
/*
* high level table always sets r/w, last level page
@@ -408,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
start &= (((u64)1) << addr_width) - 1;
end &= (((u64)1) << addr_width) - 1;
/* in case it's partial page */
- start = PAGE_ALIGN_4K(start);
- end &= PAGE_MASK_4K;
+ start = PAGE_ALIGN(start);
+ end &= PAGE_MASK;
/* we don't need lock here, nobody else touches the iova range */
while (start < end) {
dma_pte_clear_one(domain, start);
- start += PAGE_SIZE_4K;
+ start += VTD_PAGE_SIZE;
}
}
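The PAGE_*_4K macros are split two ways in this patch: host-side bookkeeping switches to the generic PAGE_SIZE/PAGE_MASK/PAGE_ALIGN, while anything the VT-d hardware sees keeps a fixed 4 KiB granularity under new VTD_PAGE_* names. Their assumed definitions (expected in include/linux/intel-iommu.h, not shown in this excerpt):

#define VTD_PAGE_SHIFT          (12)
#define VTD_PAGE_SIZE           (1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK           (((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)    (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)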
@@ -468,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
if (!root)
return -ENOMEM;
- __iommu_flush_cache(iommu, root, PAGE_SIZE_4K);
+ __iommu_flush_cache(iommu, root, ROOT_SIZE);
spin_lock_irqsave(&iommu->lock, flags);
iommu->root_entry = root;
@@ -567,27 +570,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
return 0;
}
-static int inline iommu_flush_context_global(struct intel_iommu *iommu,
- int non_present_entry_flush)
-{
- return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
- non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did,
- int non_present_entry_flush)
-{
- return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
- non_present_entry_flush);
-}
-
-static int inline iommu_flush_context_device(struct intel_iommu *iommu,
- u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
-{
- return __iommu_flush_context(iommu, did, source_id, function_mask,
- DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
-}
-
/* return value determine if we need a write buffer flush */
static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int size_order, u64 type,
@@ -655,37 +637,25 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
- DMA_TLB_IIRG(type), DMA_TLB_IAIG(val));
+ (unsigned long long)DMA_TLB_IIRG(type),
+ (unsigned long long)DMA_TLB_IAIG(val));
/* flush iotlb entry will implicitly flush write buffer */
return 0;
}
-static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu,
- int non_present_entry_flush)
-{
- return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
- non_present_entry_flush);
-}
-
-static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did,
- int non_present_entry_flush)
-{
- return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
- non_present_entry_flush);
-}
-
static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int pages, int non_present_entry_flush)
{
unsigned int mask;
- BUG_ON(addr & (~PAGE_MASK_4K));
+ BUG_ON(addr & (~VTD_PAGE_MASK));
BUG_ON(pages == 0);
/* Fallback to domain selective flush if no PSI support */
if (!cap_pgsel_inv(iommu->cap))
- return iommu_flush_iotlb_dsi(iommu, did,
- non_present_entry_flush);
+ return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH,
+ non_present_entry_flush);
/*
* PSI requires page size to be 2 ^ x, and the base address is naturally
@@ -694,11 +664,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
mask = ilog2(__roundup_pow_of_two(pages));
/* Fallback to domain selective flush if size is too big */
if (mask > cap_max_amask_val(iommu->cap))
- return iommu_flush_iotlb_dsi(iommu, did,
- non_present_entry_flush);
+ return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH, non_present_entry_flush);
- return __iommu_flush_iotlb(iommu, did, addr, mask,
- DMA_TLB_PSI_FLUSH, non_present_entry_flush);
+ return iommu->flush.flush_iotlb(iommu, did, addr, mask,
+ DMA_TLB_PSI_FLUSH,
+ non_present_entry_flush);
}
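A worked example of the page-selective invalidation (PSI) size computation above:

/* e.g. flushing 5 pages: __roundup_pow_of_two(5) == 8 and ilog2(8) == 3, so
 * the IOTLB invalidates one naturally aligned 2^3-page (32 KiB) region that
 * covers the range; if 3 exceeded cap_max_amask_val() the code above would
 * fall back to a domain-selective flush instead */
unsigned int pages = 5;
unsigned int mask = ilog2(__roundup_pow_of_two(pages));        /* == 3 */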
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -831,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
}
static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
- u8 fault_reason, u16 source_id, u64 addr)
+ u8 fault_reason, u16 source_id, unsigned long long addr)
{
const char *reason;
@@ -1084,9 +1055,9 @@ static void dmar_init_reserved_ranges(void)
if (!r->flags || !(r->flags & IORESOURCE_MEM))
continue;
addr = r->start;
- addr &= PAGE_MASK_4K;
+ addr &= PAGE_MASK;
size = r->end - addr;
- size = PAGE_ALIGN_4K(size);
+ size = PAGE_ALIGN(size);
iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
IOVA_PFN(size + addr) - 1);
if (!iova)
@@ -1148,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
domain->pgd = (struct dma_pte *)alloc_pgtable_page();
if (!domain->pgd)
return -ENOMEM;
- __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K);
+ __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
return 0;
}
@@ -1164,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain)
/* destroy iovas */
put_iova_domain(&domain->iovad);
end = DOMAIN_MAX_ADDR(domain->gaw);
- end = end & (~PAGE_MASK_4K);
+ end = end & (~PAGE_MASK);
/* clear ptes */
dma_pte_clear_range(domain, 0, end);
@@ -1204,11 +1175,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
__iommu_flush_cache(iommu, context, sizeof(*context));
/* it's a non-present to present mapping */
- if (iommu_flush_context_device(iommu, domain->id,
- (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1))
+ if (iommu->flush.flush_context(iommu, domain->id,
+ (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL, 1))
iommu_flush_write_buffer(iommu);
else
- iommu_flush_iotlb_dsi(iommu, 0, 0);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
+
spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
@@ -1283,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
u64 start_pfn, end_pfn;
struct dma_pte *pte;
int index;
+ int addr_width = agaw_to_width(domain->agaw);
+
+ hpa &= (((u64)1) << addr_width) - 1;
if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
return -EINVAL;
- iova &= PAGE_MASK_4K;
- start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K;
- end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K;
+ iova &= PAGE_MASK;
+ start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
+ end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
index = 0;
while (start_pfn < end_pfn) {
- pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
+ pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
if (!pte)
return -ENOMEM;
/* We don't need lock here, nobody else
* touches the iova range
*/
BUG_ON(dma_pte_addr(*pte));
- dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
+ dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
dma_set_pte_prot(*pte, prot);
__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
start_pfn++;
@@ -1310,8 +1286,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
{
clear_context_table(domain->iommu, bus, devfn);
- iommu_flush_context_global(domain->iommu, 0);
- iommu_flush_iotlb_global(domain->iommu, 0);
+ domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
+ DMA_CCMD_GLOBAL_INVL, 0);
+ domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
+ DMA_TLB_GLOBAL_FLUSH, 0);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1474,11 +1452,13 @@ error:
return find_domain(pdev);
}
-static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
+static int iommu_prepare_identity_map(struct pci_dev *pdev,
+ unsigned long long start,
+ unsigned long long end)
{
struct dmar_domain *domain;
unsigned long size;
- u64 base;
+ unsigned long long base;
int ret;
printk(KERN_INFO
@@ -1490,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
return -ENOMEM;
/* The address might not be aligned */
- base = start & PAGE_MASK_4K;
+ base = start & PAGE_MASK;
size = end - base;
- size = PAGE_ALIGN_4K(size);
+ size = PAGE_ALIGN(size);
if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
IOVA_PFN(base + size) - 1)) {
printk(KERN_ERR "IOMMU: reserve iova failed\n");
@@ -1662,6 +1642,28 @@ int __init init_dmars(void)
}
}
+ for_each_drhd_unit(drhd) {
+ if (drhd->ignored)
+ continue;
+
+ iommu = drhd->iommu;
+ if (dmar_enable_qi(iommu)) {
+ /*
+ * Queued Invalidate not enabled, use Register Based
+ * Invalidate
+ */
+ iommu->flush.flush_context = __iommu_flush_context;
+ iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+ printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
+ "invalidation\n", drhd->reg_base_addr);
+ } else {
+ iommu->flush.flush_context = qi_flush_context;
+ iommu->flush.flush_iotlb = qi_flush_iotlb;
+ printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
+ "invalidation\n", drhd->reg_base_addr);
+ }
+ }
+
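init_dmars() now picks the invalidation backend once per IOMMU and stores it behind function pointers, so every flush site in this file can stay backend-agnostic. The shape of that indirection, inferred from the call sites in this patch (the struct itself is expected in include/linux/intel-iommu.h):

struct iommu_flush {
        int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
                             u8 fm, u64 type, int non_present_entry_flush);
        int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
                           unsigned int size_order, u64 type,
                           int non_present_entry_flush);
};

/* register-based fallback vs. queued invalidation, as chosen above */
iommu->flush.flush_context = __iommu_flush_context;    /* or qi_flush_context */
iommu->flush.flush_iotlb   = __iommu_flush_iotlb;      /* or qi_flush_iotlb */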
/*
* For each rmrr
* for each dev attached to rmrr
@@ -1714,9 +1716,10 @@ int __init init_dmars(void)
iommu_set_root_entry(iommu);
- iommu_flush_context_global(iommu, 0);
- iommu_flush_iotlb_global(iommu, 0);
-
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
+ 0);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
+ 0);
iommu_disable_protect_mem_regions(iommu);
ret = iommu_enable_translation(iommu);
@@ -1738,8 +1741,8 @@ error:
static inline u64 aligned_size(u64 host_addr, size_t size)
{
u64 addr;
- addr = (host_addr & (~PAGE_MASK_4K)) + size;
- return PAGE_ALIGN_4K(addr);
+ addr = (host_addr & (~PAGE_MASK)) + size;
+ return PAGE_ALIGN(addr);
}
struct iova *
@@ -1753,20 +1756,20 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
return NULL;
piova = alloc_iova(&domain->iovad,
- size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1);
+ size >> PAGE_SHIFT, IOVA_PFN(end), 1);
return piova;
}
static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
- size_t size)
+ size_t size, u64 dma_mask)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct iova *iova = NULL;
- if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) {
- iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
- } else {
+ if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
+ iova = iommu_alloc_iova(domain, size, dma_mask);
+ else {
/*
* First try to allocate an io virtual address in
* DMA_32BIT_MASK and if that fails then try allocating
@@ -1774,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
*/
iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
if (!iova)
- iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
+ iova = iommu_alloc_iova(domain, size, dma_mask);
}
if (!iova) {
@@ -1813,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
return domain;
}
-static dma_addr_t
-intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
+static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+ size_t size, int dir, u64 dma_mask)
{
struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
- unsigned long start_paddr;
+ phys_addr_t start_paddr;
struct iova *iova;
int prot = 0;
int ret;
@@ -1833,11 +1836,11 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
size = aligned_size((u64)paddr, size);
- iova = __intel_alloc_iova(hwdev, domain, size);
+ iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
if (!iova)
goto error;
- start_paddr = iova->pfn_lo << PAGE_SHIFT_4K;
+ start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
/*
* Check if DMAR supports zero-length reads on write only
@@ -1855,30 +1858,33 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
* is not a big problem
*/
ret = domain_page_mapping(domain, start_paddr,
- ((u64)paddr) & PAGE_MASK_4K, size, prot);
+ ((u64)paddr) & PAGE_MASK, size, prot);
if (ret)
goto error;
- pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
- pci_name(pdev), size, (u64)paddr,
- size, (u64)start_paddr, dir);
-
/* it's a non-present to present mapping */
ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
- start_paddr, size >> PAGE_SHIFT_4K, 1);
+ start_paddr, size >> VTD_PAGE_SHIFT, 1);
if (ret)
iommu_flush_write_buffer(domain->iommu);
- return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K)));
+ return start_paddr + ((u64)paddr & (~PAGE_MASK));
error:
if (iova)
__free_iova(&domain->iovad, iova);
printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
- pci_name(pdev), size, (u64)paddr, dir);
+ pci_name(pdev), size, (unsigned long long)paddr, dir);
return 0;
}
+dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
+ size_t size, int dir)
+{
+ return __intel_map_single(hwdev, paddr, size, dir,
+ to_pci_dev(hwdev)->dma_mask);
+}
+
static void flush_unmaps(void)
{
int i, j;
@@ -1891,7 +1897,8 @@ static void flush_unmaps(void)
struct intel_iommu *iommu =
deferred_flush[i].domain[0]->iommu;
- iommu_flush_iotlb_global(iommu, 0);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ DMA_TLB_GLOBAL_FLUSH, 0);
for (j = 0; j < deferred_flush[i].next; j++) {
__free_iova(&deferred_flush[i].domain[j]->iovad,
deferred_flush[i].iova[j]);
@@ -1936,8 +1943,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
- size_t size, int dir)
+void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
+ int dir)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct dmar_domain *domain;
@@ -1953,11 +1960,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
if (!iova)
return;
- start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+ start_addr = iova->pfn_lo << PAGE_SHIFT;
size = aligned_size((u64)dev_addr, size);
pr_debug("Device %s unmapping: %lx@%llx\n",
- pci_name(pdev), size, (u64)start_addr);
+ pci_name(pdev), size, (unsigned long long)start_addr);
/* clear the whole page */
dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -1965,7 +1972,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
dma_pte_free_pagetable(domain, start_addr, start_addr + size);
if (intel_iommu_strict) {
if (iommu_flush_iotlb_psi(domain->iommu,
- domain->id, start_addr, size >> PAGE_SHIFT_4K, 0))
+ domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
iommu_flush_write_buffer(domain->iommu);
/* free iova */
__free_iova(&domain->iovad, iova);
@@ -1978,13 +1985,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
}
}
-static void * intel_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags)
+void *intel_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags)
{
void *vaddr;
int order;
- size = PAGE_ALIGN_4K(size);
+ size = PAGE_ALIGN(size);
order = get_order(size);
flags &= ~(GFP_DMA | GFP_DMA32);
@@ -1993,19 +2000,21 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size,
return NULL;
memset(vaddr, 0, size);
- *dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL);
+ *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+ DMA_BIDIRECTIONAL,
+ hwdev->coherent_dma_mask);
if (*dma_handle)
return vaddr;
free_pages((unsigned long)vaddr, order);
return NULL;
}
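__intel_map_single() takes the DMA mask explicitly so the streaming path keeps using pdev->dma_mask while intel_alloc_coherent() passes hwdev->coherent_dma_mask. A hypothetical driver configuration that exercises the difference (API names from this kernel era, not part of this patch):

/* streaming DMA may use the device's full addressing range ... */
pci_set_dma_mask(pdev, DMA_64BIT_MASK);
/* ... while coherent allocations stay below 4 GiB for this device */
pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);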
-static void intel_free_coherent(struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
{
int order;
- size = PAGE_ALIGN_4K(size);
+ size = PAGE_ALIGN(size);
order = get_order(size);
intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
@@ -2013,8 +2022,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
}
#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
- int nelems, int dir)
+
+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+ int nelems, int dir)
{
int i;
struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2038,7 +2048,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
size += aligned_size((u64)addr, sg->length);
}
- start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+ start_addr = iova->pfn_lo << PAGE_SHIFT;
/* clear the whole page */
dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -2046,7 +2056,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
dma_pte_free_pagetable(domain, start_addr, start_addr + size);
if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
- size >> PAGE_SHIFT_4K, 0))
+ size >> VTD_PAGE_SHIFT, 0))
iommu_flush_write_buffer(domain->iommu);
/* free iova */
@@ -2067,8 +2077,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
return nelems;
}
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
- int nelems, int dir)
+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+ int dir)
{
void *addr;
int i;
@@ -2096,7 +2106,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
size += aligned_size((u64)addr, sg->length);
}
- iova = __intel_alloc_iova(hwdev, domain, size);
+ iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
if (!iova) {
sglist->dma_length = 0;
return 0;
@@ -2112,14 +2122,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
prot |= DMA_PTE_WRITE;
- start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+ start_addr = iova->pfn_lo << PAGE_SHIFT;
offset = 0;
for_each_sg(sglist, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
addr = (void *)virt_to_phys(addr);
size = aligned_size((u64)addr, sg->length);
ret = domain_page_mapping(domain, start_addr + offset,
- ((u64)addr) & PAGE_MASK_4K,
+ ((u64)addr) & PAGE_MASK,
size, prot);
if (ret) {
/* clear the page */
@@ -2133,14 +2143,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
return 0;
}
sg->dma_address = start_addr + offset +
- ((u64)addr & (~PAGE_MASK_4K));
+ ((u64)addr & (~PAGE_MASK));
sg->dma_length = sg->length;
offset += size;
}
/* it's a non-present to present mapping */
if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
- start_addr, offset >> PAGE_SHIFT_4K, 1))
+ start_addr, offset >> VTD_PAGE_SHIFT, 1))
iommu_flush_write_buffer(domain->iommu);
return nelems;
}
@@ -2180,7 +2190,6 @@ static inline int iommu_devinfo_cache_init(void)
sizeof(struct device_domain_info),
0,
SLAB_HWCACHE_ALIGN,
-
NULL);
if (!iommu_devinfo_cache) {
printk(KERN_ERR "Couldn't create devinfo cache\n");
@@ -2198,7 +2207,6 @@ static inline int iommu_iova_cache_init(void)
sizeof(struct iova),
0,
SLAB_HWCACHE_ALIGN,
-
NULL);
if (!iommu_iova_cache) {
printk(KERN_ERR "Couldn't create iova cache\n");
@@ -2327,7 +2335,7 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
return;
end = DOMAIN_MAX_ADDR(domain->gaw);
- end = end & (~PAGE_MASK_4K);
+ end = end & (~VTD_PAGE_MASK);
/* clear ptes */
dma_pte_clear_range(domain, 0, end);
@@ -2423,6 +2431,6 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
if (pte)
pfn = dma_pte_addr(*pte);
- return pfn >> PAGE_SHIFT_4K;
+ return pfn >> VTD_PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d2812013fd2..74801f7df9c 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -759,3 +759,24 @@ void pci_msi_init_pci_dev(struct pci_dev *dev)
{
INIT_LIST_HEAD(&dev->msi_list);
}
+
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+static void __devinit msi_acpi_init(void)
+{
+ if (acpi_pci_disabled)
+ return;
+ pci_osc_support_set(OSC_MSI_SUPPORT);
+ pcie_osc_support_set(OSC_MSI_SUPPORT);
+}
+#else
+static inline void msi_acpi_init(void) { }
+#endif /* CONFIG_ACPI */
+
+void __devinit msi_init(void)
+{
+ if (!pci_msi_enable)
+ return;
+ msi_acpi_init();
+}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 89a2f0fa10f..dfe7c8e1b18 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -24,17 +24,17 @@ struct acpi_osc_data {
acpi_handle handle;
u32 support_set;
u32 control_set;
- int is_queried;
- u32 query_result;
struct list_head sibiling;
};
static LIST_HEAD(acpi_osc_data_list);
struct acpi_osc_args {
u32 capbuf[3];
- u32 query_result;
+ u32 ctrl_result;
};
+static DEFINE_MUTEX(pci_acpi_lock);
+
static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
{
struct acpi_osc_data *data;
@@ -108,9 +108,8 @@ static acpi_status acpi_run_osc(acpi_handle handle,
goto out_kfree;
}
out_success:
- if (flags & OSC_QUERY_ENABLE)
- osc_args->query_result =
- *((u32 *)(out_obj->buffer.pointer + 8));
+ osc_args->ctrl_result =
+ *((u32 *)(out_obj->buffer.pointer + 8));
status = AE_OK;
out_kfree:
@@ -118,41 +117,53 @@ out_kfree:
return status;
}
-static acpi_status acpi_query_osc(acpi_handle handle,
- u32 level, void *context, void **retval)
+static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data,
+ u32 *result)
{
acpi_status status;
- struct acpi_osc_data *osc_data;
- u32 flags = (unsigned long)context, support_set;
- acpi_handle tmp;
+ u32 support_set;
struct acpi_osc_args osc_args;
- status = acpi_get_handle(handle, "_OSC", &tmp);
- if (ACPI_FAILURE(status))
- return status;
-
- osc_data = acpi_get_osc_data(handle);
- if (!osc_data) {
- printk(KERN_ERR "acpi osc data array is full\n");
- return AE_ERROR;
- }
-
/* do _OSC query for all possible controls */
support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
- status = acpi_run_osc(handle, &osc_args);
+ status = acpi_run_osc(osc_data->handle, &osc_args);
if (ACPI_SUCCESS(status)) {
osc_data->support_set = support_set;
- osc_data->query_result = osc_args.query_result;
- osc_data->is_queried = 1;
+ *result = osc_args.ctrl_result;
}
return status;
}
+static acpi_status acpi_query_osc(acpi_handle handle,
+ u32 level, void *context, void **retval)
+{
+ acpi_status status;
+ struct acpi_osc_data *osc_data;
+ u32 flags = (unsigned long)context, dummy;
+ acpi_handle tmp;
+
+ status = acpi_get_handle(handle, "_OSC", &tmp);
+ if (ACPI_FAILURE(status))
+ return AE_OK;
+
+ mutex_lock(&pci_acpi_lock);
+ osc_data = acpi_get_osc_data(handle);
+ if (!osc_data) {
+ printk(KERN_ERR "acpi osc data array is full\n");
+ goto out;
+ }
+
+ __acpi_query_osc(flags, osc_data, &dummy);
+out:
+ mutex_unlock(&pci_acpi_lock);
+ return AE_OK;
+}
+
/**
* __pci_osc_support_set - register OS support to Firmware
* @flags: OS support bits
@@ -181,7 +192,7 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid)
acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
{
acpi_status status;
- u32 ctrlset, control_set;
+ u32 ctrlset, control_set, result;
acpi_handle tmp;
struct acpi_osc_data *osc_data;
struct acpi_osc_args osc_args;
@@ -190,19 +201,28 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
if (ACPI_FAILURE(status))
return status;
+ mutex_lock(&pci_acpi_lock);
osc_data = acpi_get_osc_data(handle);
if (!osc_data) {
printk(KERN_ERR "acpi osc data array is full\n");
- return AE_ERROR;
+ status = AE_ERROR;
+ goto out;
}
ctrlset = (flags & OSC_CONTROL_MASKS);
- if (!ctrlset)
- return AE_TYPE;
+ if (!ctrlset) {
+ status = AE_TYPE;
+ goto out;
+ }
- if (osc_data->is_queried &&
- ((osc_data->query_result & ctrlset) != ctrlset))
- return AE_SUPPORT;
+ status = __acpi_query_osc(osc_data->support_set, osc_data, &result);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+ if ((result & ctrlset) != ctrlset) {
+ status = AE_SUPPORT;
+ goto out;
+ }
control_set = osc_data->control_set | ctrlset;
osc_args.capbuf[OSC_QUERY_TYPE] = 0;
@@ -211,7 +231,8 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
status = acpi_run_osc(handle, &osc_args);
if (ACPI_SUCCESS(status))
osc_data->control_set = control_set;
-
+out:
+ mutex_unlock(&pci_acpi_lock);
return status;
}
EXPORT_SYMBOL(pci_osc_control_set);
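For context, pci_osc_control_set() is the entry point a service driver uses to negotiate native control of a feature with firmware. The sketch below is a hypothetical caller, not code from this patch; the function name and log message are illustrative, and it assumes the OSC_PCI_EXPRESS_NATIVE_HP_CONTROL bit declared in linux/pci-acpi.h.

/* Hypothetical caller: ask firmware for native PCIe hotplug control. */
static acpi_status example_request_hp_control(acpi_handle handle)
{
	acpi_status status;

	status = pci_osc_control_set(handle, OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
	if (ACPI_FAILURE(status))
		pr_debug("_OSC: firmware retained control of PCIe hotplug\n");
	return status;
}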
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4db261e13e6..533aeb5fcbe 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -18,6 +18,7 @@
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
+#include <linux/interrupt.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
@@ -1746,6 +1747,103 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary);
#endif
/**
+ * pci_execute_reset_function() - Reset a PCI device function
+ * @dev: Device function to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * The device function is presumed to be unused when this function is called.
+ * Resetting the device will make the contents of PCI configuration space
+ * random, so any caller of this must be prepared to reinitialise the
+ * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
+ * etc.
+ *
+ * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * device doesn't support resetting a single function.
+ */
+int pci_execute_reset_function(struct pci_dev *dev)
+{
+ u16 status;
+ u32 cap;
+ int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+
+ if (!exppos)
+ return -ENOTTY;
+ pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
+ if (!(cap & PCI_EXP_DEVCAP_FLR))
+ return -ENOTTY;
+
+ pci_block_user_cfg_access(dev);
+
+ /* Wait for the Transaction Pending bit to clear */
+ msleep(100);
+ pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
+ if (status & PCI_EXP_DEVSTA_TRPND) {
+ dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
+ "sleeping for 1 second\n");
+ ssleep(1);
+ pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
+ if (status & PCI_EXP_DEVSTA_TRPND)
+ dev_info(&dev->dev, "Still busy after 1s; "
+ "proceeding with reset anyway\n");
+ }
+
+ pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_BCR_FLR);
+ mdelay(100);
+
+ pci_unblock_user_cfg_access(dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_execute_reset_function);
+
+/**
+ * pci_reset_function() - quiesce and reset a PCI device function
+ * @dev: Device function to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device. This function differs
+ * from pci_execute_reset_function in that it saves and restores device state
+ * over the reset.
+ *
+ * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function(struct pci_dev *dev)
+{
+ u32 cap;
+ int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ int r;
+
+ if (!exppos)
+ return -ENOTTY;
+ pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
+ if (!(cap & PCI_EXP_DEVCAP_FLR))
+ return -ENOTTY;
+
+ if (!dev->msi_enabled && !dev->msix_enabled)
+ disable_irq(dev->irq);
+ pci_save_state(dev);
+
+ pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+
+ r = pci_execute_reset_function(dev);
+
+ pci_restore_state(dev);
+ if (!dev->msi_enabled && !dev->msix_enabled)
+ enable_irq(dev->irq);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function);
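A hedged usage sketch of the new interface (the function name and recovery path below are hypothetical, not part of this patch): a driver with a quiesced device can attempt a function-level reset and fall back gracefully when the capability is absent.

/* Hypothetical driver recovery path; pdev is assumed valid and idle. */
static int example_recover(struct pci_dev *pdev)
{
	int rc = pci_reset_function(pdev);	/* FLR with config state saved/restored */

	if (rc == -ENOTTY)
		return rc;			/* no Function Level Reset capability */

	/* driver-specific reinitialisation (rings, interrupts, ...) would follow */
	return rc;
}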
+
+/**
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
* @dev: PCI device to query
*
@@ -1933,6 +2031,9 @@ static int __devinit pci_init(void)
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
pci_fixup_device(pci_fixup_final, dev);
}
+
+ msi_init();
+
return 0;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index b205ab866a1..9de87e9f98f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -98,9 +98,11 @@ extern unsigned int pci_pm_d3_delay;
#ifdef CONFIG_PCI_MSI
void pci_no_msi(void);
extern void pci_msi_init_pci_dev(struct pci_dev *dev);
+extern void __devinit msi_init(void);
#else
static inline void pci_no_msi(void) { }
static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
+static inline void msi_init(void) { }
#endif
#ifdef CONFIG_PCIEAER
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index aaaf0a1fed2..6f1e51d77bc 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -480,19 +480,27 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
u32 buses, i, j = 0;
u16 bctl;
+ int broken = 0;
pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n",
buses & 0xffffff, pass);
+ /* Check if setup is sensible at all */
+ if (!pass &&
+ ((buses & 0xff) != bus->number || ((buses >> 8) & 0xff) <= bus->number)) {
+ dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
+ broken = 1;
+ }
+
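For reference, the sanity check above inspects the bridge's bus-number registers, which PCI packs into the single PCI_PRIMARY_BUS dword. The helper below is purely illustrative of that layout and of the condition being tested; it is not part of this patch.

/* Illustrative decoding of the PCI_PRIMARY_BUS dword read above. */
static void example_decode_buses(struct pci_bus *bus, u32 buses)
{
	u8 primary     = buses & 0xff;		/* should equal bus->number       */
	u8 secondary   = (buses >> 8) & 0xff;	/* should be greater than primary */
	u8 subordinate = (buses >> 16) & 0xff;	/* highest bus behind the bridge  */

	if (primary != bus->number || secondary <= bus->number)
		pr_debug("bridge bus numbers look bogus (%u/%u/%u)\n",
			 primary, secondary, subordinate);
}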
/* Disable MasterAbortMode during probing to avoid reporting
of bus errors (in some architectures) */
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
- if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) {
+ if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus && !broken) {
unsigned int cmax, busnr;
/*
* Bus already configured by firmware, process it in the first
@@ -530,7 +538,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
* do in the second pass.
*/
if (!pass) {
- if (pcibios_assign_all_busses())
+ if (pcibios_assign_all_busses() || broken)
/* Temporarily disable forwarding of the
configuration cycles on all bridges in
this bus segment to avoid possible
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bbf66ea8fd8..96cf8ecd04c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -43,6 +43,20 @@ static void __devinit quirk_mellanox_tavor(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
+/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
+int forbid_dac __read_mostly;
+EXPORT_SYMBOL(forbid_dac);
+
+static __devinit void via_no_dac(struct pci_dev *dev)
+{
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
+ dev_info(&dev->dev,
+ "VIA PCI bridge detected. Disabling DAC.\n");
+ forbid_dac = 1;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
+
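forbid_dac itself is only set here; it is expected to be consulted by the architecture's DMA-mask checks. The sketch below is a hypothetical consumer written under that assumption (the function name and message are illustrative), refusing a double-address-cycle mask once a VIA bridge has been seen.

/* Hypothetical consumer: reject >32-bit DMA masks when DAC is forbidden. */
static int example_dac_allowed(struct device *dev, u64 mask)
{
	if (mask > DMA_32BIT_MASK && forbid_dac > 0) {
		dev_info(dev, "disallowing DAC for this device\n");
		return 0;
	}
	return 1;
}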
/* Deal with broken BIOS'es that neglect to enable passive release,
which can cause problems in combination with the 82441FX/PPro MTRRs */
static void quirk_passive_release(struct pci_dev *dev)
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 4edfc4731bd..5af8bd53814 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -166,6 +166,7 @@ struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
{
struct pci_dev *pdev;
+ pci_dev_get(from);
pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
pci_dev_put(pdev);
return pdev;
@@ -270,12 +271,8 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
struct pci_dev *pdev = NULL;
WARN_ON(in_interrupt());
- if (from) {
- /* FIXME
- * take the cast off, when bus_find_device is made const.
- */
- dev_start = (struct device *)&from->dev;
- }
+ if (from)
+ dev_start = &from->dev;
dev = bus_find_device(&pci_bus_type, dev_start, (void *)id,
match_pci_dev_by_id);
if (dev)
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 0c6db03698e..4dd1c3e157a 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -78,18 +78,100 @@ static struct kobj_type pci_slot_ktype = {
.default_attrs = pci_slot_default_attrs,
};
+static char *make_slot_name(const char *name)
+{
+ char *new_name;
+ int len, max, dup;
+
+ new_name = kstrdup(name, GFP_KERNEL);
+ if (!new_name)
+ return NULL;
+
+ /*
+ * Make sure we hit the realloc case the first time through the
+ * loop. 'len' will be strlen(name) + 3 at that point which is
+ * enough space for "name-X" and the trailing NUL.
+ */
+ len = strlen(name) + 2;
+ max = 1;
+ dup = 1;
+
+ for (;;) {
+ struct kobject *dup_slot;
+ dup_slot = kset_find_obj(pci_slots_kset, new_name);
+ if (!dup_slot)
+ break;
+ kobject_put(dup_slot);
+ if (dup == max) {
+ len++;
+ max *= 10;
+ kfree(new_name);
+ new_name = kmalloc(len, GFP_KERNEL);
+ if (!new_name)
+ break;
+ }
+ sprintf(new_name, "%s-%d", name, dup++);
+ }
+
+ return new_name;
+}
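To make the duplicate-name handling above concrete, here is a hedged usage sketch. The caller is hypothetical, and the concrete strings assume slots named "2" and "2-1" are already registered in pci_slots_kset.

/* Illustrative caller; assumes "2" and "2-1" already exist as slot kobjects. */
static void example_slot_naming(void)
{
	char *name = make_slot_name("2");	/* expected to yield "2-2" here */

	if (name) {
		pr_debug("allocated slot name: %s\n", name);
		kfree(name);			/* caller owns the returned buffer */
	}
}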
+
+static int rename_slot(struct pci_slot *slot, const char *name)
+{
+ int result = 0;
+ char *slot_name;
+
+ if (strcmp(pci_slot_name(slot), name) == 0)
+ return result;
+
+ slot_name = make_slot_name(name);
+ if (!slot_name)
+ return -ENOMEM;
+
+ result = kobject_rename(&slot->kobj, slot_name);
+ kfree(slot_name);
+
+ return result;
+}
+
+static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr)
+{
+ struct pci_slot *slot;
+ /*
+ * We already hold pci_bus_sem so don't worry
+ */
+ list_for_each_entry(slot, &parent->slots, list)
+ if (slot->number == slot_nr) {
+ kobject_get(&slot->kobj);
+ return slot;
+ }
+
+ return NULL;
+}
+
/**
* pci_create_slot - create or increment refcount for physical PCI slot
* @parent: struct pci_bus of parent bridge
* @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder
* @name: user visible string presented in /sys/bus/pci/slots/<name>
+ * @hotplug: set if caller is hotplug driver, NULL otherwise
*
* PCI slots have first class attributes such as address, speed, width,
* and a &struct pci_slot is used to manage them. This interface will
* either return a new &struct pci_slot to the caller, or if the pci_slot
* already exists, its refcount will be incremented.
*
- * Slots are uniquely identified by a @pci_bus, @slot_nr, @name tuple.
+ * Slots are uniquely identified by a @pci_bus, @slot_nr tuple.
+ *
+ * There are known platforms with broken firmware that assign the same
+ * name to multiple slots. Work around these broken platforms by renaming
+ * the slots on behalf of the caller. If firmware assigns name N to
+ * multiple slots:
+ *
+ * The first slot is assigned N
+ * The second slot is assigned N-1
+ * The third slot is assigned N-2
+ * etc.
*
* Placeholder slots:
* In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify
@@ -98,61 +180,67 @@ static struct kobj_type pci_slot_ktype = {
* the slot. In this scenario, the caller may pass -1 for @slot_nr.
*
* The following semantics are imposed when the caller passes @slot_nr ==
- * -1. First, the check for existing %struct pci_slot is skipped, as the
- * caller may know about several unpopulated slots on a given %struct
- * pci_bus, and each slot would have a @slot_nr of -1. Uniqueness for
- * these slots is then determined by the @name parameter. We expect
- * kobject_init_and_add() to warn us if the caller attempts to create
- * multiple slots with the same name. The other change in semantics is
+ * -1. First, we no longer check for an existing %struct pci_slot, as there
+ * may be many slots with @slot_nr of -1. The other change in semantics is
* user-visible, which is the 'address' parameter presented in sysfs will
* consist solely of a dddd:bb tuple, where dddd is the PCI domain of the
* %struct pci_bus and bb is the bus number. In other words, the devfn of
* the 'placeholder' slot will not be displayed.
*/
-
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
- const char *name)
+ const char *name,
+ struct hotplug_slot *hotplug)
{
struct pci_dev *dev;
struct pci_slot *slot;
- int err;
+ int err = 0;
+ char *slot_name = NULL;
down_write(&pci_bus_sem);
if (slot_nr == -1)
goto placeholder;
- /* If we've already created this slot, bump refcount and return. */
- list_for_each_entry(slot, &parent->slots, list) {
- if (slot->number == slot_nr) {
- kobject_get(&slot->kobj);
- pr_debug("%s: inc refcount to %d on %04x:%02x:%02x\n",
- __func__,
- atomic_read(&slot->kobj.kref.refcount),
- pci_domain_nr(parent), parent->number,
- slot_nr);
- goto out;
+ /*
+ * Hotplug drivers are allowed to rename an existing slot,
+ * but only if not already claimed.
+ */
+ slot = get_slot(parent, slot_nr);
+ if (slot) {
+ if (hotplug) {
+ if ((err = slot->hotplug ? -EBUSY : 0)
+ || (err = rename_slot(slot, name))) {
+ kobject_put(&slot->kobj);
+ slot = NULL;
+ goto err;
+ }
}
+ goto out;
}
placeholder:
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot) {
- slot = ERR_PTR(-ENOMEM);
- goto out;
+ err = -ENOMEM;
+ goto err;
}
slot->bus = parent;
slot->number = slot_nr;
slot->kobj.kset = pci_slots_kset;
- err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
- "%s", name);
- if (err) {
- printk(KERN_ERR "Unable to register kobject %s\n", name);
+
+ slot_name = make_slot_name(name);
+ if (!slot_name) {
+ err = -ENOMEM;
goto err;
}
+ err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
+ "%s", slot_name);
+ if (err)
+ goto err;
+
INIT_LIST_HEAD(&slot->list);
list_add(&slot->list, &parent->slots);
@@ -164,10 +252,10 @@ placeholder:
pr_debug("%s: created pci_slot on %04x:%02x:%02x\n",
__func__, pci_domain_nr(parent), parent->number, slot_nr);
- out:
+out:
up_write(&pci_bus_sem);
return slot;
- err:
+err:
kfree(slot);
slot = ERR_PTR(err);
goto out;
@@ -175,7 +263,7 @@ placeholder:
EXPORT_SYMBOL_GPL(pci_create_slot);
/**
- * pci_update_slot_number - update %struct pci_slot -> number
+ * pci_renumber_slot - update %struct pci_slot -> number
* @slot - %struct pci_slot to update
* @slot_nr - new number for slot
*
@@ -183,27 +271,22 @@ EXPORT_SYMBOL_GPL(pci_create_slot);
* created a placeholder slot in pci_create_slot() by passing a -1 as
* slot_nr, to update their %struct pci_slot with the correct @slot_nr.
*/
-
-void pci_update_slot_number(struct pci_slot *slot, int slot_nr)
+void pci_renumber_slot(struct pci_slot *slot, int slot_nr)
{
- int name_count = 0;
struct pci_slot *tmp;
down_write(&pci_bus_sem);
list_for_each_entry(tmp, &slot->bus->slots, list) {
WARN_ON(tmp->number == slot_nr);
- if (!strcmp(kobject_name(&tmp->kobj), kobject_name(&slot->kobj)))
- name_count++;
+ goto out;
}
- if (name_count > 1)
- printk(KERN_WARNING "pci_update_slot_number found %d slots with the same name: %s\n", name_count, kobject_name(&slot->kobj));
-
slot->number = slot_nr;
+out:
up_write(&pci_bus_sem);
}
-EXPORT_SYMBOL_GPL(pci_update_slot_number);
+EXPORT_SYMBOL_GPL(pci_renumber_slot);
/**
* pci_destroy_slot - decrement refcount for physical PCI slot
@@ -213,7 +296,6 @@ EXPORT_SYMBOL_GPL(pci_update_slot_number);
* just call kobject_put on its kobj and let our release methods do the
* rest.
*/
-
void pci_destroy_slot(struct pci_slot *slot)
{
pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__,
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index b46c60b7270..23e492bf75c 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -70,7 +70,7 @@ pxa2xx-obj-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o
pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o
-pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps.o
+pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
diff --git a/drivers/pnp/Kconfig b/drivers/pnp/Kconfig
index 821933f9aa5..2a37b3fedb8 100644
--- a/drivers/pnp/Kconfig
+++ b/drivers/pnp/Kconfig
@@ -20,13 +20,21 @@ menuconfig PNP
If unsure, say Y.
-if PNP
-
-config PNP_DEBUG
- bool "PnP Debug Messages"
+config PNP_DEBUG_MESSAGES
+ default y
+ bool "PNP debugging messages"
+ depends on PNP
help
- Say Y if you want the Plug and Play Layer to print debug messages.
- This is useful if you are developing a PnP driver or troubleshooting.
+ Say Y here if you want the PNP layer to be able to produce debugging
+ messages if needed. The messages can be enabled at boot-time with
+ the pnp.debug kernel parameter.
+
+ Saying N here saves a small amount of space, since the debugging
+ messages will not be built into the kernel at all.
+
+ If you have any doubts about this, say Y here.
+
+if PNP
comment "Protocols"
diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile
index e83f34f1b5b..8de3775ec24 100644
--- a/drivers/pnp/Makefile
+++ b/drivers/pnp/Makefile
@@ -10,7 +10,3 @@ obj-$(CONFIG_ISAPNP) += isapnp/
# pnp_system_init goes after pnpacpi/pnpbios init
obj-y += system.o
-
-ifeq ($(CONFIG_PNP_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 3b8b9d3cb03..0b8d14050ef 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -166,3 +166,13 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
resource_size_t start,
resource_size_t end, int flags);
+
+extern int pnp_debug;
+
+#if defined(CONFIG_PNP_DEBUG_MESSAGES)
+#define pnp_dbg(dev, format, arg...) \
+ ({ if (pnp_debug) dev_printk(KERN_DEBUG, dev, format, ## arg); 0; })
+#else
+#define pnp_dbg(dev, format, arg...) \
+ ({ if (0) dev_printk(KERN_DEBUG, dev, format, ## arg); 0; })
+#endif
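A minimal sketch of how a caller is expected to use the new macro (the function below is hypothetical, not part of this patch); output is gated on pnp_debug, which the companion core.c change lets you enable at boot with the pnp.debug parameter.

/* Hypothetical caller: emitted only when pnp_debug is set at boot. */
static void example_report(struct pnp_dev *dev)
{
	pnp_dbg(&dev->dev, "resources %s\n",
		list_empty(&dev->resources) ? "empty" : "assigned");
}

This mirrors the dev_dbg() to pnp_dbg() conversions made throughout the rest of the patch.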
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 817fe626e15..16c01c6fa7c 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -177,6 +177,9 @@ int __pnp_add_device(struct pnp_dev *dev)
int pnp_add_device(struct pnp_dev *dev)
{
int ret;
+ char buf[128];
+ int len = 0;
+ struct pnp_id *id;
if (dev->card)
return -EINVAL;
@@ -185,17 +188,12 @@ int pnp_add_device(struct pnp_dev *dev)
if (ret)
return ret;
-#ifdef CONFIG_PNP_DEBUG
- {
- struct pnp_id *id;
+ buf[0] = '\0';
+ for (id = dev->id; id; id = id->next)
+ len += scnprintf(buf + len, sizeof(buf) - len, " %s", id->id);
- dev_printk(KERN_DEBUG, &dev->dev, "%s device, IDs",
- dev->protocol->name);
- for (id = dev->id; id; id = id->next)
- printk(" %s", id->id);
- printk(" (%s)\n", dev->active ? "active" : "disabled");
- }
-#endif
+ pnp_dbg(&dev->dev, "%s device, IDs%s (%s)\n",
+ dev->protocol->name, buf, dev->active ? "active" : "disabled");
return 0;
}
@@ -214,3 +212,14 @@ static int __init pnp_init(void)
}
subsys_initcall(pnp_init);
+
+int pnp_debug;
+
+#if defined(CONFIG_PNP_DEBUG_MESSAGES)
+static int __init pnp_debug_setup(char *__unused)
+{
+ pnp_debug = 1;
+ return 1;
+}
+__setup("pnp.debug", pnp_debug_setup);
+#endif
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index e3f7e89c4df..527ee764c93 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -114,7 +114,6 @@ static int pnp_device_probe(struct device *dev)
} else
goto fail;
- dev_dbg(dev, "driver attached\n");
return error;
fail:
@@ -211,8 +210,6 @@ struct bus_type pnp_bus_type = {
int pnp_register_driver(struct pnp_driver *drv)
{
- pnp_dbg("the driver '%s' has been registered", drv->name);
-
drv->driver.name = drv->name;
drv->driver.bus = &pnp_bus_type;
@@ -222,7 +219,6 @@ int pnp_register_driver(struct pnp_driver *drv)
void pnp_unregister_driver(struct pnp_driver *drv)
{
driver_unregister(&drv->driver);
- pnp_dbg("the driver '%s' has been unregistered", drv->name);
}
/**
diff --git a/drivers/pnp/isapnp/Makefile b/drivers/pnp/isapnp/Makefile
index 3e38f06f8d7..cac18bbfb81 100644
--- a/drivers/pnp/isapnp/Makefile
+++ b/drivers/pnp/isapnp/Makefile
@@ -5,7 +5,3 @@
isapnp-proc-$(CONFIG_PROC_FS) = proc.o
obj-y := core.o compat.o $(isapnp-proc-y)
-
-ifeq ($(CONFIG_PNP_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 46455fbab6d..e851160e14f 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -901,7 +901,7 @@ static int isapnp_get_resources(struct pnp_dev *dev)
{
int i, ret;
- dev_dbg(&dev->dev, "get resources\n");
+ pnp_dbg(&dev->dev, "get resources\n");
pnp_init_resources(dev);
isapnp_cfg_begin(dev->card->number, dev->number);
dev->active = isapnp_read_byte(ISAPNP_CFG_ACTIVATE);
@@ -939,13 +939,13 @@ static int isapnp_set_resources(struct pnp_dev *dev)
struct resource *res;
int tmp;
- dev_dbg(&dev->dev, "set resources\n");
+ pnp_dbg(&dev->dev, "set resources\n");
isapnp_cfg_begin(dev->card->number, dev->number);
dev->active = 1;
for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) {
res = pnp_get_resource(dev, IORESOURCE_IO, tmp);
if (pnp_resource_enabled(res)) {
- dev_dbg(&dev->dev, " set io %d to %#llx\n",
+ pnp_dbg(&dev->dev, " set io %d to %#llx\n",
tmp, (unsigned long long) res->start);
isapnp_write_word(ISAPNP_CFG_PORT + (tmp << 1),
res->start);
@@ -957,14 +957,14 @@ static int isapnp_set_resources(struct pnp_dev *dev)
int irq = res->start;
if (irq == 2)
irq = 9;
- dev_dbg(&dev->dev, " set irq %d to %d\n", tmp, irq);
+ pnp_dbg(&dev->dev, " set irq %d to %d\n", tmp, irq);
isapnp_write_byte(ISAPNP_CFG_IRQ + (tmp << 1), irq);
}
}
for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) {
res = pnp_get_resource(dev, IORESOURCE_DMA, tmp);
if (pnp_resource_enabled(res)) {
- dev_dbg(&dev->dev, " set dma %d to %lld\n",
+ pnp_dbg(&dev->dev, " set dma %d to %lld\n",
tmp, (unsigned long long) res->start);
isapnp_write_byte(ISAPNP_CFG_DMA + tmp, res->start);
}
@@ -972,7 +972,7 @@ static int isapnp_set_resources(struct pnp_dev *dev)
for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) {
res = pnp_get_resource(dev, IORESOURCE_MEM, tmp);
if (pnp_resource_enabled(res)) {
- dev_dbg(&dev->dev, " set mem %d to %#llx\n",
+ pnp_dbg(&dev->dev, " set mem %d to %#llx\n",
tmp, (unsigned long long) res->start);
isapnp_write_word(ISAPNP_CFG_MEM + (tmp << 3),
(res->start >> 8) & 0xffff);
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index b526eaad3f6..00fd3577b98 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -25,7 +25,7 @@ static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
res = pnp_get_resource(dev, IORESOURCE_IO, idx);
if (res) {
- dev_dbg(&dev->dev, " io %d already set to %#llx-%#llx "
+ pnp_dbg(&dev->dev, " io %d already set to %#llx-%#llx "
"flags %#lx\n", idx, (unsigned long long) res->start,
(unsigned long long) res->end, res->flags);
return 0;
@@ -38,7 +38,7 @@ static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
if (!rule->size) {
res->flags |= IORESOURCE_DISABLED;
- dev_dbg(&dev->dev, " io %d disabled\n", idx);
+ pnp_dbg(&dev->dev, " io %d disabled\n", idx);
goto __add;
}
@@ -49,7 +49,7 @@ static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
res->start += rule->align;
res->end = res->start + rule->size - 1;
if (res->start > rule->max || !rule->align) {
- dev_dbg(&dev->dev, " couldn't assign io %d "
+ pnp_dbg(&dev->dev, " couldn't assign io %d "
"(min %#llx max %#llx)\n", idx,
(unsigned long long) rule->min,
(unsigned long long) rule->max);
@@ -68,7 +68,7 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
res = pnp_get_resource(dev, IORESOURCE_MEM, idx);
if (res) {
- dev_dbg(&dev->dev, " mem %d already set to %#llx-%#llx "
+ pnp_dbg(&dev->dev, " mem %d already set to %#llx-%#llx "
"flags %#lx\n", idx, (unsigned long long) res->start,
(unsigned long long) res->end, res->flags);
return 0;
@@ -90,7 +90,7 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
if (!rule->size) {
res->flags |= IORESOURCE_DISABLED;
- dev_dbg(&dev->dev, " mem %d disabled\n", idx);
+ pnp_dbg(&dev->dev, " mem %d disabled\n", idx);
goto __add;
}
@@ -101,7 +101,7 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
res->start += rule->align;
res->end = res->start + rule->size - 1;
if (res->start > rule->max || !rule->align) {
- dev_dbg(&dev->dev, " couldn't assign mem %d "
+ pnp_dbg(&dev->dev, " couldn't assign mem %d "
"(min %#llx max %#llx)\n", idx,
(unsigned long long) rule->min,
(unsigned long long) rule->max);
@@ -126,7 +126,7 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
res = pnp_get_resource(dev, IORESOURCE_IRQ, idx);
if (res) {
- dev_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n",
+ pnp_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n",
idx, (int) res->start, res->flags);
return 0;
}
@@ -138,7 +138,7 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
if (bitmap_empty(rule->map.bits, PNP_IRQ_NR)) {
res->flags |= IORESOURCE_DISABLED;
- dev_dbg(&dev->dev, " irq %d disabled\n", idx);
+ pnp_dbg(&dev->dev, " irq %d disabled\n", idx);
goto __add;
}
@@ -160,11 +160,11 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
res->start = -1;
res->end = -1;
res->flags |= IORESOURCE_DISABLED;
- dev_dbg(&dev->dev, " irq %d disabled (optional)\n", idx);
+ pnp_dbg(&dev->dev, " irq %d disabled (optional)\n", idx);
goto __add;
}
- dev_dbg(&dev->dev, " couldn't assign irq %d\n", idx);
+ pnp_dbg(&dev->dev, " couldn't assign irq %d\n", idx);
return -EBUSY;
__add:
@@ -184,7 +184,7 @@ static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
res = pnp_get_resource(dev, IORESOURCE_DMA, idx);
if (res) {
- dev_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n",
+ pnp_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n",
idx, (int) res->start, res->flags);
return 0;
}
@@ -205,7 +205,7 @@ static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
res->start = res->end = MAX_DMA_CHANNELS;
#endif
res->flags |= IORESOURCE_DISABLED;
- dev_dbg(&dev->dev, " disable dma %d\n", idx);
+ pnp_dbg(&dev->dev, " disable dma %d\n", idx);
__add:
pnp_add_dma_resource(dev, res->start, res->flags);
@@ -238,7 +238,7 @@ static int pnp_assign_resources(struct pnp_dev *dev, int set)
int nport = 0, nmem = 0, nirq = 0, ndma = 0;
int ret = 0;
- dev_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set);
+ pnp_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set);
mutex_lock(&pnp_res_mutex);
pnp_clean_resource_table(dev);
@@ -270,7 +270,7 @@ static int pnp_assign_resources(struct pnp_dev *dev, int set)
mutex_unlock(&pnp_res_mutex);
if (ret < 0) {
- dev_dbg(&dev->dev, "pnp_assign_resources failed (%d)\n", ret);
+ pnp_dbg(&dev->dev, "pnp_assign_resources failed (%d)\n", ret);
pnp_clean_resource_table(dev);
} else
dbg_pnp_show_resources(dev, "pnp_assign_resources succeeded");
@@ -286,7 +286,7 @@ int pnp_auto_config_dev(struct pnp_dev *dev)
int i, ret;
if (!pnp_can_configure(dev)) {
- dev_dbg(&dev->dev, "configuration not supported\n");
+ pnp_dbg(&dev->dev, "configuration not supported\n");
return -ENODEV;
}
@@ -313,7 +313,7 @@ int pnp_auto_config_dev(struct pnp_dev *dev)
int pnp_start_dev(struct pnp_dev *dev)
{
if (!pnp_can_write(dev)) {
- dev_dbg(&dev->dev, "activation not supported\n");
+ pnp_dbg(&dev->dev, "activation not supported\n");
return -EINVAL;
}
@@ -336,7 +336,7 @@ int pnp_start_dev(struct pnp_dev *dev)
int pnp_stop_dev(struct pnp_dev *dev)
{
if (!pnp_can_disable(dev)) {
- dev_dbg(&dev->dev, "disabling not supported\n");
+ pnp_dbg(&dev->dev, "disabling not supported\n");
return -EINVAL;
}
if (dev->protocol->disable(dev) < 0) {
diff --git a/drivers/pnp/pnpacpi/Makefile b/drivers/pnp/pnpacpi/Makefile
index 2d7a1e6908b..905326fcca8 100644
--- a/drivers/pnp/pnpacpi/Makefile
+++ b/drivers/pnp/pnpacpi/Makefile
@@ -3,7 +3,3 @@
#
obj-y := core.o rsparser.o
-
-ifeq ($(CONFIG_PNP_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 53561d72b4e..383e47c392a 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -75,7 +75,7 @@ static int __init ispnpidacpi(char *id)
static int pnpacpi_get_resources(struct pnp_dev *dev)
{
- dev_dbg(&dev->dev, "get resources\n");
+ pnp_dbg(&dev->dev, "get resources\n");
return pnpacpi_parse_allocated_resource(dev);
}
@@ -86,7 +86,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
int ret;
acpi_status status;
- dev_dbg(&dev->dev, "set resources\n");
+ pnp_dbg(&dev->dev, "set resources\n");
ret = pnpacpi_build_resource_template(dev, &buffer);
if (ret)
return ret;
@@ -148,9 +148,13 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
acpi_status status;
struct pnp_dev *dev;
+ /*
+ * If a PnP ACPI device is not present, the device
+ * driver should not be loaded.
+ */
status = acpi_get_handle(device->handle, "_CRS", &temp);
if (ACPI_FAILURE(status) || !ispnpidacpi(acpi_device_hid(device)) ||
- is_exclusive_device(device))
+ is_exclusive_device(device) || (!device->status.present))
return 0;
dev = pnp_alloc_dev(&pnpacpi_protocol, num, acpi_device_hid(device));
@@ -255,14 +259,14 @@ int pnpacpi_disabled __initdata;
static int __init pnpacpi_init(void)
{
if (acpi_disabled || pnpacpi_disabled) {
- pnp_info("PnP ACPI: disabled");
+ printk(KERN_INFO "pnp: PnP ACPI: disabled\n");
return 0;
}
- pnp_info("PnP ACPI init");
+ printk(KERN_INFO "pnp: PnP ACPI init\n");
pnp_register_protocol(&pnpacpi_protocol);
register_acpi_bus_type(&acpi_pnp_bus);
acpi_get_devices(NULL, pnpacpi_add_device_handler, NULL, NULL);
- pnp_info("PnP ACPI: found %d devices", num);
+ printk(KERN_INFO "pnp: PnP ACPI: found %d devices\n", num);
unregister_acpi_bus_type(&acpi_pnp_bus);
pnp_platform_devices = 1;
return 0;
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 95015cbfd33..adf17856bac 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -132,7 +132,8 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev,
pnp_add_irq_resource(dev, irq, flags);
}
-static int dma_flags(int type, int bus_master, int transfer)
+static int dma_flags(struct pnp_dev *dev, int type, int bus_master,
+ int transfer)
{
int flags = 0;
@@ -154,7 +155,7 @@ static int dma_flags(int type, int bus_master, int transfer)
default:
/* Set a default value ? */
flags |= IORESOURCE_DMA_COMPATIBLE;
- pnp_err("Invalid DMA type");
+ dev_err(&dev->dev, "invalid DMA type %d\n", type);
}
switch (transfer) {
case ACPI_TRANSFER_8:
@@ -169,7 +170,7 @@ static int dma_flags(int type, int bus_master, int transfer)
default:
/* Set a default value ? */
flags |= IORESOURCE_DMA_8AND16BIT;
- pnp_err("Invalid DMA transfer type");
+ dev_err(&dev->dev, "invalid DMA transfer type %d\n", transfer);
}
return flags;
@@ -336,7 +337,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
case ACPI_RESOURCE_TYPE_DMA:
dma = &res->data.dma;
if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
- flags = dma_flags(dma->type, dma->bus_master,
+ flags = dma_flags(dev, dma->type, dma->bus_master,
dma->transfer);
else
flags = IORESOURCE_DISABLED;
@@ -449,7 +450,7 @@ int pnpacpi_parse_allocated_resource(struct pnp_dev *dev)
acpi_handle handle = dev->data;
acpi_status status;
- dev_dbg(&dev->dev, "parse allocated resources\n");
+ pnp_dbg(&dev->dev, "parse allocated resources\n");
pnp_init_resources(dev);
@@ -477,7 +478,7 @@ static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev,
for (i = 0; i < p->channel_count; i++)
map |= 1 << p->channels[i];
- flags = dma_flags(p->type, p->bus_master, p->transfer);
+ flags = dma_flags(dev, p->type, p->bus_master, p->transfer);
pnp_register_dma_resource(dev, option_flags, map, flags);
}
@@ -608,8 +609,8 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
unsigned char flags = 0;
status = acpi_resource_to_address64(r, p);
- if (!ACPI_SUCCESS(status)) {
- pnp_warn("PnPACPI: failed to convert resource type %d",
+ if (ACPI_FAILURE(status)) {
+ dev_warn(&dev->dev, "can't convert resource type %d\n",
r->type);
return;
}
@@ -735,7 +736,7 @@ int __init pnpacpi_parse_resource_option_data(struct pnp_dev *dev)
acpi_status status;
struct acpipnp_parse_option_s parse_data;
- dev_dbg(&dev->dev, "parse resource options\n");
+ pnp_dbg(&dev->dev, "parse resource options\n");
parse_data.dev = dev;
parse_data.option_flags = 0;
@@ -843,7 +844,7 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev,
if (!pnp_resource_enabled(p)) {
irq->interrupt_count = 0;
- dev_dbg(&dev->dev, " encode irq (%s)\n",
+ pnp_dbg(&dev->dev, " encode irq (%s)\n",
p ? "disabled" : "missing");
return;
}
@@ -855,7 +856,7 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev,
irq->interrupt_count = 1;
irq->interrupts[0] = p->start;
- dev_dbg(&dev->dev, " encode irq %d %s %s %s (%d-byte descriptor)\n",
+ pnp_dbg(&dev->dev, " encode irq %d %s %s %s (%d-byte descriptor)\n",
(int) p->start,
triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
polarity == ACPI_ACTIVE_LOW ? "low" : "high",
@@ -872,7 +873,7 @@ static void pnpacpi_encode_ext_irq(struct pnp_dev *dev,
if (!pnp_resource_enabled(p)) {
extended_irq->interrupt_count = 0;
- dev_dbg(&dev->dev, " encode extended irq (%s)\n",
+ pnp_dbg(&dev->dev, " encode extended irq (%s)\n",
p ? "disabled" : "missing");
return;
}
@@ -885,7 +886,7 @@ static void pnpacpi_encode_ext_irq(struct pnp_dev *dev,
extended_irq->interrupt_count = 1;
extended_irq->interrupts[0] = p->start;
- dev_dbg(&dev->dev, " encode irq %d %s %s %s\n", (int) p->start,
+ pnp_dbg(&dev->dev, " encode irq %d %s %s %s\n", (int) p->start,
triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
polarity == ACPI_ACTIVE_LOW ? "low" : "high",
extended_irq->sharable == ACPI_SHARED ? "shared" : "exclusive");
@@ -899,7 +900,7 @@ static void pnpacpi_encode_dma(struct pnp_dev *dev,
if (!pnp_resource_enabled(p)) {
dma->channel_count = 0;
- dev_dbg(&dev->dev, " encode dma (%s)\n",
+ pnp_dbg(&dev->dev, " encode dma (%s)\n",
p ? "disabled" : "missing");
return;
}
@@ -934,7 +935,7 @@ static void pnpacpi_encode_dma(struct pnp_dev *dev,
dma->channel_count = 1;
dma->channels[0] = p->start;
- dev_dbg(&dev->dev, " encode dma %d "
+ pnp_dbg(&dev->dev, " encode dma %d "
"type %#x transfer %#x master %d\n",
(int) p->start, dma->type, dma->transfer, dma->bus_master);
}
@@ -958,7 +959,7 @@ static void pnpacpi_encode_io(struct pnp_dev *dev,
io->address_length = 0;
}
- dev_dbg(&dev->dev, " encode io %#x-%#x decode %#x\n", io->minimum,
+ pnp_dbg(&dev->dev, " encode io %#x-%#x decode %#x\n", io->minimum,
io->minimum + io->address_length - 1, io->io_decode);
}
@@ -976,7 +977,7 @@ static void pnpacpi_encode_fixed_io(struct pnp_dev *dev,
fixed_io->address_length = 0;
}
- dev_dbg(&dev->dev, " encode fixed_io %#x-%#x\n", fixed_io->address,
+ pnp_dbg(&dev->dev, " encode fixed_io %#x-%#x\n", fixed_io->address,
fixed_io->address + fixed_io->address_length - 1);
}
@@ -999,7 +1000,7 @@ static void pnpacpi_encode_mem24(struct pnp_dev *dev,
memory24->address_length = 0;
}
- dev_dbg(&dev->dev, " encode mem24 %#x-%#x write_protect %#x\n",
+ pnp_dbg(&dev->dev, " encode mem24 %#x-%#x write_protect %#x\n",
memory24->minimum,
memory24->minimum + memory24->address_length - 1,
memory24->write_protect);
@@ -1023,7 +1024,7 @@ static void pnpacpi_encode_mem32(struct pnp_dev *dev,
memory32->alignment = 0;
}
- dev_dbg(&dev->dev, " encode mem32 %#x-%#x write_protect %#x\n",
+ pnp_dbg(&dev->dev, " encode mem32 %#x-%#x write_protect %#x\n",
memory32->minimum,
memory32->minimum + memory32->address_length - 1,
memory32->write_protect);
@@ -1046,7 +1047,7 @@ static void pnpacpi_encode_fixed_mem32(struct pnp_dev *dev,
fixed_memory32->address_length = 0;
}
- dev_dbg(&dev->dev, " encode fixed_mem32 %#x-%#x write_protect %#x\n",
+ pnp_dbg(&dev->dev, " encode fixed_mem32 %#x-%#x write_protect %#x\n",
fixed_memory32->address,
fixed_memory32->address + fixed_memory32->address_length - 1,
fixed_memory32->write_protect);
@@ -1060,7 +1061,7 @@ int pnpacpi_encode_resources(struct pnp_dev *dev, struct acpi_buffer *buffer)
struct acpi_resource *resource = buffer->pointer;
int port = 0, irq = 0, dma = 0, mem = 0;
- dev_dbg(&dev->dev, "encode %d resources\n", res_cnt);
+ pnp_dbg(&dev->dev, "encode %d resources\n", res_cnt);
while (i < res_cnt) {
switch (resource->type) {
case ACPI_RESOURCE_TYPE_IRQ:
diff --git a/drivers/pnp/pnpbios/Makefile b/drivers/pnp/pnpbios/Makefile
index 310e2b3a771..3cd3ed76060 100644
--- a/drivers/pnp/pnpbios/Makefile
+++ b/drivers/pnp/pnpbios/Makefile
@@ -5,7 +5,3 @@
pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o
obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y)
-
-ifeq ($(CONFIG_PNP_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 2bfe13369df..996f6483807 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -211,7 +211,7 @@ static int pnpbios_get_resources(struct pnp_dev *dev)
if (!pnpbios_is_dynamic(dev))
return -EPERM;
- dev_dbg(&dev->dev, "get resources\n");
+ pnp_dbg(&dev->dev, "get resources\n");
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -1;
@@ -234,7 +234,7 @@ static int pnpbios_set_resources(struct pnp_dev *dev)
if (!pnpbios_is_dynamic(dev))
return -EPERM;
- dev_dbg(&dev->dev, "set resources\n");
+ pnp_dbg(&dev->dev, "set resources\n");
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -1;
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index ca567671379..87b4f49a525 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -87,7 +87,7 @@ static unsigned char *pnpbios_parse_allocated_resource_data(struct pnp_dev *dev,
if (!p)
return NULL;
- dev_dbg(&dev->dev, "parse allocated resources\n");
+ pnp_dbg(&dev->dev, "parse allocated resources\n");
pnp_init_resources(dev);
@@ -324,7 +324,7 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
if (!p)
return NULL;
- dev_dbg(&dev->dev, "parse resource options\n");
+ pnp_dbg(&dev->dev, "parse resource options\n");
option_flags = 0;
while ((char *)p < (char *)end) {
@@ -519,7 +519,7 @@ static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p,
p[10] = (len >> 8) & 0xff;
p[11] = ((len >> 8) >> 8) & 0xff;
- dev_dbg(&dev->dev, " encode mem %#lx-%#lx\n", base, base + len - 1);
+ pnp_dbg(&dev->dev, " encode mem %#lx-%#lx\n", base, base + len - 1);
}
static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p,
@@ -549,7 +549,7 @@ static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p,
p[18] = (len >> 16) & 0xff;
p[19] = (len >> 24) & 0xff;
- dev_dbg(&dev->dev, " encode mem32 %#lx-%#lx\n", base, base + len - 1);
+ pnp_dbg(&dev->dev, " encode mem32 %#lx-%#lx\n", base, base + len - 1);
}
static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p,
@@ -575,7 +575,7 @@ static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p,
p[10] = (len >> 16) & 0xff;
p[11] = (len >> 24) & 0xff;
- dev_dbg(&dev->dev, " encode fixed_mem32 %#lx-%#lx\n", base,
+ pnp_dbg(&dev->dev, " encode fixed_mem32 %#lx-%#lx\n", base,
base + len - 1);
}
@@ -592,7 +592,7 @@ static void pnpbios_encode_irq(struct pnp_dev *dev, unsigned char *p,
p[1] = map & 0xff;
p[2] = (map >> 8) & 0xff;
- dev_dbg(&dev->dev, " encode irq mask %#lx\n", map);
+ pnp_dbg(&dev->dev, " encode irq mask %#lx\n", map);
}
static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p,
@@ -607,7 +607,7 @@ static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p,
p[1] = map & 0xff;
- dev_dbg(&dev->dev, " encode dma mask %#lx\n", map);
+ pnp_dbg(&dev->dev, " encode dma mask %#lx\n", map);
}
static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p,
@@ -630,7 +630,7 @@ static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p,
p[5] = (base >> 8) & 0xff;
p[7] = len & 0xff;
- dev_dbg(&dev->dev, " encode io %#lx-%#lx\n", base, base + len - 1);
+ pnp_dbg(&dev->dev, " encode io %#lx-%#lx\n", base, base + len - 1);
}
static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p,
@@ -651,7 +651,7 @@ static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p,
p[2] = (base >> 8) & 0xff;
p[3] = len & 0xff;
- dev_dbg(&dev->dev, " encode fixed_io %#lx-%#lx\n", base,
+ pnp_dbg(&dev->dev, " encode fixed_io %#lx-%#lx\n", base,
base + len - 1);
}
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index c144bd57561..8473fe5ed7f 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -337,9 +337,8 @@ void pnp_fixup_device(struct pnp_dev *dev)
for (f = pnp_fixups; *f->id; f++) {
if (!compare_pnp_id(dev->id, f->id))
continue;
-#ifdef DEBUG
- dev_dbg(&dev->dev, "%s: calling %pF\n", f->id, f->quirk_function);
-#endif
+ pnp_dbg(&dev->dev, "%s: calling %pF\n", f->id,
+ f->quirk_function);
f->quirk_function(dev);
}
}
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index dbae23acdd5..f604061d2bb 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -294,7 +294,7 @@ static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,
u8 progif;
if (pci->irq == irq) {
- dev_dbg(&pnp->dev, "device %s using irq %d\n",
+ pnp_dbg(&pnp->dev, " device %s using irq %d\n",
pci_name(pci), irq);
return 1;
}
@@ -316,7 +316,7 @@ static int pci_dev_uses_irq(struct pnp_dev *pnp, struct pci_dev *pci,
if ((progif & 0x5) != 0x5)
if (pci_get_legacy_ide_irq(pci, 0) == irq ||
pci_get_legacy_ide_irq(pci, 1) == irq) {
- dev_dbg(&pnp->dev, "legacy IDE device %s "
+ pnp_dbg(&pnp->dev, " legacy IDE device %s "
"using irq %d\n", pci_name(pci), irq);
return 1;
}
@@ -517,7 +517,7 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
res->start = irq;
res->end = irq;
- dev_dbg(&dev->dev, " add irq %d flags %#x\n", irq, flags);
+ pnp_dbg(&dev->dev, " add irq %d flags %#x\n", irq, flags);
return pnp_res;
}
@@ -538,7 +538,7 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
res->start = dma;
res->end = dma;
- dev_dbg(&dev->dev, " add dma %d flags %#x\n", dma, flags);
+ pnp_dbg(&dev->dev, " add dma %d flags %#x\n", dma, flags);
return pnp_res;
}
@@ -562,7 +562,7 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
res->start = start;
res->end = end;
- dev_dbg(&dev->dev, " add io %#llx-%#llx flags %#x\n",
+ pnp_dbg(&dev->dev, " add io %#llx-%#llx flags %#x\n",
(unsigned long long) start, (unsigned long long) end, flags);
return pnp_res;
}
@@ -587,7 +587,7 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
res->start = start;
res->end = end;
- dev_dbg(&dev->dev, " add mem %#llx-%#llx flags %#x\n",
+ pnp_dbg(&dev->dev, " add mem %#llx-%#llx flags %#x\n",
(unsigned long long) start, (unsigned long long) end, flags);
return pnp_res;
}
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index b42df162071..63087d5ce60 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -75,18 +75,17 @@ char *pnp_resource_type_name(struct resource *res)
void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
{
-#ifdef DEBUG
char buf[128];
int len;
struct pnp_resource *pnp_res;
struct resource *res;
if (list_empty(&dev->resources)) {
- dev_dbg(&dev->dev, "%s: no current resources\n", desc);
+ pnp_dbg(&dev->dev, "%s: no current resources\n", desc);
return;
}
- dev_dbg(&dev->dev, "%s: current resources:\n", desc);
+ pnp_dbg(&dev->dev, "%s: current resources:\n", desc);
list_for_each_entry(pnp_res, &dev->resources, list) {
res = &pnp_res->res;
len = 0;
@@ -95,7 +94,7 @@ void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
pnp_resource_type_name(res));
if (res->flags & IORESOURCE_DISABLED) {
- dev_dbg(&dev->dev, "%sdisabled\n", buf);
+ pnp_dbg(&dev->dev, "%sdisabled\n", buf);
continue;
}
@@ -116,9 +115,8 @@ void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
res->flags);
break;
}
- dev_dbg(&dev->dev, "%s\n", buf);
+ pnp_dbg(&dev->dev, "%s\n", buf);
}
-#endif
}
char *pnp_option_priority_name(struct pnp_option *option)
@@ -136,7 +134,6 @@ char *pnp_option_priority_name(struct pnp_option *option)
void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option)
{
-#ifdef DEBUG
char buf[128];
int len = 0, i;
struct pnp_port *port;
@@ -208,6 +205,5 @@ void dbg_pnp_show_option(struct pnp_dev *dev, struct pnp_option *option)
"flags %#x", dma->map, dma->flags);
break;
}
- dev_dbg(&dev->dev, "%s\n", buf);
-#endif
+ pnp_dbg(&dev->dev, "%s\n", buf);
}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 814f49fde53..8abbb2020af 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -171,10 +171,10 @@ config RTC_DRV_MAX6900
will be called rtc-max6900.
config RTC_DRV_RS5C372
- tristate "Ricoh RS5C372A/B, RV5C386, RV5C387A"
+ tristate "Ricoh R2025S/D, RS5C372A/B, RV5C386, RV5C387A"
help
If you say yes here you get support for the
- Ricoh RS5C372A, RS5C372B, RV5C386, and RV5C387A RTC chips.
+ Ricoh R2025S/D, RS5C372A, RS5C372B, RV5C386, and RV5C387A RTC chips.
This driver can also be built as a module. If so, the module
will be called rtc-rs5c372.
@@ -246,6 +246,16 @@ config RTC_DRV_TWL92330
platforms. The support is integrated with the rest of
the Menelaus driver; it's not separate module.
+config RTC_DRV_TWL4030
+ tristate "TI TWL4030/TWL5030/TPS659x0"
+ depends on RTC_CLASS && TWL4030_CORE
+ help
+ If you say yes here you get support for the RTC on the
+ TWL4030 family chips, used mostly with OMAP3 platforms.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-twl4030.
+
config RTC_DRV_S35390A
tristate "Seiko Instruments S-35390A"
select BITREVERSE
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index d6a9ac7176e..e9e8474cc8f 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
+obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl4030.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c
new file mode 100644
index 00000000000..abe87a4d266
--- /dev/null
+++ b/drivers/rtc/rtc-twl4030.c
@@ -0,0 +1,564 @@
+/*
+ * rtc-twl4030.c -- TWL4030 Real Time Clock interface
+ *
+ * Copyright (C) 2007 MontaVista Software, Inc
+ * Author: Alexandre Rusev <source@mvista.com>
+ *
+ * Based on original TI driver twl4030-rtc.c
+ * Copyright (C) 2006 Texas Instruments, Inc.
+ *
+ * Based on rtc-omap.c
+ * Copyright (C) 2003 MontaVista Software, Inc.
+ * Author: George G. Davis <gdavis@mvista.com> or <source@mvista.com>
+ * Copyright (C) 2006 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+
+#include <linux/i2c/twl4030.h>
+
+
+/*
+ * RTC block register offsets (use TWL_MODULE_RTC)
+ */
+#define REG_SECONDS_REG 0x00
+#define REG_MINUTES_REG 0x01
+#define REG_HOURS_REG 0x02
+#define REG_DAYS_REG 0x03
+#define REG_MONTHS_REG 0x04
+#define REG_YEARS_REG 0x05
+#define REG_WEEKS_REG 0x06
+
+#define REG_ALARM_SECONDS_REG 0x07
+#define REG_ALARM_MINUTES_REG 0x08
+#define REG_ALARM_HOURS_REG 0x09
+#define REG_ALARM_DAYS_REG 0x0A
+#define REG_ALARM_MONTHS_REG 0x0B
+#define REG_ALARM_YEARS_REG 0x0C
+
+#define REG_RTC_CTRL_REG 0x0D
+#define REG_RTC_STATUS_REG 0x0E
+#define REG_RTC_INTERRUPTS_REG 0x0F
+
+#define REG_RTC_COMP_LSB_REG 0x10
+#define REG_RTC_COMP_MSB_REG 0x11
+
+/* RTC_CTRL_REG bitfields */
+#define BIT_RTC_CTRL_REG_STOP_RTC_M 0x01
+#define BIT_RTC_CTRL_REG_ROUND_30S_M 0x02
+#define BIT_RTC_CTRL_REG_AUTO_COMP_M 0x04
+#define BIT_RTC_CTRL_REG_MODE_12_24_M 0x08
+#define BIT_RTC_CTRL_REG_TEST_MODE_M 0x10
+#define BIT_RTC_CTRL_REG_SET_32_COUNTER_M 0x20
+#define BIT_RTC_CTRL_REG_GET_TIME_M 0x40
+
+/* RTC_STATUS_REG bitfields */
+#define BIT_RTC_STATUS_REG_RUN_M 0x02
+#define BIT_RTC_STATUS_REG_1S_EVENT_M 0x04
+#define BIT_RTC_STATUS_REG_1M_EVENT_M 0x08
+#define BIT_RTC_STATUS_REG_1H_EVENT_M 0x10
+#define BIT_RTC_STATUS_REG_1D_EVENT_M 0x20
+#define BIT_RTC_STATUS_REG_ALARM_M 0x40
+#define BIT_RTC_STATUS_REG_POWER_UP_M 0x80
+
+/* RTC_INTERRUPTS_REG bitfields */
+#define BIT_RTC_INTERRUPTS_REG_EVERY_M 0x03
+#define BIT_RTC_INTERRUPTS_REG_IT_TIMER_M 0x04
+#define BIT_RTC_INTERRUPTS_REG_IT_ALARM_M 0x08
+
+
+/* REG_SECONDS_REG through REG_YEARS_REG is how many registers? */
+#define ALL_TIME_REGS 6
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Supports 1 byte read from TWL4030 RTC register.
+ */
+static int twl4030_rtc_read_u8(u8 *data, u8 reg)
+{
+ int ret;
+
+ ret = twl4030_i2c_read_u8(TWL4030_MODULE_RTC, data, reg);
+ if (ret < 0)
+ pr_err("twl4030_rtc: Could not read TWL4030 "
+ "register %X - error %d\n", reg, ret);
+ return ret;
+}
+
+/*
+ * Supports 1 byte write to TWL4030 RTC registers.
+ */
+static int twl4030_rtc_write_u8(u8 data, u8 reg)
+{
+ int ret;
+
+ ret = twl4030_i2c_write_u8(TWL4030_MODULE_RTC, data, reg);
+ if (ret < 0)
+ pr_err("twl4030_rtc: Could not write TWL4030 "
+ "register %X - error %d\n", reg, ret);
+ return ret;
+}
+
+/*
+ * Cache the value for timer/alarm interrupts register; this is
+ * only changed by callers holding rtc ops lock (or resume).
+ */
+static unsigned char rtc_irq_bits;
+
+/*
+ * Enable timer and/or alarm interrupts.
+ */
+static int set_rtc_irq_bit(unsigned char bit)
+{
+ unsigned char val;
+ int ret;
+
+ val = rtc_irq_bits | bit;
+ ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
+ if (ret == 0)
+ rtc_irq_bits = val;
+
+ return ret;
+}
+
+/*
+ * Disable timer and/or alarm interrupts.
+ */
+static int mask_rtc_irq_bit(unsigned char bit)
+{
+ unsigned char val;
+ int ret;
+
+ val = rtc_irq_bits & ~bit;
+ ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
+ if (ret == 0)
+ rtc_irq_bits = val;
+
+ return ret;
+}
+
+static inline int twl4030_rtc_alarm_irq_set_state(int enabled)
+{
+ int ret;
+
+ if (enabled)
+ ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+ else
+ ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+
+ return ret;
+}
+
+static inline int twl4030_rtc_irq_set_state(int enabled)
+{
+ int ret;
+
+ if (enabled)
+ ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+ else
+ ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+
+ return ret;
+}
+
+/*
+ * Gets current TWL4030 RTC time and date parameters.
+ *
+ * The RTC's time/alarm representation is not what gmtime(3) requires
+ * Linux to use:
+ *
+ * - Months are 1..12 vs Linux 0..11
+ * - Years are 0..99 vs Linux years since 1900 (we assume the 21st century)
+ */
+static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned char rtc_data[ALL_TIME_REGS + 1];
+ int ret;
+ u8 save_control;
+
+ ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
+ if (ret < 0)
+ return ret;
+
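+ /*
+ * Setting GET_TIME latches the current time into the readable
+ * time registers, so the block read below returns a consistent
+ * snapshot of all six values.
+ */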
+ save_control |= BIT_RTC_CTRL_REG_GET_TIME_M;
+
+ ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ if (ret < 0)
+ return ret;
+
+ ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
+ REG_SECONDS_REG, ALL_TIME_REGS);
+
+ if (ret < 0) {
+ dev_err(dev, "rtc_read_time error %d\n", ret);
+ return ret;
+ }
+
+ tm->tm_sec = bcd2bin(rtc_data[0]);
+ tm->tm_min = bcd2bin(rtc_data[1]);
+ tm->tm_hour = bcd2bin(rtc_data[2]);
+ tm->tm_mday = bcd2bin(rtc_data[3]);
+ tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
+ tm->tm_year = bcd2bin(rtc_data[5]) + 100;
+
+ return ret;
+}
+
+static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned char save_control;
+ unsigned char rtc_data[ALL_TIME_REGS + 1];
+ int ret;
+
+ rtc_data[1] = bin2bcd(tm->tm_sec);
+ rtc_data[2] = bin2bcd(tm->tm_min);
+ rtc_data[3] = bin2bcd(tm->tm_hour);
+ rtc_data[4] = bin2bcd(tm->tm_mday);
+ rtc_data[5] = bin2bcd(tm->tm_mon + 1);
+ rtc_data[6] = bin2bcd(tm->tm_year - 100);
+
+ /* Stop RTC while updating the TC registers */
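+ /* Note: STOP_RTC=1 means the counter is running; clearing it halts the RTC */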
+ ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
+ if (ret < 0)
+ goto out;
+
+ save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M;
+ ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ if (ret < 0)
+ goto out;
+
+ /* update all the time registers in one shot */
+ ret = twl4030_i2c_write(TWL4030_MODULE_RTC, rtc_data,
+ REG_SECONDS_REG, ALL_TIME_REGS);
+ if (ret < 0) {
+ dev_err(dev, "rtc_set_time error %d\n", ret);
+ goto out;
+ }
+
+ /* Restart the RTC */
+ save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M;
+ ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+
+out:
+ return ret;
+}
+
+/*
+ * Gets current TWL4030 RTC alarm time.
+ */
+static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ unsigned char rtc_data[ALL_TIME_REGS + 1];
+ int ret;
+
+ ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
+ REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
+ if (ret < 0) {
+ dev_err(dev, "rtc_read_alarm error %d\n", ret);
+ return ret;
+ }
+
+ /* some of these fields may be wildcard/"match all" */
+ alm->time.tm_sec = bcd2bin(rtc_data[0]);
+ alm->time.tm_min = bcd2bin(rtc_data[1]);
+ alm->time.tm_hour = bcd2bin(rtc_data[2]);
+ alm->time.tm_mday = bcd2bin(rtc_data[3]);
+ alm->time.tm_mon = bcd2bin(rtc_data[4]) - 1;
+ alm->time.tm_year = bcd2bin(rtc_data[5]) + 100;
+
+ /* report cached alarm enable state */
+ if (rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M)
+ alm->enabled = 1;
+
+ return ret;
+}
+
+static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ unsigned char alarm_data[ALL_TIME_REGS + 1];
+ int ret;
+
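+ /*
+ * Disable the alarm interrupt while the alarm registers are
+ * rewritten, so a partially updated alarm time cannot fire.
+ */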
+ ret = twl4030_rtc_alarm_irq_set_state(0);
+ if (ret)
+ goto out;
+
+ alarm_data[1] = bin2bcd(alm->time.tm_sec);
+ alarm_data[2] = bin2bcd(alm->time.tm_min);
+ alarm_data[3] = bin2bcd(alm->time.tm_hour);
+ alarm_data[4] = bin2bcd(alm->time.tm_mday);
+ alarm_data[5] = bin2bcd(alm->time.tm_mon + 1);
+ alarm_data[6] = bin2bcd(alm->time.tm_year - 100);
+
+ /* update all the alarm registers in one shot */
+ ret = twl4030_i2c_write(TWL4030_MODULE_RTC, alarm_data,
+ REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
+ if (ret) {
+ dev_err(dev, "rtc_set_alarm error %d\n", ret);
+ goto out;
+ }
+
+ if (alm->enabled)
+ ret = twl4030_rtc_alarm_irq_set_state(1);
+out:
+ return ret;
+}
+
+#ifdef CONFIG_RTC_INTF_DEV
+
+static int twl4030_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ return twl4030_rtc_alarm_irq_set_state(0);
+ case RTC_AIE_ON:
+ return twl4030_rtc_alarm_irq_set_state(1);
+ case RTC_UIE_OFF:
+ return twl4030_rtc_irq_set_state(0);
+ case RTC_UIE_ON:
+ return twl4030_rtc_irq_set_state(1);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+#else
+#define twl4030_rtc_ioctl NULL
+#endif
+
+static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
+{
+ unsigned long events = 0;
+ int ret = IRQ_NONE;
+ int res;
+ u8 rd_reg;
+
+#ifdef CONFIG_LOCKDEP
+ /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
+ * we don't want and can't tolerate. Although it might be
+ * friendlier not to borrow this thread context...
+ */
+ local_irq_enable();
+#endif
+
+ res = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
+ if (res)
+ goto out;
+ /*
+ * Figure out the source of the interrupt: ALARM or TIMER in
+ * RTC_STATUS_REG. Only one (ALARM or TIMER) interrupt source may be
+ * enabled at a time; we could also cross-check the result by reading
+ * RTC_INTERRUPTS_REG[IT_TIMER, IT_ALARM].
+ */
+ if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M)
+ events |= RTC_IRQF | RTC_AF;
+ else
+ events |= RTC_IRQF | RTC_UF;
+
+ res = twl4030_rtc_write_u8(rd_reg | BIT_RTC_STATUS_REG_ALARM_M,
+ REG_RTC_STATUS_REG);
+ if (res)
+ goto out;
+
+ /* Clear-on-read is enabled: the RTC_IT bit of TWL4030_INT_PWR_ISR1
+ * needs two reads to clear the interrupt. One read is done in
+ * do_twl4030_pwrirq(); the second read here clears the bit.
+ *
+ * FIXME the reason PWR_ISR1 needs an extra read is that
+ * RTC_IT kept retriggering until we cleared the ALARM status bit above.
+ * But re-reading like this is a bad hack; by doing so we
+ * risk wrongly clearing status for some other IRQ (losing
+ * the interrupt). Be smarter about handling RTC_UF ...
+ */
+ res = twl4030_i2c_read_u8(TWL4030_MODULE_INT,
+ &rd_reg, TWL4030_INT_PWR_ISR1);
+ if (res)
+ goto out;
+
+ /* Notify RTC core on event */
+ rtc_update_irq(rtc, 1, events);
+
+ ret = IRQ_HANDLED;
+out:
+ return ret;
+}
+
+static struct rtc_class_ops twl4030_rtc_ops = {
+ .ioctl = twl4030_rtc_ioctl,
+ .read_time = twl4030_rtc_read_time,
+ .set_time = twl4030_rtc_set_time,
+ .read_alarm = twl4030_rtc_read_alarm,
+ .set_alarm = twl4030_rtc_set_alarm,
+};
+
+/*----------------------------------------------------------------------*/
+
+static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
+{
+ struct rtc_device *rtc;
+ int ret = 0;
+ int irq = platform_get_irq(pdev, 0);
+ u8 rd_reg;
+
+ if (irq < 0)
+ return irq;
+
+ rtc = rtc_device_register(pdev->name,
+ &pdev->dev, &twl4030_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
+ PTR_ERR(rtc));
+ goto out0;
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
+
+ if (ret < 0)
+ goto out1;
+
+ if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M)
+ dev_warn(&pdev->dev, "Power up reset detected.\n");
+
+ if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M)
+ dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n");
+
+ /* Clear RTC Power up reset and pending alarm interrupts */
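+ /* these status bits are cleared by writing them back as ones */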
+ ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
+ if (ret < 0)
+ goto out1;
+
+ ret = request_irq(irq, twl4030_rtc_interrupt,
+ IRQF_TRIGGER_RISING,
+ rtc->dev.bus_id, rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "IRQ is not free.\n");
+ goto out1;
+ }
+
+ /* Check RTC module status, Enable if it is off */
+ ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
+ if (ret < 0)
+ goto out2;
+
+ if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
+ dev_info(&pdev->dev, "Enabling TWL4030-RTC.\n");
+ rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
+ ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
+ if (ret < 0)
+ goto out2;
+ }
+
+ /* init cached IRQ enable bits */
+ ret = twl4030_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
+ if (ret < 0)
+ goto out2;
+
+ return ret;
+
+out2:
+ free_irq(irq, rtc);
+out1:
+ rtc_device_unregister(rtc);
+out0:
+ return ret;
+}
+
+/*
+ * Disable all TWL4030 RTC module interrupts and unregister the device;
+ * the RTC itself is left running.
+ */
+static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
+{
+ /* leave rtc running, but disable irqs */
+ struct rtc_device *rtc = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+ mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+
+ free_irq(irq, rtc);
+
+ rtc_device_unregister(rtc);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static void twl4030_rtc_shutdown(struct platform_device *pdev)
+{
+ mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M |
+ BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+}
+
+#ifdef CONFIG_PM
+
+static unsigned char irqstat;
+
+static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ irqstat = rtc_irq_bits;
+
+ /* REVISIT alarm may need to wake us from sleep */
+ mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M |
+ BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
+ return 0;
+}
+
+static int twl4030_rtc_resume(struct platform_device *pdev)
+{
+ set_rtc_irq_bit(irqstat);
+ return 0;
+}
+
+#else
+#define twl4030_rtc_suspend NULL
+#define twl4030_rtc_resume NULL
+#endif
+
+MODULE_ALIAS("platform:twl4030_rtc");
+
+static struct platform_driver twl4030rtc_driver = {
+ .probe = twl4030_rtc_probe,
+ .remove = __devexit_p(twl4030_rtc_remove),
+ .shutdown = twl4030_rtc_shutdown,
+ .suspend = twl4030_rtc_suspend,
+ .resume = twl4030_rtc_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "twl4030_rtc",
+ },
+};
+
+static int __init twl4030_rtc_init(void)
+{
+ return platform_driver_register(&twl4030rtc_driver);
+}
+module_init(twl4030_rtc_init);
+
+static void __exit twl4030_rtc_exit(void)
+{
+ platform_driver_unregister(&twl4030rtc_driver);
+}
+module_exit(twl4030_rtc_exit);
+
+MODULE_AUTHOR("Texas Instruments, MontaVista Software");
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 0a225ccda02..4b76fca64a6 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2011,10 +2011,9 @@ static void dasd_flush_request_queue(struct dasd_block *block)
spin_unlock_irq(&block->request_queue_lock);
}
-static int dasd_open(struct inode *inp, struct file *filp)
+static int dasd_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inp->i_bdev->bd_disk;
- struct dasd_block *block = disk->private_data;
+ struct dasd_block *block = bdev->bd_disk->private_data;
struct dasd_device *base = block->base;
int rc;
@@ -2052,9 +2051,8 @@ unlock:
return rc;
}
-static int dasd_release(struct inode *inp, struct file *filp)
+static int dasd_release(struct gendisk *disk, fmode_t mode)
{
- struct gendisk *disk = inp->i_bdev->bd_disk;
struct dasd_block *block = disk->private_data;
atomic_dec(&block->open_count);
@@ -2089,8 +2087,7 @@ dasd_device_operations = {
.owner = THIS_MODULE,
.open = dasd_open,
.release = dasd_release,
- .ioctl = dasd_ioctl,
- .compat_ioctl = dasd_compat_ioctl,
+ .locked_ioctl = dasd_ioctl,
.getgeo = dasd_getgeo,
};
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index aee6565aaf9..e99d566b69c 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -99,7 +99,7 @@ int dasd_scan_partitions(struct dasd_block *block)
struct block_device *bdev;
bdev = bdget_disk(block->gdp, 0);
- if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0)
+ if (!bdev || blkdev_get(bdev, FMODE_READ) < 0)
return -ENODEV;
/*
* See fs/partition/check.c:register_disk,rescan_partitions
@@ -152,7 +152,7 @@ void dasd_destroy_partitions(struct dasd_block *block)
invalidate_partition(block->gdp, 0);
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
- blkdev_put(bdev);
+ blkdev_put(bdev, FMODE_READ);
set_capacity(block->gdp, 0);
}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 489d5fe488f..05a14536c36 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -610,8 +610,7 @@ int dasd_scan_partitions(struct dasd_block *);
void dasd_destroy_partitions(struct dasd_block *);
/* externals in dasd_ioctl.c */
-int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
-long dasd_compat_ioctl(struct file *, unsigned int, unsigned long);
+int dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
/* externals in dasd_proc.c */
int dasd_proc_init(void);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 91a64630cb0..b82d816d9ef 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -366,10 +366,9 @@ static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
}
int
-dasd_ioctl(struct inode *inode, struct file *file,
+dasd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
struct dasd_block *block = bdev->bd_disk->private_data;
void __user *argp = (void __user *)arg;
@@ -421,15 +420,3 @@ dasd_ioctl(struct inode *inode, struct file *file,
return -EINVAL;
}
}
-
-long
-dasd_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int rval;
-
- lock_kernel();
- rval = dasd_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
-
- return (rval == -EINVAL) ? -ENOIOCTLCMD : rval;
-}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index a7ff167d5b8..63f26a135fe 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -31,8 +31,8 @@
#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
-static int dcssblk_open(struct inode *inode, struct file *filp);
-static int dcssblk_release(struct inode *inode, struct file *filp);
+static int dcssblk_open(struct block_device *bdev, fmode_t mode);
+static int dcssblk_release(struct gendisk *disk, fmode_t mode);
static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
void **kaddr, unsigned long *pfn);
@@ -776,32 +776,31 @@ out_buf:
}
static int
-dcssblk_open(struct inode *inode, struct file *filp)
+dcssblk_open(struct block_device *bdev, fmode_t mode)
{
struct dcssblk_dev_info *dev_info;
int rc;
- dev_info = inode->i_bdev->bd_disk->private_data;
+ dev_info = bdev->bd_disk->private_data;
if (NULL == dev_info) {
rc = -ENODEV;
goto out;
}
atomic_inc(&dev_info->use_count);
- inode->i_bdev->bd_block_size = 4096;
+ bdev->bd_block_size = 4096;
rc = 0;
out:
return rc;
}
static int
-dcssblk_release(struct inode *inode, struct file *filp)
+dcssblk_release(struct gendisk *disk, fmode_t mode)
{
- struct dcssblk_dev_info *dev_info;
+ struct dcssblk_dev_info *dev_info = disk->private_data;
struct segment_info *entry;
int rc;
- dev_info = inode->i_bdev->bd_disk->private_data;
- if (NULL == dev_info) {
+ if (!dev_info) {
rc = -ENODEV;
goto out;
}
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index a25b8bf54f4..023803dbb0c 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -43,9 +43,9 @@
/*
* file operation structure for tape block frontend
*/
-static int tapeblock_open(struct inode *, struct file *);
-static int tapeblock_release(struct inode *, struct file *);
-static int tapeblock_ioctl(struct inode *, struct file *, unsigned int,
+static int tapeblock_open(struct block_device *, fmode_t);
+static int tapeblock_release(struct gendisk *, fmode_t);
+static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
unsigned long);
static int tapeblock_medium_changed(struct gendisk *);
static int tapeblock_revalidate_disk(struct gendisk *);
@@ -54,7 +54,7 @@ static struct block_device_operations tapeblock_fops = {
.owner = THIS_MODULE,
.open = tapeblock_open,
.release = tapeblock_release,
- .ioctl = tapeblock_ioctl,
+ .locked_ioctl = tapeblock_ioctl,
.media_changed = tapeblock_medium_changed,
.revalidate_disk = tapeblock_revalidate_disk,
};
@@ -364,13 +364,12 @@ tapeblock_medium_changed(struct gendisk *disk)
* Block frontend tape device open function.
*/
static int
-tapeblock_open(struct inode *inode, struct file *filp)
+tapeblock_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk * disk;
+ struct gendisk * disk = bdev->bd_disk;
struct tape_device * device;
int rc;
- disk = inode->i_bdev->bd_disk;
device = tape_get_device_reference(disk->private_data);
if (device->required_tapemarks) {
@@ -410,9 +409,8 @@ release:
* we just get the pointer here and release the reference.
*/
static int
-tapeblock_release(struct inode *inode, struct file *filp)
+tapeblock_release(struct gendisk *disk, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
struct tape_device *device = disk->private_data;
tape_state_set(device, TS_IN_USE);
@@ -427,22 +425,21 @@ tapeblock_release(struct inode *inode, struct file *filp)
*/
static int
tapeblock_ioctl(
- struct inode * inode,
- struct file * file,
+ struct block_device * bdev,
+ fmode_t mode,
unsigned int command,
unsigned long arg
) {
int rc;
int minor;
- struct gendisk *disk;
+ struct gendisk *disk = bdev->bd_disk;
struct tape_device *device;
rc = 0;
- disk = inode->i_bdev->bd_disk;
BUG_ON(!disk);
device = disk->private_data;
BUG_ON(!device);
- minor = iminor(inode);
+ minor = MINOR(bdev->bd_dev);
DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index afc96e844a2..2370fd82ebf 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -452,40 +452,34 @@ static ide_driver_t idescsi_driver = {
#endif
};
-static int idescsi_ide_open(struct inode *inode, struct file *filp)
+static int idescsi_ide_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct ide_scsi_obj *scsi;
+ struct ide_scsi_obj *scsi = ide_scsi_get(bdev->bd_disk);
- if (!(scsi = ide_scsi_get(disk)))
+ if (!scsi)
return -ENXIO;
return 0;
}
-static int idescsi_ide_release(struct inode *inode, struct file *filp)
+static int idescsi_ide_release(struct gendisk *disk, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct ide_scsi_obj *scsi = ide_scsi_g(disk);
-
- ide_scsi_put(scsi);
-
+ ide_scsi_put(ide_scsi_g(disk));
return 0;
}
-static int idescsi_ide_ioctl(struct inode *inode, struct file *file,
+static int idescsi_ide_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
struct ide_scsi_obj *scsi = ide_scsi_g(bdev->bd_disk);
- return generic_ide_ioctl(scsi->drive, file, bdev, cmd, arg);
+ return generic_ide_ioctl(scsi->drive, bdev, cmd, arg);
}
static struct block_device_operations idescsi_ops = {
.owner = THIS_MODULE,
.open = idescsi_ide_open,
.release = idescsi_ide_release,
- .ioctl = idescsi_ide_ioctl,
+ .locked_ioctl = idescsi_ide_ioctl,
};
static int idescsi_slave_configure(struct scsi_device * sdp)
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 28b19ef2630..dc1cfb2fd76 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -237,7 +237,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
case SCSI_IOCTL_SEND_COMMAND:
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- return sg_scsi_ioctl(NULL, sdev->request_queue, NULL, arg);
+ return sg_scsi_ioctl(sdev->request_queue, NULL, 0, arg);
case SCSI_IOCTL_DOORLOCK:
return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
case SCSI_IOCTL_DOORUNLOCK:
@@ -277,14 +277,14 @@ EXPORT_SYMBOL(scsi_ioctl);
* @filp: either NULL or a &struct file which must have the O_NONBLOCK flag.
*/
int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
- void __user *arg, struct file *filp)
+ void __user *arg, int ndelay)
{
int val, result;
/* The first set of iocts may be executed even if we're doing
* error processing, as long as the device was opened
* non-blocking */
- if (filp && (filp->f_flags & O_NONBLOCK)) {
+ if (ndelay) {
if (scsi_host_in_recovery(sdev->host))
return -ENODEV;
} else if (!scsi_block_when_processing_errors(sdev))
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 7c4d2e68df1..43f34c73df1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -609,17 +609,15 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
* In the latter case @inode and @filp carry an abridged amount
* of information as noted above.
**/
-static int sd_open(struct inode *inode, struct file *filp)
+static int sd_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct scsi_disk *sdkp;
+ struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
struct scsi_device *sdev;
int retval;
- if (!(sdkp = scsi_disk_get(disk)))
+ if (!sdkp)
return -ENXIO;
-
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
sdev = sdkp->device;
@@ -633,14 +631,13 @@ static int sd_open(struct inode *inode, struct file *filp)
goto error_out;
if (sdev->removable || sdkp->write_prot)
- check_disk_change(inode->i_bdev);
+ check_disk_change(bdev);
/*
* If the drive is empty, just let the open fail.
*/
retval = -ENOMEDIUM;
- if (sdev->removable && !sdkp->media_present &&
- !(filp->f_flags & O_NDELAY))
+ if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
goto error_out;
/*
@@ -648,7 +645,7 @@ static int sd_open(struct inode *inode, struct file *filp)
* if the user expects to be able to write to the thing.
*/
retval = -EROFS;
- if (sdkp->write_prot && (filp->f_mode & FMODE_WRITE))
+ if (sdkp->write_prot && (mode & FMODE_WRITE))
goto error_out;
/*
@@ -684,9 +681,8 @@ error_out:
* Note: may block (uninterruptible) if error recovery is underway
* on this disk.
**/
-static int sd_release(struct inode *inode, struct file *filp)
+static int sd_release(struct gendisk *disk, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdev = sdkp->device;
@@ -743,10 +739,9 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
* Note: most ioctls are forward onto the block subsystem or further
* down in the scsi subsystem.
**/
-static int sd_ioctl(struct inode * inode, struct file * filp,
+static int sd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
struct gendisk *disk = bdev->bd_disk;
struct scsi_device *sdp = scsi_disk(disk)->device;
void __user *p = (void __user *)arg;
@@ -761,7 +756,8 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
* may try and take the device offline, in which case all further
* access to the device is prohibited.
*/
- error = scsi_nonblockable_ioctl(sdp, cmd, p, filp);
+ error = scsi_nonblockable_ioctl(sdp, cmd, p,
+ (mode & FMODE_NDELAY_NOW) != 0);
if (!scsi_block_when_processing_errors(sdp) || !error)
return error;
@@ -775,7 +771,7 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
case SCSI_IOCTL_GET_BUS_NUMBER:
return scsi_ioctl(sdp, cmd, p);
default:
- error = scsi_cmd_ioctl(filp, disk->queue, disk, cmd, p);
+ error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
if (error != -ENOTTY)
return error;
}
@@ -928,11 +924,10 @@ static void sd_rescan(struct device *dev)
* This gets directly called from VFS. When the ioctl
* is not recognized we go back to the other translation paths.
*/
-static long sd_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = file->f_path.dentry->d_inode->i_bdev;
- struct gendisk *disk = bdev->bd_disk;
- struct scsi_device *sdev = scsi_disk(disk)->device;
+ struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
/*
* If we are in the middle of error recovery, don't let anyone
@@ -962,7 +957,7 @@ static struct block_device_operations sd_fops = {
.owner = THIS_MODULE,
.open = sd_open,
.release = sd_release,
- .ioctl = sd_ioctl,
+ .locked_ioctl = sd_ioctl,
.getgeo = sd_getgeo,
#ifdef CONFIG_COMPAT
.compat_ioctl = sd_compat_ioctl,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 93bd59a1ed7..9adf35bd8b5 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1059,7 +1059,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (sg_allow_access(filp, &opcode))
return -EPERM;
}
- return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
+ return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0f17009c99d..62b6633e3a9 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -471,38 +471,31 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
return scsi_prep_return(q, rq, ret);
}
-static int sr_block_open(struct inode *inode, struct file *file)
+static int sr_block_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct scsi_cd *cd;
- int ret = 0;
-
- if(!(cd = scsi_cd_get(disk)))
- return -ENXIO;
-
- if((ret = cdrom_open(&cd->cdi, inode, file)) != 0)
- scsi_cd_put(cd);
+ struct scsi_cd *cd = scsi_cd_get(bdev->bd_disk);
+ int ret = -ENXIO;
+ if (cd) {
+ ret = cdrom_open(&cd->cdi, bdev, mode);
+ if (ret)
+ scsi_cd_put(cd);
+ }
return ret;
}
-static int sr_block_release(struct inode *inode, struct file *file)
+static int sr_block_release(struct gendisk *disk, fmode_t mode)
{
- int ret;
- struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk);
- ret = cdrom_release(&cd->cdi, file);
- if(ret)
- return ret;
-
+ struct scsi_cd *cd = scsi_cd(disk);
+ cdrom_release(&cd->cdi, mode);
scsi_cd_put(cd);
-
return 0;
}
-static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
+static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
unsigned long arg)
{
- struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk);
+ struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
struct scsi_device *sdev = cd->device;
void __user *argp = (void __user *)arg;
int ret;
@@ -517,7 +510,7 @@ static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
return scsi_ioctl(sdev, cmd, argp);
}
- ret = cdrom_ioctl(file, &cd->cdi, inode, cmd, arg);
+ ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
if (ret != -ENOSYS)
return ret;
@@ -527,7 +520,8 @@ static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
* case fall through to scsi_ioctl, which will return ENDOEV again
* if it doesn't recognise the ioctl
*/
- ret = scsi_nonblockable_ioctl(sdev, cmd, argp, NULL);
+ ret = scsi_nonblockable_ioctl(sdev, cmd, argp,
+ (mode & FMODE_NDELAY_NOW) != 0);
if (ret != -ENODEV)
return ret;
return scsi_ioctl(sdev, cmd, argp);
@@ -544,7 +538,7 @@ static struct block_device_operations sr_bdops =
.owner = THIS_MODULE,
.open = sr_block_open,
.release = sr_block_release,
- .ioctl = sr_block_ioctl,
+ .locked_ioctl = sr_block_ioctl,
.media_changed = sr_block_media_changed,
/*
* No compat_ioctl for now because sr_block_ioctl never
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 5c28d08f18f..c959bdc55f4 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3263,7 +3263,8 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
* may try and take the device offline, in which case all further
* access to the device is prohibited.
*/
- retval = scsi_nonblockable_ioctl(STp->device, cmd_in, p, file);
+ retval = scsi_nonblockable_ioctl(STp->device, cmd_in, p,
+ file->f_flags & O_NDELAY);
if (!scsi_block_when_processing_errors(STp->device) || retval != -ENODEV)
goto out;
retval = 0;
@@ -3567,8 +3568,8 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
!capable(CAP_SYS_RAWIO))
i = -EPERM;
else
- i = scsi_cmd_ioctl(file, STp->disk->queue,
- STp->disk, cmd_in, p);
+ i = scsi_cmd_ioctl(STp->disk->queue, STp->disk,
+ file->f_mode, cmd_in, p);
if (i != -ENOTTY)
return i;
break;
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index c014ffb110e..5450a0e5ecd 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -1100,6 +1100,8 @@ enum pci_board_num_t {
pbn_b0_4_1843200_200,
pbn_b0_8_1843200_200,
+ pbn_b0_1_4000000,
+
pbn_b0_bt_1_115200,
pbn_b0_bt_2_115200,
pbn_b0_bt_8_115200,
@@ -1167,6 +1169,10 @@ enum pci_board_num_t {
pbn_exsys_4055,
pbn_plx_romulus,
pbn_oxsemi,
+ pbn_oxsemi_1_4000000,
+ pbn_oxsemi_2_4000000,
+ pbn_oxsemi_4_4000000,
+ pbn_oxsemi_8_4000000,
pbn_intel_i960,
pbn_sgi_ioc3,
pbn_computone_4,
@@ -1290,6 +1296,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.base_baud = 1843200,
.uart_offset = 0x200,
},
+ [pbn_b0_1_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 4000000,
+ .uart_offset = 8,
+ },
[pbn_b0_bt_1_115200] = {
.flags = FL_BASE0|FL_BASE_BARS,
@@ -1625,6 +1637,35 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.base_baud = 115200,
.uart_offset = 8,
},
+ [pbn_oxsemi_1_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_oxsemi_2_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_oxsemi_4_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+ [pbn_oxsemi_8_4000000] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 4000000,
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
+
/*
* EKF addition for i960 Boards form EKF with serial port.
@@ -1813,6 +1854,39 @@ serial_pci_matches(struct pciserial_board *board,
board->first_offset == guessed->first_offset;
}
+/*
+ * Oxford Semiconductor Inc.
+ * Check that device is part of the Tornado range of devices, then determine
+ * the number of ports available on the device.
+ */
+static int pci_oxsemi_tornado_init(struct pci_dev *dev, struct pciserial_board *board)
+{
+ u8 __iomem *p;
+ unsigned long deviceID;
+ unsigned int number_uarts;
+
+ /* OxSemi Tornado devices are all 0xCxxx */
+ if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
+ (dev->device & 0xF000) != 0xC000)
+ return 0;
+
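+ /* BAR0 starts with a 32-bit device ID word; the UART count lives at offset 4 */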
+ p = pci_iomap(dev, 0, 5);
+ if (p == NULL)
+ return -ENOMEM;
+
+ deviceID = ioread32(p);
+ /* Tornado device */
+ if (deviceID == 0x07000200) {
+ number_uarts = ioread8(p + 4);
+ board->num_ports = number_uarts;
+ printk(KERN_DEBUG
+ "%d ports detected on Oxford PCI Express device\n",
+ number_uarts);
+ }
+ pci_iounmap(dev, p);
+ return 0;
+}
+
struct serial_private *
pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board)
{
@@ -1821,6 +1895,13 @@ pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board)
struct pci_serial_quirk *quirk;
int rc, nr_ports, i;
+ /*
+ * Find number of ports on board
+ */
+ if (dev->vendor == PCI_VENDOR_ID_OXSEMI ||
+ dev->vendor == PCI_VENDOR_ID_MAINPINE)
+ pci_oxsemi_tornado_init(dev, board);
+
nr_ports = board->num_ports;
/*
@@ -2301,6 +2382,156 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_b0_bt_2_921600 },
/*
+ * Oxford Semiconductor Inc. Tornado PCI express device range.
+ */
+ { PCI_VENDOR_ID_OXSEMI, 0xc101, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc105, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc11b, /* OXPCIe952 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc11f, /* OXPCIe952 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc120, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc124, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc138, /* OXPCIe952 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc13d, /* OXPCIe952 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc140, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc141, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc144, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc145, /* OXPCIe952 1 Legacy UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc158, /* OXPCIe952 2 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc15d, /* OXPCIe952 2 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc208, /* OXPCIe954 4 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_4_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc20d, /* OXPCIe954 4 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_4_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc308, /* OXPCIe958 8 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_8_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc30d, /* OXPCIe958 8 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_8_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc40b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc40f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc41b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc41f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc42b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc42f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc43b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc43f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc44b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc44f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc45b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc45f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc46b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc46f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc47b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc47f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc48b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc48f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc49b, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc49f, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc4ab, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc4af, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc4bb, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc4bf, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc4cb, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_OXSEMI, 0xc4cf, /* OXPCIe200 1 Native UART */
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ /*
+ * Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado
+ */
+ { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */
+ PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0,
+ pbn_oxsemi_1_4000000 },
+ { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */
+ PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0,
+ pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */
+ PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0,
+ pbn_oxsemi_4_4000000 },
+ { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */
+ PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0,
+ pbn_oxsemi_8_4000000 },
+ /*
* SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards,
* from skokodyn@yahoo.com
*/
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index db783b77a88..c94d3c4b752 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -457,7 +457,7 @@ config SERIAL_SAMSUNG
config SERIAL_SAMSUNG_DEBUG
bool "Samsung SoC serial debug"
- depends on SERIAL_SAMSUNG
+ depends on SERIAL_SAMSUNG && DEBUG_LL
help
Add support for debugging the serial driver. Since this is
generally being used as a console, we use our own output
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 2a79decd7df..c4eff44c9f2 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -43,4 +43,8 @@ source "drivers/staging/echo/Kconfig"
source "drivers/staging/at76_usb/Kconfig"
+source "drivers/staging/pcc-acpi/Kconfig"
+
+source "drivers/staging/poch/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 325bca4f71c..7cb8701d96d 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_W35UND) += winbond/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_USB_ATMEL) += at76_usb/
+obj-$(CONFIG_PCC_ACPI) += pcc-acpi/
+obj-$(CONFIG_POCH) += poch/
diff --git a/drivers/staging/at76_usb/at76_usb.c b/drivers/staging/at76_usb/at76_usb.c
index 52df0c66518..174e2bec922 100644
--- a/drivers/staging/at76_usb/at76_usb.c
+++ b/drivers/staging/at76_usb/at76_usb.c
@@ -2319,9 +2319,11 @@ static int at76_iw_handler_get_scan(struct net_device *netdev,
if (!iwe)
return -ENOMEM;
- if (priv->scan_state != SCAN_COMPLETED)
+ if (priv->scan_state != SCAN_COMPLETED) {
/* scan not yet finished */
+ kfree(iwe);
return -EAGAIN;
+ }
spin_lock_irqsave(&priv->bss_list_spinlock, flags);
diff --git a/drivers/staging/echo/bit_operations.h b/drivers/staging/echo/bit_operations.h
index b32f4bf9939..cecdcf3fd75 100644
--- a/drivers/staging/echo/bit_operations.h
+++ b/drivers/staging/echo/bit_operations.h
@@ -30,114 +30,98 @@
#if !defined(_BIT_OPERATIONS_H_)
#define _BIT_OPERATIONS_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
#if defined(__i386__) || defined(__x86_64__)
/*! \brief Find the bit position of the highest set bit in a word
\param bits The word to be searched
\return The bit number of the highest set bit, or -1 if the word is zero. */
static __inline__ int top_bit(unsigned int bits)
{
- int res;
-
- __asm__ (" xorl %[res],%[res];\n"
- " decl %[res];\n"
- " bsrl %[bits],%[res]\n"
- : [res] "=&r" (res)
- : [bits] "rm" (bits));
- return res;
+ int res;
+
+ __asm__(" xorl %[res],%[res];\n"
+ " decl %[res];\n"
+ " bsrl %[bits],%[res]\n"
+ :[res] "=&r" (res)
+ :[bits] "rm"(bits)
+ );
+ return res;
}
-/*- End of function --------------------------------------------------------*/
/*! \brief Find the bit position of the lowest set bit in a word
\param bits The word to be searched
\return The bit number of the lowest set bit, or -1 if the word is zero. */
static __inline__ int bottom_bit(unsigned int bits)
{
- int res;
-
- __asm__ (" xorl %[res],%[res];\n"
- " decl %[res];\n"
- " bsfl %[bits],%[res]\n"
- : [res] "=&r" (res)
- : [bits] "rm" (bits));
- return res;
+ int res;
+
+ __asm__(" xorl %[res],%[res];\n"
+ " decl %[res];\n"
+ " bsfl %[bits],%[res]\n"
+ :[res] "=&r" (res)
+ :[bits] "rm"(bits)
+ );
+ return res;
}
-/*- End of function --------------------------------------------------------*/
#else
static __inline__ int top_bit(unsigned int bits)
{
- int i;
-
- if (bits == 0)
- return -1;
- i = 0;
- if (bits & 0xFFFF0000)
- {
- bits &= 0xFFFF0000;
- i += 16;
- }
- if (bits & 0xFF00FF00)
- {
- bits &= 0xFF00FF00;
- i += 8;
- }
- if (bits & 0xF0F0F0F0)
- {
- bits &= 0xF0F0F0F0;
- i += 4;
- }
- if (bits & 0xCCCCCCCC)
- {
- bits &= 0xCCCCCCCC;
- i += 2;
- }
- if (bits & 0xAAAAAAAA)
- {
- bits &= 0xAAAAAAAA;
- i += 1;
- }
- return i;
+ int i;
+
+ if (bits == 0)
+ return -1;
+ i = 0;
+ if (bits & 0xFFFF0000) {
+ bits &= 0xFFFF0000;
+ i += 16;
+ }
+ if (bits & 0xFF00FF00) {
+ bits &= 0xFF00FF00;
+ i += 8;
+ }
+ if (bits & 0xF0F0F0F0) {
+ bits &= 0xF0F0F0F0;
+ i += 4;
+ }
+ if (bits & 0xCCCCCCCC) {
+ bits &= 0xCCCCCCCC;
+ i += 2;
+ }
+ if (bits & 0xAAAAAAAA) {
+ bits &= 0xAAAAAAAA;
+ i += 1;
+ }
+ return i;
}
-/*- End of function --------------------------------------------------------*/
static __inline__ int bottom_bit(unsigned int bits)
{
- int i;
-
- if (bits == 0)
- return -1;
- i = 32;
- if (bits & 0x0000FFFF)
- {
- bits &= 0x0000FFFF;
- i -= 16;
- }
- if (bits & 0x00FF00FF)
- {
- bits &= 0x00FF00FF;
- i -= 8;
- }
- if (bits & 0x0F0F0F0F)
- {
- bits &= 0x0F0F0F0F;
- i -= 4;
- }
- if (bits & 0x33333333)
- {
- bits &= 0x33333333;
- i -= 2;
- }
- if (bits & 0x55555555)
- {
- bits &= 0x55555555;
- i -= 1;
- }
- return i;
+ int i;
+
+ if (bits == 0)
+ return -1;
+ i = 32;
+ if (bits & 0x0000FFFF) {
+ bits &= 0x0000FFFF;
+ i -= 16;
+ }
+ if (bits & 0x00FF00FF) {
+ bits &= 0x00FF00FF;
+ i -= 8;
+ }
+ if (bits & 0x0F0F0F0F) {
+ bits &= 0x0F0F0F0F;
+ i -= 4;
+ }
+ if (bits & 0x33333333) {
+ bits &= 0x33333333;
+ i -= 2;
+ }
+ if (bits & 0x55555555) {
+ bits &= 0x55555555;
+ i -= 1;
+ }
+ return i;
}
-/*- End of function --------------------------------------------------------*/
#endif
/*! \brief Bit reverse a byte.
@@ -146,16 +130,16 @@ static __inline__ int bottom_bit(unsigned int bits)
static __inline__ uint8_t bit_reverse8(uint8_t x)
{
#if defined(__i386__) || defined(__x86_64__)
- /* If multiply is fast */
- return ((x*0x0802U & 0x22110U) | (x*0x8020U & 0x88440U))*0x10101U >> 16;
+ /* If multiply is fast */
+ return ((x * 0x0802U & 0x22110U) | (x * 0x8020U & 0x88440U)) *
+ 0x10101U >> 16;
#else
- /* If multiply is slow, but we have a barrel shifter */
- x = (x >> 4) | (x << 4);
- x = ((x & 0xCC) >> 2) | ((x & 0x33) << 2);
- return ((x & 0xAA) >> 1) | ((x & 0x55) << 1);
+ /* If multiply is slow, but we have a barrel shifter */
+ x = (x >> 4) | (x << 4);
+ x = ((x & 0xCC) >> 2) | ((x & 0x33) << 2);
+ return ((x & 0xAA) >> 1) | ((x & 0x55) << 1);
#endif
}
-/*- End of function --------------------------------------------------------*/
/*! \brief Bit reverse a 16 bit word.
\param data The word to be reversed.
@@ -193,9 +177,8 @@ uint16_t make_mask16(uint16_t x);
\return The word with the single set bit. */
static __inline__ uint32_t least_significant_one32(uint32_t x)
{
- return (x & (-(int32_t) x));
+ return (x & (-(int32_t) x));
}
-/*- End of function --------------------------------------------------------*/
/*! \brief Find the most significant one in a word, and return a word
with just that bit set.
@@ -204,50 +187,42 @@ static __inline__ uint32_t least_significant_one32(uint32_t x)
static __inline__ uint32_t most_significant_one32(uint32_t x)
{
#if defined(__i386__) || defined(__x86_64__)
- return 1 << top_bit(x);
+ return 1 << top_bit(x);
#else
- x = make_mask32(x);
- return (x ^ (x >> 1));
+ x = make_mask32(x);
+ return (x ^ (x >> 1));
#endif
}
-/*- End of function --------------------------------------------------------*/
/*! \brief Find the parity of a byte.
\param x The byte to be checked.
\return 1 for odd, or 0 for even. */
static __inline__ int parity8(uint8_t x)
{
- x = (x ^ (x >> 4)) & 0x0F;
- return (0x6996 >> x) & 1;
+ x = (x ^ (x >> 4)) & 0x0F;
+ return (0x6996 >> x) & 1;
}
-/*- End of function --------------------------------------------------------*/
/*! \brief Find the parity of a 16 bit word.
\param x The word to be checked.
\return 1 for odd, or 0 for even. */
static __inline__ int parity16(uint16_t x)
{
- x ^= (x >> 8);
- x = (x ^ (x >> 4)) & 0x0F;
- return (0x6996 >> x) & 1;
+ x ^= (x >> 8);
+ x = (x ^ (x >> 4)) & 0x0F;
+ return (0x6996 >> x) & 1;
}
-/*- End of function --------------------------------------------------------*/
/*! \brief Find the parity of a 32 bit word.
\param x The word to be checked.
\return 1 for odd, or 0 for even. */
static __inline__ int parity32(uint32_t x)
{
- x ^= (x >> 16);
- x ^= (x >> 8);
- x = (x ^ (x >> 4)) & 0x0F;
- return (0x6996 >> x) & 1;
+ x ^= (x >> 16);
+ x ^= (x >> 8);
+ x = (x ^ (x >> 4)) & 0x0F;
+ return (0x6996 >> x) & 1;
}
-/*- End of function --------------------------------------------------------*/
-
-#ifdef __cplusplus
-}
-#endif
#endif
/*- End of file ------------------------------------------------------------*/
diff --git a/drivers/staging/echo/echo.c b/drivers/staging/echo/echo.c
index 4a281b14fc5..b8f2c5e9dee 100644
--- a/drivers/staging/echo/echo.c
+++ b/drivers/staging/echo/echo.c
@@ -74,7 +74,6 @@
Steve also has some nice notes on echo cancellers in echo.h
-
References:
[1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo
@@ -105,20 +104,18 @@
Mark, Pawel, and Pavel.
*/
-#include <linux/kernel.h> /* We're doing kernel work */
+#include <linux/kernel.h> /* We're doing kernel work */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#define malloc(a) kmalloc((a), GFP_KERNEL)
-#define free(a) kfree(a)
#include "bit_operations.h"
#include "echo.h"
#define MIN_TX_POWER_FOR_ADAPTION 64
#define MIN_RX_POWER_FOR_ADAPTION 64
-#define DTD_HANGOVER 600 /* 600 samples, or 75ms */
-#define DC_LOG2BETA 3 /* log2() of DC filter Beta */
+#define DTD_HANGOVER 600 /* 600 samples, or 75ms */
+#define DC_LOG2BETA 3 /* log2() of DC filter Beta */
/*-----------------------------------------------------------------------*\
FUNCTIONS
@@ -126,59 +123,58 @@
/* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */
-
-#ifdef __BLACKFIN_ASM__
-static void __inline__ lms_adapt_bg(echo_can_state_t *ec, int clean, int shift)
+#ifdef __bfin__
+static void __inline__ lms_adapt_bg(struct oslec_state *ec, int clean,
+ int shift)
{
- int i, j;
- int offset1;
- int offset2;
- int factor;
- int exp;
- int16_t *phist;
- int n;
-
- if (shift > 0)
- factor = clean << shift;
- else
- factor = clean >> -shift;
-
- /* Update the FIR taps */
-
- offset2 = ec->curr_pos;
- offset1 = ec->taps - offset2;
- phist = &ec->fir_state_bg.history[offset2];
-
- /* st: and en: help us locate the assembler in echo.s */
-
- //asm("st:");
- n = ec->taps;
- for (i = 0, j = offset2; i < n; i++, j++)
- {
- exp = *phist++ * factor;
- ec->fir_taps16[1][i] += (int16_t) ((exp+(1<<14)) >> 15);
- }
- //asm("en:");
-
- /* Note the asm for the inner loop above generated by Blackfin gcc
- 4.1.1 is pretty good (note even parallel instructions used):
-
- R0 = W [P0++] (X);
- R0 *= R2;
- R0 = R0 + R3 (NS) ||
- R1 = W [P1] (X) ||
- nop;
- R0 >>>= 15;
- R0 = R0 + R1;
- W [P1++] = R0;
-
- A block based update algorithm would be much faster but the
- above can't be improved on much. Every instruction saved in
- the loop above is 2 MIPs/ch! The for loop above is where the
- Blackfin spends most of it's time - about 17 MIPs/ch measured
- with speedtest.c with 256 taps (32ms). Write-back and
- Write-through cache gave about the same performance.
- */
+ int i, j;
+ int offset1;
+ int offset2;
+ int factor;
+ int exp;
+ int16_t *phist;
+ int n;
+
+ if (shift > 0)
+ factor = clean << shift;
+ else
+ factor = clean >> -shift;
+
+ /* Update the FIR taps */
+
+ offset2 = ec->curr_pos;
+ offset1 = ec->taps - offset2;
+ phist = &ec->fir_state_bg.history[offset2];
+
+ /* st: and en: help us locate the assembler in echo.s */
+
+ //asm("st:");
+ n = ec->taps;
+ for (i = 0, j = offset2; i < n; i++, j++) {
+ exp = *phist++ * factor;
+ ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
+ }
+ //asm("en:");
+
+ /* Note the asm for the inner loop above generated by Blackfin gcc
+ 4.1.1 is pretty good (note even parallel instructions used):
+
+ R0 = W [P0++] (X);
+ R0 *= R2;
+ R0 = R0 + R3 (NS) ||
+ R1 = W [P1] (X) ||
+ nop;
+ R0 >>>= 15;
+ R0 = R0 + R1;
+ W [P1++] = R0;
+
+ A block based update algorithm would be much faster but the
+ above can't be improved on much. Every instruction saved in
+ the loop above is 2 MIPs/ch! The for loop above is where the
+ Blackfin spends most of its time - about 17 MIPs/ch measured
+ with speedtest.c with 256 taps (32ms). Write-back and
+ Write-through cache gave about the same performance.
+ */
}
/*
@@ -200,392 +196,393 @@ static void __inline__ lms_adapt_bg(echo_can_state_t *ec, int clean, int shift)
*/
#else
-static __inline__ void lms_adapt_bg(echo_can_state_t *ec, int clean, int shift)
+static __inline__ void lms_adapt_bg(struct oslec_state *ec, int clean,
+ int shift)
{
- int i;
-
- int offset1;
- int offset2;
- int factor;
- int exp;
-
- if (shift > 0)
- factor = clean << shift;
- else
- factor = clean >> -shift;
-
- /* Update the FIR taps */
-
- offset2 = ec->curr_pos;
- offset1 = ec->taps - offset2;
-
- for (i = ec->taps - 1; i >= offset1; i--)
- {
- exp = (ec->fir_state_bg.history[i - offset1]*factor);
- ec->fir_taps16[1][i] += (int16_t) ((exp+(1<<14)) >> 15);
- }
- for ( ; i >= 0; i--)
- {
- exp = (ec->fir_state_bg.history[i + offset2]*factor);
- ec->fir_taps16[1][i] += (int16_t) ((exp+(1<<14)) >> 15);
- }
+ int i;
+
+ int offset1;
+ int offset2;
+ int factor;
+ int exp;
+
+ if (shift > 0)
+ factor = clean << shift;
+ else
+ factor = clean >> -shift;
+
+ /* Update the FIR taps */
+
+ offset2 = ec->curr_pos;
+ offset1 = ec->taps - offset2;
+
+ for (i = ec->taps - 1; i >= offset1; i--) {
+ exp = (ec->fir_state_bg.history[i - offset1] * factor);
+ ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
+ }
+ for (; i >= 0; i--) {
+ exp = (ec->fir_state_bg.history[i + offset2] * factor);
+ ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
+ }
}
#endif
-/*- End of function --------------------------------------------------------*/
-
-echo_can_state_t *echo_can_create(int len, int adaption_mode)
+struct oslec_state *oslec_create(int len, int adaption_mode)
{
- echo_can_state_t *ec;
- int i;
- int j;
-
- ec = kmalloc(sizeof(*ec), GFP_KERNEL);
- if (ec == NULL)
- return NULL;
- memset(ec, 0, sizeof(*ec));
-
- ec->taps = len;
- ec->log2taps = top_bit(len);
- ec->curr_pos = ec->taps - 1;
-
- for (i = 0; i < 2; i++)
- {
- if ((ec->fir_taps16[i] = (int16_t *) malloc((ec->taps)*sizeof(int16_t))) == NULL)
- {
- for (j = 0; j < i; j++)
- kfree(ec->fir_taps16[j]);
- kfree(ec);
- return NULL;
- }
- memset(ec->fir_taps16[i], 0, (ec->taps)*sizeof(int16_t));
- }
-
- fir16_create(&ec->fir_state,
- ec->fir_taps16[0],
- ec->taps);
- fir16_create(&ec->fir_state_bg,
- ec->fir_taps16[1],
- ec->taps);
-
- for(i=0; i<5; i++) {
- ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0;
- }
-
- ec->cng_level = 1000;
- echo_can_adaption_mode(ec, adaption_mode);
-
- ec->snapshot = (int16_t*)malloc(ec->taps*sizeof(int16_t));
- memset(ec->snapshot, 0, sizeof(int16_t)*ec->taps);
-
- ec->cond_met = 0;
- ec->Pstates = 0;
- ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
- ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
- ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
- ec->Lbgn = ec->Lbgn_acc = 0;
- ec->Lbgn_upper = 200;
- ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
-
- return ec;
+ struct oslec_state *ec;
+ int i;
+
+ ec = kzalloc(sizeof(*ec), GFP_KERNEL);
+ if (!ec)
+ return NULL;
+
+ ec->taps = len;
+ ec->log2taps = top_bit(len);
+ ec->curr_pos = ec->taps - 1;
+
+ for (i = 0; i < 2; i++) {
+ ec->fir_taps16[i] =
+ kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
+ if (!ec->fir_taps16[i])
+ goto error_oom;
+ }
+
+ fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps);
+ fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps);
+
+ for (i = 0; i < 5; i++) {
+ ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0;
+ }
+
+ ec->cng_level = 1000;
+ oslec_adaption_mode(ec, adaption_mode);
+
+ ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
+ if (!ec->snapshot)
+ goto error_oom;
+
+ ec->cond_met = 0;
+ ec->Pstates = 0;
+ ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
+ ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
+ ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
+ ec->Lbgn = ec->Lbgn_acc = 0;
+ ec->Lbgn_upper = 200;
+ ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
+
+ return ec;
+
+ error_oom:
+ for (i = 0; i < 2; i++)
+ kfree(ec->fir_taps16[i]);
+
+ kfree(ec);
+ return NULL;
}
-/*- End of function --------------------------------------------------------*/
-void echo_can_free(echo_can_state_t *ec)
+EXPORT_SYMBOL_GPL(oslec_create);
+
+void oslec_free(struct oslec_state *ec)
{
int i;
fir16_free(&ec->fir_state);
fir16_free(&ec->fir_state_bg);
- for (i = 0; i < 2; i++)
+ for (i = 0; i < 2; i++)
kfree(ec->fir_taps16[i]);
kfree(ec->snapshot);
kfree(ec);
}
-/*- End of function --------------------------------------------------------*/
-void echo_can_adaption_mode(echo_can_state_t *ec, int adaption_mode)
+EXPORT_SYMBOL_GPL(oslec_free);
+
+void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode)
{
- ec->adaption_mode = adaption_mode;
+ ec->adaption_mode = adaption_mode;
}
-/*- End of function --------------------------------------------------------*/
-void echo_can_flush(echo_can_state_t *ec)
+EXPORT_SYMBOL_GPL(oslec_adaption_mode);
+
+void oslec_flush(struct oslec_state *ec)
{
- int i;
+ int i;
- ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
- ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
- ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
+ ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
+ ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
+ ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
- ec->Lbgn = ec->Lbgn_acc = 0;
- ec->Lbgn_upper = 200;
- ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
+ ec->Lbgn = ec->Lbgn_acc = 0;
+ ec->Lbgn_upper = 200;
+ ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
- ec->nonupdate_dwell = 0;
+ ec->nonupdate_dwell = 0;
- fir16_flush(&ec->fir_state);
- fir16_flush(&ec->fir_state_bg);
- ec->fir_state.curr_pos = ec->taps - 1;
- ec->fir_state_bg.curr_pos = ec->taps - 1;
- for (i = 0; i < 2; i++)
- memset(ec->fir_taps16[i], 0, ec->taps*sizeof(int16_t));
+ fir16_flush(&ec->fir_state);
+ fir16_flush(&ec->fir_state_bg);
+ ec->fir_state.curr_pos = ec->taps - 1;
+ ec->fir_state_bg.curr_pos = ec->taps - 1;
+ for (i = 0; i < 2; i++)
+ memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t));
- ec->curr_pos = ec->taps - 1;
- ec->Pstates = 0;
+ ec->curr_pos = ec->taps - 1;
+ ec->Pstates = 0;
}
-/*- End of function --------------------------------------------------------*/
-void echo_can_snapshot(echo_can_state_t *ec) {
- memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps*sizeof(int16_t));
+EXPORT_SYMBOL_GPL(oslec_flush);
+
+void oslec_snapshot(struct oslec_state *ec)
+{
+ memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps * sizeof(int16_t));
}
-/*- End of function --------------------------------------------------------*/
+
+EXPORT_SYMBOL_GPL(oslec_snapshot);
/* Dual Path Echo Canceller ------------------------------------------------*/
-int16_t echo_can_update(echo_can_state_t *ec, int16_t tx, int16_t rx)
+int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
{
- int32_t echo_value;
- int clean_bg;
- int tmp, tmp1;
-
- /* Input scaling was found be required to prevent problems when tx
- starts clipping. Another possible way to handle this would be the
- filter coefficent scaling. */
-
- ec->tx = tx; ec->rx = rx;
- tx >>=1;
- rx >>=1;
-
- /*
- Filter DC, 3dB point is 160Hz (I think), note 32 bit precision required
- otherwise values do not track down to 0. Zero at DC, Pole at (1-Beta)
- only real axis. Some chip sets (like Si labs) don't need
- this, but something like a $10 X100P card does. Any DC really slows
- down convergence.
-
- Note: removes some low frequency from the signal, this reduces
- the speech quality when listening to samples through headphones
- but may not be obvious through a telephone handset.
-
- Note that the 3dB frequency in radians is approx Beta, e.g. for
- Beta = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz.
- */
-
- if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) {
- tmp = rx << 15;
+ int32_t echo_value;
+ int clean_bg;
+ int tmp, tmp1;
+
+ /* Input scaling was found to be required to prevent problems when tx
+ starts clipping. Another possible way to handle this would be the
+ filter coefficient scaling. */
+
+ ec->tx = tx;
+ ec->rx = rx;
+ tx >>= 1;
+ rx >>= 1;
+
+ /*
+ Filter DC, 3dB point is 160Hz (I think); note 32 bit precision is required
+ otherwise values do not track down to 0. Zero at DC, pole at (1-Beta)
+ on the real axis. Some chip sets (like Si labs) don't need
+ this, but something like a $10 X100P card does. Any DC really slows
+ down convergence.
+
+ Note: removes some low frequency from the signal, this reduces
+ the speech quality when listening to samples through headphones
+ but may not be obvious through a telephone handset.
+
+ Note that the 3dB frequency in radians is approx Beta, e.g. for
+ Beta = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz.
+ */
+
+ if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) {
+ tmp = rx << 15;
#if 1
- /* Make sure the gain of the HPF is 1.0. This can still saturate a little under
- impulse conditions, and it might roll to 32768 and need clipping on sustained peak
- level signals. However, the scale of such clipping is small, and the error due to
- any saturation should not markedly affect the downstream processing. */
- tmp -= (tmp >> 4);
+ /* Make sure the gain of the HPF is 1.0. This can still saturate a little under
+ impulse conditions, and it might roll to 32768 and need clipping on sustained peak
+ level signals. However, the scale of such clipping is small, and the error due to
+ any saturation should not markedly affect the downstream processing. */
+ tmp -= (tmp >> 4);
#endif
- ec->rx_1 += -(ec->rx_1>>DC_LOG2BETA) + tmp - ec->rx_2;
+ ec->rx_1 += -(ec->rx_1 >> DC_LOG2BETA) + tmp - ec->rx_2;
+
+ /* hard limit filter to prevent clipping. Note that at this stage
+ rx should be limited to +/- 16383 due to right shift above */
+ tmp1 = ec->rx_1 >> 15;
+ if (tmp1 > 16383)
+ tmp1 = 16383;
+ if (tmp1 < -16383)
+ tmp1 = -16383;
+ rx = tmp1;
+ ec->rx_2 = tmp;
+ }
- /* hard limit filter to prevent clipping. Note that at this stage
- rx should be limited to +/- 16383 due to right shift above */
- tmp1 = ec->rx_1 >> 15;
- if (tmp1 > 16383) tmp1 = 16383;
- if (tmp1 < -16383) tmp1 = -16383;
- rx = tmp1;
- ec->rx_2 = tmp;
- }
+ /* Block average of power in the filter states. Used for
+ adaption power calculation. */
- /* Block average of power in the filter states. Used for
- adaption power calculation. */
+ {
+ int new, old;
+
+ /* efficient "out with the old and in with the new" algorithm so
+ we don't have to recalculate over the whole block of
+ samples. */
+ new = (int)tx * (int)tx;
+ old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
+ (int)ec->fir_state.history[ec->fir_state.curr_pos];
+ ec->Pstates +=
+ ((new - old) + (1 << ec->log2taps)) >> ec->log2taps;
+ if (ec->Pstates < 0)
+ ec->Pstates = 0;
+ }
- {
- int new, old;
+ /* Calculate short term average levels using simple single pole IIRs */
- /* efficient "out with the old and in with the new" algorithm so
- we don't have to recalculate over the whole block of
- samples. */
- new = (int)tx * (int)tx;
- old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
- (int)ec->fir_state.history[ec->fir_state.curr_pos];
- ec->Pstates += ((new - old) + (1<<ec->log2taps)) >> ec->log2taps;
- if (ec->Pstates < 0) ec->Pstates = 0;
- }
-
- /* Calculate short term average levels using simple single pole IIRs */
-
- ec->Ltxacc += abs(tx) - ec->Ltx;
- ec->Ltx = (ec->Ltxacc + (1<<4)) >> 5;
- ec->Lrxacc += abs(rx) - ec->Lrx;
- ec->Lrx = (ec->Lrxacc + (1<<4)) >> 5;
-
- /* Foreground filter ---------------------------------------------------*/
-
- ec->fir_state.coeffs = ec->fir_taps16[0];
- echo_value = fir16(&ec->fir_state, tx);
- ec->clean = rx - echo_value;
- ec->Lcleanacc += abs(ec->clean) - ec->Lclean;
- ec->Lclean = (ec->Lcleanacc + (1<<4)) >> 5;
-
- /* Background filter ---------------------------------------------------*/
-
- echo_value = fir16(&ec->fir_state_bg, tx);
- clean_bg = rx - echo_value;
- ec->Lclean_bgacc += abs(clean_bg) - ec->Lclean_bg;
- ec->Lclean_bg = (ec->Lclean_bgacc + (1<<4)) >> 5;
-
- /* Background Filter adaption -----------------------------------------*/
-
- /* Almost always adap bg filter, just simple DT and energy
- detection to minimise adaption in cases of strong double talk.
- However this is not critical for the dual path algorithm.
- */
- ec->factor = 0;
- ec->shift = 0;
- if ((ec->nonupdate_dwell == 0)) {
- int P, logP, shift;
-
- /* Determine:
-
- f = Beta * clean_bg_rx/P ------ (1)
-
- where P is the total power in the filter states.
-
- The Boffins have shown that if we obey (1) we converge
- quickly and avoid instability.
-
- The correct factor f must be in Q30, as this is the fixed
- point format required by the lms_adapt_bg() function,
- therefore the scaled version of (1) is:
-
- (2^30) * f = (2^30) * Beta * clean_bg_rx/P
- factor = (2^30) * Beta * clean_bg_rx/P ----- (2)
-
- We have chosen Beta = 0.25 by experiment, so:
-
- factor = (2^30) * (2^-2) * clean_bg_rx/P
-
- (30 - 2 - log2(P))
- factor = clean_bg_rx 2 ----- (3)
-
- To avoid a divide we approximate log2(P) as top_bit(P),
- which returns the position of the highest non-zero bit in
- P. This approximation introduces an error as large as a
- factor of 2, but the algorithm seems to handle it OK.
-
- Come to think of it a divide may not be a big deal on a
- modern DSP, so its probably worth checking out the cycles
- for a divide versus a top_bit() implementation.
- */
-
- P = MIN_TX_POWER_FOR_ADAPTION + ec->Pstates;
- logP = top_bit(P) + ec->log2taps;
- shift = 30 - 2 - logP;
- ec->shift = shift;
-
- lms_adapt_bg(ec, clean_bg, shift);
- }
-
- /* very simple DTD to make sure we dont try and adapt with strong
- near end speech */
-
- ec->adapt = 0;
- if ((ec->Lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->Lrx > ec->Ltx))
- ec->nonupdate_dwell = DTD_HANGOVER;
- if (ec->nonupdate_dwell)
- ec->nonupdate_dwell--;
+ ec->Ltxacc += abs(tx) - ec->Ltx;
+ ec->Ltx = (ec->Ltxacc + (1 << 4)) >> 5;
+ ec->Lrxacc += abs(rx) - ec->Lrx;
+ ec->Lrx = (ec->Lrxacc + (1 << 4)) >> 5;
- /* Transfer logic ------------------------------------------------------*/
+ /* Foreground filter --------------------------------------------------- */
- /* These conditions are from the dual path paper [1], I messed with
- them a bit to improve performance. */
+ ec->fir_state.coeffs = ec->fir_taps16[0];
+ echo_value = fir16(&ec->fir_state, tx);
+ ec->clean = rx - echo_value;
+ ec->Lcleanacc += abs(ec->clean) - ec->Lclean;
+ ec->Lclean = (ec->Lcleanacc + (1 << 4)) >> 5;
- if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) &&
- (ec->nonupdate_dwell == 0) &&
- (8*ec->Lclean_bg < 7*ec->Lclean) /* (ec->Lclean_bg < 0.875*ec->Lclean) */ &&
- (8*ec->Lclean_bg < ec->Ltx) /* (ec->Lclean_bg < 0.125*ec->Ltx) */ )
- {
- if (ec->cond_met == 6) {
- /* BG filter has had better results for 6 consecutive samples */
- ec->adapt = 1;
- memcpy(ec->fir_taps16[0], ec->fir_taps16[1], ec->taps*sizeof(int16_t));
- }
- else
- ec->cond_met++;
- }
- else
- ec->cond_met = 0;
+ /* Background filter --------------------------------------------------- */
- /* Non-Linear Processing ---------------------------------------------------*/
+ echo_value = fir16(&ec->fir_state_bg, tx);
+ clean_bg = rx - echo_value;
+ ec->Lclean_bgacc += abs(clean_bg) - ec->Lclean_bg;
+ ec->Lclean_bg = (ec->Lclean_bgacc + (1 << 4)) >> 5;
- ec->clean_nlp = ec->clean;
- if (ec->adaption_mode & ECHO_CAN_USE_NLP)
- {
- /* Non-linear processor - a fancy way to say "zap small signals, to avoid
- residual echo due to (uLaw/ALaw) non-linearity in the channel.". */
+ /* Background Filter adaption ----------------------------------------- */
- if ((16*ec->Lclean < ec->Ltx))
- {
- /* Our e/c has improved echo by at least 24 dB (each factor of 2 is 6dB,
- so 2*2*2*2=16 is the same as 6+6+6+6=24dB) */
- if (ec->adaption_mode & ECHO_CAN_USE_CNG)
- {
- ec->cng_level = ec->Lbgn;
-
- /* Very elementary comfort noise generation. Just random
- numbers rolled off very vaguely Hoth-like. DR: This
- noise doesn't sound quite right to me - I suspect there
- are some overlfow issues in the filtering as it's too
- "crackly". TODO: debug this, maybe just play noise at
- high level or look at spectrum.
- */
-
- ec->cng_rndnum = 1664525U*ec->cng_rndnum + 1013904223U;
- ec->cng_filter = ((ec->cng_rndnum & 0xFFFF) - 32768 + 5*ec->cng_filter) >> 3;
- ec->clean_nlp = (ec->cng_filter*ec->cng_level*8) >> 14;
-
- }
- else if (ec->adaption_mode & ECHO_CAN_USE_CLIP)
- {
- /* This sounds much better than CNG */
- if (ec->clean_nlp > ec->Lbgn)
- ec->clean_nlp = ec->Lbgn;
- if (ec->clean_nlp < -ec->Lbgn)
- ec->clean_nlp = -ec->Lbgn;
+ /* Almost always adapt the bg filter, just simple DT and energy
+ detection to minimise adaption in cases of strong double talk.
+ However this is not critical for the dual path algorithm.
+ */
+ ec->factor = 0;
+ ec->shift = 0;
+ if (ec->nonupdate_dwell == 0) {
+ int P, logP, shift;
+
+ /* Determine:
+
+ f = Beta * clean_bg_rx/P ------ (1)
+
+ where P is the total power in the filter states.
+
+ The Boffins have shown that if we obey (1) we converge
+ quickly and avoid instability.
+
+ The correct factor f must be in Q30, as this is the fixed
+ point format required by the lms_adapt_bg() function,
+ therefore the scaled version of (1) is:
+
+ (2^30) * f = (2^30) * Beta * clean_bg_rx/P
+ factor = (2^30) * Beta * clean_bg_rx/P ----- (2)
+
+ We have chosen Beta = 0.25 by experiment, so:
+
+ factor = (2^30) * (2^-2) * clean_bg_rx/P
+
+ (30 - 2 - log2(P))
+ factor = clean_bg_rx 2 ----- (3)
+
+ To avoid a divide we approximate log2(P) as top_bit(P),
+ which returns the position of the highest non-zero bit in
+ P. This approximation introduces an error as large as a
+ factor of 2, but the algorithm seems to handle it OK.
+
+ Come to think of it, a divide may not be a big deal on a
+ modern DSP, so it's probably worth checking out the cycles
+ for a divide versus a top_bit() implementation.
+ */
+
+ P = MIN_TX_POWER_FOR_ADAPTION + ec->Pstates;
+ logP = top_bit(P) + ec->log2taps;
+ shift = 30 - 2 - logP;
+ ec->shift = shift;
+
+ lms_adapt_bg(ec, clean_bg, shift);
}
- else
- {
- /* just mute the residual, doesn't sound very good, used mainly
- in G168 tests */
- ec->clean_nlp = 0;
- }
- }
- else {
- /* Background noise estimator. I tried a few algorithms
- here without much luck. This very simple one seems to
- work best, we just average the level using a slow (1 sec
- time const) filter if the current level is less than a
- (experimentally derived) constant. This means we dont
- include high level signals like near end speech. When
- combined with CNG or especially CLIP seems to work OK.
- */
- if (ec->Lclean < 40) {
- ec->Lbgn_acc += abs(ec->clean) - ec->Lbgn;
- ec->Lbgn = (ec->Lbgn_acc + (1<<11)) >> 12;
- }
- }
- }
-
- /* Roll around the taps buffer */
- if (ec->curr_pos <= 0)
- ec->curr_pos = ec->taps;
- ec->curr_pos--;
-
- if (ec->adaption_mode & ECHO_CAN_DISABLE)
- ec->clean_nlp = rx;
-
- /* Output scaled back up again to match input scaling */
-
- return (int16_t) ec->clean_nlp << 1;
+
+ /* very simple DTD to make sure we don't try to adapt with strong
+ near end speech */
+
+ ec->adapt = 0;
+ if ((ec->Lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->Lrx > ec->Ltx))
+ ec->nonupdate_dwell = DTD_HANGOVER;
+ if (ec->nonupdate_dwell)
+ ec->nonupdate_dwell--;
+
+ /* Transfer logic ------------------------------------------------------ */
+
+ /* These conditions are from the dual path paper [1], I messed with
+ them a bit to improve performance. */
+
+ if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) &&
+ (ec->nonupdate_dwell == 0) &&
+ (8 * ec->Lclean_bg <
+ 7 * ec->Lclean) /* (ec->Lclean_bg < 0.875*ec->Lclean) */ &&
+ (8 * ec->Lclean_bg <
+ ec->Ltx) /* (ec->Lclean_bg < 0.125*ec->Ltx) */ ) {
+ if (ec->cond_met == 6) {
+ /* BG filter has had better results for 6 consecutive samples */
+ ec->adapt = 1;
+ memcpy(ec->fir_taps16[0], ec->fir_taps16[1],
+ ec->taps * sizeof(int16_t));
+ } else
+ ec->cond_met++;
+ } else
+ ec->cond_met = 0;
+
+ /* Non-Linear Processing --------------------------------------------------- */
+
+ ec->clean_nlp = ec->clean;
+ if (ec->adaption_mode & ECHO_CAN_USE_NLP) {
+ /* Non-linear processor - a fancy way to say "zap small signals, to avoid
+ residual echo due to (uLaw/ALaw) non-linearity in the channel". */
+
+ if (16 * ec->Lclean < ec->Ltx) {
+ /* Our e/c has improved echo by at least 24 dB (each factor of 2 is 6dB,
+ so 2*2*2*2=16 is the same as 6+6+6+6=24dB) */
+ if (ec->adaption_mode & ECHO_CAN_USE_CNG) {
+ ec->cng_level = ec->Lbgn;
+
+ /* Very elementary comfort noise generation. Just random
+ numbers rolled off very vaguely Hoth-like. DR: This
+ noise doesn't sound quite right to me - I suspect there
+ are some overflow issues in the filtering as it's too
+ "crackly". TODO: debug this, maybe just play noise at
+ high level or look at spectrum.
+ */
+
+ ec->cng_rndnum =
+ 1664525U * ec->cng_rndnum + 1013904223U;
+ ec->cng_filter =
+ ((ec->cng_rndnum & 0xFFFF) - 32768 +
+ 5 * ec->cng_filter) >> 3;
+ ec->clean_nlp =
+ (ec->cng_filter * ec->cng_level * 8) >> 14;
+
+ } else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) {
+ /* This sounds much better than CNG */
+ if (ec->clean_nlp > ec->Lbgn)
+ ec->clean_nlp = ec->Lbgn;
+ if (ec->clean_nlp < -ec->Lbgn)
+ ec->clean_nlp = -ec->Lbgn;
+ } else {
+ /* just mute the residual, doesn't sound very good, used mainly
+ in G168 tests */
+ ec->clean_nlp = 0;
+ }
+ } else {
+ /* Background noise estimator. I tried a few algorithms
+ here without much luck. This very simple one seems to
+ work best: we just average the level using a slow (1 sec
+ time const) filter if the current level is less than an
+ (experimentally derived) constant. This means we don't
+ include high level signals like near end speech. When
+ combined with CNG, or especially CLIP, it seems to work OK.
+ */
+ if (ec->Lclean < 40) {
+ ec->Lbgn_acc += abs(ec->clean) - ec->Lbgn;
+ ec->Lbgn = (ec->Lbgn_acc + (1 << 11)) >> 12;
+ }
+ }
+ }
+
+ /* Roll around the taps buffer */
+ if (ec->curr_pos <= 0)
+ ec->curr_pos = ec->taps;
+ ec->curr_pos--;
+
+ if (ec->adaption_mode & ECHO_CAN_DISABLE)
+ ec->clean_nlp = rx;
+
+ /* Output scaled back up again to match input scaling */
+
+ return (int16_t) ec->clean_nlp << 1;
}
-/*- End of function --------------------------------------------------------*/
+EXPORT_SYMBOL_GPL(oslec_update);
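/*
 * A minimal sketch of equation (3) from the comment in oslec_update() above:
 * the Q30 adaption factor is formed by scaling the background error by
 * 2^(30 - 2 - log2(P)), with log2(P) approximated by the highest set bit.
 * top_bit_sketch() is only a plain C stand-in for the driver's top_bit()
 * helper (assumed to return the position of the most significant 1 bit, or
 * -1 for zero); the per-tap multiply by the tx history happens inside
 * lms_adapt_bg(), which is not part of this hunk.
 */
static int top_bit_sketch(uint32_t x)
{
	int n = -1;

	while (x) {
		x >>= 1;
		n++;
	}
	return n;
}

static int q30_shift_sketch(int32_t P, int log2taps)
{
	/* factor = clean_bg * 2^(30 - 2 - log2(P));
	   the "- 2" is Beta = 0.25 = 2^-2, exactly as in the code above. */
	int logP = top_bit_sketch(P) + log2taps;

	return 30 - 2 - logP;
}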
/* This function is separated from the echo canceller as it is usually called
as part of the tx process. See rx HP (DC blocking) filter above, it's
@@ -608,25 +605,35 @@ int16_t echo_can_update(echo_can_state_t *ec, int16_t tx, int16_t rx)
precision, which noise shapes things, giving very clean DC removal.
*/
-int16_t echo_can_hpf_tx(echo_can_state_t *ec, int16_t tx) {
- int tmp, tmp1;
+int16_t oslec_hpf_tx(struct oslec_state * ec, int16_t tx)
+{
+ int tmp, tmp1;
- if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) {
- tmp = tx << 15;
+ if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) {
+ tmp = tx << 15;
#if 1
- /* Make sure the gain of the HPF is 1.0. The first can still saturate a little under
- impulse conditions, and it might roll to 32768 and need clipping on sustained peak
- level signals. However, the scale of such clipping is small, and the error due to
- any saturation should not markedly affect the downstream processing. */
- tmp -= (tmp >> 4);
+ /* Make sure the gain of the HPF is 1.0. The first can still saturate a little under
+ impulse conditions, and it might roll to 32768 and need clipping on sustained peak
+ level signals. However, the scale of such clipping is small, and the error due to
+ any saturation should not markedly affect the downstream processing. */
+ tmp -= (tmp >> 4);
#endif
- ec->tx_1 += -(ec->tx_1>>DC_LOG2BETA) + tmp - ec->tx_2;
- tmp1 = ec->tx_1 >> 15;
- if (tmp1 > 32767) tmp1 = 32767;
- if (tmp1 < -32767) tmp1 = -32767;
- tx = tmp1;
- ec->tx_2 = tmp;
- }
-
- return tx;
+ ec->tx_1 += -(ec->tx_1 >> DC_LOG2BETA) + tmp - ec->tx_2;
+ tmp1 = ec->tx_1 >> 15;
+ if (tmp1 > 32767)
+ tmp1 = 32767;
+ if (tmp1 < -32767)
+ tmp1 = -32767;
+ tx = tmp1;
+ ec->tx_2 = tmp;
+ }
+
+ return tx;
}
+
+EXPORT_SYMBOL_GPL(oslec_hpf_tx);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Rowe");
+MODULE_DESCRIPTION("Open Source Line Echo Canceller");
+MODULE_VERSION("0.3.0");
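/*
 * Floating-point equivalent of the fixed-point DC blocking filters above
 * (oslec_hpf_tx() and the rx HPF branch of oslec_update()):
 *
 *     y[n] = x[n] - x[n-1] + (1 - beta) * y[n-1]
 *
 * i.e. a zero at DC and a real pole at (1 - beta), beta = 2^-DC_LOG2BETA,
 * giving a 3 dB point of roughly beta radians.  This is only a readability
 * sketch; the driver works in scaled 32 bit integers (and applies the small
 * gain correction under "#if 1") so the pole is not quantised to 1.0.
 */
struct dc_block_sketch {
	float x1;	/* previous input  */
	float y1;	/* previous output */
};

static float dc_block_sketch_run(struct dc_block_sketch *s, float x, float beta)
{
	float y = x - s->x1 + (1.0f - beta) * s->y1;

	s->x1 = x;
	s->y1 = y;
	return y;
}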
diff --git a/drivers/staging/echo/echo.h b/drivers/staging/echo/echo.h
index 7a91b4390f3..9fb9543c4f1 100644
--- a/drivers/staging/echo/echo.h
+++ b/drivers/staging/echo/echo.h
@@ -118,23 +118,14 @@ a minor burden.
*/
#include "fir.h"
-
-/* Mask bits for the adaption mode */
-#define ECHO_CAN_USE_ADAPTION 0x01
-#define ECHO_CAN_USE_NLP 0x02
-#define ECHO_CAN_USE_CNG 0x04
-#define ECHO_CAN_USE_CLIP 0x08
-#define ECHO_CAN_USE_TX_HPF 0x10
-#define ECHO_CAN_USE_RX_HPF 0x20
-#define ECHO_CAN_DISABLE 0x40
+#include "oslec.h"
/*!
G.168 echo canceller descriptor. This defines the working state for a line
echo canceller.
*/
-typedef struct
-{
- int16_t tx,rx;
+struct oslec_state {
+ int16_t tx, rx;
int16_t clean;
int16_t clean_nlp;
@@ -176,45 +167,6 @@ typedef struct
/* snapshot sample of coeffs used for development */
int16_t *snapshot;
-} echo_can_state_t;
-
-/*! Create a voice echo canceller context.
- \param len The length of the canceller, in samples.
- \return The new canceller context, or NULL if the canceller could not be created.
-*/
-echo_can_state_t *echo_can_create(int len, int adaption_mode);
-
-/*! Free a voice echo canceller context.
- \param ec The echo canceller context.
-*/
-void echo_can_free(echo_can_state_t *ec);
-
-/*! Flush (reinitialise) a voice echo canceller context.
- \param ec The echo canceller context.
-*/
-void echo_can_flush(echo_can_state_t *ec);
-
-/*! Set the adaption mode of a voice echo canceller context.
- \param ec The echo canceller context.
- \param adapt The mode.
-*/
-void echo_can_adaption_mode(echo_can_state_t *ec, int adaption_mode);
-
-void echo_can_snapshot(echo_can_state_t *ec);
-
-/*! Process a sample through a voice echo canceller.
- \param ec The echo canceller context.
- \param tx The transmitted audio sample.
- \param rx The received audio sample.
- \return The clean (echo cancelled) received sample.
-*/
-int16_t echo_can_update(echo_can_state_t *ec, int16_t tx, int16_t rx);
-
-/*! Process to high pass filter the tx signal.
- \param ec The echo canceller context.
- \param tx The transmitted auio sample.
- \return The HP filtered transmit sample, send this to your D/A.
-*/
-int16_t echo_can_hpf_tx(echo_can_state_t *ec, int16_t tx);
+};
-#endif /* __ECHO_H */
+#endif /* __ECHO_H */
diff --git a/drivers/staging/echo/fir.h b/drivers/staging/echo/fir.h
index e1bfc499488..5645cb1b2f9 100644
--- a/drivers/staging/echo/fir.h
+++ b/drivers/staging/echo/fir.h
@@ -72,8 +72,7 @@
16 bit integer FIR descriptor. This defines the working state for a single
instance of an FIR filter using 16 bit integer coefficients.
*/
-typedef struct
-{
+typedef struct {
int taps;
int curr_pos;
const int16_t *coeffs;
@@ -85,8 +84,7 @@ typedef struct
instance of an FIR filter using 32 bit integer coefficients, and filtering
16 bit integer data.
*/
-typedef struct
-{
+typedef struct {
int taps;
int curr_pos;
const int32_t *coeffs;
@@ -97,273 +95,201 @@ typedef struct
Floating point FIR descriptor. This defines the working state for a single
instance of an FIR filter using floating point coefficients and data.
*/
-typedef struct
-{
+typedef struct {
int taps;
int curr_pos;
const float *coeffs;
float *history;
} fir_float_state_t;
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-static __inline__ const int16_t *fir16_create(fir16_state_t *fir,
- const int16_t *coeffs,
- int taps)
+static __inline__ const int16_t *fir16_create(fir16_state_t * fir,
+ const int16_t * coeffs, int taps)
{
fir->taps = taps;
fir->curr_pos = taps - 1;
fir->coeffs = coeffs;
-#if defined(USE_MMX) || defined(USE_SSE2) || defined(__BLACKFIN_ASM__)
- if ((fir->history = malloc(2*taps*sizeof(int16_t))))
- memset(fir->history, 0, 2*taps*sizeof(int16_t));
+#if defined(USE_MMX) || defined(USE_SSE2) || defined(__bfin__)
+ fir->history = kcalloc(2 * taps, sizeof(int16_t), GFP_KERNEL);
#else
- if ((fir->history = (int16_t *) malloc(taps*sizeof(int16_t))))
- memset(fir->history, 0, taps*sizeof(int16_t));
+ fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL);
#endif
return fir->history;
}
-/*- End of function --------------------------------------------------------*/
-static __inline__ void fir16_flush(fir16_state_t *fir)
+static __inline__ void fir16_flush(fir16_state_t * fir)
{
-#if defined(USE_MMX) || defined(USE_SSE2) || defined(__BLACKFIN_ASM__)
- memset(fir->history, 0, 2*fir->taps*sizeof(int16_t));
+#if defined(USE_MMX) || defined(USE_SSE2) || defined(__bfin__)
+ memset(fir->history, 0, 2 * fir->taps * sizeof(int16_t));
#else
- memset(fir->history, 0, fir->taps*sizeof(int16_t));
+ memset(fir->history, 0, fir->taps * sizeof(int16_t));
#endif
}
-/*- End of function --------------------------------------------------------*/
-static __inline__ void fir16_free(fir16_state_t *fir)
+static __inline__ void fir16_free(fir16_state_t * fir)
{
- free(fir->history);
+ kfree(fir->history);
}
-/*- End of function --------------------------------------------------------*/
-#ifdef __BLACKFIN_ASM__
+#ifdef __bfin__
static inline int32_t dot_asm(short *x, short *y, int len)
{
- int dot;
-
- len--;
-
- __asm__
- (
- "I0 = %1;\n\t"
- "I1 = %2;\n\t"
- "A0 = 0;\n\t"
- "R0.L = W[I0++] || R1.L = W[I1++];\n\t"
- "LOOP dot%= LC0 = %3;\n\t"
- "LOOP_BEGIN dot%=;\n\t"
- "A0 += R0.L * R1.L (IS) || R0.L = W[I0++] || R1.L = W[I1++];\n\t"
- "LOOP_END dot%=;\n\t"
- "A0 += R0.L*R1.L (IS);\n\t"
- "R0 = A0;\n\t"
- "%0 = R0;\n\t"
- : "=&d" (dot)
- : "a" (x), "a" (y), "a" (len)
- : "I0", "I1", "A1", "A0", "R0", "R1"
- );
-
- return dot;
+ int dot;
+
+ len--;
+
+ __asm__("I0 = %1;\n\t"
+ "I1 = %2;\n\t"
+ "A0 = 0;\n\t"
+ "R0.L = W[I0++] || R1.L = W[I1++];\n\t"
+ "LOOP dot%= LC0 = %3;\n\t"
+ "LOOP_BEGIN dot%=;\n\t"
+ "A0 += R0.L * R1.L (IS) || R0.L = W[I0++] || R1.L = W[I1++];\n\t"
+ "LOOP_END dot%=;\n\t"
+ "A0 += R0.L*R1.L (IS);\n\t"
+ "R0 = A0;\n\t"
+ "%0 = R0;\n\t"
+ :"=&d"(dot)
+ :"a"(x), "a"(y), "a"(len)
+ :"I0", "I1", "A1", "A0", "R0", "R1"
+ );
+
+ return dot;
}
#endif
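/*
 * Plain C reference for what the Blackfin dot_asm() above computes: a 16 bit
 * integer dot product over len samples, accumulated in 32 bits.  Sketch only,
 * assuming the usual fixed-width types used throughout this file; the generic
 * fir16() path below does the same work through its circular history buffer.
 */
static int32_t dot_sketch(const int16_t *x, const int16_t *y, int len)
{
	int32_t acc = 0;
	int i;

	for (i = 0; i < len; i++)
		acc += (int32_t)x[i] * y[i];
	return acc;
}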
-/*- End of function --------------------------------------------------------*/
-static __inline__ int16_t fir16(fir16_state_t *fir, int16_t sample)
+static __inline__ int16_t fir16(fir16_state_t * fir, int16_t sample)
{
- int32_t y;
+ int32_t y;
#if defined(USE_MMX)
- int i;
- mmx_t *mmx_coeffs;
- mmx_t *mmx_hist;
-
- fir->history[fir->curr_pos] = sample;
- fir->history[fir->curr_pos + fir->taps] = sample;
-
- mmx_coeffs = (mmx_t *) fir->coeffs;
- mmx_hist = (mmx_t *) &fir->history[fir->curr_pos];
- i = fir->taps;
- pxor_r2r(mm4, mm4);
- /* 8 samples per iteration, so the filter must be a multiple of 8 long. */
- while (i > 0)
- {
- movq_m2r(mmx_coeffs[0], mm0);
- movq_m2r(mmx_coeffs[1], mm2);
- movq_m2r(mmx_hist[0], mm1);
- movq_m2r(mmx_hist[1], mm3);
- mmx_coeffs += 2;
- mmx_hist += 2;
- pmaddwd_r2r(mm1, mm0);
- pmaddwd_r2r(mm3, mm2);
- paddd_r2r(mm0, mm4);
- paddd_r2r(mm2, mm4);
- i -= 8;
- }
- movq_r2r(mm4, mm0);
- psrlq_i2r(32, mm0);
- paddd_r2r(mm0, mm4);
- movd_r2m(mm4, y);
- emms();
+ int i;
+ mmx_t *mmx_coeffs;
+ mmx_t *mmx_hist;
+
+ fir->history[fir->curr_pos] = sample;
+ fir->history[fir->curr_pos + fir->taps] = sample;
+
+ mmx_coeffs = (mmx_t *) fir->coeffs;
+ mmx_hist = (mmx_t *) &fir->history[fir->curr_pos];
+ i = fir->taps;
+ pxor_r2r(mm4, mm4);
+ /* 8 samples per iteration, so the filter must be a multiple of 8 long. */
+ while (i > 0) {
+ movq_m2r(mmx_coeffs[0], mm0);
+ movq_m2r(mmx_coeffs[1], mm2);
+ movq_m2r(mmx_hist[0], mm1);
+ movq_m2r(mmx_hist[1], mm3);
+ mmx_coeffs += 2;
+ mmx_hist += 2;
+ pmaddwd_r2r(mm1, mm0);
+ pmaddwd_r2r(mm3, mm2);
+ paddd_r2r(mm0, mm4);
+ paddd_r2r(mm2, mm4);
+ i -= 8;
+ }
+ movq_r2r(mm4, mm0);
+ psrlq_i2r(32, mm0);
+ paddd_r2r(mm0, mm4);
+ movd_r2m(mm4, y);
+ emms();
#elif defined(USE_SSE2)
- int i;
- xmm_t *xmm_coeffs;
- xmm_t *xmm_hist;
-
- fir->history[fir->curr_pos] = sample;
- fir->history[fir->curr_pos + fir->taps] = sample;
-
- xmm_coeffs = (xmm_t *) fir->coeffs;
- xmm_hist = (xmm_t *) &fir->history[fir->curr_pos];
- i = fir->taps;
- pxor_r2r(xmm4, xmm4);
- /* 16 samples per iteration, so the filter must be a multiple of 16 long. */
- while (i > 0)
- {
- movdqu_m2r(xmm_coeffs[0], xmm0);
- movdqu_m2r(xmm_coeffs[1], xmm2);
- movdqu_m2r(xmm_hist[0], xmm1);
- movdqu_m2r(xmm_hist[1], xmm3);
- xmm_coeffs += 2;
- xmm_hist += 2;
- pmaddwd_r2r(xmm1, xmm0);
- pmaddwd_r2r(xmm3, xmm2);
- paddd_r2r(xmm0, xmm4);
- paddd_r2r(xmm2, xmm4);
- i -= 16;
- }
- movdqa_r2r(xmm4, xmm0);
- psrldq_i2r(8, xmm0);
- paddd_r2r(xmm0, xmm4);
- movdqa_r2r(xmm4, xmm0);
- psrldq_i2r(4, xmm0);
- paddd_r2r(xmm0, xmm4);
- movd_r2m(xmm4, y);
-#elif defined(__BLACKFIN_ASM__)
- fir->history[fir->curr_pos] = sample;
- fir->history[fir->curr_pos + fir->taps] = sample;
- y = dot_asm((int16_t*)fir->coeffs, &fir->history[fir->curr_pos], fir->taps);
+ int i;
+ xmm_t *xmm_coeffs;
+ xmm_t *xmm_hist;
+
+ fir->history[fir->curr_pos] = sample;
+ fir->history[fir->curr_pos + fir->taps] = sample;
+
+ xmm_coeffs = (xmm_t *) fir->coeffs;
+ xmm_hist = (xmm_t *) &fir->history[fir->curr_pos];
+ i = fir->taps;
+ pxor_r2r(xmm4, xmm4);
+ /* 16 samples per iteration, so the filter must be a multiple of 16 long. */
+ while (i > 0) {
+ movdqu_m2r(xmm_coeffs[0], xmm0);
+ movdqu_m2r(xmm_coeffs[1], xmm2);
+ movdqu_m2r(xmm_hist[0], xmm1);
+ movdqu_m2r(xmm_hist[1], xmm3);
+ xmm_coeffs += 2;
+ xmm_hist += 2;
+ pmaddwd_r2r(xmm1, xmm0);
+ pmaddwd_r2r(xmm3, xmm2);
+ paddd_r2r(xmm0, xmm4);
+ paddd_r2r(xmm2, xmm4);
+ i -= 16;
+ }
+ movdqa_r2r(xmm4, xmm0);
+ psrldq_i2r(8, xmm0);
+ paddd_r2r(xmm0, xmm4);
+ movdqa_r2r(xmm4, xmm0);
+ psrldq_i2r(4, xmm0);
+ paddd_r2r(xmm0, xmm4);
+ movd_r2m(xmm4, y);
+#elif defined(__bfin__)
+ fir->history[fir->curr_pos] = sample;
+ fir->history[fir->curr_pos + fir->taps] = sample;
+ y = dot_asm((int16_t *) fir->coeffs, &fir->history[fir->curr_pos],
+ fir->taps);
#else
- int i;
- int offset1;
- int offset2;
-
- fir->history[fir->curr_pos] = sample;
-
- offset2 = fir->curr_pos;
- offset1 = fir->taps - offset2;
- y = 0;
- for (i = fir->taps - 1; i >= offset1; i--)
- y += fir->coeffs[i]*fir->history[i - offset1];
- for ( ; i >= 0; i--)
- y += fir->coeffs[i]*fir->history[i + offset2];
+ int i;
+ int offset1;
+ int offset2;
+
+ fir->history[fir->curr_pos] = sample;
+
+ offset2 = fir->curr_pos;
+ offset1 = fir->taps - offset2;
+ y = 0;
+ for (i = fir->taps - 1; i >= offset1; i--)
+ y += fir->coeffs[i] * fir->history[i - offset1];
+ for (; i >= 0; i--)
+ y += fir->coeffs[i] * fir->history[i + offset2];
#endif
- if (fir->curr_pos <= 0)
- fir->curr_pos = fir->taps;
- fir->curr_pos--;
- return (int16_t) (y >> 15);
-}
-/*- End of function --------------------------------------------------------*/
-
-static __inline__ const int16_t *fir32_create(fir32_state_t *fir,
- const int32_t *coeffs,
- int taps)
-{
- fir->taps = taps;
- fir->curr_pos = taps - 1;
- fir->coeffs = coeffs;
- fir->history = (int16_t *) malloc(taps*sizeof(int16_t));
- if (fir->history)
- memset(fir->history, '\0', taps*sizeof(int16_t));
- return fir->history;
-}
-/*- End of function --------------------------------------------------------*/
-
-static __inline__ void fir32_flush(fir32_state_t *fir)
-{
- memset(fir->history, 0, fir->taps*sizeof(int16_t));
+ if (fir->curr_pos <= 0)
+ fir->curr_pos = fir->taps;
+ fir->curr_pos--;
+ return (int16_t) (y >> 15);
}
-/*- End of function --------------------------------------------------------*/
-static __inline__ void fir32_free(fir32_state_t *fir)
+static __inline__ const int16_t *fir32_create(fir32_state_t * fir,
+ const int32_t * coeffs, int taps)
{
- free(fir->history);
-}
-/*- End of function --------------------------------------------------------*/
-
-static __inline__ int16_t fir32(fir32_state_t *fir, int16_t sample)
-{
- int i;
- int32_t y;
- int offset1;
- int offset2;
-
- fir->history[fir->curr_pos] = sample;
- offset2 = fir->curr_pos;
- offset1 = fir->taps - offset2;
- y = 0;
- for (i = fir->taps - 1; i >= offset1; i--)
- y += fir->coeffs[i]*fir->history[i - offset1];
- for ( ; i >= 0; i--)
- y += fir->coeffs[i]*fir->history[i + offset2];
- if (fir->curr_pos <= 0)
- fir->curr_pos = fir->taps;
- fir->curr_pos--;
- return (int16_t) (y >> 15);
+ fir->taps = taps;
+ fir->curr_pos = taps - 1;
+ fir->coeffs = coeffs;
+ fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL);
+ return fir->history;
}
-/*- End of function --------------------------------------------------------*/
-#ifndef __KERNEL__
-static __inline__ const float *fir_float_create(fir_float_state_t *fir,
- const float *coeffs,
- int taps)
+static __inline__ void fir32_flush(fir32_state_t * fir)
{
- fir->taps = taps;
- fir->curr_pos = taps - 1;
- fir->coeffs = coeffs;
- fir->history = (float *) malloc(taps*sizeof(float));
- if (fir->history)
- memset(fir->history, '\0', taps*sizeof(float));
- return fir->history;
+ memset(fir->history, 0, fir->taps * sizeof(int16_t));
}
-/*- End of function --------------------------------------------------------*/
-static __inline__ void fir_float_free(fir_float_state_t *fir)
+static __inline__ void fir32_free(fir32_state_t * fir)
{
- free(fir->history);
+ kfree(fir->history);
}
-/*- End of function --------------------------------------------------------*/
-static __inline__ int16_t fir_float(fir_float_state_t *fir, int16_t sample)
+static __inline__ int16_t fir32(fir32_state_t * fir, int16_t sample)
{
- int i;
- float y;
- int offset1;
- int offset2;
-
- fir->history[fir->curr_pos] = sample;
-
- offset2 = fir->curr_pos;
- offset1 = fir->taps - offset2;
- y = 0;
- for (i = fir->taps - 1; i >= offset1; i--)
- y += fir->coeffs[i]*fir->history[i - offset1];
- for ( ; i >= 0; i--)
- y += fir->coeffs[i]*fir->history[i + offset2];
- if (fir->curr_pos <= 0)
- fir->curr_pos = fir->taps;
- fir->curr_pos--;
- return (int16_t) y;
+ int i;
+ int32_t y;
+ int offset1;
+ int offset2;
+
+ fir->history[fir->curr_pos] = sample;
+ offset2 = fir->curr_pos;
+ offset1 = fir->taps - offset2;
+ y = 0;
+ for (i = fir->taps - 1; i >= offset1; i--)
+ y += fir->coeffs[i] * fir->history[i - offset1];
+ for (; i >= 0; i--)
+ y += fir->coeffs[i] * fir->history[i + offset2];
+ if (fir->curr_pos <= 0)
+ fir->curr_pos = fir->taps;
+ fir->curr_pos--;
+ return (int16_t) (y >> 15);
}
-/*- End of function --------------------------------------------------------*/
-#endif
-
-#ifdef __cplusplus
-}
-#endif
#endif
/*- End of file ------------------------------------------------------------*/
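/*
 * A minimal usage sketch of the fir16 helpers above, assuming the caller
 * already holds a taps-long Q15 coefficient table (matching the ">> 15" in
 * fir16()).  Note the MMX path needs taps to be a multiple of 8 and the SSE2
 * path a multiple of 16.  Error handling is reduced to a NULL check on the
 * history buffer returned by fir16_create().
 */
static int filter_block_sketch(const int16_t *coeffs, int taps,
			       const int16_t *in, int16_t *out, int n)
{
	fir16_state_t fir;
	int i;

	if (!fir16_create(&fir, coeffs, taps))
		return -1;	/* history allocation failed */

	for (i = 0; i < n; i++)
		out[i] = fir16(&fir, in[i]);

	fir16_free(&fir);
	return 0;
}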
diff --git a/drivers/staging/echo/mmx.h b/drivers/staging/echo/mmx.h
index b5a3964865b..35412efe61c 100644
--- a/drivers/staging/echo/mmx.h
+++ b/drivers/staging/echo/mmx.h
@@ -27,24 +27,23 @@
* values by ULL, lest they be truncated by the compiler)
*/
-typedef union {
- long long q; /* Quadword (64-bit) value */
- unsigned long long uq; /* Unsigned Quadword */
- int d[2]; /* 2 Doubleword (32-bit) values */
- unsigned int ud[2]; /* 2 Unsigned Doubleword */
- short w[4]; /* 4 Word (16-bit) values */
- unsigned short uw[4]; /* 4 Unsigned Word */
- char b[8]; /* 8 Byte (8-bit) values */
- unsigned char ub[8]; /* 8 Unsigned Byte */
- float s[2]; /* Single-precision (32-bit) value */
-} mmx_t; /* On an 8-byte (64-bit) boundary */
+typedef union {
+ long long q; /* Quadword (64-bit) value */
+ unsigned long long uq; /* Unsigned Quadword */
+ int d[2]; /* 2 Doubleword (32-bit) values */
+ unsigned int ud[2]; /* 2 Unsigned Doubleword */
+ short w[4]; /* 4 Word (16-bit) values */
+ unsigned short uw[4]; /* 4 Unsigned Word */
+ char b[8]; /* 8 Byte (8-bit) values */
+ unsigned char ub[8]; /* 8 Unsigned Byte */
+ float s[2]; /* Single-precision (32-bit) value */
+} mmx_t; /* On an 8-byte (64-bit) boundary */
/* SSE registers */
typedef union {
char b[16];
} xmm_t;
-
#define mmx_i2r(op,imm,reg) \
__asm__ __volatile__ (#op " %0, %%" #reg \
: /* nothing */ \
@@ -63,7 +62,6 @@ typedef union {
#define mmx_r2r(op,regs,regd) \
__asm__ __volatile__ (#op " %" #regs ", %" #regd)
-
#define emms() __asm__ __volatile__ ("emms")
#define movd_m2r(var,reg) mmx_m2r (movd, var, reg)
@@ -192,16 +190,13 @@ typedef union {
#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg)
#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd)
-
/* 3DNOW extensions */
#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg)
#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd)
-
/* AMD MMX extensions - also available in intel SSE */
-
#define mmx_m2ri(op,mem,reg,imm) \
__asm__ __volatile__ (#op " %1, %0, %%" #reg \
: /* nothing */ \
@@ -216,7 +211,6 @@ typedef union {
: /* nothing */ \
: "m" (mem))
-
#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg)
#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var)
@@ -284,5 +278,4 @@ typedef union {
#define punpcklqdq_r2r(regs,regd) mmx_r2r (punpcklqdq, regs, regd)
#define punpckhqdq_r2r(regs,regd) mmx_r2r (punpckhqdq, regs, regd)
-
#endif /* AVCODEC_I386MMX_H */
diff --git a/drivers/staging/echo/oslec.h b/drivers/staging/echo/oslec.h
new file mode 100644
index 00000000000..bad852328a2
--- /dev/null
+++ b/drivers/staging/echo/oslec.h
@@ -0,0 +1,86 @@
+/*
+ * OSLEC - A line echo canceller. This code is being developed
+ * against and partially complies with G168. Using code from SpanDSP
+ *
+ * Written by Steve Underwood <steveu@coppice.org>
+ * and David Rowe <david_at_rowetel_dot_com>
+ *
+ * Copyright (C) 2001 Steve Underwood and 2007-2008 David Rowe
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __OSLEC_H
+#define __OSLEC_H
+
+/* TODO: document interface */
+
+/* Mask bits for the adaption mode */
+#define ECHO_CAN_USE_ADAPTION 0x01
+#define ECHO_CAN_USE_NLP 0x02
+#define ECHO_CAN_USE_CNG 0x04
+#define ECHO_CAN_USE_CLIP 0x08
+#define ECHO_CAN_USE_TX_HPF 0x10
+#define ECHO_CAN_USE_RX_HPF 0x20
+#define ECHO_CAN_DISABLE 0x40
+
+/*!
+ G.168 echo canceller descriptor. This defines the working state for a line
+ echo canceller.
+*/
+struct oslec_state;
+
+/*! Create a voice echo canceller context.
+ \param len The length of the canceller, in samples.
+ \return The new canceller context, or NULL if the canceller could not be created.
+*/
+struct oslec_state *oslec_create(int len, int adaption_mode);
+
+/*! Free a voice echo canceller context.
+ \param ec The echo canceller context.
+*/
+void oslec_free(struct oslec_state *ec);
+
+/*! Flush (reinitialise) a voice echo canceller context.
+ \param ec The echo canceller context.
+*/
+void oslec_flush(struct oslec_state *ec);
+
+/*! Set the adaption mode of a voice echo canceller context.
+ \param ec The echo canceller context.
+ \param adaption_mode The adaption mode.
+*/
+void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode);
+
+void oslec_snapshot(struct oslec_state *ec);
+
+/*! Process a sample through a voice echo canceller.
+ \param ec The echo canceller context.
+ \param tx The transmitted audio sample.
+ \param rx The received audio sample.
+ \return The clean (echo cancelled) received sample.
+*/
+int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx);
+
+/*! Process to high pass filter the tx signal.
+ \param ec The echo canceller context.
+ \param tx The transmitted audio sample.
+ \return The HP filtered transmit sample, send this to your D/A.
+*/
+int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx);
+
+#endif /* __OSLEC_H */
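/*
 * A minimal usage sketch of the API declared above, assuming an 8 kHz,
 * 16 bit channel and a 128 tap (16 ms) canceller.  The flag combination is
 * just one plausible choice, and the context is created and freed per block
 * only to keep the sketch self contained; a real caller keeps the context
 * for the life of the channel.  Per sample: high-pass the value headed for
 * the D/A with oslec_hpf_tx(), then clean the received sample with
 * oslec_update().
 */
static void cancel_block_sketch(int16_t *tx, int16_t *rx, int n)
{
	struct oslec_state *ec;
	int i;

	ec = oslec_create(128, ECHO_CAN_USE_ADAPTION | ECHO_CAN_USE_NLP |
			       ECHO_CAN_USE_CLIP | ECHO_CAN_USE_TX_HPF |
			       ECHO_CAN_USE_RX_HPF);
	if (!ec)
		return;

	for (i = 0; i < n; i++) {
		tx[i] = oslec_hpf_tx(ec, tx[i]);
		rx[i] = oslec_update(ec, tx[i], rx[i]);
	}

	oslec_free(ec);
}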
diff --git a/drivers/staging/et131x/et1310_phy.c b/drivers/staging/et131x/et1310_phy.c
index 6c4fa54419e..9dd6dfd9a03 100644
--- a/drivers/staging/et131x/et1310_phy.c
+++ b/drivers/staging/et131x/et1310_phy.c
@@ -84,7 +84,6 @@
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/random.h>
-#include <linux/delay.h>
#include "et1310_phy.h"
#include "et1310_pm.h"
@@ -95,7 +94,6 @@
#include "et131x_initpci.h"
#include "et1310_address_map.h"
-#include "et1310_jagcore.h"
#include "et1310_tx.h"
#include "et1310_rx.h"
#include "et1310_mac.h"
diff --git a/drivers/staging/et131x/et131x_debug.c b/drivers/staging/et131x/et131x_debug.c
index 9ee5bce92c2..d1dd46e0a9c 100644
--- a/drivers/staging/et131x/et131x_debug.c
+++ b/drivers/staging/et131x/et131x_debug.c
@@ -97,7 +97,6 @@
#include "et131x_isr.h"
#include "et1310_address_map.h"
-#include "et1310_jagcore.h"
#include "et1310_tx.h"
#include "et1310_rx.h"
#include "et1310_mac.h"
diff --git a/drivers/staging/et131x/et131x_initpci.c b/drivers/staging/et131x/et131x_initpci.c
index 4c6f171f5b7..a18c499d0ae 100644
--- a/drivers/staging/et131x/et131x_initpci.c
+++ b/drivers/staging/et131x/et131x_initpci.c
@@ -97,7 +97,6 @@
#include "et131x_isr.h"
#include "et1310_address_map.h"
-#include "et1310_jagcore.h"
#include "et1310_tx.h"
#include "et1310_rx.h"
#include "et1310_mac.h"
diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/go7007/go7007-driver.c
index 81ae4b0fa89..e4ead96679c 100644
--- a/drivers/staging/go7007/go7007-driver.c
+++ b/drivers/staging/go7007/go7007-driver.c
@@ -16,7 +16,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
diff --git a/drivers/staging/go7007/go7007-fw.c b/drivers/staging/go7007/go7007-fw.c
index c2aea1020b0..a0e17b0e0ce 100644
--- a/drivers/staging/go7007/go7007-fw.c
+++ b/drivers/staging/go7007/go7007-fw.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/device.h>
diff --git a/drivers/staging/go7007/go7007-i2c.c b/drivers/staging/go7007/go7007-i2c.c
index 10baae3dade..cd55b76eabc 100644
--- a/drivers/staging/go7007/go7007-i2c.c
+++ b/drivers/staging/go7007/go7007-i2c.c
@@ -15,7 +15,6 @@
* Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/staging/go7007/go7007-usb.c b/drivers/staging/go7007/go7007-usb.c
index d4ed6d2b715..3f5ee3424e7 100644
--- a/drivers/staging/go7007/go7007-usb.c
+++ b/drivers/staging/go7007/go7007-usb.c
@@ -16,7 +16,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/wait.h>
diff --git a/drivers/staging/go7007/snd-go7007.c b/drivers/staging/go7007/snd-go7007.c
index 382740c405f..a7de401f61a 100644
--- a/drivers/staging/go7007/snd-go7007.c
+++ b/drivers/staging/go7007/snd-go7007.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/spinlock.h>
diff --git a/drivers/staging/go7007/wis-ov7640.c b/drivers/staging/go7007/wis-ov7640.c
index f5f11e927af..2f9efca0460 100644
--- a/drivers/staging/go7007/wis-ov7640.c
+++ b/drivers/staging/go7007/wis-ov7640.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
diff --git a/drivers/staging/go7007/wis-saa7113.c b/drivers/staging/go7007/wis-saa7113.c
index c1aff1b923a..11689723945 100644
--- a/drivers/staging/go7007/wis-saa7113.c
+++ b/drivers/staging/go7007/wis-saa7113.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-saa7115.c b/drivers/staging/go7007/wis-saa7115.c
index 5c94c883b31..59417a7174d 100644
--- a/drivers/staging/go7007/wis-saa7115.c
+++ b/drivers/staging/go7007/wis-saa7115.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-sony-tuner.c b/drivers/staging/go7007/wis-sony-tuner.c
index 5997fb47945..5a91ee409a7 100644
--- a/drivers/staging/go7007/wis-sony-tuner.c
+++ b/drivers/staging/go7007/wis-sony-tuner.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/tuner.h>
diff --git a/drivers/staging/go7007/wis-tw2804.c b/drivers/staging/go7007/wis-tw2804.c
index 27fe4d0d4ed..57b8f2b1caa 100644
--- a/drivers/staging/go7007/wis-tw2804.c
+++ b/drivers/staging/go7007/wis-tw2804.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-tw9903.c b/drivers/staging/go7007/wis-tw9903.c
index d8e41968022..40627b282cb 100644
--- a/drivers/staging/go7007/wis-tw9903.c
+++ b/drivers/staging/go7007/wis-tw9903.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-uda1342.c b/drivers/staging/go7007/wis-uda1342.c
index a0894e3cb8c..555645c0cc1 100644
--- a/drivers/staging/go7007/wis-uda1342.c
+++ b/drivers/staging/go7007/wis-uda1342.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/tvaudio.h>
diff --git a/drivers/staging/me4000/me4000.c b/drivers/staging/me4000/me4000.c
index 862dd7ffb5c..0b33773bb4f 100644
--- a/drivers/staging/me4000/me4000.c
+++ b/drivers/staging/me4000/me4000.c
@@ -25,24 +25,21 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/unistd.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
-
+#include <linux/types.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
-#include <linux/types.h>
-
-#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
/* Include-File for the Meilhaus ME-4000 I/O board */
#include "me4000.h"
@@ -57,14 +54,14 @@ MODULE_SUPPORTED_DEVICE("Meilhaus ME-4000 Multi I/O boards");
MODULE_LICENSE("GPL");
/* Board specific data are kept in a global list */
-LIST_HEAD(me4000_board_info_list);
+static LIST_HEAD(me4000_board_info_list);
/* Major Device Numbers. 0 means to get it automatically from the System */
-static int me4000_ao_major_driver_no = 0;
-static int me4000_ai_major_driver_no = 0;
-static int me4000_dio_major_driver_no = 0;
-static int me4000_cnt_major_driver_no = 0;
-static int me4000_ext_int_major_driver_no = 0;
+static int me4000_ao_major_driver_no;
+static int me4000_ai_major_driver_no;
+static int me4000_dio_major_driver_no;
+static int me4000_cnt_major_driver_no;
+static int me4000_ext_int_major_driver_no;
/* Let the user specify a custom major driver number */
module_param(me4000_ao_major_driver_no, int, 0);
@@ -88,36 +85,22 @@ MODULE_PARM_DESC(me4000_ext_int_major_driver_no,
"Major driver number for external interrupt (default 0)");
/*-----------------------------------------------------------------------------
- Module stuff
- ---------------------------------------------------------------------------*/
-int init_module(void);
-void cleanup_module(void);
-
-/*-----------------------------------------------------------------------------
Board detection and initialization
---------------------------------------------------------------------------*/
static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id);
-static int me4000_xilinx_download(me4000_info_t *);
-static int me4000_reset_board(me4000_info_t *);
+static int me4000_xilinx_download(struct me4000_info *);
+static int me4000_reset_board(struct me4000_info *);
static void clear_board_info_list(void);
-static int get_registers(struct pci_dev *dev, me4000_info_t * info);
-static int init_board_info(struct pci_dev *dev, me4000_info_t * board_info);
-static int alloc_ao_contexts(me4000_info_t * info);
-static void release_ao_contexts(me4000_info_t * board_info);
-static int alloc_ai_context(me4000_info_t * info);
-static int alloc_dio_context(me4000_info_t * info);
-static int alloc_cnt_context(me4000_info_t * info);
-static int alloc_ext_int_context(me4000_info_t * info);
-
+static void release_ao_contexts(struct me4000_info *board_info);
/*-----------------------------------------------------------------------------
Stuff used by all device parts
---------------------------------------------------------------------------*/
static int me4000_open(struct inode *, struct file *);
static int me4000_release(struct inode *, struct file *);
-static int me4000_get_user_info(me4000_user_info_t *,
- me4000_info_t * board_info);
+static int me4000_get_user_info(struct me4000_user_info *,
+ struct me4000_info *board_info);
static int me4000_read_procmem(char *, char **, off_t, int, int *, void *);
/*-----------------------------------------------------------------------------
@@ -140,40 +123,42 @@ static int me4000_ao_ioctl_cont(struct inode *, struct file *, unsigned int,
static unsigned int me4000_ao_poll_cont(struct file *, poll_table *);
static int me4000_ao_fsync_cont(struct file *, struct dentry *, int);
-static int me4000_ao_start(unsigned long *, me4000_ao_context_t *);
-static int me4000_ao_stop(me4000_ao_context_t *);
-static int me4000_ao_immediate_stop(me4000_ao_context_t *);
-static int me4000_ao_timer_set_divisor(u32 *, me4000_ao_context_t *);
-static int me4000_ao_preload(me4000_ao_context_t *);
-static int me4000_ao_preload_update(me4000_ao_context_t *);
-static int me4000_ao_ex_trig_set_edge(int *, me4000_ao_context_t *);
-static int me4000_ao_ex_trig_enable(me4000_ao_context_t *);
-static int me4000_ao_ex_trig_disable(me4000_ao_context_t *);
-static int me4000_ao_prepare(me4000_ao_context_t * ao_info);
-static int me4000_ao_reset(me4000_ao_context_t * ao_info);
-static int me4000_ao_enable_do(me4000_ao_context_t *);
-static int me4000_ao_disable_do(me4000_ao_context_t *);
-static int me4000_ao_fsm_state(int *, me4000_ao_context_t *);
-
-static int me4000_ao_simultaneous_ex_trig(me4000_ao_context_t * ao_context);
-static int me4000_ao_simultaneous_sw(me4000_ao_context_t * ao_context);
-static int me4000_ao_simultaneous_disable(me4000_ao_context_t * ao_context);
-static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * channels,
- me4000_ao_context_t * ao_context);
-
-static int me4000_ao_synchronous_ex_trig(me4000_ao_context_t * ao_context);
-static int me4000_ao_synchronous_sw(me4000_ao_context_t * ao_context);
-static int me4000_ao_synchronous_disable(me4000_ao_context_t * ao_context);
+static int me4000_ao_start(unsigned long *, struct me4000_ao_context *);
+static int me4000_ao_stop(struct me4000_ao_context *);
+static int me4000_ao_immediate_stop(struct me4000_ao_context *);
+static int me4000_ao_timer_set_divisor(u32 *, struct me4000_ao_context *);
+static int me4000_ao_preload(struct me4000_ao_context *);
+static int me4000_ao_preload_update(struct me4000_ao_context *);
+static int me4000_ao_ex_trig_set_edge(int *, struct me4000_ao_context *);
+static int me4000_ao_ex_trig_enable(struct me4000_ao_context *);
+static int me4000_ao_ex_trig_disable(struct me4000_ao_context *);
+static int me4000_ao_prepare(struct me4000_ao_context *ao_info);
+static int me4000_ao_reset(struct me4000_ao_context *ao_info);
+static int me4000_ao_enable_do(struct me4000_ao_context *);
+static int me4000_ao_disable_do(struct me4000_ao_context *);
+static int me4000_ao_fsm_state(int *, struct me4000_ao_context *);
+
+static int me4000_ao_simultaneous_ex_trig(struct me4000_ao_context *ao_context);
+static int me4000_ao_simultaneous_sw(struct me4000_ao_context *ao_context);
+static int me4000_ao_simultaneous_disable(struct me4000_ao_context *ao_context);
+static int me4000_ao_simultaneous_update(
+ struct me4000_ao_channel_list *channels,
+ struct me4000_ao_context *ao_context);
+
+static int me4000_ao_synchronous_ex_trig(struct me4000_ao_context *ao_context);
+static int me4000_ao_synchronous_sw(struct me4000_ao_context *ao_context);
+static int me4000_ao_synchronous_disable(struct me4000_ao_context *ao_context);
static int me4000_ao_ex_trig_timeout(unsigned long *arg,
- me4000_ao_context_t * ao_context);
+ struct me4000_ao_context *ao_context);
static int me4000_ao_get_free_buffer(unsigned long *arg,
- me4000_ao_context_t * ao_context);
+ struct me4000_ao_context *ao_context);
/*-----------------------------------------------------------------------------
Analog input stuff
---------------------------------------------------------------------------*/
-static int me4000_ai_single(me4000_ai_single_t *, me4000_ai_context_t *);
+static int me4000_ai_single(struct me4000_ai_single *,
+ struct me4000_ai_context *);
static int me4000_ai_ioctl_sing(struct inode *, struct file *, unsigned int,
unsigned long);
@@ -186,68 +171,69 @@ static int me4000_ai_fasync(int fd, struct file *file_p, int mode);
static int me4000_ai_ioctl_ext(struct inode *, struct file *, unsigned int,
unsigned long);
-static int me4000_ai_prepare(me4000_ai_context_t * ai_context);
-static int me4000_ai_reset(me4000_ai_context_t * ai_context);
-static int me4000_ai_config(me4000_ai_config_t *, me4000_ai_context_t *);
-static int me4000_ai_start(me4000_ai_context_t *);
-static int me4000_ai_start_ex(unsigned long *, me4000_ai_context_t *);
-static int me4000_ai_stop(me4000_ai_context_t *);
-static int me4000_ai_immediate_stop(me4000_ai_context_t *);
-static int me4000_ai_ex_trig_enable(me4000_ai_context_t *);
-static int me4000_ai_ex_trig_disable(me4000_ai_context_t *);
-static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t *,
- me4000_ai_context_t *);
-static int me4000_ai_sc_setup(me4000_ai_sc_t * arg,
- me4000_ai_context_t * ai_context);
-static int me4000_ai_offset_enable(me4000_ai_context_t * ai_context);
-static int me4000_ai_offset_disable(me4000_ai_context_t * ai_context);
-static int me4000_ai_fullscale_enable(me4000_ai_context_t * ai_context);
-static int me4000_ai_fullscale_disable(me4000_ai_context_t * ai_context);
-static int me4000_ai_fsm_state(int *arg, me4000_ai_context_t * ai_context);
+static int me4000_ai_prepare(struct me4000_ai_context *ai_context);
+static int me4000_ai_reset(struct me4000_ai_context *ai_context);
+static int me4000_ai_config(struct me4000_ai_config *,
+ struct me4000_ai_context *);
+static int me4000_ai_start(struct me4000_ai_context *);
+static int me4000_ai_start_ex(unsigned long *, struct me4000_ai_context *);
+static int me4000_ai_stop(struct me4000_ai_context *);
+static int me4000_ai_immediate_stop(struct me4000_ai_context *);
+static int me4000_ai_ex_trig_enable(struct me4000_ai_context *);
+static int me4000_ai_ex_trig_disable(struct me4000_ai_context *);
+static int me4000_ai_ex_trig_setup(struct me4000_ai_trigger *,
+ struct me4000_ai_context *);
+static int me4000_ai_sc_setup(struct me4000_ai_sc *arg,
+ struct me4000_ai_context *ai_context);
+static int me4000_ai_offset_enable(struct me4000_ai_context *ai_context);
+static int me4000_ai_offset_disable(struct me4000_ai_context *ai_context);
+static int me4000_ai_fullscale_enable(struct me4000_ai_context *ai_context);
+static int me4000_ai_fullscale_disable(struct me4000_ai_context *ai_context);
+static int me4000_ai_fsm_state(int *arg, struct me4000_ai_context *ai_context);
static int me4000_ai_get_count_buffer(unsigned long *arg,
- me4000_ai_context_t * ai_context);
+ struct me4000_ai_context *ai_context);
/*-----------------------------------------------------------------------------
EEPROM stuff
---------------------------------------------------------------------------*/
-static int me4000_eeprom_read(me4000_eeprom_t * arg,
- me4000_ai_context_t * ai_context);
-static int me4000_eeprom_write(me4000_eeprom_t * arg,
- me4000_ai_context_t * ai_context);
-static unsigned short eeprom_read_cmd(me4000_ai_context_t * ai_context,
- unsigned long cmd, int length);
-static int eeprom_write_cmd(me4000_ai_context_t * ai_context, unsigned long cmd,
- int length);
+static int me4000_eeprom_read(struct me4000_eeprom *arg,
+ struct me4000_ai_context *ai_context);
+static int me4000_eeprom_write(struct me4000_eeprom *arg,
+ struct me4000_ai_context *ai_context);
/*-----------------------------------------------------------------------------
Digital I/O stuff
---------------------------------------------------------------------------*/
static int me4000_dio_ioctl(struct inode *, struct file *, unsigned int,
unsigned long);
-static int me4000_dio_config(me4000_dio_config_t *, me4000_dio_context_t *);
-static int me4000_dio_get_byte(me4000_dio_byte_t *, me4000_dio_context_t *);
-static int me4000_dio_set_byte(me4000_dio_byte_t *, me4000_dio_context_t *);
-static int me4000_dio_reset(me4000_dio_context_t *);
+static int me4000_dio_config(struct me4000_dio_config *,
+ struct me4000_dio_context *);
+static int me4000_dio_get_byte(struct me4000_dio_byte *,
+ struct me4000_dio_context *);
+static int me4000_dio_set_byte(struct me4000_dio_byte *,
+ struct me4000_dio_context *);
+static int me4000_dio_reset(struct me4000_dio_context *);
/*-----------------------------------------------------------------------------
Counter stuff
---------------------------------------------------------------------------*/
static int me4000_cnt_ioctl(struct inode *, struct file *, unsigned int,
unsigned long);
-static int me4000_cnt_config(me4000_cnt_config_t *, me4000_cnt_context_t *);
-static int me4000_cnt_read(me4000_cnt_t *, me4000_cnt_context_t *);
-static int me4000_cnt_write(me4000_cnt_t *, me4000_cnt_context_t *);
-static int me4000_cnt_reset(me4000_cnt_context_t *);
+static int me4000_cnt_config(struct me4000_cnt_config *,
+ struct me4000_cnt_context *);
+static int me4000_cnt_read(struct me4000_cnt *, struct me4000_cnt_context *);
+static int me4000_cnt_write(struct me4000_cnt *, struct me4000_cnt_context *);
+static int me4000_cnt_reset(struct me4000_cnt_context *);
/*-----------------------------------------------------------------------------
External interrupt routines
---------------------------------------------------------------------------*/
static int me4000_ext_int_ioctl(struct inode *, struct file *, unsigned int,
unsigned long);
-static int me4000_ext_int_enable(me4000_ext_int_context_t *);
-static int me4000_ext_int_disable(me4000_ext_int_context_t *);
+static int me4000_ext_int_enable(struct me4000_ext_int_context *);
+static int me4000_ext_int_disable(struct me4000_ext_int_context *);
static int me4000_ext_int_count(unsigned long *arg,
- me4000_ext_int_context_t * ext_int_context);
+ struct me4000_ext_int_context *ext_int_context);
static int me4000_ext_int_fasync(int fd, struct file *file_ptr, int mode);
/*-----------------------------------------------------------------------------
@@ -260,27 +246,18 @@ static irqreturn_t me4000_ext_int_isr(int, void *);
/*-----------------------------------------------------------------------------
Inline functions
---------------------------------------------------------------------------*/
-static int inline me4000_buf_count(me4000_circ_buf_t, int);
-static int inline me4000_buf_space(me4000_circ_buf_t, int);
-static int inline me4000_space_to_end(me4000_circ_buf_t, int);
-static int inline me4000_values_to_end(me4000_circ_buf_t, int);
-
-static void inline me4000_outb(unsigned char value, unsigned long port);
-static void inline me4000_outl(unsigned long value, unsigned long port);
-static unsigned long inline me4000_inl(unsigned long port);
-static unsigned char inline me4000_inb(unsigned long port);
-static int me4000_buf_count(me4000_circ_buf_t buf, int size)
+static int inline me4000_buf_count(struct me4000_circ_buf buf, int size)
{
return ((buf.head - buf.tail) & (size - 1));
}
-static int me4000_buf_space(me4000_circ_buf_t buf, int size)
+static int inline me4000_buf_space(struct me4000_circ_buf buf, int size)
{
return ((buf.tail - (buf.head + 1)) & (size - 1));
}
-static int me4000_values_to_end(me4000_circ_buf_t buf, int size)
+static int inline me4000_values_to_end(struct me4000_circ_buf buf, int size)
{
int end;
int n;
@@ -289,7 +266,7 @@ static int me4000_values_to_end(me4000_circ_buf_t buf, int size)
return (n < end) ? n : end;
}
-static int me4000_space_to_end(me4000_circ_buf_t buf, int size)
+static int inline me4000_space_to_end(struct me4000_circ_buf buf, int size)
{
int end;
int n;
@@ -299,19 +276,19 @@ static int me4000_space_to_end(me4000_circ_buf_t buf, int size)
return (n <= end) ? n : (end + 1);
}
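The four buffer helpers above only work because the buffer size is a power of two: masking with (size - 1) wraps the head/tail difference without a division, even across unsigned overflow. A standalone sketch of the same masking idiom, with hypothetical names that are not part of the driver:

#include <stdio.h>

/* Hypothetical ring with free-running indices; size must be a power of two. */
struct demo_ring {
	unsigned int head;	/* next slot to write */
	unsigned int tail;	/* next slot to read  */
};

static unsigned int demo_count(const struct demo_ring *r, unsigned int size)
{
	return (r->head - r->tail) & (size - 1);	/* elements stored */
}

static unsigned int demo_space(const struct demo_ring *r, unsigned int size)
{
	/* one slot stays empty so "full" and "empty" are distinguishable */
	return (r->tail - (r->head + 1)) & (size - 1);
}

int main(void)
{
	struct demo_ring r = { .head = 5, .tail = 2 };

	printf("count=%u space=%u\n", demo_count(&r, 8), demo_space(&r, 8));
	return 0;
}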
-static void me4000_outb(unsigned char value, unsigned long port)
+static void inline me4000_outb(unsigned char value, unsigned long port)
{
PORT_PDEBUG("--> 0x%02X port 0x%04lX\n", value, port);
outb(value, port);
}
-static void me4000_outl(unsigned long value, unsigned long port)
+static void inline me4000_outl(unsigned long value, unsigned long port)
{
PORT_PDEBUG("--> 0x%08lX port 0x%04lX\n", value, port);
outl(value, port);
}
-static unsigned long me4000_inl(unsigned long port)
+static unsigned long inline me4000_inl(unsigned long port)
{
unsigned long value;
value = inl(port);
@@ -319,7 +296,7 @@ static unsigned long me4000_inl(unsigned long port)
return value;
}
-static unsigned char me4000_inb(unsigned long port)
+static unsigned char inline me4000_inb(unsigned long port)
{
unsigned char value;
value = inb(port);
@@ -327,102 +304,102 @@ static unsigned char me4000_inb(unsigned long port)
return value;
}
-struct pci_driver me4000_driver = {
+static struct pci_driver me4000_driver = {
.name = ME4000_NAME,
.id_table = me4000_pci_table,
.probe = me4000_probe
};
static struct file_operations me4000_ao_fops_sing = {
- owner:THIS_MODULE,
- write:me4000_ao_write_sing,
- ioctl:me4000_ao_ioctl_sing,
- open:me4000_open,
- release:me4000_release,
+ .owner = THIS_MODULE,
+ .write = me4000_ao_write_sing,
+ .ioctl = me4000_ao_ioctl_sing,
+ .open = me4000_open,
+ .release = me4000_release,
};
static struct file_operations me4000_ao_fops_wrap = {
- owner:THIS_MODULE,
- write:me4000_ao_write_wrap,
- ioctl:me4000_ao_ioctl_wrap,
- open:me4000_open,
- release:me4000_release,
+ .owner = THIS_MODULE,
+ .write = me4000_ao_write_wrap,
+ .ioctl = me4000_ao_ioctl_wrap,
+ .open = me4000_open,
+ .release = me4000_release,
};
static struct file_operations me4000_ao_fops_cont = {
- owner:THIS_MODULE,
- write:me4000_ao_write_cont,
- poll:me4000_ao_poll_cont,
- ioctl:me4000_ao_ioctl_cont,
- open:me4000_open,
- release:me4000_release,
- fsync:me4000_ao_fsync_cont,
+ .owner = THIS_MODULE,
+ .write = me4000_ao_write_cont,
+ .poll = me4000_ao_poll_cont,
+ .ioctl = me4000_ao_ioctl_cont,
+ .open = me4000_open,
+ .release = me4000_release,
+ .fsync = me4000_ao_fsync_cont,
};
static struct file_operations me4000_ai_fops_sing = {
- owner:THIS_MODULE,
- ioctl:me4000_ai_ioctl_sing,
- open:me4000_open,
- release:me4000_release,
+ .owner = THIS_MODULE,
+ .ioctl = me4000_ai_ioctl_sing,
+ .open = me4000_open,
+ .release = me4000_release,
};
static struct file_operations me4000_ai_fops_cont_sw = {
- owner:THIS_MODULE,
- read:me4000_ai_read,
- poll:me4000_ai_poll,
- ioctl:me4000_ai_ioctl_sw,
- open:me4000_open,
- release:me4000_release,
- fasync:me4000_ai_fasync,
+ .owner = THIS_MODULE,
+ .read = me4000_ai_read,
+ .poll = me4000_ai_poll,
+ .ioctl = me4000_ai_ioctl_sw,
+ .open = me4000_open,
+ .release = me4000_release,
+ .fasync = me4000_ai_fasync,
};
static struct file_operations me4000_ai_fops_cont_et = {
- owner:THIS_MODULE,
- read:me4000_ai_read,
- poll:me4000_ai_poll,
- ioctl:me4000_ai_ioctl_ext,
- open:me4000_open,
- release:me4000_release,
+ .owner = THIS_MODULE,
+ .read = me4000_ai_read,
+ .poll = me4000_ai_poll,
+ .ioctl = me4000_ai_ioctl_ext,
+ .open = me4000_open,
+ .release = me4000_release,
};
static struct file_operations me4000_ai_fops_cont_et_value = {
- owner:THIS_MODULE,
- read:me4000_ai_read,
- poll:me4000_ai_poll,
- ioctl:me4000_ai_ioctl_ext,
- open:me4000_open,
- release:me4000_release,
+ .owner = THIS_MODULE,
+ .read = me4000_ai_read,
+ .poll = me4000_ai_poll,
+ .ioctl = me4000_ai_ioctl_ext,
+ .open = me4000_open,
+ .release = me4000_release,
};
static struct file_operations me4000_ai_fops_cont_et_chanlist = {
- owner:THIS_MODULE,
- read:me4000_ai_read,
- poll:me4000_ai_poll,
- ioctl:me4000_ai_ioctl_ext,
- open:me4000_open,
- release:me4000_release,
+ .owner = THIS_MODULE,
+ .read = me4000_ai_read,
+ .poll = me4000_ai_poll,
+ .ioctl = me4000_ai_ioctl_ext,
+ .open = me4000_open,
+ .release = me4000_release,
};
static struct file_operations me4000_dio_fops = {
- owner:THIS_MODULE,
- ioctl:me4000_dio_ioctl,
- open:me4000_open,
- release:me4000_release,
+ .owner = THIS_MODULE,
+ .ioctl = me4000_dio_ioctl,
+ .open = me4000_open,
+ .release = me4000_release,
};
static struct file_operations me4000_cnt_fops = {
- owner:THIS_MODULE,
- ioctl:me4000_cnt_ioctl,
- open:me4000_open,
- release:me4000_release,
+ .owner = THIS_MODULE,
+ .ioctl = me4000_cnt_ioctl,
+ .open = me4000_open,
+ .release = me4000_release,
};
static struct file_operations me4000_ext_int_fops = {
- owner:THIS_MODULE,
- ioctl:me4000_ext_int_ioctl,
- open:me4000_open,
- release:me4000_release,
- fasync:me4000_ext_int_fasync,
+ .owner = THIS_MODULE,
+ .ioctl = me4000_ext_int_ioctl,
+ .open = me4000_open,
+ .release = me4000_release,
+ .fasync = me4000_ext_int_fasync,
};
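The file_operations hunks above replace the old GCC-only `field: value` initializer labels with standard C99 designated initializers; the resulting object is identical, only the spelling is portable. A reduced sketch of the two forms on a hypothetical ops structure:

/* Hypothetical ops table, for illustration only. */
struct demo_ops {
	int (*open)(void);
	int (*release)(void);
};

static int demo_open(void)    { return 0; }
static int demo_release(void) { return 0; }

/* Pre-C99 GNU extension, as removed above:
 *	static struct demo_ops ops = { open: demo_open, release: demo_release };
 * C99 designated initializers, as introduced above; members not named
 * here are implicitly zeroed, so the listed order does not matter. */
static struct demo_ops demo_ops_table = {
	.open    = demo_open,
	.release = demo_release,
};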
static struct file_operations *me4000_ao_fops_array[] = {
@@ -439,9 +416,9 @@ static struct file_operations *me4000_ai_fops_array[] = {
&me4000_ai_fops_cont_et_chanlist, // work through one channel list by external trigger
};
-int __init me4000_init_module(void)
+static int __init me4000_init_module(void)
{
- int result = 0;
+ int result;
CALL_PDEBUG("init_module() is executed\n");
@@ -533,26 +510,26 @@ int __init me4000_init_module(void)
return 0;
- INIT_ERROR_7:
+INIT_ERROR_7:
unregister_chrdev(me4000_ext_int_major_driver_no, ME4000_EXT_INT_NAME);
- INIT_ERROR_6:
+INIT_ERROR_6:
unregister_chrdev(me4000_cnt_major_driver_no, ME4000_CNT_NAME);
- INIT_ERROR_5:
+INIT_ERROR_5:
unregister_chrdev(me4000_dio_major_driver_no, ME4000_DIO_NAME);
- INIT_ERROR_4:
+INIT_ERROR_4:
unregister_chrdev(me4000_ai_major_driver_no, ME4000_AI_NAME);
- INIT_ERROR_3:
+INIT_ERROR_3:
unregister_chrdev(me4000_ao_major_driver_no, ME4000_AO_NAME);
- INIT_ERROR_2:
+INIT_ERROR_2:
pci_unregister_driver(&me4000_driver);
clear_board_info_list();
- INIT_ERROR_1:
+INIT_ERROR_1:
return result;
}
@@ -562,18 +539,18 @@ static void clear_board_info_list(void)
{
struct list_head *board_p;
struct list_head *dac_p;
- me4000_info_t *board_info;
- me4000_ao_context_t *ao_context;
+ struct me4000_info *board_info;
+ struct me4000_ao_context *ao_context;
/* Clear context lists */
for (board_p = me4000_board_info_list.next;
board_p != &me4000_board_info_list; board_p = board_p->next) {
- board_info = list_entry(board_p, me4000_info_t, list);
+ board_info = list_entry(board_p, struct me4000_info, list);
/* Clear analog output context list */
while (!list_empty(&board_info->ao_context_list)) {
dac_p = board_info->ao_context_list.next;
ao_context =
- list_entry(dac_p, me4000_ao_context_t, list);
+ list_entry(dac_p, struct me4000_ao_context, list);
me4000_ao_reset(ao_context);
free_irq(ao_context->irq, ao_context);
if (ao_context->circ_buf.buf)
@@ -600,14 +577,14 @@ static void clear_board_info_list(void)
/* Clear the board info list */
while (!list_empty(&me4000_board_info_list)) {
board_p = me4000_board_info_list.next;
- board_info = list_entry(board_p, me4000_info_t, list);
+ board_info = list_entry(board_p, struct me4000_info, list);
pci_release_regions(board_info->pci_dev_p);
list_del(board_p);
kfree(board_info);
}
}
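clear_board_info_list() recovers each board or DAC context from its embedded list_head via list_entry(), which is container_of() underneath; the hunks above therefore change only the type argument, never the pointer arithmetic. A reduced example on a hypothetical node type:

#include <linux/list.h>

/* Hypothetical node embedding a list_head, for illustration only. */
struct demo_node {
	struct list_head list;
	int payload;
};

static int demo_first_payload(struct list_head *head)
{
	struct demo_node *n;

	if (list_empty(head))
		return -1;
	/* Map the list_head pointer back to the structure containing it. */
	n = list_entry(head->next, struct demo_node, list);
	return n->payload;
}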
-static int get_registers(struct pci_dev *dev, me4000_info_t * board_info)
+static int get_registers(struct pci_dev *dev, struct me4000_info *board_info)
{
/*--------------------------- plx regbase ---------------------------------*/
@@ -667,20 +644,20 @@ static int get_registers(struct pci_dev *dev, me4000_info_t * board_info)
}
static int init_board_info(struct pci_dev *pci_dev_p,
- me4000_info_t * board_info)
+ struct me4000_info *board_info)
{
int i;
int result;
struct list_head *board_p;
board_info->pci_dev_p = pci_dev_p;
- for (i = 0; i < ME4000_BOARD_VERSIONS; i++) {
+ for (i = 0; i < ARRAY_SIZE(me4000_boards); i++) {
if (me4000_boards[i].device_id == pci_dev_p->device) {
board_info->board_p = &me4000_boards[i];
break;
}
}
- if (i == ME4000_BOARD_VERSIONS) {
+ if (i == ARRAY_SIZE(me4000_boards)) {
printk(KERN_ERR
"ME4000:init_board_info():Device ID not valid\n");
return -ENODEV;
@@ -755,21 +732,21 @@ static int init_board_info(struct pci_dev *pci_dev_p,
return 0;
}
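init_board_info() now bounds the device-ID lookup with ARRAY_SIZE(me4000_boards) instead of the hand-maintained ME4000_BOARD_VERSIONS constant, so the loop limit can never drift from the table it walks. The idiom in isolation, on a hypothetical board table:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

/* Hypothetical lookup table, for illustration only. */
struct demo_board {
	unsigned short device_id;
};

static const struct demo_board demo_boards[] = {
	{ 0x0001 }, { 0x0002 }, { 0x0003 },
};

static const struct demo_board *demo_lookup(unsigned short id)
{
	size_t i;

	/* ARRAY_SIZE() is derived from the array itself, so adding an
	 * entry to demo_boards[] automatically extends the bound. */
	for (i = 0; i < ARRAY_SIZE(demo_boards); i++)
		if (demo_boards[i].device_id == id)
			return &demo_boards[i];
	return NULL;
}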
-static int alloc_ao_contexts(me4000_info_t * info)
+static int alloc_ao_contexts(struct me4000_info *info)
{
int i;
int err;
- me4000_ao_context_t *ao_context;
+ struct me4000_ao_context *ao_context;
for (i = 0; i < info->board_p->ao.count; i++) {
- ao_context = kmalloc(sizeof(me4000_ao_context_t), GFP_KERNEL);
+ ao_context = kzalloc(sizeof(struct me4000_ao_context),
+ GFP_KERNEL);
if (!ao_context) {
printk(KERN_ERR
"alloc_ao_contexts():Can't get memory for ao context\n");
release_ao_contexts(info);
return -ENOMEM;
}
- memset(ao_context, 0, sizeof(me4000_ao_context_t));
spin_lock_init(&ao_context->use_lock);
spin_lock_init(&ao_context->int_lock);
@@ -780,15 +757,13 @@ static int alloc_ao_contexts(me4000_info_t * info)
if (info->board_p->ao.fifo_count) {
/* Allocate circular buffer */
ao_context->circ_buf.buf =
- kmalloc(ME4000_AO_BUFFER_SIZE, GFP_KERNEL);
+ kzalloc(ME4000_AO_BUFFER_SIZE, GFP_KERNEL);
if (!ao_context->circ_buf.buf) {
printk(KERN_ERR
"alloc_ao_contexts():Can't get circular buffer\n");
release_ao_contexts(info);
return -ENOMEM;
}
- memset(ao_context->circ_buf.buf, 0,
- ME4000_AO_BUFFER_SIZE);
/* Clear the circular buffer */
ao_context->circ_buf.head = 0;
@@ -872,9 +847,8 @@ static int alloc_ao_contexts(me4000_info_t * info)
ME4000_NAME, ao_context);
if (err) {
printk(KERN_ERR
- "alloc_ao_contexts():Can't get interrupt line");
- if (ao_context->circ_buf.buf)
- kfree(ao_context->circ_buf.buf);
+ "%s:Can't get interrupt line", __func__);
+ kfree(ao_context->circ_buf.buf);
kfree(ao_context);
release_ao_contexts(info);
return -ENODEV;
@@ -888,35 +862,34 @@ static int alloc_ao_contexts(me4000_info_t * info)
return 0;
}
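alloc_ao_contexts() above, and the alloc_ai/dio/cnt/ext_int_context() helpers below, all fold the kmalloc() + memset(..., 0, ...) pair into a single kzalloc() call, which returns already-zeroed memory. A minimal sketch with a hypothetical context structure:

#include <linux/slab.h>

/* Hypothetical per-device context, for illustration only. */
struct demo_ctx {
	int irq;
	void *buf;
};

static struct demo_ctx *demo_ctx_alloc(void)
{
	/* kzalloc() zeroes the allocation, so no follow-up memset()
	 * is needed before the fields are filled in. */
	return kzalloc(sizeof(struct demo_ctx), GFP_KERNEL);
}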
-static void release_ao_contexts(me4000_info_t * board_info)
+static void release_ao_contexts(struct me4000_info *board_info)
{
struct list_head *dac_p;
- me4000_ao_context_t *ao_context;
+ struct me4000_ao_context *ao_context;
/* Clear analog output context list */
while (!list_empty(&board_info->ao_context_list)) {
dac_p = board_info->ao_context_list.next;
- ao_context = list_entry(dac_p, me4000_ao_context_t, list);
+ ao_context = list_entry(dac_p, struct me4000_ao_context, list);
free_irq(ao_context->irq, ao_context);
- if (ao_context->circ_buf.buf)
- kfree(ao_context->circ_buf.buf);
+ kfree(ao_context->circ_buf.buf);
list_del(dac_p);
kfree(ao_context);
}
}
-static int alloc_ai_context(me4000_info_t * info)
+static int alloc_ai_context(struct me4000_info *info)
{
- me4000_ai_context_t *ai_context;
+ struct me4000_ai_context *ai_context;
if (info->board_p->ai.count) {
- ai_context = kmalloc(sizeof(me4000_ai_context_t), GFP_KERNEL);
+ ai_context = kzalloc(sizeof(struct me4000_ai_context),
+ GFP_KERNEL);
if (!ai_context) {
printk(KERN_ERR
"ME4000:alloc_ai_context():Can't get memory for ai context\n");
return -ENOMEM;
}
- memset(ai_context, 0, sizeof(me4000_ai_context_t));
info->ai_context = ai_context;
@@ -958,18 +931,18 @@ static int alloc_ai_context(me4000_info_t * info)
return 0;
}
-static int alloc_dio_context(me4000_info_t * info)
+static int alloc_dio_context(struct me4000_info *info)
{
- me4000_dio_context_t *dio_context;
+ struct me4000_dio_context *dio_context;
if (info->board_p->dio.count) {
- dio_context = kmalloc(sizeof(me4000_dio_context_t), GFP_KERNEL);
+ dio_context = kzalloc(sizeof(struct me4000_dio_context),
+ GFP_KERNEL);
if (!dio_context) {
printk(KERN_ERR
"ME4000:alloc_dio_context():Can't get memory for dio context\n");
return -ENOMEM;
}
- memset(dio_context, 0, sizeof(me4000_dio_context_t));
info->dio_context = dio_context;
@@ -995,18 +968,18 @@ static int alloc_dio_context(me4000_info_t * info)
return 0;
}
-static int alloc_cnt_context(me4000_info_t * info)
+static int alloc_cnt_context(struct me4000_info *info)
{
- me4000_cnt_context_t *cnt_context;
+ struct me4000_cnt_context *cnt_context;
if (info->board_p->cnt.count) {
- cnt_context = kmalloc(sizeof(me4000_cnt_context_t), GFP_KERNEL);
+ cnt_context = kzalloc(sizeof(struct me4000_cnt_context),
+ GFP_KERNEL);
if (!cnt_context) {
printk(KERN_ERR
"ME4000:alloc_cnt_context():Can't get memory for cnt context\n");
return -ENOMEM;
}
- memset(cnt_context, 0, sizeof(me4000_cnt_context_t));
info->cnt_context = cnt_context;
@@ -1026,19 +999,18 @@ static int alloc_cnt_context(me4000_info_t * info)
return 0;
}
-static int alloc_ext_int_context(me4000_info_t * info)
+static int alloc_ext_int_context(struct me4000_info *info)
{
- me4000_ext_int_context_t *ext_int_context;
+ struct me4000_ext_int_context *ext_int_context;
if (info->board_p->cnt.count) {
ext_int_context =
- kmalloc(sizeof(me4000_ext_int_context_t), GFP_KERNEL);
+ kzalloc(sizeof(struct me4000_ext_int_context), GFP_KERNEL);
if (!ext_int_context) {
printk(KERN_ERR
"ME4000:alloc_ext_int_context():Can't get memory for cnt context\n");
return -ENOMEM;
}
- memset(ext_int_context, 0, sizeof(me4000_ext_int_context_t));
info->ext_int_context = ext_int_context;
@@ -1060,19 +1032,18 @@ static int alloc_ext_int_context(me4000_info_t * info)
static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int result = 0;
- me4000_info_t *board_info;
+ struct me4000_info *board_info;
CALL_PDEBUG("me4000_probe() is executed\n");
/* Allocate structure for board context */
- board_info = kmalloc(sizeof(me4000_info_t), GFP_KERNEL);
+ board_info = kzalloc(sizeof(struct me4000_info), GFP_KERNEL);
if (!board_info) {
printk(KERN_ERR
"ME4000:Can't get memory for board info structure\n");
result = -ENOMEM;
goto PROBE_ERROR_1;
}
- memset(board_info, 0, sizeof(me4000_info_t));
/* Add to global linked list */
list_add_tail(&board_info->list, &me4000_board_info_list);
@@ -1080,70 +1051,70 @@ static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Get the PCI base registers */
result = get_registers(dev, board_info);
if (result) {
- printk(KERN_ERR "me4000_probe():Cannot get registers\n");
+ printk(KERN_ERR "%s:Cannot get registers\n", __func__);
goto PROBE_ERROR_2;
}
/* Enable the device */
result = pci_enable_device(dev);
if (result < 0) {
- printk(KERN_ERR "me4000_probe():Cannot enable PCI device\n");
+ printk(KERN_ERR "%s:Cannot enable PCI device\n", __func__);
goto PROBE_ERROR_2;
}
/* Request the PCI register regions */
result = pci_request_regions(dev, ME4000_NAME);
if (result < 0) {
- printk(KERN_ERR "me4000_probe():Cannot request I/O regions\n");
+ printk(KERN_ERR "%s:Cannot request I/O regions\n", __func__);
goto PROBE_ERROR_2;
}
/* Initialize board info */
result = init_board_info(dev, board_info);
if (result) {
- printk(KERN_ERR "me4000_probe():Cannot init baord info\n");
+ printk(KERN_ERR "%s:Cannot init baord info\n", __func__);
goto PROBE_ERROR_3;
}
/* Download the xilinx firmware */
result = me4000_xilinx_download(board_info);
if (result) {
- printk(KERN_ERR "me4000_probe:Can't download firmware\n");
+ printk(KERN_ERR "%s:Can't download firmware\n", __func__);
goto PROBE_ERROR_3;
}
/* Make a hardware reset */
result = me4000_reset_board(board_info);
if (result) {
- printk(KERN_ERR "me4000_probe:Can't reset board\n");
+ printk(KERN_ERR "%s :Can't reset board\n", __func__);
goto PROBE_ERROR_3;
}
/* Allocate analog output context structures */
result = alloc_ao_contexts(board_info);
if (result) {
- printk(KERN_ERR "me4000_probe():Cannot allocate ao contexts\n");
+ printk(KERN_ERR "%s:Cannot allocate ao contexts\n", __func__);
goto PROBE_ERROR_3;
}
/* Allocate analog input context */
result = alloc_ai_context(board_info);
if (result) {
- printk(KERN_ERR "me4000_probe():Cannot allocate ai context\n");
+ printk(KERN_ERR "%s:Cannot allocate ai context\n", __func__);
goto PROBE_ERROR_4;
}
/* Allocate digital I/O context */
result = alloc_dio_context(board_info);
if (result) {
- printk(KERN_ERR "me4000_probe():Cannot allocate dio context\n");
+ printk(KERN_ERR "%s:Cannot allocate dio context\n", __func__);
goto PROBE_ERROR_5;
}
/* Allocate counter context */
result = alloc_cnt_context(board_info);
if (result) {
- printk(KERN_ERR "me4000_probe():Cannot allocate cnt context\n");
+ printk(KERN_ERR "%s:Cannot allocate cnt context\n", __func__);
goto PROBE_ERROR_6;
}
@@ -1151,36 +1122,36 @@ static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id)
result = alloc_ext_int_context(board_info);
if (result) {
printk(KERN_ERR
- "me4000_probe():Cannot allocate ext_int context\n");
+ "%s:Cannot allocate ext_int context\n", __func__);
goto PROBE_ERROR_7;
}
return 0;
- PROBE_ERROR_7:
+PROBE_ERROR_7:
kfree(board_info->cnt_context);
- PROBE_ERROR_6:
+PROBE_ERROR_6:
kfree(board_info->dio_context);
- PROBE_ERROR_5:
+PROBE_ERROR_5:
kfree(board_info->ai_context);
- PROBE_ERROR_4:
+PROBE_ERROR_4:
release_ao_contexts(board_info);
- PROBE_ERROR_3:
+PROBE_ERROR_3:
pci_release_regions(dev);
- PROBE_ERROR_2:
+PROBE_ERROR_2:
list_del(&board_info->list);
kfree(board_info);
- PROBE_ERROR_1:
+PROBE_ERROR_1:
return result;
}
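me4000_probe() keeps the driver's goto-based unwind; the hunks above only pull the PROBE_ERROR_* labels back to column zero, the usual kernel layout. The shape of that error handling, reduced to a hypothetical two-step probe:

/* Hypothetical setup helpers standing in for pci_enable_device(),
 * pci_request_regions() and friends. */
static int demo_step_one(void)       { return 0; }
static int demo_step_two(void)       { return -1; }
static void demo_undo_step_one(void) { }

static int demo_probe(void)
{
	int ret;

	ret = demo_step_one();
	if (ret)
		goto err_one;

	ret = demo_step_two();
	if (ret)
		goto err_two;

	return 0;

err_two:
	demo_undo_step_one();	/* unwind in reverse order of setup */
err_one:
	return ret;
}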
-static int me4000_xilinx_download(me4000_info_t * info)
+static int me4000_xilinx_download(struct me4000_info *info)
{
int size = 0;
u32 value = 0;
@@ -1211,7 +1182,7 @@ static int me4000_xilinx_download(me4000_info_t * info)
/* Wait until /INIT pin is set */
udelay(20);
if (!inl(info->plx_regbase + PLX_INTCSR) & 0x20) {
- printk(KERN_ERR "me4000_xilinx_download():Can't init Xilinx\n");
+ printk(KERN_ERR "%s:Can't init Xilinx\n", __func__);
return -EIO;
}
@@ -1232,7 +1203,7 @@ static int me4000_xilinx_download(me4000_info_t * info)
/* Check if BUSY flag is low */
if (inl(info->plx_regbase + PLX_ICR) & 0x20) {
printk(KERN_ERR
- "me4000_xilinx_download():Xilinx is still busy (idx = %d)\n",
+ "%s:Xilinx is still busy (idx = %d)\n", __func__,
idx);
return -EIO;
}
@@ -1246,9 +1217,9 @@ static int me4000_xilinx_download(me4000_info_t * info)
PDEBUG("me4000_xilinx_download():Download was successful\n");
} else {
printk(KERN_ERR
- "ME4000:me4000_xilinx_download():DONE flag is not set\n");
+ "ME4000:%s:DONE flag is not set\n", __func__);
printk(KERN_ERR
- "ME4000:me4000_xilinx_download():Download not succesful\n");
+ "ME4000:%s:Download not succesful\n", __func__);
return -EIO;
}
@@ -1260,7 +1231,7 @@ static int me4000_xilinx_download(me4000_info_t * info)
return 0;
}
-static int me4000_reset_board(me4000_info_t * info)
+static int me4000_reset_board(struct me4000_info *info)
{
unsigned long icr;
@@ -1314,12 +1285,12 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
int err = 0;
int i;
struct list_head *ptr;
- me4000_info_t *board_info = NULL;
- me4000_ao_context_t *ao_context = NULL;
- me4000_ai_context_t *ai_context = NULL;
- me4000_dio_context_t *dio_context = NULL;
- me4000_cnt_context_t *cnt_context = NULL;
- me4000_ext_int_context_t *ext_int_context = NULL;
+ struct me4000_info *board_info = NULL;
+ struct me4000_ao_context *ao_context = NULL;
+ struct me4000_ai_context *ai_context = NULL;
+ struct me4000_dio_context *dio_context = NULL;
+ struct me4000_cnt_context *cnt_context = NULL;
+ struct me4000_ext_int_context *ext_int_context = NULL;
CALL_PDEBUG("me4000_open() is executed\n");
@@ -1335,7 +1306,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
/* Search for the board context */
for (ptr = me4000_board_info_list.next, i = 0;
ptr != &me4000_board_info_list; ptr = ptr->next, i++) {
- board_info = list_entry(ptr, me4000_info_t, list);
+ board_info = list_entry(ptr, struct me4000_info, list);
if (i == board)
break;
}
@@ -1351,7 +1322,8 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
for (ptr = board_info->ao_context_list.next, i = 0;
ptr != &board_info->ao_context_list;
ptr = ptr->next, i++) {
- ao_context = list_entry(ptr, me4000_ao_context_t, list);
+ ao_context = list_entry(ptr, struct me4000_ao_context,
+ list);
if (i == dev)
break;
}
@@ -1415,7 +1387,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
/* Search for the board context */
for (ptr = me4000_board_info_list.next, i = 0;
ptr != &me4000_board_info_list; ptr = ptr->next, i++) {
- board_info = list_entry(ptr, me4000_info_t, list);
+ board_info = list_entry(ptr, struct me4000_info, list);
if (i == board)
break;
}
@@ -1469,7 +1441,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
/* Search for the board context */
for (ptr = me4000_board_info_list.next;
ptr != &me4000_board_info_list; ptr = ptr->next) {
- board_info = list_entry(ptr, me4000_info_t, list);
+ board_info = list_entry(ptr, struct me4000_info, list);
if (board_info->board_count == board)
break;
}
@@ -1514,7 +1486,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
/* Search for the board context */
for (ptr = me4000_board_info_list.next;
ptr != &me4000_board_info_list; ptr = ptr->next) {
- board_info = list_entry(ptr, me4000_info_t, list);
+ board_info = list_entry(ptr, struct me4000_info, list);
if (board_info->board_count == board)
break;
}
@@ -1557,7 +1529,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
/* Search for the board context */
for (ptr = me4000_board_info_list.next;
ptr != &me4000_board_info_list; ptr = ptr->next) {
- board_info = list_entry(ptr, me4000_info_t, list);
+ board_info = list_entry(ptr, struct me4000_info, list);
if (board_info->board_count == board)
break;
}
@@ -1613,11 +1585,11 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
static int me4000_release(struct inode *inode_p, struct file *file_p)
{
- me4000_ao_context_t *ao_context;
- me4000_ai_context_t *ai_context;
- me4000_dio_context_t *dio_context;
- me4000_cnt_context_t *cnt_context;
- me4000_ext_int_context_t *ext_int_context;
+ struct me4000_ao_context *ao_context;
+ struct me4000_ai_context *ai_context;
+ struct me4000_dio_context *dio_context;
+ struct me4000_cnt_context *cnt_context;
+ struct me4000_ext_int_context *ext_int_context;
CALL_PDEBUG("me4000_release() is executed\n");
@@ -1677,7 +1649,7 @@ static int me4000_release(struct inode *inode_p, struct file *file_p)
/*------------------------------- Analog output stuff --------------------------------------*/
-static int me4000_ao_prepare(me4000_ao_context_t * ao_context)
+static int me4000_ao_prepare(struct me4000_ao_context *ao_context)
{
unsigned long flags;
@@ -1756,7 +1728,7 @@ static int me4000_ao_prepare(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_reset(me4000_ao_context_t * ao_context)
+static int me4000_ao_reset(struct me4000_ao_context *ao_context)
{
u32 tmp;
wait_queue_head_t queue;
@@ -1777,9 +1749,10 @@ static int me4000_ao_reset(me4000_ao_context_t * ao_context)
tmp |= ME4000_AO_CTRL_BIT_IMMEDIATE_STOP;
me4000_outl(tmp, ao_context->ctrl_reg);
- while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) {
- sleep_on_timeout(&queue, 1);
- }
+ wait_event_timeout(queue,
+ (inl(ao_context->status_reg) &
+ ME4000_AO_STATUS_BIT_FSM) == 0,
+ 1);
/* Set to transparent mode */
me4000_ao_simultaneous_disable(ao_context);
@@ -1812,9 +1785,10 @@ static int me4000_ao_reset(me4000_ao_context_t * ao_context)
me4000_outl(tmp, ao_context->ctrl_reg);
spin_unlock_irqrestore(&ao_context->int_lock, flags);
- while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) {
- sleep_on_timeout(&queue, 1);
- }
+ wait_event_timeout(queue,
+ (inl(ao_context->status_reg) &
+ ME4000_AO_STATUS_BIT_FSM) == 0,
+ 1);
/* Clear the circular buffer */
ao_context->circ_buf.head = 0;
@@ -1853,9 +1827,9 @@ static int me4000_ao_reset(me4000_ao_context_t * ao_context)
}
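The two hunks in me4000_ao_reset() above drop the deprecated, race-prone sleep_on_timeout() loop in favour of wait_event_timeout(). Because the wait queue there is still a local variable that nothing ever wakes, each call simply sleeps for at most one jiffy while re-checking the FSM bit; the more typical pairing is a shared queue plus an explicit waker, roughly as in this sketch with hypothetical names:

#include <linux/wait.h>
#include <linux/errno.h>

/* Hypothetical completion flag and shared wait queue. */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_done;

/* Waiter: block until demo_done is set or 100 jiffies elapse. */
static int demo_wait(void)
{
	long left = wait_event_timeout(demo_wq, demo_done != 0, 100);

	return left ? 0 : -ETIMEDOUT;	/* 0 means the timeout expired */
}

/* Completion path (e.g. an interrupt handler) sets the flag and wakes. */
static void demo_complete(void)
{
	demo_done = 1;
	wake_up(&demo_wq);
}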
static ssize_t me4000_ao_write_sing(struct file *filep, const char *buff,
- size_t cnt, loff_t * offp)
+ size_t cnt, loff_t *offp)
{
- me4000_ao_context_t *ao_context = filep->private_data;
+ struct me4000_ao_context *ao_context = filep->private_data;
u32 value;
const u16 *buffer = (const u16 *)buff;
@@ -1863,13 +1837,13 @@ static ssize_t me4000_ao_write_sing(struct file *filep, const char *buff,
if (cnt != 2) {
printk(KERN_ERR
- "me4000_ao_write_sing():Write count is not 2\n");
+ "%s:Write count is not 2\n", __func__);
return -EINVAL;
}
if (get_user(value, buffer)) {
printk(KERN_ERR
- "me4000_ao_write_sing():Cannot copy data from user\n");
+ "%s:Cannot copy data from user\n", __func__);
return -EFAULT;
}
@@ -1879,9 +1853,9 @@ static ssize_t me4000_ao_write_sing(struct file *filep, const char *buff,
}
static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
- size_t cnt, loff_t * offp)
+ size_t cnt, loff_t *offp)
{
- me4000_ao_context_t *ao_context = filep->private_data;
+ struct me4000_ao_context *ao_context = filep->private_data;
size_t i;
u32 value;
u32 tmp;
@@ -1893,13 +1867,13 @@ static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
/* Check if a conversion is already running */
if (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) {
printk(KERN_ERR
- "ME4000:me4000_ao_write_wrap():There is already a conversion running\n");
+ "%s:There is already a conversion running\n", __func__);
return -EBUSY;
}
if (count > ME4000_AO_FIFO_COUNT) {
printk(KERN_ERR
- "me4000_ao_write_wrap():Can't load more than %d values\n",
+ "%s:Can't load more than %d values\n", __func__,
ME4000_AO_FIFO_COUNT);
return -ENOSPC;
}
@@ -1914,7 +1888,7 @@ static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
for (i = 0; i < count; i++) {
if (get_user(value, buffer + i)) {
printk(KERN_ERR
- "me4000_ao_write_single():Cannot copy data from user\n");
+ "%s:Cannot copy data from user\n", __func__);
return -EFAULT;
}
if (((ao_context->fifo_reg & 0xFF) == ME4000_AO_01_FIFO_REG)
@@ -1928,9 +1902,9 @@ static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
}
static ssize_t me4000_ao_write_cont(struct file *filep, const char *buff,
- size_t cnt, loff_t * offp)
+ size_t cnt, loff_t *offp)
{
- me4000_ao_context_t *ao_context = filep->private_data;
+ struct me4000_ao_context *ao_context = filep->private_data;
const u16 *buffer = (const u16 *)buff;
size_t count = cnt / 2;
unsigned long flags;
@@ -2154,9 +2128,9 @@ static ssize_t me4000_ao_write_cont(struct file *filep, const char *buff,
return 2 * ret;
}
-static unsigned int me4000_ao_poll_cont(struct file *file_p, poll_table * wait)
+static unsigned int me4000_ao_poll_cont(struct file *file_p, poll_table *wait)
{
- me4000_ao_context_t *ao_context;
+ struct me4000_ao_context *ao_context;
unsigned long mask = 0;
CALL_PDEBUG("me4000_ao_poll_cont() is executed\n");
@@ -2177,7 +2151,7 @@ static unsigned int me4000_ao_poll_cont(struct file *file_p, poll_table * wait)
static int me4000_ao_fsync_cont(struct file *file_p, struct dentry *dentry_p,
int datasync)
{
- me4000_ao_context_t *ao_context;
+ struct me4000_ao_context *ao_context;
wait_queue_head_t queue;
CALL_PDEBUG("me4000_ao_fsync_cont() is executed\n");
@@ -2187,15 +2161,19 @@ static int me4000_ao_fsync_cont(struct file *file_p, struct dentry *dentry_p,
while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) {
interruptible_sleep_on_timeout(&queue, 1);
+ wait_event_interruptible_timeout(queue,
+ !(inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM),
+ 1);
if (ao_context->pipe_flag) {
printk(KERN_ERR
- "me4000_ao_fsync_cont():Broken pipe detected\n");
+ "%s:Broken pipe detected\n", __func__);
return -EPIPE;
}
if (signal_pending(current)) {
printk(KERN_ERR
- "me4000_ao_fsync_cont():Wait on state machine interrupted\n");
+ "%s:Wait on state machine interrupted\n",
+ __func__);
return -EINTR;
}
}
@@ -2206,7 +2184,7 @@ static int me4000_ao_fsync_cont(struct file *file_p, struct dentry *dentry_p,
static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
unsigned int service, unsigned long arg)
{
- me4000_ao_context_t *ao_context;
+ struct me4000_ao_context *ao_context;
CALL_PDEBUG("me4000_ao_ioctl_sing() is executed\n");
@@ -2229,7 +2207,7 @@ static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
case ME4000_AO_PRELOAD_UPDATE:
return me4000_ao_preload_update(ao_context);
case ME4000_GET_USER_INFO:
- return me4000_get_user_info((me4000_user_info_t *) arg,
+ return me4000_get_user_info((struct me4000_user_info *)arg,
ao_context->board_info);
case ME4000_AO_SIMULTANEOUS_EX_TRIG:
return me4000_ao_simultaneous_ex_trig(ao_context);
@@ -2239,8 +2217,9 @@ static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
return me4000_ao_simultaneous_disable(ao_context);
case ME4000_AO_SIMULTANEOUS_UPDATE:
return
- me4000_ao_simultaneous_update((me4000_ao_channel_list_t *)
- arg, ao_context);
+ me4000_ao_simultaneous_update(
+ (struct me4000_ao_channel_list *)arg,
+ ao_context);
case ME4000_AO_EX_TRIG_TIMEOUT:
return me4000_ao_ex_trig_timeout((unsigned long *)arg,
ao_context);
@@ -2258,7 +2237,7 @@ static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
static int me4000_ao_ioctl_wrap(struct inode *inode_p, struct file *file_p,
unsigned int service, unsigned long arg)
{
- me4000_ao_context_t *ao_context;
+ struct me4000_ao_context *ao_context;
CALL_PDEBUG("me4000_ao_ioctl_wrap() is executed\n");
@@ -2287,7 +2266,7 @@ static int me4000_ao_ioctl_wrap(struct inode *inode_p, struct file *file_p,
case ME4000_AO_EX_TRIG_DISABLE:
return me4000_ao_ex_trig_disable(ao_context);
case ME4000_GET_USER_INFO:
- return me4000_get_user_info((me4000_user_info_t *) arg,
+ return me4000_get_user_info((struct me4000_user_info *)arg,
ao_context->board_info);
case ME4000_AO_FSM_STATE:
return me4000_ao_fsm_state((int *)arg, ao_context);
@@ -2310,7 +2289,7 @@ static int me4000_ao_ioctl_wrap(struct inode *inode_p, struct file *file_p,
static int me4000_ao_ioctl_cont(struct inode *inode_p, struct file *file_p,
unsigned int service, unsigned long arg)
{
- me4000_ao_context_t *ao_context;
+ struct me4000_ao_context *ao_context;
CALL_PDEBUG("me4000_ao_ioctl_cont() is executed\n");
@@ -2345,7 +2324,7 @@ static int me4000_ao_ioctl_cont(struct inode *inode_p, struct file *file_p,
case ME4000_AO_FSM_STATE:
return me4000_ao_fsm_state((int *)arg, ao_context);
case ME4000_GET_USER_INFO:
- return me4000_get_user_info((me4000_user_info_t *) arg,
+ return me4000_get_user_info((struct me4000_user_info *)arg,
ao_context->board_info);
case ME4000_AO_SYNCHRONOUS_EX_TRIG:
return me4000_ao_synchronous_ex_trig(ao_context);
@@ -2362,7 +2341,8 @@ static int me4000_ao_ioctl_cont(struct inode *inode_p, struct file *file_p,
return 0;
}
-static int me4000_ao_start(unsigned long *arg, me4000_ao_context_t * ao_context)
+static int me4000_ao_start(unsigned long *arg,
+ struct me4000_ao_context *ao_context)
{
u32 tmp;
wait_queue_head_t queue;
@@ -2412,7 +2392,7 @@ static int me4000_ao_start(unsigned long *arg, me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_stop(me4000_ao_context_t * ao_context)
+static int me4000_ao_stop(struct me4000_ao_context *ao_context)
{
u32 tmp;
wait_queue_head_t queue;
@@ -2445,7 +2425,7 @@ static int me4000_ao_stop(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_immediate_stop(me4000_ao_context_t * ao_context)
+static int me4000_ao_immediate_stop(struct me4000_ao_context *ao_context)
{
u32 tmp;
wait_queue_head_t queue;
@@ -2477,8 +2457,8 @@ static int me4000_ao_immediate_stop(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_timer_set_divisor(u32 * arg,
- me4000_ao_context_t * ao_context)
+static int me4000_ao_timer_set_divisor(u32 *arg,
+ struct me4000_ao_context *ao_context)
{
u32 divisor;
u32 tmp;
@@ -2518,7 +2498,7 @@ static int me4000_ao_timer_set_divisor(u32 * arg,
}
static int me4000_ao_ex_trig_set_edge(int *arg,
- me4000_ao_context_t * ao_context)
+ struct me4000_ao_context *ao_context)
{
int mode;
u32 tmp;
@@ -2569,7 +2549,7 @@ static int me4000_ao_ex_trig_set_edge(int *arg,
return 0;
}
-static int me4000_ao_ex_trig_enable(me4000_ao_context_t * ao_context)
+static int me4000_ao_ex_trig_enable(struct me4000_ao_context *ao_context)
{
u32 tmp;
unsigned long flags;
@@ -2593,7 +2573,7 @@ static int me4000_ao_ex_trig_enable(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_ex_trig_disable(me4000_ao_context_t * ao_context)
+static int me4000_ao_ex_trig_disable(struct me4000_ao_context *ao_context)
{
u32 tmp;
unsigned long flags;
@@ -2617,7 +2597,7 @@ static int me4000_ao_ex_trig_disable(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_simultaneous_disable(me4000_ao_context_t * ao_context)
+static int me4000_ao_simultaneous_disable(struct me4000_ao_context *ao_context)
{
u32 tmp;
@@ -2643,7 +2623,7 @@ static int me4000_ao_simultaneous_disable(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_simultaneous_ex_trig(me4000_ao_context_t * ao_context)
+static int me4000_ao_simultaneous_ex_trig(struct me4000_ao_context *ao_context)
{
u32 tmp;
@@ -2659,7 +2639,7 @@ static int me4000_ao_simultaneous_ex_trig(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_simultaneous_sw(me4000_ao_context_t * ao_context)
+static int me4000_ao_simultaneous_sw(struct me4000_ao_context *ao_context)
{
u32 tmp;
@@ -2675,13 +2655,13 @@ static int me4000_ao_simultaneous_sw(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_preload(me4000_ao_context_t * ao_context)
+static int me4000_ao_preload(struct me4000_ao_context *ao_context)
{
CALL_PDEBUG("me4000_ao_preload() is executed\n");
return me4000_ao_simultaneous_sw(ao_context);
}
-static int me4000_ao_preload_update(me4000_ao_context_t * ao_context)
+static int me4000_ao_preload_update(struct me4000_ao_context *ao_context)
{
u32 tmp;
u32 ctrl;
@@ -2705,10 +2685,12 @@ static int me4000_ao_preload_update(me4000_ao_context_t * ao_context)
if (!
(tmp &
(0x1 <<
- (((me4000_ao_context_t *) entry)->index + 16)))) {
+ (((struct me4000_ao_context *)entry)->index
+ + 16)))) {
tmp &=
~(0x1 <<
- (((me4000_ao_context_t *) entry)->index));
+ (((struct me4000_ao_context *)entry)->
+ index));
}
}
}
@@ -2718,18 +2700,19 @@ static int me4000_ao_preload_update(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * arg,
- me4000_ao_context_t * ao_context)
+static int me4000_ao_simultaneous_update(struct me4000_ao_channel_list *arg,
+ struct me4000_ao_context *ao_context)
{
int err;
int i;
u32 tmp;
- me4000_ao_channel_list_t channels;
+ struct me4000_ao_channel_list channels;
CALL_PDEBUG("me4000_ao_simultaneous_update() is executed\n");
/* Copy data from user */
- err = copy_from_user(&channels, arg, sizeof(me4000_ao_channel_list_t));
+ err = copy_from_user(&channels, arg,
+ sizeof(struct me4000_ao_channel_list));
if (err) {
printk(KERN_ERR
"ME4000:me4000_ao_simultaneous_update():Can't copy command\n");
@@ -2737,13 +2720,12 @@ static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * arg,
}
channels.list =
- kmalloc(sizeof(unsigned long) * channels.count, GFP_KERNEL);
+ kzalloc(sizeof(unsigned long) * channels.count, GFP_KERNEL);
if (!channels.list) {
printk(KERN_ERR
"ME4000:me4000_ao_simultaneous_update():Can't get buffer\n");
return -ENOMEM;
}
- memset(channels.list, 0, sizeof(unsigned long) * channels.count);
/* Copy channel list from user */
err =
@@ -2777,7 +2759,7 @@ static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * arg,
return 0;
}
-static int me4000_ao_synchronous_ex_trig(me4000_ao_context_t * ao_context)
+static int me4000_ao_synchronous_ex_trig(struct me4000_ao_context *ao_context)
{
u32 tmp;
unsigned long flags;
@@ -2813,7 +2795,7 @@ static int me4000_ao_synchronous_ex_trig(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_synchronous_sw(me4000_ao_context_t * ao_context)
+static int me4000_ao_synchronous_sw(struct me4000_ao_context *ao_context)
{
u32 tmp;
unsigned long flags;
@@ -2848,13 +2830,13 @@ static int me4000_ao_synchronous_sw(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_synchronous_disable(me4000_ao_context_t * ao_context)
+static int me4000_ao_synchronous_disable(struct me4000_ao_context *ao_context)
{
return me4000_ao_simultaneous_disable(ao_context);
}
static int me4000_ao_get_free_buffer(unsigned long *arg,
- me4000_ao_context_t * ao_context)
+ struct me4000_ao_context *ao_context)
{
unsigned long c;
int err;
@@ -2864,7 +2846,7 @@ static int me4000_ao_get_free_buffer(unsigned long *arg,
err = copy_to_user(arg, &c, sizeof(unsigned long));
if (err) {
printk(KERN_ERR
- "ME4000:me4000_ao_get_free_buffer():Can't copy to user space\n");
+ "%s:Can't copy to user space\n", __func__);
return -EFAULT;
}
@@ -2872,7 +2854,7 @@ static int me4000_ao_get_free_buffer(unsigned long *arg,
}
static int me4000_ao_ex_trig_timeout(unsigned long *arg,
- me4000_ao_context_t * ao_context)
+ struct me4000_ao_context *ao_context)
{
u32 tmp;
wait_queue_head_t queue;
@@ -2928,7 +2910,7 @@ static int me4000_ao_ex_trig_timeout(unsigned long *arg,
return 0;
}
-static int me4000_ao_enable_do(me4000_ao_context_t * ao_context)
+static int me4000_ao_enable_do(struct me4000_ao_context *ao_context)
{
u32 tmp;
unsigned long flags;
@@ -2959,7 +2941,7 @@ static int me4000_ao_enable_do(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_disable_do(me4000_ao_context_t * ao_context)
+static int me4000_ao_disable_do(struct me4000_ao_context *ao_context)
{
u32 tmp;
unsigned long flags;
@@ -2989,7 +2971,7 @@ static int me4000_ao_disable_do(me4000_ao_context_t * ao_context)
return 0;
}
-static int me4000_ao_fsm_state(int *arg, me4000_ao_context_t * ao_context)
+static int me4000_ao_fsm_state(int *arg, struct me4000_ao_context *ao_context)
{
unsigned long tmp;
@@ -3012,9 +2994,9 @@ static int me4000_ao_fsm_state(int *arg, me4000_ao_context_t * ao_context)
return 0;
}
-/*------------------------------- Analog input stuff --------------------------------------*/
+/*------------------------- Analog input stuff -------------------------------*/
-static int me4000_ai_prepare(me4000_ai_context_t * ai_context)
+static int me4000_ai_prepare(struct me4000_ai_context *ai_context)
{
wait_queue_head_t queue;
int err;
@@ -3057,14 +3039,13 @@ static int me4000_ai_prepare(me4000_ai_context_t * ai_context)
/* Allocate circular buffer */
ai_context->circ_buf.buf =
- kmalloc(ME4000_AI_BUFFER_SIZE, GFP_KERNEL);
+ kzalloc(ME4000_AI_BUFFER_SIZE, GFP_KERNEL);
if (!ai_context->circ_buf.buf) {
printk(KERN_ERR
"ME4000:me4000_ai_prepare():Can't get circular buffer\n");
free_irq(ai_context->irq, ai_context);
return -ENOMEM;
}
- memset(ai_context->circ_buf.buf, 0, ME4000_AI_BUFFER_SIZE);
/* Clear the circular buffer */
ai_context->circ_buf.head = 0;
@@ -3074,7 +3055,7 @@ static int me4000_ai_prepare(me4000_ai_context_t * ai_context)
return 0;
}
-static int me4000_ai_reset(me4000_ai_context_t * ai_context)
+static int me4000_ai_reset(struct me4000_ai_context *ai_context)
{
wait_queue_head_t queue;
u32 tmp;
@@ -3139,7 +3120,7 @@ static int me4000_ai_reset(me4000_ai_context_t * ai_context)
static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
unsigned int service, unsigned long arg)
{
- me4000_ai_context_t *ai_context;
+ struct me4000_ai_context *ai_context;
CALL_PDEBUG("me4000_ai_ioctl_sing() is executed\n");
@@ -3157,16 +3138,17 @@ static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
switch (service) {
case ME4000_AI_SINGLE:
- return me4000_ai_single((me4000_ai_single_t *) arg, ai_context);
+ return me4000_ai_single((struct me4000_ai_single *)arg,
+ ai_context);
case ME4000_AI_EX_TRIG_ENABLE:
return me4000_ai_ex_trig_enable(ai_context);
case ME4000_AI_EX_TRIG_DISABLE:
return me4000_ai_ex_trig_disable(ai_context);
case ME4000_AI_EX_TRIG_SETUP:
- return me4000_ai_ex_trig_setup((me4000_ai_trigger_t *) arg,
+ return me4000_ai_ex_trig_setup((struct me4000_ai_trigger *)arg,
ai_context);
case ME4000_GET_USER_INFO:
- return me4000_get_user_info((me4000_user_info_t *) arg,
+ return me4000_get_user_info((struct me4000_user_info *)arg,
ai_context->board_info);
case ME4000_AI_OFFSET_ENABLE:
return me4000_ai_offset_enable(ai_context);
@@ -3177,9 +3159,11 @@ static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
case ME4000_AI_FULLSCALE_DISABLE:
return me4000_ai_fullscale_disable(ai_context);
case ME4000_AI_EEPROM_READ:
- return me4000_eeprom_read((me4000_eeprom_t *) arg, ai_context);
+ return me4000_eeprom_read((struct me4000_eeprom *)arg,
+ ai_context);
case ME4000_AI_EEPROM_WRITE:
- return me4000_eeprom_write((me4000_eeprom_t *) arg, ai_context);
+ return me4000_eeprom_write((struct me4000_eeprom *)arg,
+ ai_context);
default:
printk(KERN_ERR
"me4000_ai_ioctl_sing():Invalid service number\n");
@@ -3188,10 +3172,10 @@ static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
return 0;
}
-static int me4000_ai_single(me4000_ai_single_t * arg,
- me4000_ai_context_t * ai_context)
+static int me4000_ai_single(struct me4000_ai_single *arg,
+ struct me4000_ai_context *ai_context)
{
- me4000_ai_single_t cmd;
+ struct me4000_ai_single cmd;
int err;
u32 tmp;
wait_queue_head_t queue;
@@ -3202,7 +3186,7 @@ static int me4000_ai_single(me4000_ai_single_t * arg,
init_waitqueue_head(&queue);
/* Copy data from user */
- err = copy_from_user(&cmd, arg, sizeof(me4000_ai_single_t));
+ err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_single));
if (err) {
printk(KERN_ERR
"ME4000:me4000_ai_single():Can't copy from user space\n");
@@ -3301,7 +3285,7 @@ static int me4000_ai_single(me4000_ai_single_t * arg,
cmd.value = me4000_inl(ai_context->data_reg) & 0xFFFF;
/* Copy result back to user */
- err = copy_to_user(arg, &cmd, sizeof(me4000_ai_single_t));
+ err = copy_to_user(arg, &cmd, sizeof(struct me4000_ai_single));
if (err) {
printk(KERN_ERR
"ME4000:me4000_ai_single():Can't copy to user space\n");
@@ -3314,7 +3298,7 @@ static int me4000_ai_single(me4000_ai_single_t * arg,
static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
unsigned int service, unsigned long arg)
{
- me4000_ai_context_t *ai_context;
+ struct me4000_ai_context *ai_context;
CALL_PDEBUG("me4000_ai_ioctl_sw() is executed\n");
@@ -3332,9 +3316,11 @@ static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
switch (service) {
case ME4000_AI_SC_SETUP:
- return me4000_ai_sc_setup((me4000_ai_sc_t *) arg, ai_context);
+ return me4000_ai_sc_setup((struct me4000_ai_sc *)arg,
+ ai_context);
case ME4000_AI_CONFIG:
- return me4000_ai_config((me4000_ai_config_t *) arg, ai_context);
+ return me4000_ai_config((struct me4000_ai_config *)arg,
+ ai_context);
case ME4000_AI_START:
return me4000_ai_start(ai_context);
case ME4000_AI_STOP:
@@ -3344,19 +3330,20 @@ static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
case ME4000_AI_FSM_STATE:
return me4000_ai_fsm_state((int *)arg, ai_context);
case ME4000_GET_USER_INFO:
- return me4000_get_user_info((me4000_user_info_t *) arg,
+ return me4000_get_user_info((struct me4000_user_info *)arg,
ai_context->board_info);
case ME4000_AI_EEPROM_READ:
- return me4000_eeprom_read((me4000_eeprom_t *) arg, ai_context);
+ return me4000_eeprom_read((struct me4000_eeprom *)arg,
+ ai_context);
case ME4000_AI_EEPROM_WRITE:
- return me4000_eeprom_write((me4000_eeprom_t *) arg, ai_context);
+ return me4000_eeprom_write((struct me4000_eeprom *)arg,
+ ai_context);
case ME4000_AI_GET_COUNT_BUFFER:
return me4000_ai_get_count_buffer((unsigned long *)arg,
ai_context);
default:
printk(KERN_ERR
- "ME4000:me4000_ai_ioctl_sw():Invalid service number %d\n",
- service);
+ "%s:Invalid service number %d\n", __func__, service);
return -ENOTTY;
}
return 0;
@@ -3365,7 +3352,7 @@ static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
unsigned int service, unsigned long arg)
{
- me4000_ai_context_t *ai_context;
+ struct me4000_ai_context *ai_context;
CALL_PDEBUG("me4000_ai_ioctl_ext() is executed\n");
@@ -3383,9 +3370,11 @@ static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
switch (service) {
case ME4000_AI_SC_SETUP:
- return me4000_ai_sc_setup((me4000_ai_sc_t *) arg, ai_context);
+ return me4000_ai_sc_setup((struct me4000_ai_sc *)arg,
+ ai_context);
case ME4000_AI_CONFIG:
- return me4000_ai_config((me4000_ai_config_t *) arg, ai_context);
+ return me4000_ai_config((struct me4000_ai_config *)arg,
+ ai_context);
case ME4000_AI_START:
return me4000_ai_start_ex((unsigned long *)arg, ai_context);
case ME4000_AI_STOP:
@@ -3397,20 +3386,19 @@ static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
case ME4000_AI_EX_TRIG_DISABLE:
return me4000_ai_ex_trig_disable(ai_context);
case ME4000_AI_EX_TRIG_SETUP:
- return me4000_ai_ex_trig_setup((me4000_ai_trigger_t *) arg,
+ return me4000_ai_ex_trig_setup((struct me4000_ai_trigger *)arg,
ai_context);
case ME4000_AI_FSM_STATE:
return me4000_ai_fsm_state((int *)arg, ai_context);
case ME4000_GET_USER_INFO:
- return me4000_get_user_info((me4000_user_info_t *) arg,
+ return me4000_get_user_info((struct me4000_user_info *)arg,
ai_context->board_info);
case ME4000_AI_GET_COUNT_BUFFER:
return me4000_ai_get_count_buffer((unsigned long *)arg,
ai_context);
default:
printk(KERN_ERR
- "ME4000:me4000_ai_ioctl_ext():Invalid service number %d\n",
- service);
+ "%s:Invalid service number %d\n", __func__ , service);
return -ENOTTY;
}
return 0;
@@ -3418,7 +3406,7 @@ static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
static int me4000_ai_fasync(int fd, struct file *file_p, int mode)
{
- me4000_ai_context_t *ai_context;
+ struct me4000_ai_context *ai_context;
CALL_PDEBUG("me4000_ao_fasync_cont() is executed\n");
@@ -3426,10 +3414,10 @@ static int me4000_ai_fasync(int fd, struct file *file_p, int mode)
return fasync_helper(fd, file_p, mode, &ai_context->fasync_p);
}
-static int me4000_ai_config(me4000_ai_config_t * arg,
- me4000_ai_context_t * ai_context)
+static int me4000_ai_config(struct me4000_ai_config *arg,
+ struct me4000_ai_context *ai_context)
{
- me4000_ai_config_t cmd;
+ struct me4000_ai_config cmd;
u32 *list = NULL;
u32 mode;
int i;
@@ -3451,7 +3439,7 @@ static int me4000_ai_config(me4000_ai_config_t * arg,
}
/* Copy data from user */
- err = copy_from_user(&cmd, arg, sizeof(me4000_ai_config_t));
+ err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_config));
if (err) {
printk(KERN_ERR
"ME4000:me4000_ai_config():Can't copy from user space\n");
@@ -3671,7 +3659,7 @@ static int me4000_ai_config(me4000_ai_config_t * arg,
return 0;
- AI_CONFIG_ERR:
+AI_CONFIG_ERR:
/* Reset the timers */
ai_context->chan_timer = 66;
@@ -3699,7 +3687,7 @@ static int me4000_ai_config(me4000_ai_config_t * arg,
}
-static int ai_common_start(me4000_ai_context_t * ai_context)
+static int ai_common_start(struct me4000_ai_context *ai_context)
{
u32 tmp;
CALL_PDEBUG("ai_common_start() is executed\n");
@@ -3762,7 +3750,7 @@ static int ai_common_start(me4000_ai_context_t * ai_context)
return 0;
}
-static int me4000_ai_start(me4000_ai_context_t * ai_context)
+static int me4000_ai_start(struct me4000_ai_context *ai_context)
{
int err;
CALL_PDEBUG("me4000_ai_start() is executed\n");
@@ -3779,7 +3767,7 @@ static int me4000_ai_start(me4000_ai_context_t * ai_context)
}
static int me4000_ai_start_ex(unsigned long *arg,
- me4000_ai_context_t * ai_context)
+ struct me4000_ai_context *ai_context)
{
int err;
wait_queue_head_t queue;
@@ -3834,7 +3822,7 @@ static int me4000_ai_start_ex(unsigned long *arg,
return 0;
}
-static int me4000_ai_stop(me4000_ai_context_t * ai_context)
+static int me4000_ai_stop(struct me4000_ai_context *ai_context)
{
wait_queue_head_t queue;
u32 tmp;
@@ -3871,7 +3859,7 @@ static int me4000_ai_stop(me4000_ai_context_t * ai_context)
return 0;
}
-static int me4000_ai_immediate_stop(me4000_ai_context_t * ai_context)
+static int me4000_ai_immediate_stop(struct me4000_ai_context *ai_context)
{
wait_queue_head_t queue;
u32 tmp;
@@ -3908,7 +3896,7 @@ static int me4000_ai_immediate_stop(me4000_ai_context_t * ai_context)
return 0;
}
-static int me4000_ai_ex_trig_enable(me4000_ai_context_t * ai_context)
+static int me4000_ai_ex_trig_enable(struct me4000_ai_context *ai_context)
{
u32 tmp;
unsigned long flags;
@@ -3924,7 +3912,7 @@ static int me4000_ai_ex_trig_enable(me4000_ai_context_t * ai_context)
return 0;
}
-static int me4000_ai_ex_trig_disable(me4000_ai_context_t * ai_context)
+static int me4000_ai_ex_trig_disable(struct me4000_ai_context *ai_context)
{
u32 tmp;
unsigned long flags;
@@ -3940,10 +3928,10 @@ static int me4000_ai_ex_trig_disable(me4000_ai_context_t * ai_context)
return 0;
}
-static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t * arg,
- me4000_ai_context_t * ai_context)
+static int me4000_ai_ex_trig_setup(struct me4000_ai_trigger *arg,
+ struct me4000_ai_context *ai_context)
{
- me4000_ai_trigger_t cmd;
+ struct me4000_ai_trigger cmd;
int err;
u32 tmp;
unsigned long flags;
@@ -3951,7 +3939,7 @@ static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t * arg,
CALL_PDEBUG("me4000_ai_ex_trig_setup() is executed\n");
/* Copy data from user */
- err = copy_from_user(&cmd, arg, sizeof(me4000_ai_trigger_t));
+ err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_trigger));
if (err) {
printk(KERN_ERR
"ME4000:me4000_ai_ex_trig_setup():Can't copy from user space\n");
@@ -4000,16 +3988,16 @@ static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t * arg,
return 0;
}
-static int me4000_ai_sc_setup(me4000_ai_sc_t * arg,
- me4000_ai_context_t * ai_context)
+static int me4000_ai_sc_setup(struct me4000_ai_sc *arg,
+ struct me4000_ai_context *ai_context)
{
- me4000_ai_sc_t cmd;
+ struct me4000_ai_sc cmd;
int err;
CALL_PDEBUG("me4000_ai_sc_setup() is executed\n");
/* Copy data from user */
- err = copy_from_user(&cmd, arg, sizeof(me4000_ai_sc_t));
+ err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_sc));
if (err) {
printk(KERN_ERR
"ME4000:me4000_ai_sc_setup():Can't copy from user space\n");
@@ -4023,9 +4011,9 @@ static int me4000_ai_sc_setup(me4000_ai_sc_t * arg,
}
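me4000_ai_sc_setup() and the other ioctl helpers in this file share one copy_from_user()/copy_to_user() round-trip; the hunks only rename the structure types being copied. The bare pattern, on a hypothetical request structure:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical ioctl payload, for illustration only. */
struct demo_req {
	unsigned long in;
	unsigned long out;
};

static int demo_ioctl_handler(struct demo_req __user *arg)
{
	struct demo_req req;

	/* Pull the request into kernel space; either copy can fault. */
	if (copy_from_user(&req, arg, sizeof(struct demo_req)))
		return -EFAULT;

	req.out = req.in * 2;	/* placeholder for the real work */

	if (copy_to_user(arg, &req, sizeof(struct demo_req)))
		return -EFAULT;
	return 0;
}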
static ssize_t me4000_ai_read(struct file *filep, char *buff, size_t cnt,
- loff_t * offp)
+ loff_t *offp)
{
- me4000_ai_context_t *ai_context = filep->private_data;
+ struct me4000_ai_context *ai_context = filep->private_data;
s16 *buffer = (s16 *) buff;
size_t count = cnt / 2;
unsigned long flags;
@@ -4150,9 +4138,9 @@ static ssize_t me4000_ai_read(struct file *filep, char *buff, size_t cnt,
return ret * 2;
}
-static unsigned int me4000_ai_poll(struct file *file_p, poll_table * wait)
+static unsigned int me4000_ai_poll(struct file *file_p, poll_table *wait)
{
- me4000_ai_context_t *ai_context;
+ struct me4000_ai_context *ai_context;
unsigned long mask = 0;
CALL_PDEBUG("me4000_ai_poll() is executed\n");
@@ -4171,7 +4159,7 @@ static unsigned int me4000_ai_poll(struct file *file_p, poll_table * wait)
return mask;
}
-static int me4000_ai_offset_enable(me4000_ai_context_t * ai_context)
+static int me4000_ai_offset_enable(struct me4000_ai_context *ai_context)
{
unsigned long tmp;
@@ -4184,7 +4172,7 @@ static int me4000_ai_offset_enable(me4000_ai_context_t * ai_context)
return 0;
}
-static int me4000_ai_offset_disable(me4000_ai_context_t * ai_context)
+static int me4000_ai_offset_disable(struct me4000_ai_context *ai_context)
{
unsigned long tmp;
@@ -4197,7 +4185,7 @@ static int me4000_ai_offset_disable(me4000_ai_context_t * ai_context)
return 0;
}
-static int me4000_ai_fullscale_enable(me4000_ai_context_t * ai_context)
+static int me4000_ai_fullscale_enable(struct me4000_ai_context *ai_context)
{
unsigned long tmp;
@@ -4210,7 +4198,7 @@ static int me4000_ai_fullscale_enable(me4000_ai_context_t * ai_context)
return 0;
}
-static int me4000_ai_fullscale_disable(me4000_ai_context_t * ai_context)
+static int me4000_ai_fullscale_disable(struct me4000_ai_context *ai_context)
{
unsigned long tmp;
@@ -4223,7 +4211,7 @@ static int me4000_ai_fullscale_disable(me4000_ai_context_t * ai_context)
return 0;
}
-static int me4000_ai_fsm_state(int *arg, me4000_ai_context_t * ai_context)
+static int me4000_ai_fsm_state(int *arg, struct me4000_ai_context *ai_context)
{
unsigned long tmp;
@@ -4242,7 +4230,7 @@ static int me4000_ai_fsm_state(int *arg, me4000_ai_context_t * ai_context)
}
static int me4000_ai_get_count_buffer(unsigned long *arg,
- me4000_ai_context_t * ai_context)
+ struct me4000_ai_context *ai_context)
{
unsigned long c;
int err;
@@ -4252,7 +4240,7 @@ static int me4000_ai_get_count_buffer(unsigned long *arg,
err = copy_to_user(arg, &c, sizeof(unsigned long));
if (err) {
printk(KERN_ERR
- "ME4000:me4000_ai_get_count_buffer():Can't copy to user space\n");
+ "%s:Can't copy to user space\n", __func__);
return -EFAULT;
}
@@ -4261,7 +4249,7 @@ static int me4000_ai_get_count_buffer(unsigned long *arg,
/*---------------------------------- EEPROM stuff ---------------------------*/
-static int eeprom_write_cmd(me4000_ai_context_t * ai_context, unsigned long cmd,
+static int eeprom_write_cmd(struct me4000_ai_context *ai_context, unsigned long cmd,
int length)
{
int i;
@@ -4318,7 +4306,7 @@ static int eeprom_write_cmd(me4000_ai_context_t * ai_context, unsigned long cmd,
return 0;
}
-static unsigned short eeprom_read_cmd(me4000_ai_context_t * ai_context,
+static unsigned short eeprom_read_cmd(struct me4000_ai_context *ai_context,
unsigned long cmd, int length)
{
int i;
@@ -4397,11 +4385,11 @@ static unsigned short eeprom_read_cmd(me4000_ai_context_t * ai_context,
return id;
}
-static int me4000_eeprom_write(me4000_eeprom_t * arg,
- me4000_ai_context_t * ai_context)
+static int me4000_eeprom_write(struct me4000_eeprom *arg,
+ struct me4000_ai_context *ai_context)
{
int err;
- me4000_eeprom_t setup;
+ struct me4000_eeprom setup;
unsigned long cmd;
unsigned long date_high;
unsigned long date_low;
@@ -4594,12 +4582,12 @@ static int me4000_eeprom_write(me4000_eeprom_t * arg,
return 0;
}
-static int me4000_eeprom_read(me4000_eeprom_t * arg,
- me4000_ai_context_t * ai_context)
+static int me4000_eeprom_read(struct me4000_eeprom *arg,
+ struct me4000_ai_context *ai_context)
{
int err;
unsigned long cmd;
- me4000_eeprom_t setup;
+ struct me4000_eeprom setup;
CALL_PDEBUG("me4000_eeprom_read() is executed\n");
@@ -4687,7 +4675,7 @@ static int me4000_eeprom_read(me4000_eeprom_t * arg,
static int me4000_dio_ioctl(struct inode *inode_p, struct file *file_p,
unsigned int service, unsigned long arg)
{
- me4000_dio_context_t *dio_context;
+ struct me4000_dio_context *dio_context;
CALL_PDEBUG("me4000_dio_ioctl() is executed\n");
@@ -4704,13 +4692,13 @@ static int me4000_dio_ioctl(struct inode *inode_p, struct file *file_p,
switch (service) {
case ME4000_DIO_CONFIG:
- return me4000_dio_config((me4000_dio_config_t *) arg,
+ return me4000_dio_config((struct me4000_dio_config *)arg,
dio_context);
case ME4000_DIO_SET_BYTE:
- return me4000_dio_set_byte((me4000_dio_byte_t *) arg,
+ return me4000_dio_set_byte((struct me4000_dio_byte *)arg,
dio_context);
case ME4000_DIO_GET_BYTE:
- return me4000_dio_get_byte((me4000_dio_byte_t *) arg,
+ return me4000_dio_get_byte((struct me4000_dio_byte *)arg,
dio_context);
case ME4000_DIO_RESET:
return me4000_dio_reset(dio_context);
@@ -4723,17 +4711,17 @@ static int me4000_dio_ioctl(struct inode *inode_p, struct file *file_p,
return 0;
}
-static int me4000_dio_config(me4000_dio_config_t * arg,
- me4000_dio_context_t * dio_context)
+static int me4000_dio_config(struct me4000_dio_config *arg,
+ struct me4000_dio_context *dio_context)
{
- me4000_dio_config_t cmd;
+ struct me4000_dio_config cmd;
u32 tmp;
int err;
CALL_PDEBUG("me4000_dio_config() is executed\n");
/* Copy data from user */
- err = copy_from_user(&cmd, arg, sizeof(me4000_dio_config_t));
+ err = copy_from_user(&cmd, arg, sizeof(struct me4000_dio_config));
if (err) {
printk(KERN_ERR
"ME4000:me4000_dio_config():Can't copy from user space\n");
@@ -4964,16 +4952,16 @@ static int me4000_dio_config(me4000_dio_config_t * arg,
return 0;
}
-static int me4000_dio_set_byte(me4000_dio_byte_t * arg,
- me4000_dio_context_t * dio_context)
+static int me4000_dio_set_byte(struct me4000_dio_byte *arg,
+ struct me4000_dio_context *dio_context)
{
- me4000_dio_byte_t cmd;
+ struct me4000_dio_byte cmd;
int err;
CALL_PDEBUG("me4000_dio_set_byte() is executed\n");
/* Copy data from user */
- err = copy_from_user(&cmd, arg, sizeof(me4000_dio_byte_t));
+ err = copy_from_user(&cmd, arg, sizeof(struct me4000_dio_byte));
if (err) {
printk(KERN_ERR
"ME4000:me4000_dio_set_byte():Can't copy from user space\n");
@@ -5030,16 +5018,16 @@ static int me4000_dio_set_byte(me4000_dio_byte_t * arg,
return 0;
}
-static int me4000_dio_get_byte(me4000_dio_byte_t * arg,
- me4000_dio_context_t * dio_context)
+static int me4000_dio_get_byte(struct me4000_dio_byte *arg,
+ struct me4000_dio_context *dio_context)
{
- me4000_dio_byte_t cmd;
+ struct me4000_dio_byte cmd;
int err;
CALL_PDEBUG("me4000_dio_get_byte() is executed\n");
/* Copy data from user */
- err = copy_from_user(&cmd, arg, sizeof(me4000_dio_byte_t));
+ err = copy_from_user(&cmd, arg, sizeof(struct me4000_dio_byte));
if (err) {
printk(KERN_ERR
"ME4000:me4000_dio_get_byte():Can't copy from user space\n");
@@ -5070,7 +5058,7 @@ static int me4000_dio_get_byte(me4000_dio_byte_t * arg,
}
/* Copy result back to user */
- err = copy_to_user(arg, &cmd, sizeof(me4000_dio_byte_t));
+ err = copy_to_user(arg, &cmd, sizeof(struct me4000_dio_byte));
if (err) {
printk(KERN_ERR
"ME4000:me4000_dio_get_byte():Can't copy to user space\n");
@@ -5080,7 +5068,7 @@ static int me4000_dio_get_byte(me4000_dio_byte_t * arg,
return 0;
}
-static int me4000_dio_reset(me4000_dio_context_t * dio_context)
+static int me4000_dio_reset(struct me4000_dio_context *dio_context)
{
CALL_PDEBUG("me4000_dio_reset() is executed\n");
@@ -5101,7 +5089,7 @@ static int me4000_dio_reset(me4000_dio_context_t * dio_context)
static int me4000_cnt_ioctl(struct inode *inode_p, struct file *file_p,
unsigned int service, unsigned long arg)
{
- me4000_cnt_context_t *cnt_context;
+ struct me4000_cnt_context *cnt_context;
CALL_PDEBUG("me4000_cnt_ioctl() is executed\n");
@@ -5118,11 +5106,11 @@ static int me4000_cnt_ioctl(struct inode *inode_p, struct file *file_p,
switch (service) {
case ME4000_CNT_READ:
- return me4000_cnt_read((me4000_cnt_t *) arg, cnt_context);
+ return me4000_cnt_read((struct me4000_cnt *)arg, cnt_context);
case ME4000_CNT_WRITE:
- return me4000_cnt_write((me4000_cnt_t *) arg, cnt_context);
+ return me4000_cnt_write((struct me4000_cnt *)arg, cnt_context);
case ME4000_CNT_CONFIG:
- return me4000_cnt_config((me4000_cnt_config_t *) arg,
+ return me4000_cnt_config((struct me4000_cnt_config *)arg,
cnt_context);
case ME4000_CNT_RESET:
return me4000_cnt_reset(cnt_context);
@@ -5135,10 +5123,10 @@ static int me4000_cnt_ioctl(struct inode *inode_p, struct file *file_p,
return 0;
}
-static int me4000_cnt_config(me4000_cnt_config_t * arg,
- me4000_cnt_context_t * cnt_context)
+static int me4000_cnt_config(struct me4000_cnt_config *arg,
+ struct me4000_cnt_context *cnt_context)
{
- me4000_cnt_config_t cmd;
+ struct me4000_cnt_config cmd;
u8 counter;
u8 mode;
int err;
@@ -5146,7 +5134,7 @@ static int me4000_cnt_config(me4000_cnt_config_t * arg,
CALL_PDEBUG("me4000_cnt_config() is executed\n");
/* Copy data from user */
- err = copy_from_user(&cmd, arg, sizeof(me4000_cnt_config_t));
+ err = copy_from_user(&cmd, arg, sizeof(struct me4000_cnt_config));
if (err) {
printk(KERN_ERR
"ME4000:me4000_cnt_config():Can't copy from user space\n");
@@ -5204,17 +5192,17 @@ static int me4000_cnt_config(me4000_cnt_config_t * arg,
return 0;
}
-static int me4000_cnt_read(me4000_cnt_t * arg,
- me4000_cnt_context_t * cnt_context)
+static int me4000_cnt_read(struct me4000_cnt *arg,
+ struct me4000_cnt_context *cnt_context)
{
- me4000_cnt_t cmd;
+ struct me4000_cnt cmd;
u8 tmp;
int err;
CALL_PDEBUG("me4000_cnt_read() is executed\n");
/* Copy data from user */
- err = copy_from_user(&cmd, arg, sizeof(me4000_cnt_t));
+ err = copy_from_user(&cmd, arg, sizeof(struct me4000_cnt));
if (err) {
printk(KERN_ERR
"ME4000:me4000_cnt_read():Can't copy from user space\n");
@@ -5249,7 +5237,7 @@ static int me4000_cnt_read(me4000_cnt_t * arg,
}
/* Copy result back to user */
- err = copy_to_user(arg, &cmd, sizeof(me4000_cnt_t));
+ err = copy_to_user(arg, &cmd, sizeof(struct me4000_cnt));
if (err) {
printk(KERN_ERR
"ME4000:me4000_cnt_read():Can't copy to user space\n");
@@ -5259,17 +5247,17 @@ static int me4000_cnt_read(me4000_cnt_t * arg,
return 0;
}
-static int me4000_cnt_write(me4000_cnt_t * arg,
- me4000_cnt_context_t * cnt_context)
+static int me4000_cnt_write(struct me4000_cnt *arg,
+ struct me4000_cnt_context *cnt_context)
{
- me4000_cnt_t cmd;
+ struct me4000_cnt cmd;
u8 tmp;
int err;
CALL_PDEBUG("me4000_cnt_write() is executed\n");
/* Copy data from user */
- err = copy_from_user(&cmd, arg, sizeof(me4000_cnt_t));
+ err = copy_from_user(&cmd, arg, sizeof(struct me4000_cnt));
if (err) {
printk(KERN_ERR
"ME4000:me4000_cnt_write():Can't copy from user space\n");
@@ -5306,7 +5294,7 @@ static int me4000_cnt_write(me4000_cnt_t * arg,
return 0;
}
-static int me4000_cnt_reset(me4000_cnt_context_t * cnt_context)
+static int me4000_cnt_reset(struct me4000_cnt_context *cnt_context)
{
CALL_PDEBUG("me4000_cnt_reset() is executed\n");
@@ -5333,7 +5321,7 @@ static int me4000_cnt_reset(me4000_cnt_context_t * cnt_context)
static int me4000_ext_int_ioctl(struct inode *inode_p, struct file *file_p,
unsigned int service, unsigned long arg)
{
- me4000_ext_int_context_t *ext_int_context;
+ struct me4000_ext_int_context *ext_int_context;
CALL_PDEBUG("me4000_ext_int_ioctl() is executed\n");
@@ -5366,7 +5354,7 @@ static int me4000_ext_int_ioctl(struct inode *inode_p, struct file *file_p,
return 0;
}
-static int me4000_ext_int_enable(me4000_ext_int_context_t * ext_int_context)
+static int me4000_ext_int_enable(struct me4000_ext_int_context *ext_int_context)
{
unsigned long tmp;
@@ -5379,7 +5367,7 @@ static int me4000_ext_int_enable(me4000_ext_int_context_t * ext_int_context)
return 0;
}
-static int me4000_ext_int_disable(me4000_ext_int_context_t * ext_int_context)
+static int me4000_ext_int_disable(struct me4000_ext_int_context *ext_int_context)
{
unsigned long tmp;
@@ -5393,7 +5381,7 @@ static int me4000_ext_int_disable(me4000_ext_int_context_t * ext_int_context)
}
static int me4000_ext_int_count(unsigned long *arg,
- me4000_ext_int_context_t * ext_int_context)
+ struct me4000_ext_int_context *ext_int_context)
{
CALL_PDEBUG("me4000_ext_int_count() is executed\n");
@@ -5404,10 +5392,10 @@ static int me4000_ext_int_count(unsigned long *arg,
/*------------------------------------ General stuff ------------------------------------*/
-static int me4000_get_user_info(me4000_user_info_t * arg,
- me4000_info_t * board_info)
+static int me4000_get_user_info(struct me4000_user_info *arg,
+ struct me4000_info *board_info)
{
- me4000_user_info_t user_info;
+ struct me4000_user_info user_info;
CALL_PDEBUG("me4000_get_user_info() is executed\n");
@@ -5437,7 +5425,7 @@ static int me4000_get_user_info(me4000_user_info_t * arg,
user_info.cnt_count = board_info->board_p->cnt.count;
- if (copy_to_user(arg, &user_info, sizeof(me4000_user_info_t)))
+ if (copy_to_user(arg, &user_info, sizeof(struct me4000_user_info)))
return -EFAULT;
return 0;
@@ -5448,7 +5436,7 @@ static int me4000_get_user_info(me4000_user_info_t * arg,
static int me4000_ext_int_fasync(int fd, struct file *file_ptr, int mode)
{
int result = 0;
- me4000_ext_int_context_t *ext_int_context;
+ struct me4000_ext_int_context *ext_int_context;
CALL_PDEBUG("me4000_ext_int_fasync() is executed\n");
@@ -5465,7 +5453,7 @@ static irqreturn_t me4000_ao_isr(int irq, void *dev_id)
{
u32 tmp;
u32 value;
- me4000_ao_context_t *ao_context;
+ struct me4000_ao_context *ao_context;
int i;
int c = 0;
int c1 = 0;
@@ -5589,7 +5577,7 @@ static irqreturn_t me4000_ao_isr(int irq, void *dev_id)
static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
{
u32 tmp;
- me4000_ai_context_t *ai_context;
+ struct me4000_ai_context *ai_context;
int i;
int c = 0;
int c1 = 0;
@@ -5933,7 +5921,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
static irqreturn_t me4000_ext_int_isr(int irq, void *dev_id)
{
- me4000_ext_int_context_t *ext_int_context;
+ struct me4000_ext_int_context *ext_int_context;
unsigned long tmp;
ISR_PDEBUG("me4000_ext_int_isr() is executed\n");
@@ -5969,10 +5957,10 @@ static irqreturn_t me4000_ext_int_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-void __exit me4000_module_exit(void)
+static void __exit me4000_module_exit(void)
{
struct list_head *board_p;
- me4000_info_t *board_info;
+ struct me4000_info *board_info;
CALL_PDEBUG("cleanup_module() is executed\n");
@@ -5993,7 +5981,7 @@ void __exit me4000_module_exit(void)
/* Reset the boards */
for (board_p = me4000_board_info_list.next;
board_p != &me4000_board_info_list; board_p = board_p->next) {
- board_info = list_entry(board_p, me4000_info_t, list);
+ board_info = list_entry(board_p, struct me4000_info, list);
me4000_reset_board(board_info);
}
@@ -6007,7 +5995,7 @@ static int me4000_read_procmem(char *buf, char **start, off_t offset, int count,
{
int len = 0;
int limit = count - 1000;
- me4000_info_t *board_info;
+ struct me4000_info *board_info;
struct list_head *ptr;
len += sprintf(buf + len, "\nME4000 DRIVER VERSION %X.%X.%X\n\n",
@@ -6019,7 +6007,7 @@ static int me4000_read_procmem(char *buf, char **start, off_t offset, int count,
for (ptr = me4000_board_info_list.next;
(ptr != &me4000_board_info_list) && (len < limit);
ptr = ptr->next) {
- board_info = list_entry(ptr, me4000_info_t, list);
+ board_info = list_entry(ptr, struct me4000_info, list);
len +=
sprintf(buf + len, "Board number %d:\n",
@@ -6029,14 +6017,14 @@ static int me4000_read_procmem(char *buf, char **start, off_t offset, int count,
sprintf(buf + len, "PLX base register = 0x%lX\n",
board_info->plx_regbase);
len +=
- sprintf(buf + len, "PLX base register size = 0x%lX\n",
- board_info->plx_regbase_size);
+ sprintf(buf + len, "PLX base register size = 0x%X\n",
+ (unsigned int)board_info->plx_regbase_size);
len +=
- sprintf(buf + len, "ME4000 base register = 0x%lX\n",
- board_info->me4000_regbase);
+ sprintf(buf + len, "ME4000 base register = 0x%X\n",
+ (unsigned int)board_info->me4000_regbase);
len +=
- sprintf(buf + len, "ME4000 base register size = 0x%lX\n",
- board_info->me4000_regbase_size);
+ sprintf(buf + len, "ME4000 base register size = 0x%X\n",
+ (unsigned int)board_info->me4000_regbase_size);
len +=
sprintf(buf + len, "Serial number = 0x%X\n",
board_info->serial_no);
diff --git a/drivers/staging/me4000/me4000.h b/drivers/staging/me4000/me4000.h
index c35e4b9793a..81c6f4d5e25 100644
--- a/drivers/staging/me4000/me4000.h
+++ b/drivers/staging/me4000/me4000.h
@@ -329,46 +329,46 @@
Circular buffer used for analog input/output reads/writes.
===========================================================================*/
-typedef struct me4000_circ_buf {
+struct me4000_circ_buf {
s16 *buf;
int volatile head;
int volatile tail;
-} me4000_circ_buf_t;
+};
/*=============================================================================
Information about the hardware capabilities
===========================================================================*/
-typedef struct me4000_ao_info {
+struct me4000_ao_info {
int count;
int fifo_count;
-} me4000_ao_info_t;
+};
-typedef struct me4000_ai_info {
+struct me4000_ai_info {
int count;
int sh_count;
int diff_count;
int ex_trig_analog;
-} me4000_ai_info_t;
+};
-typedef struct me4000_dio_info {
+struct me4000_dio_info {
int count;
-} me4000_dio_info_t;
+};
-typedef struct me4000_cnt_info {
+struct me4000_cnt_info {
int count;
-} me4000_cnt_info_t;
+};
-typedef struct me4000_board {
+struct me4000_board {
u16 vendor_id;
u16 device_id;
- me4000_ao_info_t ao;
- me4000_ai_info_t ai;
- me4000_dio_info_t dio;
- me4000_cnt_info_t cnt;
-} me4000_board_t;
+ struct me4000_ao_info ao;
+ struct me4000_ai_info ai;
+ struct me4000_dio_info dio;
+ struct me4000_cnt_info cnt;
+};
-static me4000_board_t me4000_boards[] = {
+static struct me4000_board me4000_boards[] = {
{PCI_VENDOR_ID_MEILHAUS, 0x4610, {0, 0}, {16, 0, 0, 0}, {4}, {3}},
{PCI_VENDOR_ID_MEILHAUS, 0x4650, {0, 0}, {16, 0, 0, 0}, {4}, {0}},
@@ -391,8 +391,6 @@ static me4000_board_t me4000_boards[] = {
{0},
};
-#define ME4000_BOARD_VERSIONS (sizeof(me4000_boards) / sizeof(me4000_board_t) - 1)
-
/*=============================================================================
PCI device table.
This is used by modprobe to translate PCI IDs to drivers.
@@ -427,19 +425,19 @@ MODULE_DEVICE_TABLE(pci, me4000_pci_table);
Global board and subdevice information structures
===========================================================================*/
-typedef struct me4000_info {
+struct me4000_info {
struct list_head list; // List of all detected boards
int board_count; // Index of the board after detection
unsigned long plx_regbase; // PLX configuration space base address
- unsigned long me4000_regbase; // Base address of the ME4000
- unsigned long timer_regbase; // Base address of the timer circuit
- unsigned long program_regbase; // Base address to set the program pin for the xilinx
+ resource_size_t me4000_regbase; // Base address of the ME4000
+ resource_size_t timer_regbase; // Base address of the timer circuit
+ resource_size_t program_regbase; // Base address to set the program pin for the xilinx
unsigned long plx_regbase_size; // PLX register set space
- unsigned long me4000_regbase_size; // ME4000 register set space
- unsigned long timer_regbase_size; // Timer circuit register set space
- unsigned long program_regbase_size; // Size of program base address of the ME4000
+ resource_size_t me4000_regbase_size; // ME4000 register set space
+ resource_size_t timer_regbase_size; // Timer circuit register set space
+ resource_size_t program_regbase_size; // Size of program base address of the ME4000
unsigned int serial_no; // Serial number of the board
unsigned char hw_revision; // Hardware revision of the board
@@ -451,7 +449,7 @@ typedef struct me4000_info {
int pci_func_no; // PCI function number
struct pci_dev *pci_dev_p; // General PCI information
- me4000_board_t *board_p; // Holds the board capabilities
+ struct me4000_board *board_p; // Holds the board capabilities
unsigned int irq; // IRQ assigned from the PCI BIOS
unsigned int irq_count; // Count of external interrupts
@@ -464,18 +462,18 @@ typedef struct me4000_info {
struct me4000_dio_context *dio_context; // Digital I/O specific context
struct me4000_cnt_context *cnt_context; // Counter specific context
struct me4000_ext_int_context *ext_int_context; // External interrupt specific context
-} me4000_info_t;
+};
-typedef struct me4000_ao_context {
+struct me4000_ao_context {
struct list_head list; // linked list of me4000_ao_context_t
int index; // Index in the list
int mode; // Indicates mode (0 = single, 1 = wraparound, 2 = continuous)
int dac_in_use; // Indicates if already opened
spinlock_t use_lock; // Guards in_use
spinlock_t int_lock; // Used when locking out interrupts
- me4000_circ_buf_t circ_buf; // Circular buffer
+ struct me4000_circ_buf circ_buf; // Circular buffer
wait_queue_head_t wait_queue; // Wait queue to sleep while blocking write
- me4000_info_t *board_info;
+ struct me4000_info *board_info;
unsigned int irq; // The irq associated with this ADC
int volatile pipe_flag; // Indicates broken pipe set from me4000_ao_isr()
unsigned long ctrl_reg;
@@ -486,9 +484,9 @@ typedef struct me4000_ao_context {
unsigned long irq_status_reg;
unsigned long preload_reg;
struct fasync_struct *fasync_p; // Queue for asynchronous notification
-} me4000_ao_context_t;
+};
-typedef struct me4000_ai_context {
+struct me4000_ai_context {
struct list_head list; // linked list of me4000_ai_info_t
int mode; // Indicates mode
int in_use; // Indicates if already opened
@@ -496,9 +494,9 @@ typedef struct me4000_ai_context {
spinlock_t int_lock; // Used when locking out interrupts
int number; // Number of the DAC
unsigned int irq; // The irq associated with this ADC
- me4000_circ_buf_t circ_buf; // Circular buffer
+ struct me4000_circ_buf circ_buf; // Circular buffer
wait_queue_head_t wait_queue; // Wait queue to sleep while blocking read
- me4000_info_t *board_info;
+ struct me4000_info *board_info;
struct fasync_struct *fasync_p; // Queue for asynchronous notification
@@ -523,48 +521,48 @@ typedef struct me4000_ai_context {
unsigned long channel_list_count;
unsigned long sample_counter;
int sample_counter_reload;
-} me4000_ai_context_t;
+};
-typedef struct me4000_dio_context {
+struct me4000_dio_context {
struct list_head list; // linked list of me4000_dio_context_t
int in_use; // Indicates if already opened
spinlock_t use_lock; // Guards in_use
int number;
int dio_count;
- me4000_info_t *board_info;
+ struct me4000_info *board_info;
unsigned long dir_reg;
unsigned long ctrl_reg;
unsigned long port_0_reg;
unsigned long port_1_reg;
unsigned long port_2_reg;
unsigned long port_3_reg;
-} me4000_dio_context_t;
+};
-typedef struct me4000_cnt_context {
+struct me4000_cnt_context {
struct list_head list; // linked list of me4000_dio_context_t
int in_use; // Indicates if already opened
spinlock_t use_lock; // Guards in_use
int number;
int cnt_count;
- me4000_info_t *board_info;
+ struct me4000_info *board_info;
unsigned long ctrl_reg;
unsigned long counter_0_reg;
unsigned long counter_1_reg;
unsigned long counter_2_reg;
-} me4000_cnt_context_t;
+};
-typedef struct me4000_ext_int_context {
+struct me4000_ext_int_context {
struct list_head list; // linked list of me4000_dio_context_t
int in_use; // Indicates if already opened
spinlock_t use_lock; // Guards in_use
int number;
- me4000_info_t *board_info;
+ struct me4000_info *board_info;
unsigned int irq;
unsigned long int_count;
struct fasync_struct *fasync_ptr;
unsigned long ctrl_reg;
unsigned long irq_status_reg;
-} me4000_ext_int_context_t;
+};
#endif
@@ -745,12 +743,12 @@ typedef struct me4000_ext_int_context {
General type definitions
----------------------------------------------------------------------------*/
-typedef struct me4000_user_info {
+struct me4000_user_info {
int board_count; // Index of the board after detection
unsigned long plx_regbase; // PLX configuration space base address
- unsigned long me4000_regbase; // Base address of the ME4000
+ resource_size_t me4000_regbase; // Base address of the ME4000
unsigned long plx_regbase_size; // PLX register set space
- unsigned long me4000_regbase_size; // ME4000 register set space
+ resource_size_t me4000_regbase_size; // ME4000 register set space
unsigned long serial_no; // Serial number of the board
unsigned char hw_revision; // Hardware revision of the board
unsigned short vendor_id; // Meilhaus vendor id (0x1402)
@@ -773,62 +771,62 @@ typedef struct me4000_user_info {
int dio_count; // Count of digital I/O ports
int cnt_count; // Count of counters
-} me4000_user_info_t;
+};
/*-----------------------------------------------------------------------------
Type definitions for analog output
----------------------------------------------------------------------------*/
-typedef struct me4000_ao_channel_list {
+struct me4000_ao_channel_list {
unsigned long count;
unsigned long *list;
-} me4000_ao_channel_list_t;
+};
/*-----------------------------------------------------------------------------
Type definitions for analog input
----------------------------------------------------------------------------*/
-typedef struct me4000_ai_channel_list {
+struct me4000_ai_channel_list {
unsigned long count;
unsigned long *list;
-} me4000_ai_channel_list_t;
+};
-typedef struct me4000_ai_timer {
+struct me4000_ai_timer {
unsigned long pre_chan;
unsigned long chan;
unsigned long scan_low;
unsigned long scan_high;
-} me4000_ai_timer_t;
+};
-typedef struct me4000_ai_config {
- me4000_ai_timer_t timer;
- me4000_ai_channel_list_t channel_list;
+struct me4000_ai_config {
+ struct me4000_ai_timer timer;
+ struct me4000_ai_channel_list channel_list;
int sh;
-} me4000_ai_config_t;
+};
-typedef struct me4000_ai_single {
+struct me4000_ai_single {
int channel;
int range;
int mode;
short value;
unsigned long timeout;
-} me4000_ai_single_t;
+};
-typedef struct me4000_ai_trigger {
+struct me4000_ai_trigger {
int mode;
int edge;
-} me4000_ai_trigger_t;
+};
-typedef struct me4000_ai_sc {
+struct me4000_ai_sc {
unsigned long value;
int reload;
-} me4000_ai_sc_t;
+};
/*-----------------------------------------------------------------------------
Type definitions for eeprom
----------------------------------------------------------------------------*/
-typedef struct me4000_eeprom {
+struct me4000_eeprom {
unsigned long date;
short uni_10_offset;
short uni_10_fullscale;
@@ -842,45 +840,45 @@ typedef struct me4000_eeprom {
short diff_10_fullscale;
short diff_2_5_offset;
short diff_2_5_fullscale;
-} me4000_eeprom_t;
+};
/*-----------------------------------------------------------------------------
Type definitions for digital I/O
----------------------------------------------------------------------------*/
-typedef struct me4000_dio_config {
+struct me4000_dio_config {
int port;
int mode;
int function;
-} me4000_dio_config_t;
+};
-typedef struct me4000_dio_byte {
+struct me4000_dio_byte {
int port;
unsigned char byte;
-} me4000_dio_byte_t;
+};
/*-----------------------------------------------------------------------------
Type definitions for counters
----------------------------------------------------------------------------*/
-typedef struct me4000_cnt {
+struct me4000_cnt {
int counter;
unsigned short value;
-} me4000_cnt_t;
+};
-typedef struct me4000_cnt_config {
+struct me4000_cnt_config {
int counter;
int mode;
-} me4000_cnt_config_t;
+};
/*-----------------------------------------------------------------------------
Type definitions for external interrupt
----------------------------------------------------------------------------*/
-typedef struct {
+struct me4000_int {
int int1_count;
int int2_count;
-} me4000_int_type;
+};
/*-----------------------------------------------------------------------------
The ioctls of the board
@@ -888,7 +886,8 @@ typedef struct {
#define ME4000_IOCTL_MAXNR 50
#define ME4000_MAGIC 'y'
-#define ME4000_GET_USER_INFO _IOR (ME4000_MAGIC, 0, me4000_user_info_t)
+#define ME4000_GET_USER_INFO _IOR (ME4000_MAGIC, 0, \
+ struct me4000_user_info)
#define ME4000_AO_START _IOW (ME4000_MAGIC, 1, unsigned long)
#define ME4000_AO_STOP _IO (ME4000_MAGIC, 2)
@@ -904,25 +903,35 @@ typedef struct {
#define ME4000_AO_DISABLE_DO _IO (ME4000_MAGIC, 12)
#define ME4000_AO_FSM_STATE _IOR (ME4000_MAGIC, 13, int)
-#define ME4000_AI_SINGLE _IOR (ME4000_MAGIC, 14, me4000_ai_single_t)
+#define ME4000_AI_SINGLE _IOR (ME4000_MAGIC, 14, \
+ struct me4000_ai_single)
#define ME4000_AI_START _IOW (ME4000_MAGIC, 15, unsigned long)
#define ME4000_AI_STOP _IO (ME4000_MAGIC, 16)
#define ME4000_AI_IMMEDIATE_STOP _IO (ME4000_MAGIC, 17)
#define ME4000_AI_EX_TRIG_ENABLE _IO (ME4000_MAGIC, 18)
#define ME4000_AI_EX_TRIG_DISABLE _IO (ME4000_MAGIC, 19)
-#define ME4000_AI_EX_TRIG_SETUP _IOW (ME4000_MAGIC, 20, me4000_ai_trigger_t)
-#define ME4000_AI_CONFIG _IOW (ME4000_MAGIC, 21, me4000_ai_config_t)
-#define ME4000_AI_SC_SETUP _IOW (ME4000_MAGIC, 22, me4000_ai_sc_t)
+#define ME4000_AI_EX_TRIG_SETUP _IOW (ME4000_MAGIC, 20, \
+ struct me4000_ai_trigger)
+#define ME4000_AI_CONFIG _IOW (ME4000_MAGIC, 21, \
+ struct me4000_ai_config)
+#define ME4000_AI_SC_SETUP _IOW (ME4000_MAGIC, 22, \
+ struct me4000_ai_sc)
#define ME4000_AI_FSM_STATE _IOR (ME4000_MAGIC, 23, int)
-#define ME4000_DIO_CONFIG _IOW (ME4000_MAGIC, 24, me4000_dio_config_t)
-#define ME4000_DIO_GET_BYTE _IOR (ME4000_MAGIC, 25, me4000_dio_byte_t)
-#define ME4000_DIO_SET_BYTE _IOW (ME4000_MAGIC, 26, me4000_dio_byte_t)
+#define ME4000_DIO_CONFIG _IOW (ME4000_MAGIC, 24, \
+ struct me4000_dio_config)
+#define ME4000_DIO_GET_BYTE _IOR (ME4000_MAGIC, 25, \
+ struct me4000_dio_byte)
+#define ME4000_DIO_SET_BYTE _IOW (ME4000_MAGIC, 26, \
+ struct me4000_dio_byte)
#define ME4000_DIO_RESET _IO (ME4000_MAGIC, 27)
-#define ME4000_CNT_READ _IOR (ME4000_MAGIC, 28, me4000_cnt_t)
-#define ME4000_CNT_WRITE _IOW (ME4000_MAGIC, 29, me4000_cnt_t)
-#define ME4000_CNT_CONFIG _IOW (ME4000_MAGIC, 30, me4000_cnt_config_t)
+#define ME4000_CNT_READ _IOR (ME4000_MAGIC, 28, \
+ struct me4000_cnt)
+#define ME4000_CNT_WRITE _IOW (ME4000_MAGIC, 29, \
+ struct me4000_cnt)
+#define ME4000_CNT_CONFIG _IOW (ME4000_MAGIC, 30, \
+ struct me4000_cnt_config)
#define ME4000_CNT_RESET _IO (ME4000_MAGIC, 31)
#define ME4000_EXT_INT_DISABLE _IO (ME4000_MAGIC, 32)
@@ -934,13 +943,16 @@ typedef struct {
#define ME4000_AI_FULLSCALE_ENABLE _IO (ME4000_MAGIC, 37)
#define ME4000_AI_FULLSCALE_DISABLE _IO (ME4000_MAGIC, 38)
-#define ME4000_AI_EEPROM_READ _IOR (ME4000_MAGIC, 39, me4000_eeprom_t)
-#define ME4000_AI_EEPROM_WRITE _IOW (ME4000_MAGIC, 40, me4000_eeprom_t)
+#define ME4000_AI_EEPROM_READ _IOR (ME4000_MAGIC, 39, \
+ struct me4000_eeprom)
+#define ME4000_AI_EEPROM_WRITE _IOW (ME4000_MAGIC, 40, \
+ struct me4000_eeprom)
#define ME4000_AO_SIMULTANEOUS_EX_TRIG _IO (ME4000_MAGIC, 41)
#define ME4000_AO_SIMULTANEOUS_SW _IO (ME4000_MAGIC, 42)
#define ME4000_AO_SIMULTANEOUS_DISABLE _IO (ME4000_MAGIC, 43)
-#define ME4000_AO_SIMULTANEOUS_UPDATE _IOW (ME4000_MAGIC, 44, me4000_ao_channel_list_t)
+#define ME4000_AO_SIMULTANEOUS_UPDATE _IOW (ME4000_MAGIC, 44, \
+ struct me4000_ao_channel_list)
#define ME4000_AO_SYNCHRONOUS_EX_TRIG _IO (ME4000_MAGIC, 45)
#define ME4000_AO_SYNCHRONOUS_SW _IO (ME4000_MAGIC, 46)
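For illustration only (not part of the patch): a minimal user-space sketch of how the renamed struct-based ioctls above would be exercised, assuming the me4000.h definitions are visible to user space. The device node path and the surrounding program are assumptions; only the struct and the ioctl number come from the header.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "me4000.h"

int main(void)
{
	struct me4000_dio_byte dio = { .port = 0 };
	int fd = open("/dev/me4000_0_dio", O_RDWR);	/* node name is an assumption */

	if (fd < 0)
		return 1;
	if (ioctl(fd, ME4000_DIO_GET_BYTE, &dio) == 0)	/* driver fills dio.byte */
		printf("port %d reads 0x%02x\n", dio.port, dio.byte);
	close(fd);
	return 0;
}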
diff --git a/drivers/staging/pcc-acpi/Kconfig b/drivers/staging/pcc-acpi/Kconfig
new file mode 100644
index 00000000000..6720d4086ba
--- /dev/null
+++ b/drivers/staging/pcc-acpi/Kconfig
@@ -0,0 +1,11 @@
+config PCC_ACPI
+ tristate "Panasonic ACPI Hotkey support"
+ depends on ACPI
+ default n
+ ---help---
+ This driver provides support for Panasonic hotkeys through the
+ ACPI interface. This works for the Panasonic R1 (N variant),
+ R2, R3, T2, W2, and Y2 laptops.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pcc-acpi.
diff --git a/drivers/staging/pcc-acpi/Makefile b/drivers/staging/pcc-acpi/Makefile
new file mode 100644
index 00000000000..f93b29edf61
--- /dev/null
+++ b/drivers/staging/pcc-acpi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PCC_ACPI) += pcc-acpi.o
diff --git a/drivers/staging/pcc-acpi/TODO b/drivers/staging/pcc-acpi/TODO
new file mode 100644
index 00000000000..fab24098228
--- /dev/null
+++ b/drivers/staging/pcc-acpi/TODO
@@ -0,0 +1,7 @@
+TODO:
+ - Lindent fixes
+ - checkpatch.pl fixes
+ - verify that the acpi interface is correct
+ - remove /proc dependency if needed (not sure yet)
+
+Please send any patches for this driver to Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/staging/pcc-acpi/pcc-acpi.c b/drivers/staging/pcc-acpi/pcc-acpi.c
new file mode 100644
index 00000000000..7715c31f273
--- /dev/null
+++ b/drivers/staging/pcc-acpi/pcc-acpi.c
@@ -0,0 +1,1111 @@
+/*
+ * Panasonic HotKey and lcd brightness control Extra driver
+ * (C) 2004 Hiroshi Miura <miura@da-cha.org>
+ * (C) 2004 NTT DATA Intellilink Co. http://www.intellilink.co.jp/
+ * (C) YOKOTA Hiroshi <yokota (at) netlab. is. tsukuba. ac. jp>
+ * (C) 2004 David Bronaugh <dbronaugh>
+ *
+ * derived from toshiba_acpi.c, Copyright (C) 2002-2004 John Belmonte
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *---------------------------------------------------------------------------
+ *
+ * ChangeLog:
+ *
+ * Nov.04, 2006 Hiroshi Miura <miura@da-cha.org>
+ * -v0.9 remove warning about section reference.
+ * remove acpi_os_free
+ * add /proc/acpi/pcc/brightness interface to
+ * allow HAL to access.
+ * merge dbronaugh's enhancement
+ * Aug.17, 2004 David Bronaugh (dbronaugh)
+ * - Added screen brightness setting interface
+ * Thanks to the FreeBSD crew
+ * (acpi_panasonic.c authors)
+ * for the ideas I needed to accomplish it
+ *
+ * May.29, 2006 Hiroshi Miura <miura@da-cha.org>
+ * -v0.8.4 follow the change of the keyinput structure
+ * thanks to Fabian Yamaguchi <fabs@cs.tu-berlin.de>,
+ * Jacob Bower <jacob.bower@ic.ac.uk> and
+ * Hiroshi Yokota for providing solutions.
+ *
+ * Oct.02, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.8.2 merge code of YOKOTA Hiroshi
+ * <yokota@netlab.is.tsukuba.ac.jp>.
+ * Add sticky key mode interface.
+ * Refactoring acpi_pcc_generete_keyinput().
+ *
+ * Sep.15, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.8 Generate key input event on input subsystem.
+ * This is based on yet another driver
+ * written by Ryuta Nakanishi.
+ *
+ * Sep.10, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.7 Change proc interface functions using seq_file
+ * facility, the same as other ACPI drivers.
+ *
+ * Aug.28, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.6.4 Fix a silly error with status checking
+ *
+ * Aug.25, 2004 Hiroshi Miura <miura@da-cha.org>
+ * -v0.6.3 replace read_acpi_int by standard
+ * function acpi_evaluate_integer
+ * some clean up and make smart copyright notice.
+ * fix return value of pcc_acpi_get_key()
+ * fix checking return value of acpi_bus_register_driver()
+ *
+ * Aug.22, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
+ * -v0.6.2 Add check on ACPI data (num_sifr)
+ * Coding style cleanups, better error messages/handling
+ * Fixed an off-by-one error in memory allocation
+ *
+ * Aug.21, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
+ * -v0.6.1 Fix a silly error with status checking
+ *
+ * Aug.20, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
+ * - v0.6 Correct brightness controls to reflect reality
+ * based on information gleaned by Hiroshi Miura
+ * and discussions with Hiroshi Miura
+ *
+ * Aug.10, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.5 support LCD brightness control
+ * based on the disclosed information by MEI.
+ *
+ * Jul.25, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.4 first post version
+ * add function to retrieve SIFR
+ *
+ * Jul.24, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.3 get proper status of hotkey
+ *
+ * Jul.22, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.2 add HotKey handler
+ *
+ * Jul.17, 2004 Hiroshi Miura <miura@da-cha.org>
+ * - v0.1 start from toshiba_acpi driver written by John Belmonte
+ *
+ */
+
+#define ACPI_PCC_VERSION "0.9+hy"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+
+/*************************************************************************
+ * "seq" file template definition.
+ */
+/* "seq" initializer */
+#define SEQ_OPEN_FS(_open_func_name_, _show_func_name_) \
+static int _open_func_name_(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, _show_func_name_, PDE(inode)->data); \
+}
+
+/*-------------------------------------------------------------------------
+ * "seq" fops template for read-only files.
+ */
+#define SEQ_FILEOPS_R(_open_func_name_) \
+{ \
+ .open = _open_func_name_, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+/*------------------------------------------------------------------------
+ * "seq" fops template for read-write files.
+ */
+#define SEQ_FILEOPS_RW(_open_func_name_, _write_func_name_) \
+{ \
+ .open = _open_func_name_ , \
+ .read = seq_read, \
+ .write = _write_func_name_, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+/*
+ * "seq" file template definition ended.
+ ***************************************************************************
+ */
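As an illustration only (hypothetical names, not part of the patch): the two templates above compose into a read-only proc entry by pairing a show function with the generated open function and the fops initializer.

static int example_show(struct seq_file *seq, void *offset)
{
	seq_printf(seq, "%d\n", 42);		/* placeholder payload */
	return 0;
}

SEQ_OPEN_FS(example_open_fs, example_show);	/* generates example_open_fs() */

static struct file_operations example_fops =
	SEQ_FILEOPS_R(example_open_fs);		/* open/read/llseek/release */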
+#ifndef ACPI_HOTKEY_COMPONENT
+#define ACPI_HOTKEY_COMPONENT 0x10000000
+#endif
+
+#define _COMPONENT ACPI_HOTKEY_COMPONENT
+ACPI_MODULE_NAME("pcc_acpi");
+
+MODULE_AUTHOR("Hiroshi Miura, Hiroshi Yokota");
+MODULE_DESCRIPTION("ACPI HotKey driver for Panasonic Let's Note laptops");
+MODULE_LICENSE("GPL");
+
+#define LOGPREFIX "pcc_acpi: "
+
+/****************************************************
+ * Define ACPI PATHs
+ ****************************************************/
+/* Let's Note hotkeys */
+#define METHOD_HKEY_QUERY "HINF"
+#define METHOD_HKEY_SQTY "SQTY"
+#define METHOD_HKEY_SINF "SINF"
+#define METHOD_HKEY_SSET "SSET"
+#define HKEY_NOTIFY 0x80
+
+/* for brightness control */
+#define LCD_MAX_BRIGHTNESS 255
+/* This may be magical -- beware */
+#define LCD_BRIGHTNESS_INCREMENT 17
+/* Registers of SINF */
+#define SINF_LCD_BRIGHTNESS 4
+
+/*******************************************************************
+ *
+ * definitions for /proc/ interface
+ *
+ *******************************************************************/
+#define ACPI_PCC_DRIVER_NAME "pcc_acpi"
+#define ACPI_PCC_DEVICE_NAME "PCCExtra"
+#define ACPI_PCC_CLASS "pcc"
+#define PROC_PCC ACPI_PCC_CLASS
+
+#define ACPI_PCC_INPUT_PHYS "panasonic/hkey0"
+
+/* This is a transitional definition */
+#ifndef KEY_BATT
+# define KEY_BATT 227
+#endif
+
+#define PROC_STR_MAX_LEN 8
+
+#define BUS_PCC_HOTKEY BUS_I8042 /*0x1a*/ /* FIXME: BUS_I8042? */
+
+/* Fn+F4/F5 conflicts with Shift+F1/F2 */
+/* This hack avoids a key number conflict */
+#define PCC_KEYINPUT_MODE (0)
+
+/* LCD_TYPEs: 0 = Normal, 1 = Semi-transparent
+ ENV_STATEs: Normal temp=0x01, High temp=0x81, N/A=0x00
+*/
+enum SINF_BITS { SINF_NUM_BATTERIES = 0,
+ SINF_LCD_TYPE,
+ SINF_AC_MAX_BRIGHT,
+ SINF_AC_MIN_BRIGHT,
+ SINF_AC_CUR_BRIGHT,
+ /* 4 = the R1 only handles SINF_AC_CUR_BRIGHT
+ * as SINF_CUR_BRIGHT and doesn't know the AC state */
+ SINF_DC_MAX_BRIGHT,
+ SINF_DC_MIN_BRIGHT,
+ SINF_DC_CUR_BRIGHT,
+ SINF_MUTE,
+ SINF_RESERVED,
+ SINF_ENV_STATE, /* 10 */
+ SINF_STICKY_KEY = 0x80,
+};
+
+static struct acpi_device_id pcc_device_ids[] = {
+ {"MAT0012", 0},
+ {"MAT0013", 0},
+ {"MAT0018", 0},
+ {"MAT0019", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, pcc_device_ids);
+
+
+static int __devinit acpi_pcc_hotkey_add(struct acpi_device *device);
+static int __devexit acpi_pcc_hotkey_remove(struct acpi_device *device,
+ int type);
+static int acpi_pcc_hotkey_resume(struct acpi_device *device);
+
+
+static struct acpi_driver acpi_pcc_driver = {
+ .name = ACPI_PCC_DRIVER_NAME,
+ .class = ACPI_PCC_CLASS,
+ .ids = pcc_device_ids,
+ .ops = {
+ .add = acpi_pcc_hotkey_add,
+ .remove = __devexit_p(acpi_pcc_hotkey_remove),
+#ifdef CONFIG_PM
+ /*.suspend = acpi_pcc_hotkey_suspend,*/
+ .resume = acpi_pcc_hotkey_resume,
+#endif
+ },
+};
+
+struct acpi_hotkey {
+ acpi_handle handle;
+ struct acpi_device *device;
+ struct proc_dir_entry *proc_dir_entry;
+ unsigned long num_sifr;
+ unsigned long status;
+ struct input_dev *input_dev;
+ int sticky_mode;
+};
+
+struct pcc_keyinput {
+ struct acpi_hotkey *hotkey;
+ int key_mode;
+};
+
+/* *************************************************************************
+ Hotkey driver core
+ ************************************************************************* */
+/* -------------------------------------------------------------------------
+ method access functions
+ ------------------------------------------------------------------------- */
+static int acpi_pcc_write_sset(struct acpi_hotkey *hotkey, int func, int val)
+{
+ union acpi_object in_objs[] = {
+ { .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = func, },
+ { .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = val, },
+ };
+ struct acpi_object_list params = {
+ .count = ARRAY_SIZE(in_objs),
+ .pointer = in_objs,
+ };
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_write_sset");
+
+ status = acpi_evaluate_object(hotkey->handle, METHOD_HKEY_SSET,
+ &params, NULL);
+
+ return_VALUE(status == AE_OK ? AE_OK : AE_ERROR);
+}
+
+static inline int acpi_pcc_get_sqty(struct acpi_device *device)
+{
+ unsigned long s;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_get_sqty");
+
+ status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY,
+ NULL, &s);
+ if (ACPI_SUCCESS(status)) {
+ return_VALUE(s);
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "evaluation error HKEY.SQTY\n"));
+ return_VALUE(-EINVAL);
+ }
+}
+
+static int acpi_pcc_retrieve_biosdata(struct acpi_hotkey *hotkey, u32 *sinf)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *hkey = NULL;
+ int i;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_retrieve_biosdata");
+
+ status = acpi_evaluate_object(hotkey->handle, METHOD_HKEY_SINF, 0,
+ &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "evaluation error HKEY.SINF\n"));
+ status = AE_ERROR;
+ return_VALUE(status);
+ }
+
+ hkey = buffer.pointer;
+ if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF\n"));
+ goto free_buffer;
+ }
+
+ if (hotkey->num_sifr < hkey->package.count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "SQTY reports bad SINF length\n"));
+ status = AE_ERROR;
+ goto free_buffer;
+ }
+
+ for (i = 0; i < hkey->package.count; i++) {
+ union acpi_object *element = &(hkey->package.elements[i]);
+ if (likely(element->type == ACPI_TYPE_INTEGER)) {
+ sinf[i] = element->integer.value;
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Invalid HKEY.SINF data\n"));
+ status = AE_ERROR;
+ break;
+ }
+ }
+ sinf[hkey->package.count] = -1;
+
+ free_buffer:
+ kfree(buffer.pointer);
+ return_VALUE(status == AE_OK ? AE_OK : AE_ERROR);
+}
+
+static int acpi_pcc_read_sinf_field(struct seq_file *seq, int field)
+{
+ struct acpi_hotkey *hotkey = (struct acpi_hotkey *) seq->private;
+ u32 sinf[hotkey->num_sifr + 1];
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_read_sinf_field");
+
+ if (ACPI_SUCCESS(acpi_pcc_retrieve_biosdata(hotkey, sinf)))
+ seq_printf(seq, "%u\n", sinf[field]);
+ else
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Couldn't retrieve BIOS data\n"));
+
+ return_VALUE(AE_OK);
+}
+
+/* -------------------------------------------------------------------------
+ user interface functions
+ ------------------------------------------------------------------------- */
+/* read methods */
+/* Sinf read methods */
+#define PCC_SINF_READ_F(_name_, FUNC) \
+static int _name_(struct seq_file *seq, void *offset) \
+{ \
+ return_VALUE(ACPI_SUCCESS(acpi_pcc_read_sinf_field(seq, \
+ (FUNC))) \
+ ? 0 : -EINVAL); \
+}
+
+PCC_SINF_READ_F(acpi_pcc_numbatteries_show, SINF_NUM_BATTERIES);
+PCC_SINF_READ_F(acpi_pcc_lcdtype_show, SINF_LCD_TYPE);
+PCC_SINF_READ_F(acpi_pcc_ac_brightness_max_show, SINF_AC_MAX_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_ac_brightness_min_show, SINF_AC_MIN_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_ac_brightness_show, SINF_AC_CUR_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_dc_brightness_max_show, SINF_DC_MAX_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_dc_brightness_min_show, SINF_DC_MIN_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_dc_brightness_show, SINF_DC_CUR_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_brightness_show, SINF_AC_CUR_BRIGHT);
+PCC_SINF_READ_F(acpi_pcc_mute_show, SINF_MUTE);
+
+static int acpi_pcc_sticky_key_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_hotkey *hotkey = seq->private;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_sticky_key_show");
+
+ if (!hotkey || !hotkey->device)
+ return_VALUE(-EINVAL);
+
+ seq_printf(seq, "%d\n", hotkey->sticky_mode);
+
+ return_VALUE(0);
+}
+
+static int acpi_pcc_keyinput_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_hotkey *hotkey = seq->private;
+ struct input_dev *hotk_input_dev = hotkey->input_dev;
+ struct pcc_keyinput *keyinput = input_get_drvdata(hotk_input_dev);
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_keyinput_show");
+
+ seq_printf(seq, "%d\n", keyinput->key_mode);
+
+ return_VALUE(0);
+}
+
+static int acpi_pcc_version_show(struct seq_file *seq, void *offset)
+{
+ struct acpi_hotkey *hotkey = seq->private;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_version_show");
+
+ if (!hotkey || !hotkey->device)
+ return_VALUE(-EINVAL);
+
+ seq_printf(seq, "%s version %s\n", ACPI_PCC_DRIVER_NAME,
+ ACPI_PCC_VERSION);
+ seq_printf(seq, "%li functions\n", hotkey->num_sifr);
+
+ return_VALUE(0);
+}
+
+/* write methods */
+static ssize_t acpi_pcc_write_single_flag(struct file *file,
+ const char __user *buffer,
+ size_t count,
+ int sinf_func)
+{
+ struct seq_file *seq = file->private_data;
+ struct acpi_hotkey *hotkey = seq->private;
+ char write_string[PROC_STR_MAX_LEN];
+ u32 val;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_write_single_flag");
+
+ if (!hotkey || (count > sizeof(write_string) - 1))
+ return_VALUE(-EINVAL);
+
+ if (copy_from_user(write_string, buffer, count))
+ return_VALUE(-EFAULT);
+
+ write_string[count] = '\0';
+
+ if ((sscanf(write_string, "%3i", &val) == 1) &&
+ (val == 0 || val == 1))
+ acpi_pcc_write_sset(hotkey, sinf_func, val);
+
+ return_VALUE(count);
+}
+
+static ssize_t acpi_pcc_write_brightness(struct file *file,
+ const char __user *buffer,
+ size_t count,
+ int min_index, int max_index,
+ int cur_index)
+{
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct acpi_hotkey *hotkey = (struct acpi_hotkey *)seq->private;
+ char write_string[PROC_STR_MAX_LEN];
+ u32 bright;
+ u32 sinf[hotkey->num_sifr + 1];
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_write_brightness");
+
+ if (!hotkey || (count > sizeof(write_string) - 1))
+ return_VALUE(-EINVAL);
+
+ if (copy_from_user(write_string, buffer, count))
+ return_VALUE(-EFAULT);
+
+ write_string[count] = '\0';
+
+ if (ACPI_FAILURE(acpi_pcc_retrieve_biosdata(hotkey, sinf))) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Couldn't retrieve BIOS data\n"));
+ goto end;
+ }
+
+ if ((sscanf(write_string, "%4i", &bright) == 1) &&
+ (bright >= sinf[min_index]) &&
+ (bright <= sinf[max_index]))
+ acpi_pcc_write_sset(hotkey, cur_index, bright);
+
+end:
+ return_VALUE(count);
+}
+
+static ssize_t acpi_pcc_write_ac_brightness(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return_VALUE(acpi_pcc_write_brightness(file, buffer, count,
+ SINF_AC_MIN_BRIGHT,
+ SINF_AC_MAX_BRIGHT,
+ SINF_AC_CUR_BRIGHT));
+}
+
+static ssize_t acpi_pcc_write_dc_brightness(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return_VALUE(acpi_pcc_write_brightness(file, buffer, count,
+ SINF_DC_MIN_BRIGHT,
+ SINF_DC_MAX_BRIGHT,
+ SINF_DC_CUR_BRIGHT));
+}
+
+static ssize_t acpi_pcc_write_no_brightness(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return acpi_pcc_write_brightness(file, buffer, count,
+ SINF_AC_MIN_BRIGHT,
+ SINF_AC_MAX_BRIGHT,
+ SINF_AC_CUR_BRIGHT);
+}
+
+static ssize_t acpi_pcc_write_mute(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return_VALUE(acpi_pcc_write_single_flag(file, buffer, count,
+ SINF_MUTE));
+}
+
+static ssize_t acpi_pcc_write_sticky_key(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct acpi_hotkey *hotkey = (struct acpi_hotkey *)seq->private;
+ char write_string[PROC_STR_MAX_LEN];
+ int mode;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_write_sticky_key");
+
+ if (!hotkey || (count > sizeof(write_string) - 1))
+ return_VALUE(-EINVAL);
+
+ if (copy_from_user(write_string, buffer, count))
+ return_VALUE(-EFAULT);
+
+ write_string[count] = '\0';
+
+ if ((sscanf(write_string, "%3i", &mode) == 1) &&
+ (mode == 0 || mode == 1)) {
+ acpi_pcc_write_sset(hotkey, SINF_STICKY_KEY, mode);
+ hotkey->sticky_mode = mode;
+ }
+
+ return_VALUE(count);
+}
+
+static ssize_t acpi_pcc_write_keyinput(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct acpi_hotkey *hotkey = (struct acpi_hotkey *)seq->private;
+ struct pcc_keyinput *keyinput;
+ char write_string[PROC_STR_MAX_LEN];
+ int key_mode;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_write_keyinput");
+
+ if (!hotkey || (count > (sizeof(write_string) - 1)))
+ return_VALUE(-EINVAL);
+
+ if (copy_from_user(write_string, buffer, count))
+ return_VALUE(-EFAULT);
+
+ write_string[count] = '\0';
+
+ if ((sscanf(write_string, "%4i", &key_mode) == 1) &&
+ (key_mode == 0 || key_mode == 1)) {
+ keyinput = input_get_drvdata(hotkey->input_dev);
+ keyinput->key_mode = key_mode;
+ }
+
+ return_VALUE(count);
+}
+
+/* -------------------------------------------------------------------------
+ hotkey driver
+ ------------------------------------------------------------------------- */
+static void acpi_pcc_generete_keyinput(struct acpi_hotkey *hotkey)
+{
+ struct input_dev *hotk_input_dev = hotkey->input_dev;
+ struct pcc_keyinput *keyinput = input_get_drvdata(hotk_input_dev);
+ int hinf = hotkey->status;
+ int key_code, hkey_num;
+ const int key_map[] = {
+ /* 0 */ -1,
+ /* 1 */ KEY_BRIGHTNESSDOWN,
+ /* 2 */ KEY_BRIGHTNESSUP,
+ /* 3 */ -1, /* vga/lcd switch event does not occur on
+ the hotkey driver. */
+ /* 4 */ KEY_MUTE,
+ /* 5 */ KEY_VOLUMEDOWN,
+ /* 6 */ KEY_VOLUMEUP,
+ /* 7 */ KEY_SLEEP,
+ /* 8 */ -1, /* Change CPU boost: do nothing */
+ /* 9 */ KEY_BATT,
+ /* 10 */ KEY_SUSPEND,
+ };
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_generete_keyinput");
+
+ if (keyinput->key_mode == 0)
+ return_VOID;
+
+ hkey_num = hinf & 0xf;
+
+ if ((0 > hkey_num) ||
+ (hkey_num >= ARRAY_SIZE(key_map))) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "hotkey number out of range: %d\n",
+ hkey_num));
+ return_VOID;
+ }
+
+ key_code = key_map[hkey_num];
+
+ if (key_code != -1) {
+ int pushed = (hinf & 0x80) ? TRUE : FALSE;
+
+ input_report_key(hotk_input_dev, key_code, pushed);
+ input_sync(hotk_input_dev);
+ }
+}
+
+static int acpi_pcc_hotkey_get_key(struct acpi_hotkey *hotkey)
+{
+ unsigned long result;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_get_key");
+
+ status = acpi_evaluate_integer(hotkey->handle, METHOD_HKEY_QUERY,
+ NULL, &result);
+ if (likely(ACPI_SUCCESS(status)))
+ hotkey->status = result;
+ else
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "error getting hotkey status\n"));
+
+ return_VALUE(status == AE_OK);
+}
+
+static void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_hotkey *hotkey = (struct acpi_hotkey *) data;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_notify");
+
+ switch (event) {
+ case HKEY_NOTIFY:
+ if (acpi_pcc_hotkey_get_key(hotkey)) {
+ /* generate an event like "pcc HKEY 00000080 00000084"
+ * when Fn+F4 is pressed */
+ acpi_bus_generate_proc_event(hotkey->device, event,
+ hotkey->status);
+ }
+ acpi_pcc_generete_keyinput(hotkey);
+ break;
+ default:
+ /* nothing to do */
+ break;
+ }
+ return_VOID;
+}
+
+/* *************************************************************************
+ FS Interface (/proc)
+ ************************************************************************* */
+/* open proc file fs */
+SEQ_OPEN_FS(acpi_pcc_dc_brightness_open_fs, acpi_pcc_dc_brightness_show);
+SEQ_OPEN_FS(acpi_pcc_numbatteries_open_fs, acpi_pcc_numbatteries_show);
+SEQ_OPEN_FS(acpi_pcc_lcdtype_open_fs, acpi_pcc_lcdtype_show);
+SEQ_OPEN_FS(acpi_pcc_ac_brightness_max_open_fs,
+ acpi_pcc_ac_brightness_max_show);
+SEQ_OPEN_FS(acpi_pcc_ac_brightness_min_open_fs,
+ acpi_pcc_ac_brightness_min_show);
+SEQ_OPEN_FS(acpi_pcc_ac_brightness_open_fs, acpi_pcc_ac_brightness_show);
+SEQ_OPEN_FS(acpi_pcc_dc_brightness_max_open_fs,
+ acpi_pcc_dc_brightness_max_show);
+SEQ_OPEN_FS(acpi_pcc_dc_brightness_min_open_fs,
+ acpi_pcc_dc_brightness_min_show);
+SEQ_OPEN_FS(acpi_pcc_brightness_open_fs, acpi_pcc_brightness_show);
+SEQ_OPEN_FS(acpi_pcc_mute_open_fs, acpi_pcc_mute_show);
+SEQ_OPEN_FS(acpi_pcc_version_open_fs, acpi_pcc_version_show);
+SEQ_OPEN_FS(acpi_pcc_keyinput_open_fs, acpi_pcc_keyinput_show);
+SEQ_OPEN_FS(acpi_pcc_sticky_key_open_fs, acpi_pcc_sticky_key_show);
+
+static struct file_operations acpi_pcc_numbatteries_fops =
+ SEQ_FILEOPS_R(acpi_pcc_numbatteries_open_fs);
+static struct file_operations acpi_pcc_lcdtype_fops =
+ SEQ_FILEOPS_R(acpi_pcc_lcdtype_open_fs);
+static struct file_operations acpi_pcc_mute_fops =
+ SEQ_FILEOPS_RW(acpi_pcc_mute_open_fs, acpi_pcc_write_mute);
+static struct file_operations acpi_pcc_ac_brightness_fops =
+ SEQ_FILEOPS_RW(acpi_pcc_ac_brightness_open_fs,
+ acpi_pcc_write_ac_brightness);
+static struct file_operations acpi_pcc_ac_brightness_max_fops =
+ SEQ_FILEOPS_R(acpi_pcc_ac_brightness_max_open_fs);
+static struct file_operations acpi_pcc_ac_brightness_min_fops =
+ SEQ_FILEOPS_R(acpi_pcc_ac_brightness_min_open_fs);
+static struct file_operations acpi_pcc_dc_brightness_fops =
+ SEQ_FILEOPS_RW(acpi_pcc_dc_brightness_open_fs,
+ acpi_pcc_write_dc_brightness);
+static struct file_operations acpi_pcc_dc_brightness_max_fops =
+ SEQ_FILEOPS_R(acpi_pcc_dc_brightness_max_open_fs);
+static struct file_operations acpi_pcc_dc_brightness_min_fops =
+ SEQ_FILEOPS_R(acpi_pcc_dc_brightness_min_open_fs);
+static struct file_operations acpi_pcc_brightness_fops =
+ SEQ_FILEOPS_RW(acpi_pcc_brightness_open_fs,
+ acpi_pcc_write_no_brightness);
+static struct file_operations acpi_pcc_sticky_key_fops =
+ SEQ_FILEOPS_RW(acpi_pcc_sticky_key_open_fs, acpi_pcc_write_sticky_key);
+static struct file_operations acpi_pcc_keyinput_fops =
+ SEQ_FILEOPS_RW(acpi_pcc_keyinput_open_fs, acpi_pcc_write_keyinput);
+static struct file_operations acpi_pcc_version_fops =
+ SEQ_FILEOPS_R(acpi_pcc_version_open_fs);
+
+struct proc_item {
+ const char *name;
+ struct file_operations *fops;
+ mode_t flag;
+};
+
+/* Note: These functions map *exactly* to the SINF/SSET functions */
+struct proc_item acpi_pcc_proc_items_sifr[] = {
+ { "num_batteries", &acpi_pcc_numbatteries_fops, S_IRUGO },
+ { "lcd_type", &acpi_pcc_lcdtype_fops, S_IRUGO },
+ { "ac_brightness_max", &acpi_pcc_ac_brightness_max_fops, S_IRUGO },
+ { "ac_brightness_min", &acpi_pcc_ac_brightness_min_fops, S_IRUGO },
+ { "ac_brightness", &acpi_pcc_ac_brightness_fops,
+ S_IFREG | S_IRUGO | S_IWUSR },
+ { "dc_brightness_max", &acpi_pcc_dc_brightness_max_fops, S_IRUGO },
+ { "dc_brightness_min", &acpi_pcc_dc_brightness_min_fops, S_IRUGO },
+ { "dc_brightness", &acpi_pcc_dc_brightness_fops,
+ S_IFREG | S_IRUGO | S_IWUSR },
+ { "brightness", &acpi_pcc_brightness_fops, S_IFREG | S_IRUGO | S_IWUSR},
+ { "mute", &acpi_pcc_mute_fops, S_IFREG | S_IRUGO | S_IWUSR },
+ { NULL, NULL, 0 },
+};
+
+struct proc_item acpi_pcc_proc_items[] = {
+ { "sticky_key", &acpi_pcc_sticky_key_fops, S_IFREG | S_IRUGO | S_IWUSR},
+ { "keyinput", &acpi_pcc_keyinput_fops, S_IFREG | S_IRUGO | S_IWUSR },
+ { "version", &acpi_pcc_version_fops, S_IRUGO },
+ { NULL, NULL, 0 },
+};
+
+static int __devinit acpi_pcc_add_device(struct acpi_device *device,
+ struct proc_item *proc_items,
+ int num)
+{
+ struct acpi_hotkey *hotkey = acpi_driver_data(device);
+ struct proc_dir_entry *proc;
+ struct proc_item *item;
+ int i;
+
+ for (item = proc_items, i = 0; item->name && i < num; ++item, ++i) {
+ proc = create_proc_entry(item->name, item->flag,
+ hotkey->proc_dir_entry);
+ if (likely(proc)) {
+ proc->proc_fops = item->fops;
+ proc->data = hotkey;
+ proc->owner = THIS_MODULE;
+ } else {
+ while (i-- > 0) {
+ item--;
+ remove_proc_entry(item->name,
+ hotkey->proc_dir_entry);
+ }
+ return_VALUE(-ENODEV);
+ }
+ }
+ return_VALUE(0);
+}
+
+static int __devinit acpi_pcc_proc_init(struct acpi_device *device)
+{
+ struct proc_dir_entry *acpi_pcc_dir;
+ struct acpi_hotkey *hotkey = acpi_driver_data(device);
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_proc_init");
+
+ acpi_pcc_dir = proc_mkdir(PROC_PCC, acpi_root_dir);
+
+ if (unlikely(!acpi_pcc_dir)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Couldn't create dir in /proc\n"));
+ return_VALUE(-ENODEV);
+ }
+
+ acpi_pcc_dir->owner = THIS_MODULE;
+ hotkey->proc_dir_entry = acpi_pcc_dir;
+
+ status = acpi_pcc_add_device(device, acpi_pcc_proc_items_sifr,
+ hotkey->num_sifr);
+ status |= acpi_pcc_add_device(device, acpi_pcc_proc_items,
+ ARRAY_SIZE(acpi_pcc_proc_items));
+ if (unlikely(status)) {
+ remove_proc_entry(PROC_PCC, acpi_root_dir);
+ hotkey->proc_dir_entry = NULL;
+ return_VALUE(-ENODEV);
+ }
+
+ return_VALUE(status);
+}
+
+static void __devexit acpi_pcc_remove_device(struct acpi_device *device,
+ struct proc_item *proc_items,
+ int num)
+{
+ struct acpi_hotkey *hotkey = acpi_driver_data(device);
+ struct proc_item *item;
+ int i;
+
+ for (item = proc_items, i = 0;
+ item->name != NULL && i < num;
+ ++item, ++i) {
+ remove_proc_entry(item->name, hotkey->proc_dir_entry);
+ }
+
+ return_VOID;
+}
+
+/* *************************************************************************
+ Power Management
+ ************************************************************************* */
+#ifdef CONFIG_PM
+static int acpi_pcc_hotkey_resume(struct acpi_device *device)
+{
+ struct acpi_hotkey *hotkey = acpi_driver_data(device);
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_resume");
+
+ if (device == NULL || hotkey == NULL)
+ return_VALUE(-EINVAL);
+
+ if (hotkey->num_sifr != 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Sticky mode restore: %d\n",
+ hotkey->sticky_mode));
+
+ status = acpi_pcc_write_sset(hotkey, SINF_STICKY_KEY,
+ hotkey->sticky_mode);
+ }
+ if (status != AE_OK)
+ return_VALUE(-EINVAL);
+
+ return_VALUE(0);
+}
+#endif
+
+/* *************************************************************************
+ Module init/remove
+ ************************************************************************* */
+/* -------------------------------------------------------------------------
+ input
+ ------------------------------------------------------------------------- */
+static int __devinit acpi_pcc_init_input(struct acpi_hotkey *hotkey)
+{
+ struct input_dev *hotk_input_dev;
+ struct pcc_keyinput *pcc_keyinput;
+ int error;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_init_input");
+
+ hotk_input_dev = input_allocate_device();
+ if (hotk_input_dev == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Couldn't allocate input device for hotkey"));
+ goto err_input;
+ }
+
+ pcc_keyinput = kcalloc(1, sizeof(struct pcc_keyinput), GFP_KERNEL);
+
+ if (pcc_keyinput == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Couldn't allocate mem for private data"));
+ goto err_pcc;
+ }
+
+ hotk_input_dev->evbit[0] = BIT(EV_KEY);
+
+ set_bit(KEY_BRIGHTNESSDOWN, hotk_input_dev->keybit);
+ set_bit(KEY_BRIGHTNESSUP, hotk_input_dev->keybit);
+ set_bit(KEY_MUTE, hotk_input_dev->keybit);
+ set_bit(KEY_VOLUMEDOWN, hotk_input_dev->keybit);
+ set_bit(KEY_VOLUMEUP, hotk_input_dev->keybit);
+ set_bit(KEY_SLEEP, hotk_input_dev->keybit);
+ set_bit(KEY_BATT, hotk_input_dev->keybit);
+ set_bit(KEY_SUSPEND, hotk_input_dev->keybit);
+
+ hotk_input_dev->name = ACPI_PCC_DRIVER_NAME;
+ hotk_input_dev->phys = ACPI_PCC_INPUT_PHYS;
+ hotk_input_dev->id.bustype = BUS_PCC_HOTKEY;
+ hotk_input_dev->id.vendor = 0x0001;
+ hotk_input_dev->id.product = 0x0001;
+ hotk_input_dev->id.version = 0x0100;
+
+ pcc_keyinput->key_mode = PCC_KEYINPUT_MODE;
+ pcc_keyinput->hotkey = hotkey;
+
+ input_set_drvdata(hotk_input_dev, pcc_keyinput);
+
+ hotkey->input_dev = hotk_input_dev;
+
+ error = input_register_device(hotk_input_dev);
+
+ if (error)
+ goto err_pcc;
+
+ return_VALUE(0);
+
+ err_pcc:
+ input_free_device(hotk_input_dev); /* never registered on this path */
+ err_input:
+ return_VALUE(-ENOMEM);
+}
+
+static void __devexit acpi_pcc_remove_input(struct acpi_hotkey *hotkey)
+{
+ struct input_dev *hotk_input_dev;
+ struct pcc_keyinput *pcc_keyinput;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_remove_input");
+
+ if (hotkey == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Can't free memory"));
+ return_VOID;
+ }
+
+ hotk_input_dev = hotkey->input_dev;
+ pcc_keyinput = input_get_drvdata(hotk_input_dev);
+
+ input_unregister_device(hotk_input_dev);
+
+ kfree(pcc_keyinput);
+}
+
+/* -------------------------------------------------------------------------
+ ACPI
+ ------------------------------------------------------------------------- */
+static int __devinit acpi_pcc_hotkey_add(struct acpi_device *device)
+{
+ acpi_status status = AE_OK;
+ struct acpi_hotkey *hotkey = NULL;
+ int sifr_status, num_sifr, result;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_add");
+
+ if (device == NULL)
+ return_VALUE(-EINVAL);
+
+ sifr_status = acpi_pcc_get_sqty(device);
+
+ if (sifr_status > 255) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr too large"));
+ return_VALUE(-ENODEV);
+ }
+
+ if (sifr_status < 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "not support SQTY"));
+ num_sifr = 0;
+ } else {
+ num_sifr = sifr_status;
+ }
+
+ hotkey = kzalloc(sizeof(struct acpi_hotkey), GFP_KERNEL);
+ if (hotkey == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Couldn't allocate mem for hotkey"));
+ return_VALUE(-ENOMEM);
+ }
+
+ hotkey->device = device;
+ hotkey->handle = device->handle;
+ hotkey->num_sifr = num_sifr;
+ acpi_driver_data(device) = hotkey;
+ strcpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_PCC_CLASS);
+
+ status = acpi_install_notify_handler(hotkey->handle,
+ ACPI_DEVICE_NOTIFY,
+ acpi_pcc_hotkey_notify,
+ hotkey);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error installing notify handler\n"));
+ kfree(hotkey);
+ return_VALUE(-ENODEV);
+ }
+
+ result = acpi_pcc_init_input(hotkey);
+ if (result != 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error installing keyinput handler\n"));
+ acpi_remove_notify_handler(hotkey->handle, ACPI_DEVICE_NOTIFY,
+ acpi_pcc_hotkey_notify);
+ kfree(hotkey);
+ return_VALUE(result);
+ }
+
+ return_VALUE(acpi_pcc_proc_init(device));
+}
+
+static int __devexit acpi_pcc_hotkey_remove(struct acpi_device *device,
+ int type)
+{
+ acpi_status status = AE_OK;
+ struct acpi_hotkey *hotkey = acpi_driver_data(device);
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_remove");
+
+ if (!device || !hotkey)
+ return_VALUE(-EINVAL);
+
+ if (hotkey->proc_dir_entry) {
+ acpi_pcc_remove_device(device, acpi_pcc_proc_items_sifr,
+ hotkey->num_sifr);
+ acpi_pcc_remove_device(device, acpi_pcc_proc_items,
+ ARRAY_SIZE(acpi_pcc_proc_items));
+ remove_proc_entry(PROC_PCC, acpi_root_dir);
+ }
+
+ status = acpi_remove_notify_handler(hotkey->handle,
+ ACPI_DEVICE_NOTIFY, acpi_pcc_hotkey_notify);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error removing notify handler\n"));
+ }
+
+ acpi_pcc_remove_input(hotkey);
+ kfree(hotkey);
+ return_VALUE(ACPI_SUCCESS(status) ? 0 : -ENODEV);
+}
+
+/* *********************************************************************
+ Module entry point
+ ********************************************************************* */
+static int __init acpi_pcc_init(void)
+{
+ int result;
+
+ ACPI_FUNCTION_TRACE("acpi_pcc_init");
+
+ printk(KERN_INFO LOGPREFIX "loading...\n");
+
+ if (acpi_disabled) {
+ printk(KERN_INFO LOGPREFIX "ACPI disabled.\n");
+ return_VALUE(-ENODEV);
+ }
+
+ result = acpi_bus_register_driver(&acpi_pcc_driver);
+ if (result < 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error registering hotkey driver\n"));
+ return_VALUE(-ENODEV);
+ }
+
+ return_VALUE(result);
+}
+
+static void __exit acpi_pcc_exit(void)
+{
+ ACPI_FUNCTION_TRACE("acpi_pcc_exit");
+
+ printk(KERN_INFO LOGPREFIX "unloading...\n");
+
+ acpi_bus_unregister_driver(&acpi_pcc_driver);
+
+ return_VOID;
+}
+
+module_init(acpi_pcc_init);
+module_exit(acpi_pcc_exit);
diff --git a/drivers/staging/poch/Kconfig b/drivers/staging/poch/Kconfig
new file mode 100644
index 00000000000..b3b33b984a5
--- /dev/null
+++ b/drivers/staging/poch/Kconfig
@@ -0,0 +1,6 @@
+config POCH
+ tristate "Redrapids Pocket Change CardBus support"
+ depends on PCI && UIO
+ default n
+ ---help---
+ Enable support for Redrapids Pocket Change CardBus devices.
diff --git a/drivers/staging/poch/Makefile b/drivers/staging/poch/Makefile
new file mode 100644
index 00000000000..d2b96805cb9
--- /dev/null
+++ b/drivers/staging/poch/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_POCH) += poch.o
diff --git a/drivers/staging/poch/README b/drivers/staging/poch/README
new file mode 100644
index 00000000000..f65e979743b
--- /dev/null
+++ b/drivers/staging/poch/README
@@ -0,0 +1,7 @@
+TODO:
+ - fix transmit overflows
+ - audit userspace interfaces
+ - get reserved major/minor if needed
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+Vijay Kumar <vijaykumar@bravegnu.org> and Jaya Kumar <jayakumar.lkml@gmail.com>
diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
new file mode 100644
index 00000000000..0e113f9a158
--- /dev/null
+++ b/drivers/staging/poch/poch.c
@@ -0,0 +1,1425 @@
+/*
+ * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
+ *
+ * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
+ *
+ * Licensed under GPL version 2 only.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/uio_driver.h>
+#include <linux/spinlock.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/poll.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/io.h>
+
+#include "poch.h"
+
+#include <asm/cacheflush.h>
+
+#ifndef PCI_VENDOR_ID_RRAPIDS
+#define PCI_VENDOR_ID_RRAPIDS 0x17D2
+#endif
+
+#ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
+#define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
+#endif
+
+#define POCH_NCHANNELS 2
+
+#define MAX_POCH_CARDS 8
+#define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)
+
+#define DRV_NAME "poch"
+#define PFX DRV_NAME ": "
+
+/*
+ * BAR0 Bridge Register Definitions
+ */
+
+#define BRIDGE_REV_REG 0x0
+#define BRIDGE_INT_MASK_REG 0x4
+#define BRIDGE_INT_STAT_REG 0x8
+
+#define BRIDGE_INT_ACTIVE (0x1 << 31)
+#define BRIDGE_INT_FPGA (0x1 << 2)
+#define BRIDGE_INT_TEMP_FAIL (0x1 << 1)
+#define BRIDGE_INT_TEMP_WARN (0x1 << 0)
+
+#define BRIDGE_FPGA_RESET_REG 0xC
+
+#define BRIDGE_CARD_POWER_REG 0x10
+#define BRIDGE_CARD_POWER_EN (0x1 << 0)
+#define BRIDGE_CARD_POWER_PROG_DONE (0x1 << 31)
+
+#define BRIDGE_JTAG_REG 0x14
+#define BRIDGE_DMA_GO_REG 0x18
+#define BRIDGE_STAT_0_REG 0x1C
+#define BRIDGE_STAT_1_REG 0x20
+#define BRIDGE_STAT_2_REG 0x24
+#define BRIDGE_STAT_3_REG 0x28
+#define BRIDGE_TEMP_STAT_REG 0x2C
+#define BRIDGE_TEMP_THRESH_REG 0x30
+#define BRIDGE_EEPROM_REVSEL_REG 0x34
+#define BRIDGE_CIS_STRUCT_REG 0x100
+#define BRIDGE_BOARDREV_REG 0x124
+
+/*
+ * BAR1 FPGA Register Definitions
+ */
+
+#define FPGA_IFACE_REV_REG 0x0
+#define FPGA_RX_BLOCK_SIZE_REG 0x8
+#define FPGA_TX_BLOCK_SIZE_REG 0xC
+#define FPGA_RX_BLOCK_COUNT_REG 0x10
+#define FPGA_TX_BLOCK_COUNT_REG 0x14
+#define FPGA_RX_CURR_DMA_BLOCK_REG 0x18
+#define FPGA_TX_CURR_DMA_BLOCK_REG 0x1C
+#define FPGA_RX_GROUP_COUNT_REG 0x20
+#define FPGA_TX_GROUP_COUNT_REG 0x24
+#define FPGA_RX_CURR_GROUP_REG 0x28
+#define FPGA_TX_CURR_GROUP_REG 0x2C
+#define FPGA_RX_CURR_PCI_REG 0x38
+#define FPGA_TX_CURR_PCI_REG 0x3C
+#define FPGA_RX_GROUP0_START_REG 0x40
+#define FPGA_TX_GROUP0_START_REG 0xC0
+#define FPGA_DMA_DESC_1_REG 0x140
+#define FPGA_DMA_DESC_2_REG 0x144
+#define FPGA_DMA_DESC_3_REG 0x148
+#define FPGA_DMA_DESC_4_REG 0x14C
+
+#define FPGA_DMA_INT_STAT_REG 0x150
+#define FPGA_DMA_INT_MASK_REG 0x154
+#define FPGA_DMA_INT_RX (1 << 0)
+#define FPGA_DMA_INT_TX (1 << 1)
+
+#define FPGA_RX_GROUPS_PER_INT_REG 0x158
+#define FPGA_TX_GROUPS_PER_INT_REG 0x15C
+#define FPGA_DMA_ADR_PAGE_REG 0x160
+#define FPGA_FPGA_REV_REG 0x200
+
+#define FPGA_ADC_CLOCK_CTL_REG 0x204
+#define FPGA_ADC_CLOCK_CTL_OSC_EN (0x1 << 3)
+#define FPGA_ADC_CLOCK_LOCAL_CLK (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
+#define FPGA_ADC_CLOCK_EXT_SAMP_CLK 0x0
+
+#define FPGA_ADC_DAC_EN_REG 0x208
+#define FPGA_ADC_DAC_EN_DAC_OFF (0x1 << 1)
+#define FPGA_ADC_DAC_EN_ADC_OFF (0x1 << 0)
+
+#define FPGA_INT_STAT_REG 0x20C
+#define FPGA_INT_MASK_REG 0x210
+#define FPGA_INT_PLL_UNLOCKED (0x1 << 9)
+#define FPGA_INT_DMA_CORE (0x1 << 8)
+#define FPGA_INT_TX_FF_EMPTY (0x1 << 7)
+#define FPGA_INT_RX_FF_EMPTY (0x1 << 6)
+#define FPGA_INT_TX_FF_OVRFLW (0x1 << 3)
+#define FPGA_INT_RX_FF_OVRFLW (0x1 << 2)
+#define FPGA_INT_TX_ACQ_DONE (0x1 << 1)
+#define FPGA_INT_RX_ACQ_DONE (0x1)
+
+#define FPGA_RX_ADC_CTL_REG 0x214
+#define FPGA_RX_ADC_CTL_CONT_CAP (0x0)
+#define FPGA_RX_ADC_CTL_SNAP_CAP (0x1)
+
+#define FPGA_RX_ARM_REG 0x21C
+
+#define FPGA_DOM_REG 0x224
+#define FPGA_DOM_DCM_RESET (0x1 << 5)
+#define FPGA_DOM_SOFT_RESET (0x1 << 4)
+#define FPGA_DOM_DUAL_M_SG_DMA (0x0)
+#define FPGA_DOM_TARGET_ACCESS (0x1)
+
+#define FPGA_TX_CTL_REG 0x228
+#define FPGA_TX_CTL_FIFO_FLUSH (0x1 << 9)
+#define FPGA_TX_CTL_OUTPUT_ZERO (0x0 << 2)
+#define FPGA_TX_CTL_OUTPUT_CARDBUS (0x1 << 2)
+#define FPGA_TX_CTL_OUTPUT_ADC (0x2 << 2)
+#define FPGA_TX_CTL_OUTPUT_SNAPSHOT (0x3 << 2)
+#define FPGA_TX_CTL_LOOPBACK (0x1 << 0)
+
+#define FPGA_ENDIAN_MODE_REG 0x22C
+#define FPGA_RX_FIFO_COUNT_REG 0x28C
+#define FPGA_TX_ENABLE_REG 0x298
+#define FPGA_TX_TRIGGER_REG 0x29C
+#define FPGA_TX_DATAMEM_COUNT_REG 0x2A8
+#define FPGA_CAP_FIFO_REG 0x300
+#define FPGA_TX_SNAPSHOT_REG 0x8000
+
+/*
+ * Channel Index Definitions
+ */
+
+enum {
+ CHNO_RX_CHANNEL,
+ CHNO_TX_CHANNEL,
+};
+
+struct poch_dev;
+
+enum channel_dir {
+ CHANNEL_DIR_RX,
+ CHANNEL_DIR_TX,
+};
+
+struct poch_group_info {
+ struct page *pg;
+ dma_addr_t dma_addr;
+ unsigned long user_offset;
+};
+
+struct channel_info {
+ unsigned int chno;
+
+ atomic_t sys_block_size;
+ atomic_t sys_group_size;
+ atomic_t sys_group_count;
+
+ enum channel_dir dir;
+
+ unsigned long block_size;
+ unsigned long group_size;
+ unsigned long group_count;
+
+ /* Contains the DMA address and VM offset of each group. */
+ struct poch_group_info *groups;
+
+ /* Contains the header and circular buffer exported to userspace. */
+ spinlock_t group_offsets_lock;
+ struct poch_cbuf_header *header;
+ struct page *header_pg;
+ unsigned long header_size;
+
+ /* Last group indicated as 'complete' to user space. */
+ unsigned int transfer;
+
+ wait_queue_head_t wq;
+
+ union {
+ unsigned int data_available;
+ unsigned int space_available;
+ };
+
+ void __iomem *bridge_iomem;
+ void __iomem *fpga_iomem;
+ spinlock_t *iomem_lock;
+
+ atomic_t free;
+ atomic_t inited;
+
+ /* Error counters */
+ struct poch_counters counters;
+ spinlock_t counters_lock;
+
+ struct device *dev;
+};
+
+struct poch_dev {
+ struct uio_info uio;
+ struct pci_dev *pci_dev;
+ unsigned int nchannels;
+ struct channel_info channels[POCH_NCHANNELS];
+ struct cdev cdev;
+
+ /* Counts the no. of channels that have been opened. On first
+ * open, the card is powered on. On last channel close, the
+ * card is powered off.
+ */
+ atomic_t usage;
+
+ void __iomem *bridge_iomem;
+ void __iomem *fpga_iomem;
+ spinlock_t iomem_lock;
+
+ struct device *dev;
+};
+
+static dev_t poch_first_dev;
+static struct class *poch_cls;
+static DEFINE_IDR(poch_ids);
+
+static ssize_t store_block_size(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct channel_info *channel = dev_get_drvdata(dev);
+ unsigned long block_size;
+
+ sscanf(buf, "%lu", &block_size);
+ atomic_set(&channel->sys_block_size, block_size);
+
+ return count;
+}
+static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size);
+
+static ssize_t store_group_size(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct channel_info *channel = dev_get_drvdata(dev);
+ unsigned long group_size;
+
+ sscanf(buf, "%lu", &group_size);
+ atomic_set(&channel->sys_group_size, group_size);
+
+ return count;
+}
+static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size);
+
+static ssize_t store_group_count(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct channel_info *channel = dev_get_drvdata(dev);
+ unsigned long group_count;
+
+ sscanf(buf, "%lu", &group_count);
+ atomic_set(&channel->sys_group_count, group_count);
+
+ return count;
+}
+static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count);
+
+static ssize_t show_direction(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_info *channel = dev_get_drvdata(dev);
+ int len;
+
+ len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx"));
+ return len;
+}
+static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL);
+
+static ssize_t show_mmap_size(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct channel_info *channel = dev_get_drvdata(dev);
+ int len;
+ unsigned long mmap_size;
+ unsigned long group_pages;
+ unsigned long header_pages;
+ unsigned long total_group_pages;
+
+ /* FIXME: We do not have to add 1, if group_size a multiple of
+ PAGE_SIZE. */
+ group_pages = (channel->group_size / PAGE_SIZE) + 1;
+ header_pages = (channel->header_size / PAGE_SIZE) + 1;
+ total_group_pages = group_pages * channel->group_count;
+
+ mmap_size = (header_pages + total_group_pages) * PAGE_SIZE;
+ len = sprintf(buf, "%lu\n", mmap_size);
+ return len;
+}
+static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL);
+
+static struct device_attribute *poch_class_attrs[] = {
+ &dev_attr_block_size,
+ &dev_attr_group_size,
+ &dev_attr_group_count,
+ &dev_attr_dir,
+ &dev_attr_mmap_size,
+};
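+
+/*
+ * Illustrative userspace sketch (sysfs paths and values are assumptions,
+ * not part of the driver). The attributes above must be written before
+ * the channel node is opened, since channel_latch_attr() only samples
+ * them at open time.
+ *
+ * static int set_attr(const char *path, unsigned long val)
+ * {
+ * FILE *f = fopen(path, "w"); // needs <stdio.h>
+ * if (!f)
+ * return -1;
+ * fprintf(f, "%lu\n", val);
+ * return fclose(f);
+ * }
+ *
+ * // e.g. 4 KiB blocks, 1 MiB groups, 8 groups on channel 0
+ * set_attr(".../ch0/block_size", 4096);
+ * set_attr(".../ch0/group_size", 1UL << 20);
+ * set_attr(".../ch0/group_count", 8);
+ */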
+
+static void poch_channel_free_groups(struct channel_info *channel)
+{
+ unsigned long i;
+
+ for (i = 0; i < channel->group_count; i++) {
+ struct poch_group_info *group;
+ unsigned int order;
+
+ group = &channel->groups[i];
+ order = get_order(channel->group_size);
+ if (group->pg)
+ __free_pages(group->pg, order);
+ }
+}
+
+static int poch_channel_alloc_groups(struct channel_info *channel)
+{
+ unsigned long i;
+ unsigned long group_pages;
+ unsigned long header_pages;
+
+ group_pages = (channel->group_size / PAGE_SIZE) + 1;
+ header_pages = (channel->header_size / PAGE_SIZE) + 1;
+
+ for (i = 0; i < channel->group_count; i++) {
+ struct poch_group_info *group;
+ unsigned int order;
+ gfp_t gfp_mask;
+
+ group = &channel->groups[i];
+ order = get_order(channel->group_size);
+
+ /*
+ * __GFP_COMP is required here since we are going to
+ * perform non-linear mapping to userspace. For more
+ * information read the vm_insert_page() function
+ * comments.
+ */
+
+ gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO | __GFP_COMP;
+ group->pg = alloc_pages(gfp_mask, order);
+ if (!group->pg) {
+ poch_channel_free_groups(channel);
+ return -ENOMEM;
+ }
+
+ /* FIXME: This is the physical address not the bus
+ * address! This won't work in architectures that
+ * have an IOMMU. Can we use pci_map_single() for
+ * this?
+ */
+ group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;
+ group->user_offset =
+ (header_pages + (i * group_pages)) * PAGE_SIZE;
+
+ printk(KERN_INFO PFX "%ld: user_offset: 0x%lx dma: 0x%x\n", i,
+ group->user_offset, group->dma_addr);
+ }
+
+ return 0;
+}
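+
+/*
+ * A possible direction for the bus-address FIXME above, sketched under
+ * the assumption that a struct pci_dev pointer (pdev) is made reachable
+ * from the channel; the current structures do not carry one:
+ *
+ * group->dma_addr = pci_map_single(pdev, page_address(group->pg),
+ * channel->group_size,
+ * channel->dir == CHANNEL_DIR_RX ?
+ * PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+ *
+ * The matching pci_unmap_single() would then belong in
+ * poch_channel_free_groups().
+ */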
+
+static void channel_latch_attr(struct channel_info *channel)
+{
+ channel->group_count = atomic_read(&channel->sys_group_count);
+ channel->group_size = atomic_read(&channel->sys_group_size);
+ channel->block_size = atomic_read(&channel->sys_block_size);
+}
+
+/*
+ * Configure DMA group registers
+ */
+static void channel_dma_init(struct channel_info *channel)
+{
+ void __iomem *fpga = channel->fpga_iomem;
+ u32 group_regs_base;
+ u32 group_reg;
+ unsigned int page;
+ unsigned int group_in_page;
+ unsigned long i;
+ u32 block_size_reg;
+ u32 block_count_reg;
+ u32 group_count_reg;
+ u32 groups_per_int_reg;
+ u32 curr_pci_reg;
+
+ if (channel->chno == CHNO_RX_CHANNEL) {
+ group_regs_base = FPGA_RX_GROUP0_START_REG;
+ block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
+ block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
+ group_count_reg = FPGA_RX_GROUP_COUNT_REG;
+ groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
+ curr_pci_reg = FPGA_RX_CURR_PCI_REG;
+ } else {
+ group_regs_base = FPGA_TX_GROUP0_START_REG;
+ block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
+ block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
+ group_count_reg = FPGA_TX_GROUP_COUNT_REG;
+ groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
+ curr_pci_reg = FPGA_TX_CURR_PCI_REG;
+ }
+
+ printk(KERN_WARNING "block_size, group_size, group_count\n");
+ iowrite32(channel->block_size, fpga + block_size_reg);
+ iowrite32(channel->group_size / channel->block_size,
+ fpga + block_count_reg);
+ iowrite32(channel->group_count, fpga + group_count_reg);
+ /* FIXME: Hardcoded groups per int. Get it from sysfs? */
+ iowrite32(1, fpga + groups_per_int_reg);
+
+ /* Unlock PCI address? Not defined in the data sheet, but used
+ * in the reference code by Redrapids.
+ */
+ iowrite32(0x1, fpga + curr_pci_reg);
+
+ /* The DMA address page register is shared between the RX and
+ * TX channels, so acquire lock.
+ */
+ spin_lock(channel->iomem_lock);
+ for (i = 0; i < channel->group_count; i++) {
+ page = i / 32;
+ group_in_page = i % 32;
+
+ group_reg = group_regs_base + (group_in_page * 4);
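+ /* e.g. group 37 -> page 1, slot 5, register at base + 0x14 */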
+
+ iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
+ iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
+ }
+ for (i = 0; i < channel->group_count; i++) {
+ page = i / 32;
+ group_in_page = i % 32;
+
+ group_reg = group_regs_base + (group_in_page * 4);
+
+ iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
+ printk(KERN_INFO PFX "%ld: read dma_addr: 0x%x\n", i,
+ ioread32(fpga + group_reg));
+ }
+ spin_unlock(channel->iomem_lock);
+
+}
+
+static int poch_channel_alloc_header(struct channel_info *channel)
+{
+ struct poch_cbuf_header *header = channel->header;
+ unsigned long group_offset_size;
+ unsigned long tot_group_offsets_size;
+
+ /* Allocate memory to hold the header exported to userspace */
+ group_offset_size = sizeof(header->group_offsets[0]);
+ tot_group_offsets_size = group_offset_size * channel->group_count;
+ channel->header_size = sizeof(*header) + tot_group_offsets_size;
+ channel->header_pg = alloc_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(channel->header_size));
+ if (!channel->header_pg)
+ return -ENOMEM;
+
+ channel->header = page_address(channel->header_pg);
+
+ return 0;
+}
+
+static void poch_channel_free_header(struct channel_info *channel)
+{
+ unsigned int order;
+
+ order = get_order(channel->header_size);
+ __free_pages(channel->header_pg, order);
+}
+
+static void poch_channel_init_header(struct channel_info *channel)
+{
+ int i;
+ struct poch_group_info *groups;
+ s32 *group_offsets;
+
+ channel->header->group_size_bytes = channel->group_size;
+ channel->header->group_count = channel->group_count;
+
+ spin_lock_init(&channel->group_offsets_lock);
+
+ group_offsets = channel->header->group_offsets;
+ groups = channel->groups;
+
+ for (i = 0; i < channel->group_count; i++) {
+ if (channel->dir == CHANNEL_DIR_RX)
+ group_offsets[i] = -1;
+ else
+ group_offsets[i] = groups[i].user_offset;
+ }
+}
+
+static void __poch_channel_clear_counters(struct channel_info *channel)
+{
+ channel->counters.pll_unlock = 0;
+ channel->counters.fifo_empty = 0;
+ channel->counters.fifo_overflow = 0;
+}
+
+static int poch_channel_init(struct channel_info *channel,
+ struct poch_dev *poch_dev)
+{
+ struct pci_dev *pdev = poch_dev->pci_dev;
+ struct device *dev = &pdev->dev;
+ unsigned long alloc_size;
+ int ret;
+
+ printk(KERN_WARNING "channel_latch_attr\n");
+
+ channel_latch_attr(channel);
+
+ channel->transfer = 0;
+
+ /* Allocate memory to hold group information. */
+ alloc_size = channel->group_count * sizeof(struct poch_group_info);
+ channel->groups = kzalloc(alloc_size, GFP_KERNEL);
+ if (!channel->groups) {
+ dev_err(dev, "error allocating memory for group info\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ printk(KERN_WARNING "poch_channel_alloc_groups\n");
+
+ ret = poch_channel_alloc_groups(channel);
+ if (ret) {
+ dev_err(dev, "error allocating groups of order %d\n",
+ get_order(channel->group_size));
+ goto out_free_group_info;
+ }
+
+ ret = poch_channel_alloc_header(channel);
+ if (ret) {
+ dev_err(dev, "error allocating user space header\n");
+ goto out_free_groups;
+ }
+
+ channel->fpga_iomem = poch_dev->fpga_iomem;
+ channel->bridge_iomem = poch_dev->bridge_iomem;
+ channel->iomem_lock = &poch_dev->iomem_lock;
+ spin_lock_init(&channel->counters_lock);
+
+ __poch_channel_clear_counters(channel);
+
+ printk(KERN_WARNING "poch_channel_init_header\n");
+
+ poch_channel_init_header(channel);
+
+ return 0;
+
+ out_free_groups:
+ poch_channel_free_groups(channel);
+ out_free_group_info:
+ kfree(channel->groups);
+ out:
+ return ret;
+}
+
+static int poch_wait_fpga_prog(void __iomem *bridge)
+{
+ unsigned long total_wait;
+ const unsigned long wait_period = 100;
+ /* FIXME: Get the actual timeout */
+ const unsigned long prog_timeo = 10000; /* 10 Seconds */
+ u32 card_power;
+
+ printk(KERN_WARNING "poch_wait_fpg_prog\n");
+
+ printk(KERN_INFO PFX "programming fpga ...\n");
+ total_wait = 0;
+ while (1) {
+ msleep(wait_period);
+ total_wait += wait_period;
+
+ card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
+ if (card_power & BRIDGE_CARD_POWER_PROG_DONE) {
+ printk(KERN_INFO PFX "programming done\n");
+ return 0;
+ }
+ if (total_wait > prog_timeo) {
+ printk(KERN_ERR PFX
+ "timed out while programming FPGA\n");
+ return -EIO;
+ }
+ }
+}
+
+static void poch_card_power_off(struct poch_dev *poch_dev)
+{
+ void __iomem *bridge = poch_dev->bridge_iomem;
+ u32 card_power;
+
+ iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
+ iowrite32(0, bridge + BRIDGE_DMA_GO_REG);
+
+ card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
+ iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
+ bridge + BRIDGE_CARD_POWER_REG);
+}
+
+enum clk_src {
+ CLK_SRC_ON_BOARD,
+ CLK_SRC_EXTERNAL
+};
+
+static void poch_card_clock_on(void __iomem *fpga)
+{
+ /* FIXME: Get this data through sysfs? */
+ enum clk_src clk_src = CLK_SRC_ON_BOARD;
+
+ if (clk_src == CLK_SRC_ON_BOARD) {
+ iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN,
+ fpga + FPGA_ADC_CLOCK_CTL_REG);
+ } else if (clk_src == CLK_SRC_EXTERNAL) {
+ iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK,
+ fpga + FPGA_ADC_CLOCK_CTL_REG);
+ }
+}
+
+static int poch_card_power_on(struct poch_dev *poch_dev)
+{
+ void __iomem *bridge = poch_dev->bridge_iomem;
+ void __iomem *fpga = poch_dev->fpga_iomem;
+
+ iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);
+
+ if (poch_wait_fpga_prog(bridge) != 0) {
+ poch_card_power_off(poch_dev);
+ return -EIO;
+ }
+
+ poch_card_clock_on(fpga);
+
+ /* Sync to new clock, reset state machines, set DMA mode. */
+ iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
+ | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);
+
+ /* FIXME: The time required for sync. needs to be tuned. */
+ msleep(1000);
+
+ return 0;
+}
+
+static void poch_channel_analog_on(struct channel_info *channel)
+{
+ void __iomem *fpga = channel->fpga_iomem;
+ u32 adc_dac_en;
+
+ spin_lock(channel->iomem_lock);
+ adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG);
+ switch (channel->chno) {
+ case CHNO_RX_CHANNEL:
+ iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF,
+ fpga + FPGA_ADC_DAC_EN_REG);
+ break;
+ case CHNO_TX_CHANNEL:
+ iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF,
+ fpga + FPGA_ADC_DAC_EN_REG);
+ break;
+ }
+ spin_unlock(channel->iomem_lock);
+}
+
+static int poch_open(struct inode *inode, struct file *filp)
+{
+ struct poch_dev *poch_dev;
+ struct channel_info *channel;
+ void __iomem *bridge;
+ void __iomem *fpga;
+ int chno;
+ int usage;
+ int ret;
+
+ poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
+ bridge = poch_dev->bridge_iomem;
+ fpga = poch_dev->fpga_iomem;
+
+ chno = iminor(inode) % poch_dev->nchannels;
+ channel = &poch_dev->channels[chno];
+
+ if (!atomic_dec_and_test(&channel->free)) {
+ atomic_inc(&channel->free);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ usage = atomic_inc_return(&poch_dev->usage);
+
+ printk(KERN_WARNING "poch_card_power_on\n");
+
+ if (usage == 1) {
+ ret = poch_card_power_on(poch_dev);
+ if (ret)
+ goto out_dec_usage;
+ }
+
+ printk(KERN_INFO "CardBus Bridge Revision: %x\n",
+ ioread32(bridge + BRIDGE_REV_REG));
+ printk(KERN_INFO "CardBus Interface Revision: %x\n",
+ ioread32(fpga + FPGA_IFACE_REV_REG));
+
+ channel->chno = chno;
+ filp->private_data = channel;
+
+ printk(KERN_WARNING "poch_channel_init\n");
+
+ ret = poch_channel_init(channel, poch_dev);
+ if (ret)
+ goto out_power_off;
+
+ poch_channel_analog_on(channel);
+
+ printk(KERN_WARNING "channel_dma_init\n");
+
+ channel_dma_init(channel);
+
+ printk(KERN_WARNING "poch_channel_analog_on\n");
+
+ if (usage == 1) {
+ printk(KERN_WARNING "setting up DMA\n");
+
+ /* Initialize DMA Controller. */
+ iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
+ iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);
+
+ ioread32(fpga + FPGA_DMA_INT_STAT_REG);
+ ioread32(fpga + FPGA_INT_STAT_REG);
+ ioread32(bridge + BRIDGE_INT_STAT_REG);
+
+ /* Initialize interrupts. FIXME: enable temperature
+ * handling. We are enabling both Tx and Rx channel
+ * interrupts here. Do we need to enable interrupts
+ * only for the current channel? Anyway, we won't get
+ * an interrupt unless the DMA is activated.
+ */
+ iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
+ iowrite32(FPGA_INT_DMA_CORE
+ | FPGA_INT_PLL_UNLOCKED
+ | FPGA_INT_TX_FF_EMPTY
+ | FPGA_INT_RX_FF_EMPTY
+ | FPGA_INT_TX_FF_OVRFLW
+ | FPGA_INT_RX_FF_OVRFLW,
+ fpga + FPGA_INT_MASK_REG);
+ iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
+ fpga + FPGA_DMA_INT_MASK_REG);
+ }
+
+ if (channel->dir == CHANNEL_DIR_TX) {
+ /* Flush TX FIFO and output data from cardbus. */
+ iowrite32(FPGA_TX_CTL_FIFO_FLUSH
+ | FPGA_TX_CTL_OUTPUT_CARDBUS,
+ fpga + FPGA_TX_CTL_REG);
+ }
+
+ atomic_inc(&channel->inited);
+
+ return 0;
+
+ out_power_off:
+ if (usage == 1)
+ poch_card_power_off(poch_dev);
+ out_dec_usage:
+ atomic_dec(&poch_dev->usage);
+ atomic_inc(&channel->free);
+ out:
+ return ret;
+}
+
+static int poch_release(struct inode *inode, struct file *filp)
+{
+ struct channel_info *channel = filp->private_data;
+ struct poch_dev *poch_dev;
+ int usage;
+
+ poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
+
+ usage = atomic_dec_return(&poch_dev->usage);
+ if (usage == 0) {
+ printk(KERN_WARNING "poch_card_power_off\n");
+ poch_card_power_off(poch_dev);
+ }
+
+ atomic_dec(&channel->inited);
+ poch_channel_free_header(channel);
+ poch_channel_free_groups(channel);
+ kfree(channel->groups);
+ atomic_inc(&channel->free);
+
+ return 0;
+}
+
+/*
+ * Map the header and the group buffers to user space.
+ */
+static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct channel_info *channel = filp->private_data;
+
+ unsigned long start;
+ unsigned long size;
+
+ unsigned long group_pages;
+ unsigned long header_pages;
+ unsigned long total_group_pages;
+
+ int pg_num;
+ struct page *pg;
+
+ int i;
+ int ret;
+
+ printk(KERN_WARNING "poch_mmap\n");
+
+ if (vma->vm_pgoff) {
+ printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);
+ return -EINVAL;
+ }
+
+ group_pages = (channel->group_size / PAGE_SIZE) + 1;
+ header_pages = (channel->header_size / PAGE_SIZE) + 1;
+ total_group_pages = group_pages * channel->group_count;
+
+ size = vma->vm_end - vma->vm_start;
+ if (size != (header_pages + total_group_pages) * PAGE_SIZE) {
+ printk(KERN_WARNING PFX "required %lu bytes\n", size);
+ return -EINVAL;
+ }
+
+ start = vma->vm_start;
+
+ /* FIXME: Cleanup required on failure? */
+ pg = channel->header_pg;
+ for (pg_num = 0; pg_num < header_pages; pg_num++, pg++) {
+ printk(KERN_DEBUG PFX "page_count: %d\n", page_count(pg));
+ printk(KERN_DEBUG PFX "%d: header: 0x%lx\n", pg_num, start);
+ ret = vm_insert_page(vma, start, pg);
+ if (ret) {
+ printk(KERN_DEBUG "vm_insert 1 failed at %lx\n", start);
+ return ret;
+ }
+ start += PAGE_SIZE;
+ }
+
+ for (i = 0; i < channel->group_count; i++) {
+ pg = channel->groups[i].pg;
+ for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
+ printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
+ pg_num, i, start);
+ ret = vm_insert_page(vma, start, pg);
+ if (ret) {
+ printk(KERN_DEBUG PFX
+ "vm_insert 2 failed at %d\n", pg_num);
+ return ret;
+ }
+ start += PAGE_SIZE;
+ }
+ }
+
+ return 0;
+}
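+
+/*
+ * Illustrative userspace sketch (device node name assumed, not part of
+ * the driver): the size to map is read from the "mmap_size" sysfs
+ * attribute and the whole region is mapped at page offset 0, header
+ * first, then the groups.
+ *
+ * unsigned long size;
+ * FILE *f = fopen(".../ch0/mmap_size", "r"); // needs <stdio.h>
+ * fscanf(f, "%lu", &size);
+ * fclose(f);
+ *
+ * int fd = open("/dev/poch0_ch0", O_RDWR); // needs <fcntl.h>, name assumed
+ * void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ * MAP_SHARED, fd, 0); // needs <sys/mman.h>
+ * struct poch_cbuf_header *hdr = buf;
+ */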
+
+/*
+ * Check whether there is some group that the user space has not
+ * consumed yet. When the user space consumes a group, it sets it to
+ * -1. Consuming means reading data in the RX case and filling a
+ * buffer in the TX case.
+ */
+static int poch_channel_available(struct channel_info *channel)
+{
+ int i;
+
+ spin_lock_irq(&channel->group_offsets_lock);
+
+ for (i = 0; i < channel->group_count; i++) {
+ if (channel->dir == CHANNEL_DIR_RX
+ && channel->header->group_offsets[i] == -1) {
+ spin_unlock_irq(&channel->group_offsets_lock);
+ return 1;
+ }
+
+ if (channel->dir == CHANNEL_DIR_TX
+ && channel->header->group_offsets[i] != -1) {
+ spin_unlock_irq(&channel->group_offsets_lock);
+ return 1;
+ }
+ }
+
+ spin_unlock_irq(&channel->group_offsets_lock);
+
+ return 0;
+}
+
+static unsigned int poch_poll(struct file *filp, poll_table *pt)
+{
+ struct channel_info *channel = filp->private_data;
+ unsigned int ret = 0;
+
+ poll_wait(filp, &channel->wq, pt);
+
+ if (poch_channel_available(channel)) {
+ if (channel->dir == CHANNEL_DIR_RX)
+ ret = POLLIN | POLLRDNORM;
+ else
+ ret = POLLOUT | POLLWRNORM;
+ }
+
+ return ret;
+}
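+
+/*
+ * Illustrative userspace sketch (fd assumed to be an open channel node):
+ * an RX channel reports POLLIN, a TX channel POLLOUT, as returned by
+ * poch_poll() above.
+ *
+ * struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
+ * if (poll(&pfd, 1, -1) > 0 && (pfd.revents & (POLLIN | POLLOUT)))
+ * ; // scan hdr->group_offsets[] and consume/fill groups
+ */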
+
+static int poch_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct channel_info *channel = filp->private_data;
+ void __iomem *fpga = channel->fpga_iomem;
+ void __iomem *bridge = channel->bridge_iomem;
+ void __user *argp = (void __user *)arg;
+ struct vm_area_struct *vms;
+ struct poch_counters counters;
+ int ret;
+
+ switch (cmd) {
+ case POCH_IOC_TRANSFER_START:
+ switch (channel->chno) {
+ case CHNO_TX_CHANNEL:
+ printk(KERN_INFO PFX "ioctl: Tx start\n");
+ iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG);
+ iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG);
+
+ /* FIXME: Does it make sense to do a DMA GO
+ * twice, once in Tx and once in Rx.
+ */
+ iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
+ break;
+ case CHNO_RX_CHANNEL:
+ printk(KERN_INFO PFX "ioctl: Rx start\n");
+ iowrite32(0x1, fpga + FPGA_RX_ARM_REG);
+ iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
+ break;
+ }
+ break;
+ case POCH_IOC_TRANSFER_STOP:
+ switch (channel->chno) {
+ case CHNO_TX_CHANNEL:
+ printk(KERN_INFO PFX "ioctl: Tx stop\n");
+ iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG);
+ iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG);
+ iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
+ break;
+ case CHNO_RX_CHANNEL:
+ printk(KERN_INFO PFX "ioctl: Rx stop\n");
+ iowrite32(0x0, fpga + FPGA_RX_ARM_REG);
+ iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
+ break;
+ }
+ break;
+ case POCH_IOC_GET_COUNTERS:
+ if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
+ return -EFAULT;
+
+ spin_lock_irq(&channel->counters_lock);
+ counters = channel->counters;
+ __poch_channel_clear_counters(channel);
+ spin_unlock_irq(&channel->counters_lock);
+
+ ret = copy_to_user(argp, &counters,
+ sizeof(struct poch_counters));
+ if (ret)
+ return -EFAULT;
+
+ break;
+ case POCH_IOC_SYNC_GROUP_FOR_USER:
+ case POCH_IOC_SYNC_GROUP_FOR_DEVICE:
+ vms = find_vma(current->mm, arg);
+ if (!vms)
+ /* Address not mapped. */
+ return -EINVAL;
+ if (vms->vm_file != filp)
+ /* Address mapped from different device/file. */
+ return -EINVAL;
+
+ flush_cache_range(vms, arg, arg + channel->group_size);
+ break;
+ }
+ return 0;
+}
+
+static struct file_operations poch_fops = {
+ .owner = THIS_MODULE,
+ .open = poch_open,
+ .release = poch_release,
+ .ioctl = poch_ioctl,
+ .poll = poch_poll,
+ .mmap = poch_mmap
+};
+
+static void poch_irq_dma(struct channel_info *channel)
+{
+ u32 prev_transfer;
+ u32 curr_transfer;
+ s32 groups_done;
+ unsigned long i, j;
+ struct poch_group_info *groups;
+ s32 *group_offsets;
+ u32 curr_group_reg;
+
+ if (!atomic_read(&channel->inited))
+ return;
+
+ prev_transfer = channel->transfer;
+
+ if (channel->chno == CHNO_RX_CHANNEL)
+ curr_group_reg = FPGA_RX_CURR_GROUP_REG;
+ else
+ curr_group_reg = FPGA_TX_CURR_GROUP_REG;
+
+ curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);
+
+ groups_done = curr_transfer - prev_transfer;
+ /* Check wrap over, and handle it. */
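+ /* e.g. group_count = 8, prev = 6, curr = 2: 2 - 6 = -4, +8 = 4 */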
+ if (groups_done <= 0)
+ groups_done += channel->group_count;
+
+ group_offsets = channel->header->group_offsets;
+ groups = channel->groups;
+
+ spin_lock(&channel->group_offsets_lock);
+
+ for (i = 0; i < groups_done; i++) {
+ j = (prev_transfer + i) % channel->group_count;
+ if (channel->dir == CHANNEL_DIR_RX)
+ group_offsets[j] = -1;
+ else
+ group_offsets[j] = groups[j].user_offset;
+ }
+
+ spin_unlock(&channel->group_offsets_lock);
+
+ channel->transfer = curr_transfer;
+
+ wake_up_interruptible(&channel->wq);
+}
+
+static irqreturn_t poch_irq_handler(int irq, void *p)
+{
+ struct poch_dev *poch_dev = p;
+ void __iomem *bridge = poch_dev->bridge_iomem;
+ void __iomem *fpga = poch_dev->fpga_iomem;
+ struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
+ struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
+ u32 bridge_stat;
+ u32 fpga_stat;
+ u32 dma_stat;
+
+ bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
+ fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
+ dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);
+
+ ioread32(fpga + FPGA_DMA_INT_STAT_REG);
+ ioread32(fpga + FPGA_INT_STAT_REG);
+ ioread32(bridge + BRIDGE_INT_STAT_REG);
+
+ if (bridge_stat & BRIDGE_INT_FPGA) {
+ if (fpga_stat & FPGA_INT_DMA_CORE) {
+ if (dma_stat & FPGA_DMA_INT_RX)
+ poch_irq_dma(channel_rx);
+ if (dma_stat & FPGA_DMA_INT_TX)
+ poch_irq_dma(channel_tx);
+ }
+ if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
+ channel_tx->counters.pll_unlock++;
+ channel_rx->counters.pll_unlock++;
+ if (printk_ratelimit())
+ printk(KERN_WARNING PFX "PLL unlocked\n");
+ }
+ if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
+ channel_tx->counters.fifo_empty++;
+ if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
+ channel_tx->counters.fifo_overflow++;
+ if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
+ channel_rx->counters.fifo_empty++;
+ if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
+ channel_rx->counters.fifo_overflow++;
+
+ /*
+ * FIXME: These errors should be notified through the
+ * poll interface as POLLERR.
+ */
+
+ /* Re-enable interrupts. */
+ iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id)
+{
+ int i, j;
+ int nattrs;
+ struct channel_info *channel;
+ dev_t devno;
+
+ if (poch_dev->dev == NULL)
+ return;
+
+ for (i = 0; i < poch_dev->nchannels; i++) {
+ channel = &poch_dev->channels[i];
+ devno = poch_first_dev + (id * poch_dev->nchannels) + i;
+
+ if (!channel->dev)
+ continue;
+
+ nattrs = ARRAY_SIZE(poch_class_attrs);
+ for (j = 0; j < nattrs; j++)
+ device_remove_file(channel->dev, poch_class_attrs[j]);
+
+ device_unregister(channel->dev);
+ }
+
+ device_unregister(poch_dev->dev);
+}
+
+static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
+ int id)
+{
+ struct device *dev = &poch_dev->pci_dev->dev;
+ int i, j;
+ int nattrs;
+ int ret;
+ struct channel_info *channel;
+ dev_t devno;
+
+ poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
+ MKDEV(0, 0), NULL, "poch%d", id);
+ if (IS_ERR(poch_dev->dev)) {
+ dev_err(dev, "error creating parent class device");
+ ret = PTR_ERR(poch_dev->dev);
+ poch_dev->dev = NULL;
+ return ret;
+ }
+
+ for (i = 0; i < poch_dev->nchannels; i++) {
+ channel = &poch_dev->channels[i];
+
+ devno = poch_first_dev + (id * poch_dev->nchannels) + i;
+ channel->dev = device_create(poch_cls, poch_dev->dev, devno,
+ NULL, "ch%d", i);
+ if (IS_ERR(channel->dev)) {
+ dev_err(dev, "error creating channel class device");
+ ret = PTR_ERR(channel->dev);
+ channel->dev = NULL;
+ poch_class_dev_unregister(poch_dev, id);
+ return ret;
+ }
+
+ dev_set_drvdata(channel->dev, channel);
+ nattrs = ARRAY_SIZE(poch_class_attrs);
+ for (j = 0; j < nattrs; j++) {
+ ret = device_create_file(channel->dev,
+ poch_class_attrs[j]);
+ if (ret) {
+ dev_err(dev, "error creating attribute file");
+ poch_class_dev_unregister(poch_dev, id);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int __devinit poch_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ struct device *dev = &pdev->dev;
+ struct poch_dev *poch_dev;
+ struct uio_info *uio;
+ int ret;
+ int id;
+ int i;
+
+ poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL);
+ if (!poch_dev) {
+ dev_err(dev, "error allocating priv. data memory\n");
+ return -ENOMEM;
+ }
+
+ poch_dev->pci_dev = pdev;
+ uio = &poch_dev->uio;
+
+ pci_set_drvdata(pdev, poch_dev);
+
+ spin_lock_init(&poch_dev->iomem_lock);
+
+ poch_dev->nchannels = POCH_NCHANNELS;
+ poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX;
+ poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX;
+
+ for (i = 0; i < poch_dev->nchannels; i++) {
+ init_waitqueue_head(&poch_dev->channels[i].wq);
+ atomic_set(&poch_dev->channels[i].free, 1);
+ atomic_set(&poch_dev->channels[i].inited, 0);
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "error enabling device\n");
+ goto out_free;
+ }
+
+ ret = pci_request_regions(pdev, "poch");
+ if (ret) {
+ dev_err(dev, "error requesting resources\n");
+ goto out_disable;
+ }
+
+ uio->mem[0].addr = pci_resource_start(pdev, 1);
+ if (!uio->mem[0].addr) {
+ dev_err(dev, "invalid BAR1\n");
+ ret = -ENODEV;
+ goto out_release;
+ }
+
+ uio->mem[0].size = pci_resource_len(pdev, 1);
+ uio->mem[0].memtype = UIO_MEM_PHYS;
+
+ uio->name = "poch";
+ uio->version = "0.0.1";
+ uio->irq = -1;
+ ret = uio_register_device(dev, uio);
+ if (ret) {
+ dev_err(dev, "error register UIO device: %d\n", ret);
+ goto out_release;
+ }
+
+ poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (poch_dev->bridge_iomem == NULL) {
+ dev_err(dev, "error mapping bridge (bar0) registers\n");
+ ret = -ENOMEM;
+ goto out_uio_unreg;
+ }
+
+ poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (poch_dev->fpga_iomem == NULL) {
+ dev_err(dev, "error mapping fpga (bar1) registers\n");
+ ret = -ENOMEM;
+ goto out_bar0_unmap;
+ }
+
+ ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED,
+ dev->bus_id, poch_dev);
+ if (ret) {
+ dev_err(dev, "error requesting IRQ %u\n", pdev->irq);
+ goto out_bar1_unmap;
+ }
+
+ if (!idr_pre_get(&poch_ids, GFP_KERNEL)) {
+ dev_err(dev, "error allocating memory ids\n");
+ ret = -ENOMEM;
+ goto out_free_irq;
+ }
+
+ ret = idr_get_new(&poch_ids, poch_dev, &id);
+ if (ret) {
+ dev_err(dev, "error allocating id\n");
+ goto out_free_irq;
+ }
+
+ if (id >= MAX_POCH_CARDS) {
+ dev_err(dev, "minors exhausted\n");
+ ret = -EBUSY;
+ goto out_idr_remove;
+ }
+
+ cdev_init(&poch_dev->cdev, &poch_fops);
+ poch_dev->cdev.owner = THIS_MODULE;
+ ret = cdev_add(&poch_dev->cdev,
+ poch_first_dev + (id * poch_dev->nchannels),
+ poch_dev->nchannels);
+ if (ret) {
+ dev_err(dev, "error register character device\n");
+ goto out_idr_remove;
+ }
+
+ ret = poch_class_dev_register(poch_dev, id);
+ if (ret)
+ goto out_cdev_del;
+
+ return 0;
+
+ out_cdev_del:
+ cdev_del(&poch_dev->cdev);
+ out_idr_remove:
+ idr_remove(&poch_ids, id);
+ out_free_irq:
+ free_irq(pdev->irq, poch_dev);
+ out_bar1_unmap:
+ iounmap(poch_dev->fpga_iomem);
+ out_bar0_unmap:
+ iounmap(poch_dev->bridge_iomem);
+ out_uio_unreg:
+ uio_unregister_device(uio);
+ out_release:
+ pci_release_regions(pdev);
+ out_disable:
+ pci_disable_device(pdev);
+ out_free:
+ kfree(poch_dev);
+ return ret;
+}
+
+/*
+ * FIXME: We are yet to handle the hot unplug case.
+ */
+static void poch_pci_remove(struct pci_dev *pdev)
+{
+ struct poch_dev *poch_dev = pci_get_drvdata(pdev);
+ struct uio_info *uio = &poch_dev->uio;
+ unsigned int minor = MINOR(poch_dev->cdev.dev);
+ unsigned int id = minor / poch_dev->nchannels;
+
+ /* FIXME: unmap fpga_iomem and bridge_iomem */
+
+ poch_class_dev_unregister(poch_dev, id);
+ cdev_del(&poch_dev->cdev);
+ idr_remove(&poch_ids, id);
+ free_irq(pdev->irq, poch_dev);
+ uio_unregister_device(uio);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ iounmap(uio->mem[0].internal_addr);
+
+ kfree(poch_dev);
+}
+
+static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
+ PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
+ { 0, }
+};
+
+static struct pci_driver poch_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = poch_pci_ids,
+ .probe = poch_pci_probe,
+ .remove = poch_pci_remove,
+};
+
+static int __init poch_init_module(void)
+{
+ int ret = 0;
+
+ ret = alloc_chrdev_region(&poch_first_dev, 0,
+ MAX_POCH_DEVICES, DRV_NAME);
+ if (ret) {
+ printk(KERN_ERR PFX "error allocating device no.");
+ return ret;
+ }
+
+ poch_cls = class_create(THIS_MODULE, "pocketchange");
+ if (IS_ERR(poch_cls)) {
+ ret = PTR_ERR(poch_cls);
+ goto out_unreg_chrdev;
+ }
+
+ ret = pci_register_driver(&poch_pci_driver);
+ if (ret) {
+ printk(KERN_ERR PFX "error register PCI device");
+ goto out_class_destroy;
+ }
+
+ return 0;
+
+ out_class_destroy:
+ class_destroy(poch_cls);
+
+ out_unreg_chrdev:
+ unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
+
+ return ret;
+}
+
+static void __exit poch_exit_module(void)
+{
+ pci_unregister_driver(&poch_pci_driver);
+ class_destroy(poch_cls);
+ unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
+}
+
+module_init(poch_init_module);
+module_exit(poch_exit_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/poch/poch.h b/drivers/staging/poch/poch.h
new file mode 100644
index 00000000000..51a2d145798
--- /dev/null
+++ b/drivers/staging/poch/poch.h
@@ -0,0 +1,29 @@
+/*
+ * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
+ *
+ * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
+ *
+ * Part of userspace API. Should be moved to a header file in
+ * include/linux for final version.
+ *
+ */
+struct poch_cbuf_header {
+ __s32 group_size_bytes;
+ __s32 group_count;
+ __s32 group_offsets[0];
+};
+
+struct poch_counters {
+ __u32 fifo_empty;
+ __u32 fifo_overflow;
+ __u32 pll_unlock;
+};
+
+#define POCH_IOC_NUM '9'
+
+#define POCH_IOC_TRANSFER_START _IO(POCH_IOC_NUM, 0)
+#define POCH_IOC_TRANSFER_STOP _IO(POCH_IOC_NUM, 1)
+#define POCH_IOC_GET_COUNTERS _IOR(POCH_IOC_NUM, 2, \
+ struct poch_counters)
+#define POCH_IOC_SYNC_GROUP_FOR_USER _IO(POCH_IOC_NUM, 3)
+#define POCH_IOC_SYNC_GROUP_FOR_DEVICE _IO(POCH_IOC_NUM, 4)
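+
+/*
+ * Illustrative usage sketch (device node name assumed, not part of the
+ * interface definition):
+ *
+ * struct poch_counters c;
+ * int fd = open("/dev/poch0_ch0", O_RDWR);
+ *
+ * ioctl(fd, POCH_IOC_TRANSFER_START, 0);
+ * // ... stream data through the mmap()ed circular buffer ...
+ * if (ioctl(fd, POCH_IOC_GET_COUNTERS, &c) == 0)
+ * printf("fifo overflows: %u\n", c.fifo_overflow);
+ * ioctl(fd, POCH_IOC_TRANSFER_STOP, 0);
+ */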
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index b61ac4b2db9..8fa9490b3e2 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -54,7 +54,6 @@
* IS-NIC driver.
*/
-#include <linux/version.h>
#define SLIC_DUMP_ENABLED 0
#define KLUDGE_FOR_4GB_BOUNDARY 1
@@ -96,17 +95,9 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/pci.h>
#include <linux/dma-mapping.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
-#include <linux/skbuff.h>
-#include <linux/string.h>
#include <asm/unaligned.h>
#include <linux/ethtool.h>
@@ -275,7 +266,6 @@ static void slic_dbg_register_trace(struct adapter *adapter,
card->reg_value[i], card->reg_valueh[i]);
}
}
-}
#endif
static void slic_init_adapter(struct net_device *netdev,
@@ -606,6 +596,7 @@ static void __devexit slic_entry_remove(struct pci_dev *pcidev)
uint mmio_len = 0;
struct adapter *adapter = (struct adapter *) netdev_priv(dev);
struct sliccard *card;
+ struct mcast_address *mcaddr, *mlist;
ASSERT(adapter);
DBG_MSG("slicoss: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
@@ -625,6 +616,13 @@ static void __devexit slic_entry_remove(struct pci_dev *pcidev)
DBG_MSG("slicoss: %s iounmap dev->base_addr[%x]\n", __func__,
(uint) dev->base_addr);
iounmap((void __iomem *)dev->base_addr);
+ /* free multicast addresses */
+ mlist = adapter->mcastaddrs;
+ while (mlist) {
+ mcaddr = mlist;
+ mlist = mlist->next;
+ kfree(mcaddr);
+ }
ASSERT(adapter->card);
card = adapter->card;
ASSERT(card->adapters_allocated);
diff --git a/drivers/staging/sxg/README b/drivers/staging/sxg/README
index 4d1ddbe4c33..d514d184880 100644
--- a/drivers/staging/sxg/README
+++ b/drivers/staging/sxg/README
@@ -7,6 +7,7 @@ TODO:
- remove wrappers
- checkpatch.pl cleanups
- new functionality that the card needs
+ - remove reliance on x86
Please send patches to:
Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/sxg/sxg.c b/drivers/staging/sxg/sxg.c
index 6ccbee875ab..5272a18e204 100644
--- a/drivers/staging/sxg/sxg.c
+++ b/drivers/staging/sxg/sxg.c
@@ -112,12 +112,16 @@ static bool sxg_mac_filter(p_adapter_t adapter,
static struct net_device_stats *sxg_get_stats(p_net_device dev);
#endif
+#define XXXTODO 0
+
+#if XXXTODO
static int sxg_mac_set_address(p_net_device dev, void *ptr);
+static void sxg_mcast_set_list(p_net_device dev);
+#endif
static void sxg_adapter_set_hwaddr(p_adapter_t adapter);
static void sxg_unmap_mmio_space(p_adapter_t adapter);
-static void sxg_mcast_set_mask(p_adapter_t adapter);
static int sxg_initialize_adapter(p_adapter_t adapter);
static void sxg_stock_rcv_buffers(p_adapter_t adapter);
@@ -132,9 +136,6 @@ static int sxg_write_mdio_reg(p_adapter_t adapter,
u32 DevAddr, u32 RegAddr, u32 Value);
static int sxg_read_mdio_reg(p_adapter_t adapter,
u32 DevAddr, u32 RegAddr, u32 *pValue);
-static void sxg_mcast_set_list(p_net_device dev);
-
-#define XXXTODO 0
static unsigned int sxg_first_init = 1;
static char *sxg_banner =
@@ -202,7 +203,7 @@ static void sxg_init_driver(void)
{
if (sxg_first_init) {
DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
- __FUNCTION__, jiffies);
+ __func__, jiffies);
sxg_first_init = 0;
spin_lock_init(&sxg_global.driver_lock);
}
@@ -223,7 +224,7 @@ static void sxg_dbg_macaddrs(p_adapter_t adapter)
return;
}
-// SXG Globals
+/* SXG Globals */
static SXG_DRIVER SxgDriver;
#ifdef ATKDBG
@@ -250,7 +251,7 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
u32 ThisSectionSize;
u32 *Instruction = NULL;
u32 BaseAddress, AddressOffset, Address;
-// u32 Failure;
+/* u32 Failure; */
u32 ValueRead;
u32 i;
u32 numSections = 0;
@@ -259,10 +260,10 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
adapter, 0, 0, 0);
- DBG_ERROR("sxg: %s ENTER\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENTER\n", __func__);
switch (UcodeSel) {
- case SXG_UCODE_SAHARA: // Sahara operational ucode
+ case SXG_UCODE_SAHARA: /* Sahara operational ucode */
numSections = SNumSections;
for (i = 0; i < numSections; i++) {
sectionSize[i] = SSectionSize[i];
@@ -276,13 +277,13 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
}
DBG_ERROR("sxg: RESET THE CARD\n");
- // First, reset the card
+ /* First, reset the card */
WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
- // Download each section of the microcode as specified in
- // its download file. The *download.c file is generated using
- // the saharaobjtoc facility which converts the metastep .obj
- // file to a .c file which contains a two dimentional array.
+ /* Download each section of the microcode as specified in */
+ /* its download file. The *download.c file is generated using */
+ /* the saharaobjtoc facility which converts the metastep .obj */
+ /* file to a .c file which contains a two dimensional array. */
for (Section = 0; Section < numSections; Section++) {
DBG_ERROR("sxg: SECTION # %d\n", Section);
switch (UcodeSel) {
@@ -294,35 +295,35 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
break;
}
BaseAddress = sectionStart[Section];
- ThisSectionSize = sectionSize[Section] / 12; // Size in instructions
+ ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */
for (AddressOffset = 0; AddressOffset < ThisSectionSize;
AddressOffset++) {
Address = BaseAddress + AddressOffset;
ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
- // Write instruction bits 31 - 0
+ /* Write instruction bits 31 - 0 */
WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
- // Write instruction bits 63-32
+ /* Write instruction bits 63-32 */
WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
FLUSH);
- // Write instruction bits 95-64
+ /* Write instruction bits 95-64 */
WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
FLUSH);
- // Write instruction address with the WRITE bit set
+ /* Write instruction address with the WRITE bit set */
WRITE_REG(HwRegs->UcodeAddr,
(Address | MICROCODE_ADDRESS_WRITE), FLUSH);
- // Sahara bug in the ucode download logic - the write to DataLow
- // for the next instruction could get corrupted. To avoid this,
- // write to DataLow again for this instruction (which may get
- // corrupted, but it doesn't matter), then increment the address
- // and write the data for the next instruction to DataLow. That
- // write should succeed.
+ /* Sahara bug in the ucode download logic - the write to DataLow */
+ /* for the next instruction could get corrupted. To avoid this, */
+ /* write to DataLow again for this instruction (which may get */
+ /* corrupted, but it doesn't matter), then increment the address */
+ /* and write the data for the next instruction to DataLow. That */
+ /* write should succeed. */
WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
- // Advance 3 u32S to start of next instruction
+ /* Advance 3 u32S to start of next instruction */
Instruction += 3;
}
}
- // Now repeat the entire operation reading the instruction back and
- // checking for parity errors
+ /* Now repeat the entire operation reading the instruction back and */
+ /* checking for parity errors */
for (Section = 0; Section < numSections; Section++) {
DBG_ERROR("sxg: check SECTION # %d\n", Section);
switch (UcodeSel) {
@@ -334,74 +335,74 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
break;
}
BaseAddress = sectionStart[Section];
- ThisSectionSize = sectionSize[Section] / 12; // Size in instructions
+ ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */
for (AddressOffset = 0; AddressOffset < ThisSectionSize;
AddressOffset++) {
Address = BaseAddress + AddressOffset;
- // Write the address with the READ bit set
+ /* Write the address with the READ bit set */
WRITE_REG(HwRegs->UcodeAddr,
(Address | MICROCODE_ADDRESS_READ), FLUSH);
- // Read it back and check parity bit.
+ /* Read it back and check parity bit. */
READ_REG(HwRegs->UcodeAddr, ValueRead);
if (ValueRead & MICROCODE_ADDRESS_PARITY) {
DBG_ERROR("sxg: %s PARITY ERROR\n",
- __FUNCTION__);
+ __func__);
- return (FALSE); // Parity error
+ return (FALSE); /* Parity error */
}
ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
- // Read the instruction back and compare
+ /* Read the instruction back and compare */
READ_REG(HwRegs->UcodeDataLow, ValueRead);
if (ValueRead != *Instruction) {
DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
- __FUNCTION__);
- return (FALSE); // Miscompare
+ __func__);
+ return (FALSE); /* Miscompare */
}
READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
if (ValueRead != *(Instruction + 1)) {
DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
- __FUNCTION__);
- return (FALSE); // Miscompare
+ __func__);
+ return (FALSE); /* Miscompare */
}
READ_REG(HwRegs->UcodeDataHigh, ValueRead);
if (ValueRead != *(Instruction + 2)) {
DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
- __FUNCTION__);
- return (FALSE); // Miscompare
+ __func__);
+ return (FALSE); /* Miscompare */
}
- // Advance 3 u32S to start of next instruction
+ /* Advance 3 u32S to start of next instruction */
Instruction += 3;
}
}
- // Everything OK, Go.
+ /* Everything OK, Go. */
WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
- // Poll the CardUp register to wait for microcode to initialize
- // Give up after 10,000 attemps (500ms).
+ /* Poll the CardUp register to wait for microcode to initialize */
+ /* Give up after 10,000 attempts (500ms). */
for (i = 0; i < 10000; i++) {
udelay(50);
READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
if (ValueRead == 0xCAFE) {
- DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __func__);
break;
}
}
if (i == 10000) {
- DBG_ERROR("sxg: %s TIMEOUT\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s TIMEOUT\n", __func__);
- return (FALSE); // Timeout
+ return (FALSE); /* Timeout */
}
- // Now write the LoadSync register. This is used to
- // synchronize with the card so it can scribble on the memory
- // that contained 0xCAFE from the "CardUp" step above
+ /* Now write the LoadSync register. This is used to */
+ /* synchronize with the card so it can scribble on the memory */
+ /* that contained 0xCAFE from the "CardUp" step above */
if (UcodeSel == SXG_UCODE_SAHARA) {
WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
adapter, 0, 0, 0);
- DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s EXIT\n", __func__);
return (TRUE);
}
@@ -420,29 +421,29 @@ static int sxg_allocate_resources(p_adapter_t adapter)
int status;
u32 i;
u32 RssIds, IsrCount;
-// PSXG_XMT_RING XmtRing;
-// PSXG_RCV_RING RcvRing;
+/* PSXG_XMT_RING XmtRing; */
+/* PSXG_RCV_RING RcvRing; */
- DBG_ERROR("%s ENTER\n", __FUNCTION__);
+ DBG_ERROR("%s ENTER\n", __func__);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
adapter, 0, 0, 0);
- // Windows tells us how many CPUs it plans to use for
- // RSS
+ /* Windows tells us how many CPUs it plans to use for */
+ /* RSS */
RssIds = SXG_RSS_CPU_COUNT(adapter);
IsrCount = adapter->MsiEnabled ? RssIds : 1;
- DBG_ERROR("%s Setup the spinlocks\n", __FUNCTION__);
+ DBG_ERROR("%s Setup the spinlocks\n", __func__);
- // Allocate spinlocks and initialize listheads first.
+ /* Allocate spinlocks and initialize listheads first. */
spin_lock_init(&adapter->RcvQLock);
spin_lock_init(&adapter->SglQLock);
spin_lock_init(&adapter->XmtZeroLock);
spin_lock_init(&adapter->Bit64RegLock);
spin_lock_init(&adapter->AdapterLock);
- DBG_ERROR("%s Setup the lists\n", __FUNCTION__);
+ DBG_ERROR("%s Setup the lists\n", __func__);
InitializeListHead(&adapter->FreeRcvBuffers);
InitializeListHead(&adapter->FreeRcvBlocks);
@@ -450,39 +451,39 @@ static int sxg_allocate_resources(p_adapter_t adapter)
InitializeListHead(&adapter->FreeSglBuffers);
InitializeListHead(&adapter->AllSglBuffers);
- // Mark these basic allocations done. This flags essentially
- // tells the SxgFreeResources routine that it can grab spinlocks
- // and reference listheads.
+ /* Mark these basic allocations done. This flags essentially */
+ /* tells the SxgFreeResources routine that it can grab spinlocks */
+ /* and reference listheads. */
adapter->BasicAllocations = TRUE;
- // Main allocation loop. Start with the maximum supported by
- // the microcode and back off if memory allocation
- // fails. If we hit a minimum, fail.
+ /* Main allocation loop. Start with the maximum supported by */
+ /* the microcode and back off if memory allocation */
+ /* fails. If we hit a minimum, fail. */
for (;;) {
- DBG_ERROR("%s Allocate XmtRings size[%lx]\n", __FUNCTION__,
- (sizeof(SXG_XMT_RING) * 1));
+ DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
+ (unsigned int)(sizeof(SXG_XMT_RING) * 1));
- // Start with big items first - receive and transmit rings. At the moment
- // I'm going to keep the ring size fixed and adjust the number of
- // TCBs if we fail. Later we might consider reducing the ring size as well..
+ /* Start with big items first - receive and transmit rings. At the moment */
+ /* I'm going to keep the ring size fixed and adjust the number of */
+ /* TCBs if we fail. Later we might consider reducing the ring size as well.. */
adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
sizeof(SXG_XMT_RING) *
1,
&adapter->PXmtRings);
- DBG_ERROR("%s XmtRings[%p]\n", __FUNCTION__, adapter->XmtRings);
+ DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);
if (!adapter->XmtRings) {
goto per_tcb_allocation_failed;
}
memset(adapter->XmtRings, 0, sizeof(SXG_XMT_RING) * 1);
- DBG_ERROR("%s Allocate RcvRings size[%lx]\n", __FUNCTION__,
- (sizeof(SXG_RCV_RING) * 1));
+ DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
+ (unsigned int)(sizeof(SXG_RCV_RING) * 1));
adapter->RcvRings =
pci_alloc_consistent(adapter->pcidev,
sizeof(SXG_RCV_RING) * 1,
&adapter->PRcvRings);
- DBG_ERROR("%s RcvRings[%p]\n", __FUNCTION__, adapter->RcvRings);
+ DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
if (!adapter->RcvRings) {
goto per_tcb_allocation_failed;
}
@@ -490,7 +491,7 @@ static int sxg_allocate_resources(p_adapter_t adapter)
break;
per_tcb_allocation_failed:
- // an allocation failed. Free any successful allocations.
+ /* an allocation failed. Free any successful allocations. */
if (adapter->XmtRings) {
pci_free_consistent(adapter->pcidev,
sizeof(SXG_XMT_RING) * 4096,
@@ -505,22 +506,22 @@ static int sxg_allocate_resources(p_adapter_t adapter)
adapter->PRcvRings);
adapter->RcvRings = NULL;
}
- // Loop around and try again....
+ /* Loop around and try again.... */
}
- DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __FUNCTION__);
- // Initialize rcv zero and xmt zero rings
+ DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
+ /* Initialize rcv zero and xmt zero rings */
SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
- // Sanity check receive data structure format
+ /* Sanity check receive data structure format */
ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
(adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
ASSERT(sizeof(SXG_RCV_DESCRIPTOR_BLOCK) ==
SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
- // Allocate receive data buffers. We allocate a block of buffers and
- // a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
+ /* Allocate receive data buffers. We allocate a block of buffers and */
+ /* a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK */
for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
sxg_allocate_buffer_memory(adapter,
@@ -528,8 +529,8 @@ static int sxg_allocate_resources(p_adapter_t adapter)
ReceiveBufferSize),
SXG_BUFFER_TYPE_RCV);
}
- // NBL resource allocation can fail in the 'AllocateComplete' routine, which
- // doesn't return status. Make sure we got the number of buffers we requested
+ /* NBL resource allocation can fail in the 'AllocateComplete' routine, which */
+ /* doesn't return status. Make sure we got the number of buffers we requested */
if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
@@ -537,17 +538,17 @@ static int sxg_allocate_resources(p_adapter_t adapter)
return (STATUS_RESOURCES);
}
- DBG_ERROR("%s Allocate EventRings size[%lx]\n", __FUNCTION__,
- (sizeof(SXG_EVENT_RING) * RssIds));
+ DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
+ (unsigned int)(sizeof(SXG_EVENT_RING) * RssIds));
- // Allocate event queues.
+ /* Allocate event queues. */
adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
sizeof(SXG_EVENT_RING) *
RssIds,
&adapter->PEventRings);
if (!adapter->EventRings) {
- // Caller will call SxgFreeAdapter to clean up above allocations
+ /* Caller will call SxgFreeAdapter to clean up above allocations */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
adapter, SXG_MAX_ENTRIES, 0, 0);
status = STATUS_RESOURCES;
@@ -555,12 +556,12 @@ static int sxg_allocate_resources(p_adapter_t adapter)
}
memset(adapter->EventRings, 0, sizeof(SXG_EVENT_RING) * RssIds);
- DBG_ERROR("%s Allocate ISR size[%x]\n", __FUNCTION__, IsrCount);
- // Allocate ISR
+ DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
+ /* Allocate ISR */
adapter->Isr = pci_alloc_consistent(adapter->pcidev,
IsrCount, &adapter->PIsr);
if (!adapter->Isr) {
- // Caller will call SxgFreeAdapter to clean up above allocations
+ /* Caller will call SxgFreeAdapter to clean up above allocations */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
adapter, SXG_MAX_ENTRIES, 0, 0);
status = STATUS_RESOURCES;
@@ -568,10 +569,10 @@ static int sxg_allocate_resources(p_adapter_t adapter)
}
memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
- DBG_ERROR("%s Allocate shared XMT ring zero index location size[%lx]\n",
- __FUNCTION__, sizeof(u32));
+ DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
+ __func__, (unsigned int)sizeof(u32));
- // Allocate shared XMT ring zero index location
+ /* Allocate shared XMT ring zero index location */
adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
sizeof(u32),
&adapter->
@@ -587,7 +588,7 @@ static int sxg_allocate_resources(p_adapter_t adapter)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
adapter, SXG_MAX_ENTRIES, 0, 0);
- DBG_ERROR("%s EXIT\n", __FUNCTION__);
+ DBG_ERROR("%s EXIT\n", __func__);
return (STATUS_SUCCESS);
}
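The hunk above, like the rest of this patch, swaps the GCC-specific __FUNCTION__ identifier for the C99-standard __func__ and casts sizeof results to unsigned int so they match the %x conversion. A minimal standalone sketch of both idioms (purely illustrative, not driver code); %zx is the other standard-conforming way to print a size_t:

#include <stdio.h>

struct example_ring { char pad[4096]; };

static void show_ring_size(void)
{
	/* __func__ is defined by C99; __FUNCTION__ is a GCC extension. */
	printf("%s: ring size %zx\n", __func__, sizeof(struct example_ring));

	/* The patch's approach: cast to a fixed-width type and use %x. */
	printf("%s: ring size %x\n", __func__,
	       (unsigned int)sizeof(struct example_ring));
}

int main(void)
{
	show_ring_size();
	return 0;
}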
@@ -606,17 +607,17 @@ static void sxg_config_pci(struct pci_dev *pcidev)
u16 new_command;
pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
- DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __FUNCTION__, pci_command);
- // Set the command register
- new_command = pci_command | (PCI_COMMAND_MEMORY | // Memory Space Enable
- PCI_COMMAND_MASTER | // Bus master enable
- PCI_COMMAND_INVALIDATE | // Memory write and invalidate
- PCI_COMMAND_PARITY | // Parity error response
- PCI_COMMAND_SERR | // System ERR
- PCI_COMMAND_FAST_BACK); // Fast back-to-back
+ DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command);
+ /* Set the command register */
+ new_command = pci_command | (PCI_COMMAND_MEMORY | /* Memory Space Enable */
+ PCI_COMMAND_MASTER | /* Bus master enable */
+ PCI_COMMAND_INVALIDATE | /* Memory write and invalidate */
+ PCI_COMMAND_PARITY | /* Parity error response */
+ PCI_COMMAND_SERR | /* System ERR */
+ PCI_COMMAND_FAST_BACK); /* Fast back-to-back */
if (pci_command != new_command) {
DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
- __FUNCTION__, pci_command, new_command);
+ __func__, pci_command, new_command);
pci_write_config_word(pcidev, PCI_COMMAND, new_command);
}
}
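sxg_config_pci() above is a plain read-modify-write of PCI_COMMAND that only writes the register back when the value actually changes. A hedged sketch of the same pattern using the standard config-space accessors (the bit selection is illustrative; pci_set_master() is the usual helper when only bus mastering is needed):

static void example_enable_pci_features(struct pci_dev *pdev)
{
	u16 cmd, new_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);

	/* Request memory decoding, bus mastering and error reporting. */
	new_cmd = cmd | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		  PCI_COMMAND_PARITY | PCI_COMMAND_SERR;

	/* Skip the config write if nothing would change. */
	if (new_cmd != cmd)
		pci_write_config_word(pdev, PCI_COMMAND, new_cmd);
}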
@@ -634,9 +635,9 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
ulong mmio_len = 0;
DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
- __FUNCTION__, jiffies, smp_processor_id());
+ __func__, jiffies, smp_processor_id());
- // Initialize trace buffer
+ /* Initialize trace buffer */
#ifdef ATKDBG
SxgTraceBuffer = &LSxgTraceBuffer;
SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
@@ -701,11 +702,11 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
mmio_start, mmio_len);
memmapped_ioaddr = ioremap(mmio_start, mmio_len);
- DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
memmapped_ioaddr);
if (!memmapped_ioaddr) {
DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
- __FUNCTION__, mmio_len, mmio_start);
+ __func__, mmio_len, mmio_start);
goto err_out_free_mmio_region;
}
@@ -727,7 +728,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
memmapped_ioaddr);
if (!memmapped_ioaddr) {
DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
- __FUNCTION__, mmio_len, mmio_start);
+ __func__, mmio_len, mmio_start);
goto err_out_free_mmio_region;
}
@@ -738,13 +739,13 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
adapter->UcodeRegs = (void *)memmapped_ioaddr;
adapter->State = SXG_STATE_INITIALIZING;
- // Maintain a list of all adapters anchored by
- // the global SxgDriver structure.
+ /* Maintain a list of all adapters anchored by */
+ /* the global SxgDriver structure. */
adapter->Next = SxgDriver.Adapters;
SxgDriver.Adapters = adapter;
adapter->AdapterID = ++SxgDriver.AdapterID;
- // Initialize CRC table used to determine multicast hash
+ /* Initialize CRC table used to determine multicast hash */
sxg_mcast_init_crc32();
adapter->JumboEnabled = FALSE;
@@ -757,18 +758,18 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
}
-// status = SXG_READ_EEPROM(adapter);
-// if (!status) {
-// goto sxg_init_bad;
-// }
+/* status = SXG_READ_EEPROM(adapter); */
+/* if (!status) { */
+/* goto sxg_init_bad; */
+/* } */
- DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
sxg_config_pci(pcidev);
- DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);
- DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
sxg_init_driver();
- DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);
adapter->vendid = pci_tbl_entry->vendor;
adapter->devid = pci_tbl_entry->device;
@@ -780,23 +781,23 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
adapter->irq = pcidev->irq;
adapter->next_netdevice = head_netdevice;
head_netdevice = netdev;
-// adapter->chipid = chip_idx;
- adapter->port = 0; //adapter->functionnumber;
+/* adapter->chipid = chip_idx; */
+ adapter->port = 0; /*adapter->functionnumber; */
adapter->cardindex = adapter->port;
- // Allocate memory and other resources
- DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __FUNCTION__);
+ /* Allocate memory and other resources */
+ DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
status = sxg_allocate_resources(adapter);
DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
- __FUNCTION__, status);
+ __func__, status);
if (status != STATUS_SUCCESS) {
goto err_out_unmap;
}
- DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
- __FUNCTION__);
+ __func__);
sxg_adapter_set_hwaddr(adapter);
} else {
adapter->state = ADAPT_FAIL;
@@ -819,7 +820,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
#endif
strcpy(netdev->name, "eth%d");
-// strcpy(netdev->name, pci_name(pcidev));
+/* strcpy(netdev->name, pci_name(pcidev)); */
if ((err = register_netdev(netdev))) {
DBG_ERROR("Cannot register net device, aborting. %s\n",
netdev->name);
@@ -832,11 +833,11 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
netdev->dev_addr[4], netdev->dev_addr[5]);
-//sxg_init_bad:
+/*sxg_init_bad: */
ASSERT(status == FALSE);
-// sxg_free_adapter(adapter);
+/* sxg_free_adapter(adapter); */
- DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
status, jiffies, smp_processor_id());
return status;
@@ -848,7 +849,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
err_out_exit_sxg_probe:
- DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __FUNCTION__, jiffies,
+ DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
smp_processor_id());
return -ENODEV;
@@ -874,12 +875,12 @@ static void sxg_disable_interrupt(p_adapter_t adapter)
{
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
adapter, adapter->InterruptsEnabled, 0, 0);
- // For now, RSS is disabled with line based interrupts
+ /* For now, RSS is disabled with line based interrupts */
ASSERT(adapter->RssEnabled == FALSE);
ASSERT(adapter->MsiEnabled == FALSE);
- //
- // Turn off interrupts by writing to the icr register.
- //
+ /* */
+ /* Turn off interrupts by writing to the icr register. */
+ /* */
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
adapter->InterruptsEnabled = 0;
@@ -905,12 +906,12 @@ static void sxg_enable_interrupt(p_adapter_t adapter)
{
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
adapter, adapter->InterruptsEnabled, 0, 0);
- // For now, RSS is disabled with line based interrupts
+ /* For now, RSS is disabled with line based interrupts */
ASSERT(adapter->RssEnabled == FALSE);
ASSERT(adapter->MsiEnabled == FALSE);
- //
- // Turn on interrupts by writing to the icr register.
- //
+ /* */
+ /* Turn on interrupts by writing to the icr register. */
+ /* */
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
adapter->InterruptsEnabled = 1;
@@ -935,29 +936,29 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
{
p_net_device dev = (p_net_device) dev_id;
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
-// u32 CpuMask = 0, i;
+/* u32 CpuMask = 0, i; */
adapter->Stats.NumInts++;
if (adapter->Isr[0] == 0) {
- // The SLIC driver used to experience a number of spurious interrupts
- // due to the delay associated with the masking of the interrupt
- // (we'd bounce back in here). If we see that again with Sahara,
- // add a READ_REG of the Icr register after the WRITE_REG below.
+ /* The SLIC driver used to experience a number of spurious interrupts */
+ /* due to the delay associated with the masking of the interrupt */
+ /* (we'd bounce back in here). If we see that again with Sahara, */
+ /* add a READ_REG of the Icr register after the WRITE_REG below. */
adapter->Stats.FalseInts++;
return IRQ_NONE;
}
- //
- // Move the Isr contents and clear the value in
- // shared memory, and mask interrupts
- //
+ /* */
+ /* Move the Isr contents and clear the value in */
+ /* shared memory, and mask interrupts */
+ /* */
adapter->IsrCopy[0] = adapter->Isr[0];
adapter->Isr[0] = 0;
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
-// ASSERT(adapter->IsrDpcsPending == 0);
-#if XXXTODO // RSS Stuff
- // If RSS is enabled and the ISR specifies
- // SXG_ISR_EVENT, then schedule DPC's
- // based on event queues.
+/* ASSERT(adapter->IsrDpcsPending == 0); */
+#if XXXTODO /* RSS Stuff */
+ /* If RSS is enabled and the ISR specifies */
+ /* SXG_ISR_EVENT, then schedule DPC's */
+ /* based on event queues. */
if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
for (i = 0;
i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
@@ -973,8 +974,8 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
}
}
}
- // Now, either schedule the CPUs specified by the CpuMask,
- // or queue default
+ /* Now, either schedule the CPUs specified by the CpuMask, */
+ /* or queue default */
if (CpuMask) {
*QueueDefault = FALSE;
} else {
@@ -983,9 +984,9 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
}
*TargetCpus = CpuMask;
#endif
- //
- // There are no DPCs in Linux, so call the handler now
- //
+ /* */
+ /* There are no DPCs in Linux, so call the handler now */
+ /* */
sxg_handle_interrupt(adapter);
return IRQ_HANDLED;
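sxg_isr() above relies on a shared-memory handshake with the card: a zero ISR word means the interrupt is not ours; otherwise the word is copied aside, cleared, and further interrupts are masked through the Icr register before the events are handled inline (there is no DPC stage on Linux). A simplified sketch of that copy-clear-mask shape; the adapter structure, register and helper below are invented for illustration only:

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_adapter *ad = dev_id;

	/* The NIC writes this shared word; zero means "not our interrupt". */
	if (ad->shared_isr[0] == 0)
		return IRQ_NONE;

	/* Snapshot and clear the shared word, then mask further interrupts. */
	ad->isr_copy[0] = ad->shared_isr[0];
	ad->shared_isr[0] = 0;
	writel(EXAMPLE_ICR_MASK, ad->icr_reg);

	/* Handle the queued events directly in interrupt context. */
	example_handle_events(ad);
	return IRQ_HANDLED;
}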
@@ -993,7 +994,7 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
static void sxg_handle_interrupt(p_adapter_t adapter)
{
-// unsigned char RssId = 0;
+/* unsigned char RssId = 0; */
u32 NewIsr;
if (adapter->Stats.RcvNoBuffer < 5) {
@@ -1002,32 +1003,32 @@ static void sxg_handle_interrupt(p_adapter_t adapter)
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
adapter, adapter->IsrCopy[0], 0, 0);
- // For now, RSS is disabled with line based interrupts
+ /* For now, RSS is disabled with line based interrupts */
ASSERT(adapter->RssEnabled == FALSE);
ASSERT(adapter->MsiEnabled == FALSE);
ASSERT(adapter->IsrCopy[0]);
-/////////////////////////////
+/*/////////////////////////// */
- // Always process the event queue.
+ /* Always process the event queue. */
sxg_process_event_queue(adapter,
(adapter->RssEnabled ? /*RssId */ 0 : 0));
-#if XXXTODO // RSS stuff
+#if XXXTODO /* RSS stuff */
if (--adapter->IsrDpcsPending) {
- // We're done.
+ /* We're done. */
ASSERT(adapter->RssEnabled);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
adapter, 0, 0, 0);
return;
}
#endif
- //
- // Last (or only) DPC processes the ISR and clears the interrupt.
- //
+ /* */
+ /* Last (or only) DPC processes the ISR and clears the interrupt. */
+ /* */
NewIsr = sxg_process_isr(adapter, 0);
- //
- // Reenable interrupts
- //
+ /* */
+ /* Reenable interrupts */
+ /* */
adapter->IsrCopy[0] = 0;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
adapter, NewIsr, 0, 0);
@@ -1063,75 +1064,75 @@ static int sxg_process_isr(p_adapter_t adapter, u32 MessageId)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
adapter, Isr, 0, 0);
- // Error
+ /* Error */
if (Isr & SXG_ISR_ERR) {
if (Isr & SXG_ISR_PDQF) {
adapter->Stats.PdqFull++;
- DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __FUNCTION__);
+ DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
}
- // No host buffer
+ /* No host buffer */
if (Isr & SXG_ISR_RMISS) {
- // There is a bunch of code in the SLIC driver which
- // attempts to process more receive events per DPC
- // if we start to fall behind. We'll probably
- // need to do something similar here, but hold
- // off for now. I don't want to make the code more
- // complicated than strictly needed.
+ /* There is a bunch of code in the SLIC driver which */
+ /* attempts to process more receive events per DPC */
+ /* if we start to fall behind. We'll probably */
+ /* need to do something similar here, but hold */
+ /* off for now. I don't want to make the code more */
+ /* complicated than strictly needed. */
adapter->Stats.RcvNoBuffer++;
if (adapter->Stats.RcvNoBuffer < 5) {
DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
- __FUNCTION__);
+ __func__);
}
}
- // Card crash
+ /* Card crash */
if (Isr & SXG_ISR_DEAD) {
- // Set aside the crash info and set the adapter state to RESET
+ /* Set aside the crash info and set the adapter state to RESET */
adapter->CrashCpu =
(unsigned char)((Isr & SXG_ISR_CPU) >>
SXG_ISR_CPU_SHIFT);
adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
adapter->Dead = TRUE;
- DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __FUNCTION__,
+ DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
adapter->CrashLocation, adapter->CrashCpu);
}
- // Event ring full
+ /* Event ring full */
if (Isr & SXG_ISR_ERFULL) {
- // Same issue as RMISS, really. This means the
- // host is falling behind the card. Need to increase
- // event ring size, process more events per interrupt,
- // and/or reduce/remove interrupt aggregation.
+ /* Same issue as RMISS, really. This means the */
+ /* host is falling behind the card. Need to increase */
+ /* event ring size, process more events per interrupt, */
+ /* and/or reduce/remove interrupt aggregation. */
adapter->Stats.EventRingFull++;
DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
- __FUNCTION__);
+ __func__);
}
- // Transmit drop - no DRAM buffers or XMT error
+ /* Transmit drop - no DRAM buffers or XMT error */
if (Isr & SXG_ISR_XDROP) {
adapter->Stats.XmtDrops++;
adapter->Stats.XmtErrors++;
- DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __FUNCTION__);
+ DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
}
}
- // Slowpath send completions
+ /* Slowpath send completions */
if (Isr & SXG_ISR_SPSEND) {
sxg_complete_slow_send(adapter);
}
- // Dump
+ /* Dump */
if (Isr & SXG_ISR_UPC) {
- ASSERT(adapter->DumpCmdRunning); // Maybe change when debug is added..
+ ASSERT(adapter->DumpCmdRunning); /* Maybe change when debug is added.. */
adapter->DumpCmdRunning = FALSE;
}
- // Link event
+ /* Link event */
if (Isr & SXG_ISR_LINK) {
sxg_link_event(adapter);
}
- // Debug - breakpoint hit
+ /* Debug - breakpoint hit */
if (Isr & SXG_ISR_BREAK) {
- // At the moment AGDB isn't written to support interactive
- // debug sessions. When it is, this interrupt will be used
- // to signal AGDB that it has hit a breakpoint. For now, ASSERT.
+ /* At the moment AGDB isn't written to support interactive */
+ /* debug sessions. When it is, this interrupt will be used */
+ /* to signal AGDB that it has hit a breakpoint. For now, ASSERT. */
ASSERT(0);
}
- // Heartbeat response
+ /* Heartbeat response */
if (Isr & SXG_ISR_PING) {
adapter->PingOutstanding = FALSE;
}
@@ -1171,39 +1172,39 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
(adapter->State == SXG_STATE_PAUSING) ||
(adapter->State == SXG_STATE_PAUSED) ||
(adapter->State == SXG_STATE_HALTING));
- // We may still have unprocessed events on the queue if
- // the card crashed. Don't process them.
+ /* We may still have unprocessed events on the queue if */
+ /* the card crashed. Don't process them. */
if (adapter->Dead) {
return (0);
}
- // In theory there should only be a single processor that
- // accesses this queue, and only at interrupt-DPC time. So
- // we shouldn't need a lock for any of this.
+ /* In theory there should only be a single processor that */
+ /* accesses this queue, and only at interrupt-DPC time. So */
+ /* we shouldn't need a lock for any of this. */
while (Event->Status & EVENT_STATUS_VALID) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
Event, Event->Code, Event->Status,
adapter->NextEvent);
switch (Event->Code) {
case EVENT_CODE_BUFFERS:
- ASSERT(!(Event->CommandIndex & 0xFF00)); // SXG_RING_INFO Head & Tail == unsigned char
- //
+ ASSERT(!(Event->CommandIndex & 0xFF00)); /* SXG_RING_INFO Head & Tail == unsigned char */
+ /* */
sxg_complete_descriptor_blocks(adapter,
Event->CommandIndex);
- //
+ /* */
break;
case EVENT_CODE_SLOWRCV:
--adapter->RcvBuffersOnCard;
if ((skb = sxg_slow_receive(adapter, Event))) {
u32 rx_bytes;
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
- // Add it to our indication list
+ /* Add it to our indication list */
SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
IndicationList, num_skbs);
- // In Linux, we just pass up each skb to the protocol above at this point,
- // there is no capability of an indication list.
+ /* In Linux, we just pass up each skb to the protocol above at this point, */
+ /* there is no capability of an indication list. */
#else
-// CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE);
- rx_bytes = Event->Length; // (rcvbuf->length & IRHDDR_FLEN_MSK);
+/* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
+ rx_bytes = Event->Length; /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
skb_put(skb, rx_bytes);
adapter->stats.rx_packets++;
adapter->stats.rx_bytes += rx_bytes;
@@ -1218,43 +1219,43 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
break;
default:
DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
- __FUNCTION__, Event->Code);
-// ASSERT(0);
+ __func__, Event->Code);
+/* ASSERT(0); */
}
- // See if we need to restock card receive buffers.
- // There are two things to note here:
- // First - This test is not SMP safe. The
- // adapter->BuffersOnCard field is protected via atomic interlocked calls, but
- // we do not protect it with respect to these tests. The only way to do that
- // is with a lock, and I don't want to grab a lock every time we adjust the
- // BuffersOnCard count. Instead, we allow the buffer replenishment to be off
- // once in a while. The worst that can happen is the card is given one
- // more-or-less descriptor block than the arbitrary value we've chosen.
- // No big deal
- // In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted.
- // Second - We expect this test to rarely evaluate to true. We attempt to
- // refill descriptor blocks as they are returned to us
- // (sxg_complete_descriptor_blocks), so The only time this should evaluate
- // to true is when sxg_complete_descriptor_blocks failed to allocate
- // receive buffers.
+ /* See if we need to restock card receive buffers. */
+ /* There are two things to note here: */
+ /* First - This test is not SMP safe. The */
+ /* adapter->BuffersOnCard field is protected via atomic interlocked calls, but */
+ /* we do not protect it with respect to these tests. The only way to do that */
+ /* is with a lock, and I don't want to grab a lock every time we adjust the */
+ /* BuffersOnCard count. Instead, we allow the buffer replenishment to be off */
+ /* once in a while. The worst that can happen is the card is given one */
+ /* more-or-less descriptor block than the arbitrary value we've chosen. */
+ /* No big deal */
+ /* In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted. */
+ /* Second - We expect this test to rarely evaluate to true. We attempt to */
+ /* refill descriptor blocks as they are returned to us */
+ /* (sxg_complete_descriptor_blocks), so the only time this should evaluate */
+ /* to true is when sxg_complete_descriptor_blocks failed to allocate */
+ /* receive buffers. */
if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
sxg_stock_rcv_buffers(adapter);
}
- // It's more efficient to just set this to zero.
- // But clearing the top bit saves potential debug info...
+ /* It's more efficient to just set this to zero. */
+ /* But clearing the top bit saves potential debug info... */
Event->Status &= ~EVENT_STATUS_VALID;
- // Advanct to the next event
+ /* Advance to the next event */
SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
Event = &EventRing->Ring[adapter->NextEvent[RssId]];
EventsProcessed++;
if (EventsProcessed == EVENT_RING_BATCH) {
- // Release a batch of events back to the card
+ /* Release a batch of events back to the card */
WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
EVENT_RING_BATCH, FALSE);
EventsProcessed = 0;
- // If we've processed our batch limit, break out of the
- // loop and return SXG_ISR_EVENT to arrange for us to
- // be called again
+ /* If we've processed our batch limit, break out of the */
+ /* loop and return SXG_ISR_EVENT to arrange for us to */
+ /* be called again */
if (Batches++ == EVENT_BATCH_LIMIT) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
TRACE_NOISY, "EvtLimit", Batches,
@@ -1265,14 +1266,14 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
}
}
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
- //
- // Indicate any received dumb-nic frames
- //
+ /* */
+ /* Indicate any received dumb-nic frames */
+ /* */
SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
#endif
- //
- // Release events back to the card.
- //
+ /* */
+ /* Release events back to the card. */
+ /* */
if (EventsProcessed) {
WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
EventsProcessed, FALSE);
@@ -1299,43 +1300,43 @@ static void sxg_complete_slow_send(p_adapter_t adapter)
u32 *ContextType;
PSXG_CMD XmtCmd;
- // NOTE - This lock is dropped and regrabbed in this loop.
- // This means two different processors can both be running
- // through this loop. Be *very* careful.
+ /* NOTE - This lock is dropped and regrabbed in this loop. */
+ /* This means two different processors can both be running */
+ /* through this loop. Be *very* careful. */
spin_lock(&adapter->XmtZeroLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) {
- // Locate the current Cmd (ring descriptor entry), and
- // associated SGL, and advance the tail
+ /* Locate the current Cmd (ring descriptor entry), and */
+ /* associated SGL, and advance the tail */
SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
ASSERT(ContextType);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
- // Clear the SGL field.
+ /* Clear the SGL field. */
XmtCmd->Sgl = 0;
switch (*ContextType) {
case SXG_SGL_DUMB:
{
struct sk_buff *skb;
- // Dumb-nic send. Command context is the dumb-nic SGL
+ /* Dumb-nic send. Command context is the dumb-nic SGL */
skb = (struct sk_buff *)ContextType;
- // Complete the send
+ /* Complete the send */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
TRACE_IMPORTANT, "DmSndCmp", skb, 0,
0, 0);
ASSERT(adapter->Stats.XmtQLen);
- adapter->Stats.XmtQLen--; // within XmtZeroLock
+ adapter->Stats.XmtQLen--; /* within XmtZeroLock */
adapter->Stats.XmtOk++;
- // Now drop the lock and complete the send back to
- // Microsoft. We need to drop the lock because
- // Microsoft can come back with a chimney send, which
- // results in a double trip in SxgTcpOuput
+ /* Now drop the lock and complete the send back to */
+ /* Microsoft. We need to drop the lock because */
+ /* Microsoft can come back with a chimney send, which */
+ /* results in a double trip in SxgTcpOutput */
spin_unlock(&adapter->XmtZeroLock);
SXG_COMPLETE_DUMB_SEND(adapter, skb);
- // and reacquire..
+ /* and reacquire.. */
spin_lock(&adapter->XmtZeroLock);
}
break;
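The comment at the top of this loop warns that XmtZeroLock is dropped around SXG_COMPLETE_DUMB_SEND and reacquired afterwards, because the completion can come back into the transmit path (the "double trip" the comment mentions) and would contend for the same lock; two CPUs can therefore be inside the loop at once. A minimal sketch of that drop-and-reacquire pattern (the lock, list and completion helper are placeholders):

spin_lock(&ad->xmit_lock);
while ((entry = example_next_completed_entry(ad)) != NULL) {
	/*
	 * The completion callback can re-enter the transmit path and
	 * try to take xmit_lock itself, so release it around the call.
	 */
	spin_unlock(&ad->xmit_lock);
	example_complete_send(ad, entry);
	spin_lock(&ad->xmit_lock);
}
spin_unlock(&ad->xmit_lock);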
@@ -1371,7 +1372,7 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
RcvDataBufferHdr, RcvDataBufferHdr->State,
RcvDataBufferHdr->VirtualAddress);
- // Drop rcv frames in non-running state
+ /* Drop rcv frames in non-running state */
switch (adapter->State) {
case SXG_STATE_RUNNING:
break;
@@ -1384,12 +1385,12 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
goto drop;
}
- // Change buffer state to UPSTREAM
+ /* Change buffer state to UPSTREAM */
RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
if (Event->Status & EVENT_STATUS_RCVERR) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
Event, Event->Status, Event->HostHandle, 0);
- // XXXTODO - Remove this print later
+ /* XXXTODO - Remove this print later */
DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
sxg_process_rcv_error(adapter, *(u32 *)
@@ -1397,8 +1398,8 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
(RcvDataBufferHdr));
goto drop;
}
-#if XXXTODO // VLAN stuff
- // If there's a VLAN tag, extract it and validate it
+#if XXXTODO /* VLAN stuff */
+ /* If there's a VLAN tag, extract it and validate it */
if (((p_ether_header) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->
EtherType == ETHERTYPE_VLAN) {
if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
@@ -1411,9 +1412,9 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
}
}
#endif
- //
- // Dumb-nic frame. See if it passes our mac filter and update stats
- //
+ /* */
+ /* Dumb-nic frame. See if it passes our mac filter and update stats */
+ /* */
if (!sxg_mac_filter(adapter, (p_ether_header)
SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
Event->Length)) {
@@ -1427,9 +1428,9 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
RcvDataBufferHdr, Packet, Event->Length, 0);
- //
- // Lastly adjust the receive packet length.
- //
+ /* */
+ /* Lastly adjust the receive packet length. */
+ /* */
SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
return (Packet);
@@ -1541,7 +1542,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
if (SXG_MULTICAST_PACKET(EtherHdr)) {
if (SXG_BROADCAST_PACKET(EtherHdr)) {
- // broadcast
+ /* broadcast */
if (adapter->MacFilter & MAC_BCAST) {
adapter->Stats.DumbRcvBcastPkts++;
adapter->Stats.DumbRcvBcastBytes += length;
@@ -1550,7 +1551,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
return (TRUE);
}
} else {
- // multicast
+ /* multicast */
if (adapter->MacFilter & MAC_ALLMCAST) {
adapter->Stats.DumbRcvMcastPkts++;
adapter->Stats.DumbRcvMcastBytes += length;
@@ -1580,9 +1581,9 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
}
}
} else if (adapter->MacFilter & MAC_DIRECTED) {
- // Not broadcast or multicast. Must be directed at us or
- // the card is in promiscuous mode. Either way, consider it
- // ours if MAC_DIRECTED is set
+ /* Not broadcast or multicast. Must be directed at us or */
+ /* the card is in promiscuous mode. Either way, consider it */
+ /* ours if MAC_DIRECTED is set */
adapter->Stats.DumbRcvUcastPkts++;
adapter->Stats.DumbRcvUcastBytes += length;
adapter->Stats.DumbRcvPkts++;
@@ -1590,7 +1591,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
return (TRUE);
}
if (adapter->MacFilter & MAC_PROMISC) {
- // Whatever it is, keep it.
+ /* Whatever it is, keep it. */
adapter->Stats.DumbRcvPkts++;
adapter->Stats.DumbRcvBytes += length;
return (TRUE);
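sxg_mac_filter() above classifies each frame as broadcast, multicast or directed and accepts it only when the matching MacFilter bit is set, with MAC_PROMISC accepting everything. A condensed sketch of that decision order (flag values and the broadcast test are illustrative, and the driver's per-address multicast list walk is omitted):

#include <stdbool.h>

#define EX_MAC_BCAST    0x01
#define EX_MAC_MCAST    0x02
#define EX_MAC_ALLMCAST 0x04
#define EX_MAC_DIRECTED 0x08
#define EX_MAC_PROMISC  0x10

static bool example_accept_frame(unsigned int filter, const unsigned char *dst)
{
	bool mcast = dst[0] & 0x01;                       /* group bit set */
	bool bcast = (dst[0] & dst[1] & dst[2] &
		      dst[3] & dst[4] & dst[5]) == 0xff;  /* all-ones address */

	if (bcast)
		return filter & EX_MAC_BCAST;
	if (mcast)
		return filter & (EX_MAC_ALLMCAST | EX_MAC_MCAST);
	if (filter & EX_MAC_DIRECTED)
		return true;    /* unicast: ours, or the NIC is promiscuous */
	return filter & EX_MAC_PROMISC;
}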
@@ -1606,7 +1607,7 @@ static int sxg_register_interrupt(p_adapter_t adapter)
DBG_ERROR
("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
- __FUNCTION__, adapter, adapter->netdev->irq, NR_IRQS);
+ __func__, adapter, adapter->netdev->irq, NR_IRQS);
spin_unlock_irqrestore(&sxg_global.driver_lock,
sxg_global.flags);
@@ -1625,18 +1626,18 @@ static int sxg_register_interrupt(p_adapter_t adapter)
}
adapter->intrregistered = 1;
adapter->IntRegistered = TRUE;
- // Disable RSS with line-based interrupts
+ /* Disable RSS with line-based interrupts */
adapter->MsiEnabled = FALSE;
adapter->RssEnabled = FALSE;
DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
- __FUNCTION__, adapter, adapter->netdev->irq);
+ __func__, adapter, adapter->netdev->irq);
}
return (STATUS_SUCCESS);
}
static void sxg_deregister_interrupt(p_adapter_t adapter)
{
- DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __FUNCTION__, adapter);
+ DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
#if XXXTODO
slic_init_cleanup(adapter);
#endif
@@ -1651,7 +1652,7 @@ static void sxg_deregister_interrupt(p_adapter_t adapter)
adapter->rcv_broadcasts = 0;
adapter->rcv_multicasts = 0;
adapter->rcv_unicasts = 0;
- DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s EXIT\n", __func__);
}
/*
@@ -1666,7 +1667,7 @@ static int sxg_if_init(p_adapter_t adapter)
int status = 0;
DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d:%d] flags[%x]\n",
- __FUNCTION__, adapter->netdev->name,
+ __func__, adapter->netdev->name,
adapter->queues_initialized, adapter->state,
adapter->linkstate, dev->flags);
@@ -1680,7 +1681,7 @@ static int sxg_if_init(p_adapter_t adapter)
adapter->devflags_prev = dev->flags;
adapter->macopts = MAC_DIRECTED;
if (dev->flags) {
- DBG_ERROR("sxg: %s (%s) Set MAC options: ", __FUNCTION__,
+ DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
adapter->netdev->name);
if (dev->flags & IFF_BROADCAST) {
adapter->macopts |= MAC_BCAST;
@@ -1713,7 +1714,7 @@ static int sxg_if_init(p_adapter_t adapter)
/*
* clear any pending events, then enable interrupts
*/
- DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
return (STATUS_SUCCESS);
}
@@ -1724,11 +1725,11 @@ static int sxg_entry_open(p_net_device dev)
int status;
ASSERT(adapter);
- DBG_ERROR("sxg: %s adapter->activated[%d]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
adapter->activated);
DBG_ERROR
("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
- __FUNCTION__, adapter->netdev->name, jiffies, smp_processor_id(),
+ __func__, adapter->netdev->name, jiffies, smp_processor_id(),
adapter->netdev, adapter, adapter->port);
netif_stop_queue(adapter->netdev);
@@ -1738,16 +1739,16 @@ static int sxg_entry_open(p_net_device dev)
sxg_global.num_sxg_ports_active++;
adapter->activated = 1;
}
- // Initialize the adapter
- DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __FUNCTION__);
+ /* Initialize the adapter */
+ DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
status = sxg_initialize_adapter(adapter);
DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
- __FUNCTION__, status);
+ __func__, status);
if (status == STATUS_SUCCESS) {
- DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
status = sxg_if_init(adapter);
- DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
status);
}
@@ -1760,12 +1761,12 @@ static int sxg_entry_open(p_net_device dev)
sxg_global.flags);
return (status);
}
- DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
- // Enable interrupts
+ /* Enable interrupts */
SXG_ENABLE_ALL_INTERRUPTS(adapter);
- DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s EXIT\n", __func__);
spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
return STATUS_SUCCESS;
@@ -1779,27 +1780,27 @@ static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
ASSERT(adapter);
- DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __FUNCTION__, dev,
+ DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
adapter);
sxg_deregister_interrupt(adapter);
sxg_unmap_mmio_space(adapter);
- DBG_ERROR("sxg: %s unregister_netdev\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s unregister_netdev\n", __func__);
unregister_netdev(dev);
mmio_start = pci_resource_start(pcidev, 0);
mmio_len = pci_resource_len(pcidev, 0);
- DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
mmio_start, mmio_len);
release_mem_region(mmio_start, mmio_len);
- DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __func__,
(unsigned int)dev->base_addr);
iounmap((char *)dev->base_addr);
- DBG_ERROR("sxg: %s deallocate device\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s deallocate device\n", __func__);
kfree(dev);
- DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s EXIT\n", __func__);
}
static int sxg_entry_halt(p_net_device dev)
@@ -1807,17 +1808,17 @@ static int sxg_entry_halt(p_net_device dev)
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
- DBG_ERROR("sxg: %s (%s) ENTER\n", __FUNCTION__, dev->name);
+ DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);
netif_stop_queue(adapter->netdev);
adapter->state = ADAPT_DOWN;
adapter->linkstate = LINK_DOWN;
adapter->devflags_prev = 0;
DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
- __FUNCTION__, dev->name, adapter, adapter->state);
+ __func__, dev->name, adapter, adapter->state);
- DBG_ERROR("sxg: %s (%s) EXIT\n", __FUNCTION__, dev->name);
- DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
+ DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
+ DBG_ERROR("sxg: %s EXIT\n", __func__);
spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
return (STATUS_SUCCESS);
}
@@ -1825,11 +1826,11 @@ static int sxg_entry_halt(p_net_device dev)
static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
{
ASSERT(rq);
-// DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __FUNCTION__, cmd, rq, dev);
+/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev); */
switch (cmd) {
case SIOCSLICSETINTAGG:
{
-// p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
+/* p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); */
u32 data[7];
u32 intagg;
@@ -1841,12 +1842,12 @@ static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
intagg = data[0];
printk(KERN_EMERG
"%s: set interrupt aggregation to %d\n",
- __FUNCTION__, intagg);
+ __func__, intagg);
return 0;
}
default:
-// DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __FUNCTION__, cmd);
+/* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
return -EOPNOTSUPP;
}
return 0;
@@ -1870,15 +1871,15 @@ static int sxg_send_packets(struct sk_buff *skb, p_net_device dev)
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
u32 status = STATUS_SUCCESS;
- DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __func__,
skb);
- // Check the adapter state
+ /* Check the adapter state */
switch (adapter->State) {
case SXG_STATE_INITIALIZING:
case SXG_STATE_HALTED:
case SXG_STATE_SHUTDOWN:
- ASSERT(0); // unexpected
- // fall through
+ ASSERT(0); /* unexpected */
+ /* fall through */
case SXG_STATE_RESETTING:
case SXG_STATE_SLEEP:
case SXG_STATE_BOOTDIAG:
@@ -1898,23 +1899,23 @@ static int sxg_send_packets(struct sk_buff *skb, p_net_device dev)
if (status != STATUS_SUCCESS) {
goto xmit_fail;
}
- // send a packet
+ /* send a packet */
status = sxg_transmit_packet(adapter, skb);
if (status == STATUS_SUCCESS) {
goto xmit_done;
}
xmit_fail:
- // reject & complete all the packets if they cant be sent
+ /* reject & complete all the packets if they cant be sent */
if (status != STATUS_SUCCESS) {
#if XXXTODO
-// sxg_send_packets_fail(adapter, skb, status);
+/* sxg_send_packets_fail(adapter, skb, status); */
#else
SXG_DROP_DUMB_SEND(adapter, skb);
adapter->stats.tx_dropped++;
#endif
}
- DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
status);
xmit_done:
@@ -1940,12 +1941,12 @@ static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb)
void *SglBuffer;
u32 SglBufferLength;
- // The vast majority of work is done in the shared
- // sxg_dumb_sgl routine.
+ /* The vast majority of work is done in the shared */
+ /* sxg_dumb_sgl routine. */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
adapter, skb, 0, 0);
- // Allocate a SGL buffer
+ /* Allocate a SGL buffer */
SXG_GET_SGL_BUFFER(adapter, SxgSgl);
if (!SxgSgl) {
adapter->Stats.NoSglBuf++;
@@ -1963,9 +1964,9 @@ static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb)
SxgSgl->DumbPacket = skb;
pSgl = NULL;
- // Call the common sxg_dumb_sgl routine to complete the send.
+ /* Call the common sxg_dumb_sgl routine to complete the send. */
sxg_dumb_sgl(pSgl, SxgSgl);
- // Return success sxg_dumb_sgl (or something later) will complete it.
+ /* Return success sxg_dumb_sgl (or something later) will complete it. */
return (STATUS_SUCCESS);
}
@@ -1983,39 +1984,39 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
{
p_adapter_t adapter = SxgSgl->adapter;
struct sk_buff *skb = SxgSgl->DumbPacket;
- // For now, all dumb-nic sends go on RSS queue zero
+ /* For now, all dumb-nic sends go on RSS queue zero */
PSXG_XMT_RING XmtRing = &adapter->XmtRings[0];
PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo;
PSXG_CMD XmtCmd = NULL;
-// u32 Index = 0;
+/* u32 Index = 0; */
u32 DataLength = skb->len;
-// unsigned int BufLen;
-// u32 SglOffset;
+/* unsigned int BufLen; */
+/* u32 SglOffset; */
u64 phys_addr;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
pSgl, SxgSgl, 0, 0);
- // Set aside a pointer to the sgl
+ /* Set aside a pointer to the sgl */
SxgSgl->pSgl = pSgl;
- // Sanity check that our SGL format is as we expect.
+ /* Sanity check that our SGL format is as we expect. */
ASSERT(sizeof(SXG_X64_SGE) == sizeof(SCATTER_GATHER_ELEMENT));
- // Shouldn't be a vlan tag on this frame
+ /* Shouldn't be a vlan tag on this frame */
ASSERT(SxgSgl->VlanTag.VlanTci == 0);
ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
- // From here below we work with the SGL placed in our
- // buffer.
+ /* From here below we work with the SGL placed in our */
+ /* buffer. */
SxgSgl->Sgl.NumberOfElements = 1;
- // Grab the spinlock and acquire a command
+ /* Grab the spinlock and acquire a command */
spin_lock(&adapter->XmtZeroLock);
SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
if (XmtCmd == NULL) {
- // Call sxg_complete_slow_send to see if we can
- // free up any XmtRingZero entries and then try again
+ /* Call sxg_complete_slow_send to see if we can */
+ /* free up any XmtRingZero entries and then try again */
spin_unlock(&adapter->XmtZeroLock);
sxg_complete_slow_send(adapter);
spin_lock(&adapter->XmtZeroLock);
@@ -2027,10 +2028,10 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
- // Update stats
+ /* Update stats */
adapter->Stats.DumbXmtPkts++;
adapter->Stats.DumbXmtBytes += DataLength;
-#if XXXTODO // Stats stuff
+#if XXXTODO /* Stats stuff */
if (SXG_MULTICAST_PACKET(EtherHdr)) {
if (SXG_BROADCAST_PACKET(EtherHdr)) {
adapter->Stats.DumbXmtBcastPkts++;
@@ -2044,8 +2045,8 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
adapter->Stats.DumbXmtUcastBytes += DataLength;
}
#endif
- // Fill in the command
- // Copy out the first SGE to the command and adjust for offset
+ /* Fill in the command */
+ /* Copy out the first SGE to the command and adjust for offset */
phys_addr =
pci_map_single(adapter->pcidev, skb->data, skb->len,
PCI_DMA_TODEVICE);
@@ -2053,54 +2054,54 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
XmtCmd->Buffer.FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress << 32;
XmtCmd->Buffer.FirstSgeAddress =
XmtCmd->Buffer.FirstSgeAddress | SXG_GET_ADDR_LOW(phys_addr);
-// XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address;
-// XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset;
+/* XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address; */
+/* XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset; */
XmtCmd->Buffer.FirstSgeLength = DataLength;
- // Set a pointer to the remaining SGL entries
-// XmtCmd->Sgl = SxgSgl->PhysicalAddress;
- // Advance the physical address of the SxgSgl structure to
- // the second SGE
-// SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) -
-// (u32 *)SxgSgl);
-// XmtCmd->Sgl.LowPart += SglOffset;
+ /* Set a pointer to the remaining SGL entries */
+/* XmtCmd->Sgl = SxgSgl->PhysicalAddress; */
+ /* Advance the physical address of the SxgSgl structure to */
+ /* the second SGE */
+/* SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) - */
+/* (u32 *)SxgSgl); */
+/* XmtCmd->Sgl.LowPart += SglOffset; */
XmtCmd->Buffer.SgeOffset = 0;
- // Note - TotalLength might be overwritten with MSS below..
+ /* Note - TotalLength might be overwritten with MSS below.. */
XmtCmd->Buffer.TotalLength = DataLength;
- XmtCmd->SgEntries = 1; //(ushort)(SxgSgl->Sgl.NumberOfElements - Index);
+ XmtCmd->SgEntries = 1; /*(ushort)(SxgSgl->Sgl.NumberOfElements - Index); */
XmtCmd->Flags = 0;
- //
- // Advance transmit cmd descripter by 1.
- // NOTE - See comments in SxgTcpOutput where we write
- // to the XmtCmd register regarding CPU ID values and/or
- // multiple commands.
- //
- //
+ /* */
+ /* Advance transmit cmd descriptor by 1. */
+ /* NOTE - See comments in SxgTcpOutput where we write */
+ /* to the XmtCmd register regarding CPU ID values and/or */
+ /* multiple commands. */
+ /* */
+ /* */
WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
- //
- //
- adapter->Stats.XmtQLen++; // Stats within lock
+ /* */
+ /* */
+ adapter->Stats.XmtQLen++; /* Stats within lock */
spin_unlock(&adapter->XmtZeroLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
XmtCmd, pSgl, SxgSgl, 0);
return;
abortcmd:
- // NOTE - Only jump to this label AFTER grabbing the
- // XmtZeroLock, and DO NOT DROP IT between the
- // command allocation and the following abort.
+ /* NOTE - Only jump to this label AFTER grabbing the */
+ /* XmtZeroLock, and DO NOT DROP IT between the */
+ /* command allocation and the following abort. */
if (XmtCmd) {
SXG_ABORT_CMD(XmtRingInfo);
}
spin_unlock(&adapter->XmtZeroLock);
-// failsgl:
- // Jump to this label if failure occurs before the
- // XmtZeroLock is grabbed
+/* failsgl: */
+ /* Jump to this label if failure occurs before the */
+ /* XmtZeroLock is grabbed */
adapter->Stats.XmtErrors++;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
- SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); // SxgSgl->DumbPacket is the skb
+ SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); /* SxgSgl->DumbPacket is the skb */
}
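sxg_dumb_sgl() above maps the skb with pci_map_single() and then splits the 64-bit bus address into the high and low halves of FirstSgeAddress by hand. A hedged sketch of the same split using the kernel's helpers (the command structure and field names are placeholders; pci_dma_mapping_error() checking is omitted for brevity):

dma_addr_t phys = pci_map_single(adapter->pcidev, skb->data, skb->len,
				 PCI_DMA_TODEVICE);

/* upper_32_bits()/lower_32_bits() replace the explicit shift-and-OR dance. */
cmd->sge_addr_hi = upper_32_bits(phys);
cmd->sge_addr_lo = lower_32_bits(phys);
cmd->sge_len     = skb->len;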
/***************************************************************
@@ -2127,122 +2128,122 @@ static int sxg_initialize_link(p_adapter_t adapter)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
adapter, 0, 0, 0);
- // Reset PHY and XGXS module
+ /* Reset PHY and XGXS module */
WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
- // Reset transmit configuration register
+ /* Reset transmit configuration register */
WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
- // Reset receive configuration register
+ /* Reset receive configuration register */
WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
- // Reset all MAC modules
+ /* Reset all MAC modules */
WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
- // Link address 0
- // XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
- // is stored with the first nibble (0a) in the byte 0
- // of the Mac address. Possibly reverse?
+ /* Link address 0 */
+ /* XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f) */
+ /* is stored with the first nibble (0a) in the byte 0 */
+ /* of the Mac address. Possibly reverse? */
Value = *(u32 *) adapter->MacAddr;
WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
- // also write the MAC address to the MAC. Endian is reversed.
+ /* also write the MAC address to the MAC. Endian is reversed. */
WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
Value = (*(u16 *) & adapter->MacAddr[4] & 0x0000FFFF);
WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
- // endian swap for the MAC (put high bytes in bits [31:16], swapped)
+ /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
Value = ntohl(Value);
WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
- // Link address 1
+ /* Link address 1 */
WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
- // Link address 2
+ /* Link address 2 */
WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
- // Link address 3
+ /* Link address 3 */
WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
- // Enable MAC modules
+ /* Enable MAC modules */
WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
- // Configure MAC
- WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | // Allow sending of pause
- AXGMAC_CFG1_XMT_EN | // Enable XMT
- AXGMAC_CFG1_RCV_PAUSE | // Enable detection of pause
- AXGMAC_CFG1_RCV_EN | // Enable receive
- AXGMAC_CFG1_SHORT_ASSERT | // short frame detection
- AXGMAC_CFG1_CHECK_LEN | // Verify frame length
- AXGMAC_CFG1_GEN_FCS | // Generate FCS
- AXGMAC_CFG1_PAD_64), // Pad frames to 64 bytes
+ /* Configure MAC */
+ WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | /* Allow sending of pause */
+ AXGMAC_CFG1_XMT_EN | /* Enable XMT */
+ AXGMAC_CFG1_RCV_PAUSE | /* Enable detection of pause */
+ AXGMAC_CFG1_RCV_EN | /* Enable receive */
+ AXGMAC_CFG1_SHORT_ASSERT | /* short frame detection */
+ AXGMAC_CFG1_CHECK_LEN | /* Verify frame length */
+ AXGMAC_CFG1_GEN_FCS | /* Generate FCS */
+ AXGMAC_CFG1_PAD_64), /* Pad frames to 64 bytes */
TRUE);
- // Set AXGMAC max frame length if jumbo. Not needed for standard MTU
+ /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
if (adapter->JumboEnabled) {
WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
}
- // AMIIM Configuration Register -
- // The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
- // (bottom bits) of this register is used to determine the
- // MDC frequency as specified in the A-XGMAC Design Document.
- // This value must not be zero. The following value (62 or 0x3E)
- // is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz.
- // Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec),
- // we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62.
- // This value happens to be the default value for this register,
- // so we really don't have to do this.
+ /* AMIIM Configuration Register - */
+ /* The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion */
+ /* (bottom bits) of this register is used to determine the */
+ /* MDC frequency as specified in the A-XGMAC Design Document. */
+ /* This value must not be zero. The following value (62 or 0x3E) */
+ /* is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz. */
+ /* Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec), */
+ /* we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62. */
+ /* This value happens to be the default value for this register, */
+ /* so we really don't have to do this. */
WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
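Spelling out the divider arithmetic from the comment above (a restatement of the same numbers, not new information): with MTCLK = 312.5 MHz and an MDC ceiling of 2.5 MHz,

\[
\frac{312.5\,\text{MHz}}{2(X+1)} < 2.5\,\text{MHz}
\;\Longrightarrow\; X+1 > 62.5
\;\Longrightarrow\; X = 62 = \text{0x3E},
\qquad
f_{\mathrm{MDC}} = \frac{312.5}{2 \cdot 63} \approx 2.48\,\text{MHz}.
\]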
- // Power up and enable PHY and XAUI/XGXS/Serdes logic
+ /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
WRITE_REG(HwRegs->LinkStatus,
(LS_PHY_CLR_RESET |
LS_XGXS_ENABLE |
LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
- // Per information given by Aeluros, wait 100 ms after removing reset.
- // It's not enough to wait for the self-clearing reset bit in reg 0 to clear.
+ /* Per information given by Aeluros, wait 100 ms after removing reset. */
+ /* It's not enough to wait for the self-clearing reset bit in reg 0 to clear. */
mdelay(100);
- // Verify the PHY has come up by checking that the Reset bit has cleared.
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- PHY_PMA_CONTROL1, // PMA/PMD control register
+ /* Verify the PHY has come up by checking that the Reset bit has cleared. */
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ PHY_PMA_CONTROL1, /* PMA/PMD control register */
&Value);
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
- if (Value & PMA_CONTROL1_RESET) // reset complete if bit is 0
+ if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
return (STATUS_FAILURE);
- // The SERDES should be initialized by now - confirm
+ /* The SERDES should be initialized by now - confirm */
READ_REG(HwRegs->LinkStatus, Value);
- if (Value & LS_SERDES_DOWN) // verify SERDES is initialized
+ if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
return (STATUS_FAILURE);
- // The XAUI link should also be up - confirm
- if (!(Value & LS_XAUI_LINK_UP)) // verify XAUI link is up
+ /* The XAUI link should also be up - confirm */
+ if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
return (STATUS_FAILURE);
- // Initialize the PHY
+ /* Initialize the PHY */
status = sxg_phy_init(adapter);
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
- // Enable the Link Alarm
- status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- LASI_CONTROL, // LASI control register
- LASI_CTL_LS_ALARM_ENABLE); // enable link alarm bit
+ /* Enable the Link Alarm */
+ status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ LASI_CONTROL, /* LASI control register */
+ LASI_CTL_LS_ALARM_ENABLE); /* enable link alarm bit */
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
- // XXXTODO - temporary - verify bit is set
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- LASI_CONTROL, // LASI control register
+ /* XXXTODO - temporary - verify bit is set */
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ LASI_CONTROL, /* LASI control register */
&Value);
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
}
- // Enable receive
+ /* Enable receive */
MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
ConfigData = (RCV_CONFIG_ENABLE |
RCV_CONFIG_ENPARSE |
@@ -2256,7 +2257,7 @@ static int sxg_initialize_link(p_adapter_t adapter)
WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
- // Mark the link as down. We'll get a link event when it comes up.
+ /* Mark the link as down. We'll get a link event when it comes up. */
sxg_link_state(adapter, SXG_LINK_DOWN);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
@@ -2279,35 +2280,35 @@ static int sxg_phy_init(p_adapter_t adapter)
PPHY_UCODE p;
int status;
- DBG_ERROR("ENTER %s\n", __FUNCTION__);
+ DBG_ERROR("ENTER %s\n", __func__);
- // Read a register to identify the PHY type
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- 0xC205, // PHY ID register (?)
- &Value); // XXXTODO - add def
+ /* Read a register to identify the PHY type */
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ 0xC205, /* PHY ID register (?) */
+ &Value); /* XXXTODO - add def */
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
- if (Value == 0x0012) { // 0x0012 == AEL2005C PHY(?) - XXXTODO - add def
+ if (Value == 0x0012) { /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
DBG_ERROR
("AEL2005C PHY detected. Downloading PHY microcode.\n");
- // Initialize AEL2005C PHY and download PHY microcode
+ /* Initialize AEL2005C PHY and download PHY microcode */
for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
if (p->Addr == 0) {
- // if address == 0, data == sleep time in ms
+ /* if address == 0, data == sleep time in ms */
mdelay(p->Data);
} else {
- // write the given data to the specified address
- status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- p->Addr, // PHY address
- p->Data); // PHY data
+ /* write the given data to the specified address */
+ status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ p->Addr, /* PHY address */
+ p->Data); /* PHY data */
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
}
}
}
- DBG_ERROR("EXIT %s\n", __FUNCTION__);
+ DBG_ERROR("EXIT %s\n", __func__);
return (STATUS_SUCCESS);
}
@@ -2330,42 +2331,42 @@ static void sxg_link_event(p_adapter_t adapter)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
adapter, 0, 0, 0);
- DBG_ERROR("ENTER %s\n", __FUNCTION__);
+ DBG_ERROR("ENTER %s\n", __func__);
- // Check the Link Status register. We should have a Link Alarm.
+ /* Check the Link Status register. We should have a Link Alarm. */
READ_REG(HwRegs->LinkStatus, Value);
if (Value & LS_LINK_ALARM) {
- // We got a Link Status alarm. First, pause to let the
- // link state settle (it can bounce a number of times)
+ /* We got a Link Status alarm. First, pause to let the */
+ /* link state settle (it can bounce a number of times) */
mdelay(10);
- // Now clear the alarm by reading the LASI status register.
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- LASI_STATUS, // LASI status register
+ /* Now clear the alarm by reading the LASI status register. */
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ LASI_STATUS, /* LASI status register */
&Value);
if (status != STATUS_SUCCESS) {
DBG_ERROR("Error reading LASI Status MDIO register!\n");
sxg_link_state(adapter, SXG_LINK_DOWN);
-// ASSERT(0);
+/* ASSERT(0); */
}
ASSERT(Value & LASI_STATUS_LS_ALARM);
- // Now get and set the link state
+ /* Now get and set the link state */
LinkState = sxg_get_link_state(adapter);
sxg_link_state(adapter, LinkState);
DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
} else {
- // XXXTODO - Assuming Link Attention is only being generated for the
- // Link Alarm pin (and not for a XAUI Link Status change), then it's
- // impossible to get here. Yet we've gotten here twice (under extreme
- // conditions - bouncing the link up and down many times a second).
- // Needs further investigation.
+ /* XXXTODO - Assuming Link Attention is only being generated for the */
+ /* Link Alarm pin (and not for a XAUI Link Status change), then it's */
+ /* impossible to get here. Yet we've gotten here twice (under extreme */
+ /* conditions - bouncing the link up and down many times a second). */
+ /* Needs further investigation. */
DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
-// ASSERT(0);
+/* ASSERT(0); */
}
- DBG_ERROR("EXIT %s\n", __FUNCTION__);
+ DBG_ERROR("EXIT %s\n", __func__);
}
@@ -2383,50 +2384,50 @@ static SXG_LINK_STATE sxg_get_link_state(p_adapter_t adapter)
int status;
u32 Value;
- DBG_ERROR("ENTER %s\n", __FUNCTION__);
+ DBG_ERROR("ENTER %s\n", __func__);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
adapter, 0, 0, 0);
- // Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
- // the following 3 bits (from 3 different MDIO registers) are all true.
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
- PHY_PMA_RCV_DET, // PMA/PMD Receive Signal Detect register
+ /* Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if */
+ /* the following 3 bits (from 3 different MDIO registers) are all true. */
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
+ PHY_PMA_RCV_DET, /* PMA/PMD Receive Signal Detect register */
&Value);
if (status != STATUS_SUCCESS)
goto bad;
- // If PMA/PMD receive signal detect is 0, then the link is down
+ /* If PMA/PMD receive signal detect is 0, then the link is down */
if (!(Value & PMA_RCV_DETECT))
return (SXG_LINK_DOWN);
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, // PHY PCS module
- PHY_PCS_10G_STATUS1, // PCS 10GBASE-R Status 1 register
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, /* PHY PCS module */
+ PHY_PCS_10G_STATUS1, /* PCS 10GBASE-R Status 1 register */
&Value);
if (status != STATUS_SUCCESS)
goto bad;
- // If PCS is not locked to receive blocks, then the link is down
+ /* If PCS is not locked to receive blocks, then the link is down */
if (!(Value & PCS_10B_BLOCK_LOCK))
return (SXG_LINK_DOWN);
- status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, // PHY XS module
- PHY_XS_LANE_STATUS, // XS Lane Status register
+ status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, /* PHY XS module */
+ PHY_XS_LANE_STATUS, /* XS Lane Status register */
&Value);
if (status != STATUS_SUCCESS)
goto bad;
- // If XS transmit lanes are not aligned, then the link is down
+ /* If XS transmit lanes are not aligned, then the link is down */
if (!(Value & XS_LANE_ALIGN))
return (SXG_LINK_DOWN);
- // All 3 bits are true, so the link is up
- DBG_ERROR("EXIT %s\n", __FUNCTION__);
+ /* All 3 bits are true, so the link is up */
+ DBG_ERROR("EXIT %s\n", __func__);
return (SXG_LINK_UP);
bad:
- // An error occurred reading an MDIO register. This shouldn't happen.
+ /* An error occurred reading an MDIO register. This shouldn't happen. */
DBG_ERROR("Error reading an MDIO register!\n");
ASSERT(0);
return (SXG_LINK_DOWN);
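
The three MDIO checks in sxg_get_link_state() could equally be written as a loop over a small table. A minimal sketch, assuming the module/register/bit constants behave exactly as used above; the table and the helper name are hypothetical and not part of this patch:

/* Hypothetical table-driven variant of the link check - illustration only */
struct sxg_link_check {
	u32 DevAddr;	/* MDIO module: PMA/PMD, PCS or XS */
	u32 RegAddr;	/* register within that module */
	u32 Bit;	/* bit that must be set for the link to be up */
};

static const struct sxg_link_check sxg_link_checks[] = {
	{ MIIM_DEV_PHY_PMA, PHY_PMA_RCV_DET,     PMA_RCV_DETECT },
	{ MIIM_DEV_PHY_PCS, PHY_PCS_10G_STATUS1, PCS_10B_BLOCK_LOCK },
	{ MIIM_DEV_PHY_XS,  PHY_XS_LANE_STATUS,  XS_LANE_ALIGN },
};

static SXG_LINK_STATE sxg_get_link_state_tbl(p_adapter_t adapter)
{
	u32 Value;
	int i;

	for (i = 0; i < ARRAY_SIZE(sxg_link_checks); i++) {
		if (sxg_read_mdio_reg(adapter, sxg_link_checks[i].DevAddr,
				      sxg_link_checks[i].RegAddr,
				      &Value) != STATUS_SUCCESS)
			return (SXG_LINK_DOWN);	/* treat a read error as link down */
		if (!(Value & sxg_link_checks[i].Bit))
			return (SXG_LINK_DOWN);	/* a required bit is clear */
	}
	return (SXG_LINK_UP);	/* all three bits are set */
}
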
@@ -2437,11 +2438,11 @@ static void sxg_indicate_link_state(p_adapter_t adapter,
{
if (adapter->LinkState == SXG_LINK_UP) {
DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
- __FUNCTION__);
+ __func__);
netif_start_queue(adapter->netdev);
} else {
DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
- __FUNCTION__);
+ __func__);
netif_stop_queue(adapter->netdev);
}
}
@@ -2464,23 +2465,23 @@ static void sxg_link_state(p_adapter_t adapter, SXG_LINK_STATE LinkState)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
adapter, LinkState, adapter->LinkState, adapter->State);
- DBG_ERROR("ENTER %s\n", __FUNCTION__);
+ DBG_ERROR("ENTER %s\n", __func__);
- // Hold the adapter lock during this routine. Maybe move
- // the lock to the caller.
+ /* Hold the adapter lock during this routine. Maybe move */
+ /* the lock to the caller. */
spin_lock(&adapter->AdapterLock);
if (LinkState == adapter->LinkState) {
- // Nothing changed..
+ /* Nothing changed.. */
spin_unlock(&adapter->AdapterLock);
- DBG_ERROR("EXIT #0 %s\n", __FUNCTION__);
+ DBG_ERROR("EXIT #0 %s\n", __func__);
return;
}
- // Save the adapter state
+ /* Save the adapter state */
adapter->LinkState = LinkState;
- // Drop the lock and indicate link state
+ /* Drop the lock and indicate link state */
spin_unlock(&adapter->AdapterLock);
- DBG_ERROR("EXIT #1 %s\n", __FUNCTION__);
+ DBG_ERROR("EXIT #1 %s\n", __func__);
sxg_indicate_link_state(adapter, LinkState);
}
@@ -2501,76 +2502,76 @@ static int sxg_write_mdio_reg(p_adapter_t adapter,
u32 DevAddr, u32 RegAddr, u32 Value)
{
PSXG_HW_REGS HwRegs = adapter->HwRegs;
- u32 AddrOp; // Address operation (written to MIIM field reg)
- u32 WriteOp; // Write operation (written to MIIM field reg)
- u32 Cmd; // Command (written to MIIM command reg)
+ u32 AddrOp; /* Address operation (written to MIIM field reg) */
+ u32 WriteOp; /* Write operation (written to MIIM field reg) */
+ u32 Cmd; /* Command (written to MIIM command reg) */
u32 ValueRead;
u32 Timeout;
-// DBG_ERROR("ENTER %s\n", __FUNCTION__);
+/* DBG_ERROR("ENTER %s\n", __func__); */
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
adapter, 0, 0, 0);
- // Ensure values don't exceed field width
- DevAddr &= 0x001F; // 5-bit field
- RegAddr &= 0xFFFF; // 16-bit field
- Value &= 0xFFFF; // 16-bit field
+ /* Ensure values don't exceed field width */
+ DevAddr &= 0x001F; /* 5-bit field */
+ RegAddr &= 0xFFFF; /* 16-bit field */
+ Value &= 0xFFFF; /* 16-bit field */
- // Set MIIM field register bits for an MIIM address operation
+ /* Set MIIM field register bits for an MIIM address operation */
AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
(DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
(MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
(MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
- // Set MIIM field register bits for an MIIM write operation
+ /* Set MIIM field register bits for an MIIM write operation */
WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
(DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
(MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
(MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
- // Set MIIM command register bits to execute an MIIM command
+ /* Set MIIM command register bits to execute an MIIM command */
Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
- // Reset the command register command bit (in case it's not 0)
+ /* Reset the command register command bit (in case it's not 0) */
WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
- // MIIM write to set the address of the specified MDIO register
+ /* MIIM write to set the address of the specified MDIO register */
WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
- // Write to MIIM Command Register to execute to address operation
+ /* Write to MIIM Command Register to execute the address operation */
WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
- // Poll AMIIM Indicator register to wait for completion
+ /* Poll AMIIM Indicator register to wait for completion */
Timeout = SXG_LINK_TIMEOUT;
do {
- udelay(100); // Timeout in 100us units
+ udelay(100); /* Timeout in 100us units */
READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
if (--Timeout == 0) {
return (STATUS_FAILURE);
}
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
- // Reset the command register command bit
+ /* Reset the command register command bit */
WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
- // MIIM write to set up an MDIO write operation
+ /* MIIM write to set up an MDIO write operation */
WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
- // Write to MIIM Command Register to execute the write operation
+ /* Write to MIIM Command Register to execute the write operation */
WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
- // Poll AMIIM Indicator register to wait for completion
+ /* Poll AMIIM Indicator register to wait for completion */
Timeout = SXG_LINK_TIMEOUT;
do {
- udelay(100); // Timeout in 100us units
+ udelay(100); /* Timeout in 100us units */
READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
if (--Timeout == 0) {
return (STATUS_FAILURE);
}
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
-// DBG_ERROR("EXIT %s\n", __FUNCTION__);
+/* DBG_ERROR("EXIT %s\n", __func__); */
return (STATUS_SUCCESS);
}
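
sxg_write_mdio_reg() and sxg_read_mdio_reg() repeat the same busy-wait on the AMIIM indicator four times. A sketch of a shared helper, assuming READ_REG, udelay(), SXG_LINK_TIMEOUT and AXGMAC_AMIIM_INDC_BUSY behave exactly as used above; the helper name is invented for illustration:

/* Hypothetical helper to factor out the duplicated AMIIM polling loops */
static int sxg_wait_amiim_idle(p_adapter_t adapter)
{
	PSXG_HW_REGS HwRegs = adapter->HwRegs;
	u32 ValueRead;
	u32 Timeout = SXG_LINK_TIMEOUT;

	do {
		udelay(100);	/* Timeout counts in 100us units */
		READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
		if (--Timeout == 0)
			return (STATUS_FAILURE);
	} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);

	return (STATUS_SUCCESS);
}

Each polling loop above would then reduce to a single call checked against STATUS_FAILURE.
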
@@ -2591,110 +2592,78 @@ static int sxg_read_mdio_reg(p_adapter_t adapter,
u32 DevAddr, u32 RegAddr, u32 *pValue)
{
PSXG_HW_REGS HwRegs = adapter->HwRegs;
- u32 AddrOp; // Address operation (written to MIIM field reg)
- u32 ReadOp; // Read operation (written to MIIM field reg)
- u32 Cmd; // Command (written to MIIM command reg)
+ u32 AddrOp; /* Address operation (written to MIIM field reg) */
+ u32 ReadOp; /* Read operation (written to MIIM field reg) */
+ u32 Cmd; /* Command (written to MIIM command reg) */
u32 ValueRead;
u32 Timeout;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
adapter, 0, 0, 0);
-// DBG_ERROR("ENTER %s\n", __FUNCTION__);
+/* DBG_ERROR("ENTER %s\n", __func__); */
- // Ensure values don't exceed field width
- DevAddr &= 0x001F; // 5-bit field
- RegAddr &= 0xFFFF; // 16-bit field
+ /* Ensure values don't exceed field width */
+ DevAddr &= 0x001F; /* 5-bit field */
+ RegAddr &= 0xFFFF; /* 16-bit field */
- // Set MIIM field register bits for an MIIM address operation
+ /* Set MIIM field register bits for an MIIM address operation */
AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
(DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
(MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
(MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
- // Set MIIM field register bits for an MIIM read operation
+ /* Set MIIM field register bits for an MIIM read operation */
ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
(DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
(MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
(MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
- // Set MIIM command register bits to execute an MIIM command
+ /* Set MIIM command register bits to execute an MIIM command */
Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
- // Reset the command register command bit (in case it's not 0)
+ /* Reset the command register command bit (in case it's not 0) */
WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
- // MIIM write to set the address of the specified MDIO register
+ /* MIIM write to set the address of the specified MDIO register */
WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
- // Write to MIIM Command Register to execute to address operation
+ /* Write to MIIM Command Register to execute the address operation */
WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
- // Poll AMIIM Indicator register to wait for completion
+ /* Poll AMIIM Indicator register to wait for completion */
Timeout = SXG_LINK_TIMEOUT;
do {
- udelay(100); // Timeout in 100us units
+ udelay(100); /* Timeout in 100us units */
READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
if (--Timeout == 0) {
return (STATUS_FAILURE);
}
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
- // Reset the command register command bit
+ /* Reset the command register command bit */
WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
- // MIIM write to set up an MDIO register read operation
+ /* MIIM write to set up an MDIO register read operation */
WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
- // Write to MIIM Command Register to execute the read operation
+ /* Write to MIIM Command Register to execute the read operation */
WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
- // Poll AMIIM Indicator register to wait for completion
+ /* Poll AMIIM Indicator register to wait for completion */
Timeout = SXG_LINK_TIMEOUT;
do {
- udelay(100); // Timeout in 100us units
+ udelay(100); /* Timeout in 100us units */
READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
if (--Timeout == 0) {
return (STATUS_FAILURE);
}
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
- // Read the MDIO register data back from the field register
+ /* Read the MDIO register data back from the field register */
READ_REG(HwRegs->MacAmiimField, *pValue);
- *pValue &= 0xFFFF; // data is in the lower 16 bits
+ *pValue &= 0xFFFF; /* data is in the lower 16 bits */
-// DBG_ERROR("EXIT %s\n", __FUNCTION__);
-
- return (STATUS_SUCCESS);
-}
-
-/*
- * Allocate a mcast_address structure to hold the multicast address.
- * Link it in.
- */
-static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
-{
- p_mcast_address_t mcaddr, mlist;
- bool equaladdr;
-
- /* Check to see if it already exists */
- mlist = adapter->mcastaddrs;
- while (mlist) {
- ETHER_EQ_ADDR(mlist->address, address, equaladdr);
- if (equaladdr) {
- return (STATUS_SUCCESS);
- }
- mlist = mlist->next;
- }
-
- /* Doesn't already exist. Allocate a structure to hold it */
- mcaddr = kmalloc(sizeof(mcast_address_t), GFP_ATOMIC);
- if (mcaddr == NULL)
- return 1;
-
- memcpy(mcaddr->address, address, 6);
-
- mcaddr->next = adapter->mcastaddrs;
- adapter->mcastaddrs = mcaddr;
+/* DBG_ERROR("EXIT %s\n", __func__); */
return (STATUS_SUCCESS);
}
@@ -2710,7 +2679,6 @@ static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
*
*/
static u32 sxg_crc_table[256]; /* Table of CRC's for all possible byte values */
-static u32 sxg_crc_init; /* Is table initialized */
/*
* Construct the CRC32 table
@@ -2737,6 +2705,8 @@ static void sxg_mcast_init_crc32(void)
}
}
+#if XXXTODO
+static u32 sxg_crc_init; /* Is table initialized */
/*
* Return the MAC hash as described above.
*/
@@ -2765,6 +2735,74 @@ static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
return (machash);
}
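
The hash returned above ultimately selects one bit in the 64-bit adapter->MulticastMask that sxg_mcast_set_mask(), added just below, writes out through the McastLow/McastHigh registers. The exact hash-to-bit mapping lives in sxg_mcast_set_bit() and is not visible in this hunk; one plausible form, purely for illustration (the function name is invented here):

/* Illustration only - the real mapping is in sxg_mcast_set_bit() */
static void sxg_mcast_mask_set_bit_sketch(p_adapter_t adapter, unsigned char machash)
{
	/* With a 64-bit mask, only the low 6 bits of the hash are needed */
	adapter->MulticastMask |= (u64) 1 << (machash & 0x3F);
}
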
+static void sxg_mcast_set_mask(p_adapter_t adapter)
+{
+ PSXG_UCODE_REGS sxg_regs = adapter->UcodeRegs;
+
+ DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __func__,
+ adapter->netdev->name, (unsigned int)adapter->MacFilter,
+ adapter->MulticastMask);
+
+ if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
+ /* Turn on all multicast addresses. We have to do this for promiscuous
+ * mode as well as ALLMCAST mode. It saves the Microcode from having
+ * to keep state about the MAC configuration.
+ */
+/* DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__func__); */
+ WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
+ WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
+/* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__func__, adapter->netdev->name); */
+
+ } else {
+ /* Commit our multicast mask to the SLIC by writing to the multicast
+ * address mask registers
+ */
+ DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
+ __func__, adapter->netdev->name,
+ ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
+ ((ulong)
+ ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
+
+ WRITE_REG(sxg_regs->McastLow,
+ (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
+ WRITE_REG(sxg_regs->McastHigh,
+ (u32) ((adapter->
+ MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
+ }
+}
+
+/*
+ * Allocate a mcast_address structure to hold the multicast address.
+ * Link it in.
+ */
+static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
+{
+ p_mcast_address_t mcaddr, mlist;
+ bool equaladdr;
+
+ /* Check to see if it already exists */
+ mlist = adapter->mcastaddrs;
+ while (mlist) {
+ ETHER_EQ_ADDR(mlist->address, address, equaladdr);
+ if (equaladdr) {
+ return (STATUS_SUCCESS);
+ }
+ mlist = mlist->next;
+ }
+
+ /* Doesn't already exist. Allocate a structure to hold it */
+ mcaddr = kmalloc(sizeof(mcast_address_t), GFP_ATOMIC);
+ if (mcaddr == NULL)
+ return 1;
+
+ memcpy(mcaddr->address, address, 6);
+
+ mcaddr->next = adapter->mcastaddrs;
+ adapter->mcastaddrs = mcaddr;
+
+ return (STATUS_SUCCESS);
+}
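+
The list built by sxg_mcast_add_list() is singly linked off adapter->mcastaddrs, so tearing it down is a simple walk-and-free. The teardown the driver actually uses is not in this hunk; a sketch, assuming only the fields shown above:

/* Illustration only - free every node queued by sxg_mcast_add_list() */
static void sxg_mcast_free_list_sketch(p_adapter_t adapter)
{
	p_mcast_address_t mcaddr;

	while ((mcaddr = adapter->mcastaddrs) != NULL) {
		adapter->mcastaddrs = mcaddr->next;
		kfree(mcaddr);
	}
}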
+
static void sxg_mcast_set_bit(p_adapter_t adapter, char *address)
{
unsigned char crcpoly;
@@ -2783,7 +2821,6 @@ static void sxg_mcast_set_bit(p_adapter_t adapter, char *address)
static void sxg_mcast_set_list(p_net_device dev)
{
-#if XXXTODO
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
int status = STATUS_SUCCESS;
int i;
@@ -2809,7 +2846,7 @@ static void sxg_mcast_set_list(p_net_device dev)
}
DBG_ERROR("%s a->devflags_prev[%x] dev->flags[%x] status[%x]\n",
- __FUNCTION__, adapter->devflags_prev, dev->flags, status);
+ __func__, adapter->devflags_prev, dev->flags, status);
if (adapter->devflags_prev != dev->flags) {
adapter->macopts = MAC_DIRECTED;
if (dev->flags) {
@@ -2828,60 +2865,24 @@ static void sxg_mcast_set_list(p_net_device dev)
}
adapter->devflags_prev = dev->flags;
DBG_ERROR("%s call sxg_config_set adapter->macopts[%x]\n",
- __FUNCTION__, adapter->macopts);
+ __func__, adapter->macopts);
sxg_config_set(adapter, TRUE);
} else {
if (status == STATUS_SUCCESS) {
sxg_mcast_set_mask(adapter);
}
}
-#endif
return;
}
-
-static void sxg_mcast_set_mask(p_adapter_t adapter)
-{
- PSXG_UCODE_REGS sxg_regs = adapter->UcodeRegs;
-
- DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __FUNCTION__,
- adapter->netdev->name, (unsigned int)adapter->MacFilter,
- adapter->MulticastMask);
-
- if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
- /* Turn on all multicast addresses. We have to do this for promiscuous
- * mode as well as ALLMCAST mode. It saves the Microcode from having
- * to keep state about the MAC configuration.
- */
-// DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__FUNCTION__);
- WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
- WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
-// DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__FUNCTION__, adapter->netdev->name);
-
- } else {
- /* Commit our multicast mast to the SLIC by writing to the multicast
- * address mask registers
- */
- DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
- __FUNCTION__, adapter->netdev->name,
- ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
- ((ulong)
- ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
-
- WRITE_REG(sxg_regs->McastLow,
- (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
- WRITE_REG(sxg_regs->McastHigh,
- (u32) ((adapter->
- MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
- }
-}
+#endif
static void sxg_unmap_mmio_space(p_adapter_t adapter)
{
#if LINUX_FREES_ADAPTER_RESOURCES
-// if (adapter->Regs) {
-// iounmap(adapter->Regs);
-// }
-// adapter->slic_regs = NULL;
+/* if (adapter->Regs) { */
+/* iounmap(adapter->Regs); */
+/* } */
+/* adapter->slic_regs = NULL; */
#endif
}
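
If the LINUX_FREES_ADAPTER_RESOURCES path were ever enabled, the commented-out lines above suggest a body along these lines (field names taken from the commented code; a sketch only, not something this patch adds):

/* Sketch of the disabled unmap path - illustration only */
static void sxg_unmap_mmio_space_sketch(p_adapter_t adapter)
{
	if (adapter->Regs)
		iounmap(adapter->Regs);	/* release the mapped register space */
	adapter->slic_regs = NULL;
}
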
@@ -2909,8 +2910,8 @@ void SxgFreeResources(p_adapter_t adapter)
IsrCount = adapter->MsiEnabled ? RssIds : 1;
if (adapter->BasicAllocations == FALSE) {
- // No allocations have been made, including spinlocks,
- // or listhead initializations. Return.
+ /* No allocations have been made, including spinlocks, */
+ /* or listhead initializations. Return. */
return;
}
@@ -2920,7 +2921,7 @@ void SxgFreeResources(p_adapter_t adapter)
if (!(IsListEmpty(&adapter->AllSglBuffers))) {
SxgFreeSglBuffers(adapter);
}
- // Free event queues.
+ /* Free event queues. */
if (adapter->EventRings) {
pci_free_consistent(adapter->pcidev,
sizeof(SXG_EVENT_RING) * RssIds,
@@ -2947,17 +2948,17 @@ void SxgFreeResources(p_adapter_t adapter)
SXG_FREE_PACKET_POOL(adapter->PacketPoolHandle);
SXG_FREE_BUFFER_POOL(adapter->BufferPoolHandle);
- // Unmap register spaces
+ /* Unmap register spaces */
SxgUnmapResources(adapter);
- // Deregister DMA
+ /* Deregister DMA */
if (adapter->DmaHandle) {
SXG_DEREGISTER_DMA(adapter->DmaHandle);
}
- // Deregister interrupt
+ /* Deregister interrupt */
SxgDeregisterInterrupt(adapter);
- // Possibly free system info (5.2 only)
+ /* Possibly free system info (5.2 only) */
SXG_RELEASE_SYSTEM_INFO(adapter);
SxgDiagFreeResources(adapter);
@@ -3047,23 +3048,23 @@ static int sxg_allocate_buffer_memory(p_adapter_t adapter,
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
adapter, Size, BufferType, 0);
- // Grab the adapter lock and check the state.
- // If we're in anything other than INITIALIZING or
- // RUNNING state, fail. This is to prevent
- // allocations in an improper driver state
+ /* Grab the adapter lock and check the state. */
+ /* If we're in anything other than INITIALIZING or */
+ /* RUNNING state, fail. This is to prevent */
+ /* allocations in an improper driver state */
spin_lock(&adapter->AdapterLock);
- // Increment the AllocationsPending count while holding
- // the lock. Pause processing relies on this
+ /* Increment the AllocationsPending count while holding */
+ /* the lock. Pause processing relies on this */
++adapter->AllocationsPending;
spin_unlock(&adapter->AdapterLock);
- // At initialization time allocate resources synchronously.
+ /* At initialization time allocate resources synchronously. */
Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
if (Buffer == NULL) {
spin_lock(&adapter->AdapterLock);
- // Decrement the AllocationsPending count while holding
- // the lock. Pause processing relies on this
+ /* Decrement the AllocationsPending count while holding */
+ /* the lock. Pause processing relies on this */
--adapter->AllocationsPending;
spin_unlock(&adapter->AdapterLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
@@ -3113,10 +3114,10 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
(BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
ASSERT(Length == SXG_RCV_BLOCK_SIZE(BufferSize));
- // First, initialize the contained pool of receive data
- // buffers. This initialization requires NBL/NB/MDL allocations,
- // If any of them fail, free the block and return without
- // queueing the shared memory
+ /* First, initialize the contained pool of receive data */
+ /* buffers. This initialization requires NBL/NB/MDL allocations. */
+ /* If any of them fail, free the block and return without */
+ /* queueing the shared memory */
RcvDataBuffer = RcvBlock;
#if 0
for (i = 0, Paddr = *PhysicalAddress;
@@ -3126,14 +3127,14 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
for (i = 0, Paddr = PhysicalAddress;
i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) {
- //
+ /* */
RcvDataBufferHdr =
(PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer +
SXG_RCV_DATA_BUFFER_HDR_OFFSET
(BufferSize));
RcvDataBufferHdr->VirtualAddress = RcvDataBuffer;
RcvDataBufferHdr->PhysicalAddress = Paddr;
- RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; // For FREE macro assertion
+ RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; /* For FREE macro assertion */
RcvDataBufferHdr->Size =
SXG_RCV_BUFFER_DATA_SIZE(BufferSize);
@@ -3143,8 +3144,8 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
}
- // Place this entire block of memory on the AllRcvBlocks queue so it can be
- // free later
+ /* Place this entire block of memory on the AllRcvBlocks queue so it can be */
+ /* freed later */
RcvBlockHdr =
(PSXG_RCV_BLOCK_HDR) ((unsigned char *)RcvBlock +
SXG_RCV_BLOCK_HDR_OFFSET(BufferSize));
@@ -3155,7 +3156,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
spin_unlock(&adapter->RcvQLock);
- // Now free the contained receive data buffers that we initialized above
+ /* Now free the contained receive data buffers that we initialized above */
RcvDataBuffer = RcvBlock;
for (i = 0, Paddr = PhysicalAddress;
i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
@@ -3168,7 +3169,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
spin_unlock(&adapter->RcvQLock);
}
- // Locate the descriptor block and put it on a separate free queue
+ /* Locate the descriptor block and put it on a separate free queue */
RcvDescriptorBlock =
(PSXG_RCV_DESCRIPTOR_BLOCK) ((unsigned char *)RcvBlock +
SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
@@ -3186,7 +3187,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
adapter, RcvBlock, Length, 0);
return;
fail:
- // Free any allocated resources
+ /* Free any allocated resources */
if (RcvBlock) {
RcvDataBuffer = RcvBlock;
for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
@@ -3200,7 +3201,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
pci_free_consistent(adapter->pcidev,
Length, RcvBlock, PhysicalAddress);
}
- DBG_ERROR("%s: OUT OF RESOURCES\n", __FUNCTION__);
+ DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
adapter, adapter->FreeRcvBufferCount,
adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
@@ -3230,7 +3231,7 @@ static void sxg_allocate_sgl_buffer_complete(p_adapter_t adapter,
adapter->AllSglBufferCount++;
memset(SxgSgl, 0, sizeof(SXG_SCATTER_GATHER));
SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */
- SxgSgl->adapter = adapter; // Initialize backpointer once
+ SxgSgl->adapter = adapter; /* Initialize backpointer once */
InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
spin_unlock(&adapter->SglQLock);
SxgSgl->State = SXG_BUFFER_BUSY;
@@ -3244,14 +3245,14 @@ static unsigned char temp_mac_address[6] =
static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
{
-// DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __FUNCTION__,
-// card->config_set, adapter->port, adapter->physport, adapter->functionnumber);
-//
-// sxg_dbg_macaddrs(adapter);
+/* DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __func__, */
+/* card->config_set, adapter->port, adapter->physport, adapter->functionnumber); */
+/* */
+/* sxg_dbg_macaddrs(adapter); */
memcpy(adapter->macaddr, temp_mac_address, sizeof(SXG_CONFIG_MAC));
-// DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __FUNCTION__);
-// sxg_dbg_macaddrs(adapter);
+/* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __func__); */
+/* sxg_dbg_macaddrs(adapter); */
if (!(adapter->currmacaddr[0] ||
adapter->currmacaddr[1] ||
adapter->currmacaddr[2] ||
@@ -3262,18 +3263,18 @@ static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
if (adapter->netdev) {
memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
}
-// DBG_ERROR ("%s EXIT port %d\n", __FUNCTION__, adapter->port);
+/* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
sxg_dbg_macaddrs(adapter);
}
+#if XXXTODO
static int sxg_mac_set_address(p_net_device dev, void *ptr)
{
-#if XXXTODO
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
struct sockaddr *addr = ptr;
- DBG_ERROR("%s ENTER (%s)\n", __FUNCTION__, adapter->netdev->name);
+ DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
if (netif_running(dev)) {
return -EBUSY;
@@ -3282,22 +3283,22 @@ static int sxg_mac_set_address(p_net_device dev, void *ptr)
return -EBUSY;
}
DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- __FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0],
+ __func__, adapter->netdev->name, adapter->currmacaddr[0],
adapter->currmacaddr[1], adapter->currmacaddr[2],
adapter->currmacaddr[3], adapter->currmacaddr[4],
adapter->currmacaddr[5]);
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- __FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0],
+ __func__, adapter->netdev->name, adapter->currmacaddr[0],
adapter->currmacaddr[1], adapter->currmacaddr[2],
adapter->currmacaddr[3], adapter->currmacaddr[4],
adapter->currmacaddr[5]);
sxg_config_set(adapter, TRUE);
-#endif
return 0;
}
+#endif
/*****************************************************************************/
/************* SXG DRIVER FUNCTIONS (below) ********************************/
@@ -3321,77 +3322,77 @@ static int sxg_initialize_adapter(p_adapter_t adapter)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
adapter, 0, 0, 0);
- RssIds = 1; // XXXTODO SXG_RSS_CPU_COUNT(adapter);
+ RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
IsrCount = adapter->MsiEnabled ? RssIds : 1;
- // Sanity check SXG_UCODE_REGS structure definition to
- // make sure the length is correct
+ /* Sanity check SXG_UCODE_REGS structure definition to */
+ /* make sure the length is correct */
ASSERT(sizeof(SXG_UCODE_REGS) == SXG_REGISTER_SIZE_PER_CPU);
- // Disable interrupts
+ /* Disable interrupts */
SXG_DISABLE_ALL_INTERRUPTS(adapter);
- // Set MTU
+ /* Set MTU */
ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
(adapter->FrameSize == JUMBOMAXFRAME));
WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
- // Set event ring base address and size
+ /* Set event ring base address and size */
WRITE_REG64(adapter,
adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
- // Per-ISR initialization
+ /* Per-ISR initialization */
for (i = 0; i < IsrCount; i++) {
u64 Addr;
- // Set interrupt status pointer
+ /* Set interrupt status pointer */
Addr = adapter->PIsr + (i * sizeof(u32));
WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
}
- // XMT ring zero index
+ /* XMT ring zero index */
WRITE_REG64(adapter,
adapter->UcodeRegs[0].SPSendIndex,
adapter->PXmtRingZeroIndex, 0);
- // Per-RSS initialization
+ /* Per-RSS initialization */
for (i = 0; i < RssIds; i++) {
- // Release all event ring entries to the Microcode
+ /* Release all event ring entries to the Microcode */
WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
TRUE);
}
- // Transmit ring base and size
+ /* Transmit ring base and size */
WRITE_REG64(adapter,
adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
- // Receive ring base and size
+ /* Receive ring base and size */
WRITE_REG64(adapter,
adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);
- // Populate the card with receive buffers
+ /* Populate the card with receive buffers */
sxg_stock_rcv_buffers(adapter);
- // Initialize checksum offload capabilities. At the moment
- // we always enable IP and TCP receive checksums on the card.
- // Depending on the checksum configuration specified by the
- // user, we can choose to report or ignore the checksum
- // information provided by the card.
+ /* Initialize checksum offload capabilities. At the moment */
+ /* we always enable IP and TCP receive checksums on the card. */
+ /* Depending on the checksum configuration specified by the */
+ /* user, we can choose to report or ignore the checksum */
+ /* information provided by the card. */
WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
- // Initialize the MAC, XAUI
- DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __FUNCTION__);
+ /* Initialize the MAC, XAUI */
+ DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
status = sxg_initialize_link(adapter);
- DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __FUNCTION__,
+ DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
status);
if (status != STATUS_SUCCESS) {
return (status);
}
- // Initialize Dead to FALSE.
- // SlicCheckForHang or SlicDumpThread will take it from here.
+ /* Initialize Dead to FALSE. */
+ /* SlicCheckForHang or SlicDumpThread will take it from here. */
adapter->Dead = FALSE;
adapter->PingOutstanding = FALSE;
@@ -3428,14 +3429,14 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
ASSERT(RcvDescriptorBlockHdr);
- // If we don't have the resources to fill the descriptor block,
- // return failure
+ /* If we don't have the resources to fill the descriptor block, */
+ /* return failure */
if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
SXG_RING_FULL(RcvRingInfo)) {
adapter->Stats.NoMem++;
return (STATUS_FAILURE);
}
- // Get a ring descriptor command
+ /* Get a ring descriptor command */
SXG_GET_CMD(RingZero,
RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
ASSERT(RingDescriptorCmd);
@@ -3443,7 +3444,7 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
RcvDescriptorBlock =
(PSXG_RCV_DESCRIPTOR_BLOCK) RcvDescriptorBlockHdr->VirtualAddress;
- // Fill in the descriptor block
+ /* Fill in the descriptor block */
for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
ASSERT(RcvDataBufferHdr);
@@ -3454,13 +3455,13 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
RcvDataBufferHdr->PhysicalAddress;
}
- // Add the descriptor block to receive descriptor ring 0
+ /* Add the descriptor block to receive descriptor ring 0 */
RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
- // RcvBuffersOnCard is not protected via the receive lock (see
- // sxg_process_event_queue) We don't want to grap a lock every time a
- // buffer is returned to us, so we use atomic interlocked functions
- // instead.
+ /* RcvBuffersOnCard is not protected via the receive lock (see */
+ /* sxg_process_event_queue). We don't want to grab a lock every time a */
+ /* buffer is returned to us, so we use atomic interlocked functions */
+ /* instead. */
adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
@@ -3490,10 +3491,10 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
adapter, adapter->RcvBuffersOnCard,
adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
- // First, see if we've got less than our minimum threshold of
- // receive buffers, there isn't an allocation in progress, and
- // we haven't exceeded our maximum.. get another block of buffers
- // None of this needs to be SMP safe. It's round numbers.
+ /* First, see if we've got less than our minimum threshold of */
+ /* receive buffers, there isn't an allocation in progress, and */
+ /* we haven't exceeded our maximum; if so, get another block of buffers. */
+ /* None of this needs to be SMP safe. It's round numbers. */
if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
(adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
(adapter->AllocationsPending == 0)) {
@@ -3502,12 +3503,12 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
ReceiveBufferSize),
SXG_BUFFER_TYPE_RCV);
}
- // Now grab the RcvQLock lock and proceed
+ /* Now grab the RcvQLock lock and proceed */
spin_lock(&adapter->RcvQLock);
while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
PLIST_ENTRY _ple;
- // Get a descriptor block
+ /* Get a descriptor block */
RcvDescriptorBlockHdr = NULL;
if (adapter->FreeRcvBlockCount) {
_ple = RemoveHeadList(&adapter->FreeRcvBlocks);
@@ -3519,14 +3520,14 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
}
if (RcvDescriptorBlockHdr == NULL) {
- // Bail out..
+ /* Bail out.. */
adapter->Stats.NoMem++;
break;
}
- // Fill in the descriptor block and give it to the card
+ /* Fill in the descriptor block and give it to the card */
if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
STATUS_FAILURE) {
- // Free the descriptor block
+ /* Free the descriptor block */
SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
RcvDescriptorBlockHdr);
break;
@@ -3560,15 +3561,15 @@ static void sxg_complete_descriptor_blocks(p_adapter_t adapter,
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
- // Now grab the RcvQLock lock and proceed
+ /* Now grab the RcvQLock lock and proceed */
spin_lock(&adapter->RcvQLock);
ASSERT(Index != RcvRingInfo->Tail);
while (RcvRingInfo->Tail != Index) {
- //
- // Locate the current Cmd (ring descriptor entry), and
- // associated receive descriptor block, and advance
- // the tail
- //
+ /*
+  * Locate the current Cmd (ring descriptor entry), and
+  * associated receive descriptor block, and advance
+  * the tail
+  */
SXG_RETURN_CMD(RingZero,
RcvRingInfo,
RingDescriptorCmd, RcvDescriptorBlockHdr);
@@ -3576,12 +3577,12 @@ static void sxg_complete_descriptor_blocks(p_adapter_t adapter,
RcvRingInfo->Head, RcvRingInfo->Tail,
RingDescriptorCmd, RcvDescriptorBlockHdr);
- // Clear the SGL field
+ /* Clear the SGL field */
RingDescriptorCmd->Sgl = 0;
- // Attempt to refill it and hand it right back to the
- // card. If we fail to refill it, free the descriptor block
- // header. The card will be restocked later via the
- // RcvBuffersOnCard test
+ /* Attempt to refill it and hand it right back to the */
+ /* card. If we fail to refill it, free the descriptor block */
+ /* header. The card will be restocked later via the */
+ /* RcvBuffersOnCard test */
if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
STATUS_FAILURE) {
SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
diff --git a/drivers/staging/sxg/sxg_os.h b/drivers/staging/sxg/sxg_os.h
index 26fb0ffafa5..01182689aab 100644
--- a/drivers/staging/sxg/sxg_os.h
+++ b/drivers/staging/sxg/sxg_os.h
@@ -44,7 +44,6 @@
#define FALSE (0)
#define TRUE (1)
-
typedef struct _LIST_ENTRY {
struct _LIST_ENTRY *nle_flink;
struct _LIST_ENTRY *nle_blink;
@@ -69,35 +68,32 @@ typedef struct _LIST_ENTRY {
/* These two have to be inlined since they return things. */
-static __inline PLIST_ENTRY
-RemoveHeadList(list_entry *l)
+static __inline PLIST_ENTRY RemoveHeadList(list_entry *l)
{
- list_entry *f;
- list_entry *e;
+ list_entry *f;
+ list_entry *e;
- e = l->nle_flink;
- f = e->nle_flink;
- l->nle_flink = f;
- f->nle_blink = l;
+ e = l->nle_flink;
+ f = e->nle_flink;
+ l->nle_flink = f;
+ f->nle_blink = l;
- return (e);
+ return (e);
}
-static __inline PLIST_ENTRY
-RemoveTailList(list_entry *l)
+static __inline PLIST_ENTRY RemoveTailList(list_entry *l)
{
- list_entry *b;
- list_entry *e;
+ list_entry *b;
+ list_entry *e;
- e = l->nle_blink;
- b = e->nle_blink;
- l->nle_blink = b;
- b->nle_flink = l;
+ e = l->nle_blink;
+ b = e->nle_blink;
+ l->nle_blink = b;
+ b->nle_flink = l;
- return (e);
+ return (e);
}
-
#define InsertTailList(l, e) \
do { \
list_entry *b; \
@@ -120,7 +116,6 @@ RemoveTailList(list_entry *l)
(l)->nle_flink = (e); \
} while (0)
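
A brief usage sketch of these list primitives, for orientation; the list head is initialized by hand here since no init macro is visible in this hunk:

/* Illustration only */
static void sxg_list_demo(void)
{
	LIST_ENTRY head;
	LIST_ENTRY node;
	PLIST_ENTRY removed;

	/* An empty list is a circle that points back at itself */
	head.nle_flink = &head;
	head.nle_blink = &head;

	InsertTailList(&head, &node);		/* head <-> node */
	removed = RemoveHeadList(&head);	/* returns &node; head is empty again */
	(void)removed;
}
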
-
#define ATK_DEBUG 1
#if ATK_DEBUG
@@ -133,7 +128,6 @@ RemoveTailList(list_entry *l)
#define SLIC_TIMESTAMP(value)
#endif
-
/****************** SXG DEFINES *****************************************/
#ifdef ATKDBG
@@ -150,5 +144,4 @@ RemoveTailList(list_entry *l)
#define WRITE_REG64(a,reg,value,cpu) sxg_reg64_write((a),(&reg),(value),(cpu))
#define READ_REG(reg,value) (value) = readl((void __iomem *)(&reg))
-#endif /* _SLIC_OS_SPECIFIC_H_ */
-
+#endif /* _SLIC_OS_SPECIFIC_H_ */
diff --git a/drivers/staging/sxg/sxgdbg.h b/drivers/staging/sxg/sxgdbg.h
index cfb6c7c77a9..4522b8d7149 100644
--- a/drivers/staging/sxg/sxgdbg.h
+++ b/drivers/staging/sxg/sxgdbg.h
@@ -58,7 +58,7 @@
{ \
if (!(a)) { \
DBG_ERROR("ASSERT() Failure: file %s, function %s line %d\n",\
- __FILE__, __FUNCTION__, __LINE__); \
+ __FILE__, __func__, __LINE__); \
} \
}
#endif
diff --git a/drivers/staging/sxg/sxghif.h b/drivers/staging/sxg/sxghif.h
index ed26ceaa131..88bffbaa3be 100644
--- a/drivers/staging/sxg/sxghif.h
+++ b/drivers/staging/sxg/sxghif.h
@@ -14,119 +14,119 @@
*******************************************************************************/
typedef struct _SXG_UCODE_REGS {
// Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0
- u32 Icr; // Code = 0 (extended), ExCode = 0 - Int control
- u32 RsvdReg1; // Code = 1 - TOE -NA
- u32 RsvdReg2; // Code = 2 - TOE -NA
- u32 RsvdReg3; // Code = 3 - TOE -NA
- u32 RsvdReg4; // Code = 4 - TOE -NA
- u32 RsvdReg5; // Code = 5 - TOE -NA
- u32 CardUp; // Code = 6 - Microcode initialized when 1
- u32 RsvdReg7; // Code = 7 - TOE -NA
- u32 CodeNotUsed[8]; // Codes 8-15 not used. ExCode = 0
+ u32 Icr; // Code = 0 (extended), ExCode = 0 - Int control
+ u32 RsvdReg1; // Code = 1 - TOE -NA
+ u32 RsvdReg2; // Code = 2 - TOE -NA
+ u32 RsvdReg3; // Code = 3 - TOE -NA
+ u32 RsvdReg4; // Code = 4 - TOE -NA
+ u32 RsvdReg5; // Code = 5 - TOE -NA
+ u32 CardUp; // Code = 6 - Microcode initialized when 1
+ u32 RsvdReg7; // Code = 7 - TOE -NA
+ u32 CodeNotUsed[8]; // Codes 8-15 not used. ExCode = 0
// This brings us to ExCode 1 at address 0x40 = Interrupt status pointer
- u32 Isp; // Code = 0 (extended), ExCode = 1
- u32 PadEx1[15]; // Codes 1-15 not used with extended codes
+ u32 Isp; // Code = 0 (extended), ExCode = 1
+ u32 PadEx1[15]; // Codes 1-15 not used with extended codes
// ExCode 2 = Interrupt Status Register
- u32 Isr; // Code = 0 (extended), ExCode = 2
- u32 PadEx2[15];
+ u32 Isr; // Code = 0 (extended), ExCode = 2
+ u32 PadEx2[15];
// ExCode 3 = Event base register. Location of event rings
- u32 EventBase; // Code = 0 (extended), ExCode = 3
- u32 PadEx3[15];
+ u32 EventBase; // Code = 0 (extended), ExCode = 3
+ u32 PadEx3[15];
// ExCode 4 = Event ring size
- u32 EventSize; // Code = 0 (extended), ExCode = 4
- u32 PadEx4[15];
+ u32 EventSize; // Code = 0 (extended), ExCode = 4
+ u32 PadEx4[15];
// ExCode 5 = TCB Buffers base address
- u32 TcbBase; // Code = 0 (extended), ExCode = 5
- u32 PadEx5[15];
+ u32 TcbBase; // Code = 0 (extended), ExCode = 5
+ u32 PadEx5[15];
// ExCode 6 = TCB Composite Buffers base address
- u32 TcbCompBase; // Code = 0 (extended), ExCode = 6
- u32 PadEx6[15];
+ u32 TcbCompBase; // Code = 0 (extended), ExCode = 6
+ u32 PadEx6[15];
// ExCode 7 = Transmit ring base address
- u32 XmtBase; // Code = 0 (extended), ExCode = 7
- u32 PadEx7[15];
+ u32 XmtBase; // Code = 0 (extended), ExCode = 7
+ u32 PadEx7[15];
// ExCode 8 = Transmit ring size
- u32 XmtSize; // Code = 0 (extended), ExCode = 8
- u32 PadEx8[15];
+ u32 XmtSize; // Code = 0 (extended), ExCode = 8
+ u32 PadEx8[15];
// ExCode 9 = Receive ring base address
- u32 RcvBase; // Code = 0 (extended), ExCode = 9
- u32 PadEx9[15];
+ u32 RcvBase; // Code = 0 (extended), ExCode = 9
+ u32 PadEx9[15];
// ExCode 10 = Receive ring size
- u32 RcvSize; // Code = 0 (extended), ExCode = 10
- u32 PadEx10[15];
+ u32 RcvSize; // Code = 0 (extended), ExCode = 10
+ u32 PadEx10[15];
// ExCode 11 = Read EEPROM Config
- u32 Config; // Code = 0 (extended), ExCode = 11
- u32 PadEx11[15];
+ u32 Config; // Code = 0 (extended), ExCode = 11
+ u32 PadEx11[15];
// ExCode 12 = Multicast bits 31:0
- u32 McastLow; // Code = 0 (extended), ExCode = 12
- u32 PadEx12[15];
+ u32 McastLow; // Code = 0 (extended), ExCode = 12
+ u32 PadEx12[15];
// ExCode 13 = Multicast bits 63:32
- u32 McastHigh; // Code = 0 (extended), ExCode = 13
- u32 PadEx13[15];
+ u32 McastHigh; // Code = 0 (extended), ExCode = 13
+ u32 PadEx13[15];
// ExCode 14 = Ping
- u32 Ping; // Code = 0 (extended), ExCode = 14
- u32 PadEx14[15];
+ u32 Ping; // Code = 0 (extended), ExCode = 14
+ u32 PadEx14[15];
// ExCode 15 = Link MTU
- u32 LinkMtu; // Code = 0 (extended), ExCode = 15
- u32 PadEx15[15];
+ u32 LinkMtu; // Code = 0 (extended), ExCode = 15
+ u32 PadEx15[15];
// ExCode 16 = Download synchronization
- u32 LoadSync; // Code = 0 (extended), ExCode = 16
- u32 PadEx16[15];
+ u32 LoadSync; // Code = 0 (extended), ExCode = 16
+ u32 PadEx16[15];
// ExCode 17 = Upper DRAM address bits on 32-bit systems
- u32 Upper; // Code = 0 (extended), ExCode = 17
- u32 PadEx17[15];
+ u32 Upper; // Code = 0 (extended), ExCode = 17
+ u32 PadEx17[15];
// ExCode 18 = Slowpath Send Index Address
- u32 SPSendIndex; // Code = 0 (extended), ExCode = 18
- u32 PadEx18[15];
- u32 RsvdXF; // Code = 0 (extended), ExCode = 19
- u32 PadEx19[15];
+ u32 SPSendIndex; // Code = 0 (extended), ExCode = 18
+ u32 PadEx18[15];
+ u32 RsvdXF; // Code = 0 (extended), ExCode = 19
+ u32 PadEx19[15];
// ExCode 20 = Aggregation
- u32 Aggregation; // Code = 0 (extended), ExCode = 20
- u32 PadEx20[15];
+ u32 Aggregation; // Code = 0 (extended), ExCode = 20
+ u32 PadEx20[15];
// ExCode 21 = Receive MDL push timer
- u32 PushTicks; // Code = 0 (extended), ExCode = 21
- u32 PadEx21[15];
+ u32 PushTicks; // Code = 0 (extended), ExCode = 21
+ u32 PadEx21[15];
// ExCode 22 = TOE NA
- u32 AckFrequency; // Code = 0 (extended), ExCode = 22
- u32 PadEx22[15];
+ u32 AckFrequency; // Code = 0 (extended), ExCode = 22
+ u32 PadEx22[15];
// ExCode 23 = TOE NA
- u32 RsvdReg23;
- u32 PadEx23[15];
+ u32 RsvdReg23;
+ u32 PadEx23[15];
// ExCode 24 = TOE NA
- u32 RsvdReg24;
- u32 PadEx24[15];
+ u32 RsvdReg24;
+ u32 PadEx24[15];
// ExCode 25 = TOE NA
- u32 RsvdReg25; // Code = 0 (extended), ExCode = 25
- u32 PadEx25[15];
+ u32 RsvdReg25; // Code = 0 (extended), ExCode = 25
+ u32 PadEx25[15];
// ExCode 26 = Receive checksum requirements
- u32 ReceiveChecksum; // Code = 0 (extended), ExCode = 26
- u32 PadEx26[15];
+ u32 ReceiveChecksum; // Code = 0 (extended), ExCode = 26
+ u32 PadEx26[15];
// ExCode 27 = RSS Requirements
- u32 Rss; // Code = 0 (extended), ExCode = 27
- u32 PadEx27[15];
+ u32 Rss; // Code = 0 (extended), ExCode = 27
+ u32 PadEx27[15];
// ExCode 28 = RSS Table
- u32 RssTable; // Code = 0 (extended), ExCode = 28
- u32 PadEx28[15];
+ u32 RssTable; // Code = 0 (extended), ExCode = 28
+ u32 PadEx28[15];
// ExCode 29 = Event ring release entries
- u32 EventRelease; // Code = 0 (extended), ExCode = 29
- u32 PadEx29[15];
+ u32 EventRelease; // Code = 0 (extended), ExCode = 29
+ u32 PadEx29[15];
// ExCode 30 = Number of receive bufferlist commands on ring 0
- u32 RcvCmd; // Code = 0 (extended), ExCode = 30
- u32 PadEx30[15];
+ u32 RcvCmd; // Code = 0 (extended), ExCode = 30
+ u32 PadEx30[15];
// ExCode 31 = slowpath transmit command - Data[31:0] = 1
- u32 XmtCmd; // Code = 0 (extended), ExCode = 31
- u32 PadEx31[15];
+ u32 XmtCmd; // Code = 0 (extended), ExCode = 31
+ u32 PadEx31[15];
// ExCode 32 = Dump command
- u32 DumpCmd; // Code = 0 (extended), ExCode = 32
- u32 PadEx32[15];
+ u32 DumpCmd; // Code = 0 (extended), ExCode = 32
+ u32 PadEx32[15];
// ExCode 33 = Debug command
- u32 DebugCmd; // Code = 0 (extended), ExCode = 33
- u32 PadEx33[15];
+ u32 DebugCmd; // Code = 0 (extended), ExCode = 33
+ u32 PadEx33[15];
// There are 128 possible extended commands - each of which accounts for 16
// words (including the non-relevant base command codes 1-15).
// Pad for the remainder of these here to bring us to the next CPU
// base. As extended codes are added, reduce the first array value in
// the following field
- u32 PadToNextCpu[94][16]; // 94 = 128 - 34 (34 = Excodes 0 - 33)
+ u32 PadToNextCpu[94][16]; // 94 = 128 - 34 (34 = Excodes 0 - 33)
} SXG_UCODE_REGS, *PSXG_UCODE_REGS;
// Interrupt control register (0) values
@@ -141,7 +141,7 @@ typedef struct _SXG_UCODE_REGS {
// The Microcode supports up to 16 RSS queues
#define SXG_MAX_RSS 16
-#define SXG_MAX_RSS_TABLE_SIZE 256 // 256-byte max
+#define SXG_MAX_RSS_TABLE_SIZE 256 // 256-byte max
#define SXG_RSS_TCP6 0x00000001 // RSS TCP over IPv6
#define SXG_RSS_TCP4 0x00000002 // RSS TCP over IPv4
@@ -170,16 +170,16 @@ typedef struct _SXG_UCODE_REGS {
* SXG_UCODE_REGS definition above
*/
typedef struct _SXG_TCB_REGS {
- u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */
- u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */
- u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */
- u32 Rsvd1; /* Code = 3 - TOE NA */
- u32 Rsvd2; /* Code = 4 - TOE NA */
- u32 Rsvd3; /* Code = 5 - TOE NA */
- u32 Invalid; /* Code = 6 - Reserved for "CardUp" see above */
- u32 Rsvd4; /* Code = 7 - TOE NA */
- u32 Rsvd5; /* Code = 8 - TOE NA */
- u32 Pad[7]; /* Codes 8-15 - Not used. */
+ u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */
+ u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */
+ u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */
+ u32 Rsvd1; /* Code = 3 - TOE NA */
+ u32 Rsvd2; /* Code = 4 - TOE NA */
+ u32 Rsvd3; /* Code = 5 - TOE NA */
+ u32 Invalid; /* Code = 6 - Reserved for "CardUp" see above */
+ u32 Rsvd4; /* Code = 7 - TOE NA */
+ u32 Rsvd5; /* Code = 8 - TOE NA */
+ u32 Pad[7]; /* Codes 8-15 - Not used. */
} SXG_TCB_REGS, *PSXG_TCB_REGS;
/***************************************************************************
@@ -273,27 +273,27 @@ typedef struct _SXG_TCB_REGS {
*/
#pragma pack(push, 1)
typedef struct _SXG_EVENT {
- u32 Pad[1]; // not used
- u32 SndUna; // SndUna value
- u32 Resid; // receive MDL resid
+ u32 Pad[1]; // not used
+ u32 SndUna; // SndUna value
+ u32 Resid; // receive MDL resid
union {
- void * HostHandle; // Receive host handle
- u32 Rsvd1; // TOE NA
+ void *HostHandle; // Receive host handle
+ u32 Rsvd1; // TOE NA
struct {
- u32 NotUsed;
- u32 Rsvd2; // TOE NA
+ u32 NotUsed;
+ u32 Rsvd2; // TOE NA
} Flush;
};
- u32 Toeplitz; // RSS Toeplitz hash
+ u32 Toeplitz; // RSS Toeplitz hash
union {
- ushort Rsvd3; // TOE NA
- ushort HdrOffset; // Slowpath
+ ushort Rsvd3; // TOE NA
+ ushort HdrOffset; // Slowpath
};
- ushort Length; //
- unsigned char Rsvd4; // TOE NA
- unsigned char Code; // Event code
- unsigned char CommandIndex; // New ring index
- unsigned char Status; // Event status
+ ushort Length; //
+ unsigned char Rsvd4; // TOE NA
+ unsigned char Code; // Event code
+ unsigned char CommandIndex; // New ring index
+ unsigned char Status; // Event status
} SXG_EVENT, *PSXG_EVENT;
#pragma pack(pop)
@@ -318,12 +318,12 @@ typedef struct _SXG_EVENT {
// Event ring
// Size must be power of 2, between 128 and 16k
#define EVENT_RING_SIZE 4096 // ??
-#define EVENT_RING_BATCH 16 // Hand entries back 16 at a time.
-#define EVENT_BATCH_LIMIT 256 // Stop processing events after 256 (16 * 16)
+#define EVENT_RING_BATCH 16 // Hand entries back 16 at a time.
+#define EVENT_BATCH_LIMIT 256 // Stop processing events after 256 (16 * 16)
typedef struct _SXG_EVENT_RING {
- SXG_EVENT Ring[EVENT_RING_SIZE];
-}SXG_EVENT_RING, *PSXG_EVENT_RING;
+ SXG_EVENT Ring[EVENT_RING_SIZE];
+} SXG_EVENT_RING, *PSXG_EVENT_RING;
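
The EVENT_RING_BATCH / EVENT_BATCH_LIMIT comments above describe a batching policy: hand entries back 16 at a time and stop after 256 per pass. A skeleton of that policy, purely for illustration (this is not the driver's actual event loop, and the per-event handling is omitted):

/* Illustrative skeleton of the batching policy described above */
static void sxg_event_batch_sketch(p_adapter_t adapter, PSXG_EVENT_RING ring)
{
	u32 processed = 0;
	u32 batched = 0;

	while (processed < EVENT_BATCH_LIMIT) {
		/* ... handle one event from ring->Ring[] here ... */
		processed++;
		if (++batched == EVENT_RING_BATCH) {
			/* Hand a batch of ring entries back to the card */
			WRITE_REG(adapter->UcodeRegs[0].EventRelease,
				  EVENT_RING_BATCH, TRUE);
			batched = 0;
		}
	}
}
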
/***************************************************************************
*
@@ -341,7 +341,7 @@ typedef struct _SXG_EVENT_RING {
#define SXG_TCB_PER_BUCKET 16
#define SXG_TCB_BUCKET_MASK 0xFF0 // Bucket portion of TCB ID
#define SXG_TCB_ELEMENT_MASK 0x00F // Element within bucket
-#define SXG_TCB_BUCKETS 256 // 256 * 16 = 4k
+#define SXG_TCB_BUCKETS 256 // 256 * 16 = 4k
#define SXG_TCB_BUFFER_SIZE 512 // ASSERT format is correct
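
Worked example of the bucket/element split defined above: a TCB ID of 0x123 gives bucket bits 0x120 (0x123 & SXG_TCB_BUCKET_MASK) and element 0x3 (0x123 & SXG_TCB_ELEMENT_MASK). Expressed as helpers, under the assumption that the bucket index is simply the masked value shifted down by 4 (the real lookup code is not in this hunk):

/* Illustration only - assumed decomposition of a TCB ID */
static inline u32 sxg_tcb_bucket_sketch(u32 TcbId)
{
	return (TcbId & SXG_TCB_BUCKET_MASK) >> 4;	/* 0..255 */
}

static inline u32 sxg_tcb_element_sketch(u32 TcbId)
{
	return TcbId & SXG_TCB_ELEMENT_MASK;		/* 0..15 */
}
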
@@ -368,7 +368,6 @@ typedef struct _SXG_EVENT_RING {
&(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip : \
&(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip
-
#if DBG
// Horrible kludge to distinguish dumb-nic, slowpath, and
// fastpath traffic. Decrement the HopLimit by one
@@ -396,16 +395,16 @@ typedef struct _SXG_EVENT_RING {
* Receive and transmit rings
***************************************************************************/
#define SXG_MAX_RING_SIZE 256
-#define SXG_XMT_RING_SIZE 128 // Start with 128
-#define SXG_RCV_RING_SIZE 128 // Start with 128
+#define SXG_XMT_RING_SIZE 128 // Start with 128
+#define SXG_RCV_RING_SIZE 128 // Start with 128
#define SXG_MAX_ENTRIES 4096
// Structure and macros to manage a ring
typedef struct _SXG_RING_INFO {
- unsigned char Head; // Where we add entries - Note unsigned char:RING_SIZE
- unsigned char Tail; // Where we pull off completed entries
- ushort Size; // Ring size - Must be multiple of 2
- void * Context[SXG_MAX_RING_SIZE]; // Shadow ring
+ unsigned char Head; // Where we add entries - Note unsigned char:RING_SIZE
+ unsigned char Tail; // Where we pull off completed entries
+ ushort Size; // Ring size - Must be multiple of 2
+ void *Context[SXG_MAX_RING_SIZE]; // Shadow ring
} SXG_RING_INFO, *PSXG_RING_INFO;
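
SXG_INITIALIZE_RING begins just below; SXG_RING_FULL and SXG_GET_CMD, used in sxg.c above, are presumably defined nearby but are not shown in this hunk. For a power-of-two Size with Head as the producer index and Tail as the consumer index, the usual checks look like this (illustration only, not the real macros):

/* Typical power-of-two ring checks - illustration only */
#define SXG_RING_EMPTY_SKETCH(_ring)	((_ring)->Head == (_ring)->Tail)
#define SXG_RING_FULL_SKETCH(_ring)	\
	((((_ring)->Head + 1) & ((_ring)->Size - 1)) == (_ring)->Tail)
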
#define SXG_INITIALIZE_RING(_ring, _size) { \
@@ -483,40 +482,40 @@ typedef struct _SXG_RING_INFO {
*/
#pragma pack(push, 1)
typedef struct _SXG_CMD {
- dma_addr_t Sgl; // Physical address of SGL
+ dma_addr_t Sgl; // Physical address of SGL
union {
struct {
- dma64_addr_t FirstSgeAddress;// Address of first SGE
- u32 FirstSgeLength; // Length of first SGE
+ dma64_addr_t FirstSgeAddress; // Address of first SGE
+ u32 FirstSgeLength; // Length of first SGE
union {
- u32 Rsvd1; // TOE NA
- u32 SgeOffset; // Slowpath - 2nd SGE offset
- u32 Resid; // MDL completion - clobbers update
+ u32 Rsvd1; // TOE NA
+ u32 SgeOffset; // Slowpath - 2nd SGE offset
+ u32 Resid; // MDL completion - clobbers update
};
union {
- u32 TotalLength; // Total transfer length
- u32 Mss; // LSO MSS
+ u32 TotalLength; // Total transfer length
+ u32 Mss; // LSO MSS
};
} Buffer;
};
union {
struct {
- unsigned char Flags:4; // slowpath flags
- unsigned char IpHl:4; // Ip header length (>>2)
- unsigned char MacLen; // Mac header len
+ unsigned char Flags:4; // slowpath flags
+ unsigned char IpHl:4; // Ip header length (>>2)
+ unsigned char MacLen; // Mac header len
} CsumFlags;
struct {
- ushort Flags:4; // slowpath flags
- ushort TcpHdrOff:7; // TCP
- ushort MacLen:5; // Mac header len
+ ushort Flags:4; // slowpath flags
+ ushort TcpHdrOff:7; // TCP
+ ushort MacLen:5; // Mac header len
} LsoFlags;
- ushort Flags; // flags
+ ushort Flags; // flags
};
union {
- ushort SgEntries; // SG entry count including first sge
+ ushort SgEntries; // SG entry count including first sge
struct {
- unsigned char Status; // Copied from event status
- unsigned char NotUsed;
+ unsigned char Status; // Copied from event status
+ unsigned char NotUsed;
} Status;
};
} SXG_CMD, *PSXG_CMD;
@@ -524,8 +523,8 @@ typedef struct _SXG_CMD {
#pragma pack(push, 1)
typedef struct _VLAN_HDR {
- ushort VlanTci;
- ushort VlanTpid;
+ ushort VlanTci;
+ ushort VlanTpid;
} VLAN_HDR, *PVLAN_HDR;
#pragma pack(pop)
@@ -561,16 +560,16 @@ typedef struct _VLAN_HDR {
*
*/
// Slowpath CMD flags
-#define SXG_SLOWCMD_CSUM_IP 0x01 // Checksum IP
-#define SXG_SLOWCMD_CSUM_TCP 0x02 // Checksum TCP
-#define SXG_SLOWCMD_LSO 0x04 // Large segment send
+#define SXG_SLOWCMD_CSUM_IP 0x01 // Checksum IP
+#define SXG_SLOWCMD_CSUM_TCP 0x02 // Checksum TCP
+#define SXG_SLOWCMD_LSO 0x04 // Large segment send
typedef struct _SXG_XMT_RING {
- SXG_CMD Descriptors[SXG_XMT_RING_SIZE];
+ SXG_CMD Descriptors[SXG_XMT_RING_SIZE];
} SXG_XMT_RING, *PSXG_XMT_RING;
typedef struct _SXG_RCV_RING {
- SXG_CMD Descriptors[SXG_RCV_RING_SIZE];
+ SXG_CMD Descriptors[SXG_RCV_RING_SIZE];
} SXG_RCV_RING, *PSXG_RCV_RING;
/***************************************************************************
@@ -578,8 +577,8 @@ typedef struct _SXG_RCV_RING {
* shared memory allocation
***************************************************************************/
typedef enum {
- SXG_BUFFER_TYPE_RCV, // Receive buffer
- SXG_BUFFER_TYPE_SGL // SGL buffer
+ SXG_BUFFER_TYPE_RCV, // Receive buffer
+ SXG_BUFFER_TYPE_SGL // SGL buffer
} SXG_BUFFER_TYPE;
// State for SXG buffers
@@ -668,60 +667,60 @@ typedef enum {
#define SXG_RCV_DATA_BUFFERS 4096 // Amount to give to the card
#define SXG_INITIAL_RCV_DATA_BUFFERS 8192 // Initial pool of buffers
#define SXG_MIN_RCV_DATA_BUFFERS 2048 // Minimum amount and when to get more
-#define SXG_MAX_RCV_BLOCKS 128 // = 16384 receive buffers
+#define SXG_MAX_RCV_BLOCKS 128 // = 16384 receive buffers
// Receive buffer header
typedef struct _SXG_RCV_DATA_BUFFER_HDR {
- dma_addr_t PhysicalAddress; // Buffer physical address
+ dma_addr_t PhysicalAddress; // Buffer physical address
// Note - DO NOT USE the VirtualAddress field to locate data.
// Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead.
- void *VirtualAddress; // Start of buffer
- LIST_ENTRY FreeList; // Free queue of buffers
- struct _SXG_RCV_DATA_BUFFER_HDR *Next; // Fastpath data buffer queue
- u32 Size; // Buffer size
- u32 ByteOffset; // See SXG_RESTORE_MDL_OFFSET
- unsigned char State; // See SXG_BUFFER state above
- unsigned char Status; // Event status (to log PUSH)
- struct sk_buff * skb; // Double mapped (nbl and pkt)
+ void *VirtualAddress; // Start of buffer
+ LIST_ENTRY FreeList; // Free queue of buffers
+ struct _SXG_RCV_DATA_BUFFER_HDR *Next; // Fastpath data buffer queue
+ u32 Size; // Buffer size
+ u32 ByteOffset; // See SXG_RESTORE_MDL_OFFSET
+ unsigned char State; // See SXG_BUFFER state above
+ unsigned char Status; // Event status (to log PUSH)
+ struct sk_buff *skb; // Double mapped (nbl and pkt)
} SXG_RCV_DATA_BUFFER_HDR, *PSXG_RCV_DATA_BUFFER_HDR;
// SxgSlowReceive uses the PACKET (skb) contained
// in the SXG_RCV_DATA_BUFFER_HDR when indicating dumb-nic data
#define SxgDumbRcvPacket skb
-#define SXG_RCV_DATA_HDR_SIZE 256 // Space for SXG_RCV_DATA_BUFFER_HDR
+#define SXG_RCV_DATA_HDR_SIZE 256 // Space for SXG_RCV_DATA_BUFFER_HDR
#define SXG_RCV_DATA_BUFFER_SIZE 2048 // Non jumbo = 2k including HDR
#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 // jumbo = 10k including HDR
// Receive data descriptor
typedef struct _SXG_RCV_DATA_DESCRIPTOR {
union {
- struct sk_buff * VirtualAddress; // Host handle
- u64 ForceTo8Bytes; // Force x86 to 8-byte boundary
+ struct sk_buff *VirtualAddress; // Host handle
+ u64 ForceTo8Bytes; // Force x86 to 8-byte boundary
};
- dma_addr_t PhysicalAddress;
+ dma_addr_t PhysicalAddress;
} SXG_RCV_DATA_DESCRIPTOR, *PSXG_RCV_DATA_DESCRIPTOR;
// Receive descriptor block
#define SXG_RCV_DESCRIPTORS_PER_BLOCK 128
#define SXG_RCV_DESCRIPTOR_BLOCK_SIZE 2048 // For sanity check
typedef struct _SXG_RCV_DESCRIPTOR_BLOCK {
- SXG_RCV_DATA_DESCRIPTOR Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK];
+ SXG_RCV_DATA_DESCRIPTOR Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK];
} SXG_RCV_DESCRIPTOR_BLOCK, *PSXG_RCV_DESCRIPTOR_BLOCK;
// Receive descriptor block header
typedef struct _SXG_RCV_DESCRIPTOR_BLOCK_HDR {
- void * VirtualAddress; // Start of 2k buffer
- dma_addr_t PhysicalAddress; // ..and it's physical address
- LIST_ENTRY FreeList; // Free queue of descriptor blocks
- unsigned char State; // See SXG_BUFFER state above
+ void *VirtualAddress; // Start of 2k buffer
+ dma_addr_t PhysicalAddress; // ..and its physical address
+ LIST_ENTRY FreeList; // Free queue of descriptor blocks
+ unsigned char State; // See SXG_BUFFER state above
} SXG_RCV_DESCRIPTOR_BLOCK_HDR, *PSXG_RCV_DESCRIPTOR_BLOCK_HDR;
// Receive block header
typedef struct _SXG_RCV_BLOCK_HDR {
- void * VirtualAddress; // Start of virtual memory
- dma_addr_t PhysicalAddress; // ..and it's physical address
- LIST_ENTRY AllList; // Queue of all SXG_RCV_BLOCKS
+ void *VirtualAddress; // Start of virtual memory
+ dma_addr_t PhysicalAddress; // ..and its physical address
+ LIST_ENTRY AllList; // Queue of all SXG_RCV_BLOCKS
} SXG_RCV_BLOCK_HDR, *PSXG_RCV_BLOCK_HDR;
// Macros to determine data structure offsets into receive block
@@ -747,8 +746,8 @@ typedef struct _SXG_RCV_BLOCK_HDR {
// Use the miniport reserved portion of the NBL to locate
// our SXG_RCV_DATA_BUFFER_HDR structure.
typedef struct _SXG_RCV_NBL_RESERVED {
- PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
- void * Available;
+ PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
+ void *Available;
} SXG_RCV_NBL_RESERVED, *PSXG_RCV_NBL_RESERVED;
#define SXG_RCV_NBL_BUFFER_HDR(_NBL) (((PSXG_RCV_NBL_RESERVED)NET_BUFFER_LIST_MINIPORT_RESERVED(_NBL))->RcvDataBufferHdr)
@@ -760,12 +759,11 @@ typedef struct _SXG_RCV_NBL_RESERVED {
#define SXG_MIN_SGL_BUFFERS 2048 // Minimum amount and when to get more
#define SXG_MAX_SGL_BUFFERS 16384 // Maximum to allocate (note ADAPT:ushort)
-
// Self identifying structure type
typedef enum _SXG_SGL_TYPE {
- SXG_SGL_DUMB, // Dumb NIC SGL
- SXG_SGL_SLOW, // Slowpath protocol header - see below
- SXG_SGL_CHIMNEY // Chimney offload SGL
+ SXG_SGL_DUMB, // Dumb NIC SGL
+ SXG_SGL_SLOW, // Slowpath protocol header - see below
+ SXG_SGL_CHIMNEY // Chimney offload SGL
} SXG_SGL_TYPE, PSXG_SGL_TYPE;
// Note - the description below is Microsoft specific
@@ -774,14 +772,14 @@ typedef enum _SXG_SGL_TYPE {
// for the SCATTER_GATHER_LIST portion of the SXG_SCATTER_GATHER data structure.
// The following considerations apply when setting this value:
// - First, the Sahara card is designed to read the Microsoft SGL structure
-// straight out of host memory. This means that the SGL must reside in
-// shared memory. If the length here is smaller than the SGL for the
-// NET_BUFFER, then NDIS will allocate its own buffer. The buffer
-// that NDIS allocates is not in shared memory, so when this happens,
-// the SGL will need to be copied to a set of SXG_SCATTER_GATHER buffers.
-// In other words.. we don't want this value to be too small.
+// straight out of host memory. This means that the SGL must reside in
+// shared memory. If the length here is smaller than the SGL for the
+// NET_BUFFER, then NDIS will allocate its own buffer. The buffer
+// that NDIS allocates is not in shared memory, so when this happens,
+// the SGL will need to be copied to a set of SXG_SCATTER_GATHER buffers.
+// In other words.. we don't want this value to be too small.
// - On the other hand.. we're allocating up to 16k of these things. If
-// we make this too big, we start to consume a ton of memory..
+// we make this too big, we start to consume a ton of memory..
// At the moment, I'm going to limit the number of SG entries to 150.
// If each entry maps roughly 4k, then this should cover roughly 600kB
// NET_BUFFERs. Furthermore, since each entry is 24 bytes, the total
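A back-of-envelope sketch of the sizing argument in the comment above, assuming SXG_SGL_ENTRIES is the 150 the comment settles on and a 24-byte SXG_X64_SGE as stated; the numbers are illustrative only:

#include <stdio.h>

#define SXG_SGL_ENTRIES     150
#define SXG_X64_SGE_SIZE    24          /* dma64_addr_t + 2*u32 + u64 */
#define SXG_MAX_SGL_BUFFERS 16384       /* from the #define above */

int main(void)
{
	unsigned long per_sgl = (unsigned long)SXG_SGL_ENTRIES * SXG_X64_SGE_SIZE;
	unsigned long pool    = per_sgl * SXG_MAX_SGL_BUFFERS;

	/* Roughly 3600 bytes per SGL, and ~56 MB if the full 16K pool were
	 * ever allocated - which is why the entry count is capped rather
	 * than sized for the largest possible NET_BUFFER. */
	printf("per-SGL: %lu bytes, worst-case pool: %lu MB\n",
	       per_sgl, pool >> 20);
	return 0;
}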
@@ -801,24 +799,23 @@ typedef enum _SXG_SGL_TYPE {
// the SGL. The following structure defines an x64
// formatted SGL entry
typedef struct _SXG_X64_SGE {
- dma64_addr_t Address; // same as wdm.h
- u32 Length; // same as wdm.h
- u32 CompilerPad;// The compiler pads to 8-bytes
- u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
+ dma64_addr_t Address; // same as wdm.h
+ u32 Length; // same as wdm.h
+ u32 CompilerPad; // The compiler pads to 8-bytes
+ u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
} SXG_X64_SGE, *PSXG_X64_SGE;
typedef struct _SCATTER_GATHER_ELEMENT {
- dma64_addr_t Address; // same as wdm.h
- u32 Length; // same as wdm.h
- u32 CompilerPad;// The compiler pads to 8-bytes
- u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
+ dma64_addr_t Address; // same as wdm.h
+ u32 Length; // same as wdm.h
+ u32 CompilerPad; // The compiler pads to 8-bytes
+ u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
} SCATTER_GATHER_ELEMENT, *PSCATTER_GATHER_ELEMENT;
-
typedef struct _SCATTER_GATHER_LIST {
- u32 NumberOfElements;
- u32 * Reserved;
- SCATTER_GATHER_ELEMENT Elements[];
+ u32 NumberOfElements;
+ u32 *Reserved;
+ SCATTER_GATHER_ELEMENT Elements[];
} SCATTER_GATHER_LIST, *PSCATTER_GATHER_LIST;
// The card doesn't care about anything except elements, so
@@ -826,26 +823,26 @@ typedef struct _SCATTER_GATHER_LIST {
// SGL structure. But redefine from wdm.h:SCATTER_GATHER_LIST so
// we can specify SXG_X64_SGE and define a fixed number of elements
typedef struct _SXG_X64_SGL {
- u32 NumberOfElements;
- u32 * Reserved;
- SXG_X64_SGE Elements[SXG_SGL_ENTRIES];
+ u32 NumberOfElements;
+ u32 *Reserved;
+ SXG_X64_SGE Elements[SXG_SGL_ENTRIES];
} SXG_X64_SGL, *PSXG_X64_SGL;
typedef struct _SXG_SCATTER_GATHER {
- SXG_SGL_TYPE Type; // FIRST! Dumb-nic or offload
- void * adapter; // Back pointer to adapter
- LIST_ENTRY FreeList; // Free SXG_SCATTER_GATHER blocks
- LIST_ENTRY AllList; // All SXG_SCATTER_GATHER blocks
- dma_addr_t PhysicalAddress;// physical address
- unsigned char State; // See SXG_BUFFER state above
- unsigned char CmdIndex; // Command ring index
- struct sk_buff * DumbPacket; // Associated Packet
- u32 Direction; // For asynchronous completions
- u32 CurOffset; // Current SGL offset
- u32 SglRef; // SGL reference count
- VLAN_HDR VlanTag; // VLAN tag to be inserted into SGL
- PSCATTER_GATHER_LIST pSgl; // SGL Addr. Possibly &Sgl
- SXG_X64_SGL Sgl; // SGL handed to card
+ SXG_SGL_TYPE Type; // FIRST! Dumb-nic or offload
+ void *adapter; // Back pointer to adapter
+ LIST_ENTRY FreeList; // Free SXG_SCATTER_GATHER blocks
+ LIST_ENTRY AllList; // All SXG_SCATTER_GATHER blocks
+ dma_addr_t PhysicalAddress; // physical address
+ unsigned char State; // See SXG_BUFFER state above
+ unsigned char CmdIndex; // Command ring index
+ struct sk_buff *DumbPacket; // Associated Packet
+ u32 Direction; // For asynchronous completions
+ u32 CurOffset; // Current SGL offset
+ u32 SglRef; // SGL reference count
+ VLAN_HDR VlanTag; // VLAN tag to be inserted into SGL
+ PSCATTER_GATHER_LIST pSgl; // SGL Addr. Possibly &Sgl
+ SXG_X64_SGL Sgl; // SGL handed to card
} SXG_SCATTER_GATHER, *PSXG_SCATTER_GATHER;
#if defined(CONFIG_X86_64)
@@ -856,6 +853,5 @@ typedef struct _SXG_SCATTER_GATHER {
#define SXG_SGL_BUFFER(_SxgSgl) NULL
#define SXG_SGL_BUF_SIZE 0
#else
- Stop Compilation;
+Stop Compilation;
#endif
-
diff --git a/drivers/staging/sxg/sxghw.h b/drivers/staging/sxg/sxghw.h
index 8f4f6effdd9..2222ae91fd9 100644
--- a/drivers/staging/sxg/sxghw.h
+++ b/drivers/staging/sxg/sxghw.h
@@ -13,11 +13,11 @@
/*******************************************************************************
* Configuration space
*******************************************************************************/
-// PCI Vendor ID
-#define SXG_VENDOR_ID 0x139A // Alacritech's Vendor ID
+/* PCI Vendor ID */
+#define SXG_VENDOR_ID 0x139A /* Alacritech's Vendor ID */
// PCI Device ID
-#define SXG_DEVICE_ID 0x0009 // Sahara Device ID
+#define SXG_DEVICE_ID 0x0009 /* Sahara Device ID */
//
// Subsystem IDs.
@@ -141,7 +141,7 @@ typedef struct _SXG_HW_REGS {
#define SXG_REGISTER_SIZE_PER_CPU 0x00002000 // Used to sanity check UCODE_REGS structure
// Sahara receive sequencer status values
-#define SXG_RCV_STATUS_ATTN 0x80000000 // Attention
+#define SXG_RCV_STATUS_ATTN 0x80000000 // Attention
#define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 // Transport mask
#define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 // Transport error
#define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 // Transport cksum error
@@ -156,9 +156,9 @@ typedef struct _SXG_HW_REGS {
#define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 // Transport FTP
#define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 // Transport HTTP
#define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 // Transport SMB
-#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 // Network mask
+#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 // Network mask
#define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 // Network error
-#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 // Network cksum error
+#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 // Network cksum error
#define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 // Network underflow error
#define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 // Network header length
#define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 // Network overflow detected
@@ -167,67 +167,67 @@ typedef struct _SXG_HW_REGS {
#define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 // Network offset detected
#define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 // Network fragment detected
#define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 // Network transport type mask
-#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 // UDP
-#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 // TCP
-#define SXG_RCV_STATUS_IPONLY 0x00008000 // IP-only not TCP
-#define SXG_RCV_STATUS_PKT_PRI 0x00006000 // Receive priority
-#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 // Receive priority shift
-#define SXG_RCV_STATUS_PARITY 0x00001000 // MAC Receive RAM parity error
-#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 // Link address detection mask
-#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 // Link address D
-#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 // Link address C
-#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 // Link address B
-#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 // Link address A
+#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 // UDP
+#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 // TCP
+#define SXG_RCV_STATUS_IPONLY 0x00008000 // IP-only not TCP
+#define SXG_RCV_STATUS_PKT_PRI 0x00006000 // Receive priority
+#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 // Receive priority shift
+#define SXG_RCV_STATUS_PARITY 0x00001000 // MAC Receive RAM parity error
+#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 // Link address detection mask
+#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 // Link address D
+#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 // Link address C
+#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 // Link address B
+#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 // Link address A
#define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 // Link address broadcast
#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 // Link address multicast
#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 // Link control multicast
-#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
-#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 // Link error
-#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
-#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 // RcvMacQ parity error
-#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 // Data early
+#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
+#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 // Link error
+#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
+#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 // RcvMacQ parity error
+#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 // Data early
#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 // Buffer overflow
-#define SXG_RCV_STATUS_LINK_CODE 0x00000084 // Link code error
-#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 // Dribble nibble
-#define SXG_RCV_STATUS_LINK_CRC 0x00000082 // CRC error
-#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 // Link overflow
-#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 // Link underflow
-#define SXG_RCV_STATUS_LINK_8023 0x00000020 // 802.3
-#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 // Snap
-#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 // VLAN
+#define SXG_RCV_STATUS_LINK_CODE 0x00000084 // Link code error
+#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 // Dribble nibble
+#define SXG_RCV_STATUS_LINK_CRC 0x00000082 // CRC error
+#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 // Link overflow
+#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 // Link underflow
+#define SXG_RCV_STATUS_LINK_8023 0x00000020 // 802.3
+#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 // Snap
+#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 // VLAN
#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 // Network type mask
-#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 // Control packet
-#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 // IPv6 packet
-#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 // IPv4 packet
+#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 // Control packet
+#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 // IPv6 packet
+#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 // IPv4 packet
/***************************************************************************
* Sahara receive and transmit configuration registers
***************************************************************************/
-#define RCV_CONFIG_RESET 0x80000000 // RcvConfig register reset
-#define RCV_CONFIG_ENABLE 0x40000000 // Enable the receive logic
-#define RCV_CONFIG_ENPARSE 0x20000000 // Enable the receive parser
-#define RCV_CONFIG_SOCKET 0x10000000 // Enable the socket detector
-#define RCV_CONFIG_RCVBAD 0x08000000 // Receive all bad frames
-#define RCV_CONFIG_CONTROL 0x04000000 // Receive all control frames
-#define RCV_CONFIG_RCVPAUSE 0x02000000 // Enable pause transmit when attn
-#define RCV_CONFIG_TZIPV6 0x01000000 // Include TCP port w/ IPv6 toeplitz
-#define RCV_CONFIG_TZIPV4 0x00800000 // Include TCP port w/ IPv4 toeplitz
-#define RCV_CONFIG_FLUSH 0x00400000 // Flush buffers
+#define RCV_CONFIG_RESET 0x80000000 // RcvConfig register reset
+#define RCV_CONFIG_ENABLE 0x40000000 // Enable the receive logic
+#define RCV_CONFIG_ENPARSE 0x20000000 // Enable the receive parser
+#define RCV_CONFIG_SOCKET 0x10000000 // Enable the socket detector
+#define RCV_CONFIG_RCVBAD 0x08000000 // Receive all bad frames
+#define RCV_CONFIG_CONTROL 0x04000000 // Receive all control frames
+#define RCV_CONFIG_RCVPAUSE 0x02000000 // Enable pause transmit when attn
+#define RCV_CONFIG_TZIPV6 0x01000000 // Include TCP port w/ IPv6 toeplitz
+#define RCV_CONFIG_TZIPV4 0x00800000 // Include TCP port w/ IPv4 toeplitz
+#define RCV_CONFIG_FLUSH 0x00400000 // Flush buffers
#define RCV_CONFIG_PRIORITY_MASK 0x00300000 // Priority level
#define RCV_CONFIG_HASH_MASK 0x00030000 // Hash depth
-#define RCV_CONFIG_HASH_8 0x00000000 // Hash depth 8
-#define RCV_CONFIG_HASH_16 0x00010000 // Hash depth 16
-#define RCV_CONFIG_HASH_4 0x00020000 // Hash depth 4
-#define RCV_CONFIG_HASH_2 0x00030000 // Hash depth 2
+#define RCV_CONFIG_HASH_8 0x00000000 // Hash depth 8
+#define RCV_CONFIG_HASH_16 0x00010000 // Hash depth 16
+#define RCV_CONFIG_HASH_4 0x00020000 // Hash depth 4
+#define RCV_CONFIG_HASH_2 0x00030000 // Hash depth 2
#define RCV_CONFIG_BUFLEN_MASK 0x0000FFF0 // Buffer length bits 15:4. ie multiple of 16.
-#define RCV_CONFIG_SKT_DIS 0x00000008 // Disable socket detection on attn
+#define RCV_CONFIG_SKT_DIS 0x00000008 // Disable socket detection on attn
// Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size.
// We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC,
// and round up to nearest 16 byte boundary
#define RCV_CONFIG_BUFSIZE(_MaxFrame) ((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)
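A quick check of RCV_CONFIG_BUFSIZE() for a standard 1514-byte frame and a 9014-byte jumbo frame; the macro and mask are copied from above, and the frame sizes are just illustrative inputs:

#include <stdio.h>

#define RCV_CONFIG_BUFLEN_MASK        0x0000FFF0
#define RCV_CONFIG_BUFSIZE(_MaxFrame) ((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)

int main(void)
{
	/* 1514 + 18 (status/pad) + 4 (CRC) = 1536, already a multiple of 16. */
	printf("std:   %d\n", RCV_CONFIG_BUFSIZE(1514));   /* prints 1536 */
	/* 9014 + 22 = 9036, rounded up to the next multiple of 16.          */
	printf("jumbo: %d\n", RCV_CONFIG_BUFSIZE(9014));    /* prints 9040 */
	return 0;
}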
-#define XMT_CONFIG_RESET 0x80000000 // XmtConfig register reset
-#define XMT_CONFIG_ENABLE 0x40000000 // Enable transmit logic
+#define XMT_CONFIG_RESET 0x80000000 // XmtConfig register reset
+#define XMT_CONFIG_ENABLE 0x40000000 // Enable transmit logic
#define XMT_CONFIG_MAC_PARITY 0x20000000 // Inhibit MAC RAM parity error
#define XMT_CONFIG_BUF_PARITY 0x10000000 // Inhibit D2F buffer parity error
#define XMT_CONFIG_MEM_PARITY 0x08000000 // Inhibit 1T SRAM parity error
@@ -249,9 +249,9 @@ typedef struct _SXG_HW_REGS {
// A-XGMAC Configuration Register 1
#define AXGMAC_CFG1_XMT_PAUSE 0x80000000 // Allow the sending of Pause frames
-#define AXGMAC_CFG1_XMT_EN 0x40000000 // Enable transmit
+#define AXGMAC_CFG1_XMT_EN 0x40000000 // Enable transmit
#define AXGMAC_CFG1_RCV_PAUSE 0x20000000 // Allow the detection of Pause frames
-#define AXGMAC_CFG1_RCV_EN 0x10000000 // Enable receive
+#define AXGMAC_CFG1_RCV_EN 0x10000000 // Enable receive
#define AXGMAC_CFG1_XMT_STATE 0x04000000 // Current transmit state - READ ONLY
#define AXGMAC_CFG1_RCV_STATE 0x01000000 // Current receive state - READ ONLY
#define AXGMAC_CFG1_XOFF_SHORT 0x00001000 // Only pause for 64 slot on XOFF
@@ -262,24 +262,24 @@ typedef struct _SXG_HW_REGS {
#define AXGMAC_CFG1_RCV_FCS2 0x00000200 // Delay receive FCS 2 4-byte words
#define AXGMAC_CFG1_RCV_FCS3 0x00000300 // Delay receive FCS 3 4-byte words
#define AXGMAC_CFG1_PKT_OVERRIDE 0x00000080 // Per-packet override enable
-#define AXGMAC_CFG1_SWAP 0x00000040 // Byte swap enable
+#define AXGMAC_CFG1_SWAP 0x00000040 // Byte swap enable
#define AXGMAC_CFG1_SHORT_ASSERT 0x00000020 // ASSERT srdrpfrm on short frame (<64)
#define AXGMAC_CFG1_RCV_STRICT 0x00000010 // RCV only 802.3AE when CLEAR
#define AXGMAC_CFG1_CHECK_LEN 0x00000008 // Verify frame length
-#define AXGMAC_CFG1_GEN_FCS 0x00000004 // Generate FCS
+#define AXGMAC_CFG1_GEN_FCS 0x00000004 // Generate FCS
#define AXGMAC_CFG1_PAD_MASK 0x00000003 // Mask for pad bits
-#define AXGMAC_CFG1_PAD_64 0x00000001 // Pad frames to 64 bytes
+#define AXGMAC_CFG1_PAD_64 0x00000001 // Pad frames to 64 bytes
#define AXGMAC_CFG1_PAD_VLAN 0x00000002 // Detect VLAN and pad to 68 bytes
-#define AXGMAC_CFG1_PAD_68 0x00000003 // Pad to 68 bytes
+#define AXGMAC_CFG1_PAD_68 0x00000003 // Pad to 68 bytes
// A-XGMAC Configuration Register 2
#define AXGMAC_CFG2_GEN_PAUSE 0x80000000 // Generate single pause frame (test)
#define AXGMAC_CFG2_LF_MANUAL 0x08000000 // Manual link fault sequence
-#define AXGMAC_CFG2_LF_AUTO 0x04000000 // Auto link fault sequence
+#define AXGMAC_CFG2_LF_AUTO 0x04000000 // Auto link fault sequence
#define AXGMAC_CFG2_LF_REMOTE 0x02000000 // Remote link fault (READ ONLY)
#define AXGMAC_CFG2_LF_LOCAL 0x01000000 // Local link fault (READ ONLY)
#define AXGMAC_CFG2_IPG_MASK 0x001F0000 // Inter packet gap
-#define AXGMAC_CFG2_IPG_SHIFT 16
+#define AXGMAC_CFG2_IPG_SHIFT 16
#define AXGMAC_CFG2_PAUSE_XMT 0x00008000 // Pause transmit module
#define AXGMAC_CFG2_IPG_EXTEN 0x00000020 // Enable IPG extension algorithm
#define AXGMAC_CFG2_IPGEX_MASK 0x0000001F // IPG extension
@@ -299,9 +299,9 @@ typedef struct _SXG_HW_REGS {
#define AXGMAC_SARHIGH_OCTET_SIX 0x00FF0000 // Sixth octet
// A-XGMAC Maximum frame length register
-#define AXGMAC_MAXFRAME_XMT 0x3FFF0000 // Maximum transmit frame length
+#define AXGMAC_MAXFRAME_XMT 0x3FFF0000 // Maximum transmit frame length
#define AXGMAC_MAXFRAME_XMT_SHIFT 16
-#define AXGMAC_MAXFRAME_RCV 0x0000FFFF // Maximum receive frame length
+#define AXGMAC_MAXFRAME_RCV 0x0000FFFF // Maximum receive frame length
// This register doesn't need to be written for standard MTU.
// For jumbo, I'll just statically define the value here. This
// value sets the receive byte count to 9036 (0x234C) and the
@@ -324,34 +324,34 @@ typedef struct _SXG_HW_REGS {
// A-XGMAC AMIIM Field Register
#define AXGMAC_AMIIM_FIELD_ST 0xC0000000 // 2-bit ST field
-#define AXGMAC_AMIIM_FIELD_ST_SHIFT 30
+#define AXGMAC_AMIIM_FIELD_ST_SHIFT 30
#define AXGMAC_AMIIM_FIELD_OP 0x30000000 // 2-bit OP field
-#define AXGMAC_AMIIM_FIELD_OP_SHIFT 28
-#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000 // Port address field (hstphyadx in spec)
+#define AXGMAC_AMIIM_FIELD_OP_SHIFT 28
+#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000 // Port address field (hstphyadx in spec)
#define AXGMAC_AMIIM_FIELD_PORT_SHIFT 23
#define AXGMAC_AMIIM_FIELD_DEV_ADDR 0x007C0000 // Device address field (hstregadx in spec)
#define AXGMAC_AMIIM_FIELD_DEV_SHIFT 18
#define AXGMAC_AMIIM_FIELD_TA 0x00030000 // 2-bit TA field
-#define AXGMAC_AMIIM_FIELD_TA_SHIFT 16
+#define AXGMAC_AMIIM_FIELD_TA_SHIFT 16
#define AXGMAC_AMIIM_FIELD_DATA 0x0000FFFF // Data field
// Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register
-#define MIIM_OP_ADDR 0 // MIIM Address set operation
-#define MIIM_OP_WRITE 1 // MIIM Write register operation
-#define MIIM_OP_READ 2 // MIIM Read register operation
+#define MIIM_OP_ADDR 0 // MIIM Address set operation
+#define MIIM_OP_WRITE 1 // MIIM Write register operation
+#define MIIM_OP_READ 2 // MIIM Read register operation
#define MIIM_OP_ADDR_SHIFT (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT)
// Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM Field Register
-#define MIIM_PORT_NUM 1 // All Sahara MIIM modules use port 1
+#define MIIM_PORT_NUM 1 // All Sahara MIIM modules use port 1
// Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM Field Register
-#define MIIM_DEV_PHY_PMA 1 // PHY PMA/PMD module MIIM device number
-#define MIIM_DEV_PHY_PCS 3 // PHY PCS module MIIM device number
-#define MIIM_DEV_PHY_XS 4 // PHY XS module MIIM device number
-#define MIIM_DEV_XGXS 5 // XGXS MIIM device number
+#define MIIM_DEV_PHY_PMA 1 // PHY PMA/PMD module MIIM device number
+#define MIIM_DEV_PHY_PCS 3 // PHY PCS module MIIM device number
+#define MIIM_DEV_PHY_XS 4 // PHY XS module MIIM device number
+#define MIIM_DEV_XGXS 5 // XGXS MIIM device number
// Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field Register
-#define MIIM_TA_10GB 2 // set to 2 for 10 GB operation
+#define MIIM_TA_10GB 2 // set to 2 for 10 GB operation
// A-XGMAC AMIIM Configuration Register
#define AXGMAC_AMIIM_CFG_NOPREAM 0x00000080 // Bypass preamble of mngmt frame
@@ -365,25 +365,25 @@ typedef struct _SXG_HW_REGS {
#define AXGMAC_AMIIM_INDC_BUSY 0x00000001 // Set until cmd operation complete
// Link Status and Control Register
-#define LS_PHY_CLR_RESET 0x80000000 // Clear reset signal to PHY
+#define LS_PHY_CLR_RESET 0x80000000 // Clear reset signal to PHY
#define LS_SERDES_POWER_DOWN 0x40000000 // Power down the Sahara Serdes
-#define LS_XGXS_ENABLE 0x20000000 // Enable the XAUI XGXS logic
-#define LS_XGXS_CTL 0x10000000 // Hold XAUI XGXS logic reset until Serdes is up
-#define LS_SERDES_DOWN 0x08000000 // When 0, XAUI Serdes is up and initialization is complete
-#define LS_TRACE_DOWN 0x04000000 // When 0, Trace Serdes is up and initialization is complete
-#define LS_PHY_CLK_25MHZ 0x02000000 // Set PHY clock to 25 MHz (else 156.125 MHz)
-#define LS_PHY_CLK_EN 0x01000000 // Enable clock to PHY
-#define LS_XAUI_LINK_UP 0x00000010 // XAUI link is up
-#define LS_XAUI_LINK_CHNG 0x00000008 // XAUI link status has changed
-#define LS_LINK_ALARM 0x00000004 // Link alarm pin
-#define LS_ATTN_CTRL_MASK 0x00000003 // Mask link attention control bits
-#define LS_ATTN_ALARM 0x00000000 // 00 => Attn on link alarm
+#define LS_XGXS_ENABLE 0x20000000 // Enable the XAUI XGXS logic
+#define LS_XGXS_CTL 0x10000000 // Hold XAUI XGXS logic reset until Serdes is up
+#define LS_SERDES_DOWN 0x08000000 // When 0, XAUI Serdes is up and initialization is complete
+#define LS_TRACE_DOWN 0x04000000 // When 0, Trace Serdes is up and initialization is complete
+#define LS_PHY_CLK_25MHZ 0x02000000 // Set PHY clock to 25 MHz (else 156.125 MHz)
+#define LS_PHY_CLK_EN 0x01000000 // Enable clock to PHY
+#define LS_XAUI_LINK_UP 0x00000010 // XAUI link is up
+#define LS_XAUI_LINK_CHNG 0x00000008 // XAUI link status has changed
+#define LS_LINK_ALARM 0x00000004 // Link alarm pin
+#define LS_ATTN_CTRL_MASK 0x00000003 // Mask link attention control bits
+#define LS_ATTN_ALARM 0x00000000 // 00 => Attn on link alarm
#define LS_ATTN_ALARM_OR_STAT_CHNG 0x00000001 // 01 => Attn on link alarm or status change
-#define LS_ATTN_STAT_CHNG 0x00000002 // 10 => Attn on link status change
-#define LS_ATTN_NONE 0x00000003 // 11 => no Attn
+#define LS_ATTN_STAT_CHNG 0x00000002 // 10 => Attn on link status change
+#define LS_ATTN_NONE 0x00000003 // 11 => no Attn
// Link Address High Registers
-#define LINK_ADDR_ENABLE 0x80000000 // Enable this link address
+#define LINK_ADDR_ENABLE 0x80000000 // Enable this link address
/***************************************************************************
@@ -396,7 +396,7 @@ typedef struct _SXG_HW_REGS {
#define XGXS_ADDRESS_STATUS1 0x0001 // XS Status 1
#define XGXS_ADDRESS_DEVID_LOW 0x0002 // XS Device ID (low)
#define XGXS_ADDRESS_DEVID_HIGH 0x0003 // XS Device ID (high)
-#define XGXS_ADDRESS_SPEED 0x0004 // XS Speed ability
+#define XGXS_ADDRESS_SPEED 0x0004 // XS Speed ability
#define XGXS_ADDRESS_DEV_LOW 0x0005 // XS Devices in package
#define XGXS_ADDRESS_DEV_HIGH 0x0006 // XS Devices in package
#define XGXS_ADDRESS_STATUS2 0x0008 // XS Status 2
@@ -410,27 +410,27 @@ typedef struct _SXG_HW_REGS {
#define XGXS_ADDRESS_RESET_HI2 0x8003 // Vendor-Specific Reset Hi 2
// XS Control 1 register bit definitions
-#define XGXS_CONTROL1_RESET 0x8000 // Reset - self clearing
+#define XGXS_CONTROL1_RESET 0x8000 // Reset - self clearing
#define XGXS_CONTROL1_LOOPBACK 0x4000 // Enable loopback
#define XGXS_CONTROL1_SPEED1 0x2000 // 0 = unspecified, 1 = 10Gb+
#define XGXS_CONTROL1_LOWPOWER 0x0400 // 1 = Low power mode
#define XGXS_CONTROL1_SPEED2 0x0040 // Same as SPEED1 (?)
-#define XGXS_CONTROL1_SPEED 0x003C // Everything reserved except zero (?)
+#define XGXS_CONTROL1_SPEED 0x003C // Everything reserved except zero (?)
// XS Status 1 register bit definitions
-#define XGXS_STATUS1_FAULT 0x0080 // Fault detected
-#define XGXS_STATUS1_LINK 0x0004 // 1 = Link up
+#define XGXS_STATUS1_FAULT 0x0080 // Fault detected
+#define XGXS_STATUS1_LINK 0x0004 // 1 = Link up
#define XGXS_STATUS1_LOWPOWER 0x0002 // 1 = Low power supported
// XS Speed register bit definitions
-#define XGXS_SPEED_10G 0x0001 // 1 = 10G capable
+#define XGXS_SPEED_10G 0x0001 // 1 = 10G capable
// XS Devices register bit definitions
-#define XGXS_DEVICES_DTE 0x0020 // DTE XS Present
-#define XGXS_DEVICES_PHY 0x0010 // PHY XS Present
-#define XGXS_DEVICES_PCS 0x0008 // PCS Present
-#define XGXS_DEVICES_WIS 0x0004 // WIS Present
-#define XGXS_DEVICES_PMD 0x0002 // PMD/PMA Present
+#define XGXS_DEVICES_DTE 0x0020 // DTE XS Present
+#define XGXS_DEVICES_PHY 0x0010 // PHY XS Present
+#define XGXS_DEVICES_PCS 0x0008 // PCS Present
+#define XGXS_DEVICES_WIS 0x0004 // WIS Present
+#define XGXS_DEVICES_PMD 0x0002 // PMD/PMA Present
#define XGXS_DEVICES_CLAUSE22 0x0001 // Clause 22 registers present
// XS Devices High register bit definitions
@@ -444,18 +444,18 @@ typedef struct _SXG_HW_REGS {
#define XGXS_STATUS2_RCV_FAULT 0x0400 // Receive fault
// XS Package ID High register bit definitions
-#define XGXS_PKGID_HIGH_ORG 0xFC00 // Organizationally Unique
-#define XGXS_PKGID_HIGH_MFG 0x03F0 // Manufacturer Model
-#define XGXS_PKGID_HIGH_REV 0x000F // Revision Number
+#define XGXS_PKGID_HIGH_ORG 0xFC00 // Organizationally Unique
+#define XGXS_PKGID_HIGH_MFG 0x03F0 // Manufacturer Model
+#define XGXS_PKGID_HIGH_REV 0x000F // Revision Number
// XS Lane Status register bit definitions
-#define XGXS_LANE_PHY 0x1000 // PHY/DTE lane alignment status
-#define XGXS_LANE_PATTERN 0x0800 // Pattern testing ability
-#define XGXS_LANE_LOOPBACK 0x0400 // PHY loopback ability
-#define XGXS_LANE_SYNC3 0x0008 // Lane 3 sync
-#define XGXS_LANE_SYNC2 0x0004 // Lane 2 sync
-#define XGXS_LANE_SYNC1 0x0002 // Lane 1 sync
-#define XGXS_LANE_SYNC0 0x0001 // Lane 0 sync
+#define XGXS_LANE_PHY 0x1000 // PHY/DTE lane alignment status
+#define XGXS_LANE_PATTERN 0x0800 // Pattern testing ability
+#define XGXS_LANE_LOOPBACK 0x0400 // PHY loopback ability
+#define XGXS_LANE_SYNC3 0x0008 // Lane 3 sync
+#define XGXS_LANE_SYNC2 0x0004 // Lane 2 sync
+#define XGXS_LANE_SYNC1 0x0002 // Lane 1 sync
+#define XGXS_LANE_SYNC0 0x0001 // Lane 0 sync
// XS Test Control register bit definitions
#define XGXS_TEST_PATTERN_ENABLE 0x0004 // Test pattern enabled
@@ -473,10 +473,10 @@ typedef struct _SXG_HW_REGS {
// LASI (Link Alarm Status Interrupt) Registers (located in MIIM_DEV_PHY_PMA device)
#define LASI_RX_ALARM_CONTROL 0x9000 // LASI RX_ALARM Control
#define LASI_TX_ALARM_CONTROL 0x9001 // LASI TX_ALARM Control
-#define LASI_CONTROL 0x9002 // LASI Control
+#define LASI_CONTROL 0x9002 // LASI Control
#define LASI_RX_ALARM_STATUS 0x9003 // LASI RX_ALARM Status
#define LASI_TX_ALARM_STATUS 0x9004 // LASI TX_ALARM Status
-#define LASI_STATUS 0x9005 // LASI Status
+#define LASI_STATUS 0x9005 // LASI Status
// LASI_CONTROL bit definitions
#define LASI_CTL_RX_ALARM_ENABLE 0x0004 // Enable RX_ALARM interrupts
@@ -489,34 +489,34 @@ typedef struct _SXG_HW_REGS {
#define LASI_STATUS_LS_ALARM 0x0001 // Link Status
// PHY registers - PMA/PMD (device 1)
-#define PHY_PMA_CONTROL1 0x0000 // PMA/PMD Control 1
-#define PHY_PMA_STATUS1 0x0001 // PMA/PMD Status 1
-#define PHY_PMA_RCV_DET 0x000A // PMA/PMD Receive Signal Detect
+#define PHY_PMA_CONTROL1 0x0000 // PMA/PMD Control 1
+#define PHY_PMA_STATUS1 0x0001 // PMA/PMD Status 1
+#define PHY_PMA_RCV_DET 0x000A // PMA/PMD Receive Signal Detect
// other PMA/PMD registers exist and can be defined as needed
// PHY registers - PCS (device 3)
-#define PHY_PCS_CONTROL1 0x0000 // PCS Control 1
-#define PHY_PCS_STATUS1 0x0001 // PCS Status 1
-#define PHY_PCS_10G_STATUS1 0x0020 // PCS 10GBASE-R Status 1
+#define PHY_PCS_CONTROL1 0x0000 // PCS Control 1
+#define PHY_PCS_STATUS1 0x0001 // PCS Status 1
+#define PHY_PCS_10G_STATUS1 0x0020 // PCS 10GBASE-R Status 1
// other PCS registers exist and can be defined as needed
// PHY registers - XS (device 4)
-#define PHY_XS_CONTROL1 0x0000 // XS Control 1
-#define PHY_XS_STATUS1 0x0001 // XS Status 1
-#define PHY_XS_LANE_STATUS 0x0018 // XS Lane Status
+#define PHY_XS_CONTROL1 0x0000 // XS Control 1
+#define PHY_XS_STATUS1 0x0001 // XS Status 1
+#define PHY_XS_LANE_STATUS 0x0018 // XS Lane Status
// other XS registers exist and can be defined as needed
// PHY_PMA_CONTROL1 register bit definitions
-#define PMA_CONTROL1_RESET 0x8000 // PMA/PMD reset
+#define PMA_CONTROL1_RESET 0x8000 // PMA/PMD reset
// PHY_PMA_RCV_DET register bit definitions
-#define PMA_RCV_DETECT 0x0001 // PMA/PMD receive signal detect
+#define PMA_RCV_DETECT 0x0001 // PMA/PMD receive signal detect
// PHY_PCS_10G_STATUS1 register bit definitions
-#define PCS_10B_BLOCK_LOCK 0x0001 // PCS 10GBASE-R locked to receive blocks
+#define PCS_10B_BLOCK_LOCK 0x0001 // PCS 10GBASE-R locked to receive blocks
// PHY_XS_LANE_STATUS register bit definitions
-#define XS_LANE_ALIGN 0x1000 // XS transmit lanes aligned
+#define XS_LANE_ALIGN 0x1000 // XS transmit lanes aligned
// PHY Microcode download data structure
typedef struct _PHY_UCODE {
@@ -558,8 +558,8 @@ typedef struct _XMT_DESC {
// command codes
#define XMT_DESC_CMD_RAW_SEND 0 // raw send descriptor
#define XMT_DESC_CMD_CSUM_INSERT 1 // checksum insert descriptor
-#define XMT_DESC_CMD_FORMAT 2 // format descriptor
-#define XMT_DESC_CMD_PRIME 3 // prime descriptor
+#define XMT_DESC_CMD_FORMAT 2 // format descriptor
+#define XMT_DESC_CMD_PRIME 3 // prime descriptor
#define XMT_DESC_CMD_CODE_SHFT 6 // command code shift (shift to bits [31:30] in word 0)
// shifted command codes
#define XMT_RAW_SEND (XMT_DESC_CMD_RAW_SEND << XMT_DESC_CMD_CODE_SHFT)
@@ -569,22 +569,22 @@ typedef struct _XMT_DESC {
// XMT_DESC Control Byte (XmtCtl) definitions
// NOTE: These bits do not work on Sahara (Rev A)!
-#define XMT_CTL_PAUSE_FRAME 0x80 // current frame is a pause control frame (for statistics)
+#define XMT_CTL_PAUSE_FRAME 0x80 // current frame is a pause control frame (for statistics)
#define XMT_CTL_CONTROL_FRAME 0x40 // current frame is a control frame (for statistics)
#define XMT_CTL_PER_PKT_QUAL 0x20 // per packet qualifier
#define XMT_CTL_PAD_MODE_NONE 0x00 // do not pad frame
-#define XMT_CTL_PAD_MODE_64 0x08 // pad frame to 64 bytes
+#define XMT_CTL_PAD_MODE_64 0x08 // pad frame to 64 bytes
#define XMT_CTL_PAD_MODE_VLAN_68 0x10 // pad frame to 64 bytes, and VLAN frames to 68 bytes
-#define XMT_CTL_PAD_MODE_68 0x18 // pad frame to 68 bytes
-#define XMT_CTL_GEN_FCS 0x04 // generate FCS (CRC) for this frame
-#define XMT_CTL_DELAY_FCS_0 0x00 // do not delay FCS calcution
-#define XMT_CTL_DELAY_FCS_1 0x01 // delay FCS calculation by 1 (4-byte) word
-#define XMT_CTL_DELAY_FCS_2 0x02 // delay FCS calculation by 2 (4-byte) words
-#define XMT_CTL_DELAY_FCS_3 0x03 // delay FCS calculation by 3 (4-byte) words
+#define XMT_CTL_PAD_MODE_68 0x18 // pad frame to 68 bytes
+#define XMT_CTL_GEN_FCS 0x04 // generate FCS (CRC) for this frame
+#define XMT_CTL_DELAY_FCS_0 0x00 // do not delay FCS calculation
+#define XMT_CTL_DELAY_FCS_1 0x01 // delay FCS calculation by 1 (4-byte) word
+#define XMT_CTL_DELAY_FCS_2 0x02 // delay FCS calculation by 2 (4-byte) words
+#define XMT_CTL_DELAY_FCS_3 0x03 // delay FCS calculation by 3 (4-byte) words
// XMT_DESC XmtBufId definition
-#define XMT_BUF_ID_SHFT 8 // The Xmt buffer ID is formed by dividing
- // the buffer (DRAM) address by 256 (or << 8)
+#define XMT_BUF_ID_SHFT 8 // The Xmt buffer ID is formed by dividing
+ // the buffer (DRAM) address by 256 (or << 8)
/*****************************************************************************
* Receiver Sequencer Definitions
@@ -594,8 +594,8 @@ typedef struct _XMT_DESC {
#define RCV_EVTQ_RBFID_MASK 0x0000FFFF // bit mask for the Receive Buffer ID
// Receive Buffer ID definition
-#define RCV_BUF_ID_SHFT 5 // The Rcv buffer ID is formed by dividing
- // the buffer (DRAM) address by 32 (or << 5)
+#define RCV_BUF_ID_SHFT 5 // The Rcv buffer ID is formed by dividing
+ // the buffer (DRAM) address by 32 (or << 5)
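An illustrative sketch of the buffer-ID encoding described in the two comments above: the ID is the DRAM address divided by the buffer granularity (address >> shift), and the address is recovered as id << shift. The 256- and 32-byte granularities are taken from the divide-by-256/divide-by-32 wording; the sample address is made up.

#include <stdio.h>

#define XMT_BUF_ID_SHFT 8   /* xmt buffers on 256-byte boundaries */
#define RCV_BUF_ID_SHFT 5   /* rcv buffers on 32-byte boundaries  */

int main(void)
{
	unsigned int dram_addr = 0x00012A00;  /* hypothetical DRAM offset */
	unsigned int xmt_id = dram_addr >> XMT_BUF_ID_SHFT;
	unsigned int rcv_id = dram_addr >> RCV_BUF_ID_SHFT;

	printf("xmt id 0x%x -> addr 0x%x\n", xmt_id, xmt_id << XMT_BUF_ID_SHFT);
	printf("rcv id 0x%x -> addr 0x%x\n", rcv_id, rcv_id << RCV_BUF_ID_SHFT);
	return 0;
}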
// Format of the 18 byte Receive Buffer returned by the
// Receive Sequencer for received packets
@@ -623,48 +623,48 @@ typedef struct _RCV_BUF_HDR {
* Queue definitions
*****************************************************************************/
-// Ingress (read only) queue numbers
-#define PXY_BUF_Q 0 // Proxy Buffer Queue
-#define HST_EVT_Q 1 // Host Event Queue
-#define XMT_BUF_Q 2 // Transmit Buffer Queue
-#define SKT_EVL_Q 3 // RcvSqr Socket Event Low Priority Queue
-#define RCV_EVL_Q 4 // RcvSqr Rcv Event Low Priority Queue
-#define SKT_EVH_Q 5 // RcvSqr Socket Event High Priority Queue
-#define RCV_EVH_Q 6 // RcvSqr Rcv Event High Priority Queue
-#define DMA_RSP_Q 7 // Dma Response Queue - one per CPU context
-// Local (read/write) queue numbers
-#define LOCAL_A_Q 8 // Spare local Queue
-#define LOCAL_B_Q 9 // Spare local Queue
-#define LOCAL_C_Q 10 // Spare local Queue
-#define FSM_EVT_Q 11 // Finite-State-Machine Event Queue
-#define SBF_PAL_Q 12 // System Buffer Physical Address (low) Queue
-#define SBF_PAH_Q 13 // System Buffer Physical Address (high) Queue
-#define SBF_VAL_Q 14 // System Buffer Virtual Address (low) Queue
-#define SBF_VAH_Q 15 // System Buffer Virtual Address (high) Queue
-// Egress (write only) queue numbers
-#define H2G_CMD_Q 16 // Host to GlbRam DMA Command Queue
-#define H2D_CMD_Q 17 // Host to DRAM DMA Command Queue
-#define G2H_CMD_Q 18 // GlbRam to Host DMA Command Queue
-#define G2D_CMD_Q 19 // GlbRam to DRAM DMA Command Queue
-#define D2H_CMD_Q 20 // DRAM to Host DMA Command Queue
-#define D2G_CMD_Q 21 // DRAM to GlbRam DMA Command Queue
-#define D2D_CMD_Q 22 // DRAM to DRAM DMA Command Queue
-#define PXL_CMD_Q 23 // Low Priority Proxy Command Queue
-#define PXH_CMD_Q 24 // High Priority Proxy Command Queue
-#define RSQ_CMD_Q 25 // Receive Sequencer Command Queue
-#define RCV_BUF_Q 26 // Receive Buffer Queue
-
-// Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q)
-#define PXY_COPY_EN 0x00200000 // enable copy of xmt descriptor to xmt command queue
-#define PXY_SIZE_16 0x00000000 // copy 16 bytes
-#define PXY_SIZE_32 0x00100000 // copy 32 bytes
+/* Ingress (read only) queue numbers */
+#define PXY_BUF_Q 0 /* Proxy Buffer Queue */
+#define HST_EVT_Q 1 /* Host Event Queue */
+#define XMT_BUF_Q 2 /* Transmit Buffer Queue */
+#define SKT_EVL_Q 3 /* RcvSqr Socket Event Low Priority Queue */
+#define RCV_EVL_Q 4 /* RcvSqr Rcv Event Low Priority Queue */
+#define SKT_EVH_Q 5 /* RcvSqr Socket Event High Priority Queue */
+#define RCV_EVH_Q 6 /* RcvSqr Rcv Event High Priority Queue */
+#define DMA_RSP_Q 7 /* Dma Response Queue - one per CPU context */
+/* Local (read/write) queue numbers */
+#define LOCAL_A_Q 8 /* Spare local Queue */
+#define LOCAL_B_Q 9 /* Spare local Queue */
+#define LOCAL_C_Q 10 /* Spare local Queue */
+#define FSM_EVT_Q 11 /* Finite-State-Machine Event Queue */
+#define SBF_PAL_Q 12 /* System Buffer Physical Address (low) Queue */
+#define SBF_PAH_Q 13 /* System Buffer Physical Address (high) Queue */
+#define SBF_VAL_Q 14 /* System Buffer Virtual Address (low) Queue */
+#define SBF_VAH_Q 15 /* System Buffer Virtual Address (high) Queue */
+/* Egress (write only) queue numbers */
+#define H2G_CMD_Q 16 /* Host to GlbRam DMA Command Queue */
+#define H2D_CMD_Q 17 /* Host to DRAM DMA Command Queue */
+#define G2H_CMD_Q 18 /* GlbRam to Host DMA Command Queue */
+#define G2D_CMD_Q 19 /* GlbRam to DRAM DMA Command Queue */
+#define D2H_CMD_Q 20 /* DRAM to Host DMA Command Queue */
+#define D2G_CMD_Q 21 /* DRAM to GlbRam DMA Command Queue */
+#define D2D_CMD_Q 22 /* DRAM to DRAM DMA Command Queue */
+#define PXL_CMD_Q 23 /* Low Priority Proxy Command Queue */
+#define PXH_CMD_Q 24 /* High Priority Proxy Command Queue */
+#define RSQ_CMD_Q 25 /* Receive Sequencer Command Queue */
+#define RCV_BUF_Q 26 /* Receive Buffer Queue */
+
+/* Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) */
+#define PXY_COPY_EN 0x00200000 /* enable copy of xmt descriptor to xmt command queue */
+#define PXY_SIZE_16 0x00000000 /* copy 16 bytes */
+#define PXY_SIZE_32 0x00100000 /* copy 32 bytes */
/*****************************************************************************
* SXG EEPROM/Flash Configuration Definitions
*****************************************************************************/
#pragma pack(push, 1)
-//
+/* */
typedef struct _HW_CFG_DATA {
ushort Addr;
union {
@@ -673,22 +673,22 @@ typedef struct _HW_CFG_DATA {
};
} HW_CFG_DATA, *PHW_CFG_DATA;
-//
+/* */
#define NUM_HW_CFG_ENTRIES ((128/sizeof(HW_CFG_DATA)) - 4)
-// MAC address
+/* MAC address */
typedef struct _SXG_CONFIG_MAC {
- unsigned char MacAddr[6]; // MAC Address
+ unsigned char MacAddr[6]; /* MAC Address */
} SXG_CONFIG_MAC, *PSXG_CONFIG_MAC;
-//
+/* */
typedef struct _ATK_FRU {
unsigned char PartNum[6];
unsigned char Revision[2];
unsigned char Serial[14];
} ATK_FRU, *PATK_FRU;
-// OEM FRU Format types
+/* OEM FRU Format types */
#define ATK_FRU_FORMAT 0x0000
#define CPQ_FRU_FORMAT 0x0001
#define DELL_FRU_FORMAT 0x0002
@@ -697,24 +697,24 @@ typedef struct _ATK_FRU {
#define EMC_FRU_FORMAT 0x0005
#define NO_FRU_FORMAT 0xFFFF
-// EEPROM/Flash Format
+/* EEPROM/Flash Format */
typedef struct _SXG_CONFIG {
- //
- // Section 1 (128 bytes)
- //
- ushort MagicWord; // EEPROM/FLASH Magic code 'A5A5'
- ushort SpiClks; // SPI bus clock dividers
+ /* */
+ /* Section 1 (128 bytes) */
+ /* */
+ ushort MagicWord; /* EEPROM/FLASH Magic code 'A5A5' */
+ ushort SpiClks; /* SPI bus clock dividers */
HW_CFG_DATA HwCfg[NUM_HW_CFG_ENTRIES];
- //
- //
- //
- ushort Version; // EEPROM format version
- SXG_CONFIG_MAC MacAddr[4]; // space for 4 MAC addresses
- ATK_FRU AtkFru; // FRU information
- ushort OemFruFormat; // OEM FRU format type
- unsigned char OemFru[76]; // OEM FRU information (optional)
- ushort Checksum; // Checksum of section 2
- // CS info XXXTODO
+ /* */
+ /* */
+ /* */
+ ushort Version; /* EEPROM format version */
+ SXG_CONFIG_MAC MacAddr[4]; /* space for 4 MAC addresses */
+ ATK_FRU AtkFru; /* FRU information */
+ ushort OemFruFormat; /* OEM FRU format type */
+ unsigned char OemFru[76]; /* OEM FRU information (optional) */
+ ushort Checksum; /* Checksum of section 2 */
+ /* CS info XXXTODO */
} SXG_CONFIG, *PSXG_CONFIG;
#pragma pack(pop)
@@ -723,12 +723,12 @@ typedef struct _SXG_CONFIG {
*****************************************************************************/
// Sahara (ASIC level) defines
-#define SAHARA_GRAM_SIZE 0x020000 // GRAM size - 128 KB
-#define SAHARA_DRAM_SIZE 0x200000 // DRAM size - 2 MB
-#define SAHARA_QRAM_SIZE 0x004000 // QRAM size - 16K entries (64 KB)
-#define SAHARA_WCS_SIZE 0x002000 // WCS - 8K instructions (x 108 bits)
+#define SAHARA_GRAM_SIZE 0x020000 // GRAM size - 128 KB
+#define SAHARA_DRAM_SIZE 0x200000 // DRAM size - 2 MB
+#define SAHARA_QRAM_SIZE 0x004000 // QRAM size - 16K entries (64 KB)
+#define SAHARA_WCS_SIZE 0x002000 // WCS - 8K instructions (x 108 bits)
// Arabia (board level) defines
-#define FLASH_SIZE 0x080000 // 512 KB (4 Mb)
-#define EEPROM_SIZE_XFMR 512 // true EEPROM size (bytes), including xfmr area
-#define EEPROM_SIZE_NO_XFMR 256 // EEPROM size excluding xfmr area
+#define FLASH_SIZE 0x080000 // 512 KB (4 Mb)
+#define EEPROM_SIZE_XFMR 512 // true EEPROM size (bytes), including xfmr area
+#define EEPROM_SIZE_NO_XFMR 256 // EEPROM size excluding xfmr area
diff --git a/drivers/staging/sxg/sxgphycode.h b/drivers/staging/sxg/sxgphycode.h
index 26b36c81eb1..8dbaeda7eca 100644
--- a/drivers/staging/sxg/sxgphycode.h
+++ b/drivers/staging/sxg/sxgphycode.h
@@ -34,7 +34,7 @@ static PHY_UCODE PhyUcode[] = {
*/
/* Addr, Data */
{0xc017, 0xfeb0}, /* flip RX_LOS polarity (mandatory */
- /* patch for SFP+ applications) */
+ /* patch for SFP+ applications) */
{0xC001, 0x0428}, /* flip RX serial polarity */
{0xc013, 0xf341}, /* invert lxmit clock (mandatory patch) */
@@ -43,7 +43,7 @@ static PHY_UCODE PhyUcode[] = {
{0xc210, 0x8000}, /* reset datapath (mandatory patch) */
{0xc210, 0x0000}, /* reset datapath (mandatory patch) */
{0x0000, 0x0032}, /* wait for 50ms for datapath reset to */
- /* complete. (mandatory patch) */
+ /* complete. (mandatory patch) */
/* Configure the LED's */
{0xc214, 0x0099}, /* configure the LED drivers */
@@ -52,15 +52,15 @@ static PHY_UCODE PhyUcode[] = {
/* Transceiver-specific MDIO Patches: */
{0xc010, 0x448a}, /* (bit 14) mask out high BER input from the */
- /* LOS signal in 1.000A */
- /* (mandatory patch for SR code)*/
+ /* LOS signal in 1.000A */
+ /* (mandatory patch for SR code) */
{0xc003, 0x0181}, /* (bit 7) enable the CDR inc setting in */
- /* 1.C005 (mandatory patch for SR code) */
+ /* 1.C005 (mandatory patch for SR code) */
/* Transceiver-specific Microcontroller Initialization: */
{0xc04a, 0x5200}, /* activate microcontroller and pause */
{0x0000, 0x0032}, /* wait 50ms for microcontroller before */
- /* writing in code. */
+ /* writing in code. */
/* code block starts here: */
{0xcc00, 0x2009},
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index e64918f42ff..72e209276ea 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -221,7 +221,7 @@ static void usbip_dump_request_type(__u8 rt)
static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd)
{
if (!cmd) {
- printk(" %s : null pointer\n", __FUNCTION__);
+ printk(" %s : null pointer\n", __func__);
return;
}
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 933ccaf50af..58e3995d0e2 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -202,7 +202,7 @@ static void vhci_rx_pdu(struct usbip_device *ud)
ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
if (ret != sizeof(pdu)) {
uerr("receiving pdu failed! size is %d, should be %d\n",
- ret, sizeof(pdu));
+ ret, (unsigned int)sizeof(pdu));
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return;
}
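The cast above silences the %d-versus-size_t mismatch warning. An alternative sketch (an assumption, not what this patch chose) would be to print the size_t directly with C99's "%zu" length modifier; the struct below is only a stand-in for the real pdu:

#include <stdio.h>

struct pdu_sketch { unsigned int words[12]; };  /* stand-in, not usbip_header */

int main(void)
{
	struct pdu_sketch pdu;
	int ret = 20;   /* pretend a short read happened */

	/* No cast needed: %zu matches size_t. */
	printf("receiving pdu failed! size is %d, should be %zu\n",
	       ret, sizeof(pdu));
	return 0;
}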
diff --git a/drivers/staging/winbond/Kconfig b/drivers/staging/winbond/Kconfig
index 10d72bec88a..425219ed7ab 100644
--- a/drivers/staging/winbond/Kconfig
+++ b/drivers/staging/winbond/Kconfig
@@ -1,6 +1,6 @@
config W35UND
tristate "Winbond driver"
- depends on MAC80211 && WLAN_80211 && EXPERIMENTAL && !4KSTACKS
+ depends on MAC80211 && WLAN_80211 && USB && EXPERIMENTAL && !4KSTACKS
default n
---help---
This is a highly experimental driver for the Winbond wifi card on some Kohjinsha notebooks
diff --git a/drivers/staging/winbond/README b/drivers/staging/winbond/README
index 707b6b354dc..cb944e4bf17 100644
--- a/drivers/staging/winbond/README
+++ b/drivers/staging/winbond/README
@@ -5,6 +5,7 @@ TODO:
- remove typedefs
- remove unused ioctls
- use cfg80211 for regulatory stuff
+ - fix 4k stack problems
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
Pavel Machek <pavel@suse.cz>
diff --git a/drivers/staging/winbond/bss_f.h b/drivers/staging/winbond/bss_f.h
index c957bc94f08..01318315399 100644
--- a/drivers/staging/winbond/bss_f.h
+++ b/drivers/staging/winbond/bss_f.h
@@ -24,7 +24,7 @@ void DesiredRate2InfoElement(PWB32_ADAPTER Adapter, u8 *addr, u16 *iFildOffset,
u8 *pBasicRateSet, u8 BasicRateCount,
u8 *pOperationRateSet, u8 OperationRateCount);
void BSSAddIBSSdata(PWB32_ADAPTER Adapter, PWB_BSSDESCRIPTION psDesData);
-unsigned char boCmpMacAddr( PUCHAR, PUCHAR );
+unsigned char boCmpMacAddr( u8 *, u8 *);
unsigned char boCmpSSID(struct SSID_Element *psSSID1, struct SSID_Element *psSSID2);
u16 wBSSfindSSID(PWB32_ADAPTER Adapter, struct SSID_Element *psSsid);
u16 wRoamingQuery(PWB32_ADAPTER Adapter);
@@ -42,11 +42,11 @@ void RateReSortForSRate(PWB32_ADAPTER Adapter, u8 *RateArray, u8 num);
void Assemble_IE(PWB32_ADAPTER Adapter, u16 wBssIdx);
void SetMaxTxRate(PWB32_ADAPTER Adapter);
-void CreateWpaIE(PWB32_ADAPTER Adapter, u16* iFildOffset, PUCHAR msg, struct Management_Frame* msgHeader,
+void CreateWpaIE(PWB32_ADAPTER Adapter, u16* iFildOffset, u8 *msg, struct Management_Frame* msgHeader,
struct Association_Request_Frame_Body* msgBody, u16 iMSindex); //added by WS 05/14/05
#ifdef _WPA2_
-void CreateRsnIE(PWB32_ADAPTER Adapter, u16* iFildOffset, PUCHAR msg, struct Management_Frame* msgHeader,
+void CreateRsnIE(PWB32_ADAPTER Adapter, u16* iFildOffset, u8 *msg, struct Management_Frame* msgHeader,
struct Association_Request_Frame_Body* msgBody, u16 iMSindex);//added by WS 05/14/05
u16 SearchPmkid(PWB32_ADAPTER Adapter, struct Management_Frame* msgHeader,
diff --git a/drivers/staging/winbond/ds_tkip.h b/drivers/staging/winbond/ds_tkip.h
index 29e5055b45a..6841d66e7e8 100644
--- a/drivers/staging/winbond/ds_tkip.h
+++ b/drivers/staging/winbond/ds_tkip.h
@@ -25,9 +25,9 @@ typedef struct tkip
s32 bytes_in_M; // # bytes in M
} tkip_t;
-//void _append_data( PUCHAR pData, u16 size, tkip_t *p );
-void Mds_MicGet( void* Adapter, void* pRxLayer1, PUCHAR pKey, PUCHAR pMic );
-void Mds_MicFill( void* Adapter, void* pDes, PUCHAR XmitBufAddress );
+//void _append_data( u8 *pData, u16 size, tkip_t *p );
+void Mds_MicGet( void* Adapter, void* pRxLayer1, u8 *pKey, u8 *pMic );
+void Mds_MicFill( void* Adapter, void* pDes, u8 *XmitBufAddress );
diff --git a/drivers/staging/winbond/linux/common.h b/drivers/staging/winbond/linux/common.h
index 6b00bad74f7..712a86cfa68 100644
--- a/drivers/staging/winbond/linux/common.h
+++ b/drivers/staging/winbond/linux/common.h
@@ -39,14 +39,6 @@
// Common type definition
//===============================================================
-typedef u8* PUCHAR;
-typedef s8* PCHAR;
-typedef u8* PBOOLEAN;
-typedef u16* PUSHORT;
-typedef u32* PULONG;
-typedef s16* PSHORT;
-
-
//===========================================
#define IGNORE 2
#define SUCCESS 1
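The deleted PUCHAR/PULONG-style typedefs hide the indirection behind a name, which is what the winbond TODO's "remove typedefs" item targets; spelling out "u8 *" keeps the pointer visible at every prototype and call site. A minimal sketch of the conversion pattern, with local stand-in types and hypothetical names rather than the driver's own declarations:

#include <string.h>

typedef unsigned char u8;   /* local stand-in for the kernel type */

/* before: typedef u8 *PUCHAR;  void copy_mac(PUCHAR dst, PUCHAR src); */
/* after:  the pointer is explicit in the prototype and at call sites. */
static void copy_mac(u8 *dst, const u8 *src)
{
	memcpy(dst, src, 6);    /* 6-byte MAC address */
}

int main(void)
{
	u8 a[6] = { 0 };
	u8 b[6] = { 0x00, 0x0e, 0xa6, 0x01, 0x02, 0x03 };

	copy_mac(a, b);
	return 0;
}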
@@ -110,16 +102,9 @@ typedef struct urb * PURB;
#define OS_ATOMIC_READ( _A, _V ) _V
#define OS_ATOMIC_INC( _A, _V ) EncapAtomicInc( _A, (void*)_V )
#define OS_ATOMIC_DEC( _A, _V ) EncapAtomicDec( _A, (void*)_V )
-#define OS_MEMORY_CLEAR( _A, _S ) memset( (PUCHAR)_A,0,_S)
+#define OS_MEMORY_CLEAR( _A, _S ) memset( (u8 *)_A,0,_S)
#define OS_MEMORY_COMPARE( _A, _B, _S ) (memcmp(_A,_B,_S)? 0 : 1) // Definition is reverse with Ndis 1: the same 0: different
-
-#define OS_SPIN_LOCK spinlock_t
-#define OS_SPIN_LOCK_ALLOCATE( _S ) spin_lock_init( _S );
-#define OS_SPIN_LOCK_FREE( _S )
-#define OS_SPIN_LOCK_ACQUIRED( _S ) spin_lock_irq( _S )
-#define OS_SPIN_LOCK_RELEASED( _S ) spin_unlock_irq( _S );
-
#define OS_TIMER struct timer_list
#define OS_TIMER_INITIAL( _T, _F, _P ) \
{ \
diff --git a/drivers/staging/winbond/linux/wb35reg.c b/drivers/staging/winbond/linux/wb35reg.c
index 2c0b454e8ca..ebb6db5438a 100644
--- a/drivers/staging/winbond/linux/wb35reg.c
+++ b/drivers/staging/winbond/linux/wb35reg.c
@@ -10,7 +10,7 @@ extern void phy_calibration_winbond(hw_data_t *phw_data, u32 frequency);
// Flag : AUTO_INCREMENT - RegisterNo will auto increment 4
// NO_INCREMENT - Function will write data into the same register
unsigned char
-Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8 NumberOfData, u8 Flag)
+Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterData, u8 NumberOfData, u8 Flag)
{
PWB35REG pWb35Reg = &pHwData->Wb35Reg;
PURB pUrb = NULL;
@@ -30,13 +30,13 @@ Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8
if( pUrb && pRegQueue ) {
pRegQueue->DIRECT = 2;// burst write register
pRegQueue->INDEX = RegisterNo;
- pRegQueue->pBuffer = (PULONG)((PUCHAR)pRegQueue + sizeof(REG_QUEUE));
+ pRegQueue->pBuffer = (u32 *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
memcpy( pRegQueue->pBuffer, pRegisterData, DataSize );
//the function for reversing register data from little endian to big endian
for( i=0; i<NumberOfData ; i++ )
pRegQueue->pBuffer[i] = cpu_to_le32( pRegQueue->pBuffer[i] );
- dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE) + DataSize);
+ dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE) + DataSize);
dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
dr->bRequest = 0x04; // USB or vendor-defined request code, burst mode
dr->wValue = cpu_to_le16( Flag ); // 0: Register number auto-increment, 1: No auto increment
@@ -46,14 +46,14 @@ Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8
pRegQueue->pUsbReq = dr;
pRegQueue->pUrb = pUrb;
- OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock );
+ spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
if (pWb35Reg->pRegFirst == NULL)
pWb35Reg->pRegFirst = pRegQueue;
else
pWb35Reg->pRegLast->Next = pRegQueue;
pWb35Reg->pRegLast = pRegQueue;
- OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
+ spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
// Start EP0VM
Wb35Reg_EP0VM_start(pHwData);
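Throughout this file the OS_SPIN_LOCK_* wrappers become direct spin_lock_irq()/spin_unlock_irq() calls around the same tail append of the register request queue. A kernel-context sketch of that pattern, with the structures and names simplified stand-ins for PWB35REG/REG_QUEUE (the lock is assumed to have been set up with spin_lock_init(), as Wb35Reg_initial does):

#include <linux/spinlock.h>

struct reg_req {
	struct reg_req *next;
};

struct reg_queue_head {
	spinlock_t lock;        /* was OS_SPIN_LOCK / EP0VM_spin_lock */
	struct reg_req *first;
	struct reg_req *last;
};

static void reg_queue_append(struct reg_queue_head *q, struct reg_req *req)
{
	req->next = NULL;

	spin_lock_irq(&q->lock);        /* was OS_SPIN_LOCK_ACQUIRED() */
	if (!q->first)
		q->first = req;
	else
		q->last->next = req;
	q->last = req;
	spin_unlock_irq(&q->lock);      /* was OS_SPIN_LOCK_RELEASED() */
}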
@@ -181,7 +181,7 @@ Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue )
pRegQueue->INDEX = RegisterNo;
pRegQueue->VALUE = cpu_to_le32(RegisterValue);
pRegQueue->RESERVED_VALID = FALSE;
- dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE));
+ dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE;
dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode
dr->wValue = cpu_to_le16(0x0);
@@ -193,14 +193,14 @@ Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue )
pRegQueue->pUsbReq = dr;
pRegQueue->pUrb = pUrb;
- OS_SPIN_LOCK_ACQUIRED(&pWb35Reg->EP0VM_spin_lock );
+ spin_lock_irq(&pWb35Reg->EP0VM_spin_lock );
if (pWb35Reg->pRegFirst == NULL)
pWb35Reg->pRegFirst = pRegQueue;
else
pWb35Reg->pRegLast->Next = pRegQueue;
pWb35Reg->pRegLast = pRegQueue;
- OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
+ spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
// Start EP0VM
Wb35Reg_EP0VM_start(pHwData);
@@ -220,7 +220,7 @@ Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue )
// FALSE : register not support
unsigned char
Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue,
- PCHAR pValue, s8 Len)
+ s8 *pValue, s8 Len)
{
PWB35REG pWb35Reg = &pHwData->Wb35Reg;
struct usb_ctrlrequest *dr;
@@ -243,7 +243,7 @@ Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 Register
//NOTE : Users must guarantee the size of value will not exceed the buffer size.
memcpy(pRegQueue->RESERVED, pValue, Len);
pRegQueue->RESERVED_VALID = TRUE;
- dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE));
+ dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE;
dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode
dr->wValue = cpu_to_le16(0x0);
@@ -254,14 +254,14 @@ Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 Register
pRegQueue->Next = NULL;
pRegQueue->pUsbReq = dr;
pRegQueue->pUrb = pUrb;
- OS_SPIN_LOCK_ACQUIRED (&pWb35Reg->EP0VM_spin_lock );
+ spin_lock_irq (&pWb35Reg->EP0VM_spin_lock );
if( pWb35Reg->pRegFirst == NULL )
pWb35Reg->pRegFirst = pRegQueue;
else
pWb35Reg->pRegLast->Next = pRegQueue;
pWb35Reg->pRegLast = pRegQueue;
- OS_SPIN_LOCK_RELEASED ( &pWb35Reg->EP0VM_spin_lock );
+ spin_unlock_irq ( &pWb35Reg->EP0VM_spin_lock );
// Start EP0VM
Wb35Reg_EP0VM_start(pHwData);
@@ -278,10 +278,10 @@ Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 Register
// FALSE : register not support
// pRegisterValue : It must be a resident buffer due to asynchronous read register.
unsigned char
-Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue )
+Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue )
{
PWB35REG pWb35Reg = &pHwData->Wb35Reg;
- PULONG pltmp = pRegisterValue;
+ u32 * pltmp = pRegisterValue;
int ret = -1;
// Module shutdown
@@ -327,7 +327,7 @@ Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue
// FALSE : register not support
// pRegisterValue : It must be a resident buffer due to asynchronous read register.
unsigned char
-Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue )
+Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue )
{
PWB35REG pWb35Reg = &pHwData->Wb35Reg;
struct usb_ctrlrequest * dr;
@@ -348,7 +348,7 @@ Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue )
pRegQueue->DIRECT = 0;// read register
pRegQueue->INDEX = RegisterNo;
pRegQueue->pBuffer = pRegisterValue;
- dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE));
+ dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
dr->bRequestType = USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN;
dr->bRequest = 0x01; // USB or vendor-defined request code, burst mode
dr->wValue = cpu_to_le16(0x0);
@@ -359,14 +359,14 @@ Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue )
pRegQueue->Next = NULL;
pRegQueue->pUsbReq = dr;
pRegQueue->pUrb = pUrb;
- OS_SPIN_LOCK_ACQUIRED ( &pWb35Reg->EP0VM_spin_lock );
+ spin_lock_irq ( &pWb35Reg->EP0VM_spin_lock );
if( pWb35Reg->pRegFirst == NULL )
pWb35Reg->pRegFirst = pRegQueue;
else
pWb35Reg->pRegLast->Next = pRegQueue;
pWb35Reg->pRegLast = pRegQueue;
- OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
+ spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
// Start EP0VM
Wb35Reg_EP0VM_start( pHwData );
@@ -399,7 +399,7 @@ Wb35Reg_EP0VM(phw_data_t pHwData )
PWB35REG pWb35Reg = &pHwData->Wb35Reg;
PURB pUrb;
struct usb_ctrlrequest *dr;
- PULONG pBuffer;
+ u32 * pBuffer;
int ret = -1;
PREG_QUEUE pRegQueue;
@@ -411,9 +411,9 @@ Wb35Reg_EP0VM(phw_data_t pHwData )
goto cleanup;
// Get the register data and send to USB through Irp
- OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock );
+ spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
pRegQueue = pWb35Reg->pRegFirst;
- OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
+ spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
if (!pRegQueue)
goto cleanup;
@@ -429,7 +429,7 @@ Wb35Reg_EP0VM(phw_data_t pHwData )
usb_fill_control_urb( pUrb, pHwData->WbUsb.udev,
REG_DIRECTION(pHwData->WbUsb.udev,pRegQueue),
- (PUCHAR)dr,pBuffer,cpu_to_le16(dr->wLength),
+ (u8 *)dr,pBuffer,cpu_to_le16(dr->wLength),
Wb35Reg_EP0VM_complete, (void*)pHwData);
pWb35Reg->EP0vm_state = VM_RUNNING;
@@ -468,12 +468,12 @@ Wb35Reg_EP0VM_complete(PURB pUrb)
OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Reg->RegFireCount );
} else {
// Complete to send, remove the URB from the first
- OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock );
+ spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
pRegQueue = pWb35Reg->pRegFirst;
if (pRegQueue == pWb35Reg->pRegLast)
pWb35Reg->pRegLast = NULL;
pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next;
- OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
+ spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
if (pWb35Reg->EP0VM_status) {
#ifdef _PE_REG_DUMP_
@@ -513,7 +513,7 @@ Wb35Reg_destroy(phw_data_t pHwData)
OS_SLEEP(10000); // Delay for waiting function enter 940623.1.b
// Release all the data in RegQueue
- OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock );
+ spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
pRegQueue = pWb35Reg->pRegFirst;
while (pRegQueue) {
if (pRegQueue == pWb35Reg->pRegLast)
@@ -521,7 +521,7 @@ Wb35Reg_destroy(phw_data_t pHwData)
pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next;
pUrb = pRegQueue->pUrb;
- OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
+ spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
if (pUrb) {
usb_free_urb(pUrb);
kfree(pRegQueue);
@@ -530,14 +530,11 @@ Wb35Reg_destroy(phw_data_t pHwData)
WBDEBUG(("EP0 queue release error\n"));
#endif
}
- OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock );
+ spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
pRegQueue = pWb35Reg->pRegFirst;
}
- OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock );
-
- // Free resource
- OS_SPIN_LOCK_FREE( &pWb35Reg->EP0VM_spin_lock );
+ spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
}
//====================================================================================
@@ -550,7 +547,7 @@ unsigned char Wb35Reg_initial(phw_data_t pHwData)
u32 SoftwareSet, VCO_trim, TxVga, Region_ScanInterval;
// Spin lock is acquired for read and write IRP command
- OS_SPIN_LOCK_ALLOCATE( &pWb35Reg->EP0VM_spin_lock );
+ spin_lock_init( &pWb35Reg->EP0VM_spin_lock );
// Getting RF module type from EEPROM ------------------------------------
Wb35Reg_WriteSync( pHwData, 0x03b4, 0x080d0000 ); // Start EEPROM access + Read + address(0x0d)
@@ -655,7 +652,7 @@ unsigned char Wb35Reg_initial(phw_data_t pHwData)
// version in _GENREQ.ASM of the DWB NE1000/2000 driver.
//==================================================================================
u32
-CardComputeCrc(PUCHAR Buffer, u32 Length)
+CardComputeCrc(u8 * Buffer, u32 Length)
{
u32 Crc, Carry;
u32 i, j;
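
Note on the wb35reg.c hunks above: they make two mechanical changes — the driver's private OS_SPIN_LOCK_* wrappers become the kernel's native spinlock calls (spin_lock_init/spin_lock_irq/spin_unlock_irq, with the OS_SPIN_LOCK_FREE call dropped because a spinlock_t needs no teardown), and the Windows-style pointer typedefs become fixed-width kernel types. A minimal sketch of the resulting locking pattern follows; it is not taken from the patch, the my_queue/my_node names are hypothetical, and only the spinlock and list APIs are real.

#include <linux/spinlock.h>
#include <linux/list.h>

struct my_node {
	struct list_head entry;
};

struct my_queue {
	spinlock_t lock;		/* was OS_SPIN_LOCK */
	struct list_head head;
};

static void my_queue_init(struct my_queue *q)
{
	spin_lock_init(&q->lock);	/* was OS_SPIN_LOCK_ALLOCATE */
	INIT_LIST_HEAD(&q->head);
}

static void my_queue_add(struct my_queue *q, struct my_node *n)
{
	spin_lock_irq(&q->lock);	/* was OS_SPIN_LOCK_ACQUIRED */
	list_add_tail(&n->entry, &q->head);
	spin_unlock_irq(&q->lock);	/* was OS_SPIN_LOCK_RELEASED */
}

One caveat: spin_lock_irq()/spin_unlock_irq() unconditionally re-enable interrupts on unlock, so spin_lock_irqsave() is the usual defensive choice in paths (such as URB completion handlers) that may already run with interrupts disabled.
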
diff --git a/drivers/staging/winbond/linux/wb35reg_f.h b/drivers/staging/winbond/linux/wb35reg_f.h
index 38e2906b51a..3006cfe99cc 100644
--- a/drivers/staging/winbond/linux/wb35reg_f.h
+++ b/drivers/staging/winbond/linux/wb35reg_f.h
@@ -29,16 +29,16 @@ void EEPROMTxVgaAdjust( phw_data_t pHwData ); // 20060619.5 Add
void Wb35Reg_destroy( phw_data_t pHwData );
-unsigned char Wb35Reg_Read( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue );
-unsigned char Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue );
+unsigned char Wb35Reg_Read( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue );
+unsigned char Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue );
unsigned char Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue );
unsigned char Wb35Reg_WriteSync( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue );
unsigned char Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData,
u16 RegisterNo,
u32 RegisterValue,
- PCHAR pValue,
- s8 Len);
-unsigned char Wb35Reg_BurstWrite( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8 NumberOfData, u8 Flag );
+ s8 *pValue,
+ s8 Len);
+unsigned char Wb35Reg_BurstWrite( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterData, u8 NumberOfData, u8 Flag );
void Wb35Reg_EP0VM( phw_data_t pHwData );
void Wb35Reg_EP0VM_start( phw_data_t pHwData );
@@ -47,7 +47,7 @@ void Wb35Reg_EP0VM_complete( PURB pUrb );
u32 BitReverse( u32 dwData, u32 DataLength);
void CardGetMulticastBit( u8 Address[MAC_ADDR_LENGTH], u8 *Byte, u8 *Value );
-u32 CardComputeCrc( PUCHAR Buffer, u32 Length );
+u32 CardComputeCrc( u8 * Buffer, u32 Length );
void Wb35Reg_phy_calibration( phw_data_t pHwData );
void Wb35Reg_Update( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue );
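
The header hunks track the same cleanup in the prototypes. The type mapping applied throughout the series appears to be the one below, inferred from the hunks themselves (the old names come from the driver's Windows-derived type layer). The small helper is a hypothetical illustration of why fixed-width types matter here: register words are little-endian 32-bit values regardless of the host.

/*
 * Type mapping used by this series (inferred from the hunks):
 *
 *   PUCHAR       -> u8 *
 *   PCHAR        -> s8 *
 *   PUSHORT      -> u16 *
 *   PULONG       -> u32 *        (pointee is always 32 bits)
 *   OS_SPIN_LOCK -> spinlock_t
 */
#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical helper: read a little-endian 32-bit hardware word. */
static inline u32 demo_read_hw_word(const u8 *p)
{
	return get_unaligned_le32(p);
}
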
diff --git a/drivers/staging/winbond/linux/wb35reg_s.h b/drivers/staging/winbond/linux/wb35reg_s.h
index a7595b1e733..8b35b93f7f0 100644
--- a/drivers/staging/winbond/linux/wb35reg_s.h
+++ b/drivers/staging/winbond/linux/wb35reg_s.h
@@ -75,7 +75,7 @@ typedef struct _REG_QUEUE
union
{
u32 VALUE;
- PULONG pBuffer;
+ u32 * pBuffer;
};
u8 RESERVED[4];// space reserved for communication
@@ -143,7 +143,7 @@ typedef struct _WB35REG
//-------------------
// VM
//-------------------
- OS_SPIN_LOCK EP0VM_spin_lock; // 4B
+ spinlock_t EP0VM_spin_lock; // 4B
u32 EP0VM_status;//$$
PREG_QUEUE pRegFirst;
PREG_QUEUE pRegLast;
diff --git a/drivers/staging/winbond/linux/wb35rx.c b/drivers/staging/winbond/linux/wb35rx.c
index 26157eb3d5a..b4b9f5f371d 100644
--- a/drivers/staging/winbond/linux/wb35rx.c
+++ b/drivers/staging/winbond/linux/wb35rx.c
@@ -27,7 +27,7 @@ void Wb35Rx_start(phw_data_t pHwData)
void Wb35Rx( phw_data_t pHwData )
{
PWB35RX pWb35Rx = &pHwData->Wb35Rx;
- PUCHAR pRxBufferAddress;
+ u8 * pRxBufferAddress;
PURB pUrb = (PURB)pWb35Rx->RxUrb;
int retv;
u32 RxBufferId;
@@ -35,51 +35,50 @@ void Wb35Rx( phw_data_t pHwData )
//
// Issuing URB
//
- do {
- if (pHwData->SurpriseRemove || pHwData->HwStop)
- break;
+ if (pHwData->SurpriseRemove || pHwData->HwStop)
+ goto error;
- if (pWb35Rx->rx_halt)
- break;
+ if (pWb35Rx->rx_halt)
+ goto error;
- // Get RxBuffer's ID
- RxBufferId = pWb35Rx->RxBufferId;
- if (!pWb35Rx->RxOwner[RxBufferId]) {
- // It's impossible to run here.
- #ifdef _PE_RX_DUMP_
- WBDEBUG(("Rx driver fifo unavailable\n"));
- #endif
- break;
- }
+ // Get RxBuffer's ID
+ RxBufferId = pWb35Rx->RxBufferId;
+ if (!pWb35Rx->RxOwner[RxBufferId]) {
+ // It's impossible to run here.
+ #ifdef _PE_RX_DUMP_
+ WBDEBUG(("Rx driver fifo unavailable\n"));
+ #endif
+ goto error;
+ }
- // Update buffer point, then start to bulkin the data from USB
- pWb35Rx->RxBufferId++;
- pWb35Rx->RxBufferId %= MAX_USB_RX_BUFFER_NUMBER;
+ // Update buffer point, then start to bulkin the data from USB
+ pWb35Rx->RxBufferId++;
+ pWb35Rx->RxBufferId %= MAX_USB_RX_BUFFER_NUMBER;
- pWb35Rx->CurrentRxBufferId = RxBufferId;
+ pWb35Rx->CurrentRxBufferId = RxBufferId;
- if (1 != OS_MEMORY_ALLOC((void* *)&pWb35Rx->pDRx, MAX_USB_RX_BUFFER)) {
- printk("w35und: Rx memory alloc failed\n");
- break;
- }
- pRxBufferAddress = pWb35Rx->pDRx;
+ if (1 != OS_MEMORY_ALLOC((void* *)&pWb35Rx->pDRx, MAX_USB_RX_BUFFER)) {
+ printk("w35und: Rx memory alloc failed\n");
+ goto error;
+ }
+ pRxBufferAddress = pWb35Rx->pDRx;
- usb_fill_bulk_urb(pUrb, pHwData->WbUsb.udev,
- usb_rcvbulkpipe(pHwData->WbUsb.udev, 3),
- pRxBufferAddress, MAX_USB_RX_BUFFER,
- Wb35Rx_Complete, pHwData);
+ usb_fill_bulk_urb(pUrb, pHwData->WbUsb.udev,
+ usb_rcvbulkpipe(pHwData->WbUsb.udev, 3),
+ pRxBufferAddress, MAX_USB_RX_BUFFER,
+ Wb35Rx_Complete, pHwData);
- pWb35Rx->EP3vm_state = VM_RUNNING;
+ pWb35Rx->EP3vm_state = VM_RUNNING;
- retv = wb_usb_submit_urb(pUrb);
+ retv = wb_usb_submit_urb(pUrb);
- if (retv != 0) {
- printk("Rx URB sending error\n");
- break;
- }
- return;
- } while(FALSE);
+ if (retv != 0) {
+ printk("Rx URB sending error\n");
+ goto error;
+ }
+ return;
+error:
// VM stop
pWb35Rx->EP3vm_state = VM_STOP;
OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Rx->RxFireCounter );
@@ -89,7 +88,7 @@ void Wb35Rx_Complete(PURB pUrb)
{
phw_data_t pHwData = pUrb->context;
PWB35RX pWb35Rx = &pHwData->Wb35Rx;
- PUCHAR pRxBufferAddress;
+ u8 * pRxBufferAddress;
u32 SizeCheck;
u16 BulkLength;
u32 RxBufferId;
@@ -99,65 +98,63 @@ void Wb35Rx_Complete(PURB pUrb)
pWb35Rx->EP3vm_state = VM_COMPLETED;
pWb35Rx->EP3VM_status = pUrb->status;//Store the last result of Irp
- do {
- RxBufferId = pWb35Rx->CurrentRxBufferId;
+ RxBufferId = pWb35Rx->CurrentRxBufferId;
- pRxBufferAddress = pWb35Rx->pDRx;
- BulkLength = (u16)pUrb->actual_length;
+ pRxBufferAddress = pWb35Rx->pDRx;
+ BulkLength = (u16)pUrb->actual_length;
- // The IRP is completed
- pWb35Rx->EP3vm_state = VM_COMPLETED;
+ // The IRP is completed
+ pWb35Rx->EP3vm_state = VM_COMPLETED;
- if (pHwData->SurpriseRemove || pHwData->HwStop) // Must be here, or RxBufferId is invalid
- break;
+ if (pHwData->SurpriseRemove || pHwData->HwStop) // Must be here, or RxBufferId is invalid
+ goto error;
- if (pWb35Rx->rx_halt)
- break;
+ if (pWb35Rx->rx_halt)
+ goto error;
- // Start to process the data only in successful condition
- pWb35Rx->RxOwner[ RxBufferId ] = 0; // Set the owner to driver
- R00.value = le32_to_cpu(*(PULONG)pRxBufferAddress);
+ // Start to process the data only in successful condition
+ pWb35Rx->RxOwner[ RxBufferId ] = 0; // Set the owner to driver
+ R00.value = le32_to_cpu(*(u32 *)pRxBufferAddress);
- // The URB is completed, check the result
- if (pWb35Rx->EP3VM_status != 0) {
- #ifdef _PE_USB_STATE_DUMP_
- WBDEBUG(("EP3 IoCompleteRoutine return error\n"));
- DebugUsbdStatusInformation( pWb35Rx->EP3VM_status );
- #endif
- pWb35Rx->EP3vm_state = VM_STOP;
- break;
- }
+ // The URB is completed, check the result
+ if (pWb35Rx->EP3VM_status != 0) {
+ #ifdef _PE_USB_STATE_DUMP_
+ WBDEBUG(("EP3 IoCompleteRoutine return error\n"));
+ DebugUsbdStatusInformation( pWb35Rx->EP3VM_status );
+ #endif
+ pWb35Rx->EP3vm_state = VM_STOP;
+ goto error;
+ }
- // 20060220 For recovering. check if operating in single USB mode
- if (!HAL_USB_MODE_BURST(pHwData)) {
- SizeCheck = R00.R00_receive_byte_count; //20060926 anson's endian
- if ((SizeCheck & 0x03) > 0)
- SizeCheck -= 4;
- SizeCheck = (SizeCheck + 3) & ~0x03;
- SizeCheck += 12; // 8 + 4 badbeef
- if ((BulkLength > 1600) ||
- (SizeCheck > 1600) ||
- (BulkLength != SizeCheck) ||
- (BulkLength == 0)) { // Add for fail Urb
- pWb35Rx->EP3vm_state = VM_STOP;
- pWb35Rx->Ep3ErrorCount2++;
- }
+ // 20060220 For recovering. check if operating in single USB mode
+ if (!HAL_USB_MODE_BURST(pHwData)) {
+ SizeCheck = R00.R00_receive_byte_count; //20060926 anson's endian
+ if ((SizeCheck & 0x03) > 0)
+ SizeCheck -= 4;
+ SizeCheck = (SizeCheck + 3) & ~0x03;
+ SizeCheck += 12; // 8 + 4 badbeef
+ if ((BulkLength > 1600) ||
+ (SizeCheck > 1600) ||
+ (BulkLength != SizeCheck) ||
+ (BulkLength == 0)) { // Add for fail Urb
+ pWb35Rx->EP3vm_state = VM_STOP;
+ pWb35Rx->Ep3ErrorCount2++;
}
+ }
- // Indicating the receiving data
- pWb35Rx->ByteReceived += BulkLength;
- pWb35Rx->RxBufferSize[ RxBufferId ] = BulkLength;
-
- if (!pWb35Rx->RxOwner[ RxBufferId ])
- Wb35Rx_indicate(pHwData);
+ // Indicating the receiving data
+ pWb35Rx->ByteReceived += BulkLength;
+ pWb35Rx->RxBufferSize[ RxBufferId ] = BulkLength;
- kfree(pWb35Rx->pDRx);
- // Do the next receive
- Wb35Rx(pHwData);
- return;
+ if (!pWb35Rx->RxOwner[ RxBufferId ])
+ Wb35Rx_indicate(pHwData);
- } while(FALSE);
+ kfree(pWb35Rx->pDRx);
+ // Do the next receive
+ Wb35Rx(pHwData);
+ return;
+error:
pWb35Rx->RxOwner[ RxBufferId ] = 1; // Set the owner to hardware
OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Rx->RxFireCounter );
pWb35Rx->EP3vm_state = VM_STOP;
@@ -223,7 +220,7 @@ void Wb35Rx_reset_descriptor( phw_data_t pHwData )
void Wb35Rx_adjust(PDESCRIPTOR pRxDes)
{
- PULONG pRxBufferAddress;
+ u32 * pRxBufferAddress;
u32 DecryptionMethod;
u32 i;
u16 BufferSize;
@@ -264,7 +261,7 @@ u16 Wb35Rx_indicate(phw_data_t pHwData)
{
DESCRIPTOR RxDes;
PWB35RX pWb35Rx = &pHwData->Wb35Rx;
- PUCHAR pRxBufferAddress;
+ u8 * pRxBufferAddress;
u16 PacketSize;
u16 stmp, BufferSize, stmp2 = 0;
u32 RxBufferId;
@@ -283,13 +280,13 @@ u16 Wb35Rx_indicate(phw_data_t pHwData)
// Parse the bulkin buffer
while (BufferSize >= 4) {
- if ((cpu_to_le32(*(PULONG)pRxBufferAddress) & 0x0fffffff) == RX_END_TAG) //Is ending? 921002.9.a
+ if ((cpu_to_le32(*(u32 *)pRxBufferAddress) & 0x0fffffff) == RX_END_TAG) //Is ending? 921002.9.a
break;
// Get the R00 R01 first
- RxDes.R00.value = le32_to_cpu(*(PULONG)pRxBufferAddress);
+ RxDes.R00.value = le32_to_cpu(*(u32 *)pRxBufferAddress);
PacketSize = (u16)RxDes.R00.R00_receive_byte_count;
- RxDes.R01.value = le32_to_cpu(*((PULONG)(pRxBufferAddress+4)));
+ RxDes.R01.value = le32_to_cpu(*((u32 *)(pRxBufferAddress+4)));
// For new DMA 4k
if ((PacketSize & 0x03) > 0)
PacketSize -= 4;
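
The bulk of the wb35rx.c change is control flow: the do { ... break; ... } while(FALSE) pseudo-loops become straight-line code with a single error: label, the conventional kernel error-path style. Below is a compact sketch of the shape the Rx path now follows; every name in it (demo_dev, demo_submit_rx, DEMO_*) is hypothetical and only the idiom is the point.

#include <linux/slab.h>

#define DEMO_RX_BUFSIZE	2048
enum { DEMO_VM_RUNNING, DEMO_VM_STOP };

struct demo_dev {
	int removed, halted;
	int rx_state;
};

/* stand-in for the real URB submission; on success the completion
 * handler would own and later free buf */
static int demo_submit_rx(struct demo_dev *dev, void *buf)
{
	return 0;
}

static void demo_rx_start(struct demo_dev *dev)
{
	void *buf;

	if (dev->removed || dev->halted)
		goto error;

	buf = kmalloc(DEMO_RX_BUFSIZE, GFP_ATOMIC);
	if (!buf)
		goto error;

	if (demo_submit_rx(dev, buf)) {
		kfree(buf);
		goto error;
	}
	return;

error:
	dev->rx_state = DEMO_VM_STOP;	/* stop the Rx state machine */
}
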
diff --git a/drivers/staging/winbond/linux/wb35rx_s.h b/drivers/staging/winbond/linux/wb35rx_s.h
index 53b831fdeb7..b90c269e6ad 100644
--- a/drivers/staging/winbond/linux/wb35rx_s.h
+++ b/drivers/staging/winbond/linux/wb35rx_s.h
@@ -41,7 +41,7 @@ typedef struct _WB35RX
u32 Ep3ErrorCount2; // 20060625.1 Usbd for Rx DMA error count
int EP3VM_status;
- PUCHAR pDRx;
+ u8 * pDRx;
} WB35RX, *PWB35RX;
diff --git a/drivers/staging/winbond/linux/wb35tx.c b/drivers/staging/winbond/linux/wb35tx.c
index cf19c3bc524..ba9d51244e2 100644
--- a/drivers/staging/winbond/linux/wb35tx.c
+++ b/drivers/staging/winbond/linux/wb35tx.c
@@ -12,7 +12,7 @@
unsigned char
-Wb35Tx_get_tx_buffer(phw_data_t pHwData, PUCHAR *pBuffer )
+Wb35Tx_get_tx_buffer(phw_data_t pHwData, u8 **pBuffer)
{
PWB35TX pWb35Tx = &pHwData->Wb35Tx;
@@ -37,7 +37,7 @@ void Wb35Tx(phw_data_t pHwData)
{
PWB35TX pWb35Tx = &pHwData->Wb35Tx;
PADAPTER Adapter = pHwData->Adapter;
- PUCHAR pTxBufferAddress;
+ u8 *pTxBufferAddress;
PMDS pMds = &Adapter->Mds;
struct urb * pUrb = (struct urb *)pWb35Tx->Tx4Urb;
int retv;
@@ -100,25 +100,24 @@ void Wb35Tx_complete(struct urb * pUrb)
pWb35Tx->TxSendIndex++;
pWb35Tx->TxSendIndex %= MAX_USB_TX_BUFFER_NUMBER;
- do {
- if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
- break;
+ if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
+ goto error;
- if (pWb35Tx->tx_halt)
- break;
+ if (pWb35Tx->tx_halt)
+ goto error;
- // The URB is completed, check the result
- if (pWb35Tx->EP4VM_status != 0) {
- printk("URB submission failed\n");
- pWb35Tx->EP4vm_state = VM_STOP;
- break; // Exit while(FALSE);
- }
+ // The URB is completed, check the result
+ if (pWb35Tx->EP4VM_status != 0) {
+ printk("URB submission failed\n");
+ pWb35Tx->EP4vm_state = VM_STOP;
+ goto error;
+ }
- Mds_Tx(Adapter);
- Wb35Tx(pHwData);
- return;
- } while(FALSE);
+ Mds_Tx(Adapter);
+ Wb35Tx(pHwData);
+ return;
+error:
OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxFireCounter );
pWb35Tx->EP4vm_state = VM_STOP;
}
@@ -225,36 +224,33 @@ void Wb35Tx_EP2VM(phw_data_t pHwData)
{
PWB35TX pWb35Tx = &pHwData->Wb35Tx;
struct urb * pUrb = (struct urb *)pWb35Tx->Tx2Urb;
- PULONG pltmp = (PULONG)pWb35Tx->EP2_buf;
+ u32 * pltmp = (u32 *)pWb35Tx->EP2_buf;
int retv;
- do {
- if (pHwData->SurpriseRemove || pHwData->HwStop)
- break;
-
- if (pWb35Tx->tx_halt)
- break;
-
- //
- // Issuing URB
- //
- usb_fill_int_urb( pUrb, pHwData->WbUsb.udev, usb_rcvintpipe(pHwData->WbUsb.udev,2),
- pltmp, MAX_INTERRUPT_LENGTH, Wb35Tx_EP2VM_complete, pHwData, 32);
+ if (pHwData->SurpriseRemove || pHwData->HwStop)
+ goto error;
- pWb35Tx->EP2vm_state = VM_RUNNING;
- retv = wb_usb_submit_urb( pUrb );
+ if (pWb35Tx->tx_halt)
+ goto error;
- if(retv < 0) {
- #ifdef _PE_TX_DUMP_
- WBDEBUG(("EP2 Tx Irp sending error\n"));
- #endif
- break;
- }
+ //
+ // Issuing URB
+ //
+ usb_fill_int_urb( pUrb, pHwData->WbUsb.udev, usb_rcvintpipe(pHwData->WbUsb.udev,2),
+ pltmp, MAX_INTERRUPT_LENGTH, Wb35Tx_EP2VM_complete, pHwData, 32);
- return;
+ pWb35Tx->EP2vm_state = VM_RUNNING;
+ retv = wb_usb_submit_urb( pUrb );
- } while(FALSE);
+ if (retv < 0) {
+ #ifdef _PE_TX_DUMP_
+ WBDEBUG(("EP2 Tx Irp sending error\n"));
+ #endif
+ goto error;
+ }
+ return;
+error:
pWb35Tx->EP2vm_state = VM_STOP;
OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxResultCount );
}
@@ -266,7 +262,7 @@ void Wb35Tx_EP2VM_complete(struct urb * pUrb)
T02_DESCRIPTOR T02, TSTATUS;
PADAPTER Adapter = (PADAPTER)pHwData->Adapter;
PWB35TX pWb35Tx = &pHwData->Wb35Tx;
- PULONG pltmp = (PULONG)pWb35Tx->EP2_buf;
+ u32 * pltmp = (u32 *)pWb35Tx->EP2_buf;
u32 i;
u16 InterruptInLength;
@@ -275,38 +271,36 @@ void Wb35Tx_EP2VM_complete(struct urb * pUrb)
pWb35Tx->EP2vm_state = VM_COMPLETED;
pWb35Tx->EP2VM_status = pUrb->status;
- do {
- // For Linux 2.4. Interrupt will always trigger
- if( pHwData->SurpriseRemove || pHwData->HwStop ) // Let WbWlanHalt to handle surprise remove
- break;
-
- if( pWb35Tx->tx_halt )
- break;
-
- //The Urb is completed, check the result
- if (pWb35Tx->EP2VM_status != 0) {
- WBDEBUG(("EP2 IoCompleteRoutine return error\n"));
- pWb35Tx->EP2vm_state= VM_STOP;
- break; // Exit while(FALSE);
- }
-
- // Update the Tx result
- InterruptInLength = pUrb->actual_length;
- // Modify for minimum memory access and DWORD alignment.
- T02.value = cpu_to_le32(pltmp[0]) >> 8; // [31:8] -> [24:0]
- InterruptInLength -= 1;// 20051221.1.c Modify the follow for more stable
- InterruptInLength >>= 2; // InterruptInLength/4
- for (i=1; i<=InterruptInLength; i++) {
- T02.value |= ((cpu_to_le32(pltmp[i]) & 0xff) << 24);
-
- TSTATUS.value = T02.value; //20061009 anson's endian
- Mds_SendComplete( Adapter, &TSTATUS );
- T02.value = cpu_to_le32(pltmp[i]) >> 8;
- }
-
- return;
- } while(FALSE);
+ // For Linux 2.4. Interrupt will always trigger
+ if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
+ goto error;
+
+ if (pWb35Tx->tx_halt)
+ goto error;
+
+ //The Urb is completed, check the result
+ if (pWb35Tx->EP2VM_status != 0) {
+ WBDEBUG(("EP2 IoCompleteRoutine return error\n"));
+ pWb35Tx->EP2vm_state= VM_STOP;
+ goto error;
+ }
+ // Update the Tx result
+ InterruptInLength = pUrb->actual_length;
+ // Modify for minimum memory access and DWORD alignment.
+ T02.value = cpu_to_le32(pltmp[0]) >> 8; // [31:8] -> [24:0]
+ InterruptInLength -= 1;// 20051221.1.c Modify the follow for more stable
+ InterruptInLength >>= 2; // InterruptInLength/4
+ for (i = 1; i <= InterruptInLength; i++) {
+ T02.value |= ((cpu_to_le32(pltmp[i]) & 0xff) << 24);
+
+ TSTATUS.value = T02.value; //20061009 anson's endian
+ Mds_SendComplete( Adapter, &TSTATUS );
+ T02.value = cpu_to_le32(pltmp[i]) >> 8;
+ }
+
+ return;
+error:
OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxResultCount );
pWb35Tx->EP2vm_state = VM_STOP;
}
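
wb35tx.c gets the same goto-error treatment; the fixed point worth seeing in isolation is the interrupt-in URB on endpoint 2 that the driver keeps resubmitting to poll Tx status. A minimal sketch using only stock USB core calls follows; the endpoint number (2) and the 32-unit polling interval are the driver's, while demo_complete/demo_ctx are hypothetical, and the sketch allocates an URB for self-containment where the driver reuses a preallocated one.

#include <linux/usb.h>

static int demo_submit_status_urb(struct usb_device *udev, void *buf,
				  int len, usb_complete_t demo_complete,
				  void *demo_ctx)
{
	struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);

	if (!urb)
		return -ENOMEM;

	/* interrupt-IN pipe on endpoint 2, polled every 32 interval units */
	usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, 2),
			 buf, len, demo_complete, demo_ctx, 32);

	return usb_submit_urb(urb, GFP_ATOMIC);
}
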
diff --git a/drivers/staging/winbond/linux/wb35tx_f.h b/drivers/staging/winbond/linux/wb35tx_f.h
index 7705a8454dc..107b1291813 100644
--- a/drivers/staging/winbond/linux/wb35tx_f.h
+++ b/drivers/staging/winbond/linux/wb35tx_f.h
@@ -3,7 +3,7 @@
//====================================
unsigned char Wb35Tx_initial( phw_data_t pHwData );
void Wb35Tx_destroy( phw_data_t pHwData );
-unsigned char Wb35Tx_get_tx_buffer( phw_data_t pHwData, PUCHAR *pBuffer );
+unsigned char Wb35Tx_get_tx_buffer( phw_data_t pHwData, u8 **pBuffer );
void Wb35Tx_EP2VM( phw_data_t pHwData );
void Wb35Tx_EP2VM_start( phw_data_t pHwData );
diff --git a/drivers/staging/winbond/linux/wbusb.c b/drivers/staging/winbond/linux/wbusb.c
index cbad5fb0595..f4a7875f238 100644
--- a/drivers/staging/winbond/linux/wbusb.c
+++ b/drivers/staging/winbond/linux/wbusb.c
@@ -6,42 +6,29 @@
#include "sysdef.h"
#include <net/mac80211.h>
-
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
-
-//============================================================
-// vendor ID and product ID can into here for others
-//============================================================
-static struct usb_device_id Id_Table[] =
-{
- {USB_DEVICE( 0x0416, 0x0035 )},
- {USB_DEVICE( 0x18E8, 0x6201 )},
- {USB_DEVICE( 0x18E8, 0x6206 )},
- {USB_DEVICE( 0x18E8, 0x6217 )},
- {USB_DEVICE( 0x18E8, 0x6230 )},
- {USB_DEVICE( 0x18E8, 0x6233 )},
- {USB_DEVICE( 0x1131, 0x2035 )},
- { }
+static struct usb_device_id wb35_table[] __devinitdata = {
+ {USB_DEVICE(0x0416, 0x0035)},
+ {USB_DEVICE(0x18E8, 0x6201)},
+ {USB_DEVICE(0x18E8, 0x6206)},
+ {USB_DEVICE(0x18E8, 0x6217)},
+ {USB_DEVICE(0x18E8, 0x6230)},
+ {USB_DEVICE(0x18E8, 0x6233)},
+ {USB_DEVICE(0x1131, 0x2035)},
+ { 0, }
};
-MODULE_DEVICE_TABLE(usb, Id_Table);
+MODULE_DEVICE_TABLE(usb, wb35_table);
-static struct usb_driver wb35_driver = {
- .name = "w35und",
- .probe = wb35_probe,
- .disconnect = wb35_disconnect,
- .id_table = Id_Table,
-};
-
-static const struct ieee80211_rate wbsoft_rates[] = {
+static struct ieee80211_rate wbsoft_rates[] = {
{ .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
};
-static const struct ieee80211_channel wbsoft_channels[] = {
+static struct ieee80211_channel wbsoft_channels[] = {
{ .center_freq = 2412},
};
@@ -62,9 +49,22 @@ static void wbsoft_remove_interface(struct ieee80211_hw *dev,
printk("wbsoft_remove interface called\n");
}
-static int wbsoft_nop(void)
+static void wbsoft_stop(struct ieee80211_hw *hw)
+{
+ printk(KERN_INFO "%s called\n", __func__);
+}
+
+static int wbsoft_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
{
- printk("wbsoft_nop called\n");
+ printk(KERN_INFO "%s called\n", __func__);
+ return 0;
+}
+
+static int wbsoft_get_tx_stats(struct ieee80211_hw *hw,
+ struct ieee80211_tx_queue_stats *stats)
+{
+ printk(KERN_INFO "%s called\n", __func__);
return 0;
}
@@ -105,8 +105,7 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
*total_flags = new_flags;
}
-static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
- struct ieee80211_tx_control *control)
+static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
char *buffer = kmalloc(skb->len, GFP_ATOMIC);
printk("Sending frame %d bytes\n", skb->len);
@@ -136,7 +135,7 @@ static int wbsoft_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
hal_set_current_channel(&my_adapter->sHwData, ch);
hal_set_beacon_period(&my_adapter->sHwData, conf->beacon_int);
// hal_set_cap_info(&my_adapter->sHwData, ?? );
-// hal_set_ssid(phw_data_t pHwData, PUCHAR pssid, u8 ssid_len); ??
+// hal_set_ssid(phw_data_t pHwData, u8 * pssid, u8 ssid_len); ??
hal_set_accept_broadcast(&my_adapter->sHwData, 1);
hal_set_accept_promiscuous(&my_adapter->sHwData, 1);
hal_set_accept_multicast(&my_adapter->sHwData, 1);
@@ -148,7 +147,7 @@ static int wbsoft_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
// hal_start_bss(&my_adapter->sHwData, WLAN_BSSTYPE_INFRASTRUCTURE); ??
-//void hal_set_rates(phw_data_t pHwData, PUCHAR pbss_rates,
+//void hal_set_rates(phw_data_t pHwData, u8 * pbss_rates,
// u8 length, unsigned char basic_rate_set)
return 0;
@@ -171,14 +170,14 @@ static u64 wbsoft_get_tsf(struct ieee80211_hw *dev)
static const struct ieee80211_ops wbsoft_ops = {
.tx = wbsoft_tx,
.start = wbsoft_start, /* Start can be pretty much empty as we do WbWLanInitialize() during probe? */
- .stop = wbsoft_nop,
+ .stop = wbsoft_stop,
.add_interface = wbsoft_add_interface,
.remove_interface = wbsoft_remove_interface,
.config = wbsoft_config,
.config_interface = wbsoft_config_interface,
.configure_filter = wbsoft_configure_filter,
- .get_stats = wbsoft_nop,
- .get_tx_stats = wbsoft_nop,
+ .get_stats = wbsoft_get_stats,
+ .get_tx_stats = wbsoft_get_tx_stats,
.get_tsf = wbsoft_get_tsf,
// conf_tx: hal_set_cwmin()/hal_set_cwmax;
};
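
The reason wbsoft_nop() disappears above is typing: each ieee80211_ops callback has its own prototype, so a single untyped stub cannot be assigned to .stop, .get_stats and .get_tx_stats without incompatible-pointer casts. Two correctly typed stubs against the mac80211 API of this era, with prototypes taken from the hunks themselves and hypothetical demo_* names:

#include <linux/kernel.h>
#include <net/mac80211.h>

static void demo_stop(struct ieee80211_hw *hw)
{
	printk(KERN_INFO "%s called\n", __func__);
}

static int demo_get_stats(struct ieee80211_hw *hw,
			  struct ieee80211_low_level_stats *stats)
{
	printk(KERN_INFO "%s called\n", __func__);
	return 0;
}
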
@@ -187,21 +186,6 @@ struct wbsoft_priv {
};
-int __init wb35_init(void)
-{
- printk("[w35und]driver init\n");
- return usb_register(&wb35_driver);
-}
-
-void __exit wb35_exit(void)
-{
- printk("[w35und]driver exit\n");
- usb_deregister( &wb35_driver );
-}
-
-module_init(wb35_init);
-module_exit(wb35_exit);
-
// Usb kernel subsystem will call this function when a new device is plugged into.
int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table)
{
@@ -210,7 +194,7 @@ int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table)
PWBUSB pWbUsb;
struct usb_host_interface *interface;
struct usb_endpoint_descriptor *endpoint;
- int i, ret = -1;
+ int ret = -1;
u32 ltmp;
struct usb_device *udev = interface_to_usbdev(intf);
@@ -218,114 +202,95 @@ int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table)
printk("[w35und]wb35_probe ->\n");
- do {
- for (i=0; i<(sizeof(Id_Table)/sizeof(struct usb_device_id)); i++ ) {
- if ((udev->descriptor.idVendor == Id_Table[i].idVendor) &&
- (udev->descriptor.idProduct == Id_Table[i].idProduct)) {
- printk("[w35und]Found supported hardware\n");
- break;
- }
- }
- if ((i == (sizeof(Id_Table)/sizeof(struct usb_device_id)))) {
- #ifdef _PE_USB_INI_DUMP_
- WBDEBUG(("[w35und] This is not the one we are interested about\n"));
- #endif
- return -ENODEV;
- }
-
- // 20060630.2 Check the device if it already be opened
- ret = usb_control_msg(udev, usb_rcvctrlpipe( udev, 0 ),
- 0x01, USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN,
- 0x0, 0x400, &ltmp, 4, HZ*100 );
- if( ret < 0 )
- break;
+ // 20060630.2 Check the device if it already be opened
+ ret = usb_control_msg(udev, usb_rcvctrlpipe( udev, 0 ),
+ 0x01, USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN,
+ 0x0, 0x400, &ltmp, 4, HZ*100 );
+ if (ret < 0)
+ goto error;
- ltmp = cpu_to_le32(ltmp);
- if (ltmp) // Is already initialized?
- break;
+ ltmp = cpu_to_le32(ltmp);
+ if (ltmp) // Is already initialized?
+ goto error;
+ Adapter = kzalloc(sizeof(ADAPTER), GFP_KERNEL);
- Adapter = kzalloc(sizeof(ADAPTER), GFP_KERNEL);
+ my_adapter = Adapter;
+ pWbLinux = &Adapter->WbLinux;
+ pWbUsb = &Adapter->sHwData.WbUsb;
+ pWbUsb->udev = udev;
- my_adapter = Adapter;
- pWbLinux = &Adapter->WbLinux;
- pWbUsb = &Adapter->sHwData.WbUsb;
- pWbUsb->udev = udev;
+ interface = intf->cur_altsetting;
+ endpoint = &interface->endpoint[0].desc;
- interface = intf->cur_altsetting;
- endpoint = &interface->endpoint[0].desc;
-
- if (endpoint[2].wMaxPacketSize == 512) {
- printk("[w35und] Working on USB 2.0\n");
- pWbUsb->IsUsb20 = 1;
- }
-
- if (!WbWLanInitialize(Adapter)) {
- printk("[w35und]WbWLanInitialize fail\n");
- break;
- }
+ if (endpoint[2].wMaxPacketSize == 512) {
+ printk("[w35und] Working on USB 2.0\n");
+ pWbUsb->IsUsb20 = 1;
+ }
- {
- struct wbsoft_priv *priv;
- struct ieee80211_hw *dev;
- int res;
+ if (!WbWLanInitialize(Adapter)) {
+ printk("[w35und]WbWLanInitialize fail\n");
+ goto error;
+ }
- dev = ieee80211_alloc_hw(sizeof(*priv), &wbsoft_ops);
+ {
+ struct wbsoft_priv *priv;
+ struct ieee80211_hw *dev;
+ static struct ieee80211_supported_band band;
+ int res;
- if (!dev) {
- printk("w35und: ieee80211 alloc failed\n" );
- BUG();
- }
+ dev = ieee80211_alloc_hw(sizeof(*priv), &wbsoft_ops);
- my_dev = dev;
+ if (!dev) {
+ printk("w35und: ieee80211 alloc failed\n" );
+ BUG();
+ }
- SET_IEEE80211_DEV(dev, &udev->dev);
- {
- phw_data_t pHwData = &Adapter->sHwData;
- unsigned char dev_addr[MAX_ADDR_LEN];
- hal_get_permanent_address(pHwData, dev_addr);
- SET_IEEE80211_PERM_ADDR(dev, dev_addr);
- }
+ my_dev = dev;
+ SET_IEEE80211_DEV(dev, &udev->dev);
+ {
+ phw_data_t pHwData = &Adapter->sHwData;
+ unsigned char dev_addr[MAX_ADDR_LEN];
+ hal_get_permanent_address(pHwData, dev_addr);
+ SET_IEEE80211_PERM_ADDR(dev, dev_addr);
+ }
- dev->extra_tx_headroom = 12; /* FIXME */
- dev->flags = 0;
- dev->channel_change_time = 1000;
-// dev->max_rssi = 100;
+ dev->extra_tx_headroom = 12; /* FIXME */
+ dev->flags = 0;
- dev->queues = 1;
+ dev->channel_change_time = 1000;
+// dev->max_rssi = 100;
- static struct ieee80211_supported_band band;
+ dev->queues = 1;
- band.channels = wbsoft_channels;
- band.n_channels = ARRAY_SIZE(wbsoft_channels);
- band.bitrates = wbsoft_rates;
- band.n_bitrates = ARRAY_SIZE(wbsoft_rates);
+ band.channels = wbsoft_channels;
+ band.n_channels = ARRAY_SIZE(wbsoft_channels);
+ band.bitrates = wbsoft_rates;
+ band.n_bitrates = ARRAY_SIZE(wbsoft_rates);
- dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band;
+ dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band;
#if 0
- wbsoft_modes[0].num_channels = 1;
- wbsoft_modes[0].channels = wbsoft_channels;
- wbsoft_modes[0].mode = MODE_IEEE80211B;
- wbsoft_modes[0].num_rates = ARRAY_SIZE(wbsoft_rates);
- wbsoft_modes[0].rates = wbsoft_rates;
-
- res = ieee80211_register_hwmode(dev, &wbsoft_modes[0]);
- BUG_ON(res);
+ wbsoft_modes[0].num_channels = 1;
+ wbsoft_modes[0].channels = wbsoft_channels;
+ wbsoft_modes[0].mode = MODE_IEEE80211B;
+ wbsoft_modes[0].num_rates = ARRAY_SIZE(wbsoft_rates);
+ wbsoft_modes[0].rates = wbsoft_rates;
+
+ res = ieee80211_register_hwmode(dev, &wbsoft_modes[0]);
+ BUG_ON(res);
#endif
- res = ieee80211_register_hw(dev);
- BUG_ON(res);
- }
-
- usb_set_intfdata( intf, Adapter );
-
- printk("[w35und] _probe OK\n");
- return 0;
+ res = ieee80211_register_hw(dev);
+ BUG_ON(res);
+ }
- } while(FALSE);
+ usb_set_intfdata( intf, Adapter );
+ printk("[w35und] _probe OK\n");
+ return 0;
+error:
return -ENOMEM;
}
@@ -401,4 +366,22 @@ void wb35_disconnect(struct usb_interface *intf)
}
+static struct usb_driver wb35_driver = {
+ .name = "w35und",
+ .id_table = wb35_table,
+ .probe = wb35_probe,
+ .disconnect = wb35_disconnect,
+};
+static int __init wb35_init(void)
+{
+ return usb_register(&wb35_driver);
+}
+
+static void __exit wb35_exit(void)
+{
+ usb_deregister(&wb35_driver);
+}
+
+module_init(wb35_init);
+module_exit(wb35_exit);
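
Taken together, the wbusb.c hunks move the driver to the standard USB registration layout: the ID table is terminated and exported with MODULE_DEVICE_TABLE(), probe no longer rescans the table by hand (the USB core only calls probe for IDs that already matched), and the usb_driver plus its init/exit glue sit after probe/disconnect so no forward declarations are needed. A stripped-down skeleton of that layout, with hypothetical demo_* names and one example device ID taken from the table above:

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id demo_table[] = {
	{ USB_DEVICE(0x0416, 0x0035) },
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, demo_table);

static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	return 0;	/* real driver would allocate and register here */
}

static void demo_disconnect(struct usb_interface *intf)
{
}

static struct usb_driver demo_driver = {
	.name		= "demo",
	.id_table	= demo_table,
	.probe		= demo_probe,
	.disconnect	= demo_disconnect,
};

static int __init demo_init(void)
{
	return usb_register(&demo_driver);
}

static void __exit demo_exit(void)
{
	usb_deregister(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

On later kernels the init/exit pair collapses into module_usb_driver(&demo_driver); that helper does not exist in the kernel this patch targets, so the explicit form above is the right shape here.
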
diff --git a/drivers/staging/winbond/mds.c b/drivers/staging/winbond/mds.c
index 8ce6389c413..f1de813f9c7 100644
--- a/drivers/staging/winbond/mds.c
+++ b/drivers/staging/winbond/mds.c
@@ -40,7 +40,7 @@ Mds_Tx(PADAPTER Adapter)
PMDS pMds = &Adapter->Mds;
DESCRIPTOR TxDes;
PDESCRIPTOR pTxDes = &TxDes;
- PUCHAR XmitBufAddress;
+ u8 *XmitBufAddress;
u16 XmitBufSize, PacketSize, stmp, CurrentSize, FragmentThreshold;
u8 FillIndex, TxDesIndex, FragmentCount, FillCount;
unsigned char BufferFilled = FALSE, MICAdd = 0;
@@ -90,7 +90,7 @@ Mds_Tx(PADAPTER Adapter)
BufferFilled = TRUE;
/* Leaves first u8 intact */
- memset((PUCHAR)pTxDes + 1, 0, sizeof(DESCRIPTOR) - 1);
+ memset((u8 *)pTxDes + 1, 0, sizeof(DESCRIPTOR) - 1);
TxDesIndex = pMds->TxDesIndex;//Get the current ID
pTxDes->Descriptor_ID = TxDesIndex;
@@ -229,10 +229,10 @@ Mds_SendComplete(PADAPTER Adapter, PT02_DESCRIPTOR pT02)
}
void
-Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
+Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer)
{
PMDS pMds = &Adapter->Mds;
- PUCHAR src_buffer = pDes->buffer_address[0];//931130.5.g
+ u8 *src_buffer = pDes->buffer_address[0];//931130.5.g
PT00_DESCRIPTOR pT00;
PT01_DESCRIPTOR pT01;
u16 stmp;
@@ -276,7 +276,7 @@ Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
//
// Set tx rate
//
- stmp = *(PUSHORT)(TargetBuffer+30); // 2n alignment address
+ stmp = *(u16 *)(TargetBuffer+30); // 2n alignment address
//Use basic rate
ctmp1 = ctmpf = CURRENT_TX_RATE_FOR_MNG;
@@ -326,11 +326,13 @@ Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
// The function return the 4n size of usb pk
u16
-Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
+Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer)
{
PT00_DESCRIPTOR pT00;
PMDS pMds = &Adapter->Mds;
- PUCHAR buffer, src_buffer, pctmp;
+ u8 *buffer;
+ u8 *src_buffer;
+ u8 *pctmp;
u16 Size = 0;
u16 SizeLeft, CopySize, CopyLeft, stmp;
u8 buf_index, FragmentCount = 0;
@@ -354,7 +356,7 @@ Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
SizeLeft -= CopySize;
// 1 Byte operation
- pctmp = (PUCHAR)( buffer + 8 + DOT_11_SEQUENCE_OFFSET );
+ pctmp = (u8 *)( buffer + 8 + DOT_11_SEQUENCE_OFFSET );
*pctmp &= 0xf0;
*pctmp |= FragmentCount;//931130.5.m
if( !FragmentCount )
@@ -379,7 +381,7 @@ Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
buf_index++;
buf_index %= MAX_DESCRIPTOR_BUFFER_INDEX;
} else {
- PUCHAR pctmp = pDes->buffer_address[buf_index];
+ u8 *pctmp = pDes->buffer_address[buf_index];
pctmp += CopySize;
pDes->buffer_address[buf_index] = pctmp;
pDes->buffer_size[buf_index] -= CopySize;
@@ -419,7 +421,7 @@ Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
pT00->T00_last_mpdu = 1;
pT00->T00_IsLastMpdu = 1;
- buffer = (PUCHAR)pT00 + 8; // +8 for USB hdr
+ buffer = (u8 *)pT00 + 8; // +8 for USB hdr
buffer[1] &= ~0x04; // Clear more frag bit of 802.11 frame control
pDes->FragmentCount = FragmentCount; // Update the correct fragment number
return Size;
@@ -427,7 +429,7 @@ Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
void
-Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR buffer )
+Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, u8 *buffer )
{
PT00_DESCRIPTOR pT00;
PT01_DESCRIPTOR pT01;
@@ -435,7 +437,7 @@ Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR buffer )
u8 Rate, i;
unsigned char CTS_on = FALSE, RTS_on = FALSE;
PT00_DESCRIPTOR pNextT00;
- u16 BodyLen;
+ u16 BodyLen = 0;
unsigned char boGroupAddr = FALSE;
@@ -574,7 +576,7 @@ Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR buffer )
DEFAULT_SIFSTIME*3 );
}
- ((PUSHORT)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
+ ((u16 *)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
//----20061009 add by anson's endian
pNextT00->value = cpu_to_le32(pNextT00->value);
@@ -615,7 +617,7 @@ Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR buffer )
}
}
- ((PUSHORT)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
+ ((u16 *)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
pT00->value = cpu_to_le32(pT00->value);
pT01->value = cpu_to_le32(pT01->value);
//--end 20061009 add
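
In mds.c the changes are again type renames plus one functional tweak: BodyLen in Mds_DurationSet() is now initialized, presumably to quiet a use-before-set path flagged by gcc. As a hedged side note (not something this patch does), the surviving (u16 *)buffer casts assume 2-byte alignment of the 802.11 header inside the Tx buffer; the kernel's unaligned helpers state that assumption explicitly and stay safe on strict-alignment CPUs. The helper below is purely illustrative.

#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical helper: write the Duration/ID field (bytes 2-3 of an
 * 802.11 MAC header) without relying on buffer alignment. */
static inline void demo_set_duration(u8 *hdr, u16 duration)
{
	put_unaligned_le16(duration, hdr + 2);
}
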
diff --git a/drivers/staging/winbond/mds_f.h b/drivers/staging/winbond/mds_f.h
index 651188be106..7a682d4cfbd 100644
--- a/drivers/staging/winbond/mds_f.h
+++ b/drivers/staging/winbond/mds_f.h
@@ -1,9 +1,9 @@
unsigned char Mds_initial( PADAPTER Adapter );
void Mds_Destroy( PADAPTER Adapter );
void Mds_Tx( PADAPTER Adapter );
-void Mds_HeaderCopy( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer );
-u16 Mds_BodyCopy( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer );
-void Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer );
+void Mds_HeaderCopy( PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer );
+u16 Mds_BodyCopy( PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer );
+void Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer );
void Mds_SendComplete( PADAPTER Adapter, PT02_DESCRIPTOR pT02 );
void Mds_MpduProcess( PADAPTER Adapter, PDESCRIPTOR pRxDes );
void Mds_reset_descriptor( PADAPTER Adapter );
diff --git a/drivers/staging/winbond/mds_s.h b/drivers/staging/winbond/mds_s.h
index 4738279d5f3..9df2e0936bf 100644
--- a/drivers/staging/winbond/mds_s.h
+++ b/drivers/staging/winbond/mds_s.h
@@ -86,7 +86,7 @@ typedef struct _MDS
{
// For Tx usage
u8 TxOwner[ ((MAX_USB_TX_BUFFER_NUMBER + 3) & ~0x03) ];
- PUCHAR pTxBuffer;
+ u8 *pTxBuffer;
u16 TxBufferSize[ ((MAX_USB_TX_BUFFER_NUMBER + 1) & ~0x01) ];
u8 TxDesFrom[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ];//931130.4.u // 1: MLME 2: NDIS control 3: NDIS data
u8 TxCountInBuffer[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ]; // 20060928
@@ -103,7 +103,7 @@ typedef struct _MDS
u16 TxResult[ ((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01) ];//Collect the sending result of Mpdu
u8 MicRedundant[8]; // For tmp use
- PUCHAR MicWriteAddress[2]; //The start address to fill the Mic, use 2 point due to Mic maybe fragment
+ u8 *MicWriteAddress[2]; //The start address to fill the Mic, use 2 point due to Mic maybe fragment
u16 MicWriteSize[2]; //931130.4.x
@@ -144,7 +144,7 @@ typedef struct _MDS
typedef struct _RxBuffer
{
- PUCHAR pBufferAddress; // Pointer the received data buffer.
+ u8 * pBufferAddress; // Pointer the received data buffer.
u16 BufferSize;
u8 RESERVED;
u8 BufferIndex;// Only 1 byte
@@ -176,7 +176,7 @@ typedef struct _RXLAYER1
/////////////////////////////////////////////////////////////////////////////////////////////
// For brand-new Rx system
u8 ReservedBuffer[ 2400 ];//If Buffer ID is reserved one, it must copy the data into this area
- PUCHAR ReservedBufferPoint;// Point to the next availabe address of reserved buffer
+ u8 *ReservedBufferPoint;// Point to the next availabe address of reserved buffer
}RXLAYER1, * PRXLAYER1;
diff --git a/drivers/staging/winbond/mlme_s.h b/drivers/staging/winbond/mlme_s.h
index 58094f61c03..039fd408ba6 100644
--- a/drivers/staging/winbond/mlme_s.h
+++ b/drivers/staging/winbond/mlme_s.h
@@ -125,12 +125,12 @@
typedef struct _MLME_FRAME
{
//NDIS_PACKET MLME_Packet;
- PCHAR pMMPDU;
+ s8 * pMMPDU;
u16 len;
u8 DataType;
u8 IsInUsed;
- OS_SPIN_LOCK MLMESpinLock;
+ spinlock_t MLMESpinLock;
u8 TxMMPDU[MAX_NUM_TX_MMPDU][MAX_MMPDU_SIZE];
u8 TxMMPDUInUse[ (MAX_NUM_TX_MMPDU+3) & ~0x03 ];
diff --git a/drivers/staging/winbond/mlmetxrx.c b/drivers/staging/winbond/mlmetxrx.c
index 46b091e9679..e8533b8d197 100644
--- a/drivers/staging/winbond/mlmetxrx.c
+++ b/drivers/staging/winbond/mlmetxrx.c
@@ -113,13 +113,13 @@ MLME_GetNextPacket(PADAPTER Adapter, PDESCRIPTOR pDes)
pDes->Type = Adapter->sMlmeFrame.DataType;
}
-void MLMEfreeMMPDUBuffer(PWB32_ADAPTER Adapter, PCHAR pData)
+void MLMEfreeMMPDUBuffer(PWB32_ADAPTER Adapter, s8 *pData)
{
int i;
// Reclaim the data buffer
for (i = 0; i < MAX_NUM_TX_MMPDU; i++) {
- if (pData == (PCHAR)&(Adapter->sMlmeFrame.TxMMPDU[i]))
+ if (pData == (s8 *)&(Adapter->sMlmeFrame.TxMMPDU[i]))
break;
}
if (Adapter->sMlmeFrame.TxMMPDUInUse[i])
diff --git a/drivers/staging/winbond/mlmetxrx_f.h b/drivers/staging/winbond/mlmetxrx_f.h
index d74e225be21..24cd5f308d9 100644
--- a/drivers/staging/winbond/mlmetxrx_f.h
+++ b/drivers/staging/winbond/mlmetxrx_f.h
@@ -20,7 +20,7 @@ MLMEGetMMPDUBuffer(
PWB32_ADAPTER Adapter
);
-void MLMEfreeMMPDUBuffer( PWB32_ADAPTER Adapter, PCHAR pData);
+void MLMEfreeMMPDUBuffer( PWB32_ADAPTER Adapter, s8 * pData);
void MLME_GetNextPacket( PADAPTER Adapter, PDESCRIPTOR pDes );
u8 MLMESendFrame( PWB32_ADAPTER Adapter,
@@ -42,7 +42,7 @@ MLMERcvFrame(
void
MLMEReturnPacket(
PWB32_ADAPTER Adapter,
- PUCHAR pRxBufer
+ u8 * pRxBufer
);
#ifdef _IBSS_BEACON_SEQ_STICK_
s8 SendBCNullData(PWB32_ADAPTER Adapter, u16 wIdx);
diff --git a/drivers/staging/winbond/reg.c b/drivers/staging/winbond/reg.c
index b475c7a7c42..57af5b83150 100644
--- a/drivers/staging/winbond/reg.c
+++ b/drivers/staging/winbond/reg.c
@@ -922,16 +922,16 @@ Uxx_ReadEthernetAddress( phw_data_t pHwData )
// Only unplug and plug again can make hardware read EEPROM again. 20060727
Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08000000 ); // Start EEPROM access + Read + address(0x0d)
Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
- *(PUSHORT)pHwData->PermanentMacAddress = cpu_to_le16((u16)ltmp); //20060926 anson's endian
+ *(u16 *)pHwData->PermanentMacAddress = cpu_to_le16((u16)ltmp); //20060926 anson's endian
Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08010000 ); // Start EEPROM access + Read + address(0x0d)
Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
- *(PUSHORT)(pHwData->PermanentMacAddress + 2) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
+ *(u16 *)(pHwData->PermanentMacAddress + 2) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08020000 ); // Start EEPROM access + Read + address(0x0d)
Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
- *(PUSHORT)(pHwData->PermanentMacAddress + 4) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
- *(PUSHORT)(pHwData->PermanentMacAddress + 6) = 0;
- Wb35Reg_WriteSync( pHwData, 0x03e8, cpu_to_le32(*(PULONG)pHwData->PermanentMacAddress) ); //20060926 anson's endian
- Wb35Reg_WriteSync( pHwData, 0x03ec, cpu_to_le32(*(PULONG)(pHwData->PermanentMacAddress+4)) ); //20060926 anson's endian
+ *(u16 *)(pHwData->PermanentMacAddress + 4) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
+ *(u16 *)(pHwData->PermanentMacAddress + 6) = 0;
+ Wb35Reg_WriteSync( pHwData, 0x03e8, cpu_to_le32(*(u32 *)pHwData->PermanentMacAddress) ); //20060926 anson's endian
+ Wb35Reg_WriteSync( pHwData, 0x03ec, cpu_to_le32(*(u32 *)(pHwData->PermanentMacAddress+4)) ); //20060926 anson's endian
}
@@ -1038,7 +1038,7 @@ void
RFSynthesizer_initial(phw_data_t pHwData)
{
u32 altmp[32];
- PULONG pltmp = altmp;
+ u32 * pltmp = altmp;
u32 ltmp;
u8 number=0x00; // The number of register vale
u8 i;
@@ -2358,11 +2358,11 @@ void Mxx_initial( phw_data_t pHwData )
pltmp[2] = pWb35Reg->M2C_MacControl;
// M30 BSSID
- pltmp[3] = *(PULONG)pHwData->bssid;
+ pltmp[3] = *(u32 *)pHwData->bssid;
// M34
pHwData->AID = DEFAULT_AID;
- tmp = *(PUSHORT)(pHwData->bssid+4);
+ tmp = *(u16 *)(pHwData->bssid+4);
tmp |= DEFAULT_AID << 16;
pltmp[4] = tmp;
@@ -2428,7 +2428,7 @@ void GetTxVgaFromEEPROM( phw_data_t pHwData )
{
u32 i, j, ltmp;
u16 Value[MAX_TXVGA_EEPROM];
- PUCHAR pctmp;
+ u8 *pctmp;
u8 ctmp=0;
// Get the entire TxVga setting in EEPROM
@@ -2441,7 +2441,7 @@ void GetTxVgaFromEEPROM( phw_data_t pHwData )
}
// Adjust the filed which fills with reserved value.
- pctmp = (PUCHAR)Value;
+ pctmp = (u8 *)Value;
for( i=0; i<(MAX_TXVGA_EEPROM*2); i++ )
{
if( pctmp[i] != 0xff )
@@ -2480,7 +2480,7 @@ void GetTxVgaFromEEPROM( phw_data_t pHwData )
// This function will use default TxVgaSettingInEEPROM data to calculate new TxVga.
void EEPROMTxVgaAdjust( phw_data_t pHwData ) // 20060619.5 Add
{
- PUCHAR pTxVga = pHwData->TxVgaSettingInEEPROM;
+ u8 * pTxVga = pHwData->TxVgaSettingInEEPROM;
s16 i, stmp;
//-- 2.4G -- 20060704.2 Request from Tiger
diff --git a/drivers/staging/winbond/sme_api.c b/drivers/staging/winbond/sme_api.c
index 40e93b7600e..31c9673ea86 100644
--- a/drivers/staging/winbond/sme_api.c
+++ b/drivers/staging/winbond/sme_api.c
@@ -10,4 +10,5 @@
s8 sme_get_rssi(void *pcore_data, s32 *prssi)
{
BUG();
+ return 0;
}
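
sme_get_rssi() gains a return value after the BUG(), presumably because BUG() is not guaranteed to be __noreturn (with CONFIG_BUG=n it can expand to nothing), so gcc otherwise warns that control reaches the end of a non-void function. The pattern in isolation, with a hypothetical stub name:

#include <linux/types.h>
#include <linux/bug.h>

static s8 demo_stub(void)
{
	BUG();		/* should never be reached at run time */
	return 0;	/* keeps gcc quiet when BUG() does not trap */
}
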
diff --git a/drivers/staging/winbond/sme_api.h b/drivers/staging/winbond/sme_api.h
index 016b225ca4a..745eb376bc7 100644
--- a/drivers/staging/winbond/sme_api.h
+++ b/drivers/staging/winbond/sme_api.h
@@ -208,7 +208,7 @@ s8 sme_set_tx_antenna(void *pcore_data, u32 TxAntenna);
s8 sme_set_IBSS_chan(void *pcore_data, ChanInfo chan);
//20061108 WPS
-s8 sme_set_IE_append(void *pcore_data, PUCHAR buffer, u16 buf_len);
+s8 sme_set_IE_append(void *pcore_data, u8 *buffer, u16 buf_len);
diff --git a/drivers/staging/winbond/wbhal.c b/drivers/staging/winbond/wbhal.c
index daf44224755..5d68ecec34c 100644
--- a/drivers/staging/winbond/wbhal.c
+++ b/drivers/staging/winbond/wbhal.c
@@ -1,13 +1,13 @@
#include "os_common.h"
-void hal_get_ethernet_address( phw_data_t pHwData, PUCHAR current_address )
+void hal_get_ethernet_address( phw_data_t pHwData, u8 *current_address )
{
if( pHwData->SurpriseRemove ) return;
memcpy( current_address, pHwData->CurrentMacAddress, ETH_LENGTH_OF_ADDRESS );
}
-void hal_set_ethernet_address( phw_data_t pHwData, PUCHAR current_address )
+void hal_set_ethernet_address( phw_data_t pHwData, u8 *current_address )
{
u32 ltmp[2];
@@ -15,13 +15,13 @@ void hal_set_ethernet_address( phw_data_t pHwData, PUCHAR current_address )
memcpy( pHwData->CurrentMacAddress, current_address, ETH_LENGTH_OF_ADDRESS );
- ltmp[0]= cpu_to_le32( *(PULONG)pHwData->CurrentMacAddress );
- ltmp[1]= cpu_to_le32( *(PULONG)(pHwData->CurrentMacAddress + 4) ) & 0xffff;
+ ltmp[0]= cpu_to_le32( *(u32 *)pHwData->CurrentMacAddress );
+ ltmp[1]= cpu_to_le32( *(u32 *)(pHwData->CurrentMacAddress + 4) ) & 0xffff;
Wb35Reg_BurstWrite( pHwData, 0x03e8, ltmp, 2, AUTO_INCREMENT );
}
-void hal_get_permanent_address( phw_data_t pHwData, PUCHAR pethernet_address )
+void hal_get_permanent_address( phw_data_t pHwData, u8 *pethernet_address )
{
if( pHwData->SurpriseRemove ) return;
@@ -89,7 +89,7 @@ void hal_halt(phw_data_t pHwData, void *ppa_data)
}
//---------------------------------------------------------------------------------------------------
-void hal_set_rates(phw_data_t pHwData, PUCHAR pbss_rates,
+void hal_set_rates(phw_data_t pHwData, u8 *pbss_rates,
u8 length, unsigned char basic_rate_set)
{
PWB35REG pWb35Reg = &pHwData->Wb35Reg;
@@ -158,13 +158,13 @@ void hal_set_rates(phw_data_t pHwData, PUCHAR pbss_rates,
// Fill data into support rate until buffer full
//---20060926 add by anson's endian
for (i=0; i<4; i++)
- *(PULONG)(SupportedRate+(i<<2)) = cpu_to_le32( *(PULONG)(SupportedRate+(i<<2)) );
+ *(u32 *)(SupportedRate+(i<<2)) = cpu_to_le32( *(u32 *)(SupportedRate+(i<<2)) );
//--- end 20060926 add by anson's endian
- Wb35Reg_BurstWrite( pHwData,0x087c, (PULONG)SupportedRate, 4, AUTO_INCREMENT );
- pWb35Reg->M7C_MacControl = ((PULONG)SupportedRate)[0];
- pWb35Reg->M80_MacControl = ((PULONG)SupportedRate)[1];
- pWb35Reg->M84_MacControl = ((PULONG)SupportedRate)[2];
- pWb35Reg->M88_MacControl = ((PULONG)SupportedRate)[3];
+ Wb35Reg_BurstWrite( pHwData,0x087c, (u32 *)SupportedRate, 4, AUTO_INCREMENT );
+ pWb35Reg->M7C_MacControl = ((u32 *)SupportedRate)[0];
+ pWb35Reg->M80_MacControl = ((u32 *)SupportedRate)[1];
+ pWb35Reg->M84_MacControl = ((u32 *)SupportedRate)[2];
+ pWb35Reg->M88_MacControl = ((u32 *)SupportedRate)[3];
// Fill length
tmp = Count1<<28 | Count2<<24;
@@ -206,7 +206,7 @@ void hal_set_current_channel_ex( phw_data_t pHwData, ChanInfo channel )
pWb35Reg->M28_MacControl &= ~0xff; // Clean channel information field
pWb35Reg->M28_MacControl |= channel.ChanNo;
Wb35Reg_WriteWithCallbackValue( pHwData, 0x0828, pWb35Reg->M28_MacControl,
- (PCHAR)&channel, sizeof(ChanInfo));
+ (s8 *)&channel, sizeof(ChanInfo));
}
//---------------------------------------------------------------------------------------------------
void hal_set_current_channel( phw_data_t pHwData, ChanInfo channel )
@@ -277,7 +277,7 @@ void hal_set_accept_beacon( phw_data_t pHwData, u8 enable )
Wb35Reg_Write( pHwData, 0x0800, pWb35Reg->M00_MacControl );
}
//---------------------------------------------------------------------------------------------------
-void hal_set_multicast_address( phw_data_t pHwData, PUCHAR address, u8 number )
+void hal_set_multicast_address( phw_data_t pHwData, u8 *address, u8 number )
{
PWB35REG pWb35Reg = &pHwData->Wb35Reg;
u8 Byte, Bit;
@@ -297,7 +297,7 @@ void hal_set_multicast_address( phw_data_t pHwData, PUCHAR address, u8 number )
}
// Updating register
- Wb35Reg_BurstWrite( pHwData, 0x0804, (PULONG)pWb35Reg->Multicast, 2, AUTO_INCREMENT );
+ Wb35Reg_BurstWrite( pHwData, 0x0804, (u32 *)pWb35Reg->Multicast, 2, AUTO_INCREMENT );
}
//---------------------------------------------------------------------------------------------------
u8 hal_get_accept_beacon( phw_data_t pHwData )
@@ -806,7 +806,7 @@ u8 hal_get_hw_radio_off( phw_data_t pHwData )
}
}
-unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, PULONG pValue )
+unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, u32 * pValue )
{
if( number < 0x1000 )
number += 0x1000;
diff --git a/drivers/staging/winbond/wbhal_f.h b/drivers/staging/winbond/wbhal_f.h
index fe25f97af72..ea9531ac847 100644
--- a/drivers/staging/winbond/wbhal_f.h
+++ b/drivers/staging/winbond/wbhal_f.h
@@ -16,23 +16,23 @@
//====================================================================================
// Function declaration
//====================================================================================
-void hal_remove_mapping_key( phw_data_t pHwData, PUCHAR pmac_addr );
+void hal_remove_mapping_key( phw_data_t pHwData, u8 *pmac_addr );
void hal_remove_default_key( phw_data_t pHwData, u32 index );
-unsigned char hal_set_mapping_key( phw_data_t Adapter, PUCHAR pmac_addr, u8 null_key, u8 wep_on, PUCHAR ptx_tsc, PUCHAR prx_tsc, u8 key_type, u8 key_len, PUCHAR pkey_data );
-unsigned char hal_set_default_key( phw_data_t Adapter, u8 index, u8 null_key, u8 wep_on, PUCHAR ptx_tsc, PUCHAR prx_tsc, u8 key_type, u8 key_len, PUCHAR pkey_data );
+unsigned char hal_set_mapping_key( phw_data_t Adapter, u8 *pmac_addr, u8 null_key, u8 wep_on, u8 *ptx_tsc, u8 *prx_tsc, u8 key_type, u8 key_len, u8 *pkey_data );
+unsigned char hal_set_default_key( phw_data_t Adapter, u8 index, u8 null_key, u8 wep_on, u8 *ptx_tsc, u8 *prx_tsc, u8 key_type, u8 key_len, u8 *pkey_data );
void hal_clear_all_default_key( phw_data_t pHwData );
void hal_clear_all_group_key( phw_data_t pHwData );
void hal_clear_all_mapping_key( phw_data_t pHwData );
void hal_clear_all_key( phw_data_t pHwData );
-void hal_get_ethernet_address( phw_data_t pHwData, PUCHAR current_address );
-void hal_set_ethernet_address( phw_data_t pHwData, PUCHAR current_address );
-void hal_get_permanent_address( phw_data_t pHwData, PUCHAR pethernet_address );
+void hal_get_ethernet_address( phw_data_t pHwData, u8 *current_address );
+void hal_set_ethernet_address( phw_data_t pHwData, u8 *current_address );
+void hal_get_permanent_address( phw_data_t pHwData, u8 *pethernet_address );
unsigned char hal_init_hardware( phw_data_t pHwData, PADAPTER Adapter );
void hal_set_power_save_mode( phw_data_t pHwData, unsigned char power_save, unsigned char wakeup, unsigned char dtim );
-void hal_get_power_save_mode( phw_data_t pHwData, PBOOLEAN pin_pwr_save );
+void hal_get_power_save_mode( phw_data_t pHwData, u8 *pin_pwr_save );
void hal_set_slot_time( phw_data_t pHwData, u8 type );
#define hal_set_atim_window( _A, _ATM )
-void hal_set_rates( phw_data_t pHwData, PUCHAR pbss_rates, u8 length, unsigned char basic_rate_set );
+void hal_set_rates( phw_data_t pHwData, u8 *pbss_rates, u8 length, unsigned char basic_rate_set );
#define hal_set_basic_rates( _A, _R, _L ) hal_set_rates( _A, _R, _L, TRUE )
#define hal_set_op_rates( _A, _R, _L ) hal_set_rates( _A, _R, _L, FALSE )
void hal_start_bss( phw_data_t pHwData, u8 mac_op_mode );
@@ -40,19 +40,19 @@ void hal_join_request( phw_data_t pHwData, u8 bss_type ); // 0:BSS STA 1:IBSS
void hal_stop_sync_bss( phw_data_t pHwData );
void hal_resume_sync_bss( phw_data_t pHwData);
void hal_set_aid( phw_data_t pHwData, u16 aid );
-void hal_set_bssid( phw_data_t pHwData, PUCHAR pbssid );
-void hal_get_bssid( phw_data_t pHwData, PUCHAR pbssid );
+void hal_set_bssid( phw_data_t pHwData, u8 *pbssid );
+void hal_get_bssid( phw_data_t pHwData, u8 *pbssid );
void hal_set_beacon_period( phw_data_t pHwData, u16 beacon_period );
void hal_set_listen_interval( phw_data_t pHwData, u16 listen_interval );
void hal_set_cap_info( phw_data_t pHwData, u16 capability_info );
-void hal_set_ssid( phw_data_t pHwData, PUCHAR pssid, u8 ssid_len );
+void hal_set_ssid( phw_data_t pHwData, u8 *pssid, u8 ssid_len );
void hal_set_current_channel( phw_data_t pHwData, ChanInfo channel );
void hal_set_current_channel_ex( phw_data_t pHwData, ChanInfo channel );
void hal_get_current_channel( phw_data_t pHwData, ChanInfo *channel );
void hal_set_accept_broadcast( phw_data_t pHwData, u8 enable );
void hal_set_accept_multicast( phw_data_t pHwData, u8 enable );
void hal_set_accept_beacon( phw_data_t pHwData, u8 enable );
-void hal_set_multicast_address( phw_data_t pHwData, PUCHAR address, u8 number );
+void hal_set_multicast_address( phw_data_t pHwData, u8 *address, u8 number );
u8 hal_get_accept_beacon( phw_data_t pHwData );
void hal_stop( phw_data_t pHwData );
void hal_halt( phw_data_t pHwData, void *ppa_data );
@@ -97,7 +97,7 @@ void hal_surprise_remove( phw_data_t pHwData );
void hal_rate_change( phw_data_t pHwData ); // Notify the HAL rate is changing 20060613.1
-unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, PULONG pValue );
+unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, u32 * pValue );
unsigned char hal_set_dxx_reg( phw_data_t pHwData, u16 number, u32 value );
#define hal_get_time_count( _P ) (_P->time_count/10) // return 100ms count
#define hal_detect_error( _P ) (_P->WbUsb.DetectCount)
@@ -116,7 +116,7 @@ unsigned char hal_idle( phw_data_t pHwData );
#define pa_stall_execution( _A ) //OS_SLEEP( 1 )
#define hw_get_cxx_reg( _A, _B, _C )
#define hw_set_cxx_reg( _A, _B, _C )
-#define hw_get_dxx_reg( _A, _B, _C ) hal_get_dxx_reg( _A, _B, (PULONG)_C )
+#define hw_get_dxx_reg( _A, _B, _C ) hal_get_dxx_reg( _A, _B, (u32 *)_C )
#define hw_set_dxx_reg( _A, _B, _C ) hal_set_dxx_reg( _A, _B, (u32)_C )
diff --git a/drivers/staging/winbond/wbhal_s.h b/drivers/staging/winbond/wbhal_s.h
index 5b862ff357b..2ee3f0fc1ad 100644
--- a/drivers/staging/winbond/wbhal_s.h
+++ b/drivers/staging/winbond/wbhal_s.h
@@ -461,7 +461,7 @@ typedef struct _HW_DATA_T
//=====================================================================
// Definition for 802.11
//=====================================================================
- PUCHAR bssid_pointer; // Used by hal_get_bssid for return value
+ u8 *bssid_pointer; // Used by hal_get_bssid for return value
u8 bssid[8];// Only 6 byte will be used. 8 byte is required for read buffer
u8 ssid[32];// maximum ssid length is 32 byte
@@ -486,7 +486,7 @@ typedef struct _HW_DATA_T
u32 CurrentRadioSw; // 20060320.2 0:On 1:Off
u32 CurrentRadioHw; // 20060825 0:On 1:Off
- PUCHAR power_save_point; // Used by hal_get_power_save_mode for return value
+ u8 *power_save_point; // Used by hal_get_power_save_mode for return value
u8 cwmin;
u8 desired_power_save;
u8 dtim;// Is running dtim
diff --git a/drivers/staging/winbond/wblinux.c b/drivers/staging/winbond/wblinux.c
index 2eade5a47b1..4ed45e48831 100644
--- a/drivers/staging/winbond/wblinux.c
+++ b/drivers/staging/winbond/wblinux.c
@@ -25,11 +25,11 @@ EncapAtomicInc(PADAPTER Adapter, void* pAtomic)
{
PWBLINUX pWbLinux = &Adapter->WbLinux;
u32 ltmp;
- PULONG pltmp = (PULONG)pAtomic;
- OS_SPIN_LOCK_ACQUIRED( &pWbLinux->AtomicSpinLock );
+ u32 * pltmp = (u32 *)pAtomic;
+ spin_lock_irq( &pWbLinux->AtomicSpinLock );
(*pltmp)++;
ltmp = (*pltmp);
- OS_SPIN_LOCK_RELEASED( &pWbLinux->AtomicSpinLock );
+ spin_unlock_irq( &pWbLinux->AtomicSpinLock );
return ltmp;
}
@@ -38,11 +38,11 @@ EncapAtomicDec(PADAPTER Adapter, void* pAtomic)
{
PWBLINUX pWbLinux = &Adapter->WbLinux;
u32 ltmp;
- PULONG pltmp = (PULONG)pAtomic;
- OS_SPIN_LOCK_ACQUIRED( &pWbLinux->AtomicSpinLock );
+ u32 * pltmp = (u32 *)pAtomic;
+ spin_lock_irq( &pWbLinux->AtomicSpinLock );
(*pltmp)--;
ltmp = (*pltmp);
- OS_SPIN_LOCK_RELEASED( &pWbLinux->AtomicSpinLock );
+ spin_unlock_irq( &pWbLinux->AtomicSpinLock );
return ltmp;
}
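
EncapAtomicInc()/EncapAtomicDec() above keep their spinlock-around-a-counter design, only with native spinlock calls. As a hedged aside (not something this patch does), the kernel's atomic_t covers exactly this use case without a lock; a hypothetical equivalent:

#include <linux/types.h>
#include <asm/atomic.h>		/* <linux/atomic.h> on later kernels */

static inline u32 demo_counter_inc(atomic_t *v)
{
	return atomic_inc_return(v);
}

static inline u32 demo_counter_dec(atomic_t *v)
{
	return atomic_dec_return(v);
}
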
@@ -51,8 +51,8 @@ WBLINUX_Initial(PADAPTER Adapter)
{
PWBLINUX pWbLinux = &Adapter->WbLinux;
- OS_SPIN_LOCK_ALLOCATE( &pWbLinux->SpinLock );
- OS_SPIN_LOCK_ALLOCATE( &pWbLinux->AtomicSpinLock );
+ spin_lock_init( &pWbLinux->SpinLock );
+ spin_lock_init( &pWbLinux->AtomicSpinLock );
return TRUE;
}
@@ -79,7 +79,6 @@ void
WBLINUX_Destroy(PADAPTER Adapter)
{
WBLINUX_stop( Adapter );
- OS_SPIN_LOCK_FREE( &pWbNdis->SpinLock );
#ifdef _PE_USB_INI_DUMP_
WBDEBUG(("[w35und] unregister_netdev!\n"));
#endif
@@ -142,119 +141,118 @@ unsigned char
WbWLanInitialize(PADAPTER Adapter)
{
phw_data_t pHwData;
- PUCHAR pMacAddr, pMacAddr2;
+ u8 *pMacAddr;
+ u8 *pMacAddr2;
u32 InitStep = 0;
u8 EEPROM_region;
u8 HwRadioOff;
- do {
- //
- // Setting default value for Linux
- //
- Adapter->sLocalPara.region_INF = REGION_AUTO;
- Adapter->sLocalPara.TxRateMode = RATE_AUTO;
- psLOCAL->bMacOperationMode = MODE_802_11_BG; // B/G mode
- Adapter->Mds.TxRTSThreshold = DEFAULT_RTSThreshold;
- Adapter->Mds.TxFragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD;
- hal_set_phy_type( &Adapter->sHwData, RF_WB_242_1 );
- Adapter->sLocalPara.MTUsize = MAX_ETHERNET_PACKET_SIZE;
- psLOCAL->bPreambleMode = AUTO_MODE;
- Adapter->sLocalPara.RadioOffStatus.boSwRadioOff = FALSE;
- pHwData = &Adapter->sHwData;
- hal_set_phy_type( pHwData, RF_DECIDE_BY_INF );
-
- //
- // Initial each module and variable
- //
- if (!WBLINUX_Initial(Adapter)) {
+ //
+ // Setting default value for Linux
+ //
+ Adapter->sLocalPara.region_INF = REGION_AUTO;
+ Adapter->sLocalPara.TxRateMode = RATE_AUTO;
+ psLOCAL->bMacOperationMode = MODE_802_11_BG; // B/G mode
+ Adapter->Mds.TxRTSThreshold = DEFAULT_RTSThreshold;
+ Adapter->Mds.TxFragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD;
+ hal_set_phy_type( &Adapter->sHwData, RF_WB_242_1 );
+ Adapter->sLocalPara.MTUsize = MAX_ETHERNET_PACKET_SIZE;
+ psLOCAL->bPreambleMode = AUTO_MODE;
+ Adapter->sLocalPara.RadioOffStatus.boSwRadioOff = FALSE;
+ pHwData = &Adapter->sHwData;
+ hal_set_phy_type( pHwData, RF_DECIDE_BY_INF );
+
+ //
+ // Initial each module and variable
+ //
+ if (!WBLINUX_Initial(Adapter)) {
#ifdef _PE_USB_INI_DUMP_
- WBDEBUG(("[w35und]WBNDIS initialization failed\n"));
+ WBDEBUG(("[w35und]WBNDIS initialization failed\n"));
#endif
- break;
- }
+ goto error;
+ }
- // Initial Software variable
- Adapter->sLocalPara.ShutDowned = FALSE;
-
- //added by ws for wep key error detection
- Adapter->sLocalPara.bWepKeyError= FALSE;
- Adapter->sLocalPara.bToSelfPacketReceived = FALSE;
- Adapter->sLocalPara.WepKeyDetectTimerCount= 2 * 100; /// 2 seconds
-
- // Initial USB hal
- InitStep = 1;
- pHwData = &Adapter->sHwData;
- if (!hal_init_hardware(pHwData, Adapter))
- break;
-
- EEPROM_region = hal_get_region_from_EEPROM( pHwData );
- if (EEPROM_region != REGION_AUTO)
- psLOCAL->region = EEPROM_region;
- else {
- if (psLOCAL->region_INF != REGION_AUTO)
- psLOCAL->region = psLOCAL->region_INF;
- else
- psLOCAL->region = REGION_USA; //default setting
- }
+ // Initial Software variable
+ Adapter->sLocalPara.ShutDowned = FALSE;
+
+ //added by ws for wep key error detection
+ Adapter->sLocalPara.bWepKeyError= FALSE;
+ Adapter->sLocalPara.bToSelfPacketReceived = FALSE;
+ Adapter->sLocalPara.WepKeyDetectTimerCount= 2 * 100; /// 2 seconds
+
+ // Initial USB hal
+ InitStep = 1;
+ pHwData = &Adapter->sHwData;
+ if (!hal_init_hardware(pHwData, Adapter))
+ goto error;
+
+ EEPROM_region = hal_get_region_from_EEPROM( pHwData );
+ if (EEPROM_region != REGION_AUTO)
+ psLOCAL->region = EEPROM_region;
+ else {
+ if (psLOCAL->region_INF != REGION_AUTO)
+ psLOCAL->region = psLOCAL->region_INF;
+ else
+ psLOCAL->region = REGION_USA; //default setting
+ }
- // Get Software setting flag from hal
- Adapter->sLocalPara.boAntennaDiversity = FALSE;
- if (hal_software_set(pHwData) & 0x00000001)
- Adapter->sLocalPara.boAntennaDiversity = TRUE;
-
- //
- // For TS module
- //
- InitStep = 2;
-
- // For MDS module
- InitStep = 3;
- Mds_initial(Adapter);
-
- //=======================================
- // Initialize the SME, SCAN, MLME, ROAM
- //=======================================
- InitStep = 4;
- InitStep = 5;
- InitStep = 6;
-
- // If no user-defined address in the registry, use the addresss "burned" on the NIC instead.
- pMacAddr = Adapter->sLocalPara.ThisMacAddress;
- pMacAddr2 = Adapter->sLocalPara.PermanentAddress;
- hal_get_permanent_address( pHwData, Adapter->sLocalPara.PermanentAddress );// Reading ethernet address from EEPROM
- if (OS_MEMORY_COMPARE(pMacAddr, "\x00\x00\x00\x00\x00\x00", MAC_ADDR_LENGTH )) // Is equal
- {
- memcpy( pMacAddr, pMacAddr2, MAC_ADDR_LENGTH );
- } else {
- // Set the user define MAC address
- hal_set_ethernet_address( pHwData, Adapter->sLocalPara.ThisMacAddress );
- }
+ // Get Software setting flag from hal
+ Adapter->sLocalPara.boAntennaDiversity = FALSE;
+ if (hal_software_set(pHwData) & 0x00000001)
+ Adapter->sLocalPara.boAntennaDiversity = TRUE;
+
+ //
+ // For TS module
+ //
+ InitStep = 2;
+
+ // For MDS module
+ InitStep = 3;
+ Mds_initial(Adapter);
+
+ //=======================================
+ // Initialize the SME, SCAN, MLME, ROAM
+ //=======================================
+ InitStep = 4;
+ InitStep = 5;
+ InitStep = 6;
+
+ // If no user-defined address in the registry, use the address "burned" on the NIC instead.
+ pMacAddr = Adapter->sLocalPara.ThisMacAddress;
+ pMacAddr2 = Adapter->sLocalPara.PermanentAddress;
+ hal_get_permanent_address( pHwData, Adapter->sLocalPara.PermanentAddress );// Reading ethernet address from EEPROM
+ if (OS_MEMORY_COMPARE(pMacAddr, "\x00\x00\x00\x00\x00\x00", MAC_ADDR_LENGTH )) // Is equal
+ {
+ memcpy( pMacAddr, pMacAddr2, MAC_ADDR_LENGTH );
+ } else {
+ // Set the user-defined MAC address
+ hal_set_ethernet_address( pHwData, Adapter->sLocalPara.ThisMacAddress );
+ }
- //get current antenna
- psLOCAL->bAntennaNo = hal_get_antenna_number(pHwData);
+ //get current antenna
+ psLOCAL->bAntennaNo = hal_get_antenna_number(pHwData);
#ifdef _PE_STATE_DUMP_
- WBDEBUG(("Driver init, antenna no = %d\n", psLOCAL->bAntennaNo));
+ WBDEBUG(("Driver init, antenna no = %d\n", psLOCAL->bAntennaNo));
#endif
- hal_get_hw_radio_off( pHwData );
+ hal_get_hw_radio_off( pHwData );
- // Waiting for HAL setting OK
- while (!hal_idle(pHwData))
- OS_SLEEP(10000);
+ // Waiting for HAL setting OK
+ while (!hal_idle(pHwData))
+ OS_SLEEP(10000);
- MTO_Init(Adapter);
+ MTO_Init(Adapter);
- HwRadioOff = hal_get_hw_radio_off( pHwData );
- psLOCAL->RadioOffStatus.boHwRadioOff = !!HwRadioOff;
+ HwRadioOff = hal_get_hw_radio_off( pHwData );
+ psLOCAL->RadioOffStatus.boHwRadioOff = !!HwRadioOff;
- hal_set_radio_mode( pHwData, (unsigned char)(psLOCAL->RadioOffStatus.boSwRadioOff || psLOCAL->RadioOffStatus.boHwRadioOff) );
+ hal_set_radio_mode( pHwData, (unsigned char)(psLOCAL->RadioOffStatus.boSwRadioOff || psLOCAL->RadioOffStatus.boHwRadioOff) );
- hal_driver_init_OK(pHwData) = 1; // Notify hal that the driver is ready now.
- //set a tx power for reference.....
-// sme_set_tx_power_level(Adapter, 12); FIXME?
- return TRUE;
- }
- while(FALSE);
+ hal_driver_init_OK(pHwData) = 1; // Notify hal that the driver is ready now.
+ //set a tx power for reference.....
+// sme_set_tx_power_level(Adapter, 12); FIXME?
+ return TRUE;
+error:
switch (InitStep) {
case 5:
case 4:
diff --git a/drivers/staging/winbond/wblinux_s.h b/drivers/staging/winbond/wblinux_s.h
index 97e9167ab83..fd2bb43bf3c 100644
--- a/drivers/staging/winbond/wblinux_s.h
+++ b/drivers/staging/winbond/wblinux_s.h
@@ -24,8 +24,8 @@
typedef struct _WBLINUX
{
- OS_SPIN_LOCK AtomicSpinLock;
- OS_SPIN_LOCK SpinLock;
+ spinlock_t AtomicSpinLock;
+ spinlock_t SpinLock;
u32 shutdown;
OS_ATOMIC ThreadCount;
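
The winbond hunks above drop the driver's OS_SPIN_LOCK_* wrapper macros in favour of the kernel's native spinlock API, and wblinux_s.h switches the lock fields to spinlock_t accordingly. A minimal sketch of the resulting pattern; the struct and function names here are illustrative, not taken from the driver:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct counter {
		spinlock_t lock;	/* plays the role of AtomicSpinLock above */
		u32 value;
	};

	static void counter_init(struct counter *c)
	{
		spin_lock_init(&c->lock);	/* was OS_SPIN_LOCK_ALLOCATE() */
		c->value = 0;
	}

	static u32 counter_inc(struct counter *c)
	{
		u32 v;

		spin_lock_irq(&c->lock);	/* was OS_SPIN_LOCK_ACQUIRED() */
		v = ++c->value;
		spin_unlock_irq(&c->lock);	/* was OS_SPIN_LOCK_RELEASED() */
		return v;
	}
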
diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig
index 10b1f0f634d..2425d860dca 100644
--- a/drivers/staging/wlan-ng/Kconfig
+++ b/drivers/staging/wlan-ng/Kconfig
@@ -1,6 +1,6 @@
config PRISM2_USB
tristate "Prism2.5 USB driver"
- depends on USB
+ depends on WLAN_80211 && USB
default n
---help---
This is the wlan-ng prism 2.5 USB driver for a wide range of
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index a2054639d24..0dfb8ce9aae 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -824,7 +824,7 @@ PD Record codes
#define HFA384x_CMD_MACPORT_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET(value))
#define HFA384x_CMD_ISRECL(value) ((UINT16)(HFA384x_CMD_AINFO_GET((UINT16)(value) & HFA384x_CMD_RECL)))
#define HFA384x_CMD_RECL_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET(value))
-#define HFA384x_CMD_QOS_GET(value) ((UINT16((((UINT16)(value))&((UINT16)0x3000)) >> 12))
+#define HFA384x_CMD_QOS_GET(value) ((UINT16)((((UINT16)(value))&((UINT16)0x3000)) >> 12))
#define HFA384x_CMD_QOS_SET(value) ((UINT16)((((UINT16)(value)) << 12) & 0x3000))
#define HFA384x_CMD_ISWRITE(value) ((UINT16)(HFA384x_CMD_AINFO_GET((UINT16)(value) & HFA384x_CMD_WRITE)))
#define HFA384x_CMD_WRITE_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET((UINT16)value))
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 53fe2985971..11a50c7fbfc 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -64,7 +64,6 @@
/*================================================================*/
/* Project Includes */
-#include "version.h"
#include "p80211hdr.h"
#include "p80211types.h"
#include "p80211msg.h"
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 268fd9bba1e..eac06f793d8 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -90,8 +90,6 @@
#include <linux/usb.h>
//#endif
-#include "wlan_compat.h"
-
/*================================================================*/
/* Project Includes */
diff --git a/drivers/staging/wlan-ng/wlan_compat.h b/drivers/staging/wlan-ng/wlan_compat.h
index 17026570708..59dfa8f84cb 100644
--- a/drivers/staging/wlan-ng/wlan_compat.h
+++ b/drivers/staging/wlan-ng/wlan_compat.h
@@ -245,11 +245,11 @@ typedef int64_t INT64;
# define preempt_count() (0UL)
#endif
-#define WLAN_LOG_ERROR(x,args...) printk(KERN_ERR "%s: " x , __FUNCTION__ , ##args);
+#define WLAN_LOG_ERROR(x,args...) printk(KERN_ERR "%s: " x , __func__ , ##args);
-#define WLAN_LOG_WARNING(x,args...) printk(KERN_WARNING "%s: " x , __FUNCTION__ , ##args);
+#define WLAN_LOG_WARNING(x,args...) printk(KERN_WARNING "%s: " x , __func__ , ##args);
-#define WLAN_LOG_NOTICE(x,args...) printk(KERN_NOTICE "%s: " x , __FUNCTION__ , ##args);
+#define WLAN_LOG_NOTICE(x,args...) printk(KERN_NOTICE "%s: " x , __func__ , ##args);
#define WLAN_LOG_INFO(args... ) printk(KERN_INFO args)
@@ -265,7 +265,7 @@ typedef int64_t INT64;
#define DBFENTER { if ( WLAN_DBVAR >= 5 ){ WLAN_LOG_DEBUG(3,"---->\n"); } }
#define DBFEXIT { if ( WLAN_DBVAR >= 5 ){ WLAN_LOG_DEBUG(3,"<----\n"); } }
- #define WLAN_LOG_DEBUG(l,x,args...) if ( WLAN_DBVAR >= (l)) printk(KERN_DEBUG "%s(%lu): " x , __FUNCTION__, (preempt_count() & PREEMPT_MASK), ##args );
+ #define WLAN_LOG_DEBUG(l,x,args...) if ( WLAN_DBVAR >= (l)) printk(KERN_DEBUG "%s(%lu): " x , __func__, (preempt_count() & PREEMPT_MASK), ##args );
#else
#define WLAN_ASSERT(c)
#define WLAN_HEX_DUMP( l, s, p, n)
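
The wlan_compat.h hunk above replaces the GCC-specific __FUNCTION__ with the standard C99 __func__, which expands to the name of the enclosing function. A small sketch of the same logging-macro pattern; the macro and function names are made up for illustration:

	#include <linux/kernel.h>

	#define MYDRV_WARN(fmt, args...) \
		printk(KERN_WARNING "%s: " fmt, __func__, ##args)

	static void mydrv_link_check(void)
	{
		/* prints "mydrv_link_check: link lost (retries=3)" */
		MYDRV_WARN("link lost (retries=%d)\n", 3);
	}
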
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index bcefbddeba5..289d81adfb9 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -36,7 +36,8 @@ config USB_ARCH_HAS_OHCI
default y if PXA3xx
default y if ARCH_EP93XX
default y if ARCH_AT91
- default y if ARCH_PNX4008
+ default y if ARCH_PNX4008 && I2C
+ default y if MFD_TC6393XB
# PPC:
default y if STB03xxx
default y if PPC_MPC52xx
@@ -97,6 +98,8 @@ source "drivers/usb/core/Kconfig"
source "drivers/usb/mon/Kconfig"
+source "drivers/usb/wusbcore/Kconfig"
+
source "drivers/usb/host/Kconfig"
source "drivers/usb/musb/Kconfig"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index a419c42e880..8b7c419b876 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -16,9 +16,12 @@ obj-$(CONFIG_USB_UHCI_HCD) += host/
obj-$(CONFIG_USB_SL811_HCD) += host/
obj-$(CONFIG_USB_U132_HCD) += host/
obj-$(CONFIG_USB_R8A66597_HCD) += host/
+obj-$(CONFIG_USB_HWA_HCD) += host/
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
+obj-$(CONFIG_USB_WUSB) += wusbcore/
+
obj-$(CONFIG_USB_ACM) += class/
obj-$(CONFIG_USB_PRINTER) += class/
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 76fce44c2f9..3e862401a63 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -722,6 +722,16 @@ static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_de
flush_scheduled_work();
}
+static int speedtch_pre_reset(struct usb_interface *intf)
+{
+ return 0;
+}
+
+static int speedtch_post_reset(struct usb_interface *intf)
+{
+ return 0;
+}
+
/**********
** USB **
@@ -740,6 +750,8 @@ static struct usb_driver speedtch_usb_driver = {
.name = speedtch_driver_name,
.probe = speedtch_usb_probe,
.disconnect = usbatm_usb_disconnect,
+ .pre_reset = speedtch_pre_reset,
+ .post_reset = speedtch_post_reset,
.id_table = speedtch_usb_ids
};
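
The speedtch hunk adds (empty) pre_reset/post_reset callbacks so that the USB core can reset the device in place rather than unbinding and rebinding the driver around the reset. A sketch of how a driver with real state might use these hooks; the mydrv structure, its mutex and its flag are hypothetical and not part of speedtch:

	#include <linux/mutex.h>
	#include <linux/usb.h>

	struct mydrv {				/* hypothetical per-interface state */
		struct mutex io_mutex;
		int resetting;
	};

	static int mydrv_pre_reset(struct usb_interface *intf)
	{
		struct mydrv *drv = usb_get_intfdata(intf);

		/* quiesce I/O so no URBs are in flight while the device resets */
		mutex_lock(&drv->io_mutex);
		drv->resetting = 1;
		return 0;
	}

	static int mydrv_post_reset(struct usb_interface *intf)
	{
		struct mydrv *drv = usb_get_intfdata(intf);

		/* device came back with the same descriptors; resume I/O */
		drv->resetting = 0;
		mutex_unlock(&drv->io_mutex);
		return 0;
	}
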
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index fab23ee8702..20104443081 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -849,9 +849,10 @@ static void acm_write_buffers_free(struct acm *acm)
{
int i;
struct acm_wb *wb;
+ struct usb_device *usb_dev = interface_to_usbdev(acm->control);
for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) {
- usb_buffer_free(acm->dev, acm->writesize, wb->buf, wb->dmah);
+ usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah);
}
}
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 7429f70b9d0..5a8ecc045e3 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -42,6 +42,8 @@ static struct usb_device_id wdm_ids[] = {
{ }
};
+MODULE_DEVICE_TABLE (usb, wdm_ids);
+
#define WDM_MINOR_BASE 176
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e935be7eb46..3d7793d9303 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1610,7 +1610,8 @@ int usb_external_resume_device(struct usb_device *udev)
status = usb_resume_both(udev);
udev->last_busy = jiffies;
usb_pm_unlock(udev);
- do_unbind_rebind(udev, DO_REBIND);
+ if (status == 0)
+ do_unbind_rebind(udev, DO_REBIND);
/* Now that the device is awake, we can start trying to autosuspend
* it again. */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d73ce262c36..9b3f16bd12c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3504,7 +3504,7 @@ int usb_reset_device(struct usb_device *udev)
USB_INTERFACE_BOUND)
rebind = 1;
}
- if (rebind)
+ if (ret == 0 && rebind)
usb_rebind_intf(cintf);
}
}
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 1ca1c326392..e1191b9a316 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -168,7 +168,7 @@ usb_copy_descriptors(struct usb_descriptor_header **src)
* usb_find_endpoint - find a copy of an endpoint descriptor
* @src: original vector of descriptors
* @copy: copy of @src
- * @ep: endpoint descriptor found in @src
+ * @match: endpoint descriptor found in @src
*
* This returns the copy of the @match descriptor made for @copy. Its
* intended use is to help remembering the endpoint descriptor to use
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index bcf375ca3d7..caa37c95802 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -650,7 +650,7 @@ pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
struct pxa27x_request *req;
req = kzalloc(sizeof *req, gfp_flags);
- if (!req || !_ep)
+ if (!req)
return NULL;
INIT_LIST_HEAD(&req->queue);
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 48f51b12d2e..00ba06b4475 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1894,11 +1894,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
udc->regs_info = debugfs_create_file("registers", S_IRUGO,
s3c2410_udc_debugfs_root,
udc, &s3c2410_udc_debugfs_fops);
- if (IS_ERR(udc->regs_info)) {
- dev_warn(dev, "debugfs file creation failed %ld\n",
- PTR_ERR(udc->regs_info));
- udc->regs_info = NULL;
- }
+ if (!udc->regs_info)
+ dev_warn(dev, "debugfs file creation failed\n");
}
dev_dbg(dev, "probe ok\n");
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 228797e54f9..56f592dc0b3 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -138,7 +138,6 @@ config USB_OHCI_HCD
tristate "OHCI HCD support"
depends on USB && USB_ARCH_HAS_OHCI
select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
- select I2C if ARCH_PNX4008
---help---
The Open Host Controller Interface (OHCI) is a standard for accessing
USB 1.1 host controller hardware. It does more in hardware than Intel's
@@ -305,3 +304,31 @@ config SUPERH_ON_CHIP_R8A66597
help
This driver enables support for the on-chip R8A66597 in the
SH7366 and SH7723 processors.
+
+config USB_WHCI_HCD
+ tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on PCI && USB
+ select USB_WUSB
+ select UWB_WHCI
+ help
+ A driver for PCI-based Wireless USB Host Controllers that are
+ compliant with the WHCI specification.
+
+ To compile this driver as a module, choose M here: the module
+ will be called "whci-hcd".
+
+config USB_HWA_HCD
+ tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on USB
+ select USB_WUSB
+ select UWB_HWA
+ help
+ This driver enables you to connect Wireless USB devices to
+ your system using a Host Wire Adapter USB dongle. This is a
+ UWB Radio Controller and WUSB Host Controller connected to
+ your machine via USB (specified in WUSB1.0).
+
+ To compile this driver as a module, choose M here: the module
+ will be called "hwa-hc".
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f1edda2dcfd..23be2222404 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -8,6 +8,8 @@ endif
isp1760-objs := isp1760-hcd.o isp1760-if.o
+obj-$(CONFIG_USB_WHCI_HCD) += whci/
+
obj-$(CONFIG_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
@@ -19,3 +21,4 @@ obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
+obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
new file mode 100644
index 00000000000..64be4d88df1
--- /dev/null
+++ b/drivers/usb/host/hwa-hc.c
@@ -0,0 +1,925 @@
+/*
+ * Host Wire Adapter:
+ * Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * The HWA driver is a simple layer that forwards requests to the WAHC
+ * (Wire Adapter Host Controller) or WUSBHC (Wireless USB Host
+ * Controller) layers.
+ *
+ * Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB
+ * Host Controller that is connected to your system via USB (a USB
+ * dongle that implements a USB host...). There is also a Device Wire
+ * Adapter (DWA, a Wireless USB hub) that uses the same mechanism for
+ * transferring data (it is, after all, a USB host connected via
+ * Wireless USB), so we have a common layer called Wire Adapter Host
+ * Controller that does all the hard work. The WUSBHC (Wireless USB
+ * Host Controller) is the part common to WUSB Host Controllers, the
+ * HWA and the PCI-based one, that is implemented following the WHCI
+ * spec. All these layers are implemented in ../wusbcore.
+ *
+ * The main functions are hwahc_op_urb_{en,de}queue(), which pass the
+ * job of converting a URB on to the Wire Adapter layer.
+ *
+ * Entry points:
+ *
+ * hwahc_driver_*() Driver initialization, registration and
+ * teardown.
+ *
+ * hwahc_probe() New device came up, create an instance for
+ * it [from device enumeration].
+ *
+ * hwahc_disconnect() Remove device instance [from device
+ * enumeration].
+ *
+ * [__]hwahc_op_*() Host Wire Adapter-specific functions for
+ * starting/stopping/etc (some might be made also
+ * DWA).
+ */
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/completion.h>
+#include "../wusbcore/wa-hc.h"
+#include "../wusbcore/wusbhc.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+struct hwahc {
+ struct wusbhc wusbhc; /* has to be 1st */
+ struct wahc wa;
+ u8 buffer[16]; /* for misc usb transactions */
+};
+
+/**
+ * FIXME should be wusbhc
+ *
+ * NOTE: we need to cache the Cluster ID because later...there is no
+ * way to get it :)
+ */
+static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id)
+{
+ int result;
+ struct wusbhc *wusbhc = &hwahc->wusbhc;
+ struct wahc *wa = &hwahc->wa;
+ struct device *dev = &wa->usb_iface->dev;
+
+ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_SET_CLUSTER_ID,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ cluster_id,
+ wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0, 1000 /* FIXME: arbitrary */);
+ if (result < 0)
+ dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n",
+ cluster_id, result);
+ else
+ wusbhc->cluster_id = cluster_id;
+ dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n", cluster_id);
+ return result;
+}
+
+static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
+{
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+
+ return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_SET_NUM_DNTS,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ interval << 8 | slots,
+ wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0, 1000 /* FIXME: arbitrary */);
+}
+
+/*
+ * Reset a WUSB host controller and wait for it to complete doing it.
+ *
+ * @usb_hcd: Pointer to WUSB Host Controller instance.
+ *
+ */
+static int hwahc_op_reset(struct usb_hcd *usb_hcd)
+{
+ int result;
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct device *dev = &hwahc->wa.usb_iface->dev;
+
+ d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
+ mutex_lock(&wusbhc->mutex);
+ wa_nep_disarm(&hwahc->wa);
+ result = __wa_set_feature(&hwahc->wa, WA_RESET);
+ if (result < 0) {
+ dev_err(dev, "error commanding HC to reset: %d\n", result);
+ goto error_unlock;
+ }
+ d_printf(3, dev, "reset: waiting for device to change state\n");
+ result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0);
+ if (result < 0) {
+ dev_err(dev, "error waiting for HC to reset: %d\n", result);
+ goto error_unlock;
+ }
+error_unlock:
+ mutex_unlock(&wusbhc->mutex);
+ d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
+ return result;
+}
+
+/*
+ * FIXME: break this function up
+ */
+static int hwahc_op_start(struct usb_hcd *usb_hcd)
+{
+ u8 addr;
+ int result;
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct device *dev = &hwahc->wa.usb_iface->dev;
+
+ /* Set up a Host Info WUSB Information Element */
+ d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
+ result = -ENOSPC;
+ mutex_lock(&wusbhc->mutex);
+ /* Start the numbering from the top so that the bottom
+ * range of the unauth addr space is used for devices,
+ * the top for HCs; use 0xfe - RC# */
+ addr = wusb_cluster_id_get();
+ if (addr == 0)
+ goto error_cluster_id_get;
+ result = __hwahc_set_cluster_id(hwahc, addr);
+ if (result < 0)
+ goto error_set_cluster_id;
+
+ result = wa_nep_arm(&hwahc->wa, GFP_KERNEL);
+ if (result < 0) {
+ dev_err(dev, "cannot listen to notifications: %d\n", result);
+ goto error_stop;
+ }
+ usb_hcd->uses_new_polling = 1;
+ usb_hcd->poll_rh = 1;
+ usb_hcd->state = HC_STATE_RUNNING;
+ result = 0;
+out:
+ mutex_unlock(&wusbhc->mutex);
+ d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
+ return result;
+
+error_stop:
+ __wa_stop(&hwahc->wa);
+error_set_cluster_id:
+ wusb_cluster_id_put(wusbhc->cluster_id);
+error_cluster_id_get:
+ goto out;
+
+}
+
+/*
+ * FIXME: break this function up
+ */
+static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
+{
+ int result;
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct device *dev = &hwahc->wa.usb_iface->dev;
+
+ /* Set up a Host Info WUSB Information Element */
+ d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
+ result = -ENOSPC;
+
+ result = __wa_set_feature(&hwahc->wa, WA_ENABLE);
+ if (result < 0) {
+ dev_err(dev, "error commanding HC to start: %d\n", result);
+ goto error_stop;
+ }
+ result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE);
+ if (result < 0) {
+ dev_err(dev, "error waiting for HC to start: %d\n", result);
+ goto error_stop;
+ }
+ result = 0;
+out:
+ d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
+ return result;
+
+error_stop:
+ result = __wa_clear_feature(&hwahc->wa, WA_ENABLE);
+ goto out;
+}
+
+static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__,
+ usb_hcd, hwahc, *(unsigned long *) &msg);
+ return -ENOSYS;
+}
+
+static int hwahc_op_resume(struct usb_hcd *usb_hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+ dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
+ usb_hcd, hwahc);
+ return -ENOSYS;
+}
+
+static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc)
+{
+ int result = 0;
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct device *dev = &hwahc->wa.usb_iface->dev;
+
+ d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
+ /* Nothing for now */
+ d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
+ return;
+}
+
+/*
+ * No need to abort pipes, as when this is called, all the children
+ * have been disconnected and that has done it [through
+ * usb_disable_interface() -> usb_disable_endpoint() ->
+ * hwahc_op_ep_disable() - >rpipe_ep_disable()].
+ */
+static void hwahc_op_stop(struct usb_hcd *usb_hcd)
+{
+ int result;
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ struct device *dev = &wa->usb_iface->dev;
+
+ d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
+ mutex_lock(&wusbhc->mutex);
+ wusbhc_stop(wusbhc);
+ wa_nep_disarm(&hwahc->wa);
+ result = __wa_stop(&hwahc->wa);
+ wusb_cluster_id_put(wusbhc->cluster_id);
+ mutex_unlock(&wusbhc->mutex);
+ d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
+ return;
+}
+
+static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+ dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
+ usb_hcd, hwahc);
+ return -ENOSYS;
+}
+
+static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
+ gfp_t gfp)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+ return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
+}
+
+static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb,
+ int status)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+ return wa_urb_dequeue(&hwahc->wa, urb);
+}
+
+/*
+ * Release resources allocated for an endpoint
+ *
+ * If there is an associated rpipe to this endpoint, go ahead and put it.
+ */
+static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+ rpipe_ep_disable(&hwahc->wa, ep);
+}
+
+/*
+ * Set the UWB MAS allocation for the WUSB cluster
+ *
+ * @stream_index: stream to use (-1 for cancelling the allocation)
+ * @mas: mas bitmap to use
+ */
+static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
+ const struct uwb_mas_bm *mas)
+{
+ int result;
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ struct device *dev = &wa->usb_iface->dev;
+ u8 mas_le[UWB_NUM_MAS/8];
+
+ /* Set the stream index */
+ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_SET_STREAM_IDX,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ stream_index,
+ wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0, 1000 /* FIXME: arbitrary */);
+ if (result < 0) {
+ dev_err(dev, "Cannot set WUSB stream index: %d\n", result);
+ goto out;
+ }
+ uwb_mas_bm_copy_le(mas_le, mas);
+ /* Set the MAS allocation */
+ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_SET_WUSB_MAS,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ mas_le, 32, 1000 /* FIXME: arbitrary */);
+ if (result < 0)
+ dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
+out:
+ return result;
+}
+
+/*
+ * Add an IE to the host's MMC
+ *
+ * @interval: See WUSB1.0[8.5.3.1]
+ * @repeat_cnt: See WUSB1.0[8.5.3.1]
+ * @handle: See WUSB1.0[8.5.3.1]
+ * @wuie: Pointer to the header of the WUSB IE data to add.
+ * MUST BE allocated in a kmalloc buffer (no stack or
+ * vmalloc).
+ *
+ * NOTE: the format of the WUSB IEs for MMCs is different from the
+ * normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length +
+ * Id in WUSB IEs). Standards...you gotta love'em.
+ */
+static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval,
+ u8 repeat_cnt, u8 handle,
+ struct wuie_hdr *wuie)
+{
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+
+ return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_ADD_MMC_IE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ interval << 8 | repeat_cnt,
+ handle << 8 | iface_no,
+ wuie, wuie->bLength, 1000 /* FIXME: arbitrary */);
+}
+
+/*
+ * Remove an IE from the host's MMC
+ *
+ * @handle: See WUSB1.0[8.5.3.1]
+ */
+static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
+{
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+ return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_REMOVE_MMC_IE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, handle << 8 | iface_no,
+ NULL, 0, 1000 /* FIXME: arbitrary */);
+}
+
+/*
+ * Update device information for a given fake port
+ *
+ * @port_idx: Fake port to which device is connected (wusbhc index, not
+ * USB port number).
+ */
+static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc,
+ struct wusb_dev *wusb_dev)
+{
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+ struct hwa_dev_info *dev_info;
+ int ret;
+
+ /* fill out the Device Info buffer and send it */
+ dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL);
+ if (!dev_info)
+ return -ENOMEM;
+ uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability,
+ &wusb_dev->availability);
+ dev_info->bDeviceAddress = wusb_dev->addr;
+
+ /*
+ * If the descriptors haven't been read yet, use a default PHY
+ * rate of 53.3 Mbit/s only. The correct value will be used
+ * when this is called again as part of the
+ * authentication process (which occurs after the descriptors
+ * have been read).
+ */
+ if (wusb_dev->wusb_cap_descr)
+ dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates;
+ else
+ dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53);
+
+ ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_SET_DEV_INFO,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, wusb_dev->port_idx << 8 | iface_no,
+ dev_info, sizeof(struct hwa_dev_info),
+ 1000 /* FIXME: arbitrary */);
+ kfree(dev_info);
+ return ret;
+}
+
+/*
+ * Set host's idea of which encryption (and key) method to use when
+ * talking to a device on a given port.
+ *
+ * If key is NULL, it means disable encryption for that "virtual port"
+ * (used when we disconnect).
+ */
+static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+ const void *key, size_t key_size,
+ u8 key_idx)
+{
+ int result = -ENOMEM;
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+ struct usb_key_descriptor *keyd;
+ size_t keyd_len;
+
+ keyd_len = sizeof(*keyd) + key_size;
+ keyd = kzalloc(keyd_len, GFP_KERNEL);
+ if (keyd == NULL)
+ return -ENOMEM;
+
+ keyd->bLength = keyd_len;
+ keyd->bDescriptorType = USB_DT_KEY;
+ keyd->tTKID[0] = (tkid >> 0) & 0xff;
+ keyd->tTKID[1] = (tkid >> 8) & 0xff;
+ keyd->tTKID[2] = (tkid >> 16) & 0xff;
+ memcpy(keyd->bKeyData, key, key_size);
+
+ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ USB_REQ_SET_DESCRIPTOR,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ USB_DT_KEY << 8 | key_idx,
+ port_idx << 8 | iface_no,
+ keyd, keyd_len, 1000 /* FIXME: arbitrary */);
+
+ memset(keyd, 0, keyd_len); /* clear keys etc. */
+ kfree(keyd);
+ return result;
+}
+
+/*
+ * Set host's idea of which encryption (and key) method to use when
+ * talking to a device on a given port.
+ *
+ * If key is NULL, it means disable encryption for that "virtual port"
+ * (used when we disconnect).
+ */
+static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+ const void *key, size_t key_size)
+{
+ int result = -ENOMEM;
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+ u8 encryption_value;
+
+ /* Tell the host which key to use to talk to the device */
+ if (key) {
+ u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK,
+ WUSB_KEY_INDEX_ORIGINATOR_HOST);
+
+ result = __hwahc_dev_set_key(wusbhc, port_idx, tkid,
+ key, key_size, key_idx);
+ if (result < 0)
+ goto error_set_key;
+ encryption_value = wusbhc->ccm1_etd->bEncryptionValue;
+ } else {
+ /* FIXME: this should come from wusbhc->etd[UNSECURE].value */
+ encryption_value = 0;
+ }
+
+ /* Set the encryption type for communicating with the device */
+ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ USB_REQ_SET_ENCRYPTION,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ encryption_value, port_idx << 8 | iface_no,
+ NULL, 0, 1000 /* FIXME: arbitrary */);
+ if (result < 0)
+ dev_err(wusbhc->dev, "Can't set host's WUSB encryption for "
+ "port index %u to %s (value %d): %d\n", port_idx,
+ wusb_et_name(wusbhc->ccm1_etd->bEncryptionType),
+ wusbhc->ccm1_etd->bEncryptionValue, result);
+error_set_key:
+ return result;
+}
+
+/*
+ * Set host's GTK key
+ */
+static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+ const void *key, size_t key_size)
+{
+ u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
+ WUSB_KEY_INDEX_ORIGINATOR_HOST);
+
+ return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx);
+}
+
+/*
+ * Get the Wire Adapter class-specific descriptor
+ *
+ * NOTE: this descriptor comes with the big bundled configuration
+ * descriptor that includes the interface and endpoint descriptors, so
+ * we just look for it in the cached copy kept by the USB stack.
+ *
+ * NOTE2: We convert LE fields to CPU order.
+ */
+static int wa_fill_descr(struct wahc *wa)
+{
+ int result;
+ struct device *dev = &wa->usb_iface->dev;
+ char *itr;
+ struct usb_device *usb_dev = wa->usb_dev;
+ struct usb_descriptor_header *hdr;
+ struct usb_wa_descriptor *wa_descr;
+ size_t itr_size, actconfig_idx;
+
+ actconfig_idx = (usb_dev->actconfig - usb_dev->config) /
+ sizeof(usb_dev->config[0]);
+ itr = usb_dev->rawdescriptors[actconfig_idx];
+ itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
+ while (itr_size >= sizeof(*hdr)) {
+ hdr = (struct usb_descriptor_header *) itr;
+ d_printf(3, dev, "Extra device descriptor: "
+ "type %02x/%u bytes @ %zu (%zu left)\n",
+ hdr->bDescriptorType, hdr->bLength,
+ (itr - usb_dev->rawdescriptors[actconfig_idx]),
+ itr_size);
+ if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER)
+ goto found;
+ itr += hdr->bLength;
+ itr_size -= hdr->bLength;
+ }
+ dev_err(dev, "cannot find Wire Adapter Class descriptor\n");
+ return -ENODEV;
+
+found:
+ result = -EINVAL;
+ if (hdr->bLength > itr_size) { /* is it available? */
+ dev_err(dev, "incomplete Wire Adapter Class descriptor "
+ "(%zu bytes left, %u needed)\n",
+ itr_size, hdr->bLength);
+ goto error;
+ }
+ if (hdr->bLength < sizeof(*wa->wa_descr)) {
+ dev_err(dev, "short Wire Adapter Class descriptor\n");
+ goto error;
+ }
+ wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
+ /* Make LE fields CPU order */
+ wa_descr->bcdWAVersion = le16_to_cpu(wa_descr->bcdWAVersion);
+ wa_descr->wNumRPipes = le16_to_cpu(wa_descr->wNumRPipes);
+ wa_descr->wRPipeMaxBlock = le16_to_cpu(wa_descr->wRPipeMaxBlock);
+ if (wa_descr->bcdWAVersion > 0x0100)
+ dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n",
+ (wa_descr->bcdWAVersion & 0xff00) >> 8,
+ wa_descr->bcdWAVersion & 0x00ff);
+ result = 0;
+error:
+ return result;
+}
+
+static struct hc_driver hwahc_hc_driver = {
+ .description = "hwa-hcd",
+ .product_desc = "Wireless USB HWA host controller",
+ .hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd),
+ .irq = NULL, /* FIXME */
+ .flags = HCD_USB2, /* FIXME */
+ .reset = hwahc_op_reset,
+ .start = hwahc_op_start,
+ .pci_suspend = hwahc_op_suspend,
+ .pci_resume = hwahc_op_resume,
+ .stop = hwahc_op_stop,
+ .get_frame_number = hwahc_op_get_frame_number,
+ .urb_enqueue = hwahc_op_urb_enqueue,
+ .urb_dequeue = hwahc_op_urb_dequeue,
+ .endpoint_disable = hwahc_op_endpoint_disable,
+
+ .hub_status_data = wusbhc_rh_status_data,
+ .hub_control = wusbhc_rh_control,
+ .bus_suspend = wusbhc_rh_suspend,
+ .bus_resume = wusbhc_rh_resume,
+ .start_port_reset = wusbhc_rh_start_port_reset,
+};
+
+static int hwahc_security_create(struct hwahc *hwahc)
+{
+ int result;
+ struct wusbhc *wusbhc = &hwahc->wusbhc;
+ struct usb_device *usb_dev = hwahc->wa.usb_dev;
+ struct device *dev = &usb_dev->dev;
+ struct usb_security_descriptor *secd;
+ struct usb_encryption_descriptor *etd;
+ void *itr, *top;
+ size_t itr_size, needed, bytes;
+ u8 index;
+ char buf[64];
+
+ /* Find the host's security descriptors in the config descr bundle */
+ index = (usb_dev->actconfig - usb_dev->config) /
+ sizeof(usb_dev->config[0]);
+ itr = usb_dev->rawdescriptors[index];
+ itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
+ top = itr + itr_size;
+ result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
+ le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
+ USB_DT_SECURITY, (void **) &secd);
+ if (result == -1) {
+ dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
+ return 0;
+ }
+ needed = sizeof(*secd);
+ if (top - (void *)secd < needed) {
+ dev_err(dev, "BUG? Not enough data to process security "
+ "descriptor header (%zu bytes left vs %zu needed)\n",
+ top - (void *) secd, needed);
+ return 0;
+ }
+ needed = le16_to_cpu(secd->wTotalLength);
+ if (top - (void *)secd < needed) {
+ dev_err(dev, "BUG? Not enough data to process security "
+ "descriptors (%zu bytes left vs %zu needed)\n",
+ top - (void *) secd, needed);
+ return 0;
+ }
+ /* Walk over the sec descriptors and store CCM1's on wusbhc */
+ itr = (void *) secd + sizeof(*secd);
+ top = (void *) secd + le16_to_cpu(secd->wTotalLength);
+ index = 0;
+ bytes = 0;
+ while (itr < top) {
+ etd = itr;
+ if (top - itr < sizeof(*etd)) {
+ dev_err(dev, "BUG: bad host security descriptor; "
+ "not enough data (%zu vs %zu left)\n",
+ top - itr, sizeof(*etd));
+ break;
+ }
+ if (etd->bLength < sizeof(*etd)) {
+ dev_err(dev, "BUG: bad host encryption descriptor; "
+ "descriptor is too short "
+ "(%zu vs %zu needed)\n",
+ (size_t)etd->bLength, sizeof(*etd));
+ break;
+ }
+ itr += etd->bLength;
+ bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
+ "%s (0x%02x) ",
+ wusb_et_name(etd->bEncryptionType),
+ etd->bEncryptionValue);
+ wusbhc->ccm1_etd = etd;
+ }
+ dev_info(dev, "supported encryption types: %s\n", buf);
+ if (wusbhc->ccm1_etd == NULL) {
+ dev_err(dev, "E: host doesn't support CCM-1 crypto\n");
+ return 0;
+ }
+ /* Pretty print what we support */
+ return 0;
+}
+
+static void hwahc_security_release(struct hwahc *hwahc)
+{
+ /* nothing to do here so far... */
+}
+
+static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
+{
+ int result;
+ struct device *dev = &iface->dev;
+ struct wusbhc *wusbhc = &hwahc->wusbhc;
+ struct wahc *wa = &hwahc->wa;
+ struct usb_device *usb_dev = interface_to_usbdev(iface);
+
+ wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */
+ wa->usb_iface = usb_get_intf(iface);
+ wusbhc->dev = dev;
+ wusbhc->uwb_rc = uwb_rc_get_by_grandpa(iface->dev.parent);
+ if (wusbhc->uwb_rc == NULL) {
+ result = -ENODEV;
+ dev_err(dev, "Cannot get associated UWB Host Controller\n");
+ goto error_rc_get;
+ }
+ result = wa_fill_descr(wa); /* Get the device descriptor */
+ if (result < 0)
+ goto error_fill_descriptor;
+ if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) {
+ dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB "
+ "adapter (%u ports)\n", wa->wa_descr->bNumPorts);
+ wusbhc->ports_max = USB_MAXCHILDREN;
+ } else {
+ wusbhc->ports_max = wa->wa_descr->bNumPorts;
+ }
+ wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs;
+ wusbhc->start = __hwahc_op_wusbhc_start;
+ wusbhc->stop = __hwahc_op_wusbhc_stop;
+ wusbhc->mmcie_add = __hwahc_op_mmcie_add;
+ wusbhc->mmcie_rm = __hwahc_op_mmcie_rm;
+ wusbhc->dev_info_set = __hwahc_op_dev_info_set;
+ wusbhc->bwa_set = __hwahc_op_bwa_set;
+ wusbhc->set_num_dnts = __hwahc_op_set_num_dnts;
+ wusbhc->set_ptk = __hwahc_op_set_ptk;
+ wusbhc->set_gtk = __hwahc_op_set_gtk;
+ result = hwahc_security_create(hwahc);
+ if (result < 0) {
+ dev_err(dev, "Can't initialize security: %d\n", result);
+ goto error_security_create;
+ }
+ wa->wusb = wusbhc; /* FIXME: ugly, need to fix */
+ result = wusbhc_create(&hwahc->wusbhc);
+ if (result < 0) {
+ dev_err(dev, "Can't create WUSB HC structures: %d\n", result);
+ goto error_wusbhc_create;
+ }
+ result = wa_create(&hwahc->wa, iface);
+ if (result < 0)
+ goto error_wa_create;
+ return 0;
+
+error_wa_create:
+ wusbhc_destroy(&hwahc->wusbhc);
+error_wusbhc_create:
+ /* WA Descr fill allocs no resources */
+error_security_create:
+error_fill_descriptor:
+ uwb_rc_put(wusbhc->uwb_rc);
+error_rc_get:
+ usb_put_intf(iface);
+ usb_put_dev(usb_dev);
+ return result;
+}
+
+static void hwahc_destroy(struct hwahc *hwahc)
+{
+ struct wusbhc *wusbhc = &hwahc->wusbhc;
+
+ d_fnstart(1, NULL, "(hwahc %p)\n", hwahc);
+ mutex_lock(&wusbhc->mutex);
+ __wa_destroy(&hwahc->wa);
+ wusbhc_destroy(&hwahc->wusbhc);
+ hwahc_security_release(hwahc);
+ hwahc->wusbhc.dev = NULL;
+ uwb_rc_put(wusbhc->uwb_rc);
+ usb_put_intf(hwahc->wa.usb_iface);
+ usb_put_dev(hwahc->wa.usb_dev);
+ mutex_unlock(&wusbhc->mutex);
+ d_fnend(1, NULL, "(hwahc %p) = void\n", hwahc);
+}
+
+static void hwahc_init(struct hwahc *hwahc)
+{
+ wa_init(&hwahc->wa);
+}
+
+static int hwahc_probe(struct usb_interface *usb_iface,
+ const struct usb_device_id *id)
+{
+ int result;
+ struct usb_hcd *usb_hcd;
+ struct wusbhc *wusbhc;
+ struct hwahc *hwahc;
+ struct device *dev = &usb_iface->dev;
+
+ d_fnstart(4, dev, "(%p, %p)\n", usb_iface, id);
+ result = -ENOMEM;
+ usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa");
+ if (usb_hcd == NULL) {
+ dev_err(dev, "unable to allocate instance\n");
+ goto error_alloc;
+ }
+ usb_hcd->wireless = 1;
+ usb_hcd->flags |= HCD_FLAG_SAW_IRQ;
+ wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ hwahc_init(hwahc);
+ result = hwahc_create(hwahc, usb_iface);
+ if (result < 0) {
+ dev_err(dev, "Cannot initialize internals: %d\n", result);
+ goto error_hwahc_create;
+ }
+ result = usb_add_hcd(usb_hcd, 0, 0);
+ if (result < 0) {
+ dev_err(dev, "Cannot add HCD: %d\n", result);
+ goto error_add_hcd;
+ }
+ result = wusbhc_b_create(&hwahc->wusbhc);
+ if (result < 0) {
+ dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
+ goto error_wusbhc_b_create;
+ }
+ d_fnend(4, dev, "(%p, %p) = 0\n", usb_iface, id);
+ return 0;
+
+error_wusbhc_b_create:
+ usb_remove_hcd(usb_hcd);
+error_add_hcd:
+ hwahc_destroy(hwahc);
+error_hwahc_create:
+ usb_put_hcd(usb_hcd);
+error_alloc:
+ d_fnend(4, dev, "(%p, %p) = %d\n", usb_iface, id, result);
+ return result;
+}
+
+static void hwahc_disconnect(struct usb_interface *usb_iface)
+{
+ struct usb_hcd *usb_hcd;
+ struct wusbhc *wusbhc;
+ struct hwahc *hwahc;
+
+ usb_hcd = usb_get_intfdata(usb_iface);
+ wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+ d_fnstart(1, NULL, "(hwahc %p [usb_iface %p])\n", hwahc, usb_iface);
+ wusbhc_b_destroy(&hwahc->wusbhc);
+ usb_remove_hcd(usb_hcd);
+ hwahc_destroy(hwahc);
+ usb_put_hcd(usb_hcd);
+ d_fnend(1, NULL, "(hwahc %p [usb_iface %p]) = void\n", hwahc,
+ usb_iface);
+}
+
+/* USB device IDs that we handle */
+static struct usb_device_id hwahc_id_table[] = {
+ /* FIXME: use class labels for this */
+ { USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
+ {},
+};
+MODULE_DEVICE_TABLE(usb, hwahc_id_table);
+
+static struct usb_driver hwahc_driver = {
+ .name = "hwa-hc",
+ .probe = hwahc_probe,
+ .disconnect = hwahc_disconnect,
+ .id_table = hwahc_id_table,
+};
+
+static int __init hwahc_driver_init(void)
+{
+ int result;
+ result = usb_register(&hwahc_driver);
+ if (result < 0) {
+ printk(KERN_ERR "WA-CDS: Cannot register USB driver: %d\n",
+ result);
+ goto error_usb_register;
+ }
+ return 0;
+
+error_usb_register:
+ return result;
+
+}
+module_init(hwahc_driver_init);
+
+static void __exit hwahc_driver_exit(void)
+{
+ usb_deregister(&hwahc_driver);
+}
+module_exit(hwahc_driver_exit);
+
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Host Wire Adapter USB Host Controller Driver");
+MODULE_LICENSE("GPL");
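
Nearly every hwahc_op_*() above recovers its state through the same two-step walk: the wusbhc sits in the usb_hcd's private area (hence .hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd)), and struct hwahc keeps the wusbhc as its first member so container_of() can reach the outer structure. A minimal sketch of that walk, reusing the types defined in the file above; the helper name is illustrative:

	static struct hwahc *example_hcd_to_hwahc(struct usb_hcd *usb_hcd)
	{
		/* the private area usb_create_hcd() allocated holds the wusbhc */
		struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);

		/* wusbhc is the first member of struct hwahc (see the struct above) */
		return container_of(wusbhc, struct hwahc, wusbhc);
	}
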
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8647dab0d7f..8aa3f4556a3 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1075,12 +1075,18 @@ MODULE_LICENSE ("GPL");
#define SM501_OHCI_DRIVER ohci_hcd_sm501_driver
#endif
+#ifdef CONFIG_MFD_TC6393XB
+#include "ohci-tmio.c"
+#define TMIO_OHCI_DRIVER ohci_hcd_tmio_driver
+#endif
+
#if !defined(PCI_DRIVER) && \
!defined(PLATFORM_DRIVER) && \
!defined(OF_PLATFORM_DRIVER) && \
!defined(SA1111_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && \
!defined(SM501_OHCI_DRIVER) && \
+ !defined(TMIO_OHCI_DRIVER) && \
!defined(SSB_OHCI_DRIVER)
#error "missing bus glue for ohci-hcd"
#endif
@@ -1147,13 +1153,25 @@ static int __init ohci_hcd_mod_init(void)
goto error_sm501;
#endif
+#ifdef TMIO_OHCI_DRIVER
+ retval = platform_driver_register(&TMIO_OHCI_DRIVER);
+ if (retval < 0)
+ goto error_tmio;
+#endif
+
return retval;
/* Error path */
+#ifdef TMIO_OHCI_DRIVER
+ platform_driver_unregister(&TMIO_OHCI_DRIVER);
+ error_tmio:
+#endif
#ifdef SM501_OHCI_DRIVER
+ platform_driver_unregister(&SM501_OHCI_DRIVER);
error_sm501:
#endif
#ifdef SSB_OHCI_DRIVER
+ ssb_driver_unregister(&SSB_OHCI_DRIVER);
error_ssb:
#endif
#ifdef PCI_DRIVER
@@ -1189,6 +1207,9 @@ module_init(ohci_hcd_mod_init);
static void __exit ohci_hcd_mod_exit(void)
{
+#ifdef TMIO_OHCI_DRIVER
+ platform_driver_unregister(&TMIO_OHCI_DRIVER);
+#endif
#ifdef SM501_OHCI_DRIVER
platform_driver_unregister(&SM501_OHCI_DRIVER);
#endif
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
new file mode 100644
index 00000000000..f9f134af0bd
--- /dev/null
+++ b/drivers/usb/host/ohci-tmio.c
@@ -0,0 +1,376 @@
+/*
+ * OHCI HCD(Host Controller Driver) for USB.
+ *
+ *(C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ *(C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ *(C) Copyright 2002 Hewlett-Packard Company
+ *
+ * Bus glue for Toshiba Mobile IO(TMIO) Controller's OHCI core
+ * (C) Copyright 2005 Chris Humbert <mahadri-usb@drigon.com>
+ * (C) Copyright 2007, 2008 Dmitry Baryshkov <dbaryshkov@gmail.com>
+ *
+ * This is known to work with the following variants:
+ * TC6393XB revision 3 (32kB SRAM)
+ *
+ * The TMIO's OHCI core DMAs through a small internal buffer that
+ * is directly addressable by the CPU.
+ *
+ * Written from sparse documentation from Toshiba and Sharp's driver
+ * for the 2.4 kernel,
+ * usb-ohci-tc6393.c(C) Copyright 2004 Lineo Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/sched.h>*/
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tmio.h>
+#include <linux/dma-mapping.h>
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * USB Host Controller Configuration Register
+ */
+#define CCR_REVID 0x08 /* b Revision ID */
+#define CCR_BASE 0x10 /* l USB Control Register Base Address Low */
+#define CCR_ILME 0x40 /* b Internal Local Memory Enable */
+#define CCR_PM 0x4c /* w Power Management */
+#define CCR_INTC 0x50 /* b INT Control */
+#define CCR_LMW1L 0x54 /* w Local Memory Window 1 LMADRS Low */
+#define CCR_LMW1H 0x56 /* w Local Memory Window 1 LMADRS High */
+#define CCR_LMW1BL 0x58 /* w Local Memory Window 1 Base Address Low */
+#define CCR_LMW1BH 0x5A /* w Local Memory Window 1 Base Address High */
+#define CCR_LMW2L 0x5C /* w Local Memory Window 2 LMADRS Low */
+#define CCR_LMW2H 0x5E /* w Local Memory Window 2 LMADRS High */
+#define CCR_LMW2BL 0x60 /* w Local Memory Window 2 Base Address Low */
+#define CCR_LMW2BH 0x62 /* w Local Memory Window 2 Base Address High */
+#define CCR_MISC 0xFC /* b MISC */
+
+#define CCR_PM_GKEN 0x0001
+#define CCR_PM_CKRNEN 0x0002
+#define CCR_PM_USBPW1 0x0004
+#define CCR_PM_USBPW2 0x0008
+#define CCR_PM_USBPW3 0x0008
+#define CCR_PM_PMEE 0x0100
+#define CCR_PM_PMES 0x8000
+
+/*-------------------------------------------------------------------------*/
+
+struct tmio_hcd {
+ void __iomem *ccr;
+ spinlock_t lock; /* protects RMW cycles */
+};
+
+#define hcd_to_tmio(hcd) ((struct tmio_hcd *)(hcd_to_ohci(hcd) + 1))
+
+/*-------------------------------------------------------------------------*/
+
+static void tmio_write_pm(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+ u16 pm;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tmio->lock, flags);
+
+ pm = CCR_PM_GKEN | CCR_PM_CKRNEN |
+ CCR_PM_PMEE | CCR_PM_PMES;
+
+ tmio_iowrite16(pm, tmio->ccr + CCR_PM);
+ spin_unlock_irqrestore(&tmio->lock, flags);
+}
+
+static void tmio_stop_hc(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+ u16 pm;
+
+ pm = CCR_PM_GKEN | CCR_PM_CKRNEN;
+ switch (ohci->num_ports) {
+ default:
+ dev_err(&dev->dev, "Unsupported amount of ports: %d\n", ohci->num_ports);
+ case 3:
+ pm |= CCR_PM_USBPW3;
+ case 2:
+ pm |= CCR_PM_USBPW2;
+ case 1:
+ pm |= CCR_PM_USBPW1;
+ }
+ tmio_iowrite8(0, tmio->ccr + CCR_INTC);
+ tmio_iowrite8(0, tmio->ccr + CCR_ILME);
+ tmio_iowrite16(0, tmio->ccr + CCR_BASE);
+ tmio_iowrite16(0, tmio->ccr + CCR_BASE + 2);
+ tmio_iowrite16(pm, tmio->ccr + CCR_PM);
+}
+
+static void tmio_start_hc(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+ unsigned long base = hcd->rsrc_start;
+
+ tmio_write_pm(dev);
+ tmio_iowrite16(base, tmio->ccr + CCR_BASE);
+ tmio_iowrite16(base >> 16, tmio->ccr + CCR_BASE + 2);
+ tmio_iowrite8(1, tmio->ccr + CCR_ILME);
+ tmio_iowrite8(2, tmio->ccr + CCR_INTC);
+
+ dev_info(&dev->dev, "revision %d @ 0x%08llx, irq %d\n",
+ tmio_ioread8(tmio->ccr + CCR_REVID), hcd->rsrc_start, hcd->irq);
+}
+
+static int ohci_tmio_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ if ((ret = ohci_init(ohci)) < 0)
+ return ret;
+
+ if ((ret = ohci_run(ohci)) < 0) {
+ err("can't start %s", hcd->self.bus_name);
+ ohci_stop(hcd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hc_driver ohci_tmio_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "TMIO OHCI USB Host Controller",
+ .hcd_priv_size = sizeof(struct ohci_hcd) + sizeof (struct tmio_hcd),
+
+ /* generic hardware linkage */
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY | HCD_LOCAL_MEM,
+
+ /* basic lifecycle operations */
+ .start = ohci_tmio_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /* managing i/o requests and associated device resources */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /* scheduling support */
+ .get_frame_number = ohci_get_frame,
+
+ /* root hub support */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+/*-------------------------------------------------------------------------*/
+static struct platform_driver ohci_hcd_tmio_driver;
+
+static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
+{
+ struct mfd_cell *cell = dev->dev.platform_data;
+ struct resource *regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ struct resource *config = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ struct resource *sram = platform_get_resource(dev, IORESOURCE_MEM, 2);
+ int irq = platform_get_irq(dev, 0);
+ struct tmio_hcd *tmio;
+ struct ohci_hcd *ohci;
+ struct usb_hcd *hcd;
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ if (!cell)
+ return -EINVAL;
+
+ hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev->dev.bus_id);
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto err_usb_create_hcd;
+ }
+
+ hcd->rsrc_start = regs->start;
+ hcd->rsrc_len = regs->end - regs->start + 1;
+
+ tmio = hcd_to_tmio(hcd);
+
+ spin_lock_init(&tmio->lock);
+
+ tmio->ccr = ioremap(config->start, config->end - config->start + 1);
+ if (!tmio->ccr) {
+ ret = -ENOMEM;
+ goto err_ioremap_ccr;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ ret = -ENOMEM;
+ goto err_ioremap_regs;
+ }
+
+ if (!dma_declare_coherent_memory(&dev->dev, sram->start,
+ sram->start,
+ sram->end - sram->start + 1,
+ DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)) {
+ ret = -EBUSY;
+ goto err_dma_declare;
+ }
+
+ if (cell->enable) {
+ ret = cell->enable(dev);
+ if (ret)
+ goto err_enable;
+ }
+
+ tmio_start_hc(dev);
+ ohci = hcd_to_ohci(hcd);
+ ohci_hcd_init(ohci);
+
+ ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ if (ret)
+ goto err_add_hcd;
+
+ return 0;
+
+err_add_hcd:
+ tmio_stop_hc(dev);
+ if (cell->disable)
+ cell->disable(dev);
+err_enable:
+ dma_release_declared_memory(&dev->dev);
+err_dma_declare:
+ iounmap(hcd->regs);
+err_ioremap_regs:
+ iounmap(tmio->ccr);
+err_ioremap_ccr:
+ usb_put_hcd(hcd);
+err_usb_create_hcd:
+
+ return ret;
+}
+
+static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+ struct mfd_cell *cell = dev->dev.platform_data;
+
+ usb_remove_hcd(hcd);
+ tmio_stop_hc(dev);
+ if (cell->disable)
+ cell->disable(dev);
+ dma_release_declared_memory(&dev->dev);
+ iounmap(hcd->regs);
+ iounmap(tmio->ccr);
+ usb_put_hcd(hcd);
+
+ platform_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct mfd_cell *cell = dev->dev.platform_data;
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+ unsigned long flags;
+ u8 misc;
+ int ret;
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ spin_lock_irqsave(&tmio->lock, flags);
+
+ misc = tmio_ioread8(tmio->ccr + CCR_MISC);
+ misc |= 1 << 3; /* USSUSP */
+ tmio_iowrite8(misc, tmio->ccr + CCR_MISC);
+
+ spin_unlock_irqrestore(&tmio->lock, flags);
+
+ if (cell->suspend) {
+ ret = cell->suspend(dev);
+ if (ret)
+ return ret;
+ }
+
+ hcd->state = HC_STATE_SUSPENDED;
+
+ return 0;
+}
+
+static int ohci_hcd_tmio_drv_resume(struct platform_device *dev)
+{
+ struct mfd_cell *cell = dev->dev.platform_data;
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct tmio_hcd *tmio = hcd_to_tmio(hcd);
+ unsigned long flags;
+ u8 misc;
+ int ret;
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ if (cell->resume) {
+ ret = cell->resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ tmio_start_hc(dev);
+
+ spin_lock_irqsave(&tmio->lock, flags);
+
+ misc = tmio_ioread8(tmio->ccr + CCR_MISC);
+ misc &= ~(1 << 3); /* USSUSP */
+ tmio_iowrite8(misc, tmio->ccr + CCR_MISC);
+
+ spin_unlock_irqrestore(&tmio->lock, flags);
+
+ ohci_finish_controller_resume(hcd);
+
+ return 0;
+}
+#else
+#define ohci_hcd_tmio_drv_suspend NULL
+#define ohci_hcd_tmio_drv_resume NULL
+#endif
+
+static struct platform_driver ohci_hcd_tmio_driver = {
+ .probe = ohci_hcd_tmio_drv_probe,
+ .remove = __devexit_p(ohci_hcd_tmio_drv_remove),
+ .shutdown = usb_hcd_platform_shutdown,
+ .suspend = ohci_hcd_tmio_drv_suspend,
+ .resume = ohci_hcd_tmio_drv_resume,
+ .driver = {
+ .name = "tmio-ohci",
+ .owner = THIS_MODULE,
+ },
+};
diff --git a/drivers/usb/host/whci/Kbuild b/drivers/usb/host/whci/Kbuild
new file mode 100644
index 00000000000..26a3871ea0f
--- /dev/null
+++ b/drivers/usb/host/whci/Kbuild
@@ -0,0 +1,11 @@
+obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o
+
+whci-hcd-y := \
+ asl.o \
+ hcd.o \
+ hw.o \
+ init.o \
+ int.o \
+ pzl.o \
+ qset.o \
+ wusb.o
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c
new file mode 100644
index 00000000000..4d7078e5057
--- /dev/null
+++ b/drivers/usb/host/whci/asl.c
@@ -0,0 +1,367 @@
+/*
+ * Wireless Host Controller (WHC) asynchronous schedule management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+#include <linux/usb.h>
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+#if D_LOCAL >= 4
+static void dump_asl(struct whc *whc, const char *tag)
+{
+ struct device *dev = &whc->umc->dev;
+ struct whc_qset *qset;
+
+ d_printf(4, dev, "ASL %s\n", tag);
+
+ list_for_each_entry(qset, &whc->async_list, list_node) {
+ dump_qset(qset, dev);
+ }
+}
+#else
+static inline void dump_asl(struct whc *whc, const char *tag)
+{
+}
+#endif
+
+
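+/*
+ * qset_get_next_prev - get the next and prev qsets in the hardware
+ * view of the ASL.  The software list head is skipped so both
+ * pointers always refer to real qsets (the hardware list is circular
+ * and has no head element).
+ */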
+static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
+ struct whc_qset **next, struct whc_qset **prev)
+{
+ struct list_head *n, *p;
+
+ BUG_ON(list_empty(&whc->async_list));
+
+ n = qset->list_node.next;
+ if (n == &whc->async_list)
+ n = n->next;
+ p = qset->list_node.prev;
+ if (p == &whc->async_list)
+ p = p->prev;
+
+ *next = container_of(n, struct whc_qset, list_node);
+ *prev = container_of(p, struct whc_qset, list_node);
+
+}
+
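+/*
+ * asl_qset_insert_begin - add a qset to the software view of the ASL.
+ * It is linked into the hardware list later, by scan_async_work().
+ */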
+static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
+{
+ list_move(&qset->list_node, &whc->async_list);
+ qset->in_sw_list = true;
+}
+
+static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
+{
+ struct whc_qset *next, *prev;
+
+ qset_clear(whc, qset);
+
+ /* Link into ASL. */
+ qset_get_next_prev(whc, qset, &next, &prev);
+ whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
+ whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
+ qset->in_hw_list = true;
+}
+
+static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
+{
+ struct whc_qset *prev, *next;
+
+ qset_get_next_prev(whc, qset, &next, &prev);
+
+ list_move(&qset->list_node, &whc->async_removed_list);
+ qset->in_sw_list = false;
+
+ /*
+ * No more qsets in the ASL? The caller must stop the ASL as
+ * it's no longer valid.
+ */
+ if (list_empty(&whc->async_list))
+ return;
+
+ /* Remove from ASL. */
+ whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
+ qset->in_hw_list = false;
+}
+
+/**
+ * process_qset - process any recently inactivated or halted qTDs in a
+ * qset.
+ *
+ * After inactive qTDs are removed, new qTDs can be added if the
+ * urb queue still contains URBs.
+ *
+ * Returns the schedule updates required (e.g., WHC_UPDATE_REMOVED if
+ * a qset was removed from the ASL).
+ */
+static enum whc_update process_qset(struct whc *whc, struct whc_qset *qset)
+{
+ enum whc_update update = 0;
+ uint32_t status = 0;
+
+ while (qset->ntds) {
+ struct whc_qtd *td;
+ int t;
+
+ t = qset->td_start;
+ td = &qset->qtd[qset->td_start];
+ status = le32_to_cpu(td->status);
+
+ /*
+ * Nothing to do with a still active qTD.
+ */
+ if (status & QTD_STS_ACTIVE)
+ break;
+
+ if (status & QTD_STS_HALTED) {
+ /* Ug, an error. */
+ process_halted_qtd(whc, qset, td);
+ goto done;
+ }
+
+ /* Mmm, a completed qTD. */
+ process_inactive_qtd(whc, qset, td);
+ }
+
+ update |= qset_add_qtds(whc, qset);
+
+done:
+ /*
+	 * Remove this qset from the ASL if requested, but only if it
+	 * has no qTDs.
+ */
+ if (qset->remove && qset->ntds == 0) {
+ asl_qset_remove(whc, qset);
+ update |= WHC_UPDATE_REMOVED;
+ }
+ return update;
+}
+
+void asl_start(struct whc *whc)
+{
+ struct whc_qset *qset;
+
+ qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
+
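+	/* Point the HC at the first qset before enabling the schedule. */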
+ le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);
+
+ whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
+ 1000, "start ASL");
+}
+
+void asl_stop(struct whc *whc)
+{
+ whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_ASYNC_SCHED, 0,
+ 1000, "stop ASL");
+}
+
+void asl_update(struct whc *whc, uint32_t wusbcmd)
+{
+ whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
+ wait_event(whc->async_list_wq,
+ (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0);
+}
+
+/**
+ * scan_async_work - scan the ASL for qsets to process.
+ *
+ * Process each qset in the ASL in turn and then signal the WHC that
+ * the ASL has been updated.
+ *
+ * Then start, stop or update the asynchronous schedule as required.
+ */
+void scan_async_work(struct work_struct *work)
+{
+ struct whc *whc = container_of(work, struct whc, async_work);
+ struct whc_qset *qset, *t;
+ enum whc_update update = 0;
+
+ spin_lock_irq(&whc->lock);
+
+ dump_asl(whc, "before processing");
+
+ /*
+	 * Traverse the software list backwards so new qsets can be
+ * safely inserted into the ASL without making it non-circular.
+ */
+ list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
+ if (!qset->in_hw_list) {
+ asl_qset_insert(whc, qset);
+ update |= WHC_UPDATE_ADDED;
+ }
+
+ update |= process_qset(whc, qset);
+ }
+
+ dump_asl(whc, "after processing");
+
+ spin_unlock_irq(&whc->lock);
+
+ if (update) {
+ uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
+ if (update & WHC_UPDATE_REMOVED)
+ wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
+ asl_update(whc, wusbcmd);
+ }
+
+ /*
+ * Now that the ASL is updated, complete the removal of any
+ * removed qsets.
+ */
+ spin_lock(&whc->lock);
+
+ list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
+ qset_remove_complete(whc, qset);
+ }
+
+ spin_unlock(&whc->lock);
+}
+
+/**
+ * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL).
+ * @whc: the WHCI host controller
+ * @urb: the URB to enqueue
+ * @mem_flags: flags for any memory allocations
+ *
+ * The qset for the endpoint is obtained and the urb queued on to it.
+ *
+ * Work is scheduled to update the hardware's view of the ASL.
+ */
+int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
+{
+ struct whc_qset *qset;
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ qset = get_qset(whc, urb, GFP_ATOMIC);
+ if (qset == NULL)
+ err = -ENOMEM;
+ else
+ err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
+ if (!err) {
+ usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
+ if (!qset->in_sw_list)
+ asl_qset_insert_begin(whc, qset);
+ }
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ if (!err)
+ queue_work(whc->workqueue, &whc->async_work);
+
+	return err;
+}
+
+/**
+ * asl_urb_dequeue - remove an URB (qset) from the async list.
+ * @whc: the WHCI host controller
+ * @urb: the URB to dequeue
+ * @status: the current status of the URB
+ *
+ * URBs that do not yet have qTDs can simply be removed from the software
+ * queue, otherwise the qset must be removed from the ASL so the qTDs
+ * can be removed.
+ */
+int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
+{
+ struct whc_urb *wurb = urb->hcpriv;
+ struct whc_qset *qset = wurb->qset;
+ struct whc_std *std, *t;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
+ if (ret < 0)
+ goto out;
+
+ list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+ if (std->urb == urb)
+ qset_free_std(whc, std);
+ else
+ std->qtd = NULL; /* so this std is re-added when the qset is */
+ }
+
+ asl_qset_remove(whc, qset);
+ wurb->status = status;
+ wurb->is_async = true;
+ queue_work(whc->workqueue, &wurb->dequeue_work);
+
+out:
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ return ret;
+}
+
+/**
+ * asl_qset_delete - delete a qset from the ASL
+ */
+void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+ qset->remove = 1;
+ queue_work(whc->workqueue, &whc->async_work);
+ qset_delete(whc, qset);
+}
+
+/**
+ * asl_init - initialize the asynchronous schedule list
+ *
+ * A dummy qset with no qTDs is added to the ASL to simplify removing
+ * qsets (no need to stop the ASL when the last qset is removed).
+ */
+int asl_init(struct whc *whc)
+{
+ struct whc_qset *qset;
+
+ qset = qset_alloc(whc, GFP_KERNEL);
+ if (qset == NULL)
+ return -ENOMEM;
+
+ asl_qset_insert_begin(whc, qset);
+ asl_qset_insert(whc, qset);
+
+ return 0;
+}
+
+/**
+ * asl_clean_up - free ASL resources
+ *
+ * The ASL is stopped and empty except for the dummy qset.
+ */
+void asl_clean_up(struct whc *whc)
+{
+ struct whc_qset *qset;
+
+ if (!list_empty(&whc->async_list)) {
+ qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
+ list_del(&qset->list_node);
+ qset_free(whc, qset);
+ }
+}
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
new file mode 100644
index 00000000000..ef3ad4dca94
--- /dev/null
+++ b/drivers/usb/host/whci/hcd.c
@@ -0,0 +1,339 @@
+/*
+ * Wireless Host Controller (WHC) driver.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+/*
+ * One time initialization.
+ *
+ * Nothing to do here.
+ */
+static int whc_reset(struct usb_hcd *usb_hcd)
+{
+ return 0;
+}
+
+/*
+ * Start the wireless host controller.
+ *
+ * Start device notification.
+ *
+ * Put hc into run state, set DNTS parameters.
+ */
+static int whc_start(struct usb_hcd *usb_hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u8 bcid;
+ int ret;
+
+ mutex_lock(&wusbhc->mutex);
+
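+	/* Enable the interrupts we handle. */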
+ le_writel(WUSBINTR_GEN_CMD_DONE
+ | WUSBINTR_HOST_ERR
+ | WUSBINTR_ASYNC_SCHED_SYNCED
+ | WUSBINTR_DNTS_INT
+ | WUSBINTR_ERR_INT
+ | WUSBINTR_INT,
+ whc->base + WUSBINTR);
+
+ /* set cluster ID */
+ bcid = wusb_cluster_id_get();
+ ret = whc_set_cluster_id(whc, bcid);
+ if (ret < 0)
+ goto out;
+ wusbhc->cluster_id = bcid;
+
+ /* start HC */
+ whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);
+
+ usb_hcd->uses_new_polling = 1;
+ usb_hcd->poll_rh = 1;
+ usb_hcd->state = HC_STATE_RUNNING;
+
+out:
+ mutex_unlock(&wusbhc->mutex);
+ return ret;
+}
+
+
+/*
+ * Stop the wireless host controller.
+ *
+ * Stop device notification.
+ *
+ * Wait for pending transfer to stop? Put hc into stop state?
+ */
+static void whc_stop(struct usb_hcd *usb_hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ mutex_lock(&wusbhc->mutex);
+
+ wusbhc_stop(wusbhc);
+
+ /* stop HC */
+ le_writel(0, whc->base + WUSBINTR);
+ whc_write_wusbcmd(whc, WUSBCMD_RUN, 0);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_HCHALTED, WUSBSTS_HCHALTED,
+ 100, "HC to halt");
+
+ wusb_cluster_id_put(wusbhc->cluster_id);
+
+ mutex_unlock(&wusbhc->mutex);
+}
+
+static int whc_get_frame_number(struct usb_hcd *usb_hcd)
+{
+ /* Frame numbers are not applicable to WUSB. */
+ return -ENOSYS;
+}
+
+
+/*
+ * Queue an URB to the ASL or PZL
+ */
+static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ int ret;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_INTERRUPT:
+ ret = pzl_urb_enqueue(whc, urb, mem_flags);
+ break;
+ case PIPE_ISOCHRONOUS:
+ dev_err(&whc->umc->dev, "isochronous transfers unsupported\n");
+ ret = -ENOTSUPP;
+ break;
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ default:
+ ret = asl_urb_enqueue(whc, urb, mem_flags);
+ break;
+	}
+
+ return ret;
+}
+
+/*
+ * Remove a queued URB from the ASL or PZL.
+ */
+static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ int ret;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_INTERRUPT:
+ ret = pzl_urb_dequeue(whc, urb, status);
+ break;
+ case PIPE_ISOCHRONOUS:
+ ret = -ENOTSUPP;
+ break;
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ default:
+ ret = asl_urb_dequeue(whc, urb, status);
+ break;
+	}
+
+ return ret;
+}
+
+/*
+ * Wait for all URBs to the endpoint to be completed, then delete the
+ * qset.
+ */
+static void whc_endpoint_disable(struct usb_hcd *usb_hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ struct whc_qset *qset;
+
+ qset = ep->hcpriv;
+ if (qset) {
+ ep->hcpriv = NULL;
+ if (usb_endpoint_xfer_bulk(&ep->desc)
+ || usb_endpoint_xfer_control(&ep->desc))
+ asl_qset_delete(whc, qset);
+ else
+ pzl_qset_delete(whc, qset);
+ }
+}
+
+static struct hc_driver whc_hc_driver = {
+ .description = "whci-hcd",
+ .product_desc = "Wireless host controller",
+ .hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd),
+ .irq = whc_int_handler,
+ .flags = HCD_USB2,
+
+ .reset = whc_reset,
+ .start = whc_start,
+ .stop = whc_stop,
+ .get_frame_number = whc_get_frame_number,
+ .urb_enqueue = whc_urb_enqueue,
+ .urb_dequeue = whc_urb_dequeue,
+ .endpoint_disable = whc_endpoint_disable,
+
+ .hub_status_data = wusbhc_rh_status_data,
+ .hub_control = wusbhc_rh_control,
+ .bus_suspend = wusbhc_rh_suspend,
+ .bus_resume = wusbhc_rh_resume,
+ .start_port_reset = wusbhc_rh_start_port_reset,
+};
+
+static int whc_probe(struct umc_dev *umc)
+{
+ int ret = -ENOMEM;
+ struct usb_hcd *usb_hcd;
+ struct wusbhc *wusbhc = NULL;
+ struct whc *whc = NULL;
+ struct device *dev = &umc->dev;
+
+ usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci");
+ if (usb_hcd == NULL) {
+ dev_err(dev, "unable to create hcd\n");
+ goto error;
+ }
+
+ usb_hcd->wireless = 1;
+
+ wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ whc = wusbhc_to_whc(wusbhc);
+ whc->umc = umc;
+
+ ret = whc_init(whc);
+ if (ret)
+ goto error;
+
+ wusbhc->dev = dev;
+ wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent);
+ if (!wusbhc->uwb_rc) {
+ ret = -ENODEV;
+ dev_err(dev, "cannot get radio controller\n");
+ goto error;
+ }
+
+ if (whc->n_devices > USB_MAXCHILDREN) {
+ dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n",
+ whc->n_devices);
+ wusbhc->ports_max = USB_MAXCHILDREN;
+ } else
+ wusbhc->ports_max = whc->n_devices;
+ wusbhc->mmcies_max = whc->n_mmc_ies;
+ wusbhc->start = whc_wusbhc_start;
+ wusbhc->stop = whc_wusbhc_stop;
+ wusbhc->mmcie_add = whc_mmcie_add;
+ wusbhc->mmcie_rm = whc_mmcie_rm;
+ wusbhc->dev_info_set = whc_dev_info_set;
+ wusbhc->bwa_set = whc_bwa_set;
+ wusbhc->set_num_dnts = whc_set_num_dnts;
+ wusbhc->set_ptk = whc_set_ptk;
+ wusbhc->set_gtk = whc_set_gtk;
+
+ ret = wusbhc_create(wusbhc);
+ if (ret)
+ goto error_wusbhc_create;
+
+ ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED);
+ if (ret) {
+ dev_err(dev, "cannot add HCD: %d\n", ret);
+ goto error_usb_add_hcd;
+ }
+
+ ret = wusbhc_b_create(wusbhc);
+ if (ret) {
+ dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret);
+ goto error_wusbhc_b_create;
+ }
+
+ return 0;
+
+error_wusbhc_b_create:
+ usb_remove_hcd(usb_hcd);
+error_usb_add_hcd:
+ wusbhc_destroy(wusbhc);
+error_wusbhc_create:
+ uwb_rc_put(wusbhc->uwb_rc);
+error:
+ whc_clean_up(whc);
+ if (usb_hcd)
+ usb_put_hcd(usb_hcd);
+ return ret;
+}
+
+
+static void whc_remove(struct umc_dev *umc)
+{
+ struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev);
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ if (usb_hcd) {
+ wusbhc_b_destroy(wusbhc);
+ usb_remove_hcd(usb_hcd);
+ wusbhc_destroy(wusbhc);
+ uwb_rc_put(wusbhc->uwb_rc);
+ whc_clean_up(whc);
+ usb_put_hcd(usb_hcd);
+ }
+}
+
+static struct umc_driver whci_hc_driver = {
+ .name = "whci-hcd",
+ .cap_id = UMC_CAP_ID_WHCI_WUSB_HC,
+ .probe = whc_probe,
+ .remove = whc_remove,
+};
+
+static int __init whci_hc_driver_init(void)
+{
+ return umc_driver_register(&whci_hc_driver);
+}
+module_init(whci_hc_driver_init);
+
+static void __exit whci_hc_driver_exit(void)
+{
+ umc_driver_unregister(&whci_hc_driver);
+}
+module_exit(whci_hc_driver_exit);
+
+/* PCI device IDs that we handle (so the module gets autoloaded) */
+static struct pci_device_id whci_hcd_id_table[] = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
+ { /* empty last entry */ }
+};
+MODULE_DEVICE_TABLE(pci, whci_hcd_id_table);
+
+MODULE_DESCRIPTION("WHCI Wireless USB host controller driver");
+MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/whci/hw.c b/drivers/usb/host/whci/hw.c
new file mode 100644
index 00000000000..ac86e59c122
--- /dev/null
+++ b/drivers/usb/host/whci/hw.c
@@ -0,0 +1,87 @@
+/*
+ * Wireless Host Controller (WHC) hardware access helpers.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
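+/**
+ * whc_write_wusbcmd - update selected bits in the WUSBCMD register
+ * @whc: the WHCI HC
+ * @mask: bits to change
+ * @val: new value for the masked bits
+ *
+ * The read-modify-write is done under whc->lock so concurrent updates
+ * to different bits do not race.
+ */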
+void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val)
+{
+ unsigned long flags;
+ u32 cmd;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ cmd = le_readl(whc->base + WUSBCMD);
+ cmd = (cmd & ~mask) | val;
+ le_writel(cmd, whc->base + WUSBCMD);
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+/**
+ * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register
+ * @whc: the WHCI HC
+ * @cmd: command to start.
+ * @params: parameters for the command (the WUSBGENCMDPARAMS register value).
+ * @addr: pointer to any data for the command (may be NULL).
+ * @len: length of the data (if any).
+ */
+int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len)
+{
+ unsigned long flags;
+ dma_addr_t dma_addr;
+ int t;
+
+ mutex_lock(&whc->mutex);
+
+ /* Wait for previous command to complete. */
+	t = wait_event_timeout(whc->cmd_wq,
+		(le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0,
+		msecs_to_jiffies(WHC_GENCMD_TIMEOUT_MS));
+	if (t == 0) {
+		dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n",
+			le_readl(whc->base + WUSBGENCMDSTS),
+			le_readl(whc->base + WUSBGENCMDPARAMS));
+		mutex_unlock(&whc->mutex);
+		return -ETIMEDOUT;
+	}
+
+ if (addr) {
+ memcpy(whc->gen_cmd_buf, addr, len);
+ dma_addr = whc->gen_cmd_buf_dma;
+ } else
+ dma_addr = 0;
+
+ /* Poke registers to start cmd. */
+ spin_lock_irqsave(&whc->lock, flags);
+
+ le_writel(params, whc->base + WUSBGENCMDPARAMS);
+ le_writeq(dma_addr, whc->base + WUSBGENADDR);
+
+ le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd,
+ whc->base + WUSBGENCMDSTS);
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ mutex_unlock(&whc->mutex);
+
+ return 0;
+}
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c
new file mode 100644
index 00000000000..34a783cb013
--- /dev/null
+++ b/drivers/usb/host/whci/init.c
@@ -0,0 +1,188 @@
+/*
+ * Wireless Host Controller (WHC) initialization.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+/*
+ * Reset the host controller.
+ */
+static void whc_hw_reset(struct whc *whc)
+{
+ le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0,
+ 100, "reset");
+}
+
+static void whc_hw_init_di_buf(struct whc *whc)
+{
+ int d;
+
+ /* Disable all entries in the Device Information buffer. */
+ for (d = 0; d < whc->n_devices; d++)
+ whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE;
+
+ le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR);
+}
+
+static void whc_hw_init_dn_buf(struct whc *whc)
+{
+ /* Clear the Device Notification buffer to ensure the V (valid)
+ * bits are clear. */
+ memset(whc->dn_buf, 0, 4096);
+
+ le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR);
+}
+
+int whc_init(struct whc *whc)
+{
+ u32 whcsparams;
+ int ret, i;
+ resource_size_t start, len;
+
+ spin_lock_init(&whc->lock);
+ mutex_init(&whc->mutex);
+ init_waitqueue_head(&whc->cmd_wq);
+ init_waitqueue_head(&whc->async_list_wq);
+ init_waitqueue_head(&whc->periodic_list_wq);
+ whc->workqueue = create_singlethread_workqueue(dev_name(&whc->umc->dev));
+ if (whc->workqueue == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ INIT_WORK(&whc->dn_work, whc_dn_work);
+
+ INIT_WORK(&whc->async_work, scan_async_work);
+ INIT_LIST_HEAD(&whc->async_list);
+ INIT_LIST_HEAD(&whc->async_removed_list);
+
+ INIT_WORK(&whc->periodic_work, scan_periodic_work);
+ for (i = 0; i < 5; i++)
+ INIT_LIST_HEAD(&whc->periodic_list[i]);
+ INIT_LIST_HEAD(&whc->periodic_removed_list);
+
+ /* Map HC registers. */
+ start = whc->umc->resource.start;
+ len = whc->umc->resource.end - start + 1;
+ if (!request_mem_region(start, len, "whci-hc")) {
+ dev_err(&whc->umc->dev, "can't request HC region\n");
+ ret = -EBUSY;
+ goto error;
+ }
+ whc->base_phys = start;
+ whc->base = ioremap(start, len);
+ if (!whc->base) {
+ dev_err(&whc->umc->dev, "ioremap\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ whc_hw_reset(whc);
+
+ /* Read maximum number of devices, keys and MMC IEs. */
+ whcsparams = le_readl(whc->base + WHCSPARAMS);
+ whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams);
+ whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams);
+ whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams);
+
+ dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n",
+ whc->n_devices, whc->n_keys, whc->n_mmc_ies);
+
+ whc->qset_pool = dma_pool_create("qset", &whc->umc->dev,
+ sizeof(struct whc_qset), 64, 0);
+ if (whc->qset_pool == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = asl_init(whc);
+ if (ret < 0)
+ goto error;
+ ret = pzl_init(whc);
+ if (ret < 0)
+ goto error;
+
+ /* Allocate and initialize a buffer for generic commands, the
+ Device Information buffer, and the Device Notification
+ buffer. */
+
+ whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
+ &whc->gen_cmd_buf_dma, GFP_KERNEL);
+ if (whc->gen_cmd_buf == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ whc->dn_buf = dma_alloc_coherent(&whc->umc->dev,
+ sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
+ &whc->dn_buf_dma, GFP_KERNEL);
+ if (!whc->dn_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ whc_hw_init_dn_buf(whc);
+
+ whc->di_buf = dma_alloc_coherent(&whc->umc->dev,
+ sizeof(struct di_buf_entry) * whc->n_devices,
+ &whc->di_buf_dma, GFP_KERNEL);
+ if (!whc->di_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ whc_hw_init_di_buf(whc);
+
+ return 0;
+
+error:
+ whc_clean_up(whc);
+ return ret;
+}
+
+void whc_clean_up(struct whc *whc)
+{
+ resource_size_t len;
+
+ if (whc->di_buf)
+ dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices,
+ whc->di_buf, whc->di_buf_dma);
+ if (whc->dn_buf)
+ dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
+ whc->dn_buf, whc->dn_buf_dma);
+ if (whc->gen_cmd_buf)
+ dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
+ whc->gen_cmd_buf, whc->gen_cmd_buf_dma);
+
+ pzl_clean_up(whc);
+ asl_clean_up(whc);
+
+ if (whc->qset_pool)
+ dma_pool_destroy(whc->qset_pool);
+
+ len = whc->umc->resource.end - whc->umc->resource.start + 1;
+ if (whc->base)
+ iounmap(whc->base);
+ if (whc->base_phys)
+ release_mem_region(whc->base_phys, len);
+
+ if (whc->workqueue)
+ destroy_workqueue(whc->workqueue);
+}
diff --git a/drivers/usb/host/whci/int.c b/drivers/usb/host/whci/int.c
new file mode 100644
index 00000000000..fce01174aa9
--- /dev/null
+++ b/drivers/usb/host/whci/int.c
@@ -0,0 +1,95 @@
+/*
+ * Wireless Host Controller (WHC) interrupt handling.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/uwb/umc.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
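+/*
+ * A transfer completed or failed; let the schedule scanners pick up
+ * the finished qTDs.
+ */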
+static void transfer_done(struct whc *whc)
+{
+ queue_work(whc->workqueue, &whc->async_work);
+ queue_work(whc->workqueue, &whc->periodic_work);
+}
+
+irqreturn_t whc_int_handler(struct usb_hcd *hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 sts;
+
+ sts = le_readl(whc->base + WUSBSTS);
+ if (!(sts & WUSBSTS_INT_MASK))
+ return IRQ_NONE;
+ le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS);
+
+ if (sts & WUSBSTS_GEN_CMD_DONE)
+ wake_up(&whc->cmd_wq);
+
+ if (sts & WUSBSTS_HOST_ERR)
+ dev_err(&whc->umc->dev, "FIXME: host system error\n");
+
+ if (sts & WUSBSTS_ASYNC_SCHED_SYNCED)
+ wake_up(&whc->async_list_wq);
+
+ if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED)
+ wake_up(&whc->periodic_list_wq);
+
+ if (sts & WUSBSTS_DNTS_INT)
+ queue_work(whc->workqueue, &whc->dn_work);
+
+ /*
+ * A transfer completed (see [WHCI] section 4.7.1.2 for when
+ * this occurs).
+ */
+ if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT))
+ transfer_done(whc);
+
+ return IRQ_HANDLED;
+}
+
+static int process_dn_buf(struct whc *whc)
+{
+ struct wusbhc *wusbhc = &whc->wusbhc;
+ struct dn_buf_entry *dn;
+ int processed = 0;
+
+ for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) {
+ if (dn->status & WHC_DN_STATUS_VALID) {
+ wusbhc_handle_dn(wusbhc, dn->src_addr,
+ (struct wusb_dn_hdr *)dn->dn_data,
+ dn->msg_size);
+ dn->status &= ~WHC_DN_STATUS_VALID;
+ processed++;
+ }
+ }
+ return processed;
+}
+
+void whc_dn_work(struct work_struct *work)
+{
+ struct whc *whc = container_of(work, struct whc, dn_work);
+ int processed;
+
+ do {
+ processed = process_dn_buf(whc);
+ } while (processed);
+}
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c
new file mode 100644
index 00000000000..8d62df0c330
--- /dev/null
+++ b/drivers/usb/host/whci/pzl.c
@@ -0,0 +1,398 @@
+/*
+ * Wireless Host Controller (WHC) periodic schedule management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+#include <linux/usb.h>
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+#if D_LOCAL >= 4
+static void dump_pzl(struct whc *whc, const char *tag)
+{
+ struct device *dev = &whc->umc->dev;
+ struct whc_qset *qset;
+ int period = 0;
+
+ d_printf(4, dev, "PZL %s\n", tag);
+
+ for (period = 0; period < 5; period++) {
+ d_printf(4, dev, "Period %d\n", period);
+ list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
+ dump_qset(qset, dev);
+ }
+ }
+}
+#else
+static inline void dump_pzl(struct whc *whc, const char *tag)
+{
+}
+#endif
+
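+/*
+ * Update the PZL entries that link to the head qset for the given
+ * period.  Period N qsets are pointed to by every 2^(N+1)th entry of
+ * the 16 entry periodic zone list, starting at entry 2^N - 1.
+ */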
+static void update_pzl_pointers(struct whc *whc, int period, u64 addr)
+{
+ switch (period) {
+ case 0:
+ whc_qset_set_link_ptr(&whc->pz_list[0], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[2], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[4], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[6], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[8], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[10], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[12], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[14], addr);
+ break;
+ case 1:
+ whc_qset_set_link_ptr(&whc->pz_list[1], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[5], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[9], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[13], addr);
+ break;
+ case 2:
+ whc_qset_set_link_ptr(&whc->pz_list[3], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[11], addr);
+ break;
+ case 3:
+ whc_qset_set_link_ptr(&whc->pz_list[7], addr);
+ break;
+ case 4:
+ whc_qset_set_link_ptr(&whc->pz_list[15], addr);
+ break;
+ }
+}
+
+/*
+ * Return the 'period' to use for this qset.  The endpoint's minimum
+ * interval is used so that, whatever URBs are submitted, the device
+ * is polled often enough.
+ */
+static int qset_get_period(struct whc *whc, struct whc_qset *qset)
+{
+ uint8_t bInterval = qset->ep->desc.bInterval;
+
+ if (bInterval < 6)
+ bInterval = 6;
+ if (bInterval > 10)
+ bInterval = 10;
+ return bInterval - 6;
+}
+
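+/*
+ * Add a qset to the software view of the PZL for its period; it is
+ * linked into the hardware PZL later, by scan_periodic_work().
+ */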
+static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset)
+{
+ int period;
+
+ period = qset_get_period(whc, qset);
+
+ qset_clear(whc, qset);
+ list_move(&qset->list_node, &whc->periodic_list[period]);
+ qset->in_sw_list = true;
+}
+
+static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset)
+{
+ list_move(&qset->list_node, &whc->periodic_removed_list);
+ qset->in_hw_list = false;
+ qset->in_sw_list = false;
+}
+
+/**
+ * pzl_process_qset - process any recently inactivated or halted qTDs
+ * in a qset.
+ *
+ * After inactive qTDs are removed, new qTDs can be added if the
+ * urb queue still contains URBs.
+ *
+ * Returns the schedule updates required.
+ */
+static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
+{
+ enum whc_update update = 0;
+ uint32_t status = 0;
+
+ while (qset->ntds) {
+ struct whc_qtd *td;
+ int t;
+
+ t = qset->td_start;
+ td = &qset->qtd[qset->td_start];
+ status = le32_to_cpu(td->status);
+
+ /*
+ * Nothing to do with a still active qTD.
+ */
+ if (status & QTD_STS_ACTIVE)
+ break;
+
+ if (status & QTD_STS_HALTED) {
+ /* Ug, an error. */
+ process_halted_qtd(whc, qset, td);
+ goto done;
+ }
+
+ /* Mmm, a completed qTD. */
+ process_inactive_qtd(whc, qset, td);
+ }
+
+ update |= qset_add_qtds(whc, qset);
+
+done:
+ /*
+ * If there are no qTDs in this qset, remove it from the PZL.
+ */
+ if (qset->remove && qset->ntds == 0) {
+ pzl_qset_remove(whc, qset);
+ update |= WHC_UPDATE_REMOVED;
+ }
+
+ return update;
+}
+
+/**
+ * pzl_start - start the periodic schedule
+ * @whc: the WHCI host controller
+ *
+ * The PZL must be valid (e.g., all entries in the list should have
+ * the T bit set).
+ */
+void pzl_start(struct whc *whc)
+{
+ le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
+
+ whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED,
+ 1000, "start PZL");
+}
+
+/**
+ * pzl_stop - stop the periodic schedule
+ * @whc: the WHCI host controller
+ */
+void pzl_stop(struct whc *whc)
+{
+ whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_PERIODIC_SCHED, 0,
+ 1000, "stop PZL");
+}
+
+void pzl_update(struct whc *whc, uint32_t wusbcmd)
+{
+ whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
+ wait_event(whc->periodic_list_wq,
+ (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0);
+}
+
+static void update_pzl_hw_view(struct whc *whc)
+{
+ struct whc_qset *qset, *t;
+ int period;
+ u64 tmp_qh = 0;
+
+ for (period = 0; period < 5; period++) {
+ list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
+ whc_qset_set_link_ptr(&qset->qh.link, tmp_qh);
+ tmp_qh = qset->qset_dma;
+ qset->in_hw_list = true;
+ }
+ update_pzl_pointers(whc, period, tmp_qh);
+ }
+}
+
+/**
+ * scan_periodic_work - scan the PZL for qsets to process.
+ *
+ * Process each qset in the PZL in turn and then signal the WHC that
+ * the PZL has been updated.
+ *
+ * Then start, stop or update the periodic schedule as required.
+ */
+void scan_periodic_work(struct work_struct *work)
+{
+ struct whc *whc = container_of(work, struct whc, periodic_work);
+ struct whc_qset *qset, *t;
+ enum whc_update update = 0;
+ int period;
+
+ spin_lock_irq(&whc->lock);
+
+ dump_pzl(whc, "before processing");
+
+ for (period = 4; period >= 0; period--) {
+ list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
+ if (!qset->in_hw_list)
+ update |= WHC_UPDATE_ADDED;
+ update |= pzl_process_qset(whc, qset);
+ }
+ }
+
+ if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED))
+ update_pzl_hw_view(whc);
+
+ dump_pzl(whc, "after processing");
+
+ spin_unlock_irq(&whc->lock);
+
+ if (update) {
+ uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB;
+ if (update & WHC_UPDATE_REMOVED)
+ wusbcmd |= WUSBCMD_PERIODIC_QSET_RM;
+ pzl_update(whc, wusbcmd);
+ }
+
+ /*
+ * Now that the PZL is updated, complete the removal of any
+ * removed qsets.
+ */
+ spin_lock(&whc->lock);
+
+ list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
+ qset_remove_complete(whc, qset);
+ }
+
+ spin_unlock(&whc->lock);
+}
+
+/**
+ * pzl_urb_enqueue - queue an URB onto the periodic list (PZL)
+ * @whc: the WHCI host controller
+ * @urb: the URB to enqueue
+ * @mem_flags: flags for any memory allocations
+ *
+ * The qset for the endpoint is obtained and the urb queued on to it.
+ *
+ * Work is scheduled to update the hardware's view of the PZL.
+ */
+int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
+{
+ struct whc_qset *qset;
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ qset = get_qset(whc, urb, GFP_ATOMIC);
+ if (qset == NULL)
+ err = -ENOMEM;
+ else
+ err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
+ if (!err) {
+ usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
+ if (!qset->in_sw_list)
+ qset_insert_in_sw_list(whc, qset);
+ }
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ if (!err)
+ queue_work(whc->workqueue, &whc->periodic_work);
+
+	return err;
+}
+
+/**
+ * pzl_urb_dequeue - remove an URB (qset) from the periodic list
+ * @whc: the WHCI host controller
+ * @urb: the URB to dequeue
+ * @status: the current status of the URB
+ *
+ * URBs that do not yet have qTDs can simply be removed from the software
+ * queue, otherwise the qset must be removed so the qTDs can be safely
+ * removed.
+ */
+int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
+{
+ struct whc_urb *wurb = urb->hcpriv;
+ struct whc_qset *qset = wurb->qset;
+ struct whc_std *std, *t;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
+ if (ret < 0)
+ goto out;
+
+ list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+ if (std->urb == urb)
+ qset_free_std(whc, std);
+ else
+ std->qtd = NULL; /* so this std is re-added when the qset is */
+ }
+
+ pzl_qset_remove(whc, qset);
+ wurb->status = status;
+ wurb->is_async = false;
+ queue_work(whc->workqueue, &wurb->dequeue_work);
+
+out:
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ return ret;
+}
+
+/**
+ * pzl_qset_delete - delete a qset from the PZL
+ */
+void pzl_qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+ qset->remove = 1;
+ queue_work(whc->workqueue, &whc->periodic_work);
+ qset_delete(whc, qset);
+}
+
+
+/**
+ * pzl_init - initialize the periodic zone list
+ * @whc: the WHCI host controller
+ */
+int pzl_init(struct whc *whc)
+{
+ int i;
+
+ whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16,
+ &whc->pz_list_dma, GFP_KERNEL);
+ if (whc->pz_list == NULL)
+ return -ENOMEM;
+
+ /* Set T bit on all elements in PZL. */
+ for (i = 0; i < 16; i++)
+ whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
+
+ le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
+
+ return 0;
+}
+
+/**
+ * pzl_clean_up - free PZL resources
+ * @whc: the WHCI host controller
+ *
+ * The PZL is stopped and empty.
+ */
+void pzl_clean_up(struct whc *whc)
+{
+ if (whc->pz_list)
+ dma_free_coherent(&whc->umc->dev, sizeof(u64) * 16, whc->pz_list,
+ whc->pz_list_dma);
+}
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
new file mode 100644
index 00000000000..0420037d2e1
--- /dev/null
+++ b/drivers/usb/host/whci/qset.c
@@ -0,0 +1,567 @@
+/*
+ * Wireless Host Controller (WHC) qset management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/umc.h>
+#include <linux/usb.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+void dump_qset(struct whc_qset *qset, struct device *dev)
+{
+ struct whc_std *std;
+ struct urb *urb = NULL;
+ int i;
+
+ dev_dbg(dev, "qset %08x\n", (u32)qset->qset_dma);
+ dev_dbg(dev, " -> %08x\n", (u32)qset->qh.link);
+ dev_dbg(dev, " info: %08x %08x %08x\n",
+ qset->qh.info1, qset->qh.info2, qset->qh.info3);
+ dev_dbg(dev, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count);
+ dev_dbg(dev, " TD: sts: %08x opts: %08x\n",
+ qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
+
+ for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
+ dev_dbg(dev, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
+ i == qset->td_start ? 'S' : ' ',
+ i == qset->td_end ? 'E' : ' ',
+ i, qset->qtd[i].status, qset->qtd[i].options,
+ (u32)qset->qtd[i].page_list_ptr);
+ }
+ dev_dbg(dev, " ntds: %d\n", qset->ntds);
+ list_for_each_entry(std, &qset->stds, list_node) {
+ if (urb != std->urb) {
+ urb = std->urb;
+ dev_dbg(dev, " urb %p transferred: %d bytes\n", urb,
+ urb->actual_length);
+ }
+ if (std->qtd)
+ dev_dbg(dev, " sTD[%td]: %zu bytes @ %08x\n",
+ std->qtd - &qset->qtd[0],
+ std->len, std->num_pointers ?
+ (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
+ else
+ dev_dbg(dev, " sTD[-]: %zd bytes @ %08x\n",
+ std->len, std->num_pointers ?
+ (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
+ }
+}
+
+struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
+{
+ struct whc_qset *qset;
+ dma_addr_t dma;
+
+ qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
+ if (qset == NULL)
+ return NULL;
+ memset(qset, 0, sizeof(struct whc_qset));
+
+ qset->qset_dma = dma;
+ qset->whc = whc;
+
+ INIT_LIST_HEAD(&qset->list_node);
+ INIT_LIST_HEAD(&qset->stds);
+
+ return qset;
+}
+
+/**
+ * qset_fill_qh - fill the static endpoint state in a qset's QHead
+ * @qset: the qset whose QH needs initializing with static endpoint
+ * state
+ * @urb: an urb for a transfer to this endpoint
+ */
+static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
+{
+ struct usb_device *usb_dev = urb->dev;
+ struct usb_wireless_ep_comp_descriptor *epcd;
+ bool is_out;
+
+ is_out = usb_pipeout(urb->pipe);
+
+ epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
+
+ if (epcd) {
+ qset->max_seq = epcd->bMaxSequence;
+ qset->max_burst = epcd->bMaxBurst;
+ } else {
+ qset->max_seq = 2;
+ qset->max_burst = 1;
+ }
+
+ qset->qh.info1 = cpu_to_le32(
+ QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
+ | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
+ | usb_pipe_to_qh_type(urb->pipe)
+ | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
+ | QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out))
+ );
+ qset->qh.info2 = cpu_to_le32(
+ QH_INFO2_BURST(qset->max_burst)
+ | QH_INFO2_DBP(0)
+ | QH_INFO2_MAX_COUNT(3)
+ | QH_INFO2_MAX_RETRY(3)
+ | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
+ );
+ /* FIXME: where can we obtain these Tx parameters from? Why
+ * doesn't the chip know what Tx power to use? It knows the Rx
+ * strength and can presumably guess the Tx power required
+ * from that? */
+ qset->qh.info3 = cpu_to_le32(
+ QH_INFO3_TX_RATE_53_3
+ | QH_INFO3_TX_PWR(0) /* 0 == max power */
+ );
+}
+
+/**
+ * qset_clear - clear fields in a qset so it may be reinserted into a
+ * schedule
+ */
+void qset_clear(struct whc *whc, struct whc_qset *qset)
+{
+ qset->td_start = qset->td_end = qset->ntds = 0;
+ qset->remove = 0;
+
+ qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+ qset->qh.status = cpu_to_le16(QH_STATUS_ICUR(qset->td_start));
+ qset->qh.err_count = 0;
+ qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
+ qset->qh.scratch[0] = 0;
+ qset->qh.scratch[1] = 0;
+ qset->qh.scratch[2] = 0;
+
+ memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));
+
+ init_completion(&qset->remove_complete);
+}
+
+/**
+ * get_qset - get the qset for an async endpoint
+ *
+ * A new qset is created if one does not already exist.
+ */
+struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct whc_qset *qset;
+
+ qset = urb->ep->hcpriv;
+ if (qset == NULL) {
+ qset = qset_alloc(whc, mem_flags);
+ if (qset == NULL)
+ return NULL;
+
+ qset->ep = urb->ep;
+ urb->ep->hcpriv = qset;
+ qset_fill_qh(qset, urb);
+ }
+ return qset;
+}
+
+void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
+{
+ list_del_init(&qset->list_node);
+ complete(&qset->remove_complete);
+}
+
+/**
+ * qset_add_qtds - add qTDs for an URB to a qset
+ *
+ * Returns true if the list (ASL/PZL) must be updated because (for a
+ * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
+ */
+enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
+{
+ struct whc_std *std;
+ enum whc_update update = 0;
+
+ list_for_each_entry(std, &qset->stds, list_node) {
+ struct whc_qtd *qtd;
+ uint32_t status;
+
+ if (qset->ntds >= WHCI_QSET_TD_MAX
+ || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
+ break;
+
+ if (std->qtd)
+ continue; /* already has a qTD */
+
+ qtd = std->qtd = &qset->qtd[qset->td_end];
+
+ /* Fill in setup bytes for control transfers. */
+ if (usb_pipecontrol(std->urb->pipe))
+ memcpy(qtd->setup, std->urb->setup_packet, 8);
+
+ status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);
+
+ if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
+ status |= QTD_STS_LAST_PKT;
+
+ /*
+ * For an IN transfer the iAlt field should be set so
+ * the h/w will automatically advance to the next
+ * transfer. However, if there are 8 or more TDs
+ * remaining in this transfer then iAlt cannot be set
+ * as it could point to somewhere in this transfer.
+ */
+ if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
+ int ialt;
+ ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
+ status |= QTD_STS_IALT(ialt);
+ } else if (usb_pipein(std->urb->pipe))
+ qset->pause_after_urb = std->urb;
+
+ if (std->num_pointers)
+ qtd->options = cpu_to_le32(QTD_OPT_IOC);
+ else
+ qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
+ qtd->page_list_ptr = cpu_to_le64(std->dma_addr);
+
+ qtd->status = cpu_to_le32(status);
+
+ if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
+ update = WHC_UPDATE_UPDATED;
+
+ if (++qset->td_end >= WHCI_QSET_TD_MAX)
+ qset->td_end = 0;
+ qset->ntds++;
+ }
+
+ return update;
+}
+
+/**
+ * qset_remove_qtd - remove the first qTD from a qset.
+ *
+ * The qTD might still be active (if it's part of an IN URB that
+ * resulted in a short read) so ensure it's deactivated.
+ */
+static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
+{
+ qset->qtd[qset->td_start].status = 0;
+
+ if (++qset->td_start >= WHCI_QSET_TD_MAX)
+ qset->td_start = 0;
+ qset->ntds--;
+}
+
+/**
+ * qset_free_std - remove an sTD and free it.
+ * @whc: the WHCI host controller
+ * @std: the sTD to remove and free.
+ */
+void qset_free_std(struct whc *whc, struct whc_std *std)
+{
+ list_del(&std->list_node);
+ if (std->num_pointers) {
+ dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
+ std->num_pointers * sizeof(struct whc_page_list_entry),
+ DMA_TO_DEVICE);
+ kfree(std->pl_virt);
+ }
+
+ kfree(std);
+}
+
+/**
+ * qset_remove_qtds - remove an URB's qTDs (and sTDs).
+ */
+static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb)
+{
+ struct whc_std *std, *t;
+
+ list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+ if (std->urb != urb)
+ break;
+ if (std->qtd != NULL)
+ qset_remove_qtd(whc, qset);
+ qset_free_std(whc, std);
+ }
+}
+
+/**
+ * qset_free_stds - free any remaining sTDs for an URB.
+ */
+static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
+{
+ struct whc_std *std, *t;
+
+ list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+ if (std->urb == urb)
+ qset_free_std(qset->whc, std);
+ }
+}
+
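+/*
+ * qset_fill_page_list - allocate a page list for an sTD whose buffer
+ * spans more than one WHCI page.  std->dma_addr is replaced with the
+ * DMA address of the page list itself, which is what the qTD's
+ * page_list_ptr will point at.
+ */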
+static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
+{
+ dma_addr_t dma_addr = std->dma_addr;
+ dma_addr_t sp, ep;
+ size_t std_len = std->len;
+ size_t pl_len;
+ int p;
+
+ sp = ALIGN(dma_addr, WHCI_PAGE_SIZE);
+ ep = dma_addr + std_len;
+ std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
+
+ pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+ std->pl_virt = kmalloc(pl_len, mem_flags);
+ if (std->pl_virt == NULL)
+ return -ENOMEM;
+ std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
+
+ for (p = 0; p < std->num_pointers; p++) {
+ std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
+ dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE);
+ }
+
+ return 0;
+}
+
+/**
+ * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
+ */
+static void urb_dequeue_work(struct work_struct *work)
+{
+ struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
+ struct whc_qset *qset = wurb->qset;
+ struct whc *whc = qset->whc;
+ unsigned long flags;
+
+	if (wurb->is_async)
+ asl_update(whc, WUSBCMD_ASYNC_UPDATED
+ | WUSBCMD_ASYNC_SYNCED_DB
+ | WUSBCMD_ASYNC_QSET_RM);
+ else
+ pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
+ | WUSBCMD_PERIODIC_SYNCED_DB
+ | WUSBCMD_PERIODIC_QSET_RM);
+
+ spin_lock_irqsave(&whc->lock, flags);
+ qset_remove_urb(whc, qset, wurb->urb, wurb->status);
+ spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+/**
+ * qset_add_urb - add an urb to the qset's queue.
+ *
+ * The URB is chopped into sTDs, one for each qTD that will be required.
+ * At least one qTD (and sTD) is required even if the transfer has no
+ * data (e.g., for some control transfers).
+ */
+int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct whc_urb *wurb;
+ int remaining = urb->transfer_buffer_length;
+ u64 transfer_dma = urb->transfer_dma;
+ int ntds_remaining;
+
+ ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
+ if (ntds_remaining == 0)
+ ntds_remaining = 1;
+
+ wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
+ if (wurb == NULL)
+ goto err_no_mem;
+ urb->hcpriv = wurb;
+ wurb->qset = qset;
+ wurb->urb = urb;
+ INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
+
+ while (ntds_remaining) {
+ struct whc_std *std;
+ size_t std_len;
+
+ std = kmalloc(sizeof(struct whc_std), mem_flags);
+ if (std == NULL)
+ goto err_no_mem;
+
+ std_len = remaining;
+ if (std_len > QTD_MAX_XFER_SIZE)
+ std_len = QTD_MAX_XFER_SIZE;
+
+ std->urb = urb;
+ std->dma_addr = transfer_dma;
+ std->len = std_len;
+ std->ntds_remaining = ntds_remaining;
+ std->qtd = NULL;
+
+ INIT_LIST_HEAD(&std->list_node);
+ list_add_tail(&std->list_node, &qset->stds);
+
+ if (std_len > WHCI_PAGE_SIZE) {
+ if (qset_fill_page_list(whc, std, mem_flags) < 0)
+ goto err_no_mem;
+ } else
+ std->num_pointers = 0;
+
+ ntds_remaining--;
+ remaining -= std_len;
+ transfer_dma += std_len;
+ }
+
+ return 0;
+
+err_no_mem:
+ qset_free_stds(qset, urb);
+ return -ENOMEM;
+}
+
+/**
+ * qset_remove_urb - remove an URB from the urb queue.
+ *
+ * The URB is returned to the USB subsystem.
+ */
+void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb, int status)
+{
+ struct wusbhc *wusbhc = &whc->wusbhc;
+ struct whc_urb *wurb = urb->hcpriv;
+
+ usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
+ /* Drop the lock as urb->complete() may enqueue another urb. */
+ spin_unlock(&whc->lock);
+ wusbhc_giveback_urb(wusbhc, urb, status);
+ spin_lock(&whc->lock);
+
+ kfree(wurb);
+}
+
+/**
+ * get_urb_status_from_qtd - get the completed urb status from qTD status
+ * @urb: completed urb
+ * @status: qTD status
+ */
+static int get_urb_status_from_qtd(struct urb *urb, u32 status)
+{
+ if (status & QTD_STS_HALTED) {
+ if (status & QTD_STS_DBE)
+ return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
+ else if (status & QTD_STS_BABBLE)
+ return -EOVERFLOW;
+ else if (status & QTD_STS_RCE)
+ return -ETIME;
+ return -EPIPE;
+ }
+ if (usb_pipein(urb->pipe)
+ && (urb->transfer_flags & URB_SHORT_NOT_OK)
+ && urb->actual_length < urb->transfer_buffer_length)
+ return -EREMOTEIO;
+ return 0;
+}
+
+/**
+ * process_inactive_qtd - process an inactive (but not halted) qTD.
+ *
+ * Update the urb with the number of bytes transferred by the qTD.  If
+ * the urb is completely transferred or (for an IN transfer) the last
+ * packet flag is set, then the transfer is complete and the urb should
+ * be returned to the system.
+ */
+void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
+ struct whc_qtd *qtd)
+{
+ struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
+ struct urb *urb = std->urb;
+ uint32_t status;
+ bool complete;
+
+ status = le32_to_cpu(qtd->status);
+
+ urb->actual_length += std->len - QTD_STS_TO_LEN(status);
+
+ if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
+ complete = true;
+ else
+ complete = whc_std_last(std);
+
+ qset_remove_qtd(whc, qset);
+ qset_free_std(whc, std);
+
+ /*
+ * Transfers for this URB are complete? Then return it to the
+ * USB subsystem.
+ */
+ if (complete) {
+ qset_remove_qtds(whc, qset, urb);
+ qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));
+
+ /*
+ * If iAlt isn't valid then the hardware didn't
+ * advance iCur. Adjust the start and end pointers to
+ * match iCur.
+ */
+ if (!(status & QTD_STS_IALT_VALID))
+ qset->td_start = qset->td_end
+ = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
+ qset->pause_after_urb = NULL;
+ }
+}
+
+/**
+ * process_halted_qtd - process a qset with a halted qtd
+ *
+ * Remove all the qTDs for the failed URB and return the failed URB to
+ * the USB subsystem. Then remove all other qTDs so the qset can be
+ * removed.
+ *
+ * FIXME: this is the point where rate adaptation can be done. If a
+ * transfer failed because it exceeded the maximum number of retries
+ * then it could be reactivated with a slower rate without having to
+ * remove the qset.
+ */
+void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
+ struct whc_qtd *qtd)
+{
+ struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
+ struct urb *urb = std->urb;
+ int urb_status;
+
+ urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));
+
+ qset_remove_qtds(whc, qset, urb);
+ qset_remove_urb(whc, qset, urb, urb_status);
+
+ list_for_each_entry(std, &qset->stds, list_node) {
+ if (qset->ntds == 0)
+ break;
+ qset_remove_qtd(whc, qset);
+ std->qtd = NULL;
+ }
+
+ qset->remove = 1;
+}
+
+void qset_free(struct whc *whc, struct whc_qset *qset)
+{
+ dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
+}
+
+/**
+ * qset_delete - wait for a qset to be unused, then free it.
+ */
+void qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+ wait_for_completion(&qset->remove_complete);
+ qset_free(whc, qset);
+}
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h
new file mode 100644
index 00000000000..1d2a53bd39f
--- /dev/null
+++ b/drivers/usb/host/whci/whcd.h
@@ -0,0 +1,197 @@
+/*
+ * Wireless Host Controller (WHC) private header.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#ifndef __WHCD_H
+#define __WHCD_H
+
+#include <linux/uwb/whci.h>
+#include <linux/workqueue.h>
+
+#include "whci-hc.h"
+
+/* Generic command timeout. */
+#define WHC_GENCMD_TIMEOUT_MS 100
+
+
+struct whc {
+ struct wusbhc wusbhc;
+ struct umc_dev *umc;
+
+ resource_size_t base_phys;
+ void __iomem *base;
+ int irq;
+
+ u8 n_devices;
+ u8 n_keys;
+ u8 n_mmc_ies;
+
+ u64 *pz_list;
+ struct dn_buf_entry *dn_buf;
+ struct di_buf_entry *di_buf;
+ dma_addr_t pz_list_dma;
+ dma_addr_t dn_buf_dma;
+ dma_addr_t di_buf_dma;
+
+ spinlock_t lock;
+ struct mutex mutex;
+
+ void * gen_cmd_buf;
+ dma_addr_t gen_cmd_buf_dma;
+ wait_queue_head_t cmd_wq;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct dn_work;
+
+ struct dma_pool *qset_pool;
+
+ struct list_head async_list;
+ struct list_head async_removed_list;
+ wait_queue_head_t async_list_wq;
+ struct work_struct async_work;
+
+ struct list_head periodic_list[5];
+ struct list_head periodic_removed_list;
+ wait_queue_head_t periodic_list_wq;
+ struct work_struct periodic_work;
+};
+
+#define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc))
+
+/**
+ * struct whc_std - a software TD.
+ * @urb: the URB this sTD is for.
+ * @offset: start of the URB's data for this TD.
+ * @len: the length of data in the associated TD.
+ * @ntds_remaining: number of TDs (starting from this one) in this transfer.
+ *
+ * Queued URBs may require more TDs than are available in a qset so we
+ * use a list of these "software TDs" (sTDs) to hold per-TD data.
+ */
+struct whc_std {
+ struct urb *urb;
+ size_t len;
+ int ntds_remaining;
+ struct whc_qtd *qtd;
+
+ struct list_head list_node;
+ int num_pointers;
+ dma_addr_t dma_addr;
+ struct whc_page_list_entry *pl_virt;
+};
+
+/**
+ * struct whc_urb - per URB host controller structure.
+ * @urb: the URB this struct is for.
+ * @qset: the qset associated to the URB.
+ * @dequeue_work: the work to remove the URB when dequeued.
+ * @is_async: whether the URB belongs to the async scheduler (ASL) or not.
+ * @status: the status to be returned when calling wusbhc_giveback_urb.
+ */
+struct whc_urb {
+ struct urb *urb;
+ struct whc_qset *qset;
+ struct work_struct dequeue_work;
+ bool is_async;
+ int status;
+};
+
+/**
+ * whc_std_last - is this sTD the URB's last?
+ * @std: the sTD to check.
+ */
+static inline bool whc_std_last(struct whc_std *std)
+{
+ return std->ntds_remaining <= 1;
+}
+
+enum whc_update {
+ WHC_UPDATE_ADDED = 0x01,
+ WHC_UPDATE_REMOVED = 0x02,
+ WHC_UPDATE_UPDATED = 0x04,
+};
+
+/* init.c */
+int whc_init(struct whc *whc);
+void whc_clean_up(struct whc *whc);
+
+/* hw.c */
+void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val);
+int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len);
+
+/* wusb.c */
+int whc_wusbhc_start(struct wusbhc *wusbhc);
+void whc_wusbhc_stop(struct wusbhc *wusbhc);
+int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+ u8 handle, struct wuie_hdr *wuie);
+int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle);
+int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm);
+int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
+int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots);
+int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+ const void *ptk, size_t key_size);
+int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+ const void *gtk, size_t key_size);
+int whc_set_cluster_id(struct whc *whc, u8 bcid);
+
+/* int.c */
+irqreturn_t whc_int_handler(struct usb_hcd *hcd);
+void whc_dn_work(struct work_struct *work);
+
+/* asl.c */
+void asl_start(struct whc *whc);
+void asl_stop(struct whc *whc);
+int asl_init(struct whc *whc);
+void asl_clean_up(struct whc *whc);
+int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
+void asl_qset_delete(struct whc *whc, struct whc_qset *qset);
+void scan_async_work(struct work_struct *work);
+
+/* pzl.c */
+int pzl_init(struct whc *whc);
+void pzl_clean_up(struct whc *whc);
+void pzl_start(struct whc *whc);
+void pzl_stop(struct whc *whc);
+int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
+void pzl_qset_delete(struct whc *whc, struct whc_qset *qset);
+void scan_periodic_work(struct work_struct *work);
+
+/* qset.c */
+struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags);
+void qset_free(struct whc *whc, struct whc_qset *qset);
+struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+void qset_delete(struct whc *whc, struct whc_qset *qset);
+void qset_clear(struct whc *whc, struct whc_qset *qset);
+int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+ gfp_t mem_flags);
+void qset_free_std(struct whc *whc, struct whc_std *std);
+void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb, int status);
+void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
+ struct whc_qtd *qtd);
+void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
+ struct whc_qtd *qtd);
+enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset);
+void qset_remove_complete(struct whc *whc, struct whc_qset *qset);
+void dump_qset(struct whc_qset *qset, struct device *dev);
+void pzl_update(struct whc *whc, uint32_t wusbcmd);
+void asl_update(struct whc *whc, uint32_t wusbcmd);
+
+#endif /* #ifndef __WHCD_H */
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h
new file mode 100644
index 00000000000..bff1eb7a35c
--- /dev/null
+++ b/drivers/usb/host/whci/whci-hc.h
@@ -0,0 +1,416 @@
+/*
+ * Wireless Host Controller (WHC) data structures.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#ifndef _WHCI_WHCI_HC_H
+#define _WHCI_WHCI_HC_H
+
+#include <linux/list.h>
+
+/**
+ * WHCI_PAGE_SIZE - page size used by WHCI
+ *
+ * WHCI assumes that the host system uses pages of 4096 octets.
+ */
+#define WHCI_PAGE_SIZE 4096
+
+
+/**
+ * QTD_MAX_XFER_SIZE - maximum number of bytes to transfer with a single
+ * qTD.
+ *
+ * This is 2^20 - 1.
+ */
+#define QTD_MAX_XFER_SIZE 1048575
+
+
+/**
+ * struct whc_qtd - Queue Element Transfer Descriptors (qTD)
+ *
+ * This describes the data for a bulk, control or interrupt transfer.
+ *
+ * [WHCI] section 3.2.4
+ */
+struct whc_qtd {
+ __le32 status; /*< remaining transfer len and transfer status */
+ __le32 options;
+ __le64 page_list_ptr; /*< physical pointer to data buffer page list */
+ __u8 setup[8]; /*< setup data for control transfers */
+} __attribute__((packed));
+
+#define QTD_STS_ACTIVE (1 << 31) /* enable execution of transaction */
+#define QTD_STS_HALTED (1 << 30) /* transfer halted */
+#define QTD_STS_DBE (1 << 29) /* data buffer error */
+#define QTD_STS_BABBLE (1 << 28) /* babble detected */
+#define QTD_STS_RCE (1 << 27) /* retry count exceeded */
+#define QTD_STS_LAST_PKT (1 << 26) /* set Last Packet Flag in WUSB header */
+#define QTD_STS_INACTIVE (1 << 25) /* queue set is marked inactive */
+#define QTD_STS_IALT_VALID (1 << 23) /* iAlt field is valid */
+#define QTD_STS_IALT(i) (QTD_STS_IALT_VALID | ((i) << 20)) /* iAlt field */
+#define QTD_STS_LEN(l) ((l) << 0) /* transfer length */
+#define QTD_STS_TO_LEN(s) ((s) & 0x000fffff)
+
+#define QTD_OPT_IOC (1 << 1) /* interrupt on complete */
+#define QTD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
+
+/**
+ * struct whc_itd - Isochronous Queue Element Transfer Descriptors (iTD)
+ *
+ * This describes the data and other parameters for an isochronous
+ * transfer.
+ *
+ * [WHCI] section 3.2.5
+ */
+struct whc_itd {
+ __le16 presentation_time; /*< presentation time for OUT transfers */
+ __u8 num_segments; /*< number of data segments in segment list */
+ __u8 status; /*< command execution status */
+ __le32 options; /*< misc transfer options */
+ __le64 page_list_ptr; /*< physical pointer to data buffer page list */
+ __le64 seg_list_ptr; /*< physical pointer to segment list */
+} __attribute__((packed));
+
+#define ITD_STS_ACTIVE (1 << 7) /* enable execution of transaction */
+#define ITD_STS_DBE (1 << 5) /* data buffer error */
+#define ITD_STS_BABBLE (1 << 4) /* babble detected */
+#define ITD_STS_INACTIVE (1 << 1) /* queue set is marked inactive */
+
+#define ITD_OPT_IOC (1 << 1) /* interrupt on complete */
+#define ITD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
+
+/**
+ * Page list entry.
+ *
+ * A TD's page list must contain sufficient page list entries for the
+ * total data length in the TD.
+ *
+ * [WHCI] section 3.2.4.3
+ */
+struct whc_page_list_entry {
+ __le64 buf_ptr; /*< physical pointer to buffer */
+} __attribute__((packed));
+
+/**
+ * struct whc_seg_list_entry - Segment list entry.
+ *
+ * Describes a portion of the data buffer described in the containing
+ * qTD's page list.
+ *
+ * seg_ptr = qtd->page_list_ptr[qtd->seg_list_ptr[seg].idx].buf_ptr
+ * + qtd->seg_list_ptr[seg].offset;
+ *
+ * Segments can't cross page boundaries.
+ *
+ * [WHCI] section 3.2.5.5
+ */
+struct whc_seg_list_entry {
+ __le16 len; /*< segment length */
+ __u8 idx; /*< index into page list */
+ __u8 status; /*< segment status */
+ __le16 offset; /*< 12 bit offset into page */
+} __attribute__((packed));
+
+/**
+ * struct whc_qhead - endpoint and status information for a qset.
+ *
+ * [WHCI] section 3.2.6
+ */
+struct whc_qhead {
+ __le64 link; /*< next qset in list */
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le16 status;
+ __le16 err_count; /*< transaction error count */
+ __le32 cur_window;
+ __le32 scratch[3]; /*< h/w scratch area */
+ union {
+ struct whc_qtd qtd;
+ struct whc_itd itd;
+ } overlay;
+} __attribute__((packed));
+
+#define QH_LINK_PTR_MASK (~0x03Full)
+#define QH_LINK_PTR(ptr) ((ptr) & QH_LINK_PTR_MASK)
+#define QH_LINK_IQS (1 << 4) /* isochronous queue set */
+#define QH_LINK_NTDS(n) (((n) - 1) << 1) /* number of TDs in queue set */
+#define QH_LINK_T (1 << 0) /* last queue set in periodic schedule list */
+
+#define QH_INFO1_EP(e) ((e) << 0) /* endpoint number */
+#define QH_INFO1_DIR_IN (1 << 4) /* IN transfer */
+#define QH_INFO1_DIR_OUT (0 << 4) /* OUT transfer */
+#define QH_INFO1_TR_TYPE_CTRL (0x0 << 5) /* control transfer */
+#define QH_INFO1_TR_TYPE_ISOC (0x1 << 5) /* isochronous transfer */
+#define QH_INFO1_TR_TYPE_BULK (0x2 << 5) /* bulk transfer */
+#define QH_INFO1_TR_TYPE_INT (0x3 << 5) /* interrupt */
+#define QH_INFO1_TR_TYPE_LP_INT (0x7 << 5) /* low power interrupt */
+#define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8) /* index into device info buffer */
+#define QH_INFO1_SET_INACTIVE (1 << 15) /* set inactive after transfer */
+#define QH_INFO1_MAX_PKT_LEN(l) ((l) << 16) /* maximum packet length */
+
+#define QH_INFO2_BURST(b) ((b) << 0) /* maximum burst length */
+#define QH_INFO2_DBP(p) ((p) << 5) /* data burst policy (see [WUSB] table 5-7) */
+#define QH_INFO2_MAX_COUNT(c) ((c) << 8) /* max isoc/int pkts per zone */
+#define QH_INFO2_RQS (1 << 15) /* reactivate queue set */
+#define QH_INFO2_MAX_RETRY(r) ((r) << 16) /* maximum transaction retries */
+#define QH_INFO2_MAX_SEQ(s) ((s) << 20) /* maximum sequence number */
+#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */
+#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */
+
+#define QH_INFO3_TX_RATE_53_3 (0 << 24)
+#define QH_INFO3_TX_RATE_80 (1 << 24)
+#define QH_INFO3_TX_RATE_106_7 (2 << 24)
+#define QH_INFO3_TX_RATE_160 (3 << 24)
+#define QH_INFO3_TX_RATE_200 (4 << 24)
+#define QH_INFO3_TX_RATE_320 (5 << 24)
+#define QH_INFO3_TX_RATE_400 (6 << 24)
+#define QH_INFO3_TX_RATE_480 (7 << 24)
+#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
+
+#define QH_STATUS_FLOW_CTRL (1 << 15)
+#define QH_STATUS_ICUR(i) ((i) << 5)
+#define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7)
+
+/**
+ * usb_pipe_to_qh_type - USB core pipe type to QH transfer type
+ *
+ * Returns the QH type field for a USB core pipe type.
+ */
+static inline unsigned usb_pipe_to_qh_type(unsigned pipe)
+{
+ static const unsigned type[] = {
+ [PIPE_ISOCHRONOUS] = QH_INFO1_TR_TYPE_ISOC,
+ [PIPE_INTERRUPT] = QH_INFO1_TR_TYPE_INT,
+ [PIPE_CONTROL] = QH_INFO1_TR_TYPE_CTRL,
+ [PIPE_BULK] = QH_INFO1_TR_TYPE_BULK,
+ };
+ return type[usb_pipetype(pipe)];
+}
+
+/**
+ * Maximum number of TDs in a qset.
+ */
+#define WHCI_QSET_TD_MAX 8
+
+/**
+ * struct whc_qset - WUSB data transfers to a specific endpoint
+ * @qh: the QHead of this qset
+ * @qtd: up to 8 qTDs (for qsets for control, bulk and interrupt
+ * transfers)
+ * @itd: up to 8 iTDs (for qsets for isochronous transfers)
+ * @qset_dma: DMA address for this qset
+ * @whc: WHCI HC this qset is for
+ * @ep: endpoint
+ * @stds: list of sTDs queued to this qset
+ * @ntds: number of qTDs queued (not necessarily the same as nTDs
+ * field in the QH)
+ * @td_start: index of the first qTD in the list
+ * @td_end: index of next free qTD in the list (provided
+ * ntds < WHCI_QSET_TD_MAX)
+ *
+ * Queue Sets (qsets) are added to the asynchronous schedule list
+ * (ASL) or the periodic zone list (PZL).
+ *
+ * qsets may contain up to 8 TDs (either qTDs or iTDs as appropriate).
+ * Each TD may refer to at most 1 MiB of data. If a single transfer
+ * has > 8MiB of data, TDs can be reused as they are completed since
+ * the TD list is used as a circular buffer. Similarly, several
+ * (smaller) transfers may be queued in a qset.
+ *
+ * WHCI controllers may cache portions of the qsets in the ASL and
+ * PZL, requiring the WHCD to inform the WHC that the lists have been
+ * updated (fields changed or qsets inserted or removed). For safe
+ * insertion and removal of qsets from the lists the schedule must be
+ * stopped to avoid races in updating the QH link pointers.
+ *
+ * Since the HC is free to execute qsets in any order, all transfers
+ * to an endpoint should use the same qset to ensure transfers are
+ * executed in the order they're submitted.
+ *
+ * [WHCI] section 3.2.3
+ */
+struct whc_qset {
+ struct whc_qhead qh;
+ union {
+ struct whc_qtd qtd[WHCI_QSET_TD_MAX];
+ struct whc_itd itd[WHCI_QSET_TD_MAX];
+ };
+
+ /* private data for WHCD */
+ dma_addr_t qset_dma;
+ struct whc *whc;
+ struct usb_host_endpoint *ep;
+ struct list_head stds;
+ int ntds;
+ int td_start;
+ int td_end;
+ struct list_head list_node;
+ unsigned in_sw_list:1;
+ unsigned in_hw_list:1;
+ unsigned remove:1;
+ struct urb *pause_after_urb;
+ struct completion remove_complete;
+ int max_burst;
+ int max_seq;
+};
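[Editor's sketch, not part of the patch] The comment above describes the 8-entry TD array as a circular buffer indexed by td_start and td_end. A minimal illustration of the wrap-around arithmetic this implies (the helper is hypothetical; the real index handling lives in qset.c):

	/* Hypothetical helper: advance a TD index around the 8-entry ring. */
	static inline int whc_qset_td_next(int idx)
	{
		return (idx + 1) % WHCI_QSET_TD_MAX;
	}

	/* e.g. after activating qset->qtd[qset->td_end]:
	 *	qset->td_end = whc_qset_td_next(qset->td_end);
	 *	qset->ntds++;
	 * and qset->td_start advances the same way as qTDs complete.
	 */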
+
+static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
+{
+ if (target)
+ *ptr = (*ptr & ~(QH_LINK_PTR_MASK | QH_LINK_T)) | QH_LINK_PTR(target);
+ else
+ *ptr = QH_LINK_T;
+}
+
+/**
+ * struct di_buf_entry - Device Information (DI) buffer entry.
+ *
+ * There's one of these per connected device.
+ */
+struct di_buf_entry {
+ __le32 availability_info[8]; /*< MAS availability information, one MAS per bit */
+ __le32 addr_sec_info; /*< addressing and security info */
+ __le32 reserved[7];
+} __attribute__((packed));
+
+#define WHC_DI_SECURE (1 << 31)
+#define WHC_DI_DISABLE (1 << 30)
+#define WHC_DI_KEY_IDX(k) ((k) << 8)
+#define WHC_DI_KEY_IDX_MASK 0x0000ff00
+#define WHC_DI_DEV_ADDR(a) ((a) << 0)
+#define WHC_DI_DEV_ADDR_MASK 0x000000ff
+
+/**
+ * struct dn_buf_entry - Device Notification (DN) buffer entry.
+ *
+ * [WHCI] section 3.2.8
+ */
+struct dn_buf_entry {
+ __u8 msg_size; /*< number of octets of valid DN data */
+ __u8 reserved1;
+ __u8 src_addr; /*< source address */
+ __u8 status; /*< buffer entry status */
+ __le32 tkid; /*< TKID for source device, valid if secure bit is set */
+ __u8 dn_data[56]; /*< up to 56 octets of DN data */
+} __attribute__((packed));
+
+#define WHC_DN_STATUS_VALID (1 << 7) /* buffer entry is valid */
+#define WHC_DN_STATUS_SECURE (1 << 6) /* notification received using secure frame */
+
+#define WHC_N_DN_ENTRIES (4096 / sizeof(struct dn_buf_entry))
+
+/* The Add MMC IE WUSB Generic Command may take up to 256 bytes of
+ data. [WHCI] section 2.4.7. */
+#define WHC_GEN_CMD_DATA_LEN 256
+
+/*
+ * HC registers.
+ *
+ * [WHCI] section 2.4
+ */
+
+#define WHCIVERSION 0x00
+
+#define WHCSPARAMS 0x04
+# define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff)
+# define WHCSPARAMS_TO_N_KEYS(p) (((p) >> 8) & 0xff)
+# define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f)
+
+#define WUSBCMD 0x08
+# define WUSBCMD_BCID(b) ((b) << 16)
+# define WUSBCMD_BCID_MASK (0xff << 16)
+# define WUSBCMD_ASYNC_QSET_RM (1 << 12)
+# define WUSBCMD_PERIODIC_QSET_RM (1 << 11)
+# define WUSBCMD_WUSBSI(s) ((s) << 8)
+# define WUSBCMD_WUSBSI_MASK (0x7 << 8)
+# define WUSBCMD_ASYNC_SYNCED_DB (1 << 7)
+# define WUSBCMD_PERIODIC_SYNCED_DB (1 << 6)
+# define WUSBCMD_ASYNC_UPDATED (1 << 5)
+# define WUSBCMD_PERIODIC_UPDATED (1 << 4)
+# define WUSBCMD_ASYNC_EN (1 << 3)
+# define WUSBCMD_PERIODIC_EN (1 << 2)
+# define WUSBCMD_WHCRESET (1 << 1)
+# define WUSBCMD_RUN (1 << 0)
+
+#define WUSBSTS 0x0c
+# define WUSBSTS_ASYNC_SCHED (1 << 15)
+# define WUSBSTS_PERIODIC_SCHED (1 << 14)
+# define WUSBSTS_DNTS_SCHED (1 << 13)
+# define WUSBSTS_HCHALTED (1 << 12)
+# define WUSBSTS_GEN_CMD_DONE (1 << 9)
+# define WUSBSTS_CHAN_TIME_ROLLOVER (1 << 8)
+# define WUSBSTS_DNTS_OVERFLOW (1 << 7)
+# define WUSBSTS_BPST_ADJUSTMENT_CHANGED (1 << 6)
+# define WUSBSTS_HOST_ERR (1 << 5)
+# define WUSBSTS_ASYNC_SCHED_SYNCED (1 << 4)
+# define WUSBSTS_PERIODIC_SCHED_SYNCED (1 << 3)
+# define WUSBSTS_DNTS_INT (1 << 2)
+# define WUSBSTS_ERR_INT (1 << 1)
+# define WUSBSTS_INT (1 << 0)
+# define WUSBSTS_INT_MASK 0x3ff
+
+#define WUSBINTR 0x10
+# define WUSBINTR_GEN_CMD_DONE (1 << 9)
+# define WUSBINTR_CHAN_TIME_ROLLOVER (1 << 8)
+# define WUSBINTR_DNTS_OVERFLOW (1 << 7)
+# define WUSBINTR_BPST_ADJUSTMENT_CHANGED (1 << 6)
+# define WUSBINTR_HOST_ERR (1 << 5)
+# define WUSBINTR_ASYNC_SCHED_SYNCED (1 << 4)
+# define WUSBINTR_PERIODIC_SCHED_SYNCED (1 << 3)
+# define WUSBINTR_DNTS_INT (1 << 2)
+# define WUSBINTR_ERR_INT (1 << 1)
+# define WUSBINTR_INT (1 << 0)
+# define WUSBINTR_ALL 0x3ff
+
+#define WUSBGENCMDSTS 0x14
+# define WUSBGENCMDSTS_ACTIVE (1 << 31)
+# define WUSBGENCMDSTS_ERROR (1 << 24)
+# define WUSBGENCMDSTS_IOC (1 << 23)
+# define WUSBGENCMDSTS_MMCIE_ADD 0x01
+# define WUSBGENCMDSTS_MMCIE_RM 0x02
+# define WUSBGENCMDSTS_SET_MAS 0x03
+# define WUSBGENCMDSTS_CHAN_STOP 0x04
+# define WUSBGENCMDSTS_RWP_EN 0x05
+
+#define WUSBGENCMDPARAMS 0x18
+#define WUSBGENADDR 0x20
+#define WUSBASYNCLISTADDR 0x28
+#define WUSBDNTSBUFADDR 0x30
+#define WUSBDEVICEINFOADDR 0x38
+
+#define WUSBSETSECKEYCMD 0x40
+# define WUSBSETSECKEYCMD_SET (1 << 31)
+# define WUSBSETSECKEYCMD_ERASE (1 << 30)
+# define WUSBSETSECKEYCMD_GTK (1 << 8)
+# define WUSBSETSECKEYCMD_IDX(i) ((i) << 0)
+
+#define WUSBTKID 0x44
+#define WUSBSECKEY 0x48
+#define WUSBPERIODICLISTBASE 0x58
+#define WUSBMASINDEX 0x60
+
+#define WUSBDNTSCTRL 0x64
+# define WUSBDNTSCTRL_ACTIVE (1 << 31)
+# define WUSBDNTSCTRL_INTERVAL(i) ((i) << 8)
+# define WUSBDNTSCTRL_SLOTS(s) ((s) << 0)
+
+#define WUSBTIME 0x68
+#define WUSBBPST 0x6c
+#define WUSBDIBUPDATED 0x70
+
+#endif /* #ifndef _WHCI_WHCI_HC_H */
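[Editor's sketch, not part of the patch] To show how the register offsets and masks above are meant to be combined, here is a hedged example of clearing pending interrupt status bits. le_writel() is used by wusb.c later in this patch; le_readl() is assumed to be the matching read helper from the UWB support code.

	/* Illustrative only: read WUSBSTS and acknowledge the pending bits. */
	static void whc_ack_all_ints(struct whc *whc)
	{
		u32 sts = le_readl(whc->base + WUSBSTS);

		le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS);
	}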
diff --git a/drivers/usb/host/whci/wusb.c b/drivers/usb/host/whci/wusb.c
new file mode 100644
index 00000000000..66e4ddcd961
--- /dev/null
+++ b/drivers/usb/host/whci/wusb.c
@@ -0,0 +1,241 @@
+/*
+ * Wireless Host Controller (WHC) WUSB operations.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/uwb/umc.h>
+#define D_LOCAL 1
+#include <linux/uwb/debug.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+#if D_LOCAL >= 1
+static void dump_di(struct whc *whc, int idx)
+{
+ struct di_buf_entry *di = &whc->di_buf[idx];
+ struct device *dev = &whc->umc->dev;
+ char buf[128];
+
+ bitmap_scnprintf(buf, sizeof(buf), (unsigned long *)di->availability_info, UWB_NUM_MAS);
+
+ d_printf(1, dev, "DI[%d]\n", idx);
+ d_printf(1, dev, " availability: %s\n", buf);
+ d_printf(1, dev, " %c%c key idx: %d dev addr: %d\n",
+ (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
+ (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
+ (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8,
+ (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK));
+}
+#else
+static inline void dump_di(struct whc *whc, int idx)
+{
+}
+#endif
+
+static int whc_update_di(struct whc *whc, int idx)
+{
+ int offset = idx / 32;
+ u32 bit = 1 << (idx % 32);
+
+ dump_di(whc, idx);
+
+ le_writel(bit, whc->base + WUSBDIBUPDATED + offset);
+
+ return whci_wait_for(&whc->umc->dev,
+ whc->base + WUSBDIBUPDATED + offset, bit, 0,
+ 100, "DI update");
+}
+
+/*
+ * WHCI starts and stops MMCs based on there being a valid GTK, so these
+ * functions need only start/stop the asynchronous and periodic schedules.
+ */
+
+int whc_wusbhc_start(struct wusbhc *wusbhc)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ asl_start(whc);
+ pzl_start(whc);
+
+ return 0;
+}
+
+void whc_wusbhc_stop(struct wusbhc *wusbhc)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ pzl_stop(whc);
+ asl_stop(whc);
+}
+
+int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+ u8 handle, struct wuie_hdr *wuie)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 params;
+
+ params = (interval << 24)
+ | (repeat_cnt << 16)
+ | (wuie->bLength << 8)
+ | handle;
+
+ return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength);
+}
+
+int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 params;
+
+ params = handle;
+
+ return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0);
+}
+
+int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ if (stream_index >= 0)
+ whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index));
+
+ return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm));
+}
+
+int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ int idx = wusb_dev->port_idx;
+ struct di_buf_entry *di = &whc->di_buf[idx];
+ int ret;
+
+ mutex_lock(&whc->mutex);
+
+ uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability);
+ di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK);
+ di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr);
+
+ ret = whc_update_di(whc, idx);
+
+ mutex_unlock(&whc->mutex);
+
+ return ret;
+}
+
+/*
+ * Set the number of Device Notification Time Slots (DNTS) and enable
+ * device notifications.
+ */
+int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 dntsctrl;
+
+ dntsctrl = WUSBDNTSCTRL_ACTIVE
+ | WUSBDNTSCTRL_INTERVAL(interval)
+ | WUSBDNTSCTRL_SLOTS(slots);
+
+ le_writel(dntsctrl, whc->base + WUSBDNTSCTRL);
+
+ return 0;
+}
+
+static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid,
+ const void *key, size_t key_size, bool is_gtk)
+{
+ uint32_t setkeycmd;
+ uint32_t seckey[4];
+ int i;
+ int ret;
+
+ memcpy(seckey, key, key_size);
+ setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index);
+ if (is_gtk)
+ setkeycmd |= WUSBSETSECKEYCMD_GTK;
+
+ le_writel(tkid, whc->base + WUSBTKID);
+ for (i = 0; i < 4; i++)
+ le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i);
+ le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD);
+
+ ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD,
+ WUSBSETSECKEYCMD_SET, 0, 100, "set key");
+
+ return ret;
+}
+
+/**
+ * whc_set_ptk - set the PTK to use for a device.
+ *
+ * The index into the key table for this PTK is the same as the
+ * device's port index.
+ */
+int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+ const void *ptk, size_t key_size)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ struct di_buf_entry *di = &whc->di_buf[port_idx];
+ int ret;
+
+ mutex_lock(&whc->mutex);
+
+ if (ptk) {
+ ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false);
+ if (ret)
+ goto out;
+
+ di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK;
+ di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx);
+ } else
+ di->addr_sec_info &= ~WHC_DI_SECURE;
+
+ ret = whc_update_di(whc, port_idx);
+out:
+ mutex_unlock(&whc->mutex);
+ return ret;
+}
+
+/**
+ * whc_set_gtk - set the GTK for subsequent broadcast packets
+ *
+ * The GTK is stored in the last entry in the key table (the previous
+ * N_DEVICES entries are for the per-device PTKs).
+ */
+int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+ const void *gtk, size_t key_size)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ int ret;
+
+ mutex_lock(&whc->mutex);
+
+ ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true);
+
+ mutex_unlock(&whc->mutex);
+
+ return ret;
+}
+
+int whc_set_cluster_id(struct whc *whc, u8 bcid)
+{
+ whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid));
+ return 0;
+}
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index b358c4e1cf2..444c69c447b 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1561,8 +1561,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
if (code != USBTEST_REQUEST)
return -EOPNOTSUPP;
- if (param->iterations <= 0 || param->length < 0
- || param->sglen < 0 || param->vary < 0)
+ if (param->iterations <= 0)
return -EINVAL;
if (mutex_lock_interruptible(&dev->lock))
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3d87eabcd92..bd07eaa300b 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -95,11 +95,20 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define HUAWEI_PRODUCT_E220 0x1003
#define HUAWEI_PRODUCT_E220BIS 0x1004
#define HUAWEI_PRODUCT_E1401 0x1401
+#define HUAWEI_PRODUCT_E1402 0x1402
#define HUAWEI_PRODUCT_E1403 0x1403
+#define HUAWEI_PRODUCT_E1404 0x1404
#define HUAWEI_PRODUCT_E1405 0x1405
#define HUAWEI_PRODUCT_E1406 0x1406
+#define HUAWEI_PRODUCT_E1407 0x1407
#define HUAWEI_PRODUCT_E1408 0x1408
#define HUAWEI_PRODUCT_E1409 0x1409
+#define HUAWEI_PRODUCT_E140A 0x140A
+#define HUAWEI_PRODUCT_E140B 0x140B
+#define HUAWEI_PRODUCT_E140C 0x140C
+#define HUAWEI_PRODUCT_E140D 0x140D
+#define HUAWEI_PRODUCT_E140E 0x140E
+#define HUAWEI_PRODUCT_E140F 0x140F
#define HUAWEI_PRODUCT_E1410 0x1410
#define HUAWEI_PRODUCT_E1411 0x1411
#define HUAWEI_PRODUCT_E1412 0x1412
@@ -110,6 +119,44 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define HUAWEI_PRODUCT_E1417 0x1417
#define HUAWEI_PRODUCT_E1418 0x1418
#define HUAWEI_PRODUCT_E1419 0x1419
+#define HUAWEI_PRODUCT_E141A 0x141A
+#define HUAWEI_PRODUCT_E141B 0x141B
+#define HUAWEI_PRODUCT_E141C 0x141C
+#define HUAWEI_PRODUCT_E141D 0x141D
+#define HUAWEI_PRODUCT_E141E 0x141E
+#define HUAWEI_PRODUCT_E141F 0x141F
+#define HUAWEI_PRODUCT_E1420 0x1420
+#define HUAWEI_PRODUCT_E1421 0x1421
+#define HUAWEI_PRODUCT_E1422 0x1422
+#define HUAWEI_PRODUCT_E1423 0x1423
+#define HUAWEI_PRODUCT_E1424 0x1424
+#define HUAWEI_PRODUCT_E1425 0x1425
+#define HUAWEI_PRODUCT_E1426 0x1426
+#define HUAWEI_PRODUCT_E1427 0x1427
+#define HUAWEI_PRODUCT_E1428 0x1428
+#define HUAWEI_PRODUCT_E1429 0x1429
+#define HUAWEI_PRODUCT_E142A 0x142A
+#define HUAWEI_PRODUCT_E142B 0x142B
+#define HUAWEI_PRODUCT_E142C 0x142C
+#define HUAWEI_PRODUCT_E142D 0x142D
+#define HUAWEI_PRODUCT_E142E 0x142E
+#define HUAWEI_PRODUCT_E142F 0x142F
+#define HUAWEI_PRODUCT_E1430 0x1430
+#define HUAWEI_PRODUCT_E1431 0x1431
+#define HUAWEI_PRODUCT_E1432 0x1432
+#define HUAWEI_PRODUCT_E1433 0x1433
+#define HUAWEI_PRODUCT_E1434 0x1434
+#define HUAWEI_PRODUCT_E1435 0x1435
+#define HUAWEI_PRODUCT_E1436 0x1436
+#define HUAWEI_PRODUCT_E1437 0x1437
+#define HUAWEI_PRODUCT_E1438 0x1438
+#define HUAWEI_PRODUCT_E1439 0x1439
+#define HUAWEI_PRODUCT_E143A 0x143A
+#define HUAWEI_PRODUCT_E143B 0x143B
+#define HUAWEI_PRODUCT_E143C 0x143C
+#define HUAWEI_PRODUCT_E143D 0x143D
+#define HUAWEI_PRODUCT_E143E 0x143E
+#define HUAWEI_PRODUCT_E143F 0x143F
#define NOVATELWIRELESS_VENDOR_ID 0x1410
@@ -207,6 +254,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
#define ZTE_PRODUCT_MF628 0x0015
+#define ZTE_PRODUCT_MF626 0x0031
#define ZTE_PRODUCT_CDMA_TECH 0xfffe
/* Ericsson products */
@@ -248,11 +296,20 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) },
@@ -263,6 +320,44 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
@@ -336,6 +431,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
{ USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) },
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 4995bb595ae..2dd9bd4bff5 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -95,11 +95,10 @@ int usb_stor_huawei_e220_init(struct us_data *us)
{
int result;
- us->iobuf[0] = 0x1;
result = usb_stor_control_msg(us, us->send_ctrl_pipe,
USB_REQ_SET_FEATURE,
USB_TYPE_STANDARD | USB_RECIP_DEVICE,
- 0x01, 0x0, us->iobuf, 0x1, 1000);
+ 0x01, 0x0, NULL, 0x0, 1000);
US_DEBUGP("usb_control_msg performing result is %d\n", result);
return (result ? 0 : -1);
}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index cd155475cb6..a2b9ebbef38 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1628,97 +1628,332 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
/* Reported by fangxiaozhi <huananhu@huawei.com>
* This brings the HUAWEI data card devices into multi-port mode
*/
-UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
0),
-UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
+UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
"HUAWEI MOBILE",
"Mass Storage",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
@@ -1745,6 +1980,15 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
+ * JMicron responds to USN and several other SCSI ioctls with a
+ * residue that causes subsequent I/O requests to fail. */
+UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Reported by Robert Schedel <r.schedel@yahoo.de>
* Note: this is a 'super top' device like the above 14cd/6600 device */
UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
@@ -1818,6 +2062,15 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
+ * Mio Moov 330
+ */
+UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000,
+ "Mitac",
+ "Mio DigiWalker USB Sync",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
"iRiver",
diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig
new file mode 100644
index 00000000000..eb09a0a14a8
--- /dev/null
+++ b/drivers/usb/wusbcore/Kconfig
@@ -0,0 +1,41 @@
+#
+# Wireless USB Core configuration
+#
+config USB_WUSB
+ tristate "Enable Wireless USB extensions (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on USB
+ select UWB
+ select CRYPTO
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_CBC
+ select CRYPTO_MANAGER
+ select CRYPTO_AES
+ help
+ Enable the host-side support for Wireless USB.
+
+ To compile this support select Y (built in). It is safe to
+ select even if you don't have the hardware.
+
+config USB_WUSB_CBAF
+ tristate "Support WUSB Cable Based Association (CBA)"
+ depends on USB
+ help
+ Some WUSB devices support Cable Based Association. It's used to
+ enable the secure communication between the host and the
+ device.
+
+ Enable this option if your WUSB device must be connected
+ via wired USB before establishing a wireless link.
+
+ It is safe to select even if you don't have compatible
+ hardware.
+
+config USB_WUSB_CBAF_DEBUG
+ bool "Enable CBA debug messages"
+ depends on USB_WUSB_CBAF
+ help
+ Say Y here if you want the CBA to produce a bunch of debug messages
+ to the system log. Select this if you are having a problem with
+ CBA support and want to see more of what is going on.
+
diff --git a/drivers/usb/wusbcore/Makefile b/drivers/usb/wusbcore/Makefile
new file mode 100644
index 00000000000..75f1ade6625
--- /dev/null
+++ b/drivers/usb/wusbcore/Makefile
@@ -0,0 +1,26 @@
+obj-$(CONFIG_USB_WUSB) += wusbcore.o
+obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o
+obj-$(CONFIG_USB_WUSB_CBAF) += wusb-cbaf.o
+
+
+wusbcore-objs := \
+ crypto.o \
+ devconnect.o \
+ dev-sysfs.o \
+ mmc.o \
+ pal.o \
+ rh.o \
+ reservation.o \
+ security.o \
+ wusbhc.o
+
+wusb-cbaf-objs := cbaf.o
+
+wusb-wa-objs := wa-hc.o \
+ wa-nep.o \
+ wa-rpipe.o \
+ wa-xfer.o
+
+ifeq ($(CONFIG_USB_WUSB_CBAF_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
new file mode 100644
index 00000000000..ab4788d1785
--- /dev/null
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -0,0 +1,673 @@
+/*
+ * Wireless USB - Cable Based Association
+ *
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * WUSB devices have to be paired (associated in WUSB lingo) so
+ * that they can connect to the system.
+ *
+ * One way of pairing is CBA (Cable Based Association). The first time
+ * you plug the device in with a cable, association is done between the
+ * host and the device; on subsequent connections you can connect
+ * wirelessly without having to associate again. That's the idea.
+ *
+ * This driver does nothing earth-shattering. It just provides an
+ * interface to chat with the wire-connected device so we can get a
+ * CDID (device ID) that might have been previously associated to a
+ * CHID (host ID) and to set up a new <CHID,CDID,CK> triplet
+ * (connection context), with the CK being the secret, or connection
+ * key. This is the pairing data.
+ *
+ * When a device with the CBA capability connects, the probe routine
+ * just creates a set of sysfs files that a user space enumeration
+ * manager uses to decide whether or not to allow the device to connect
+ * wirelessly to the system.
+ *
+ * The process goes like this:
+ *
+ * 1. Device plugs, cbaf is loaded, notifications happen.
+ *
+ * 2. The connection manager (CM) sees a device with CBAF capability
+ * (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE).
+ *
+ * 3. The CM writes the host name, supported band groups, and the CHID
+ * (host ID) into the wusb_host_name, wusb_host_band_groups and
+ * wusb_chid files. These get sent to the device and the CDID (if
+ * any) for this host is requested.
+ *
+ * 4. The CM can verify that the device's supported band groups
+ * (wusb_device_band_groups) are compatible with the host.
+ *
+ * 5. The CM reads the wusb_cdid file.
+ *
+ * 6. The CM looks up its database
+ *
+ * 6.1 If it has a matching CHID,CDID entry, the device has been
+ * authorized before (paired) and nothing further needs to be
+ * done.
+ *
+ * 6.2 If the CDID is zero (or the CM doesn't find a matching CDID in
+ * its database), the device is assumed to be unknown. The CM
+ * may associate the host with the device by writing a randomly
+ * generated CDID to wusb_cdid and then a random CK to wusb_ck
+ * (this uploads the new CC to the device).
+ *
+ * The CM may choose to prompt the user before associating with a new
+ * device.
+ *
+ * 7. Device is unplugged.
+ *
+ * When the device tries to connect wirelessly, it will present its
+ * CDID to the WUSB host controller. The CM will query the
+ * database. If the CHID/CDID pair is found, it will (with a 4-way
+ * handshake) challenge the device to demonstrate it has the CK secret
+ * key (from our database) without actually exchanging it. Once
+ * satisfied, crypto keys are derived from the CK, the device is
+ * connected and all communication is encrypted.
+ *
+ * References:
+ * [WUSB-AM] Association Models Supplement to the Certified Wireless
+ * Universal Serial Bus Specification, version 1.0.
+ */
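[Editor's sketch, not part of the patch] To make the steps above concrete, this is roughly what a user space connection manager could do with the sysfs files this driver creates. The device path and host name are invented for the example, and a real CM would check every return value and generate the CDID/CK randomly.

	#include <stdio.h>

	/* Write one value into a cbaf sysfs attribute. */
	static int cm_write(const char *dir, const char *attr, const char *val)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), "%s/%s", dir, attr);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		/* Hypothetical sysfs directory for the plugged-in interface. */
		const char *dir = "/sys/bus/usb/devices/1-2:1.0";

		cm_write(dir, "wusb_host_name", "myhost");	/* step 3 */
		cm_write(dir, "wusb_chid",
			 "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff");
		/* Steps 4-6: read wusb_device_band_groups and wusb_cdid,
		 * look the CDID up in the CM's database, and if it is not
		 * known write a fresh random wusb_cdid and then wusb_ck to
		 * pair with the device. */
		return 0;
	}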
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/version.h>
+#include <linux/usb.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+#include <linux/mutex.h>
+#include <linux/uwb.h>
+#include <linux/usb/wusb.h>
+#include <linux/usb/association.h>
+
+#define CBA_NAME_LEN 0x40 /* [WUSB-AM] table 4-7 */
+
+/* An instance of a Cable-Based-Association-Framework device */
+struct cbaf {
+ struct usb_device *usb_dev;
+ struct usb_interface *usb_iface;
+ void *buffer;
+ size_t buffer_size;
+
+ struct wusb_ckhdid chid;
+ char host_name[CBA_NAME_LEN];
+ u16 host_band_groups;
+
+ struct wusb_ckhdid cdid;
+ char device_name[CBA_NAME_LEN];
+ u16 device_band_groups;
+
+ struct wusb_ckhdid ck;
+};
+
+/*
+ * Verify that a CBAF USB-interface has what we need
+ *
+ * According to [WUSB-AM], CBA devices should support at least two
+ * association requests:
+ * - RETRIEVE_HOST_INFO
+ * - ASSOCIATE
+ *
+ * If the device doesn't support these association requests, we do not
+ * know how to deal with it.
+ */
+static int cbaf_check(struct cbaf *cbaf)
+{
+ int result;
+ struct device *dev = &cbaf->usb_iface->dev;
+ struct wusb_cbaf_assoc_info *assoc_info;
+ struct wusb_cbaf_assoc_request *assoc_request;
+ size_t assoc_size;
+ void *itr, *top;
+ int ar_rhi = 0, ar_assoc = 0;
+
+ result = usb_control_msg(
+ cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
+ CBAF_REQ_GET_ASSOCIATION_INFORMATION,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ cbaf->buffer, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
+ if (result < 0) {
+ dev_err(dev, "Cannot get available association types: %d\n",
+ result);
+ return result;
+ }
+
+ assoc_info = cbaf->buffer;
+ if (result < sizeof(*assoc_info)) {
+ dev_err(dev, "Not enough data to decode association info "
+ "header (%zu vs %zu bytes required)\n",
+ (size_t)result, sizeof(*assoc_info));
+ return result;
+ }
+
+ assoc_size = le16_to_cpu(assoc_info->Length);
+ if (result < assoc_size) {
+ dev_err(dev, "Not enough data to decode association info "
+ "(%zu vs %zu bytes required)\n",
+ (size_t)assoc_size, sizeof(*assoc_info));
+ return result;
+ }
+ /*
+ * From now on, we just verify, but won't error out unless we
+ * don't find the AR_TYPE_WUSB_{RETRIEVE_HOST_INFO,ASSOCIATE}
+ * types.
+ */
+ itr = cbaf->buffer + sizeof(*assoc_info);
+ top = cbaf->buffer + assoc_size;
+ dev_dbg(dev, "Found %u association requests (%zu bytes)\n",
+ assoc_info->NumAssociationRequests, assoc_size);
+
+ while (itr < top) {
+ u16 ar_type, ar_subtype;
+ u32 ar_size;
+ const char *ar_name;
+
+ assoc_request = itr;
+
+ if (top - itr < sizeof(*assoc_request)) {
+ dev_err(dev, "Not enough data to decode associaton "
+ "request (%zu vs %zu bytes needed)\n",
+ top - itr, sizeof(*assoc_request));
+ break;
+ }
+
+ ar_type = le16_to_cpu(assoc_request->AssociationTypeId);
+ ar_subtype = le16_to_cpu(assoc_request->AssociationSubTypeId);
+ ar_size = le32_to_cpu(assoc_request->AssociationTypeInfoSize);
+ ar_name = "unknown";
+
+ switch (ar_type) {
+ case AR_TYPE_WUSB:
+ /* Verify we have what is mandated by [WUSB-AM]. */
+ switch (ar_subtype) {
+ case AR_TYPE_WUSB_RETRIEVE_HOST_INFO:
+ ar_name = "RETRIEVE_HOST_INFO";
+ ar_rhi = 1;
+ break;
+ case AR_TYPE_WUSB_ASSOCIATE:
+ /* send assoc data */
+ ar_name = "ASSOCIATE";
+ ar_assoc = 1;
+ break;
+ };
+ break;
+ };
+
+ dev_dbg(dev, "Association request #%02u: 0x%04x/%04x "
+ "(%zu bytes): %s\n",
+ assoc_request->AssociationDataIndex, ar_type,
+ ar_subtype, (size_t)ar_size, ar_name);
+
+ itr += sizeof(*assoc_request);
+ }
+
+ if (!ar_rhi) {
+ dev_err(dev, "Missing RETRIEVE_HOST_INFO association "
+ "request\n");
+ return -EINVAL;
+ }
+ if (!ar_assoc) {
+ dev_err(dev, "Missing ASSOCIATE association request\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct wusb_cbaf_host_info cbaf_host_info_defaults = {
+ .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
+ .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
+ .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
+ .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO),
+ .CHID_hdr = WUSB_AR_CHID,
+ .LangID_hdr = WUSB_AR_LangID,
+ .HostFriendlyName_hdr = WUSB_AR_HostFriendlyName,
+};
+
+/* Send WUSB host information (CHID and name) to a CBAF device */
+static int cbaf_send_host_info(struct cbaf *cbaf)
+{
+ struct wusb_cbaf_host_info *hi;
+ size_t name_len;
+ size_t hi_size;
+
+ hi = cbaf->buffer;
+ memset(hi, 0, sizeof(*hi));
+ *hi = cbaf_host_info_defaults;
+ hi->CHID = cbaf->chid;
+ hi->LangID = 0; /* FIXME: I guess... */
+ strlcpy(hi->HostFriendlyName, cbaf->host_name, CBA_NAME_LEN);
+ name_len = strlen(cbaf->host_name);
+ hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len);
+ hi_size = sizeof(*hi) + name_len;
+
+ return usb_control_msg(cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
+ CBAF_REQ_SET_ASSOCIATION_RESPONSE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0x0101,
+ cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ hi, hi_size, 1000 /* FIXME: arbitrary */);
+}
+
+/*
+ * Get device's information (CDID) associated to CHID
+ *
+ * The device will return its information (CDID, name, bandgroups)
+ * associated to the CHID we have set before, or 0 CDID and default
+ * name and bandgroup if no CHID set or unknown.
+ */
+static int cbaf_cdid_get(struct cbaf *cbaf)
+{
+ int result;
+ struct device *dev = &cbaf->usb_iface->dev;
+ struct wusb_cbaf_device_info *di;
+ size_t needed;
+
+ di = cbaf->buffer;
+ result = usb_control_msg(
+ cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
+ CBAF_REQ_GET_ASSOCIATION_REQUEST,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ di, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
+ if (result < 0) {
+ dev_err(dev, "Cannot request device information: %d\n", result);
+ return result;
+ }
+
+ needed = result < sizeof(*di) ? sizeof(*di) : le32_to_cpu(di->Length);
+ if (result < needed) {
+ dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs "
+ "%zu bytes needed)\n", (size_t)result, needed);
+ return -EIO;
+ }
+
+ strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN);
+ cbaf->cdid = di->CDID;
+ cbaf->device_band_groups = le16_to_cpu(di->BandGroups);
+
+ return 0;
+}
+
+static ssize_t cbaf_wusb_chid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+ char pr_chid[WUSB_CKHDID_STRSIZE];
+
+ ckhdid_printf(pr_chid, sizeof(pr_chid), &cbaf->chid);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", pr_chid);
+}
+
+static ssize_t cbaf_wusb_chid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ result = sscanf(buf,
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx",
+ &cbaf->chid.data[0] , &cbaf->chid.data[1],
+ &cbaf->chid.data[2] , &cbaf->chid.data[3],
+ &cbaf->chid.data[4] , &cbaf->chid.data[5],
+ &cbaf->chid.data[6] , &cbaf->chid.data[7],
+ &cbaf->chid.data[8] , &cbaf->chid.data[9],
+ &cbaf->chid.data[10], &cbaf->chid.data[11],
+ &cbaf->chid.data[12], &cbaf->chid.data[13],
+ &cbaf->chid.data[14], &cbaf->chid.data[15]);
+
+ if (result != 16)
+ return -EINVAL;
+
+ result = cbaf_send_host_info(cbaf);
+ if (result < 0)
+ return result;
+ result = cbaf_cdid_get(cbaf);
+ if (result < 0)
+ return result;
+ return size;
+}
+static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store);
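+/*
+ * Illustrative use of the attribute above from user space (the sysfs
+ * path is a made-up example; the format follows the sscanf() above):
+ * writing 16 space-separated hex bytes sets the CHID, sends the host
+ * info to the device and fetches the CDID back:
+ *
+ *   echo "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" \
+ *        > /sys/bus/usb/devices/<iface>/wusb_chid
+ */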
+
+static ssize_t cbaf_wusb_host_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->host_name);
+}
+
+static ssize_t cbaf_wusb_host_name_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ result = sscanf(buf, "%63s", cbaf->host_name);
+ if (result != 1)
+ return -EINVAL;
+
+ return size;
+}
+static DEVICE_ATTR(wusb_host_name, 0600, cbaf_wusb_host_name_show,
+ cbaf_wusb_host_name_store);
+
+static ssize_t cbaf_wusb_host_band_groups_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->host_band_groups);
+}
+
+static ssize_t cbaf_wusb_host_band_groups_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+ u16 band_groups = 0;
+
+ result = sscanf(buf, "%04hx", &band_groups);
+ if (result != 1)
+ return -EINVAL;
+
+ cbaf->host_band_groups = band_groups;
+
+ return size;
+}
+
+static DEVICE_ATTR(wusb_host_band_groups, 0600,
+ cbaf_wusb_host_band_groups_show,
+ cbaf_wusb_host_band_groups_store);
+
+static const struct wusb_cbaf_device_info cbaf_device_info_defaults = {
+ .Length_hdr = WUSB_AR_Length,
+ .CDID_hdr = WUSB_AR_CDID,
+ .BandGroups_hdr = WUSB_AR_BandGroups,
+ .LangID_hdr = WUSB_AR_LangID,
+ .DeviceFriendlyName_hdr = WUSB_AR_DeviceFriendlyName,
+};
+
+static ssize_t cbaf_wusb_cdid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+ char pr_cdid[WUSB_CKHDID_STRSIZE];
+
+ ckhdid_printf(pr_cdid, sizeof(pr_cdid), &cbaf->cdid);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", pr_cdid);
+}
+
+static ssize_t cbaf_wusb_cdid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+ struct wusb_ckhdid cdid;
+
+ result = sscanf(buf,
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx",
+ &cdid.data[0] , &cdid.data[1],
+ &cdid.data[2] , &cdid.data[3],
+ &cdid.data[4] , &cdid.data[5],
+ &cdid.data[6] , &cdid.data[7],
+ &cdid.data[8] , &cdid.data[9],
+ &cdid.data[10], &cdid.data[11],
+ &cdid.data[12], &cdid.data[13],
+ &cdid.data[14], &cdid.data[15]);
+ if (result != 16)
+ return -EINVAL;
+
+ cbaf->cdid = cdid;
+
+ return size;
+}
+static DEVICE_ATTR(wusb_cdid, 0600, cbaf_wusb_cdid_show, cbaf_wusb_cdid_store);
+
+static ssize_t cbaf_wusb_device_band_groups_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->device_band_groups);
+}
+
+static DEVICE_ATTR(wusb_device_band_groups, 0600,
+ cbaf_wusb_device_band_groups_show,
+ NULL);
+
+static ssize_t cbaf_wusb_device_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->device_name);
+}
+static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL);
+
+static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = {
+ .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
+ .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
+ .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
+ .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE),
+ .Length_hdr = WUSB_AR_Length,
+ .Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
+ .ConnectionContext_hdr = WUSB_AR_ConnectionContext,
+ .BandGroups_hdr = WUSB_AR_BandGroups,
+};
+
+static const struct wusb_cbaf_cc_data_fail cbaf_cc_data_fail_defaults = {
+ .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
+ .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
+ .Length_hdr = WUSB_AR_Length,
+ .AssociationStatus_hdr = WUSB_AR_AssociationStatus,
+};
+
+/*
+ * Send a new CC to the device.
+ */
+static int cbaf_cc_upload(struct cbaf *cbaf)
+{
+ int result;
+ struct device *dev = &cbaf->usb_iface->dev;
+ struct wusb_cbaf_cc_data *ccd;
+ char pr_cdid[WUSB_CKHDID_STRSIZE];
+
+ ccd = cbaf->buffer;
+ *ccd = cbaf_cc_data_defaults;
+ ccd->CHID = cbaf->chid;
+ ccd->CDID = cbaf->cdid;
+ ccd->CK = cbaf->ck;
+ ccd->BandGroups = cpu_to_le16(cbaf->host_band_groups);
+
+ dev_dbg(dev, "Trying to upload CC:\n");
+ ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CHID);
+ dev_dbg(dev, " CHID %s\n", pr_cdid);
+ ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CDID);
+ dev_dbg(dev, " CDID %s\n", pr_cdid);
+ dev_dbg(dev, " Bandgroups 0x%04x\n", cbaf->host_band_groups);
+
+ result = usb_control_msg(
+ cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
+ CBAF_REQ_SET_ASSOCIATION_RESPONSE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ ccd, sizeof(*ccd), 1000 /* FIXME: arbitrary */);
+
+ return result;
+}
+
+static ssize_t cbaf_wusb_ck_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ result = sscanf(buf,
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx",
+ &cbaf->ck.data[0] , &cbaf->ck.data[1],
+ &cbaf->ck.data[2] , &cbaf->ck.data[3],
+ &cbaf->ck.data[4] , &cbaf->ck.data[5],
+ &cbaf->ck.data[6] , &cbaf->ck.data[7],
+ &cbaf->ck.data[8] , &cbaf->ck.data[9],
+ &cbaf->ck.data[10], &cbaf->ck.data[11],
+ &cbaf->ck.data[12], &cbaf->ck.data[13],
+ &cbaf->ck.data[14], &cbaf->ck.data[15]);
+ if (result != 16)
+ return -EINVAL;
+
+ result = cbaf_cc_upload(cbaf);
+ if (result < 0)
+ return result;
+
+ return size;
+}
+static DEVICE_ATTR(wusb_ck, 0600, NULL, cbaf_wusb_ck_store);
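+/*
+ * Putting the attributes together, one plausible association sequence
+ * driven from user space looks like this (a sketch inferred from the
+ * handlers above, not a normative procedure):
+ *
+ *   1. write wusb_host_name and wusb_host_band_groups
+ *   2. write wusb_chid        (sends host info, retrieves the CDID)
+ *   3. read  wusb_cdid and wusb_device_name to identify the device
+ *   4. write wusb_cdid        (possibly a freshly generated one)
+ *   5. write wusb_ck          (uploads the connection context, CC)
+ */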
+
+static struct attribute *cbaf_dev_attrs[] = {
+ &dev_attr_wusb_host_name.attr,
+ &dev_attr_wusb_host_band_groups.attr,
+ &dev_attr_wusb_chid.attr,
+ &dev_attr_wusb_cdid.attr,
+ &dev_attr_wusb_device_name.attr,
+ &dev_attr_wusb_device_band_groups.attr,
+ &dev_attr_wusb_ck.attr,
+ NULL,
+};
+
+static struct attribute_group cbaf_dev_attr_group = {
+ .name = NULL, /* we want them in the same directory */
+ .attrs = cbaf_dev_attrs,
+};
+
+static int cbaf_probe(struct usb_interface *iface,
+ const struct usb_device_id *id)
+{
+ struct cbaf *cbaf;
+ struct device *dev = &iface->dev;
+ int result = -ENOMEM;
+
+ cbaf = kzalloc(sizeof(*cbaf), GFP_KERNEL);
+ if (cbaf == NULL)
+ goto error_kzalloc;
+ cbaf->buffer = kmalloc(512, GFP_KERNEL);
+ if (cbaf->buffer == NULL)
+ goto error_kmalloc_buffer;
+
+ cbaf->buffer_size = 512;
+ cbaf->usb_dev = usb_get_dev(interface_to_usbdev(iface));
+ cbaf->usb_iface = usb_get_intf(iface);
+ result = cbaf_check(cbaf);
+ if (result < 0) {
+ dev_err(dev, "This device is not WUSB-CBAF compliant"
+ "and is not supported yet.\n");
+ goto error_check;
+ }
+
+ result = sysfs_create_group(&dev->kobj, &cbaf_dev_attr_group);
+ if (result < 0) {
+ dev_err(dev, "Can't register sysfs attr group: %d\n", result);
+ goto error_create_group;
+ }
+ usb_set_intfdata(iface, cbaf);
+ return 0;
+
+error_create_group:
+error_check:
+ usb_put_intf(cbaf->usb_iface);
+ usb_put_dev(cbaf->usb_dev);
+ kfree(cbaf->buffer);
+error_kmalloc_buffer:
+ kfree(cbaf);
+error_kzalloc:
+ return result;
+}
+
+static void cbaf_disconnect(struct usb_interface *iface)
+{
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+ struct device *dev = &iface->dev;
+ sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group);
+ usb_set_intfdata(iface, NULL);
+ usb_put_intf(iface);
+ usb_put_dev(cbaf->usb_dev);
+ kfree(cbaf->buffer);
+ /* paranoia: clean up crypto keys */
+ memset(cbaf, 0, sizeof(*cbaf));
+ kfree(cbaf);
+}
+
+static struct usb_device_id cbaf_id_table[] = {
+ { USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, cbaf_id_table);
+
+static struct usb_driver cbaf_driver = {
+ .name = "wusb-cbaf",
+ .id_table = cbaf_id_table,
+ .probe = cbaf_probe,
+ .disconnect = cbaf_disconnect,
+};
+
+static int __init cbaf_driver_init(void)
+{
+ return usb_register(&cbaf_driver);
+}
+module_init(cbaf_driver_init);
+
+static void __exit cbaf_driver_exit(void)
+{
+ usb_deregister(&cbaf_driver);
+}
+module_exit(cbaf_driver_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Wireless USB Cable Based Association");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
new file mode 100644
index 00000000000..c36c4389baa
--- /dev/null
+++ b/drivers/usb/wusbcore/crypto.c
@@ -0,0 +1,538 @@
+/*
+ * Ultra Wide Band
+ * AES-128 CCM Encryption
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * We don't do any encryption here; we use the Linux Kernel's AES-128
+ * crypto modules to construct keys and payload blocks in a way
+ * defined by WUSB1.0[6]. Check the errata, as typos are patched
+ * there.
+ *
+ * Thanks a zillion to John Keys for his help and clarifications over
+ * the designed-by-a-committee text.
+ *
+ * So the idea is that there is this basic Pseudo-Random-Function
+ * defined in WUSB1.0[6.5] which is the core of everything. It works
+ * by tweaking some blocks, AES crypting them and then xoring
+ * something else with them (this seems to be called CBC(AES) -- can
+ * you tell I know jack about crypto?). So we just funnel it into the
+ * Linux Crypto API.
+ *
+ * We leave a crypto test module so we can verify that vectors match,
+ * every now and then.
+ *
+ * Block size: 16 bytes -- AES seems to do things in 'block sizes'. I
+ * am learning a lot...
+ *
+ * Conveniently, some data structures that need to be
+ * funneled through AES are...16 bytes in size!
+ */
+
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/uwb.h>
+#include <linux/usb/wusb.h>
+#include <linux/scatterlist.h>
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+
+/*
+ * Block of data, as understood by AES-CCM
+ *
+ * The code assumes this structure is nothing but a 16 byte array
+ * (packed in a struct to avoid common mess ups that I usually do with
+ * arrays and enforcing type checking).
+ */
+struct aes_ccm_block {
+ u8 data[16];
+} __attribute__((packed));
+
+/*
+ * Counter-mode Blocks (WUSB1.0[6.4])
+ *
+ * According to CCM (or so it seems), for the purpose of calculating
+ * the MIC, the message is broken in N counter-mode blocks, B0, B1,
+ * ... BN.
+ *
+ * B0 contains flags, the CCM nonce and l(m).
+ *
+ * B1 contains l(a), the MAC header, the encryption offset and padding.
+ *
+ * If EO is nonzero, additional blocks are built from payload bytes
+ * until EO is exhausted (FIXME: padding to 16 bytes, I guess). The
+ * padding is not xmitted.
+ */
+
+/* WUSB1.0[T6.4] */
+struct aes_ccm_b0 {
+ u8 flags; /* 0x59, per CCM spec */
+ struct aes_ccm_nonce ccm_nonce;
+ __be16 lm;
+} __attribute__((packed));
+
+/* WUSB1.0[T6.5] */
+struct aes_ccm_b1 {
+ __be16 la;
+ u8 mac_header[10];
+ __le16 eo;
+ u8 security_reserved; /* This is always zero */
+ u8 padding; /* 0 */
+} __attribute__((packed));
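+/*
+ * Byte-level sketch of the two 16-byte blocks above (sizes inferred
+ * from the WARN_ON() checks in wusb_ccm_mac(), which require every
+ * block to be exactly one AES block):
+ *
+ *   B0: flags(1) | CCM nonce(13) | l(m)(2)                      = 16 bytes
+ *   B1: l(a)(2) | MAC header(10) | EO(2) | reserved(1) | pad(1) = 16 bytes
+ */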
+
+/*
+ * Encryption Blocks (WUSB1.0[6.4.4])
+ *
+ * CCM uses Ax blocks to generate a keystream with which the MIC and
+ * the message's payload are encoded. A0 always encrypts/decrypts the
+ * MIC. Ax (x>0) are used for the successive payload blocks.
+ *
+ * The x is the counter, and is increased for each block.
+ */
+struct aes_ccm_a {
+ u8 flags; /* 0x01, per CCM spec */
+ struct aes_ccm_nonce ccm_nonce;
+ __be16 counter; /* Value of x */
+} __attribute__((packed));
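+/*
+ * In code terms (see wusb_ccm_mac() below): the keystream block for
+ * the MIC is S0 = AES_K(A0), and what goes on the air is the first 8
+ * bytes of S0 XOR CBC-MAC -- that is exactly what the
+ * crypto_cipher_encrypt_one() + bytewise_xor() pair at the end of
+ * that function computes.
+ */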
+
+static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
+ size_t size)
+{
+ u8 *bo = _bo;
+ const u8 *bi1 = _bi1, *bi2 = _bi2;
+ size_t itr;
+ for (itr = 0; itr < size; itr++)
+ bo[itr] = bi1[itr] ^ bi2[itr];
+}
+
+/*
+ * CC-MAC function WUSB1.0[6.5]
+ *
+ * Take a data string and produce the encrypted CBC Counter-mode MIC
+ *
+ * Note the names for most function arguments are made to (more or
+ * less) match those used in the pseudo-function definition given in
+ * WUSB1.0[6.5].
+ *
+ * @tfm_cbc: CBC(AES) blkcipher handle (initialized)
+ *
+ * @tfm_aes: AES cipher handle (initialized)
+ *
+ * @mic: buffer for placing the computed MIC (Message Integrity
+ * Code). This is exactly 8 bytes, and we expect the buffer to
+ * be at least eight bytes in length.
+ *
+ * @key: 128 bit symmetric key
+ *
+ * @n: CCM nonce
+ *
+ * @a: ASCII string, 14 bytes long (I guess zero padded if needed;
+ * we use exactly 14 bytes).
+ *
+ * @b: data stream to be processed; cannot be a global or const local
+ * (will confuse the scatterlists)
+ *
+ * @blen: size of b...
+ *
+ * Still not very clear how this is done, but looks like this: we
+ * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with
+ * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we
+ * take the payload and divide it in blocks (16 bytes), xor them with
+ * the previous crypto result (16 bytes) and crypt it, repeat the next
+ * block with the output of the previous one, rinse wash (I guess this
+ * is what AES CBC mode means...but I truly have no idea). So we use
+ * the CBC(AES) blkcipher, that does precisely that. The IV (Initial
+ * Vector) is 16 bytes and is set to zero at each invocation, so the
+ * chaining always starts from a clean state (see the NOTE below).
+ *
+ * See rfc3610. Linux crypto has a CBC implementation, but the
+ * documentation is scarce, to say the least, and the example code is
+ * so intricate that it is difficult to understand how things work. Most
+ * of this is guess work -- bite me.
+ *
+ * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and
+ * using the 14 bytes of @a to fill up
+ * b1.{mac_header,eo,security_reserved,padding}.
+ *
+ * NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of
+ * l(m) is orthogonal; they bear no relationship, so it is not
+ * in conflict with the parameter relation that
+ * WUSB1.0[6.4.2] defines.
+ *
+ * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in
+ * first errata released on 2005/07.
+ *
+ * NOTE: we need to clean IV to zero at each invocation to make sure
+ * we start with a fresh empty Initial Vector, so that the CBC
+ * works ok.
+ *
+ * NOTE: blen is not aligned to a block size, we'll pad zeros, that's
+ * what sg[4] is for. Maybe there is a smarter way to do this.
+ */
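+/*
+ * Condensed version of the recipe above (a reading aid, not spec
+ * text): concatenate B0 | B1 | B | zero padding, run the result
+ * through CBC(AES) with a zero IV, take the last CBC block (which the
+ * blkcipher leaves in the IV) as the CBC-MAC, then XOR its first 8
+ * bytes with AES_K(A0) to obtain the MIC.
+ */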
+static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
+ struct crypto_cipher *tfm_aes, void *mic,
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a, const void *b,
+ size_t blen)
+{
+ int result = 0;
+ struct blkcipher_desc desc;
+ struct aes_ccm_b0 b0;
+ struct aes_ccm_b1 b1;
+ struct aes_ccm_a ax;
+ struct scatterlist sg[4], sg_dst;
+ void *iv, *dst_buf;
+ size_t ivsize, dst_size;
+ const u8 bzero[16] = { 0 };
+ size_t zero_padding;
+
+ d_fnstart(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, "
+ "n %p, a %p, b %p, blen %zu)\n",
+ tfm_cbc, tfm_aes, mic, n, a, b, blen);
+ /*
+ * These checks should be compile-time optimized out; they
+ * ensure @a fills b1's mac_header and the following fields.
+ */
+ WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
+ WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));
+
+ result = -ENOMEM;
+ zero_padding = blen % sizeof(struct aes_ccm_block);
+ if (zero_padding)
+ zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
+ dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
+ dst_buf = kzalloc(dst_size, GFP_KERNEL);
+ if (dst_buf == NULL) {
+ printk(KERN_ERR "E: can't alloc destination buffer\n");
+ goto error_dst_buf;
+ }
+
+ iv = crypto_blkcipher_crt(tfm_cbc)->iv;
+ ivsize = crypto_blkcipher_ivsize(tfm_cbc);
+ memset(iv, 0, ivsize);
+
+ /* Setup B0 */
+ b0.flags = 0x59; /* Format B0 */
+ b0.ccm_nonce = *n;
+ b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
+
+ /* Setup B1
+ *
+ * The WUSB spec is anything but clear! WUSB1.0[6.5]
+ * says to initialize B1 from A with 'l(a) = blen +
+ * 14'; after clarification, it means to use A's contents
+ * for the MAC Header, EO, security reserved and padding fields.
+ */
+ b1.la = cpu_to_be16(blen + 14);
+ memcpy(&b1.mac_header, a, sizeof(*a));
+
+ d_printf(4, NULL, "I: B0 (%zu bytes)\n", sizeof(b0));
+ d_dump(4, NULL, &b0, sizeof(b0));
+ d_printf(4, NULL, "I: B1 (%zu bytes)\n", sizeof(b1));
+ d_dump(4, NULL, &b1, sizeof(b1));
+ d_printf(4, NULL, "I: B (%zu bytes)\n", blen);
+ d_dump(4, NULL, b, blen);
+ d_printf(4, NULL, "I: B 0-padding (%zu bytes)\n", zero_padding);
+ d_printf(4, NULL, "D: IV before crypto (%zu)\n", ivsize);
+ d_dump(4, NULL, iv, ivsize);
+
+ sg_init_table(sg, ARRAY_SIZE(sg));
+ sg_set_buf(&sg[0], &b0, sizeof(b0));
+ sg_set_buf(&sg[1], &b1, sizeof(b1));
+ sg_set_buf(&sg[2], b, blen);
+ /* 0 if well behaved :) */
+ sg_set_buf(&sg[3], bzero, zero_padding);
+ sg_init_one(&sg_dst, dst_buf, dst_size);
+
+ desc.tfm = tfm_cbc;
+ desc.flags = 0;
+ result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
+ if (result < 0) {
+ printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
+ result);
+ goto error_cbc_crypt;
+ }
+ d_printf(4, NULL, "D: MIC tag\n");
+ d_dump(4, NULL, iv, ivsize);
+
+ /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5]
+ * The procedure is to AES-crypt the A0 block and XOR the MIC
+ * Tag against it; we only do the first 8 bytes and place them
+ * directly in the destination buffer.
+ *
+ * POS Crypto API: size is assumed to be AES's block size.
+ * Thanks for documenting it -- tip taken from airo.c
+ */
+ ax.flags = 0x01; /* as per WUSB 1.0 spec */
+ ax.ccm_nonce = *n;
+ ax.counter = 0;
+ crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
+ bytewise_xor(mic, &ax, iv, 8);
+ d_printf(4, NULL, "D: CTR[MIC]\n");
+ d_dump(4, NULL, &ax, 8);
+ d_printf(4, NULL, "D: CCM-MIC tag\n");
+ d_dump(4, NULL, mic, 8);
+ result = 8;
+error_cbc_crypt:
+ kfree(dst_buf);
+error_dst_buf:
+ d_fnend(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, "
+ "n %p, a %p, b %p, blen %zu)\n",
+ tfm_cbc, tfm_aes, mic, n, a, b, blen);
+ return result;
+}
+
+/*
+ * WUSB Pseudo Random Function (WUSB1.0[6.5])
+ *
+ * @b: buffer to the source data; cannot be a global or const local
+ * (will confuse the scatterlists)
+ */
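+/*
+ * Note on sizes, inferred from the loop below: @len is measured in
+ * bits and every wusb_ccm_mac() pass yields 64 bits (8 bytes), so for
+ * example len = 256 runs the loop (256 + 63) / 64 = 4 times and
+ * writes 32 bytes into @out.
+ */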
+ssize_t wusb_prf(void *out, size_t out_size,
+ const u8 key[16], const struct aes_ccm_nonce *_n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen, size_t len)
+{
+ ssize_t result, bytes = 0, bitr;
+ struct aes_ccm_nonce n = *_n;
+ struct crypto_blkcipher *tfm_cbc;
+ struct crypto_cipher *tfm_aes;
+ u64 sfn = 0;
+ __le64 sfn_le;
+
+ d_fnstart(3, NULL, "(out %p, out_size %zu, key %p, _n %p, "
+ "a %p, b %p, blen %zu, len %zu)\n", out, out_size,
+ key, _n, a, b, blen, len);
+
+ tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm_cbc)) {
+ result = PTR_ERR(tfm_cbc);
+ printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
+ goto error_alloc_cbc;
+ }
+ result = crypto_blkcipher_setkey(tfm_cbc, key, 16);
+ if (result < 0) {
+ printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
+ goto error_setkey_cbc;
+ }
+
+ tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm_aes)) {
+ result = PTR_ERR(tfm_aes);
+ printk(KERN_ERR "E: can't load AES: %d\n", (int)result);
+ goto error_alloc_aes;
+ }
+ result = crypto_cipher_setkey(tfm_aes, key, 16);
+ if (result < 0) {
+ printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
+ goto error_setkey_aes;
+ }
+
+ for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
+ sfn_le = cpu_to_le64(sfn++);
+ memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */
+ result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
+ &n, a, b, blen);
+ if (result < 0)
+ goto error_ccm_mac;
+ bytes += result;
+ }
+ result = bytes;
+error_ccm_mac:
+error_setkey_aes:
+ crypto_free_cipher(tfm_aes);
+error_alloc_aes:
+error_setkey_cbc:
+ crypto_free_blkcipher(tfm_cbc);
+error_alloc_cbc:
+ d_fnend(3, NULL, "(out %p, out_size %zu, key %p, _n %p, "
+ "a %p, b %p, blen %zu, len %zu) = %d\n", out, out_size,
+ key, _n, a, b, blen, len, (int)bytes);
+ return result;
+}
+
+/* WUSB1.0[A.2] test vectors */
+static const u8 stv_hsmic_key[16] = {
+ 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
+ 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
+};
+
+static const struct aes_ccm_nonce stv_hsmic_n = {
+ .sfn = { 0 },
+ .tkid = { 0x76, 0x98, 0x01, },
+ .dest_addr = { .data = { 0xbe, 0x00 } },
+ .src_addr = { .data = { 0x76, 0x98 } },
+};
+
+/*
+ * Out-of-band MIC Generation verification code
+ *
+ */
+static int wusb_oob_mic_verify(void)
+{
+ int result;
+ u8 mic[8];
+ /* WUSB1.0[A.2] test vectors
+ *
+ * Need to keep it in the local stack as GCC 4.1.3something
+ * messes up and generates noise.
+ */
+ struct usb_handshake stv_hsmic_hs = {
+ .bMessageNumber = 2,
+ .bStatus = 00,
+ .tTKID = { 0x76, 0x98, 0x01 },
+ .bReserved = 00,
+ .CDID = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+ 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
+ 0x3c, 0x3d, 0x3e, 0x3f },
+ .nonce = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
+ 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
+ 0x2c, 0x2d, 0x2e, 0x2f },
+ .MIC = { 0x75, 0x6a, 0x97, 0x51, 0x0c, 0x8c,
+ 0x14, 0x7b } ,
+ };
+ size_t hs_size;
+
+ result = wusb_oob_mic(mic, stv_hsmic_key, &stv_hsmic_n, &stv_hsmic_hs);
+ if (result < 0)
+ printk(KERN_ERR "E: WUSB OOB MIC test: failed: %d\n", result);
+ else if (memcmp(stv_hsmic_hs.MIC, mic, sizeof(mic))) {
+ printk(KERN_ERR "E: OOB MIC test: "
+ "mismatch between MIC result and WUSB1.0[A2]\n");
+ hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC);
+ printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size);
+ dump_bytes(NULL, &stv_hsmic_hs, hs_size);
+ printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n",
+ sizeof(stv_hsmic_n));
+ dump_bytes(NULL, &stv_hsmic_n, sizeof(stv_hsmic_n));
+ printk(KERN_ERR "E: MIC out:\n");
+ dump_bytes(NULL, mic, sizeof(mic));
+ printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n");
+ dump_bytes(NULL, stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC));
+ result = -EINVAL;
+ } else
+ result = 0;
+ return result;
+}
+
+/*
+ * Test vectors for Key derivation
+ *
+ * These come from WUSB1.0[6.5.1], the vectors in WUSB1.0[A.1]
+ * (errata corrected in 2005/07).
+ */
+static const u8 stv_key_a1[16] __attribute__ ((__aligned__(4))) = {
+ 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87,
+ 0x78, 0x69, 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f
+};
+
+static const struct aes_ccm_nonce stv_keydvt_n_a1 = {
+ .sfn = { 0 },
+ .tkid = { 0x76, 0x98, 0x01, },
+ .dest_addr = { .data = { 0xbe, 0x00 } },
+ .src_addr = { .data = { 0x76, 0x98 } },
+};
+
+static const struct wusb_keydvt_out stv_keydvt_out_a1 = {
+ .kck = {
+ 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
+ 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
+ },
+ .ptk = {
+ 0xc8, 0x70, 0x62, 0x82, 0xb6, 0x7c, 0xe9, 0x06,
+ 0x7b, 0xc5, 0x25, 0x69, 0xf2, 0x36, 0x61, 0x2d
+ }
+};
+
+/*
+ * Perform a test to make sure we match the vectors defined in
+ * WUSB1.0[A.1](Errata2006/12)
+ */
+static int wusb_key_derive_verify(void)
+{
+ int result = 0;
+ struct wusb_keydvt_out keydvt_out;
+ /* These come from WUSB1.0[A.1] + 2006/12 errata
+ * NOTE: can't make this const or global -- somehow it seems
+ * the scatterlists for crypto get confused and we get
+ * bad data. There is no doc on this... */
+ struct wusb_keydvt_in stv_keydvt_in_a1 = {
+ .hnonce = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
+ },
+ .dnonce = {
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f
+ }
+ };
+
+ result = wusb_key_derive(&keydvt_out, stv_key_a1, &stv_keydvt_n_a1,
+ &stv_keydvt_in_a1);
+ if (result < 0)
+ printk(KERN_ERR "E: WUSB key derivation test: "
+ "derivation failed: %d\n", result);
+ else if (memcmp(&stv_keydvt_out_a1, &keydvt_out, sizeof(keydvt_out))) {
+ printk(KERN_ERR "E: WUSB key derivation test: "
+ "mismatch between key derivation result "
+ "and WUSB1.0[A1] Errata 2006/12\n");
+ printk(KERN_ERR "E: keydvt in: key (%zu bytes)\n",
+ sizeof(stv_key_a1));
+ dump_bytes(NULL, stv_key_a1, sizeof(stv_key_a1));
+ printk(KERN_ERR "E: keydvt in: nonce (%zu bytes)\n",
+ sizeof(stv_keydvt_n_a1));
+ dump_bytes(NULL, &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1));
+ printk(KERN_ERR "E: keydvt in: hnonce & dnonce (%zu bytes)\n",
+ sizeof(stv_keydvt_in_a1));
+ dump_bytes(NULL, &stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1));
+ printk(KERN_ERR "E: keydvt out: KCK\n");
+ dump_bytes(NULL, &keydvt_out.kck, sizeof(keydvt_out.kck));
+ printk(KERN_ERR "E: keydvt out: PTK\n");
+ dump_bytes(NULL, &keydvt_out.ptk, sizeof(keydvt_out.ptk));
+ result = -EINVAL;
+ } else
+ result = 0;
+ return result;
+}
+
+/*
+ * Initialize crypto system
+ *
+ * FIXME: we do nothing now, other than verifying. Later on we'll
+ * cache the encryption stuff, so that's why we have a separate init.
+ */
+int wusb_crypto_init(void)
+{
+ int result;
+
+ result = wusb_key_derive_verify();
+ if (result < 0)
+ return result;
+ return wusb_oob_mic_verify();
+}
+
+void wusb_crypto_exit(void)
+{
+ /* FIXME: free cached crypto transforms */
+}
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c
new file mode 100644
index 00000000000..7897a19652e
--- /dev/null
+++ b/drivers/usb/wusbcore/dev-sysfs.c
@@ -0,0 +1,143 @@
+/*
+ * WUSB devices
+ * sysfs bindings
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Get them out of the way...
+ */
+
+#include <linux/jiffies.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include "wusbhc.h"
+
+#undef D_LOCAL
+#define D_LOCAL 4
+#include <linux/uwb/debug.h>
+
+static ssize_t wusb_disconnect_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct usb_device *usb_dev;
+ struct wusbhc *wusbhc;
+ unsigned command;
+ u8 port_idx;
+
+ if (sscanf(buf, "%u", &command) != 1)
+ return -EINVAL;
+ if (command == 0)
+ return size;
+ usb_dev = to_usb_device(dev);
+ wusbhc = wusbhc_get_by_usb_dev(usb_dev);
+ if (wusbhc == NULL)
+ return -ENODEV;
+
+ mutex_lock(&wusbhc->mutex);
+ port_idx = wusb_port_no_to_idx(usb_dev->portnum);
+ __wusbhc_dev_disable(wusbhc, port_idx);
+ mutex_unlock(&wusbhc->mutex);
+ wusbhc_put(wusbhc);
+ return size;
+}
+static DEVICE_ATTR(wusb_disconnect, 0200, NULL, wusb_disconnect_store);
+
+static ssize_t wusb_cdid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t result;
+ struct wusb_dev *wusb_dev;
+
+ wusb_dev = wusb_dev_get_by_usb_dev(to_usb_device(dev));
+ if (wusb_dev == NULL)
+ return -ENODEV;
+ result = ckhdid_printf(buf, PAGE_SIZE, &wusb_dev->cdid);
+ strcat(buf, "\n");
+ wusb_dev_put(wusb_dev);
+ return result + 1;
+}
+static DEVICE_ATTR(wusb_cdid, 0444, wusb_cdid_show, NULL);
+
+static ssize_t wusb_ck_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int result;
+ struct usb_device *usb_dev;
+ struct wusbhc *wusbhc;
+ struct wusb_ckhdid ck;
+
+ result = sscanf(buf,
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx\n",
+ &ck.data[0] , &ck.data[1],
+ &ck.data[2] , &ck.data[3],
+ &ck.data[4] , &ck.data[5],
+ &ck.data[6] , &ck.data[7],
+ &ck.data[8] , &ck.data[9],
+ &ck.data[10], &ck.data[11],
+ &ck.data[12], &ck.data[13],
+ &ck.data[14], &ck.data[15]);
+ if (result != 16)
+ return -EINVAL;
+
+ usb_dev = to_usb_device(dev);
+ wusbhc = wusbhc_get_by_usb_dev(usb_dev);
+ if (wusbhc == NULL)
+ return -ENODEV;
+ result = wusb_dev_4way_handshake(wusbhc, usb_dev->wusb_dev, &ck);
+ memset(&ck, 0, sizeof(ck));
+ wusbhc_put(wusbhc);
+ return result < 0 ? result : size;
+}
+static DEVICE_ATTR(wusb_ck, 0200, NULL, wusb_ck_store);
+
+static struct attribute *wusb_dev_attrs[] = {
+ &dev_attr_wusb_disconnect.attr,
+ &dev_attr_wusb_cdid.attr,
+ &dev_attr_wusb_ck.attr,
+ NULL,
+};
+
+static struct attribute_group wusb_dev_attr_group = {
+ .name = NULL, /* we want them in the same directory */
+ .attrs = wusb_dev_attrs,
+};
+
+int wusb_dev_sysfs_add(struct wusbhc *wusbhc, struct usb_device *usb_dev,
+ struct wusb_dev *wusb_dev)
+{
+ int result = sysfs_create_group(&usb_dev->dev.kobj,
+ &wusb_dev_attr_group);
+ struct device *dev = &usb_dev->dev;
+ if (result < 0)
+ dev_err(dev, "Cannot register WUSB-dev attributes: %d\n",
+ result);
+ return result;
+}
+
+void wusb_dev_sysfs_rm(struct wusb_dev *wusb_dev)
+{
+ struct usb_device *usb_dev = wusb_dev->usb_dev;
+ if (usb_dev)
+ sysfs_remove_group(&usb_dev->dev.kobj, &wusb_dev_attr_group);
+}
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
new file mode 100644
index 00000000000..f45d777bef3
--- /dev/null
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -0,0 +1,1297 @@
+/*
+ * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
+ * Device Connect handling
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ * FIXME: this file needs to be broken up, it's grown too big
+ *
+ *
+ * WUSB1.0[7.1, 7.5.1]
+ *
+ * WUSB device connection is kind of messy. Some background:
+ *
+ * When a device wants to connect it scans the UWB radio channels
+ * looking for a WUSB Channel; a WUSB channel is defined by MMCs
+ * (Micro Managed Commands or something like that) [see
+ * Design-overview for more on this].
+ *
+ * So, the device scans the radio, finds MMCs and thus a host, and checks
+ * when the next DNTS is. It sends a Device Notification Connect
+ * (DN_Connect); the host picks it up (through nep.c and notif.c, ends
+ * up in wusb_devconnect_ack(), which creates a wusb_dev structure in
+ * wusbhc->port[port_number].wusb_dev), assigns an unauth address
+ * to the device (this means from 0x80 to 0xfe) and sends, in the MMC
+ * a Connect Ack Information Element (ConnAck IE).
+ *
+ * So now the device has a WUSB address. From now on, we use
+ * that to talk to it in the RPipes.
+ *
+ * ASSUMPTIONS:
+ *
+ * - We use as the device address the port number where it is
+ * connected (port 0 doesn't exist). For unauth, it is 128 + that.
+ *
+ * ROADMAP:
+ *
+ * This file contains the logic for doing that--entry points:
+ *
+ * wusb_devconnect_ack() Ack a device until _acked() called.
+ * Called by notif.c:wusb_handle_dn_connect()
+ * when a DN_Connect is received.
+ *
+ * wusbhc_devconnect_auth() Called by rh.c:wusbhc_rh_port_reset() when
+ * doing the device connect sequence.
+ *
+ * wusb_devconnect_acked() Ack done, release resources.
+ *
+ * wusb_handle_dn_alive() Called by notif.c:wusb_handle_dn()
+ * for processing a DN_Alive pong from a device.
+ *
+ * wusb_handle_dn_disconnect() Called by notif.c:wusb_handle_dn() to
+ * process a disconnect request from a
+ * device.
+ *
+ * wusb_dev_reset() Called by rh.c:wusbhc_rh_port_reset() when
+ * resetting a device.
+ *
+ * __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when
+ * disabling a port.
+ *
+ * wusb_devconnect_create() Called when creating the host by
+ * lc.c:wusbhc_create().
+ *
+ * wusb_devconnect_destroy() Cleanup called when removing the host. Called
+ * by lc.c:wusbhc_destroy().
+ *
+ * Each Wireless USB host maintains a list of DN_Connect requests
+ * (actually we maintain a list of pending Connect Acks, the
+ * wusbhc->ca_list).
+ *
+ * LIFE CYCLE OF port->wusb_dev
+ *
+ * Before the @wusbhc structure put()s the reference it owns for
+ * port->wusb_dev [and clean the wusb_dev pointer], it needs to
+ * lock @wusbhc->mutex.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include "wusbhc.h"
+
+#undef D_LOCAL
+#define D_LOCAL 1
+#include <linux/uwb/debug.h>
+
+static void wusbhc_devconnect_acked_work(struct work_struct *work);
+
+static void wusb_dev_free(struct wusb_dev *wusb_dev)
+{
+ if (wusb_dev) {
+ kfree(wusb_dev->set_gtk_req);
+ usb_free_urb(wusb_dev->set_gtk_urb);
+ kfree(wusb_dev);
+ }
+}
+
+static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
+{
+ struct wusb_dev *wusb_dev;
+ struct urb *urb;
+ struct usb_ctrlrequest *req;
+
+ wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL);
+ if (wusb_dev == NULL)
+ goto err;
+
+ wusb_dev->wusbhc = wusbhc;
+
+ INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL)
+ goto err;
+ /* assign as soon as allocated so the error path (wusb_dev_free())
+ * releases it if the request allocation below fails */
+ wusb_dev->set_gtk_urb = urb;
+
+ req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (req == NULL)
+ goto err;
+ wusb_dev->set_gtk_req = req;
+
+ req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
+ req->bRequest = USB_REQ_SET_DESCRIPTOR;
+ req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index);
+ req->wIndex = 0;
+ req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength);
+
+ return wusb_dev;
+err:
+ wusb_dev_free(wusb_dev);
+ return NULL;
+}
+
+
+/*
+ * Using the Connect-Ack list, fill out the @wusbhc Connect-Ack WUSB IE
+ * properly so that it can be added to the MMC.
+ *
+ * We just get the @wusbhc->ca_list and fill out the first four ones or
+ * less (per spec WUSB1.0[7.5, before T7-38]). If the ConnectAck WUSB
+ * IE is not allocated, we alloc it.
+ *
+ * @wusbhc->mutex must be taken
+ */
+static void wusbhc_fill_cack_ie(struct wusbhc *wusbhc)
+{
+ unsigned cnt;
+ struct wusb_dev *dev_itr;
+ struct wuie_connect_ack *cack_ie;
+
+ cack_ie = &wusbhc->cack_ie;
+ cnt = 0;
+ list_for_each_entry(dev_itr, &wusbhc->cack_list, cack_node) {
+ cack_ie->blk[cnt].CDID = dev_itr->cdid;
+ cack_ie->blk[cnt].bDeviceAddress = dev_itr->addr;
+ if (++cnt >= WUIE_ELT_MAX)
+ break;
+ }
+ cack_ie->hdr.bLength = sizeof(cack_ie->hdr)
+ + cnt * sizeof(cack_ie->blk[0]);
+}
+
+/*
+ * Register a new device that wants to connect
+ *
+ * A new device wants to connect, so we add it to the Connect-Ack
+ * list. We give it an address in the unauthorized range (bit 7 set);
+ * user space will have to drive authorization further on.
+ *
+ * @dev_addr: address to use for the device (which is also the port
+ * number).
+ *
+ * @wusbhc->mutex must be taken
+ */
+static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc,
+ struct wusb_dn_connect *dnc,
+ const char *pr_cdid, u8 port_idx)
+{
+ struct device *dev = wusbhc->dev;
+ struct wusb_dev *wusb_dev;
+ int new_connection = wusb_dn_connect_new_connection(dnc);
+ u8 dev_addr;
+ int result;
+
+ /* Is it registered already? */
+ list_for_each_entry(wusb_dev, &wusbhc->cack_list, cack_node)
+ if (!memcmp(&wusb_dev->cdid, &dnc->CDID,
+ sizeof(wusb_dev->cdid)))
+ return wusb_dev;
+ /* We don't have it, create an entry, register it */
+ wusb_dev = wusb_dev_alloc(wusbhc);
+ if (wusb_dev == NULL)
+ return NULL;
+ wusb_dev_init(wusb_dev);
+ wusb_dev->cdid = dnc->CDID;
+ wusb_dev->port_idx = port_idx;
+
+ /*
+ * Devices are always available within the cluster reservation
+ * and since the hardware will take the intersection of the
+ * per-device availability and the cluster reservation, the
+ * per-device availability can simply be set to always
+ * available.
+ */
+ bitmap_fill(wusb_dev->availability.bm, UWB_NUM_MAS);
+
+ /* FIXME: handle reconnects instead of assuming connects are
+ always new. */
+ if (new_connection == 0)
+ new_connection = 1;
+ if (new_connection) {
+ dev_addr = (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH;
+
+ dev_info(dev, "Connecting new WUSB device to address %u, "
+ "port %u\n", dev_addr, port_idx);
+
+ result = wusb_set_dev_addr(wusbhc, wusb_dev, dev_addr);
+ if (result < 0)
+ return NULL;
+ }
+ wusb_dev->entry_ts = jiffies;
+ list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list);
+ wusbhc->cack_count++;
+ wusbhc_fill_cack_ie(wusbhc);
+ return wusb_dev;
+}
+
+/*
+ * Remove a Connect-Ack context entry from the HCs view
+ *
+ * @wusbhc->mutex must be taken
+ */
+static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ struct device *dev = wusbhc->dev;
+ d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev);
+ list_del_init(&wusb_dev->cack_node);
+ wusbhc->cack_count--;
+ wusbhc_fill_cack_ie(wusbhc);
+ d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev);
+}
+
+/*
+ * @wusbhc->mutex must be taken */
+static
+void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ struct device *dev = wusbhc->dev;
+ d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev);
+ wusbhc_cack_rm(wusbhc, wusb_dev);
+ if (wusbhc->cack_count)
+ wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
+ else
+ wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr);
+ d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev);
+}
+
+static void wusbhc_devconnect_acked_work(struct work_struct *work)
+{
+ struct wusb_dev *wusb_dev = container_of(work, struct wusb_dev,
+ devconnect_acked_work);
+ struct wusbhc *wusbhc = wusb_dev->wusbhc;
+
+ mutex_lock(&wusbhc->mutex);
+ wusbhc_devconnect_acked(wusbhc, wusb_dev);
+ mutex_unlock(&wusbhc->mutex);
+}
+
+/*
+ * Ack a device for connection
+ *
+ * FIXME: docs
+ *
+ * @pr_cdid: Printable CDID, in hex. Use @dnc->CDID for the real deal.
+ *
+ * So we get the connect ack IE (may have been allocated already),
+ * find an empty connect block, an empty virtual port, create an
+ * address with it (see below), make it an unauth addr [bit 7 set] and
+ * set the MMC.
+ *
+ * Addresses: because WUSB hosts have no downstream hubs, we can do a
+ * 1:1 mapping between 'port number' and device
+ * address. This simplifies many things, as during this
+ * initial connect phase the USB stack has no knowledge of
+ * the device and hasn't assigned an address yet--we know
+ * USB's choose_address() will use the same heuristics we
+ * use here, so we can assume which address will be assigned.
+ *
+ * USB stack always assigns address 1 to the root hub, so
+ * to the port number we add 2 (thus virtual port #0 is
+ * addr #2).
+ *
+ * @wusbhc shall be referenced
+ */
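+/*
+ * Worked example of the mapping described above (assuming
+ * WUSB_DEV_ADDR_UNAUTH is the 0x80 bit, as the comments in this file
+ * indicate): virtual port index 0 gets address 2 once authenticated
+ * and 0x82 (2 | 0x80) while still unauthenticated.
+ */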
+static
+void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
+ const char *pr_cdid)
+{
+ int result;
+ struct device *dev = wusbhc->dev;
+ struct wusb_dev *wusb_dev;
+ struct wusb_port *port;
+ unsigned idx, devnum;
+
+ d_fnstart(3, dev, "(%p, %p, %s)\n", wusbhc, dnc, pr_cdid);
+ mutex_lock(&wusbhc->mutex);
+
+ /* Check we are not handling it already */
+ for (idx = 0; idx < wusbhc->ports_max; idx++) {
+ port = wusb_port_by_idx(wusbhc, idx);
+ if (port->wusb_dev
+ && memcmp(&dnc->CDID, &port->wusb_dev->cdid, sizeof(dnc->CDID)) == 0)
+ goto error_unlock;
+ }
+ /* Look up those fake ports we have for a free one */
+ for (idx = 0; idx < wusbhc->ports_max; idx++) {
+ port = wusb_port_by_idx(wusbhc, idx);
+ if ((port->status & USB_PORT_STAT_POWER)
+ && !(port->status & USB_PORT_STAT_CONNECTION))
+ break;
+ }
+ if (idx >= wusbhc->ports_max) {
+ dev_err(dev, "Host controller can't connect more devices "
+ "(%u already connected); device %s rejected\n",
+ wusbhc->ports_max, pr_cdid);
+ /* NOTE: we could send a WUIE_Disconnect here, but we haven't
+ * even acked, so the device will eventually timeout the
+ * connection, right? */
+ goto error_unlock;
+ }
+
+ devnum = idx + 2;
+
+ /* Make sure we are using no crypto on that "virtual port" */
+ wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0);
+
+ /* Grab a filled in Connect-Ack context, fill out the
+ * Connect-Ack Wireless USB IE, set the MMC */
+ wusb_dev = wusbhc_cack_add(wusbhc, dnc, pr_cdid, idx);
+ if (wusb_dev == NULL)
+ goto error_unlock;
+ result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
+ if (result < 0)
+ goto error_unlock;
+ /* Give the device at least 2ms (WUSB1.0[7.5.1p3]), let's do
+ * three for good measure */
+ msleep(3);
+ port->wusb_dev = wusb_dev;
+ port->status |= USB_PORT_STAT_CONNECTION;
+ port->change |= USB_PORT_STAT_C_CONNECTION;
+ port->reset_count = 0;
+ /* Now the port status changed to connected; khubd will
+ * pick the change up and try to reset the port to bring it to
+ * the enabled state--so this process returns up to the stack
+ * and it calls back into wusbhc_rh_port_reset() who will call
+ * devconnect_auth().
+ */
+error_unlock:
+ mutex_unlock(&wusbhc->mutex);
+ d_fnend(3, dev, "(%p, %p, %s) = void\n", wusbhc, dnc, pr_cdid);
+ return;
+
+}
+
+/*
+ * Disconnect a Wireless USB device from its fake port
+ *
+ * Marks the port as disconnected so that khubd can pick up the change
+ * and drops our knowledge about the device.
+ *
+ * Assumes there is a device connected
+ *
+ * @port_index: zero based port number
+ *
+ * NOTE: @wusbhc->mutex is locked
+ *
+ * WARNING: From here it is not very safe to access anything hanging off
+ * wusb_dev
+ */
+static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
+ struct wusb_port *port)
+{
+ struct device *dev = wusbhc->dev;
+ struct wusb_dev *wusb_dev = port->wusb_dev;
+
+ d_fnstart(3, dev, "(wusbhc %p, port %p)\n", wusbhc, port);
+ port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE
+ | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET
+ | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED);
+ port->change |= USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE;
+ if (wusb_dev) {
+ if (!list_empty(&wusb_dev->cack_node))
+ list_del_init(&wusb_dev->cack_node);
+ /* For the one in cack_add() */
+ wusb_dev_put(wusb_dev);
+ }
+ port->wusb_dev = NULL;
+ /* don't reset the reset_count to zero or wusbhc_rh_port_reset will get
+ * confused! We only reset to zero when we connect a new device.
+ */
+
+ /* After a device disconnects, change the GTK (see [WUSB]
+ * section 6.2.11.2). */
+ wusbhc_gtk_rekey(wusbhc);
+
+ d_fnend(3, dev, "(wusbhc %p, port %p) = void\n", wusbhc, port);
+ /* The Wireless USB part has forgotten about the device already; now
+ * khubd's timer will pick up the disconnection and remove the USB
+ * device from the system
+ */
+}
+
+/*
+ * Authenticate a device into the WUSB Cluster
+ *
+ * Called from the Root Hub code (rh.c:wusbhc_rh_port_reset()) when
+ * asking for a reset on a port that is not enabled (ie: first connect
+ * on the port).
+ *
+ * Performs the 4-way handshake to allow the device to communicate with the
+ * WUSB Cluster securely; once done, issue a request to the device for
+ * it to change to address 0.
+ *
+ * This mimics the reset step of wired USB which, once the device is
+ * reset, leaves the port in the enabled state and the device with the
+ * default address (0).
+ *
+ * WUSB1.0[7.1.2]
+ *
+ * @port_idx: port where the change happened--This is the index into
+ * the wusbhc port array, not the USB port number.
+ */
+int wusbhc_devconnect_auth(struct wusbhc *wusbhc, u8 port_idx)
+{
+ struct device *dev = wusbhc->dev;
+ struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);
+
+ d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
+ port->status &= ~USB_PORT_STAT_RESET;
+ port->status |= USB_PORT_STAT_ENABLE;
+ port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE;
+ d_fnend(3, dev, "(%p, %u) = 0\n", wusbhc, port_idx);
+ return 0;
+}
+
+/*
+ * Refresh the list of keep alives to emit in the MMC
+ *
+ * Some devices don't respond to keep alives unless they've been
+ * authenticated, so skip unauthenticated devices.
+ *
+ * We only publish the first four devices that have a coming timeout
+ * condition. Then when we are done processing those, we go for the
+ * next ones. We ignore the ones that have timed out already (they'll
+ * be purged).
+ *
+ * This might cause the devices early in the port array to crowd out
+ * the ones at the end, letting them time out... FIXME: come up with a
+ * better algorithm?
+ *
+ * Note we can't do much about MMC's ops errors; we hope next refresh
+ * will kind of handle it.
+ *
+ * NOTE: @wusbhc->mutex is locked
+ */
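+/*
+ * Timing sketch derived from the checks below: with a trust timeout
+ * of T milliseconds, a device silent for more than T/2 is listed in
+ * the keep alive IE, and one silent for more than T is disconnected;
+ * wusbhc_keep_alive_run() re-runs this scan roughly every T/2.
+ */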
+static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
+{
+ struct device *dev = wusbhc->dev;
+ unsigned cnt;
+ struct wusb_dev *wusb_dev;
+ struct wusb_port *wusb_port;
+ struct wuie_keep_alive *ie = &wusbhc->keep_alive_ie;
+ unsigned keep_alives, old_keep_alives;
+
+ old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr);
+ keep_alives = 0;
+ for (cnt = 0;
+ keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max;
+ cnt++) {
+ unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout);
+
+ wusb_port = wusb_port_by_idx(wusbhc, cnt);
+ wusb_dev = wusb_port->wusb_dev;
+
+ if (wusb_dev == NULL)
+ continue;
+ if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated)
+ continue;
+
+ if (time_after(jiffies, wusb_dev->entry_ts + tt)) {
+ dev_err(dev, "KEEPALIVE: device %u timed out\n",
+ wusb_dev->addr);
+ __wusbhc_dev_disconnect(wusbhc, wusb_port);
+ } else if (time_after(jiffies, wusb_dev->entry_ts + tt/2)) {
+ /* Approaching timeout cut out, need to refresh */
+ ie->bDeviceAddress[keep_alives++] = wusb_dev->addr;
+ }
+ }
+ if (keep_alives & 0x1) /* pad to even number ([WUSB] section 7.5.9) */
+ ie->bDeviceAddress[keep_alives++] = 0x7f;
+ ie->hdr.bLength = sizeof(ie->hdr) +
+ keep_alives*sizeof(ie->bDeviceAddress[0]);
+ if (keep_alives > 0)
+ wusbhc_mmcie_set(wusbhc, 10, 5, &ie->hdr);
+ else if (old_keep_alives != 0)
+ wusbhc_mmcie_rm(wusbhc, &ie->hdr);
+}
+
+/*
+ * Do a run through all devices checking for timeouts
+ */
+static void wusbhc_keep_alive_run(struct work_struct *ws)
+{
+ struct delayed_work *dw =
+ container_of(ws, struct delayed_work, work);
+ struct wusbhc *wusbhc =
+ container_of(dw, struct wusbhc, keep_alive_timer);
+
+ d_fnstart(5, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
+ if (wusbhc->active) {
+ mutex_lock(&wusbhc->mutex);
+ __wusbhc_keep_alive(wusbhc);
+ mutex_unlock(&wusbhc->mutex);
+ queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
+ (wusbhc->trust_timeout * CONFIG_HZ)/1000/2);
+ }
+ d_fnend(5, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
+ return;
+}
+
+/*
+ * Find the wusb_dev from its device address.
+ *
+ * The device can be found directly from the address (see
+ * wusbhc_cack_add() for where the device address is set to
+ * port_idx + 2), except when the address is zero.
+ */
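+/*
+ * Example, following the arithmetic below: an unauthenticated device
+ * at address 0x82 maps to (0x82 & ~0x80) - 2 = 0, i.e. the first fake
+ * port; 0xff means "unconnected" and address 0 forces a scan of all
+ * the ports.
+ */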
+static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr)
+{
+ int p;
+
+ if (addr == 0xff) /* unconnected */
+ return NULL;
+
+ if (addr > 0) {
+ int port = (addr & ~0x80) - 2;
+ if (port < 0 || port >= wusbhc->ports_max)
+ return NULL;
+ return wusb_port_by_idx(wusbhc, port)->wusb_dev;
+ }
+
+ /* Look for the device with address 0. */
+ for (p = 0; p < wusbhc->ports_max; p++) {
+ struct wusb_dev *wusb_dev = wusb_port_by_idx(wusbhc, p)->wusb_dev;
+ if (wusb_dev && wusb_dev->addr == addr)
+ return wusb_dev;
+ }
+ return NULL;
+}
+
+/*
+ * Handle a DN_Alive notification (WUSB1.0[7.6.1])
+ *
+ * This just updates the device activity timestamp and then refreshes
+ * the keep alive IE.
+ *
+ * @wusbhc shall be referenced and unlocked
+ */
+static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ struct device *dev = wusbhc->dev;
+
+ d_printf(2, dev, "DN ALIVE: device 0x%02x pong\n", wusb_dev->addr);
+
+ mutex_lock(&wusbhc->mutex);
+ wusb_dev->entry_ts = jiffies;
+ __wusbhc_keep_alive(wusbhc);
+ mutex_unlock(&wusbhc->mutex);
+}
+
+/*
+ * Handle a DN_Connect notification (WUSB1.0[7.6.1])
+ *
+ * @wusbhc
+ * @pkt_hdr
+ * @size: Size of the buffer where the notification resides; if the
+ * notification data suggests there should be more data than
+ * available, an error will be signaled and the whole buffer
+ * consumed.
+ *
+ * @wusbhc->mutex shall be held
+ */
+static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,
+ struct wusb_dn_hdr *dn_hdr,
+ size_t size)
+{
+ struct device *dev = wusbhc->dev;
+ struct wusb_dn_connect *dnc;
+ char pr_cdid[WUSB_CKHDID_STRSIZE];
+ static const char *beacon_behaviour[] = {
+ "reserved",
+ "self-beacon",
+ "directed-beacon",
+ "no-beacon"
+ };
+
+ d_fnstart(3, dev, "(%p, %p, %zu)\n", wusbhc, dn_hdr, size);
+ if (size < sizeof(*dnc)) {
+ dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n",
+ size, sizeof(*dnc));
+ goto out;
+ }
+
+ dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr);
+ ckhdid_printf(pr_cdid, sizeof(pr_cdid), &dnc->CDID);
+ dev_info(dev, "DN CONNECT: device %s @ %x (%s) wants to %s\n",
+ pr_cdid,
+ wusb_dn_connect_prev_dev_addr(dnc),
+ beacon_behaviour[wusb_dn_connect_beacon_behavior(dnc)],
+ wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect");
+ /* ACK the connect */
+ wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid);
+out:
+ d_fnend(3, dev, "(%p, %p, %zu) = void\n",
+ wusbhc, dn_hdr, size);
+ return;
+}
+
+/*
+ * Handle a DN_Disconnect notification (WUSB1.0[7.6.1])
+ *
+ * Device is going down -- do the disconnect.
+ *
+ * @wusbhc shall be referenced and unlocked
+ */
+static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ struct device *dev = wusbhc->dev;
+
+ dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr);
+
+ mutex_lock(&wusbhc->mutex);
+ __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx));
+ mutex_unlock(&wusbhc->mutex);
+}
+
+/*
+ * Reset a WUSB device on a HWA
+ *
+ * @wusbhc
+ * @port_idx Index of the port where the device is
+ *
+ * In Wireless USB, a reset is more or less equivalent to a full
+ * disconnect; so we just do a full disconnect and send the device a
+ * Device Reset IE (WUSB1.0[7.5.11]) giving it a few millisecs (6 MMCs).
+ *
+ * @wusbhc should be refcounted and unlocked
+ */
+int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port_idx)
+{
+ int result;
+ struct device *dev = wusbhc->dev;
+ struct wusb_dev *wusb_dev;
+ struct wuie_reset *ie;
+
+ d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
+ mutex_lock(&wusbhc->mutex);
+ result = 0;
+ wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
+ if (wusb_dev == NULL) {
+ /* reset no device? ignore */
+ dev_dbg(dev, "RESET: no device at port %u, ignoring\n",
+ port_idx);
+ goto error_unlock;
+ }
+ result = -ENOMEM;
+ ie = kzalloc(sizeof(*ie), GFP_KERNEL);
+ if (ie == NULL)
+ goto error_unlock;
+ ie->hdr.bLength = sizeof(ie->hdr) + sizeof(ie->CDID);
+ ie->hdr.bIEIdentifier = WUIE_ID_RESET_DEVICE;
+ ie->CDID = wusb_dev->cdid;
+ result = wusbhc_mmcie_set(wusbhc, 0xff, 6, &ie->hdr);
+ if (result < 0) {
+ dev_err(dev, "RESET: cant's set MMC: %d\n", result);
+ goto error_kfree;
+ }
+ __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
+
+ /* 120ms, hopefully 6 MMCs (FIXME) */
+ msleep(120);
+ wusbhc_mmcie_rm(wusbhc, &ie->hdr);
+error_kfree:
+ kfree(ie);
+error_unlock:
+ mutex_unlock(&wusbhc->mutex);
+ d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result);
+ return result;
+}
+
+/*
+ * Handle a Device Notification coming from a host
+ *
+ * The Device Notification comes from a host (HWA, DWA or WHCI)
+ * wrapped in a set of headers. Somebody else has peeled off those
+ * headers for us and we just get one Device Notification.
+ *
+ * Invalid DNs (e.g., too short) are discarded.
+ *
+ * @wusbhc shall be referenced
+ *
+ * FIXMES:
+ * - implement priorities as in WUSB1.0[Table 7-55]?
+ */
+void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,
+ struct wusb_dn_hdr *dn_hdr, size_t size)
+{
+ struct device *dev = wusbhc->dev;
+ struct wusb_dev *wusb_dev;
+
+ d_fnstart(3, dev, "(%p, %p)\n", wusbhc, dn_hdr);
+
+ if (size < sizeof(struct wusb_dn_hdr)) {
+ dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
+ (int)size, (int)sizeof(struct wusb_dn_hdr));
+ goto out;
+ }
+
+ wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
+ if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) {
+ dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n",
+ dn_hdr->bType, srcaddr);
+ goto out;
+ }
+
+ switch (dn_hdr->bType) {
+ case WUSB_DN_CONNECT:
+ wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);
+ break;
+ case WUSB_DN_ALIVE:
+ wusbhc_handle_dn_alive(wusbhc, wusb_dev);
+ break;
+ case WUSB_DN_DISCONNECT:
+ wusbhc_handle_dn_disconnect(wusbhc, wusb_dev);
+ break;
+ case WUSB_DN_MASAVAILCHANGED:
+ case WUSB_DN_RWAKE:
+ case WUSB_DN_SLEEP:
+ /* FIXME: handle these DNs. */
+ break;
+ case WUSB_DN_EPRDY:
+ /* The hardware handles these. */
+ break;
+ default:
+ dev_warn(dev, "unknown DN %u (%d octets) from %u\n",
+ dn_hdr->bType, (int)size, srcaddr);
+ }
+out:
+ d_fnend(3, dev, "(%p, %p) = void\n", wusbhc, dn_hdr);
+ return;
+}
+EXPORT_SYMBOL_GPL(wusbhc_handle_dn);
+
+/*
+ * Disconnect a WUSB device from the cluster
+ *
+ * @wusbhc
+ * @port_idx Fake port where the device is (wusbhc index, not USB port number).
+ *
+ * In Wireless USB, a disconnect is basically telling the device it is
+ * being disconnected and then forgetting about it.
+ *
+ * We send the device a Device Disconnect IE (WUSB1.0[7.5.11]) for 100
+ * ms and then keep going.
+ *
+ * We don't do much in case of error; we always pretend we disabled
+ * the port and disconnected the device. If physically the request
+ * didn't get there (many things can fail in the way there), the stack
+ * will reject the device's communication attempts.
+ *
+ * @wusbhc should be refcounted and locked
+ */
+void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port_idx)
+{
+ int result;
+ struct device *dev = wusbhc->dev;
+ struct wusb_dev *wusb_dev;
+ struct wuie_disconnect *ie;
+
+ d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
+ result = 0;
+ wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
+ if (wusb_dev == NULL) {
+ /* disconnect no device? ignore */
+ dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n",
+ port_idx);
+ goto error;
+ }
+ __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
+
+ result = -ENOMEM;
+ ie = kzalloc(sizeof(*ie), GFP_KERNEL);
+ if (ie == NULL)
+ goto error;
+ ie->hdr.bLength = sizeof(*ie);
+ ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT;
+ ie->bDeviceAddress = wusb_dev->addr;
+ result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
+ if (result < 0) {
+ dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result);
+ goto error_kfree;
+ }
+
+ /* 100ms, hopefully 6 MMCs */
+ msleep(100);
+ wusbhc_mmcie_rm(wusbhc, &ie->hdr);
+error_kfree:
+ kfree(ie);
+error:
+ d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result);
+ return;
+}
+
+static void wusb_cap_descr_printf(const unsigned level, struct device *dev,
+ const struct usb_wireless_cap_descriptor *wcd)
+{
+ d_printf(level, dev,
+ "WUSB Capability Descriptor\n"
+ " bDevCapabilityType 0x%02x\n"
+ " bmAttributes 0x%02x\n"
+ " wPhyRates 0x%04x\n"
+ " bmTFITXPowerInfo 0x%02x\n"
+ " bmFFITXPowerInfo 0x%02x\n"
+ " bmBandGroup 0x%04x\n"
+ " bReserved 0x%02x\n",
+ wcd->bDevCapabilityType,
+ wcd->bmAttributes,
+ le16_to_cpu(wcd->wPHYRates),
+ wcd->bmTFITXPowerInfo,
+ wcd->bmFFITXPowerInfo,
+ le16_to_cpu(wcd->bmBandGroup),
+ wcd->bReserved);
+}
+
+/*
+ * Walk over the BOS descriptor, verify and grok it
+ *
+ * @usb_dev: referenced
+ * @wusb_dev: referenced and unlocked
+ *
+ * The BOS descriptor is defined at WUSB1.0[7.4.1], and it defines a
+ * "flexible" way to wrap all kinds of descriptors inside an standard
+ * descriptor (wonder why they didn't use normal descriptors,
+ * btw). Not like they lack code.
+ *
+ * At the end we go to look for the WUSB Device Capabilities
+ * (WUSB1.0[7.4.1.1]) that is wrapped in a device capability descriptor
+ * that is part of the BOS descriptor set. That tells us what the
+ * device supports (dual role, beacon type, UWB PHY rates).
+ */
+static int wusb_dev_bos_grok(struct usb_device *usb_dev,
+ struct wusb_dev *wusb_dev,
+ struct usb_bos_descriptor *bos, size_t desc_size)
+{
+ ssize_t result;
+ struct device *dev = &usb_dev->dev;
+ void *itr, *top;
+
+ /* Walk over BOS capabilities, verify them */
+ itr = (void *)bos + sizeof(*bos);
+ top = itr + desc_size - sizeof(*bos);
+ while (itr < top) {
+ struct usb_dev_cap_header *cap_hdr = itr;
+ size_t cap_size;
+ u8 cap_type;
+ if (top - itr < sizeof(*cap_hdr)) {
+ dev_err(dev, "Device BUG? premature end of BOS header "
+ "data [offset 0x%02x]: only %zu bytes left\n",
+ (int)(itr - (void *)bos), top - itr);
+ result = -ENOSPC;
+ goto error_bad_cap;
+ }
+ cap_size = cap_hdr->bLength;
+ cap_type = cap_hdr->bDevCapabilityType;
+ d_printf(4, dev, "BOS Capability: 0x%02x (%zu bytes)\n",
+ cap_type, cap_size);
+ if (cap_size == 0)
+ break;
+ if (cap_size > top - itr) {
+ dev_err(dev, "Device BUG? premature end of BOS data "
+ "[offset 0x%02x cap %02x %zu bytes]: "
+ "only %zu bytes left\n",
+ (int)(itr - (void *)bos),
+ cap_type, cap_size, top - itr);
+ result = -EBADF;
+ goto error_bad_cap;
+ }
+ d_dump(3, dev, itr, cap_size);
+ switch (cap_type) {
+ case USB_CAP_TYPE_WIRELESS_USB:
+ if (cap_size != sizeof(*wusb_dev->wusb_cap_descr))
+ dev_err(dev, "Device BUG? WUSB Capability "
+ "descriptor is %zu bytes vs %zu "
+ "needed\n", cap_size,
+ sizeof(*wusb_dev->wusb_cap_descr));
+ else {
+ wusb_dev->wusb_cap_descr = itr;
+ wusb_cap_descr_printf(3, dev, itr);
+ }
+ break;
+ default:
+ dev_err(dev, "BUG? Unknown BOS capability 0x%02x "
+ "(%zu bytes) at offset 0x%02x\n", cap_type,
+ cap_size, (int)(itr - (void *)bos));
+ }
+ itr += cap_size;
+ }
+ result = 0;
+error_bad_cap:
+ return result;
+}
+
+/*
+ * Add information from the BOS descriptors to the device
+ *
+ * @usb_dev: referenced
+ * @wusb_dev: referenced and unlocked
+ *
+ * So what we do is allocate space for the BOS descriptor (32 bytes);
+ * read the first four bytes, which include the wTotalLength field
+ * (WUSB1.0[T7-26]), and if the whole descriptor fits in those 32
+ * bytes, read the whole thing. If not, we reallocate to that size.
+ *
+ * Then we call the groking function, that will fill up
+ * wusb_dev->wusb_cap_descr, which is what we'll need later on.
+ */
+static int wusb_dev_bos_add(struct usb_device *usb_dev,
+ struct wusb_dev *wusb_dev)
+{
+ ssize_t result;
+ struct device *dev = &usb_dev->dev;
+ struct usb_bos_descriptor *bos;
+ size_t alloc_size = 32, desc_size = 4;
+
+ bos = kmalloc(alloc_size, GFP_KERNEL);
+ if (bos == NULL)
+ return -ENOMEM;
+ result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
+ if (result < 4) {
+ dev_err(dev, "Can't get BOS descriptor or too short: %zd\n",
+ result);
+ goto error_get_descriptor;
+ }
+ desc_size = le16_to_cpu(bos->wTotalLength);
+ if (desc_size >= alloc_size) {
+ kfree(bos);
+ alloc_size = desc_size;
+ bos = kmalloc(alloc_size, GFP_KERNEL);
+ if (bos == NULL)
+ return -ENOMEM;
+ }
+ result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
+ if (result < 0 || result != desc_size) {
+ dev_err(dev, "Can't get BOS descriptor or too short (need "
+ "%zu bytes): %zd\n", desc_size, result);
+ goto error_get_descriptor;
+ }
+ if (result < sizeof(*bos)
+ || le16_to_cpu(bos->wTotalLength) != desc_size) {
+ dev_err(dev, "Bad BOS descriptor: claimed %zu bytes, "
+ "read %zd\n", desc_size, result);
+ goto error_get_descriptor;
+ }
+ d_printf(2, dev, "Got BOS descriptor %zd bytes, %u capabilities\n",
+ result, bos->bNumDeviceCaps);
+ d_dump(2, dev, bos, result);
+ result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result);
+ if (result < 0)
+ goto error_bad_bos;
+ wusb_dev->bos = bos;
+ return 0;
+
+error_bad_bos:
+error_get_descriptor:
+ kfree(bos);
+ wusb_dev->wusb_cap_descr = NULL;
+ return result;
+}
+
+static void wusb_dev_bos_rm(struct wusb_dev *wusb_dev)
+{
+ kfree(wusb_dev->bos);
+ wusb_dev->wusb_cap_descr = NULL;
+};
+
+static struct usb_wireless_cap_descriptor wusb_cap_descr_default = {
+ .bLength = sizeof(wusb_cap_descr_default),
+ .bDescriptorType = USB_DT_DEVICE_CAPABILITY,
+ .bDevCapabilityType = USB_CAP_TYPE_WIRELESS_USB,
+
+ .bmAttributes = USB_WIRELESS_BEACON_NONE,
+ .wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53),
+ .bmTFITXPowerInfo = 0,
+ .bmFFITXPowerInfo = 0,
+ .bmBandGroup = cpu_to_le16(0x0001), /* WUSB1.0[7.4.1] bottom */
+ .bReserved = 0
+};
+
+/*
+ * USB stack's device addition Notifier Callback
+ *
+ * Called from drivers/usb/core/hub.c when a new device is added; we
+ * use this hook to perform certain WUSB specific setup work on the
+ * new device. As well, it is the first time we can connect the
+ * wusb_dev and the usb_dev. So we note it down in wusb_dev and take a
+ * reference that we'll drop.
+ *
+ * First we need to determine if the device is a WUSB device (else we
+ * ignore it). For that we use the speed setting (USB_SPEED_VARIABLE)
+ * [FIXME: maybe we'd need something more definitive]. If so, we track
+ * its usb_bus and, from there, the WUSB HC.
+ *
+ * Because all WUSB HCs are contained in a 'struct wusbhc', voila, we
+ * get the wusbhc for the device.
+ *
+ * We have a reference on @usb_dev (as we are called at the end of its
+ * enumeration).
+ *
+ * NOTE: @usb_dev locked
+ */
+static void wusb_dev_add_ncb(struct usb_device *usb_dev)
+{
+ int result = 0;
+ struct wusb_dev *wusb_dev;
+ struct wusbhc *wusbhc;
+ struct device *dev = &usb_dev->dev;
+ u8 port_idx;
+
+ if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
+ return; /* skip non-WUSB devices and WUSB root hubs */
+
+ d_fnstart(3, dev, "(usb_dev %p)\n", usb_dev);
+
+ wusbhc = wusbhc_get_by_usb_dev(usb_dev);
+ if (wusbhc == NULL)
+ goto error_nodev;
+ mutex_lock(&wusbhc->mutex);
+ wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
+ port_idx = wusb_port_no_to_idx(usb_dev->portnum);
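+ /* note the port index while holding the mutex; the error path below needs it */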
+ mutex_unlock(&wusbhc->mutex);
+ if (wusb_dev == NULL)
+ goto error_nodev;
+ wusb_dev->usb_dev = usb_get_dev(usb_dev);
+ usb_dev->wusb_dev = wusb_dev_get(wusb_dev);
+ result = wusb_dev_sec_add(wusbhc, usb_dev, wusb_dev);
+ if (result < 0) {
+ dev_err(dev, "Cannot enable security: %d\n", result);
+ goto error_sec_add;
+ }
+ /* Now query the device for its BOS and attach it to wusb_dev */
+ result = wusb_dev_bos_add(usb_dev, wusb_dev);
+ if (result < 0) {
+ dev_err(dev, "Cannot get BOS descriptors: %d\n", result);
+ goto error_bos_add;
+ }
+ result = wusb_dev_sysfs_add(wusbhc, usb_dev, wusb_dev);
+ if (result < 0)
+ goto error_add_sysfs;
+out:
+ wusb_dev_put(wusb_dev);
+ wusbhc_put(wusbhc);
+error_nodev:
+ d_fnend(3, dev, "(usb_dev %p) = void\n", usb_dev);
+ return;
+
+ wusb_dev_sysfs_rm(wusb_dev);
+error_add_sysfs:
+ wusb_dev_bos_rm(wusb_dev);
+error_bos_add:
+ wusb_dev_sec_rm(wusb_dev);
+error_sec_add:
+ mutex_lock(&wusbhc->mutex);
+ __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
+ mutex_unlock(&wusbhc->mutex);
+ goto out;
+}
+
+/*
+ * Undo all the steps done at connection by the notifier callback
+ *
+ * NOTE: @usb_dev locked
+ */
+static void wusb_dev_rm_ncb(struct usb_device *usb_dev)
+{
+ struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
+
+ if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
+ return; /* skip non-WUSB devices and WUSB root hubs */
+
+ wusb_dev_sysfs_rm(wusb_dev);
+ wusb_dev_bos_rm(wusb_dev);
+ wusb_dev_sec_rm(wusb_dev);
+ wusb_dev->usb_dev = NULL;
+ usb_dev->wusb_dev = NULL;
+ wusb_dev_put(wusb_dev);
+ usb_put_dev(usb_dev);
+}
+
+/*
+ * Handle notifications from the USB stack (notifier call back)
+ *
+ * This is called when the USB stack does a
+ * usb_{bus,device}_{add,remove}() so we can do WUSB specific
+ * handling. It is called (for the case of
+ * USB_DEVICE_{ADD,REMOVE}) with the usb_dev locked.
+ */
+int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
+ void *priv)
+{
+ int result = NOTIFY_OK;
+
+ switch (val) {
+ case USB_DEVICE_ADD:
+ wusb_dev_add_ncb(priv);
+ break;
+ case USB_DEVICE_REMOVE:
+ wusb_dev_rm_ncb(priv);
+ break;
+ case USB_BUS_ADD:
+ /* ignore (for now) */
+ case USB_BUS_REMOVE:
+ break;
+ default:
+ WARN_ON(1);
+ result = NOTIFY_BAD;
+ };
+ return result;
+}
+
+/*
+ * Return a referenced wusb_dev given a @wusbhc and @usb_dev
+ */
+struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *wusbhc,
+ struct usb_device *usb_dev)
+{
+ struct wusb_dev *wusb_dev;
+ u8 port_idx;
+
+ port_idx = wusb_port_no_to_idx(usb_dev->portnum);
+ BUG_ON(port_idx > wusbhc->ports_max);
+ wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
+ if (wusb_dev != NULL) /* else: oops, the device is gone */
+ wusb_dev_get(wusb_dev);
+ return wusb_dev;
+}
+EXPORT_SYMBOL_GPL(__wusb_dev_get_by_usb_dev);
+
+void wusb_dev_destroy(struct kref *_wusb_dev)
+{
+ struct wusb_dev *wusb_dev
+ = container_of(_wusb_dev, struct wusb_dev, refcnt);
+ list_del_init(&wusb_dev->cack_node);
+ wusb_dev_free(wusb_dev);
+ d_fnend(1, NULL, "%s (wusb_dev %p) = void\n", __func__, wusb_dev);
+}
+EXPORT_SYMBOL_GPL(wusb_dev_destroy);
+
+/*
+ * Create all the device connect handling infrastructure
+ *
+ * This is basically the device info array, Connect Acknowledgement
+ * (cack) lists, keep-alive timers (and delayed work thread).
+ */
+int wusbhc_devconnect_create(struct wusbhc *wusbhc)
+{
+ d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
+
+ wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE;
+ wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr);
+ INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run);
+
+ wusbhc->cack_ie.hdr.bIEIdentifier = WUIE_ID_CONNECTACK;
+ wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr);
+ INIT_LIST_HEAD(&wusbhc->cack_list);
+
+ d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
+ return 0;
+}
+
+/*
+ * Release all resources taken by the devconnect stuff
+ */
+void wusbhc_devconnect_destroy(struct wusbhc *wusbhc)
+{
+ d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
+ d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
+}
+
+/*
+ * wusbhc_devconnect_start - start accepting device connections
+ * @wusbhc: the WUSB HC
+ *
+ * Sets the Host Info IE to accept all new connections.
+ *
+ * FIXME: This also enables the keep alives but this is not necessary
+ * until there are connected and authenticated devices.
+ */
+int wusbhc_devconnect_start(struct wusbhc *wusbhc,
+ const struct wusb_ckhdid *chid)
+{
+ struct device *dev = wusbhc->dev;
+ struct wuie_host_info *hi;
+ int result;
+
+ hi = kzalloc(sizeof(*hi), GFP_KERNEL);
+ if (hi == NULL)
+ return -ENOMEM;
+
+ hi->hdr.bLength = sizeof(*hi);
+ hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO;
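+ /* attributes: DNTS stream index in the upper bits, connect
+ * capability flags (WUIE_HI_CAP_ALL) in the lower bits */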
+ hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL);
+ hi->CHID = *chid;
+ result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr);
+ if (result < 0) {
+ dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result);
+ goto error_mmcie_set;
+ }
+ wusbhc->wuie_host_info = hi;
+
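+ /* Schedule the first keep-alive scan at half the trust timeout (ms converted to jiffies). */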
+ queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
+ (wusbhc->trust_timeout*CONFIG_HZ)/1000/2);
+
+ return 0;
+
+error_mmcie_set:
+ kfree(hi);
+ return result;
+}
+
+/*
+ * wusbhc_devconnect_stop - stop managing connected devices
+ * @wusbhc: the WUSB HC
+ *
+ * Removes the Host Info IE and stops the keep alives.
+ *
+ * FIXME: should this disconnect all devices?
+ */
+void wusbhc_devconnect_stop(struct wusbhc *wusbhc)
+{
+ cancel_delayed_work_sync(&wusbhc->keep_alive_timer);
+ WARN_ON(!list_empty(&wusbhc->cack_list));
+
+ wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr);
+ kfree(wusbhc->wuie_host_info);
+ wusbhc->wuie_host_info = NULL;
+}
+
+/*
+ * wusb_set_dev_addr - set the WUSB device address used by the host
+ * @wusbhc: the WUSB HC the device is connect to
+ * @wusb_dev: the WUSB device
+ * @addr: new device address
+ */
+int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, u8 addr)
+{
+ int result;
+
+ wusb_dev->addr = addr;
+ result = wusbhc->dev_info_set(wusbhc, wusb_dev);
+ if (result < 0)
+ dev_err(wusbhc->dev, "device %d: failed to set device "
+ "address\n", wusb_dev->port_idx);
+ else
+ dev_info(wusbhc->dev, "device %d: %s addr %u\n",
+ wusb_dev->port_idx,
+ (addr & WUSB_DEV_ADDR_UNAUTH) ? "unauth" : "auth",
+ wusb_dev->addr);
+
+ return result;
+}
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
new file mode 100644
index 00000000000..cfa77a01ceb
--- /dev/null
+++ b/drivers/usb/wusbcore/mmc.c
@@ -0,0 +1,321 @@
+/*
+ * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
+ * MMC (Microscheduled Management Command) handling
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * WUIEs and MMC IEs...well, they are almost the same at the end. MMC
+ * IEs are Wireless USB IEs that go into the MMC period...[what is
+ * that? look in Design-overview.txt].
+ *
+ *
+ * This is a simple subsystem to keep track of which IEs are being
+ * sent by the host in the MMC period.
+ *
+ * For each WUIE we ask to send, we keep it in an array, so we can
+ * request its removal later, or replace the content. They are tracked
+ * by pointer, so be sure to use the same pointer if you want to
+ * remove it or update the contents.
+ *
+ * FIXME:
+ * - add timers that autoremove intervalled IEs?
+ */
+#include <linux/usb/wusb.h>
+#include "wusbhc.h"
+
+/* Initialize the MMCIEs handling mechanism */
+int wusbhc_mmcie_create(struct wusbhc *wusbhc)
+{
+ u8 mmcies = wusbhc->mmcies_max;
+ wusbhc->mmcie = kcalloc(mmcies, sizeof(wusbhc->mmcie[0]), GFP_KERNEL);
+ if (wusbhc->mmcie == NULL)
+ return -ENOMEM;
+ mutex_init(&wusbhc->mmcie_mutex);
+ return 0;
+}
+
+/* Release resources used by the MMCIEs handling mechanism */
+void wusbhc_mmcie_destroy(struct wusbhc *wusbhc)
+{
+ kfree(wusbhc->mmcie);
+}
+
+/*
+ * Add or replace an MMC Wireless USB IE.
+ *
+ * @interval: See WUSB1.0[8.5.3.1]
+ * @repeat_cnt: See WUSB1.0[8.5.3.1]
+ * @handle: See WUSB1.0[8.5.3.1]
+ * @wuie: Pointer to the header of the WUSB IE data to add.
+ * MUST BE allocated in a kmalloc buffer (no stack or
+ * vmalloc).
+ * THE CALLER ALWAYS OWNS THE POINTER (we don't free it
+ * on remove, we just forget about it).
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Goes over the *whole* @wusbhc->mmcie array looking for (a) the
+ * first free spot and (b) if @wuie is already in the array (aka:
+ * transmitted in the MMCs) the spot where it is.
+ *
+ * If present, we "overwrite it" (update).
+ *
+ *
+ * NOTE: Special ordering rules apply -- see WUSB1.0 Table 7-38.
+ * The host uses the handle as the 'sort' index. We always
+ * allocate the last one for WUIE_ID_HOST_INFO, and the rest
+ * are first come, first served, in inverse order.
+ *
+ * Host software must make sure that it adds the other IEs in
+ * the right order... the host hardware is responsible for
+ * placing the WCTA IEs in the right place with the other IEs
+ * set by host software.
+ *
+ * NOTE: we can access wusbhc->wa_descr without locking because it is
+ * read only.
+ */
+int wusbhc_mmcie_set(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+ struct wuie_hdr *wuie)
+{
+ int result = -ENOBUFS;
+ unsigned handle, itr;
+
+ /* Search a handle, taking into account the ordering */
+ mutex_lock(&wusbhc->mmcie_mutex);
+ switch (wuie->bIEIdentifier) {
+ case WUIE_ID_HOST_INFO:
+ /* Always last */
+ handle = wusbhc->mmcies_max - 1;
+ break;
+ case WUIE_ID_ISOCH_DISCARD:
+ dev_err(wusbhc->dev, "Special ordering case for WUIE ID 0x%x "
+ "unimplemented\n", wuie->bIEIdentifier);
+ result = -ENOSYS;
+ goto error_unlock;
+ default:
+ /* search for it or find the last empty slot */
+ handle = ~0;
+ for (itr = 0; itr < wusbhc->mmcies_max - 1; itr++) {
+ if (wusbhc->mmcie[itr] == wuie) {
+ handle = itr;
+ break;
+ }
+ if (wusbhc->mmcie[itr] == NULL)
+ handle = itr;
+ }
+ if (handle == ~0)
+ goto error_unlock;
+ }
+ result = (wusbhc->mmcie_add)(wusbhc, interval, repeat_cnt, handle,
+ wuie);
+ if (result >= 0)
+ wusbhc->mmcie[handle] = wuie;
+error_unlock:
+ mutex_unlock(&wusbhc->mmcie_mutex);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_mmcie_set);
+
+/*
+ * Remove an MMC IE previously added with wusbhc_mmcie_set()
+ *
+ * @wuie Pointer used to add the WUIE
+ */
+void wusbhc_mmcie_rm(struct wusbhc *wusbhc, struct wuie_hdr *wuie)
+{
+ int result;
+ unsigned handle, itr;
+
+ mutex_lock(&wusbhc->mmcie_mutex);
+ for (itr = 0; itr < wusbhc->mmcies_max; itr++) {
+ if (wusbhc->mmcie[itr] == wuie) {
+ handle = itr;
+ goto found;
+ }
+ }
+ mutex_unlock(&wusbhc->mmcie_mutex);
+ return;
+
+found:
+ result = (wusbhc->mmcie_rm)(wusbhc, handle);
+ if (result == 0)
+ wusbhc->mmcie[itr] = NULL;
+ mutex_unlock(&wusbhc->mmcie_mutex);
+}
+EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm);
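+
+/*
+ * Usage sketch (mirroring the callers elsewhere in this patch, e.g.
+ * wusbhc_devconnect_start()): the caller owns the kmalloc()ed IE and
+ * passes the same pointer later to update or remove it:
+ *
+ *   ie = kzalloc(sizeof(*ie), GFP_KERNEL);
+ *   ie->hdr.bLength = sizeof(*ie);
+ *   ie->hdr.bIEIdentifier = WUIE_ID_...;
+ *   result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
+ *   ...
+ *   wusbhc_mmcie_rm(wusbhc, &ie->hdr);
+ *   kfree(ie);
+ */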
+
+/*
+ * wusbhc_start - start transmitting MMCs and accepting connections
+ * @wusbhc: the HC to start
+ * @chid: the CHID to use for this host
+ *
+ * Establishes a cluster reservation, enables device connections, and
+ * starts MMCs with appropriate DNTS parameters.
+ */
+int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
+{
+ int result;
+ struct device *dev = wusbhc->dev;
+
+ WARN_ON(wusbhc->wuie_host_info != NULL);
+
+ result = wusbhc_rsv_establish(wusbhc);
+ if (result < 0) {
+ dev_err(dev, "cannot establish cluster reservation: %d\n",
+ result);
+ goto error_rsv_establish;
+ }
+
+ result = wusbhc_devconnect_start(wusbhc, chid);
+ if (result < 0) {
+ dev_err(dev, "error enabling device connections: %d\n", result);
+ goto error_devconnect_start;
+ }
+
+ result = wusbhc_sec_start(wusbhc);
+ if (result < 0) {
+ dev_err(dev, "error starting security in the HC: %d\n", result);
+ goto error_sec_start;
+ }
+ /* FIXME: the choice of the DNTS parameters is somewhat
+ * arbitrary */
+ result = wusbhc->set_num_dnts(wusbhc, 0, 15);
+ if (result < 0) {
+ dev_err(dev, "Cannot set DNTS parameters: %d\n", result);
+ goto error_set_num_dnts;
+ }
+ result = wusbhc->start(wusbhc);
+ if (result < 0) {
+ dev_err(dev, "error starting wusbch: %d\n", result);
+ goto error_wusbhc_start;
+ }
+ wusbhc->active = 1;
+ return 0;
+
+error_wusbhc_start:
+ wusbhc_sec_stop(wusbhc);
+error_set_num_dnts:
+error_sec_start:
+ wusbhc_devconnect_stop(wusbhc);
+error_devconnect_start:
+ wusbhc_rsv_terminate(wusbhc);
+error_rsv_establish:
+ return result;
+}
+
+/*
+ * Disconnect all from the WUSB Channel
+ *
+ * Send a Host Disconnect IE in the MMC, wait, don't send it any more
+ */
+static int __wusbhc_host_disconnect_ie(struct wusbhc *wusbhc)
+{
+ int result = -ENOMEM;
+ struct wuie_host_disconnect *host_disconnect_ie;
+ might_sleep();
+ host_disconnect_ie = kmalloc(sizeof(*host_disconnect_ie), GFP_KERNEL);
+ if (host_disconnect_ie == NULL)
+ goto error_alloc;
+ host_disconnect_ie->hdr.bLength = sizeof(*host_disconnect_ie);
+ host_disconnect_ie->hdr.bIEIdentifier = WUIE_ID_HOST_DISCONNECT;
+ result = wusbhc_mmcie_set(wusbhc, 0, 0, &host_disconnect_ie->hdr);
+ if (result < 0)
+ goto error_mmcie_set;
+
+ /* WUSB1.0[8.5.3.1 & 7.5.2] */
+ msleep(100);
+ wusbhc_mmcie_rm(wusbhc, &host_disconnect_ie->hdr);
+error_mmcie_set:
+ kfree(host_disconnect_ie);
+error_alloc:
+ return result;
+}
+
+/*
+ * wusbhc_stop - stop transmitting MMCs
+ * @wusbhc: the HC to stop
+ *
+ * Send a Host Disconnect IE, wait, remove all the MMCs (stop sending MMCs).
+ *
+ * If we can't allocate a Host Disconnect IE, screw it, we just don't
+ * notify the devices we are disconnecting...
+ */
+void wusbhc_stop(struct wusbhc *wusbhc)
+{
+ if (wusbhc->active) {
+ wusbhc->active = 0;
+ wusbhc->stop(wusbhc);
+ wusbhc_sec_stop(wusbhc);
+ __wusbhc_host_disconnect_ie(wusbhc);
+ wusbhc_devconnect_stop(wusbhc);
+ wusbhc_rsv_terminate(wusbhc);
+ }
+}
+EXPORT_SYMBOL_GPL(wusbhc_stop);
+
+/*
+ * Change the CHID in a WUSB Channel
+ *
+ * If it is just a new CHID, send a Host Disconnect IE and then change
+ * the CHID IE.
+ */
+static int __wusbhc_chid_change(struct wusbhc *wusbhc,
+ const struct wusb_ckhdid *chid)
+{
+ int result = -ENOSYS;
+ struct device *dev = wusbhc->dev;
+ dev_err(dev, "%s() not implemented yet\n", __func__);
+ return result;
+
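+ /* Intended implementation, currently unreachable: */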
+ BUG_ON(wusbhc->wuie_host_info == NULL);
+ __wusbhc_host_disconnect_ie(wusbhc);
+ wusbhc->wuie_host_info->CHID = *chid;
+ result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->wuie_host_info->hdr);
+ if (result < 0)
+ dev_err(dev, "Can't update Host Info WUSB IE: %d\n", result);
+ return result;
+}
+
+/*
+ * Set/reset/update a new CHID
+ *
+ * Depending on the previous state of the MMCs, start, stop or change
+ * the sent MMC. This effectively switches the host controller on and
+ * off (radio wise).
+ */
+int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
+{
+ int result = 0;
+
+ if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0)
+ chid = NULL;
+
+ mutex_lock(&wusbhc->mutex);
+ if (wusbhc->active) {
+ if (chid)
+ result = __wusbhc_chid_change(wusbhc, chid);
+ else
+ wusbhc_stop(wusbhc);
+ } else {
+ if (chid)
+ result = wusbhc_start(wusbhc, chid);
+ }
+ mutex_unlock(&wusbhc->mutex);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_chid_set);
diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c
new file mode 100644
index 00000000000..7cc51e9905c
--- /dev/null
+++ b/drivers/usb/wusbcore/pal.c
@@ -0,0 +1,42 @@
+/*
+ * Wireless USB Host Controller
+ * UWB Protocol Adaptation Layer (PAL) glue.
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include "wusbhc.h"
+
+/**
+ * wusbhc_pal_register - register the WUSB HC as a UWB PAL
+ * @wusbhc: the WUSB HC
+ */
+int wusbhc_pal_register(struct wusbhc *wusbhc)
+{
+ uwb_pal_init(&wusbhc->pal);
+
+ wusbhc->pal.name = "wusbhc";
+ wusbhc->pal.device = wusbhc->usb_hcd.self.controller;
+
+ return uwb_pal_register(wusbhc->uwb_rc, &wusbhc->pal);
+}
+
+/**
+ * wusbhc_pal_unregister - unregister the WUSB HC as a UWB PAL
+ * @wusbhc: the WUSB HC
+ */
+void wusbhc_pal_unregister(struct wusbhc *wusbhc)
+{
+ uwb_pal_unregister(wusbhc->uwb_rc, &wusbhc->pal);
+}
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c
new file mode 100644
index 00000000000..fc63e77ded2
--- /dev/null
+++ b/drivers/usb/wusbcore/reservation.c
@@ -0,0 +1,115 @@
+/*
+ * WUSB cluster reservation management
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/uwb.h>
+
+#include "wusbhc.h"
+
+/*
+ * WUSB cluster reservations are multicast reservations with the
+ * broadcast cluster ID (BCID) as the target DevAddr.
+ *
+ * FIXME: consider adjusting the reservation depending on what devices
+ * are attached.
+ */
+
+static int wusbhc_bwa_set(struct wusbhc *wusbhc, u8 stream,
+ const struct uwb_mas_bm *mas)
+{
+ if (mas == NULL)
+ mas = &uwb_mas_bm_zero;
+ return wusbhc->bwa_set(wusbhc, stream, mas);
+}
+
+/**
+ * wusbhc_rsv_complete_cb - WUSB HC reservation complete callback
+ * @rsv: the reservation
+ *
+ * Either set or clear the HC's view of the reservation.
+ *
+ * FIXME: when a reservation is denied the HC should be stopped.
+ */
+static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
+{
+ struct wusbhc *wusbhc = rsv->pal_priv;
+ struct device *dev = wusbhc->dev;
+ char buf[72];
+
+ switch (rsv->state) {
+ case UWB_RSV_STATE_O_ESTABLISHED:
+ bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS);
+ dev_dbg(dev, "established reservation: %s\n", buf);
+ wusbhc_bwa_set(wusbhc, rsv->stream, &rsv->mas);
+ break;
+ case UWB_RSV_STATE_NONE:
+ dev_dbg(dev, "removed reservation\n");
+ wusbhc_bwa_set(wusbhc, 0, NULL);
+ wusbhc->rsv = NULL;
+ break;
+ default:
+ dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state);
+ break;
+ }
+}
+
+
+/**
+ * wusbhc_rsv_establish - establish a reservation for the cluster
+ * @wusbhc: the WUSB HC requesting a bandwidth reservation
+ */
+int wusbhc_rsv_establish(struct wusbhc *wusbhc)
+{
+ struct uwb_rc *rc = wusbhc->uwb_rc;
+ struct uwb_rsv *rsv;
+ struct uwb_dev_addr bcid;
+ int ret;
+
+ rsv = uwb_rsv_create(rc, wusbhc_rsv_complete_cb, wusbhc);
+ if (rsv == NULL)
+ return -ENOMEM;
+
+ bcid.data[0] = wusbhc->cluster_id;
+ bcid.data[1] = 0;
+
+ rsv->owner = &rc->uwb_dev;
+ rsv->target.type = UWB_RSV_TARGET_DEVADDR;
+ rsv->target.devaddr = bcid;
+ rsv->type = UWB_DRP_TYPE_PRIVATE;
+ rsv->max_mas = 256;
+ rsv->min_mas = 16; /* one MAS per zone? */
+ rsv->sparsity = 16; /* at least one MAS in each zone? */
+ rsv->is_multicast = true;
+
+ ret = uwb_rsv_establish(rsv);
+ if (ret == 0)
+ wusbhc->rsv = rsv;
+ else
+ uwb_rsv_destroy(rsv);
+ return ret;
+}
+
+
+/**
+ * wusbhc_rsv_terminate - terminate any cluster reservation
+ * @wusbhc: the WUSB host whose reservation is to be terminated
+ */
+void wusbhc_rsv_terminate(struct wusbhc *wusbhc)
+{
+ if (wusbhc->rsv)
+ uwb_rsv_terminate(wusbhc->rsv);
+}
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
new file mode 100644
index 00000000000..267a6432510
--- /dev/null
+++ b/drivers/usb/wusbcore/rh.c
@@ -0,0 +1,477 @@
+/*
+ * Wireless USB Host Controller
+ * Root Hub operations
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * We fake a root hub that has fake ports (as many as simultaneous
+ * devices the Wireless USB Host Controller can deal with). For each
+ * port we keep a state in @wusbhc->port[index] identical to the one
+ * specified in the USB2.0[ch11] spec and some extra device
+ * information that complements the one in 'struct usb_device' (as
+ * this lacks a hcpriv pointer).
+ *
+ * Note this is common to WHCI and HWA host controllers.
+ *
+ * Through here we enable most of the state changes that the USB stack
+ * will use to connect or disconnect devices. We need to do some
+ * forced adaptation of Wireless USB device states vs. wired:
+ *
+ * USB: WUSB:
+ *
+ * Port Powered-off port slot n/a
+ * Powered-on port slot available
+ * Disconnected port slot available
+ * Connected port slot assigned device
+ * device sent DN_Connect
+ * device was authenticated
+ * Enabled device is authenticated, transitioned
+ * from unauth -> auth -> default address
+ * -> enabled
+ * Reset disconnect
+ * Disable disconnect
+ *
+ * This maps the standard USB port states with the WUSB device states
+ * so we can fake ports without having to modify the USB stack.
+ *
+ * FIXME: this process will change in the future
+ *
+ *
+ * ENTRY POINTS
+ *
+ * Our entry points into here are, as in hcd.c, the USB stack root hub
+ * ops defined in the usb_hcd struct:
+ *
+ * wusbhc_rh_status_data() Provide hub and port status data bitmap
+ *
+ * wusbhc_rh_control() Execution of all the major requests
+ * you can do to a hub (Set|Clear
+ * features, get descriptors, status, etc).
+ *
+ * wusbhc_rh_[suspend|resume]() Suspend/resume the root hub (currently unimplemented)
+ *
+ * wusbhc_rh_start_port_reset() ??? unimplemented
+ */
+#include "wusbhc.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/*
+ * Reset a fake port
+ *
+ * This can be called to reset a port from any other state or to reset
+ * it when connecting. In Wireless USB the two cases are different: a
+ * new connect involves going through authentication, while a plain
+ * reset is a different story.
+ *
+ * The Linux USB stack resets a port twice before it considers it
+ * enabled, so we have to detect and ignore that.
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ *
+ * Supposedly we are the only thread accessing @wusbhc->port; in any
+ * case, maybe we should move the mutex locking from
+ * wusbhc_devconnect_auth() to here.
+ *
+ * @port_idx refers to the wusbhc's port index, not the USB port number
+ */
+static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx)
+{
+ int result = 0;
+ struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);
+
+ d_fnstart(3, wusbhc->dev, "(wusbhc %p port_idx %u)\n",
+ wusbhc, port_idx);
+ if (port->reset_count == 0) {
+ wusbhc_devconnect_auth(wusbhc, port_idx);
+ port->reset_count++;
+ } else if (port->reset_count == 1)
+ /* see header */
+ d_printf(2, wusbhc->dev, "Ignoring second reset on port_idx "
+ "%u\n", port_idx);
+ else
+ result = wusbhc_dev_reset(wusbhc, port_idx);
+ d_fnend(3, wusbhc->dev, "(wusbhc %p port_idx %u) = %d\n",
+ wusbhc, port_idx, result);
+ return result;
+}
+
+/*
+ * Return the hub change status bitmap
+ *
+ * The bits in the change status bitmap are cleared when a
+ * ClearPortFeature request is issued (USB2.0[11.12.3,11.12.4].
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ *
+ * WARNING!! This gets called from atomic context; we cannot get the
+ * mutex--the only race condition we can find is some bit
+ * changing just after we copy it, which shouldn't be too
+ * big of a problem [and we can't make it a spinlock
+ * because other parts need to take it and sleep].
+ *
+ * @usb_hcd is refcounted, so it won't disappear under us
+ * and before killing a host, the polling of the root hub
+ * would be stopped anyway.
+ */
+int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ size_t cnt, size;
+ unsigned long *buf = (unsigned long *) _buf;
+
+ d_fnstart(1, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
+ /* WE DON'T LOCK, see comment */
+ size = wusbhc->ports_max + 1 /* hub bit */;
+ size = (size + 8 - 1) / 8; /* round to bytes */
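+ /* bit 0 is the hub's own change bit; fake port N reports its change in bit N + 1 */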
+ for (cnt = 0; cnt < wusbhc->ports_max; cnt++)
+ if (wusb_port_by_idx(wusbhc, cnt)->change)
+ set_bit(cnt + 1, buf);
+ else
+ clear_bit(cnt + 1, buf);
+ d_fnend(1, wusbhc->dev, "(wusbhc %p) %u, buffer:\n", wusbhc, (int)size);
+ d_dump(1, wusbhc->dev, _buf, size);
+ return size;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_status_data);
+
+/*
+ * Return the hub's descriptor
+ *
+ * NOTE: almost cut and paste from ehci-hub.c
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked
+ */
+static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue,
+ u16 wIndex,
+ struct usb_hub_descriptor *descr,
+ u16 wLength)
+{
+ u16 temp = 1 + (wusbhc->ports_max / 8);
+ u8 length = 7 + 2 * temp;
+
+ if (wLength < length)
+ return -ENOSPC;
+ descr->bDescLength = 7 + 2 * temp;
+ descr->bDescriptorType = 0x29; /* HUB type */
+ descr->bNbrPorts = wusbhc->ports_max;
+ descr->wHubCharacteristics = cpu_to_le16(
+ 0x00 /* All ports power at once */
+ | 0x00 /* not part of compound device */
+ | 0x10 /* No overcurrent protection */
+ | 0x00 /* 8 FS think time FIXME ?? */
+ | 0x00); /* No port indicators */
+ descr->bPwrOn2PwrGood = 0;
+ descr->bHubContrCurrent = 0;
+ /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&descr->bitmap[0], 0, temp);
+ memset(&descr->bitmap[temp], 0xff, temp);
+ return 0;
+}
+
+/*
+ * Clear a hub feature
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ *
+ * Nothing to do, so no locking needed ;)
+ */
+static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature)
+{
+ int result;
+ struct device *dev = wusbhc->dev;
+
+ d_fnstart(4, dev, "(%p, feature 0x%04u)\n", wusbhc, feature);
+ switch (feature) {
+ case C_HUB_LOCAL_POWER:
+ /* FIXME: maybe plug bit 0 to the power input status,
+ * if any?
+ * see wusbhc_rh_get_hub_status() */
+ case C_HUB_OVER_CURRENT:
+ result = 0;
+ break;
+ default:
+ result = -EPIPE;
+ }
+ d_fnend(4, dev, "(%p, feature 0x%04u), %d\n", wusbhc, feature, result);
+ return result;
+}
+
+/*
+ * Return hub status (it is always zero...)
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ *
+ * Nothing to do, so no locking needed ;)
+ */
+static int wusbhc_rh_get_hub_status(struct wusbhc *wusbhc, u32 *buf,
+ u16 wLength)
+{
+ /* FIXME: maybe plug bit 0 to the power input status (if any)? */
+ *buf = 0;
+ return 0;
+}
+
+/*
+ * Set a port feature
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ */
+static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature,
+ u8 selector, u8 port_idx)
+{
+ int result = -EINVAL;
+ struct device *dev = wusbhc->dev;
+
+ d_fnstart(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d)\n",
+ feature, selector, port_idx);
+
+ if (port_idx > wusbhc->ports_max)
+ goto error;
+
+ switch (feature) {
+ /* According to USB2.0[11.24.2.13]p2, these features
+ * are not required to be implemented. */
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
+ result = 0;
+ break;
+
+ case USB_PORT_FEAT_POWER:
+ /* No such thing, but we fake it works */
+ mutex_lock(&wusbhc->mutex);
+ wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER;
+ mutex_unlock(&wusbhc->mutex);
+ result = 0;
+ break;
+ case USB_PORT_FEAT_RESET:
+ result = wusbhc_rh_port_reset(wusbhc, port_idx);
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ case USB_PORT_FEAT_SUSPEND:
+ dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n",
+ port_idx, feature, selector);
+ result = -ENOSYS;
+ break;
+ default:
+ dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n",
+ port_idx, feature, selector);
+ result = -EPIPE;
+ break;
+ }
+error:
+ d_fnend(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d) = %d\n",
+ feature, selector, port_idx, result);
+ return result;
+}
+
+/*
+ * Clear a port feature...
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ */
+static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature,
+ u8 selector, u8 port_idx)
+{
+ int result = -EINVAL;
+ struct device *dev = wusbhc->dev;
+
+ d_fnstart(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d)\n",
+ wusbhc, feature, selector, port_idx);
+
+ if (port_idx > wusbhc->ports_max)
+ goto error;
+
+ mutex_lock(&wusbhc->mutex);
+ result = 0;
+ switch (feature) {
+ case USB_PORT_FEAT_POWER: /* fake port always on */
+ /* According to USB2.0[11.24.2.7.1.4], no need to implement? */
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_RESET;
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_CONNECTION;
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ __wusbhc_dev_disable(wusbhc, port_idx);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_ENABLE;
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ case USB_PORT_FEAT_C_SUSPEND:
+ case 0xffff: /* ??? FIXME */
+ dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n",
+ port_idx, feature, selector);
+ /* dump_stack(); */
+ result = -ENOSYS;
+ break;
+ default:
+ dev_err(dev, "(port_idx %d) Clear feat %d/%d UNKNOWN\n",
+ port_idx, feature, selector);
+ result = -EPIPE;
+ break;
+ }
+ mutex_unlock(&wusbhc->mutex);
+error:
+ d_fnend(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d) = "
+ "%d\n", wusbhc, feature, selector, port_idx, result);
+ return result;
+}
+
+/*
+ * Return the port's status
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ */
+static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx,
+ u32 *_buf, u16 wLength)
+{
+ int result = -EINVAL;
+ u16 *buf = (u16 *) _buf;
+
+ d_fnstart(1, wusbhc->dev, "(wusbhc %p port_idx %u wLength %u)\n",
+ wusbhc, port_idx, wLength);
+ if (port_idx > wusbhc->ports_max)
+ goto error;
+ mutex_lock(&wusbhc->mutex);
+ buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status);
+ buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change);
+ result = 0;
+ mutex_unlock(&wusbhc->mutex);
+error:
+ d_fnend(1, wusbhc->dev, "(wusbhc %p) = %d, buffer:\n", wusbhc, result);
+ d_dump(1, wusbhc->dev, _buf, wLength);
+ return result;
+}
+
+/*
+ * Entry point for Root Hub operations
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ */
+int wusbhc_rh_control(struct usb_hcd *usb_hcd, u16 reqntype, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ int result = -ENOSYS;
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+
+ switch (reqntype) {
+ case GetHubDescriptor:
+ result = wusbhc_rh_get_hub_descr(
+ wusbhc, wValue, wIndex,
+ (struct usb_hub_descriptor *) buf, wLength);
+ break;
+ case ClearHubFeature:
+ result = wusbhc_rh_clear_hub_feat(wusbhc, wValue);
+ break;
+ case GetHubStatus:
+ result = wusbhc_rh_get_hub_status(wusbhc, (u32 *)buf, wLength);
+ break;
+
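+ /* For the *PortFeature requests the wIndex high byte is the
+ * selector and the low byte is the 1-based port number; convert
+ * it to a 0-based port index. */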
+ case SetPortFeature:
+ result = wusbhc_rh_set_port_feat(wusbhc, wValue, wIndex >> 8,
+ (wIndex & 0xff) - 1);
+ break;
+ case ClearPortFeature:
+ result = wusbhc_rh_clear_port_feat(wusbhc, wValue, wIndex >> 8,
+ (wIndex & 0xff) - 1);
+ break;
+ case GetPortStatus:
+ result = wusbhc_rh_get_port_status(wusbhc, wIndex - 1,
+ (u32 *)buf, wLength);
+ break;
+
+ case SetHubFeature:
+ default:
+ dev_err(wusbhc->dev, "%s (%p [%p], %x, %x, %x, %p, %x) "
+ "UNIMPLEMENTED\n", __func__, usb_hcd, wusbhc, reqntype,
+ wValue, wIndex, buf, wLength);
+ /* dump_stack(); */
+ result = -ENOSYS;
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_control);
+
+int wusbhc_rh_suspend(struct usb_hcd *usb_hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
+ usb_hcd, wusbhc);
+ /* dump_stack(); */
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_suspend);
+
+int wusbhc_rh_resume(struct usb_hcd *usb_hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
+ usb_hcd, wusbhc);
+ /* dump_stack(); */
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_resume);
+
+int wusbhc_rh_start_port_reset(struct usb_hcd *usb_hcd, unsigned port_idx)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ dev_err(wusbhc->dev, "%s (%p [%p], port_idx %u) UNIMPLEMENTED\n",
+ __func__, usb_hcd, wusbhc, port_idx);
+ WARN_ON(1);
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_start_port_reset);
+
+static void wusb_port_init(struct wusb_port *port)
+{
+ port->status |= USB_PORT_STAT_HIGH_SPEED;
+}
+
+/*
+ * Alloc fake port specific fields and status.
+ */
+int wusbhc_rh_create(struct wusbhc *wusbhc)
+{
+ int result = -ENOMEM;
+ size_t port_size, itr;
+ port_size = wusbhc->ports_max * sizeof(wusbhc->port[0]);
+ wusbhc->port = kzalloc(port_size, GFP_KERNEL);
+ if (wusbhc->port == NULL)
+ goto error_port_alloc;
+ for (itr = 0; itr < wusbhc->ports_max; itr++)
+ wusb_port_init(&wusbhc->port[itr]);
+ result = 0;
+error_port_alloc:
+ return result;
+}
+
+void wusbhc_rh_destroy(struct wusbhc *wusbhc)
+{
+ kfree(wusbhc->port);
+}
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
new file mode 100644
index 00000000000..a101cad6a8d
--- /dev/null
+++ b/drivers/usb/wusbcore/security.c
@@ -0,0 +1,642 @@
+/*
+ * Wireless USB Host Controller
+ * Security support: encryption enablement, etc
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+#include <linux/random.h>
+#include "wusbhc.h"
+
+/*
+ * DEBUG & SECURITY WARNING!!!!
+ *
+ * If you enable this past 1, the debug code will weaken the
+ * cryptographic safety of the system (on purpose, for debugging).
+ *
+ * Weaken means:
+ * we print secret keys and intermediate values all the way,
+ */
+#undef D_LOCAL
+#define D_LOCAL 2
+#include <linux/uwb/debug.h>
+
+static void wusbhc_set_gtk_callback(struct urb *urb);
+static void wusbhc_gtk_rekey_done_work(struct work_struct *work);
+
+int wusbhc_sec_create(struct wusbhc *wusbhc)
+{
+ wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data);
+ wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
+ wusbhc->gtk.descr.bReserved = 0;
+
+ wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
+ WUSB_KEY_INDEX_ORIGINATOR_HOST);
+
+ INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work);
+
+ return 0;
+}
+
+
+/* Called when the HC is destroyed */
+void wusbhc_sec_destroy(struct wusbhc *wusbhc)
+{
+}
+
+
+/**
+ * wusbhc_next_tkid - generate a new, currently unused, TKID
+ * @wusbhc: the WUSB host controller
+ * @wusb_dev: the device whose PTK the TKID is for
+ * (or NULL for a TKID for a GTK)
+ *
+ * The generated TKID consists of two parts: the device's authenticated
+ * address (or 0 for a GTK); and an incrementing number. This ensures
+ * that TKIDs cannot be shared between devices and by the time the
+ * incrementing number wraps around the older TKIDs will no longer be
+ * in use (a maximum of two keys may be active at any one time).
+ */
+static u32 wusbhc_next_tkid(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ u32 *tkid;
+ u32 addr;
+
+ if (wusb_dev == NULL) {
+ tkid = &wusbhc->gtk_tkid;
+ addr = 0;
+ } else {
+ tkid = &wusb_port_by_idx(wusbhc, wusb_dev->port_idx)->ptk_tkid;
+ addr = wusb_dev->addr & 0x7f;
+ }
+
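+ /* TKID: device address in bits 8 and up, 8-bit rolling counter in the low byte */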
+ *tkid = (addr << 8) | ((*tkid + 1) & 0xff);
+
+ return *tkid;
+}
+
+static void wusbhc_generate_gtk(struct wusbhc *wusbhc)
+{
+ const size_t key_size = sizeof(wusbhc->gtk.data);
+ u32 tkid;
+
+ tkid = wusbhc_next_tkid(wusbhc, NULL);
+
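+ /* store the TKID little-endian in the key descriptor */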
+ wusbhc->gtk.descr.tTKID[0] = (tkid >> 0) & 0xff;
+ wusbhc->gtk.descr.tTKID[1] = (tkid >> 8) & 0xff;
+ wusbhc->gtk.descr.tTKID[2] = (tkid >> 16) & 0xff;
+
+ get_random_bytes(wusbhc->gtk.descr.bKeyData, key_size);
+}
+
+/**
+ * wusbhc_sec_start - start the security management process
+ * @wusbhc: the WUSB host controller
+ *
+ * Generate and set an initial GTK on the host controller.
+ *
+ * Called when the HC is started.
+ */
+int wusbhc_sec_start(struct wusbhc *wusbhc)
+{
+ const size_t key_size = sizeof(wusbhc->gtk.data);
+ int result;
+
+ wusbhc_generate_gtk(wusbhc);
+
+ result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
+ &wusbhc->gtk.descr.bKeyData, key_size);
+ if (result < 0)
+ dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n",
+ result);
+
+ return result;
+}
+
+/**
+ * wusbhc_sec_stop - stop the security management process
+ * @wusbhc: the WUSB host controller
+ *
+ * Wait for any pending GTK rekeys to stop.
+ */
+void wusbhc_sec_stop(struct wusbhc *wusbhc)
+{
+ cancel_work_sync(&wusbhc->gtk_rekey_done_work);
+}
+
+
+/** @returns encryption type name */
+const char *wusb_et_name(u8 x)
+{
+ switch (x) {
+ case USB_ENC_TYPE_UNSECURE: return "unsecure";
+ case USB_ENC_TYPE_WIRED: return "wired";
+ case USB_ENC_TYPE_CCM_1: return "CCM-1";
+ case USB_ENC_TYPE_RSA_1: return "RSA-1";
+ default: return "unknown";
+ }
+}
+EXPORT_SYMBOL_GPL(wusb_et_name);
+
+/*
+ * Set the device encryption method
+ *
+ * We tell the device which encryption method to use; we do this when
+ * setting up the device's security.
+ */
+static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
+{
+ int result;
+ struct device *dev = &usb_dev->dev;
+ struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
+
+ if (value) {
+ value = wusb_dev->ccm1_etd.bEncryptionValue;
+ } else {
+ /* FIXME: should be wusb_dev->etd[UNSECURE].bEncryptionValue */
+ value = 0;
+ }
+ /* Set the device's encryption method */
+ result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_ENCRYPTION,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ value, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
+ if (result < 0)
+ dev_err(dev, "Can't set device's WUSB encryption to "
+ "%s (value %d): %d\n",
+ wusb_et_name(wusb_dev->ccm1_etd.bEncryptionType),
+ wusb_dev->ccm1_etd.bEncryptionValue, result);
+ return result;
+}
+
+/*
+ * Set the GTK to be used by a device.
+ *
+ * The device must be authenticated.
+ */
+static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ struct usb_device *usb_dev = wusb_dev->usb_dev;
+
+ return usb_control_msg(
+ usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_DESCRIPTOR,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ USB_DT_KEY << 8 | wusbhc->gtk_index, 0,
+ &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
+ 1000);
+}
+
+
+/* FIXME: prototype for adding security */
+int wusb_dev_sec_add(struct wusbhc *wusbhc,
+ struct usb_device *usb_dev, struct wusb_dev *wusb_dev)
+{
+ int result, bytes, secd_size;
+ struct device *dev = &usb_dev->dev;
+ struct usb_security_descriptor secd;
+ const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL;
+ void *secd_buf;
+ const void *itr, *top;
+ char buf[64];
+
+ d_fnstart(3, dev, "(usb_dev %p, wusb_dev %p)\n", usb_dev, wusb_dev);
+ result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
+ 0, &secd, sizeof(secd));
+ if (result < sizeof(secd)) {
+ dev_err(dev, "Can't read security descriptor or "
+ "not enough data: %d\n", result);
+ goto error_secd;
+ }
+ secd_size = le16_to_cpu(secd.wTotalLength);
+ d_printf(5, dev, "got %d bytes of sec descriptor, total is %d\n",
+ result, secd_size);
+ secd_buf = kmalloc(secd_size, GFP_KERNEL);
+ if (secd_buf == NULL) {
+ dev_err(dev, "Can't allocate space for security descriptors\n");
+ goto error_secd_alloc;
+ }
+ result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
+ 0, secd_buf, secd_size);
+ if (result < secd_size) {
+ dev_err(dev, "Can't read security descriptor or "
+ "not enough data: %d\n", result);
+ goto error_secd_all;
+ }
+ d_printf(5, dev, "got %d bytes of sec descriptors\n", result);
+ bytes = 0;
+ itr = secd_buf + sizeof(secd);
+ top = secd_buf + result;
+ while (itr < top) {
+ etd = itr;
+ if (top - itr < sizeof(*etd)) {
+ dev_err(dev, "BUG: bad device security descriptor; "
+ "not enough data (%zu vs %zu bytes left)\n",
+ top - itr, sizeof(*etd));
+ break;
+ }
+ if (etd->bLength < sizeof(*etd)) {
+ dev_err(dev, "BUG: bad device encryption descriptor; "
+ "descriptor is too short "
+ "(%u vs %zu needed)\n",
+ etd->bLength, sizeof(*etd));
+ break;
+ }
+ itr += etd->bLength;
+ bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
+ "%s (0x%02x/%02x) ",
+ wusb_et_name(etd->bEncryptionType),
+ etd->bEncryptionValue, etd->bAuthKeyIndex);
+ if (etd->bEncryptionType == USB_ENC_TYPE_CCM_1)
+ ccm1_etd = etd;
+ }
+ /* This code only supports CCM1 as of now. */
+ /* FIXME: user has to choose which sec mode to use?
+ * In theory we want CCM */
+ if (ccm1_etd == NULL) {
+ dev_err(dev, "WUSB device doesn't support CCM1 encryption, "
+ "can't use!\n");
+ result = -EINVAL;
+ goto error_no_ccm1;
+ }
+ wusb_dev->ccm1_etd = *ccm1_etd;
+ dev_info(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n",
+ buf, wusb_et_name(ccm1_etd->bEncryptionType),
+ ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex);
+ result = 0;
+ kfree(secd_buf);
+out:
+ d_fnend(3, dev, "(usb_dev %p, wusb_dev %p) = %d\n",
+ usb_dev, wusb_dev, result);
+ return result;
+
+
+error_no_ccm1:
+error_secd_all:
+ kfree(secd_buf);
+error_secd_alloc:
+error_secd:
+ goto out;
+}
+
+void wusb_dev_sec_rm(struct wusb_dev *wusb_dev)
+{
+ /* Nothing so far */
+}
+
+static void hs_printk(unsigned level, struct device *dev,
+ struct usb_handshake *hs)
+{
+ d_printf(level, dev,
+ " bMessageNumber: %u\n"
+ " bStatus: %u\n"
+ " tTKID: %02x %02x %02x\n"
+ " CDID: %02x %02x %02x %02x %02x %02x %02x %02x\n"
+ " %02x %02x %02x %02x %02x %02x %02x %02x\n"
+ " nonce: %02x %02x %02x %02x %02x %02x %02x %02x\n"
+ " %02x %02x %02x %02x %02x %02x %02x %02x\n"
+ " MIC: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ hs->bMessageNumber, hs->bStatus,
+ hs->tTKID[2], hs->tTKID[1], hs->tTKID[0],
+ hs->CDID[0], hs->CDID[1], hs->CDID[2], hs->CDID[3],
+ hs->CDID[4], hs->CDID[5], hs->CDID[6], hs->CDID[7],
+ hs->CDID[8], hs->CDID[9], hs->CDID[10], hs->CDID[11],
+ hs->CDID[12], hs->CDID[13], hs->CDID[14], hs->CDID[15],
+ hs->nonce[0], hs->nonce[1], hs->nonce[2], hs->nonce[3],
+ hs->nonce[4], hs->nonce[5], hs->nonce[6], hs->nonce[7],
+ hs->nonce[8], hs->nonce[9], hs->nonce[10], hs->nonce[11],
+ hs->nonce[12], hs->nonce[13], hs->nonce[14], hs->nonce[15],
+ hs->MIC[0], hs->MIC[1], hs->MIC[2], hs->MIC[3],
+ hs->MIC[4], hs->MIC[5], hs->MIC[6], hs->MIC[7]);
+}
+
+/**
+ * Update the address of an unauthenticated WUSB device
+ *
+ * Once we have successfully authenticated, we take it to addr0 state
+ * and then to a normal address.
+ *
+ * Before this, the device's address (as known by it) was
+ * usb_dev->devnum | 0x80 (the unauthenticated address). With this we
+ * update it to usb_dev->devnum.
+ */
+static int wusb_dev_update_address(struct wusbhc *wusbhc,
+ struct wusb_dev *wusb_dev)
+{
+ int result = -ENOMEM;
+ struct usb_device *usb_dev = wusb_dev->usb_dev;
+ struct device *dev = &usb_dev->dev;
+ u8 new_address = wusb_dev->addr & 0x7F;
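+ /* new_address: the unauthenticated flag (bit 7) cleared */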
+
+ /* Set address 0 */
+ result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_ADDRESS, 0,
+ 0, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
+ if (result < 0) {
+ dev_err(dev, "auth failed: can't set address 0: %d\n",
+ result);
+ goto error_addr0;
+ }
+ result = wusb_set_dev_addr(wusbhc, wusb_dev, 0);
+ if (result < 0)
+ goto error_addr0;
+ usb_ep0_reinit(usb_dev);
+
+ /* Set new (authenticated) address. */
+ result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_ADDRESS, 0,
+ new_address, 0, NULL, 0,
+ 1000 /* FIXME: arbitrary */);
+ if (result < 0) {
+ dev_err(dev, "auth failed: can't set address %u: %d\n",
+ new_address, result);
+ goto error_addr;
+ }
+ result = wusb_set_dev_addr(wusbhc, wusb_dev, new_address);
+ if (result < 0)
+ goto error_addr;
+ usb_ep0_reinit(usb_dev);
+ usb_dev->authenticated = 1;
+error_addr:
+error_addr0:
+ return result;
+}
+
+/*
+ * Perform the WUSB 4-way handshake with @wusb_dev using the
+ * connection key @ck, and enable encryption on the device.
+ */
+/* FIXME: split and cleanup */
+int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
+ struct wusb_ckhdid *ck)
+{
+ int result = -ENOMEM;
+ struct usb_device *usb_dev = wusb_dev->usb_dev;
+ struct device *dev = &usb_dev->dev;
+ u32 tkid;
+ __le32 tkid_le;
+ struct usb_handshake *hs;
+ struct aes_ccm_nonce ccm_n;
+ u8 mic[8];
+ struct wusb_keydvt_in keydvt_in;
+ struct wusb_keydvt_out keydvt_out;
+
+ hs = kzalloc(3*sizeof(hs[0]), GFP_KERNEL);
+ if (hs == NULL) {
+ dev_err(dev, "can't allocate handshake data\n");
+ goto error_kzalloc;
+ }
+
+	/* We need to turn on encryption before beginning the 4-way
+	 * handshake (WUSB1.0[.3.2.2]) */
+ result = wusb_dev_set_encryption(usb_dev, 1);
+ if (result < 0)
+ goto error_dev_set_encryption;
+
+ tkid = wusbhc_next_tkid(wusbhc, wusb_dev);
+ tkid_le = cpu_to_le32(tkid);
+
+ hs[0].bMessageNumber = 1;
+ hs[0].bStatus = 0;
+ memcpy(hs[0].tTKID, &tkid_le, sizeof(hs[0].tTKID));
+ hs[0].bReserved = 0;
+ memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
+ get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
+ memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */
+
+ d_printf(1, dev, "I: sending hs1:\n");
+ hs_printk(2, dev, &hs[0]);
+
+ result = usb_control_msg(
+ usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_HANDSHAKE,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ 1, 0, &hs[0], sizeof(hs[0]), 1000 /* FIXME: arbitrary */);
+ if (result < 0) {
+ dev_err(dev, "Handshake1: request failed: %d\n", result);
+ goto error_hs1;
+ }
+
+ /* Handshake 2, from the device -- need to verify fields */
+ result = usb_control_msg(
+ usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_GET_HANDSHAKE,
+ USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ 2, 0, &hs[1], sizeof(hs[1]), 1000 /* FIXME: arbitrary */);
+ if (result < 0) {
+ dev_err(dev, "Handshake2: request failed: %d\n", result);
+ goto error_hs2;
+ }
+ d_printf(1, dev, "got HS2:\n");
+ hs_printk(2, dev, &hs[1]);
+
+ result = -EINVAL;
+ if (hs[1].bMessageNumber != 2) {
+ dev_err(dev, "Handshake2 failed: bad message number %u\n",
+ hs[1].bMessageNumber);
+ goto error_hs2;
+ }
+ if (hs[1].bStatus != 0) {
+ dev_err(dev, "Handshake2 failed: bad status %u\n",
+ hs[1].bStatus);
+ goto error_hs2;
+ }
+ if (memcmp(hs[0].tTKID, hs[1].tTKID, sizeof(hs[0].tTKID))) {
+ dev_err(dev, "Handshake2 failed: TKID mismatch "
+ "(#1 0x%02x%02x%02x vs #2 0x%02x%02x%02x)\n",
+ hs[0].tTKID[0], hs[0].tTKID[1], hs[0].tTKID[2],
+ hs[1].tTKID[0], hs[1].tTKID[1], hs[1].tTKID[2]);
+ goto error_hs2;
+ }
+ if (memcmp(hs[0].CDID, hs[1].CDID, sizeof(hs[0].CDID))) {
+ dev_err(dev, "Handshake2 failed: CDID mismatch\n");
+ goto error_hs2;
+ }
+
+ /* Setup the CCM nonce */
+ memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
+ memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid));
+ ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
+ ccm_n.dest_addr.data[0] = wusb_dev->addr;
+ ccm_n.dest_addr.data[1] = 0;
+
+ /* Derive the KCK and PTK from CK, the CCM, H and D nonces */
+ memcpy(keydvt_in.hnonce, hs[0].nonce, sizeof(keydvt_in.hnonce));
+ memcpy(keydvt_in.dnonce, hs[1].nonce, sizeof(keydvt_in.dnonce));
+ result = wusb_key_derive(&keydvt_out, ck->data, &ccm_n, &keydvt_in);
+ if (result < 0) {
+ dev_err(dev, "Handshake2 failed: cannot derive keys: %d\n",
+ result);
+ goto error_hs2;
+ }
+ d_printf(2, dev, "KCK:\n");
+ d_dump(2, dev, keydvt_out.kck, sizeof(keydvt_out.kck));
+ d_printf(2, dev, "PTK:\n");
+ d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk));
+
+ /* Compute MIC and verify it */
+ result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]);
+ if (result < 0) {
+ dev_err(dev, "Handshake2 failed: cannot compute MIC: %d\n",
+ result);
+ goto error_hs2;
+ }
+
+ d_printf(2, dev, "MIC:\n");
+ d_dump(2, dev, mic, sizeof(mic));
+ if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) {
+ dev_err(dev, "Handshake2 failed: MIC mismatch\n");
+ goto error_hs2;
+ }
+
+ /* Send Handshake3 */
+ hs[2].bMessageNumber = 3;
+ hs[2].bStatus = 0;
+ memcpy(hs[2].tTKID, &tkid_le, sizeof(hs[2].tTKID));
+ hs[2].bReserved = 0;
+ memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID));
+ memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce));
+ result = wusb_oob_mic(hs[2].MIC, keydvt_out.kck, &ccm_n, &hs[2]);
+ if (result < 0) {
+ dev_err(dev, "Handshake3 failed: cannot compute MIC: %d\n",
+ result);
+ goto error_hs2;
+ }
+
+ d_printf(1, dev, "I: sending hs3:\n");
+ hs_printk(2, dev, &hs[2]);
+
+ result = usb_control_msg(
+ usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_HANDSHAKE,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ 3, 0, &hs[2], sizeof(hs[2]), 1000 /* FIXME: arbitrary */);
+ if (result < 0) {
+ dev_err(dev, "Handshake3: request failed: %d\n", result);
+ goto error_hs3;
+ }
+
+ d_printf(1, dev, "I: turning on encryption on host for device\n");
+ d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk));
+ result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid,
+ keydvt_out.ptk, sizeof(keydvt_out.ptk));
+ if (result < 0)
+ goto error_wusbhc_set_ptk;
+
+ d_printf(1, dev, "I: setting a GTK\n");
+ result = wusb_dev_set_gtk(wusbhc, wusb_dev);
+ if (result < 0) {
+ dev_err(dev, "Set GTK for device: request failed: %d\n",
+ result);
+ goto error_wusbhc_set_gtk;
+ }
+
+ /* Update the device's address from unauth to auth */
+ if (usb_dev->authenticated == 0) {
+ d_printf(1, dev, "I: updating addres to auth from non-auth\n");
+ result = wusb_dev_update_address(wusbhc, wusb_dev);
+ if (result < 0)
+ goto error_dev_update_address;
+ }
+ result = 0;
+ d_printf(1, dev, "I: 4way handshke done, device authenticated\n");
+
+error_dev_update_address:
+error_wusbhc_set_gtk:
+error_wusbhc_set_ptk:
+error_hs3:
+error_hs2:
+error_hs1:
+ memset(hs, 0, 3*sizeof(hs[0]));
+ memset(&keydvt_out, 0, sizeof(keydvt_out));
+ memset(&keydvt_in, 0, sizeof(keydvt_in));
+ memset(&ccm_n, 0, sizeof(ccm_n));
+ memset(mic, 0, sizeof(mic));
+ if (result < 0) {
+ /* error path */
+ wusb_dev_set_encryption(usb_dev, 0);
+ }
+error_dev_set_encryption:
+ kfree(hs);
+error_kzalloc:
+ return result;
+}
+
+/*
+ * Once all connected and authenticated devices have received the new
+ * GTK, switch the host to using it.
+ */
+static void wusbhc_gtk_rekey_done_work(struct work_struct *work)
+{
+ struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work);
+ size_t key_size = sizeof(wusbhc->gtk.data);
+
+ mutex_lock(&wusbhc->mutex);
+
+ if (--wusbhc->pending_set_gtks == 0)
+ wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
+
+ mutex_unlock(&wusbhc->mutex);
+}
+
+static void wusbhc_set_gtk_callback(struct urb *urb)
+{
+ struct wusbhc *wusbhc = urb->context;
+
+ queue_work(wusbd, &wusbhc->gtk_rekey_done_work);
+}
+
+/**
+ * wusbhc_gtk_rekey - generate and distribute a new GTK
+ * @wusbhc: the WUSB host controller
+ *
+ * Generate a new GTK and distribute it to all connected and
+ * authenticated devices. When all devices have the new GTK, the host
+ * starts using it.
+ *
+ * This must be called after every device disconnect (see [WUSB]
+ * section 6.2.11.2).
+ */
+void wusbhc_gtk_rekey(struct wusbhc *wusbhc)
+{
+ static const size_t key_size = sizeof(wusbhc->gtk.data);
+ int p;
+
+ wusbhc_generate_gtk(wusbhc);
+
+ for (p = 0; p < wusbhc->ports_max; p++) {
+ struct wusb_dev *wusb_dev;
+
+ wusb_dev = wusbhc->port[p].wusb_dev;
+		if (!wusb_dev || !wusb_dev->usb_dev || !wusb_dev->usb_dev->authenticated)
+ continue;
+
+ usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev,
+ usb_sndctrlpipe(wusb_dev->usb_dev, 0),
+ (void *)wusb_dev->set_gtk_req,
+ &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
+ wusbhc_set_gtk_callback, wusbhc);
+ if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0)
+ wusbhc->pending_set_gtks++;
+ }
+ if (wusbhc->pending_set_gtks == 0)
+ wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
+}
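+
+/*
+ * Illustrative usage (sketch, not a call site in this file): a
+ * disconnect path is expected to tear the device down first and then
+ * rekey, so the departed device no longer holds a valid group key:
+ *
+ *	...device disconnect/tear-down...
+ *	wusbhc_gtk_rekey(wusbhc);
+ *
+ * The tear-down step above is a placeholder for whatever disconnect
+ * helper the caller uses; only the ordering matters here.
+ */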
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
new file mode 100644
index 00000000000..9d04722415b
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -0,0 +1,95 @@
+/*
+ * Wire Adapter Host Controller Driver
+ * Common items to HWA and DWA based HCDs
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+#include "wusbhc.h"
+#include "wa-hc.h"
+
+/**
+ * Assumes
+ *
+ * wa->usb_dev and wa->usb_iface initialized and refcounted,
+ * wa->wa_descr initialized.
+ */
+int wa_create(struct wahc *wa, struct usb_interface *iface)
+{
+ int result;
+ struct device *dev = &iface->dev;
+
+ result = wa_rpipes_create(wa);
+ if (result < 0)
+ goto error_rpipes_create;
+ /* Fill up Data Transfer EP pointers */
+ wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;
+ wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc;
+ wa->xfer_result_size = le16_to_cpu(wa->dti_epd->wMaxPacketSize);
+ wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL);
+	if (wa->xfer_result == NULL) {
+		result = -ENOMEM;
+		goto error_xfer_result_alloc;
+	}
+ result = wa_nep_create(wa, iface);
+ if (result < 0) {
+ dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n",
+ result);
+ goto error_nep_create;
+ }
+ return 0;
+
+error_nep_create:
+ kfree(wa->xfer_result);
+error_xfer_result_alloc:
+ wa_rpipes_destroy(wa);
+error_rpipes_create:
+ return result;
+}
+EXPORT_SYMBOL_GPL(wa_create);
+
+
+void __wa_destroy(struct wahc *wa)
+{
+ if (wa->dti_urb) {
+ usb_kill_urb(wa->dti_urb);
+ usb_put_urb(wa->dti_urb);
+ usb_kill_urb(wa->buf_in_urb);
+ usb_put_urb(wa->buf_in_urb);
+ }
+ kfree(wa->xfer_result);
+ wa_nep_destroy(wa);
+ wa_rpipes_destroy(wa);
+}
+EXPORT_SYMBOL_GPL(__wa_destroy);
+
+/**
+ * wa_reset_all - reset the WA device
+ * @wa: the WA to be reset
+ *
+ * For HWAs the radio controller and all other PALs are also reset.
+ */
+void wa_reset_all(struct wahc *wa)
+{
+ /* FIXME: assuming HWA. */
+ wusbhc_reset_all(wa->wusb);
+}
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Wireless USB Wire Adapter core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
new file mode 100644
index 00000000000..586d350cdb4
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -0,0 +1,417 @@
+/*
+ * HWA Host Controller Driver
+ * Wire Adapter Control/Data Streaming Iface (WUSB1.0[8])
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This driver implements a USB Host Controller (struct usb_hcd) for a
+ * Wireless USB Host Controller based on the Wireless USB 1.0
+ * Host-Wire-Adapter specification (in layman terms, a USB-dongle that
+ * implements a Wireless USB host).
+ *
+ * Check out the Design-overview.txt file in the source documentation
+ * for other details on the implementation.
+ *
+ * Main blocks:
+ *
+ * driver glue with the driver API, workqueue daemon
+ *
+ * lc RC instance life cycle management (create, destroy...)
+ *
+ * hcd glue with the USB API Host Controller Interface API.
+ *
+ * nep Notification EndPoint management: collect notifications
+ * and queue them with the workqueue daemon.
+ *
+ * Handles notifications coming from the NEP and sends them
+ * off to their respective modules (e.g.: connect,
+ * disconnect and reset go to devconnect).
+ *
+ * rpipe Remote Pipe management; rpipe is what we use to write
+ * to an endpoint on a WUSB device that is connected to a
+ * HWA RC.
+ *
+ * xfer Transfer management -- this is all the code that gets a
+ * buffer and pushes it to a device (or vice versa).
+ *
+ * Some day a lot of this code will be shared between this driver and
+ * the drivers for DWA (xfer, rpipe).
+ *
+ * Everything starts at driver.c:hwahc_probe(), when one of these
+ * devices is connected. hwahc_disconnect() stops it.
+ *
+ * During operation, the main activity is devices connecting or
+ * disconnecting. They cause the HWA RC to send notifications into
+ * nep.c:hwahc_nep_cb() that will dispatch them to
+ * notif.c:wa_notif_dispatch(). From there they will fan to cause
+ * device connects, disconnects, etc.
+ *
+ * Note much of the activity is difficult to follow. For example a
+ * device connect goes to devconnect, which will cause the "fake" root
+ * hub port to show a connect and stop there. Then khubd will notice
+ * and call into the rh.c:hwahc_rc_port_reset() code to authenticate
+ * the device (and this might require user intervention) and enable
+ * the port.
+ *
+ * We also have a timer workqueue going from devconnect.c that
+ * schedules in hwahc_devconnect_create().
+ *
+ * The rest of the traffic is in the usual entry points of a USB HCD,
+ * which are hooked up in driver.c:hwahc_rc_driver, and defined in
+ * hcd.c.
+ */
+
+#ifndef __HWAHC_INTERNAL_H__
+#define __HWAHC_INTERNAL_H__
+
+#include <linux/completion.h>
+#include <linux/usb.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/uwb.h>
+#include <linux/usb/wusb.h>
+#include <linux/usb/wusb-wa.h>
+
+struct wusbhc;
+struct wahc;
+extern void wa_urb_enqueue_run(struct work_struct *ws);
+
+/**
+ * RPipe instance
+ *
+ * @descr's fields are kept in LE, as we need to send it back and
+ * forth.
+ *
+ * @wa is referenced when set
+ *
+ * @segs_available is the number of request segments that still can
+ * be submitted to the controller without overloading
+ * it. It is initialized to descr->wRequests when
+ * aiming.
+ *
+ * An rpipe supports a max of descr->wRequests at the same time; before
+ * submitting, seg_lock has to be taken. If segs_available > 0, then we
+ * can submit; if not, we have to queue them.
+ */
+struct wa_rpipe {
+ struct kref refcnt;
+ struct usb_rpipe_descriptor descr;
+ struct usb_host_endpoint *ep;
+ struct wahc *wa;
+ spinlock_t seg_lock;
+ struct list_head seg_list;
+ atomic_t segs_available;
+ u8 buffer[1]; /* For reads/writes on USB */
+};
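+
+/*
+ * Illustrative sketch only (kept under #if 0, not built): how a caller
+ * would honour @segs_available when pushing a segment. The segment
+ * type, its WA_SEG_DELAYED state and the wa_seg_submit() helper are
+ * assumptions standing in for the real logic in wa-xfer.c.
+ */
+#if 0
+static int example_rpipe_submit_or_queue(struct wa_rpipe *rpipe,
+					 struct wa_seg *seg)
+{
+	int result = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rpipe->seg_lock, flags);
+	if (atomic_read(&rpipe->segs_available) > 0) {
+		atomic_dec(&rpipe->segs_available);	/* take a request slot */
+		result = wa_seg_submit(seg);		/* hypothetical helper */
+	} else {
+		seg->status = WA_SEG_DELAYED;		/* no slots: queue it */
+		list_add_tail(&seg->list_node, &rpipe->seg_list);
+	}
+	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+	return result;
+}
+#endif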
+
+
+/**
+ * Instance of a HWA Host Controller
+ *
+ * Except where a more specific lock/mutex applies or atomic, all
+ * fields protected by @mutex.
+ *
+ * @wa_descr Can be accessed without locking because it is in
+ * the same area where the device descriptors were
+ * read, so it is guaranteed to exist unmodified while
+ * the device exists.
+ *
+ * Endianness has been converted to the CPU's.
+ *
+ * @nep_* can be accessed without locking as its processing is
+ * serialized; we submit a NEP URB and it comes to
+ * hwahc_nep_cb(), which won't issue another URB until it is
+ * done processing it.
+ *
+ * @xfer_list:
+ *
+ * List of active transfers to verify existence from an xfer ID
+ * gotten from the xfer result message. Can't use urb->list because
+ * it goes by endpoint, and we don't know the endpoint at the time
+ * when we get the xfer result message. We can't really rely on the
+ * pointer (will have to change for 64 bits) as the xfer id is 32 bits.
+ *
+ * @xfer_delayed_list: List of transfers that need to be started
+ * (with a workqueue, because they were
+ * submitted from an atomic context).
+ *
+ * FIXME: this needs to be layered up: a wusbhc layer (for sharing
+ * commonalities with WHCI), a wa layer (for sharing
+ * commonalities with DWA-RC).
+ */
+struct wahc {
+ struct usb_device *usb_dev;
+ struct usb_interface *usb_iface;
+
+ /* HC to deliver notifications */
+ union {
+ struct wusbhc *wusb;
+ struct dwahc *dwa;
+ };
+
+ const struct usb_endpoint_descriptor *dto_epd, *dti_epd;
+ const struct usb_wa_descriptor *wa_descr;
+
+ struct urb *nep_urb; /* Notification EndPoint [lockless] */
+ struct edc nep_edc;
+ void *nep_buffer;
+ size_t nep_buffer_size;
+
+ atomic_t notifs_queued;
+
+ u16 rpipes;
+ unsigned long *rpipe_bm; /* rpipe usage bitmap */
+ spinlock_t rpipe_bm_lock; /* protect rpipe_bm */
+ struct mutex rpipe_mutex; /* assigning resources to endpoints */
+
+ struct urb *dti_urb; /* URB for reading xfer results */
+ struct urb *buf_in_urb; /* URB for reading data in */
+ struct edc dti_edc; /* DTI error density counter */
+ struct wa_xfer_result *xfer_result; /* real size = dti_ep maxpktsize */
+ size_t xfer_result_size;
+
+ s32 status; /* For reading status */
+
+ struct list_head xfer_list;
+ struct list_head xfer_delayed_list;
+ spinlock_t xfer_list_lock;
+ struct work_struct xfer_work;
+ atomic_t xfer_id_count;
+};
+
+
+extern int wa_create(struct wahc *wa, struct usb_interface *iface);
+extern void __wa_destroy(struct wahc *wa);
+void wa_reset_all(struct wahc *wa);
+
+
+/* Miscellaneous constants */
+enum {
+ /** Max number of EPROTO errors we tolerate on the NEP in a
+ * period of time */
+ HWAHC_EPROTO_MAX = 16,
+ /** Period of time for EPROTO errors (in jiffies) */
+ HWAHC_EPROTO_PERIOD = 4 * HZ,
+};
+
+
+/* Notification endpoint handling */
+extern int wa_nep_create(struct wahc *, struct usb_interface *);
+extern void wa_nep_destroy(struct wahc *);
+
+static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask)
+{
+ struct urb *urb = wa->nep_urb;
+ urb->transfer_buffer = wa->nep_buffer;
+ urb->transfer_buffer_length = wa->nep_buffer_size;
+ return usb_submit_urb(urb, gfp_mask);
+}
+
+static inline void wa_nep_disarm(struct wahc *wa)
+{
+ usb_kill_urb(wa->nep_urb);
+}
+
+
+/* RPipes */
+static inline void wa_rpipe_init(struct wahc *wa)
+{
+ spin_lock_init(&wa->rpipe_bm_lock);
+ mutex_init(&wa->rpipe_mutex);
+}
+
+static inline void wa_init(struct wahc *wa)
+{
+ edc_init(&wa->nep_edc);
+ atomic_set(&wa->notifs_queued, 0);
+ wa_rpipe_init(wa);
+ edc_init(&wa->dti_edc);
+ INIT_LIST_HEAD(&wa->xfer_list);
+ INIT_LIST_HEAD(&wa->xfer_delayed_list);
+ spin_lock_init(&wa->xfer_list_lock);
+ INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+ atomic_set(&wa->xfer_id_count, 1);
+}
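+
+/*
+ * Illustrative bring-up ordering only (kept under #if 0, not built): a
+ * WA interface driver is expected to fill wa->usb_dev, wa->usb_iface
+ * and wa->wa_descr, call wa_init() and then wa_create(). The reference
+ * handling below is a sketch, not the real probe path.
+ */
+#if 0
+static int example_wa_bind(struct wahc *wa, struct usb_interface *iface)
+{
+	int result;
+
+	wa->usb_iface = usb_get_intf(iface);
+	wa->usb_dev = usb_get_dev(interface_to_usbdev(iface));
+	/* wa->wa_descr is assumed to have been located by the caller */
+	wa_init(wa);			/* lists, locks, counters */
+	result = wa_create(wa, iface);	/* rpipes, DTI buffer, notif EP */
+	if (result < 0) {
+		usb_put_dev(wa->usb_dev);
+		usb_put_intf(wa->usb_iface);
+	}
+	return result;
+}
+#endif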
+
+/**
+ * Destroy a pipe (when refcount drops to zero)
+ *
+ * Assumes it has been moved to the "QUIESCING" state.
+ */
+struct wa_xfer;
+extern void rpipe_destroy(struct kref *_rpipe);
+static inline
+void __rpipe_get(struct wa_rpipe *rpipe)
+{
+ kref_get(&rpipe->refcnt);
+}
+extern int rpipe_get_by_ep(struct wahc *, struct usb_host_endpoint *,
+ struct urb *, gfp_t);
+static inline void rpipe_put(struct wa_rpipe *rpipe)
+{
+ kref_put(&rpipe->refcnt, rpipe_destroy);
+
+}
+extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
+extern int wa_rpipes_create(struct wahc *);
+extern void wa_rpipes_destroy(struct wahc *);
+static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
+{
+ atomic_dec(&rpipe->segs_available);
+}
+
+/**
+ * Release a request slot; returns true if there are delayed segments
+ * queued on the rpipe that can now be submitted.
+ */
+static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
+{
+ return atomic_inc_return(&rpipe->segs_available) > 0
+ && !list_empty(&rpipe->seg_list);
+}
+
+
+/* Transferring data */
+extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,
+ struct urb *, gfp_t);
+extern int wa_urb_dequeue(struct wahc *, struct urb *);
+extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
+
+
+/* Misc
+ *
+ * FIXME: Refcounting for the actual @hwahc object is not correct; I
+ * mean, this should be refcounting on the HCD underneath, but
+ * it is not. In any case, the semantics for HCD refcounting
+ * are *weird*...on refcount reaching zero it just frees
+ * it...no RC specific function is called...unless I miss
+ * something.
+ *
+ * FIXME: has to go away in favour of a 'struct hcd' based solution
+ */
+static inline struct wahc *wa_get(struct wahc *wa)
+{
+ usb_get_intf(wa->usb_iface);
+ return wa;
+}
+
+static inline void wa_put(struct wahc *wa)
+{
+ usb_put_intf(wa->usb_iface);
+}
+
+
+static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
+{
+ return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ op ? USB_REQ_SET_FEATURE : USB_REQ_CLEAR_FEATURE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ feature,
+ wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0, 1000 /* FIXME: arbitrary */);
+}
+
+
+static inline int __wa_set_feature(struct wahc *wa, u16 feature)
+{
+ return __wa_feature(wa, 1, feature);
+}
+
+
+static inline int __wa_clear_feature(struct wahc *wa, u16 feature)
+{
+ return __wa_feature(wa, 0, feature);
+}
+
+
+/**
+ * Return the status of a Wire Adapter
+ *
+ * @wa: Wire Adapter instance
+ * @returns < 0 errno code on error, or status bitmap as described
+ * in WUSB1.0[8.3.1.6].
+ *
+ * NOTE: need malloc, some arches don't take USB from the stack
+ */
+static inline
+s32 __wa_get_status(struct wahc *wa)
+{
+ s32 result;
+ result = usb_control_msg(
+ wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+ USB_REQ_GET_STATUS,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ &wa->status, sizeof(wa->status),
+ 1000 /* FIXME: arbitrary */);
+ if (result >= 0)
+ result = wa->status;
+ return result;
+}
+
+
+/**
+ * Waits until the Wire Adapter's status matches @mask/@value
+ *
+ * @wa: Wire Adapter instance.
+ * @returns < 0 errno code on error, otherwise status.
+ *
+ * Loop until the WA's status matches the mask and value (status & mask
+ * == value). Times out if it doesn't happen.
+ *
+ * FIXME: is there an official specification on how long status
+ * changes can take?
+ */
+static inline s32 __wa_wait_status(struct wahc *wa, u32 mask, u32 value)
+{
+ s32 result;
+ unsigned loops = 10;
+ do {
+ msleep(50);
+ result = __wa_get_status(wa);
+ if ((result & mask) == value)
+ break;
+ if (loops-- == 0) {
+ result = -ETIMEDOUT;
+ break;
+ }
+ } while (result >= 0);
+ return result;
+}
+
+
+/** Command @wa to stop; errors are logged but ignored so teardown can proceed. @returns 0 */
+static inline int __wa_stop(struct wahc *wa)
+{
+ int result;
+ struct device *dev = &wa->usb_iface->dev;
+
+ result = __wa_clear_feature(wa, WA_ENABLE);
+ if (result < 0 && result != -ENODEV) {
+ dev_err(dev, "error commanding HC to stop: %d\n", result);
+ goto out;
+ }
+ result = __wa_wait_status(wa, WA_ENABLE, 0);
+ if (result < 0 && result != -ENODEV)
+ dev_err(dev, "error waiting for HC to stop: %d\n", result);
+out:
+ return 0;
+}
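+
+/*
+ * Illustrative counterpart to __wa_stop() (sketch under #if 0, not used
+ * by this header): starting the WA is the same dance with WA_ENABLE
+ * being set and then waiting for the status bit to latch.
+ */
+#if 0
+static inline int __wa_example_start(struct wahc *wa)
+{
+	int result;
+
+	result = __wa_set_feature(wa, WA_ENABLE);
+	if (result < 0)
+		return result;
+	result = __wa_wait_status(wa, WA_ENABLE, WA_ENABLE);
+	return result < 0 ? result : 0;
+}
+#endif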
+
+
+#endif /* #ifndef __HWAHC_INTERNAL_H__ */
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
new file mode 100644
index 00000000000..3f542990c73
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -0,0 +1,310 @@
+/*
+ * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
+ * Notification EndPoint support
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This part only takes care of getting the notification from the hw
+ * and dispatching it through wusbwad into
+ * wa_notif_dispatch(). Handling is done there.
+ *
+ * WA notifications are limited in size; most of them are three or
+ * four bytes long, and the longest is the HWA Device Notification,
+ * which would not exceed 38 bytes (DNs are limited to a 32 byte
+ * payload plus a 3 byte header (WUSB1.0[7.6p2]), plus a 3 byte HWA
+ * header (WUSB1.0[8.5.4.2])).
+ *
+ * It is not clear if more than one Device Notification can be packed
+ * in a HWA Notification; I assume not, because of the wording in
+ * WUSB1.0[8.5.4.2]. In any case, the biggest any notification could
+ * get is 256 bytes (as the bLength field is a byte).
+ *
+ * So what we do is we have this buffer and read into it; when a
+ * notification arrives we schedule work to a specific, single thread
+ * workqueue (so notifications are serialized) and copy the
+ * notification data. After scheduling the work, we rearm the read from
+ * the notification endpoint.
+ *
+ * Entry points here are:
+ *
+ * wa_nep_[create|destroy]() To initialize/release this subsystem
+ *
+ * wa_nep_cb() Callback for the notification
+ * endpoint; when data is ready, this
+ * does the dispatching.
+ */
+#include <linux/workqueue.h>
+#include <linux/ctype.h>
+#include <linux/uwb/debug.h>
+#include "wa-hc.h"
+#include "wusbhc.h"
+
+/* Structure for queueing notifications to the workqueue */
+struct wa_notif_work {
+ struct work_struct work;
+ struct wahc *wa;
+ size_t size;
+ u8 data[];
+};
+
+/*
+ * Process incoming notifications from the WA's Notification EndPoint
+ * [the wuswad daemon, basically]
+ *
+ * @_nw: Pointer to a descriptor which has the pointer to the
+ * @wa, the size of the buffer and the work queue
+ * structure (so we can free all when done).
+ * @returns 0 if ok, < 0 errno code on error.
+ *
+ * All notifications follow the same format; they need to start with a
+ * 'struct wa_notif_hdr' header, so it is easy to parse through
+ * them. We just break the buffer in individual notifications (the
+ * standard doesn't say if it can be done or is forbidden, so we are
+ * cautious) and dispatch each.
+ *
+ * So the handling layering is:
+ *
+ * WA specific notification (from NEP)
+ * Device Notification Received -> wa_handle_notif_dn()
+ * WUSB Device notification generic handling
+ * BPST Adjustment -> wa_handle_notif_bpst_adj()
+ * ... -> ...
+ *
+ * @wa has to be referenced
+ */
+static void wa_notif_dispatch(struct work_struct *ws)
+{
+ void *itr;
+ u8 missing = 0;
+ struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, work);
+ struct wahc *wa = nw->wa;
+ struct wa_notif_hdr *notif_hdr;
+ size_t size;
+
+ struct device *dev = &wa->usb_iface->dev;
+
+#if 0
+ /* FIXME: need to check for this??? */
+ if (usb_hcd->state == HC_STATE_QUIESCING) /* Going down? */
+ goto out; /* screw it */
+#endif
+ atomic_dec(&wa->notifs_queued); /* Throttling ctl */
+ dev = &wa->usb_iface->dev;
+ size = nw->size;
+ itr = nw->data;
+
+ while (size) {
+ if (size < sizeof(*notif_hdr)) {
+ missing = sizeof(*notif_hdr) - size;
+ goto exhausted_buffer;
+ }
+ notif_hdr = itr;
+		if (size < notif_hdr->bLength) {
+			missing = notif_hdr->bLength - size;
+			goto exhausted_buffer;
+		}
+ itr += notif_hdr->bLength;
+ size -= notif_hdr->bLength;
+ /* Dispatch the notification [don't use itr or size!] */
+ switch (notif_hdr->bNotifyType) {
+ case HWA_NOTIF_DN: {
+ struct hwa_notif_dn *hwa_dn;
+ hwa_dn = container_of(notif_hdr, struct hwa_notif_dn,
+ hdr);
+ wusbhc_handle_dn(wa->wusb, hwa_dn->bSourceDeviceAddr,
+ hwa_dn->dndata,
+ notif_hdr->bLength - sizeof(*hwa_dn));
+ break;
+ }
+ case WA_NOTIF_TRANSFER:
+ wa_handle_notif_xfer(wa, notif_hdr);
+ break;
+ case DWA_NOTIF_RWAKE:
+ case DWA_NOTIF_PORTSTATUS:
+ case HWA_NOTIF_BPST_ADJ:
+ /* FIXME: unimplemented WA NOTIFs */
+ /* fallthru */
+ default:
+ if (printk_ratelimit()) {
+ dev_err(dev, "HWA: unknown notification 0x%x, "
+ "%zu bytes; discarding\n",
+ notif_hdr->bNotifyType,
+ (size_t)notif_hdr->bLength);
+ dump_bytes(dev, notif_hdr, 16);
+ }
+ break;
+ }
+ }
+out:
+ wa_put(wa);
+ kfree(nw);
+ return;
+
+ /* THIS SHOULD NOT HAPPEN
+ *
+ * Buffer exhausted with partial data remaining; just warn and
+ * discard the data, as this should not happen.
+ */
+exhausted_buffer:
+ if (!printk_ratelimit())
+ goto out;
+ dev_warn(dev, "HWA: device sent short notification, "
+ "%d bytes missing; discarding %d bytes.\n",
+ missing, (int)size);
+ dump_bytes(dev, itr, size);
+ goto out;
+}
+
+/*
+ * Deliver incoming WA notifications to the wusbwa workqueue
+ *
+ * @wa: Pointer the Wire Adapter Controller Data Streaming
+ * instance (part of an 'struct usb_hcd').
+ * @size: Size of the received buffer
+ * @returns 0 if ok, < 0 errno code on error.
+ *
+ * The input buffer is @wa->nep_buffer, with @size bytes
+ * (guaranteed to fit in the allocated space,
+ * @wa->nep_buffer_size).
+ */
+static int wa_nep_queue(struct wahc *wa, size_t size)
+{
+ int result = 0;
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_notif_work *nw;
+
+ /* dev_fnstart(dev, "(wa %p, size %zu)\n", wa, size); */
+ BUG_ON(size > wa->nep_buffer_size);
+ if (size == 0)
+ goto out;
+ if (atomic_read(&wa->notifs_queued) > 200) {
+ if (printk_ratelimit())
+ dev_err(dev, "Too many notifications queued, "
+ "throttling back\n");
+ goto out;
+ }
+ nw = kzalloc(sizeof(*nw) + size, GFP_ATOMIC);
+ if (nw == NULL) {
+ if (printk_ratelimit())
+ dev_err(dev, "No memory to queue notification\n");
+ goto out;
+ }
+ INIT_WORK(&nw->work, wa_notif_dispatch);
+ nw->wa = wa_get(wa);
+ nw->size = size;
+ memcpy(nw->data, wa->nep_buffer, size);
+ atomic_inc(&wa->notifs_queued); /* Throttling ctl */
+ queue_work(wusbd, &nw->work);
+out:
+ /* dev_fnend(dev, "(wa %p, size %zu) = result\n", wa, size, result); */
+ return result;
+}
+
+/*
+ * Callback for the notification event endpoint
+ *
+ * Checks that everything is fine and then passes the data to be
+ * queued to the workqueue.
+ */
+static void wa_nep_cb(struct urb *urb)
+{
+ int result;
+ struct wahc *wa = urb->context;
+ struct device *dev = &wa->usb_iface->dev;
+
+ switch (result = urb->status) {
+ case 0:
+ result = wa_nep_queue(wa, urb->actual_length);
+ if (result < 0)
+ dev_err(dev, "NEP: unable to process notification(s): "
+ "%d\n", result);
+ break;
+ case -ECONNRESET: /* Not an error, but a controlled situation; */
+ case -ENOENT: /* (we killed the URB)...so, no broadcast */
+ case -ESHUTDOWN:
+ dev_dbg(dev, "NEP: going down %d\n", urb->status);
+ goto out;
+ default: /* On general errors, we retry unless it gets ugly */
+ if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "NEP: URB max acceptable errors "
+ "exceeded, resetting device\n");
+ wa_reset_all(wa);
+ goto out;
+ }
+ dev_err(dev, "NEP: URB error %d\n", urb->status);
+ }
+ result = wa_nep_arm(wa, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "NEP: cannot submit URB: %d\n", result);
+ wa_reset_all(wa);
+ }
+out:
+ return;
+}
+
+/*
+ * Initialize @wa's notification and event's endpoint stuff
+ *
+ * This includes allocating the read buffer and the notification URB,
+ * and arming (submitting) the URB.
+ */
+int wa_nep_create(struct wahc *wa, struct usb_interface *iface)
+{
+	int result = -ENOMEM;
+ struct usb_endpoint_descriptor *epd;
+ struct usb_device *usb_dev = interface_to_usbdev(iface);
+ struct device *dev = &iface->dev;
+
+ edc_init(&wa->nep_edc);
+ epd = &iface->cur_altsetting->endpoint[0].desc;
+ wa->nep_buffer_size = 1024;
+ wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);
+ if (wa->nep_buffer == NULL) {
+ dev_err(dev, "Unable to allocate notification's read buffer\n");
+ goto error_nep_buffer;
+ }
+ wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (wa->nep_urb == NULL) {
+ dev_err(dev, "Unable to allocate notification URB\n");
+ goto error_urb_alloc;
+ }
+ usb_fill_int_urb(wa->nep_urb, usb_dev,
+ usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
+ wa->nep_buffer, wa->nep_buffer_size,
+ wa_nep_cb, wa, epd->bInterval);
+ result = wa_nep_arm(wa, GFP_KERNEL);
+ if (result < 0) {
+ dev_err(dev, "Cannot submit notification URB: %d\n", result);
+ goto error_nep_arm;
+ }
+ return 0;
+
+error_nep_arm:
+ usb_free_urb(wa->nep_urb);
+error_urb_alloc:
+ kfree(wa->nep_buffer);
+error_nep_buffer:
+	return result;
+}
+
+void wa_nep_destroy(struct wahc *wa)
+{
+ wa_nep_disarm(wa);
+ usb_free_urb(wa->nep_urb);
+ kfree(wa->nep_buffer);
+}
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
new file mode 100644
index 00000000000..f18e4aae66e
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -0,0 +1,562 @@
+/*
+ * WUSB Wire Adapter
+ * rpipe management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * RPIPE
+ *
+ * Targeted at different downstream endpoints
+ *
+ * Descriptor: used to configure the remote pipe.
+ *
+ * The number of blocks could be dynamic (wBlocks in descriptor is
+ * 0)--need to schedule them then.
+ *
+ * Each bit in wa->rpipe_bm represents if an rpipe is being used or
+ * not. Rpipes are represented with a 'struct wa_rpipe' that is
+ * attached to the hcpriv member of a 'struct usb_host_endpoint'.
+ *
+ * When you need to xfer data to an endpoint, you get an rpipe for it
+ * with wa_ep_rpipe_get(), which gives you a reference to the rpipe
+ * and keeps a single one (the first one) with the endpoint. When you
+ * are done transferring, you drop that reference. At the end the
+ * rpipe is always allocated and bound to the endpoint. There it might
+ * be recycled when not used.
+ *
+ * Addresses:
+ *
+ * We use a 1:1 mapping between the port index (0 based) and the USB
+ * device address. The USB stack knows about this.
+ *
+ * USB Stack port number 4 (1 based)
+ * WUSB code port index 3 (0 based)
+ * USB Address 5 (2 based -- 0 is for default, 1 for root hub)
+ *
+ * Now, because we don't use the concept of a default address exactly
+ * like the (wired) USB code does, we need to kind of skip it. So we
+ * never take addresses from the urb->pipe, but from the
+ * urb->dev->devnum, to make sure that we always have the right
+ * destination address.
+ */
+#include <linux/init.h>
+#include <asm/atomic.h>
+#include <linux/bitmap.h>
+#include "wusbhc.h"
+#include "wa-hc.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
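+/*
+ * Worked example of the address mapping described above (descriptive
+ * only): with the 1-based USB stack port number 4, the 0-based WUSB
+ * port index is wusb_port_no_to_idx(4) == 3, and the device address is
+ * index + 2 == 5 (0 being the default address and 1 the root hub).
+ */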
+
+static int __rpipe_get_descr(struct wahc *wa,
+ struct usb_rpipe_descriptor *descr, u16 index)
+{
+ ssize_t result;
+ struct device *dev = &wa->usb_iface->dev;
+
+ /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor()
+ * function because the arguments are different.
+ */
+ d_printf(1, dev, "rpipe %u: get descr\n", index);
+ result = usb_control_msg(
+ wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+ USB_REQ_GET_DESCRIPTOR,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
+ 1000 /* FIXME: arbitrary */);
+ if (result < 0) {
+ dev_err(dev, "rpipe %u: get descriptor failed: %d\n",
+ index, (int)result);
+ goto error;
+ }
+ if (result < sizeof(*descr)) {
+ dev_err(dev, "rpipe %u: got short descriptor "
+ "(%zd vs %zd bytes needed)\n",
+ index, result, sizeof(*descr));
+ result = -EINVAL;
+ goto error;
+ }
+ result = 0;
+
+error:
+ return result;
+}
+
+/*
+ *
+ * The descriptor is assumed to be properly initialized (ie: you got
+ * it through __rpipe_get_descr()).
+ */
+static int __rpipe_set_descr(struct wahc *wa,
+ struct usb_rpipe_descriptor *descr, u16 index)
+{
+ ssize_t result;
+ struct device *dev = &wa->usb_iface->dev;
+
+ /* we cannot use the usb_get_descriptor() function because the
+ * arguments are different.
+ */
+ d_printf(1, dev, "rpipe %u: set descr\n", index);
+ result = usb_control_msg(
+ wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ USB_REQ_SET_DESCRIPTOR,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
+ HZ / 10);
+ if (result < 0) {
+ dev_err(dev, "rpipe %u: set descriptor failed: %d\n",
+ index, (int)result);
+ goto error;
+ }
+ if (result < sizeof(*descr)) {
+ dev_err(dev, "rpipe %u: sent short descriptor "
+ "(%zd vs %zd bytes required)\n",
+ index, result, sizeof(*descr));
+ result = -EINVAL;
+ goto error;
+ }
+ result = 0;
+
+error:
+ return result;
+
+}
+
+static void rpipe_init(struct wa_rpipe *rpipe)
+{
+ kref_init(&rpipe->refcnt);
+ spin_lock_init(&rpipe->seg_lock);
+ INIT_LIST_HEAD(&rpipe->seg_list);
+}
+
+static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
+ rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
+ if (rpipe_idx < wa->rpipes)
+ set_bit(rpipe_idx, wa->rpipe_bm);
+ spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
+
+ return rpipe_idx;
+}
+
+static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
+ clear_bit(rpipe_idx, wa->rpipe_bm);
+ spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
+}
+
+void rpipe_destroy(struct kref *_rpipe)
+{
+ struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt);
+ u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+ d_fnstart(1, NULL, "(rpipe %p %u)\n", rpipe, index);
+ if (rpipe->ep)
+ rpipe->ep->hcpriv = NULL;
+ rpipe_put_idx(rpipe->wa, index);
+ wa_put(rpipe->wa);
+ kfree(rpipe);
+ d_fnend(1, NULL, "(rpipe %p %u)\n", rpipe, index);
+}
+EXPORT_SYMBOL_GPL(rpipe_destroy);
+
+/*
+ * Locate an idle rpipe, create a structure for it and return it
+ *
+ * @wa is referenced and unlocked
+ * @crs enum rpipe_attr, required endpoint characteristics
+ *
+ * The rpipe can be used only sequentially (not in parallel).
+ *
+ * The rpipe is moved into the "ready" state.
+ */
+static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs,
+ gfp_t gfp)
+{
+ int result;
+ unsigned rpipe_idx;
+ struct wa_rpipe *rpipe;
+ struct device *dev = &wa->usb_iface->dev;
+
+ d_fnstart(3, dev, "(wa %p crs 0x%02x)\n", wa, crs);
+ rpipe = kzalloc(sizeof(*rpipe), gfp);
+ if (rpipe == NULL)
+ return -ENOMEM;
+ rpipe_init(rpipe);
+
+ /* Look for an idle pipe */
+ for (rpipe_idx = 0; rpipe_idx < wa->rpipes; rpipe_idx++) {
+ rpipe_idx = rpipe_get_idx(wa, rpipe_idx);
+ if (rpipe_idx >= wa->rpipes) /* no more pipes :( */
+ break;
+ result = __rpipe_get_descr(wa, &rpipe->descr, rpipe_idx);
+ if (result < 0)
+ dev_err(dev, "Can't get descriptor for rpipe %u: %d\n",
+ rpipe_idx, result);
+ else if ((rpipe->descr.bmCharacteristics & crs) != 0)
+ goto found;
+ rpipe_put_idx(wa, rpipe_idx);
+ }
+ *prpipe = NULL;
+ kfree(rpipe);
+ d_fnend(3, dev, "(wa %p crs 0x%02x) = -ENXIO\n", wa, crs);
+ return -ENXIO;
+
+found:
+ set_bit(rpipe_idx, wa->rpipe_bm);
+ rpipe->wa = wa_get(wa);
+ *prpipe = rpipe;
+ d_fnstart(3, dev, "(wa %p crs 0x%02x) = 0\n", wa, crs);
+ return 0;
+}
+
+static int __rpipe_reset(struct wahc *wa, unsigned index)
+{
+ int result;
+ struct device *dev = &wa->usb_iface->dev;
+
+ d_printf(1, dev, "rpipe %u: reset\n", index);
+ result = usb_control_msg(
+ wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ USB_REQ_RPIPE_RESET,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ 0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
+ if (result < 0)
+ dev_err(dev, "rpipe %u: reset failed: %d\n",
+ index, result);
+ return result;
+}
+
+/*
+ * Fake companion descriptor for ep0
+ *
+ * See WUSB1.0[7.4.4], most of this is zero for bulk/int/ctl
+ */
+static struct usb_wireless_ep_comp_descriptor epc0 = {
+ .bLength = sizeof(epc0),
+ .bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP,
+/* .bMaxBurst = 1, */
+ .bMaxSequence = 31,
+};
+
+/*
+ * Look for EP companion descriptor
+ *
+ * Get there, look for Inara in the endpoint's extra descriptors
+ */
+static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
+ struct device *dev, struct usb_host_endpoint *ep)
+{
+ void *itr;
+ size_t itr_size;
+ struct usb_descriptor_header *hdr;
+ struct usb_wireless_ep_comp_descriptor *epcd;
+
+ d_fnstart(3, dev, "(ep %p)\n", ep);
+ if (ep->desc.bEndpointAddress == 0) {
+ epcd = &epc0;
+ goto out;
+ }
+ itr = ep->extra;
+ itr_size = ep->extralen;
+ epcd = NULL;
+ while (itr_size > 0) {
+ if (itr_size < sizeof(*hdr)) {
+ dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors "
+ "at offset %zu: only %zu bytes left\n",
+ ep->desc.bEndpointAddress,
+ itr - (void *) ep->extra, itr_size);
+ break;
+ }
+ hdr = itr;
+ if (hdr->bDescriptorType == USB_DT_WIRELESS_ENDPOINT_COMP) {
+ epcd = itr;
+ break;
+ }
+ if (hdr->bLength > itr_size) {
+ dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor "
+ "at offset %zu (type 0x%02x) "
+ "length %d but only %zu bytes left\n",
+ ep->desc.bEndpointAddress,
+ itr - (void *) ep->extra, hdr->bDescriptorType,
+ hdr->bLength, itr_size);
+ break;
+ }
+ itr += hdr->bLength;
+		itr_size -= hdr->bLength;
+ }
+out:
+ d_fnend(3, dev, "(ep %p) = %p\n", ep, epcd);
+ return epcd;
+}
+
+/*
+ * Aim an rpipe to its device & endpoint destination
+ *
+ * Make sure we change the address to unauthenticated if the device
+ * is WUSB and it is not authenticated.
+ */
+static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
+ struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp)
+{
+ int result = -ENOMSG; /* better code for lack of companion? */
+ struct device *dev = &wa->usb_iface->dev;
+ struct usb_device *usb_dev = urb->dev;
+ struct usb_wireless_ep_comp_descriptor *epcd;
+ u8 unauth;
+
+ d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n",
+ rpipe, wa, ep, urb);
+ epcd = rpipe_epc_find(dev, ep);
+ if (epcd == NULL) {
+ dev_err(dev, "ep 0x%02x: can't find companion descriptor\n",
+ ep->desc.bEndpointAddress);
+ goto error;
+ }
+ unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
+ __rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
+ atomic_set(&rpipe->segs_available, le16_to_cpu(rpipe->descr.wRequests));
+ /* FIXME: block allocation system; request with queuing and timeout */
+ /* FIXME: compute so seg_size > ep->maxpktsize */
+ rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
+ /* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
+ rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize);
+ rpipe->descr.bHSHubAddress = 0; /* reserved: zero */
+ rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum);
+ /* FIXME: use maximum speed as supported or recommended by device */
+ rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ?
+ UWB_PHY_RATE_53 : UWB_PHY_RATE_200;
+ d_printf(2, dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n",
+ urb->dev->devnum, urb->dev->devnum | unauth,
+ le16_to_cpu(rpipe->descr.wRPipeIndex),
+ usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed);
+ /* see security.c:wusb_update_address() */
+ if (unlikely(urb->dev->devnum == 0x80))
+ rpipe->descr.bDeviceAddress = 0;
+ else
+ rpipe->descr.bDeviceAddress = urb->dev->devnum | unauth;
+ rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress;
+ /* FIXME: bDataSequence */
+ rpipe->descr.bDataSequence = 0;
+ /* FIXME: dwCurrentWindow */
+ rpipe->descr.dwCurrentWindow = cpu_to_le32(1);
+ /* FIXME: bMaxDataSequence */
+ rpipe->descr.bMaxDataSequence = epcd->bMaxSequence - 1;
+ rpipe->descr.bInterval = ep->desc.bInterval;
+ /* FIXME: bOverTheAirInterval */
+ rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */
+ /* FIXME: xmit power & preamble blah blah */
+ rpipe->descr.bmAttribute = ep->desc.bmAttributes & 0x03;
+ /* rpipe->descr.bmCharacteristics RO */
+ /* FIXME: bmRetryOptions */
+ rpipe->descr.bmRetryOptions = 15;
+ /* FIXME: use for assessing link quality? */
+ rpipe->descr.wNumTransactionErrors = 0;
+ result = __rpipe_set_descr(wa, &rpipe->descr,
+ le16_to_cpu(rpipe->descr.wRPipeIndex));
+ if (result < 0) {
+ dev_err(dev, "Cannot aim rpipe: %d\n", result);
+ goto error;
+ }
+ result = 0;
+error:
+ d_fnend(3, dev, "(rpipe %p wa %p ep %p urb %p) = %d\n",
+ rpipe, wa, ep, urb, result);
+ return result;
+}
+
+/*
+ * Check an aimed rpipe to make sure it points to where we want
+ *
+ * We use bit 19 of the Linux USB pipe bitmap for unauth vs auth
+ * space; when it is like that, we or 0x80 to make an unauth address.
+ */
+static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
+ const struct usb_host_endpoint *ep,
+ const struct urb *urb, gfp_t gfp)
+{
+	int result = 0;
+ struct device *dev = &wa->usb_iface->dev;
+ struct usb_device *usb_dev = urb->dev;
+ u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 0x80 : 0;
+ u8 portnum = wusb_port_no_to_idx(urb->dev->portnum);
+
+ d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n",
+ rpipe, wa, ep, urb);
+#define AIM_CHECK(rdf, val, text) \
+ do { \
+ if (rpipe->descr.rdf != (val)) { \
+ dev_err(dev, \
+ "rpipe aim discrepancy: " #rdf " " text "\n", \
+ rpipe->descr.rdf, (val)); \
+ result = -EINVAL; \
+ WARN_ON(1); \
+ } \
+ } while (0)
+ AIM_CHECK(wMaxPacketSize, cpu_to_le16(ep->desc.wMaxPacketSize),
+ "(%u vs %u)");
+ AIM_CHECK(bHSHubPort, portnum, "(%u vs %u)");
+ AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ?
+ UWB_PHY_RATE_53 : UWB_PHY_RATE_200,
+ "(%u vs %u)");
+ AIM_CHECK(bDeviceAddress, urb->dev->devnum | unauth, "(%u vs %u)");
+ AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)");
+ AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)");
+ AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)");
+#undef AIM_CHECK
+ return result;
+}
+
+#ifndef CONFIG_BUG
+#define CONFIG_BUG 0
+#endif
+
+/*
+ * Make sure there is an rpipe allocated for an endpoint
+ *
+ * If already allocated, we just refcount it; if not, we get an
+ * idle one, aim it to the right location and take it.
+ *
+ * Attaches to ep->hcpriv and rpipe->ep to ep.
+ */
+int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep,
+ struct urb *urb, gfp_t gfp)
+{
+ int result = 0;
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_rpipe *rpipe;
+ u8 eptype;
+
+ d_fnstart(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb,
+ gfp);
+ mutex_lock(&wa->rpipe_mutex);
+ rpipe = ep->hcpriv;
+ if (rpipe != NULL) {
+ if (CONFIG_BUG == 1) {
+ result = rpipe_check_aim(rpipe, wa, ep, urb, gfp);
+ if (result < 0)
+ goto error;
+ }
+ __rpipe_get(rpipe);
+ d_printf(2, dev, "ep 0x%02x: reusing rpipe %u\n",
+ ep->desc.bEndpointAddress,
+ le16_to_cpu(rpipe->descr.wRPipeIndex));
+ } else {
+		/* hmm, assign idle rpipe, aim it */
+ eptype = ep->desc.bmAttributes & 0x03;
+ result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp);
+ if (result < 0)
+ goto error;
+ result = rpipe_aim(rpipe, wa, ep, urb, gfp);
+ if (result < 0) {
+ rpipe_put(rpipe);
+ goto error;
+ }
+ ep->hcpriv = rpipe;
+ rpipe->ep = ep;
+ __rpipe_get(rpipe); /* for caching into ep->hcpriv */
+ d_printf(2, dev, "ep 0x%02x: using rpipe %u\n",
+ ep->desc.bEndpointAddress,
+ le16_to_cpu(rpipe->descr.wRPipeIndex));
+ }
+ d_dump(4, dev, &rpipe->descr, sizeof(rpipe->descr));
+error:
+ mutex_unlock(&wa->rpipe_mutex);
+ d_fnend(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, gfp);
+ return result;
+}
+
+/*
+ * Allocate the usage bitmap that tracks which rpipes are in use.
+ */
+int wa_rpipes_create(struct wahc *wa)
+{
+ wa->rpipes = wa->wa_descr->wNumRPipes;
+ wa->rpipe_bm = kzalloc(BITS_TO_LONGS(wa->rpipes)*sizeof(unsigned long),
+ GFP_KERNEL);
+ if (wa->rpipe_bm == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void wa_rpipes_destroy(struct wahc *wa)
+{
+ struct device *dev = &wa->usb_iface->dev;
+ d_fnstart(3, dev, "(wa %p)\n", wa);
+ if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
+ char buf[256];
+ WARN_ON(1);
+ bitmap_scnprintf(buf, sizeof(buf), wa->rpipe_bm, wa->rpipes);
+ dev_err(dev, "BUG: pipes not released on exit: %s\n", buf);
+ }
+ kfree(wa->rpipe_bm);
+ d_fnend(3, dev, "(wa %p)\n", wa);
+}
+
+/*
+ * Release resources allocated for an endpoint
+ *
+ * If this endpoint has an associated rpipe, abort any pending
+ * transfers and put it. If the rpipe ends up being destroyed,
+ * rpipe_destroy() will clean up ep->hcpriv.
+ *
+ * This is called before calling hcd->stop(), so you don't need to do
+ * anything else in there.
+ */
+void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
+{
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_rpipe *rpipe;
+ d_fnstart(2, dev, "(wa %p ep %p)\n", wa, ep);
+ mutex_lock(&wa->rpipe_mutex);
+ rpipe = ep->hcpriv;
+ if (rpipe != NULL) {
+ unsigned rc = atomic_read(&rpipe->refcnt.refcount);
+ int result;
+ u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+
+ if (rc != 1)
+ d_printf(1, dev, "(wa %p ep %p) rpipe %p refcnt %u\n",
+ wa, ep, rpipe, rc);
+
+ d_printf(1, dev, "rpipe %u: abort\n", index);
+ result = usb_control_msg(
+ wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+ USB_REQ_RPIPE_ABORT,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ 0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
+ if (result < 0 && result != -ENODEV /* dev is gone */)
+ d_printf(1, dev, "(wa %p rpipe %u): abort failed: %d\n",
+ wa, index, result);
+ rpipe_put(rpipe);
+ }
+ mutex_unlock(&wa->rpipe_mutex);
+ d_fnend(2, dev, "(wa %p ep %p)\n", wa, ep);
+ return;
+}
+EXPORT_SYMBOL_GPL(rpipe_ep_disable);
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
new file mode 100644
index 00000000000..c038635d1c6
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -0,0 +1,1709 @@
+/*
+ * WUSB Wire Adapter
+ * Data transfer and URB enqueuing
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * How transfers work: get a buffer, break it up in segments (segment
+ * size is a multiple of the maxpacket size). For each segment issue a
+ * segment request (struct wa_xfer_*), then send the data buffer if
+ * out or nothing if in (all over the DTO endpoint).
+ *
+ * For each submitted segment request, a notification will come over
+ * the NEP endpoint and a transfer result (struct xfer_result) will
+ * arrive in the DTI URB. Read it, get the xfer ID, see if there is
+ * data coming (inbound transfer), schedule a read and handle it.
+ *
+ * Sounds simple, it is a pain to implement.
+ *
+ *
+ * ENTRY POINTS
+ *
+ * FIXME
+ *
+ * LIFE CYCLE / STATE DIAGRAM
+ *
+ * FIXME
+ *
+ * THIS CODE IS DISGUSTING
+ *
+ * Warned you are; it's my second try and still not happy with it.
+ *
+ * NOTES:
+ *
+ * - No iso
+ *
+ * - Supports DMA xfers, control, bulk and maybe interrupt
+ *
+ * - Does not recycle unused rpipes
+ *
+ * An rpipe is assigned to an endpoint the first time it is used,
+ * and then it's there, assigned, until the endpoint is disabled
+ * (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
+ * rpipe to the endpoint is done under the wa->rpipe_mutex mutex.
+ *
+ * Two methods it could be done:
+ *
+ * (a) set up a timer every time an rpipe's use count drops to 1
+ * (which means unused) or when a transfer ends. Reset the
+ * timer when an xfer is queued. If the timer expires, release
+ * the rpipe [see rpipe_ep_disable()].
+ *
+ * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
+ * when none are found go over the list, check their endpoint
+ * and their activity record (if no last-xfer-done-ts in the
+ * last x seconds) take it
+ *
+ * However, because we have a set of limited
+ * resources (max-segments-at-the-same-time per xfer,
+ * xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
+ * we are going to have to rebuild all this based on a scheduler:
+ * we keep a list of transactions to do and, based on the
+ * availability of the different required components (blocks,
+ * rpipes, segment slots, etc), we schedule them. Painful.
+ */
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/hash.h>
+#include "wa-hc.h"
+#include "wusbhc.h"
+
+#undef D_LOCAL
+#define D_LOCAL 0 /* 0 disabled, > 0 different levels... */
+#include <linux/uwb/debug.h>
+
+enum {
+ WA_SEGS_MAX = 255,
+};
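+
+/*
+ * Illustrative only (kept under #if 0, not built): how a transfer is
+ * typically broken into segments given a segment size that is a
+ * multiple of the endpoint's max packet size (see the block comment at
+ * the top of this file). A real caller would also have to reject
+ * anything needing more than WA_SEGS_MAX segments.
+ */
+#if 0
+static size_t example_seg_count(size_t buf_len, size_t seg_size)
+{
+	size_t segs = (buf_len + seg_size - 1) / seg_size;
+
+	if (segs == 0)
+		segs = 1;	/* zero-length transfers still use one segment */
+	return segs;
+}
+#endif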
+
+enum wa_seg_status {
+ WA_SEG_NOTREADY,
+ WA_SEG_READY,
+ WA_SEG_DELAYED,
+ WA_SEG_SUBMITTED,
+ WA_SEG_PENDING,
+ WA_SEG_DTI_PENDING,
+ WA_SEG_DONE,
+ WA_SEG_ERROR,
+ WA_SEG_ABORTED,
+};
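+
+/*
+ * Rough segment life cycle (descriptive comment, inferred from how the
+ * states above are used): NOTREADY -> READY -> SUBMITTED -> PENDING ->
+ * [DTI_PENDING ->] DONE, with DELAYED used instead when the rpipe has
+ * no free request slots (such segments are submitted later), and
+ * ERROR/ABORTED as the terminal failure states.
+ */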
+
+static void wa_xfer_delayed_run(struct wa_rpipe *);
+
+/*
+ * Life cycle governed by 'struct urb' (the refcount of the struct is
+ * that of the 'struct urb' and usb_free_urb() would free the whole
+ * struct).
+ */
+struct wa_seg {
+ struct urb urb;
+ struct urb *dto_urb; /* for data output? */
+ struct list_head list_node; /* for rpipe->req_list */
+ struct wa_xfer *xfer; /* out xfer */
+ u8 index; /* which segment we are */
+ enum wa_seg_status status;
+ ssize_t result; /* bytes xfered or error */
+ struct wa_xfer_hdr xfer_hdr;
+ u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */
+};
+
+static void wa_seg_init(struct wa_seg *seg)
+{
+ /* usb_init_urb() repeats a lot of work, so we do it here */
+ kref_init(&seg->urb.kref);
+}
+
+/*
+ * Protected by xfer->lock
+ *
+ */
+struct wa_xfer {
+ struct kref refcnt;
+ struct list_head list_node;
+ spinlock_t lock;
+ u32 id;
+
+ struct wahc *wa; /* Wire adapter we are plugged to */
+ struct usb_host_endpoint *ep;
+	struct urb *urb;		/* URB we are transferring for */
+ struct wa_seg **seg; /* transfer segments */
+ u8 segs, segs_submitted, segs_done;
+ unsigned is_inbound:1;
+ unsigned is_dma:1;
+ size_t seg_size;
+ int result;
+
+ gfp_t gfp; /* allocation mask */
+
+ struct wusb_dev *wusb_dev; /* for activity timestamps */
+};
+
+static inline void wa_xfer_init(struct wa_xfer *xfer)
+{
+ kref_init(&xfer->refcnt);
+ INIT_LIST_HEAD(&xfer->list_node);
+ spin_lock_init(&xfer->lock);
+}
+
+/*
+ * Destroy a transfer structure
+ *
+ * Note that the xfer->seg[index] thingies follow the URB life cycle,
+ * so we need to put them, not free them.
+ */
+static void wa_xfer_destroy(struct kref *_xfer)
+{
+ struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
+ if (xfer->seg) {
+ unsigned cnt;
+ for (cnt = 0; cnt < xfer->segs; cnt++) {
+ if (xfer->is_inbound)
+ usb_put_urb(xfer->seg[cnt]->dto_urb);
+ usb_put_urb(&xfer->seg[cnt]->urb);
+ }
+ }
+ kfree(xfer);
+ d_printf(2, NULL, "xfer %p destroyed\n", xfer);
+}
+
+static void wa_xfer_get(struct wa_xfer *xfer)
+{
+ kref_get(&xfer->refcnt);
+}
+
+static void wa_xfer_put(struct wa_xfer *xfer)
+{
+ d_fnstart(3, NULL, "(xfer %p) -- ref count bef put %d\n",
+ xfer, atomic_read(&xfer->refcnt.refcount));
+ kref_put(&xfer->refcnt, wa_xfer_destroy);
+ d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
+}
+
+/*
+ * xfer is referenced
+ *
+ * xfer->lock has to be unlocked
+ *
+ * We take xfer->lock for setting the result; this is a barrier
+ * against drivers/usb/core/hcd.c:unlink1() being called after we call
+ * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
+ * reference to the transfer.
+ */
+static void wa_xfer_giveback(struct wa_xfer *xfer)
+{
+ unsigned long flags;
+ d_fnstart(3, NULL, "(xfer %p)\n", xfer);
+ spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
+ list_del_init(&xfer->list_node);
+ spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
+ /* FIXME: segmentation broken -- kills DWA */
+ wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
+ wa_put(xfer->wa);
+ wa_xfer_put(xfer);
+ d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
+}
+
+/*
+ * xfer is referenced
+ *
+ * xfer->lock has to be unlocked
+ */
+static void wa_xfer_completion(struct wa_xfer *xfer)
+{
+ d_fnstart(3, NULL, "(xfer %p)\n", xfer);
+ if (xfer->wusb_dev)
+ wusb_dev_put(xfer->wusb_dev);
+ rpipe_put(xfer->ep->hcpriv);
+ wa_xfer_giveback(xfer);
+ d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
+ return;
+}
+
+/*
+ * If transfer is done, wrap it up and return true
+ *
+ * xfer->lock has to be locked
+ */
+static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
+{
+ unsigned result, cnt;
+ struct wa_seg *seg;
+ struct urb *urb = xfer->urb;
+ unsigned found_short = 0;
+
+ d_fnstart(3, NULL, "(xfer %p)\n", xfer);
+ result = xfer->segs_done == xfer->segs_submitted;
+ if (result == 0)
+ goto out;
+ urb->actual_length = 0;
+ for (cnt = 0; cnt < xfer->segs; cnt++) {
+ seg = xfer->seg[cnt];
+ switch (seg->status) {
+ case WA_SEG_DONE:
+ if (found_short && seg->result > 0) {
+ if (printk_ratelimit())
+ printk(KERN_ERR "xfer %p#%u: bad short "
+ "segments (%zu)\n", xfer, cnt,
+ seg->result);
+ urb->status = -EINVAL;
+ goto out;
+ }
+ urb->actual_length += seg->result;
+ if (seg->result < xfer->seg_size
+ && cnt != xfer->segs-1)
+ found_short = 1;
+ d_printf(2, NULL, "xfer %p#%u: DONE short %d "
+ "result %zu urb->actual_length %d\n",
+ xfer, seg->index, found_short, seg->result,
+ urb->actual_length);
+ break;
+ case WA_SEG_ERROR:
+ xfer->result = seg->result;
+ d_printf(2, NULL, "xfer %p#%u: ERROR result %zu\n",
+ xfer, seg->index, seg->result);
+ goto out;
+ case WA_SEG_ABORTED:
+ WARN_ON(urb->status != -ECONNRESET
+ && urb->status != -ENOENT);
+ d_printf(2, NULL, "xfer %p#%u ABORTED: result %d\n",
+ xfer, seg->index, urb->status);
+ xfer->result = urb->status;
+ goto out;
+ default:
+ /* if (printk_ratelimit()) */
+ printk(KERN_ERR "xfer %p#%u: "
+ "is_done bad state %d\n",
+ xfer, cnt, seg->status);
+ xfer->result = -EINVAL;
+ WARN_ON(1);
+ goto out;
+ }
+ }
+ xfer->result = 0;
+out:
+ d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
+ return result;
+}
+
+/*
+ * Initialize a transfer's ID
+ *
+ * We need to use a sequential number; if we used the pointer or a
+ * hash of the pointer, it could repeat over sequential transfers and
+ * confuse the HWA (which makes one wonder why they put a 32-bit
+ * handle in there in the first place).
+ */
+static void wa_xfer_id_init(struct wa_xfer *xfer)
+{
+ xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
+}
+
+/*
+ * Return the ID associated with a transfer
+ */
+static u32 wa_xfer_id(struct wa_xfer *xfer)
+{
+ return xfer->id;
+}
+
+/*
+ * Search the wire adapter's transfer list for a transfer with the
+ * given ID and take a reference on it
+ *
+ * @returns NULL if not found.
+ */
+static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
+{
+ unsigned long flags;
+ struct wa_xfer *xfer_itr;
+ spin_lock_irqsave(&wa->xfer_list_lock, flags);
+ list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
+ if (id == xfer_itr->id) {
+ wa_xfer_get(xfer_itr);
+ goto out;
+ }
+ }
+ xfer_itr = NULL;
+out:
+ spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
+ return xfer_itr;
+}
+
+struct wa_xfer_abort_buffer {
+ struct urb urb;
+ struct wa_xfer_abort cmd;
+};
+
+static void __wa_xfer_abort_cb(struct urb *urb)
+{
+ struct wa_xfer_abort_buffer *b = urb->context;
+ usb_put_urb(&b->urb);
+}
+
+/*
+ * Aborts an ongoing transaction
+ *
+ * Assumes the transfer is referenced and locked and in a submitted
+ * state (mainly that there is an endpoint/rpipe assigned).
+ *
+ * The callback (see above) does nothing but freeing up the data by
+ * putting the URB. Because the URB is allocated at the head of the
+ * struct, the whole space we allocated is kfreed.
+ *
+ * We'll get an 'aborted transaction' xfer result on DTI, which we'll
+ * politely ignore because at this point the transaction has already
+ * been marked as aborted.
+ */
+static void __wa_xfer_abort(struct wa_xfer *xfer)
+{
+ int result;
+ struct device *dev = &xfer->wa->usb_iface->dev;
+ struct wa_xfer_abort_buffer *b;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ b = kmalloc(sizeof(*b), GFP_ATOMIC);
+ if (b == NULL)
+ goto error_kmalloc;
+ b->cmd.bLength = sizeof(b->cmd);
+ b->cmd.bRequestType = WA_XFER_ABORT;
+ b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
+ b->cmd.dwTransferID = wa_xfer_id(xfer);
+
+ usb_init_urb(&b->urb);
+ usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
+ usb_sndbulkpipe(xfer->wa->usb_dev,
+ xfer->wa->dto_epd->bEndpointAddress),
+ &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
+ result = usb_submit_urb(&b->urb, GFP_ATOMIC);
+ if (result < 0)
+ goto error_submit;
+ return; /* callback frees! */
+
+
+error_submit:
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
+ xfer, result);
+ kfree(b);
+error_kmalloc:
+ return;
+
+}
+
+/*
+ * Compute the transfer type, segment size and number of segments
+ *
+ * @returns < 0 on error, transfer segment request size if ok
+ */
+static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
+ enum wa_xfer_type *pxfer_type)
+{
+ ssize_t result;
+ struct device *dev = &xfer->wa->usb_iface->dev;
+ size_t maxpktsize;
+ struct urb *urb = xfer->urb;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
+ xfer, rpipe, urb);
+ switch (rpipe->descr.bmAttribute & 0x3) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ *pxfer_type = WA_XFER_TYPE_CTL;
+ result = sizeof(struct wa_xfer_ctl);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ case USB_ENDPOINT_XFER_BULK:
+ *pxfer_type = WA_XFER_TYPE_BI;
+ result = sizeof(struct wa_xfer_bi);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ dev_err(dev, "FIXME: ISOC not implemented\n");
+ result = -ENOSYS;
+ goto error;
+ default:
+ /* never happens */
+ BUG();
+ result = -EINVAL; /* shut gcc up */
+	}
+ xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
+ xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
+ xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
+ * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
+ /* Compute the segment size and make sure it is a multiple of
+ * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
+ * a check (FIXME) */
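+	/* e.g. with (hypothetical) wBlocks = 4 and bRPipeBlockSize = 7,
+	 * seg_size starts as 4 << (7 - 1) = 256 bytes above and is then
+	 * rounded down to a multiple of wMaxPacketSize below */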
+ maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
+ if (xfer->seg_size < maxpktsize) {
+ dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
+ "%zu\n", xfer->seg_size, maxpktsize);
+ result = -EINVAL;
+ goto error;
+ }
+ xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
+ xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
+ / xfer->seg_size;
+ if (xfer->segs >= WA_SEGS_MAX) {
+		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
+ (int)(urb->transfer_buffer_length / xfer->seg_size),
+ WA_SEGS_MAX);
+ result = -EINVAL;
+ goto error;
+ }
+ if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
+ xfer->segs = 1;
+error:
+ d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
+ xfer, rpipe, urb, (int)result);
+ return result;
+}
+
+/** Fill in the common request header and xfer-type specific data. */
+static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
+ struct wa_xfer_hdr *xfer_hdr0,
+ enum wa_xfer_type xfer_type,
+ size_t xfer_hdr_size)
+{
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
+ xfer_hdr0->bLength = xfer_hdr_size;
+ xfer_hdr0->bRequestType = xfer_type;
+ xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
+ xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
+ xfer_hdr0->bTransferSegment = 0;
+ switch (xfer_type) {
+ case WA_XFER_TYPE_CTL: {
+ struct wa_xfer_ctl *xfer_ctl =
+ container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
+ xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
+ BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
+ && xfer->urb->setup_packet == NULL);
+ memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
+ sizeof(xfer_ctl->baSetupData));
+ break;
+ }
+ case WA_XFER_TYPE_BI:
+ break;
+ case WA_XFER_TYPE_ISO:
+ printk(KERN_ERR "FIXME: ISOC not implemented\n");
+ default:
+ BUG();
+	}
+}
+
+/*
+ * Callback for the OUT data phase of the segment request
+ *
+ * Check wa_seg_cb(); most comments also apply here because this
+ * function does almost the same thing and they work closely
+ * together.
+ *
+ * If the seg request has failed but this DTO phase has succeeded,
+ * wa_seg_cb() has already failed the segment and moved the
+ * status to WA_SEG_ERROR, so this will go through 'case 0' and
+ * effectively do nothing.
+ */
+static void wa_seg_dto_cb(struct urb *urb)
+{
+ struct wa_seg *seg = urb->context;
+ struct wa_xfer *xfer = seg->xfer;
+ struct wahc *wa;
+ struct device *dev;
+ struct wa_rpipe *rpipe;
+ unsigned long flags;
+ unsigned rpipe_ready = 0;
+ u8 done = 0;
+
+ d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
+ switch (urb->status) {
+ case 0:
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ d_printf(2, dev, "xfer %p#%u: data out done (%d bytes)\n",
+ xfer, seg->index, urb->actual_length);
+ if (seg->status < WA_SEG_PENDING)
+ seg->status = WA_SEG_PENDING;
+ seg->result = urb->actual_length;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ break;
+ case -ECONNRESET: /* URB unlinked; no need to do anything */
+	case -ENOENT:		/* as it was done by whoever unlinked us */
+ break;
+ default: /* Other errors ... */
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ rpipe = xfer->ep->hcpriv;
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p#%u: data out error %d\n",
+ xfer, seg->index, urb->status);
+ if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)){
+ dev_err(dev, "DTO: URB max acceptable errors "
+ "exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ if (seg->status != WA_SEG_ERROR) {
+ seg->status = WA_SEG_ERROR;
+ seg->result = urb->status;
+ xfer->segs_done++;
+ __wa_xfer_abort(xfer);
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_is_done(xfer);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ }
+ d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
+}
+
+/*
+ * Callback for the segment request
+ *
+ * If successful, transition state (unless already transitioned or an
+ * outbound transfer); otherwise, take note of the error, mark this
+ * segment done and try completion.
+ *
+ * Note we don't access the transfer until we are sure it hasn't been
+ * cancelled (ECONNRESET, ENOENT), as in that case seg->xfer could
+ * already be gone.
+ *
+ * We have to check before setting the status to WA_SEG_PENDING
+ * because sometimes the xfer result callback arrives before this
+ * callback (geeeeeeze), so it might happen that we are already in
+ * another state. As well, we don't set it if the transfer is inbound,
+ * as in that case, wa_seg_dto_cb will do it when the OUT data phase
+ * finishes.
+ */
+static void wa_seg_cb(struct urb *urb)
+{
+ struct wa_seg *seg = urb->context;
+ struct wa_xfer *xfer = seg->xfer;
+ struct wahc *wa;
+ struct device *dev;
+ struct wa_rpipe *rpipe;
+ unsigned long flags;
+ unsigned rpipe_ready;
+ u8 done = 0;
+
+ d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
+ switch (urb->status) {
+ case 0:
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ d_printf(2, dev, "xfer %p#%u: request done\n",
+ xfer, seg->index);
+ if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
+ seg->status = WA_SEG_PENDING;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ break;
+ case -ECONNRESET: /* URB unlinked; no need to do anything */
+	case -ENOENT:		/* as it was done by whoever unlinked us */
+ break;
+ default: /* Other errors ... */
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ rpipe = xfer->ep->hcpriv;
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p#%u: request error %d\n",
+ xfer, seg->index, urb->status);
+ if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)){
+ dev_err(dev, "DTO: URB max acceptable errors "
+ "exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ usb_unlink_urb(seg->dto_urb);
+ seg->status = WA_SEG_ERROR;
+ seg->result = urb->status;
+ xfer->segs_done++;
+ __wa_xfer_abort(xfer);
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_is_done(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ }
+ d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
+}
+
+/*
+ * Allocate the segs array and initialize each of them
+ *
+ * The segments are freed by wa_xfer_destroy() when the xfer use count
+ * drops to zero; however, because each segment is given the same life
+ * cycle as the USB URB it contains, it is actually freed by
+ * usb_put_urb() on the contained USB URB (twisted, eh?).
+ */
+static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
+{
+ int result, cnt;
+ size_t alloc_size = sizeof(*xfer->seg[0])
+ - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
+ struct usb_device *usb_dev = xfer->wa->usb_dev;
+ const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
+ struct wa_seg *seg;
+ size_t buf_itr, buf_size, buf_itr_size;
+
+ result = -ENOMEM;
+ xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
+ if (xfer->seg == NULL)
+ goto error_segs_kzalloc;
+ buf_itr = 0;
+ buf_size = xfer->urb->transfer_buffer_length;
+ for (cnt = 0; cnt < xfer->segs; cnt++) {
+ seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
+ if (seg == NULL)
+ goto error_seg_kzalloc;
+ wa_seg_init(seg);
+ seg->xfer = xfer;
+ seg->index = cnt;
+ usb_fill_bulk_urb(&seg->urb, usb_dev,
+ usb_sndbulkpipe(usb_dev,
+ dto_epd->bEndpointAddress),
+ &seg->xfer_hdr, xfer_hdr_size,
+ wa_seg_cb, seg);
+ buf_itr_size = buf_size > xfer->seg_size ?
+ xfer->seg_size : buf_size;
+ if (xfer->is_inbound == 0 && buf_size > 0) {
+ seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (seg->dto_urb == NULL)
+ goto error_dto_alloc;
+ usb_fill_bulk_urb(
+ seg->dto_urb, usb_dev,
+ usb_sndbulkpipe(usb_dev,
+ dto_epd->bEndpointAddress),
+ NULL, 0, wa_seg_dto_cb, seg);
+ if (xfer->is_dma) {
+ seg->dto_urb->transfer_dma =
+ xfer->urb->transfer_dma + buf_itr;
+ seg->dto_urb->transfer_flags |=
+ URB_NO_TRANSFER_DMA_MAP;
+ } else
+ seg->dto_urb->transfer_buffer =
+ xfer->urb->transfer_buffer + buf_itr;
+ seg->dto_urb->transfer_buffer_length = buf_itr_size;
+ }
+ seg->status = WA_SEG_READY;
+ buf_itr += buf_itr_size;
+ buf_size -= buf_itr_size;
+ }
+ return 0;
+
+error_dto_alloc:
+ kfree(xfer->seg[cnt]);
+ cnt--;
+error_seg_kzalloc:
+	/* use the fact that cnt is left at where it failed; the segments
+	 * were never submitted, so their embedded URBs hold only the
+	 * initial reference */
+	for (; cnt >= 0; cnt--) {
+		if (xfer->seg[cnt] == NULL)
+			continue;
+		if (xfer->is_inbound == 0)
+			usb_free_urb(xfer->seg[cnt]->dto_urb);
+		kfree(xfer->seg[cnt]);
+	}
+	kfree(xfer->seg);
+	xfer->seg = NULL;
+error_segs_kzalloc:
+ return result;
+}
+
+/*
+ * Allocates all the stuff needed to submit a transfer
+ *
+ * Breaks the whole data buffer in a list of segments, each one has a
+ * structure allocated to it and linked in xfer->seg[index]
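+ *
+ * e.g. (hypothetical sizes): a 10000 byte bulk URB with a 4096 byte
+ * seg_size is split into three segments of 4096, 4096 and 1808 bytes;
+ * the last one gets bit 7 of bTransferSegment set.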
+ *
+ * FIXME: merge setup_segs() and the last part of this function, no
+ * need to do two for loops when we could run everything in a
+ * single one
+ */
+static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
+{
+ int result;
+ struct device *dev = &xfer->wa->usb_iface->dev;
+ enum wa_xfer_type xfer_type = 0; /* shut up GCC */
+ size_t xfer_hdr_size, cnt, transfer_size;
+ struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
+
+ d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
+ xfer, xfer->ep->hcpriv, urb);
+
+ result = __wa_xfer_setup_sizes(xfer, &xfer_type);
+ if (result < 0)
+ goto error_setup_sizes;
+ xfer_hdr_size = result;
+ result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
+ if (result < 0) {
+ dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
+ xfer, xfer->segs, result);
+ goto error_setup_segs;
+ }
+ /* Fill the first header */
+ xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
+ wa_xfer_id_init(xfer);
+ __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
+
+	/* Fill remaining headers */
+ xfer_hdr = xfer_hdr0;
+ transfer_size = urb->transfer_buffer_length;
+	xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
+		cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
+ transfer_size -= xfer->seg_size;
+ for (cnt = 1; cnt < xfer->segs; cnt++) {
+ xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
+ memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
+ xfer_hdr->bTransferSegment = cnt;
+ xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
+ cpu_to_le32(xfer->seg_size)
+ : cpu_to_le32(transfer_size);
+ xfer->seg[cnt]->status = WA_SEG_READY;
+ transfer_size -= xfer->seg_size;
+ }
+ xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
+ result = 0;
+error_setup_segs:
+error_setup_sizes:
+ d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
+ xfer, xfer->ep->hcpriv, urb, result);
+ return result;
+}
+
+/*
+ * Submit a transfer segment's request URB (and its DTO URB, if any)
+ *
+ * rpipe->seg_lock is held!
+ */
+static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
+ struct wa_seg *seg)
+{
+ int result;
+ result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
+ if (result < 0) {
+ printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
+ xfer, seg->index, result);
+ goto error_seg_submit;
+ }
+ if (seg->dto_urb) {
+ result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
+ if (result < 0) {
+ printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
+ xfer, seg->index, result);
+ goto error_dto_submit;
+ }
+ }
+ seg->status = WA_SEG_SUBMITTED;
+ rpipe_avail_dec(rpipe);
+ return 0;
+
+error_dto_submit:
+ usb_unlink_urb(&seg->urb);
+error_seg_submit:
+ seg->status = WA_SEG_ERROR;
+ seg->result = result;
+ return result;
+}
+
+/*
+ * Execute more queued request segments until the maximum concurrently
+ * allowed is reached
+ *
+ * The ugly unlock/lock sequence on the error path is needed as the
+ * xfer->lock normally nests the seg_lock and not vice versa.
+ *
+ */
+static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
+{
+ int result;
+ struct device *dev = &rpipe->wa->usb_iface->dev;
+ struct wa_seg *seg;
+ struct wa_xfer *xfer;
+ unsigned long flags;
+
+ d_fnstart(1, dev, "(rpipe #%d) %d segments available\n",
+ le16_to_cpu(rpipe->descr.wRPipeIndex),
+ atomic_read(&rpipe->segs_available));
+ spin_lock_irqsave(&rpipe->seg_lock, flags);
+ while (atomic_read(&rpipe->segs_available) > 0
+ && !list_empty(&rpipe->seg_list)) {
+ seg = list_entry(rpipe->seg_list.next, struct wa_seg,
+ list_node);
+ list_del(&seg->list_node);
+ xfer = seg->xfer;
+ result = __wa_seg_submit(rpipe, xfer, seg);
+ d_printf(1, dev, "xfer %p#%u submitted from delayed "
+ "[%d segments available] %d\n",
+ xfer, seg->index,
+ atomic_read(&rpipe->segs_available), result);
+ if (unlikely(result < 0)) {
+ spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+ spin_lock_irqsave(&xfer->lock, flags);
+ __wa_xfer_abort(xfer);
+ xfer->segs_done++;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ spin_lock_irqsave(&rpipe->seg_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+ d_fnend(1, dev, "(rpipe #%d) = void, %d segments available\n",
+ le16_to_cpu(rpipe->descr.wRPipeIndex),
+ atomic_read(&rpipe->segs_available));
+
+}
+
+/*
+ * Submit (or queue as delayed) all the segments of a transfer
+ *
+ * xfer->lock is taken
+ *
+ * On a submission failure we just stop submitting and return the
+ * error; wa_urb_enqueue_b() will execute the completion path
+ */
+static int __wa_xfer_submit(struct wa_xfer *xfer)
+{
+ int result;
+ struct wahc *wa = xfer->wa;
+ struct device *dev = &wa->usb_iface->dev;
+ unsigned cnt;
+ struct wa_seg *seg;
+ unsigned long flags;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+ size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
+ u8 available;
+ u8 empty;
+
+ d_fnstart(3, dev, "(xfer %p [rpipe %p])\n",
+ xfer, xfer->ep->hcpriv);
+
+ spin_lock_irqsave(&wa->xfer_list_lock, flags);
+ list_add_tail(&xfer->list_node, &wa->xfer_list);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
+
+ BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
+ result = 0;
+ spin_lock_irqsave(&rpipe->seg_lock, flags);
+ for (cnt = 0; cnt < xfer->segs; cnt++) {
+ available = atomic_read(&rpipe->segs_available);
+ empty = list_empty(&rpipe->seg_list);
+ seg = xfer->seg[cnt];
+ d_printf(2, dev, "xfer %p#%u: available %u empty %u (%s)\n",
+ xfer, cnt, available, empty,
+ available == 0 || !empty ? "delayed" : "submitted");
+ if (available == 0 || !empty) {
+ d_printf(1, dev, "xfer %p#%u: delayed\n", xfer, cnt);
+ seg->status = WA_SEG_DELAYED;
+ list_add_tail(&seg->list_node, &rpipe->seg_list);
+ } else {
+ result = __wa_seg_submit(rpipe, xfer, seg);
+ if (result < 0)
+ goto error_seg_submit;
+ }
+ xfer->segs_submitted++;
+ }
+ spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+ d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
+ xfer->ep->hcpriv);
+ return result;
+
+error_seg_submit:
+ __wa_xfer_abort(xfer);
+ spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+ d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
+ xfer->ep->hcpriv);
+ return result;
+}
+
+/*
+ * Second part of a URB/transfer enqueuement
+ *
+ * Assumes this comes from wa_urb_enqueue() [maybe through
+ * wa_urb_enqueue_run()]. At this point:
+ *
+ * xfer->wa filled and refcounted
+ * xfer->ep filled with rpipe refcounted if
+ * delayed == 0
+ * xfer->urb filled and refcounted (this is the case when called
+ * from wa_urb_enqueue() as we come from usb_submit_urb()
+ * and when called by wa_urb_enqueue_run(), as we took an
+ * extra ref dropped by _run() after we return).
+ * xfer->gfp filled
+ *
+ * If we fail at __wa_xfer_submit(), then we just check if we are done
+ * and if so, we run the completion procedure. However, if we are not
+ * yet done, we do nothing and wait for the completion handlers from
+ * the submitted URBs or from the xfer-result path to kick in. If xfer
+ * result never kicks in, the xfer will timeout from the USB code and
+ * dequeue() will be called.
+ */
+static void wa_urb_enqueue_b(struct wa_xfer *xfer)
+{
+ int result;
+ unsigned long flags;
+ struct urb *urb = xfer->urb;
+ struct wahc *wa = xfer->wa;
+ struct wusbhc *wusbhc = wa->wusb;
+ struct device *dev = &wa->usb_iface->dev;
+ struct wusb_dev *wusb_dev;
+ unsigned done;
+
+ d_fnstart(3, dev, "(wa %p urb %p)\n", wa, urb);
+ result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
+ if (result < 0)
+ goto error_rpipe_get;
+ result = -ENODEV;
+ /* FIXME: segmentation broken -- kills DWA */
+ mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
+ if (urb->dev == NULL)
+ goto error_dev_gone;
+ wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
+ if (wusb_dev == NULL) {
+ mutex_unlock(&wusbhc->mutex);
+ goto error_dev_gone;
+ }
+ mutex_unlock(&wusbhc->mutex);
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ xfer->wusb_dev = wusb_dev;
+ result = urb->status;
+ if (urb->status != -EINPROGRESS)
+ goto error_dequeued;
+
+ result = __wa_xfer_setup(xfer, urb);
+ if (result < 0)
+ goto error_xfer_setup;
+ result = __wa_xfer_submit(xfer);
+ if (result < 0)
+ goto error_xfer_submit;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ d_fnend(3, dev, "(wa %p urb %p) = void\n", wa, urb);
+ return;
+
+	/* This is basically wa_xfer_completion() broken up:
+	 * wa_xfer_giveback() does a wa_xfer_put() that will call
+	 * wa_xfer_destroy() and clean up / undo setup().
+	 */
+error_xfer_setup:
+error_dequeued:
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ /* FIXME: segmentation broken, kills DWA */
+ if (wusb_dev)
+ wusb_dev_put(wusb_dev);
+error_dev_gone:
+ rpipe_put(xfer->ep->hcpriv);
+error_rpipe_get:
+ xfer->result = result;
+ wa_xfer_giveback(xfer);
+ d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
+ return;
+
+error_xfer_submit:
+ done = __wa_xfer_is_done(xfer);
+ xfer->result = result;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
+ return;
+}
+
+/*
+ * Execute the delayed transfers in the Wire Adapter @wa
+ *
+ * We need to be careful here, as dequeue() could be called in the
+ * middle. That's why we do the whole thing under the
+ * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
+ * and then checks the list -- so as we would be acquiring in inverse
+ * order, we just drop the lock once we have the xfer and reacquire it
+ * later.
+ */
+void wa_urb_enqueue_run(struct work_struct *ws)
+{
+ struct wahc *wa = container_of(ws, struct wahc, xfer_work);
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_xfer *xfer, *next;
+ struct urb *urb;
+
+ d_fnstart(3, dev, "(wa %p)\n", wa);
+ spin_lock_irq(&wa->xfer_list_lock);
+ list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
+ list_node) {
+ list_del_init(&xfer->list_node);
+ spin_unlock_irq(&wa->xfer_list_lock);
+
+ urb = xfer->urb;
+ wa_urb_enqueue_b(xfer);
+ usb_put_urb(urb); /* taken when queuing */
+
+ spin_lock_irq(&wa->xfer_list_lock);
+ }
+ spin_unlock_irq(&wa->xfer_list_lock);
+ d_fnend(3, dev, "(wa %p) = void\n", wa);
+}
+EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
+
+/*
+ * Submit a transfer to the Wire Adapter in a delayed way
+ *
+ * The process of enqueuing involves possible sleeps [see
+ * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
+ * in an atomic section, we defer the enqueue_b() call to a workqueue;
+ * otherwise we call it directly.
+ *
+ * @urb: We own a reference to it done by the HCI Linux USB stack that
+ * will be given up by calling usb_hcd_giveback_urb() or by
+ * returning error from this function -> ergo we don't have to
+ * refcount it.
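+ *
+ * A minimal usage sketch (an assumption, not part of this file): an
+ * HCD glue layer would forward its ->urb_enqueue() hook here, e.g.
+ * with a hypothetical private struct 'foo_hc' embedding a wahc:
+ *
+ *	static int foo_urb_enqueue(struct usb_hcd *usb_hcd,
+ *				   struct urb *urb, gfp_t gfp)
+ *	{
+ *		struct foo_hc *foo = usb_hcd_to_foo(usb_hcd);
+ *
+ *		return wa_urb_enqueue(&foo->wa, urb->ep, urb, gfp);
+ *	}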
+ */
+int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
+ struct urb *urb, gfp_t gfp)
+{
+ int result;
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_xfer *xfer;
+ unsigned long my_flags;
+ unsigned cant_sleep = irqs_disabled() | in_atomic();
+
+ d_fnstart(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x)\n",
+ wa, ep, urb, urb->transfer_buffer_length, gfp);
+
+ if (urb->transfer_buffer == NULL
+ && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
+ && urb->transfer_buffer_length != 0) {
+ dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
+ dump_stack();
+ }
+
+ result = -ENOMEM;
+ xfer = kzalloc(sizeof(*xfer), gfp);
+ if (xfer == NULL)
+ goto error_kmalloc;
+
+ result = -ENOENT;
+ if (urb->status != -EINPROGRESS) /* cancelled */
+ goto error_dequeued; /* before starting? */
+ wa_xfer_init(xfer);
+ xfer->wa = wa_get(wa);
+ xfer->urb = urb;
+ xfer->gfp = gfp;
+ xfer->ep = ep;
+ urb->hcpriv = xfer;
+ d_printf(2, dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
+ xfer, urb, urb->pipe, urb->transfer_buffer_length,
+ urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
+ urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
+ cant_sleep ? "deferred" : "inline");
+ if (cant_sleep) {
+ usb_get_urb(urb);
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+ queue_work(wusbd, &wa->xfer_work);
+ } else {
+ wa_urb_enqueue_b(xfer);
+ }
+ d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = 0\n",
+ wa, ep, urb, urb->transfer_buffer_length, gfp);
+ return 0;
+
+error_dequeued:
+ kfree(xfer);
+error_kmalloc:
+ d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = %d\n",
+ wa, ep, urb, urb->transfer_buffer_length, gfp, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wa_urb_enqueue);
+
+/*
+ * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
+ * handler] is called.
+ *
+ * Until a transfer goes successfully through wa_urb_enqueue() it
+ * needs to be dequeued with completion calling; when stuck in the
+ * delayed list or before wa_xfer_setup() is called, we need to do
+ * the completion ourselves.
+ *
+ *  not setup  If there is no hcpriv yet, it means that enqueue()
+ *             hasn't had time to set the xfer up yet. Because
+ *             urb->status should be other than -EINPROGRESS,
+ *             enqueue() will catch that and bail out.
+ *
+ * If the transfer has gone through setup, we just need to clean it
+ * up. If it has gone through submit(), we have to abort it [with an
+ * asynch request] and then make sure we cancel each segment.
+ *
+ */
+int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
+{
+ struct device *dev = &wa->usb_iface->dev;
+ unsigned long flags, flags2;
+ struct wa_xfer *xfer;
+ struct wa_seg *seg;
+ struct wa_rpipe *rpipe;
+ unsigned cnt;
+ unsigned rpipe_ready = 0;
+
+ d_fnstart(3, dev, "(wa %p, urb %p)\n", wa, urb);
+
+ d_printf(1, dev, "xfer %p urb %p: aborting\n", urb->hcpriv, urb);
+ xfer = urb->hcpriv;
+ if (xfer == NULL) {
+		/* Nothing is set up yet; enqueue() will see urb->status
+		 * != -EINPROGRESS (set by the HCD layer) and bail out
+		 * with an error, so there is no need to do completion
+		 */
+ BUG_ON(urb->status == -EINPROGRESS);
+ goto out;
+ }
+ spin_lock_irqsave(&xfer->lock, flags);
+ rpipe = xfer->ep->hcpriv;
+ /* Check the delayed list -> if there, release and complete */
+ spin_lock_irqsave(&wa->xfer_list_lock, flags2);
+ if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
+ goto dequeue_delayed;
+ spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
+	if (xfer->seg == NULL)		/* still hasn't reached setup() */
+		goto out_unlock;	/* enqueue_b() will do the completion */
+ /* Ok, the xfer is in flight already, it's been setup and submitted.*/
+ __wa_xfer_abort(xfer);
+ for (cnt = 0; cnt < xfer->segs; cnt++) {
+ seg = xfer->seg[cnt];
+ switch (seg->status) {
+ case WA_SEG_NOTREADY:
+ case WA_SEG_READY:
+ printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
+ xfer, cnt, seg->status);
+ WARN_ON(1);
+ break;
+ case WA_SEG_DELAYED:
+ seg->status = WA_SEG_ABORTED;
+ spin_lock_irqsave(&rpipe->seg_lock, flags2);
+ list_del(&seg->list_node);
+ xfer->segs_done++;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
+ break;
+ case WA_SEG_SUBMITTED:
+ seg->status = WA_SEG_ABORTED;
+ usb_unlink_urb(&seg->urb);
+ if (xfer->is_inbound == 0)
+ usb_unlink_urb(seg->dto_urb);
+ xfer->segs_done++;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ break;
+ case WA_SEG_PENDING:
+ seg->status = WA_SEG_ABORTED;
+ xfer->segs_done++;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ break;
+ case WA_SEG_DTI_PENDING:
+ usb_unlink_urb(wa->dti_urb);
+ seg->status = WA_SEG_ABORTED;
+ xfer->segs_done++;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ break;
+ case WA_SEG_DONE:
+ case WA_SEG_ERROR:
+ case WA_SEG_ABORTED:
+ break;
+ }
+ }
+ xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
+ __wa_xfer_is_done(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
+ return 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&xfer->lock, flags);
+out:
+ d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
+ return 0;
+
+dequeue_delayed:
+ list_del_init(&xfer->list_node);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
+ xfer->result = urb->status;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_xfer_giveback(xfer);
+ usb_put_urb(urb); /* we got a ref in enqueue() */
+ d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wa_urb_dequeue);
+
+/*
+ * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
+ * codes
+ *
+ * Positive errno values are internal inconsistencies and should be
+ * flagged louder. Negative are to be passed up to the user in the
+ * normal way.
+ *
+ * @status: USB WA status code -- high two bits are stripped.
+ */
+static int wa_xfer_status_to_errno(u8 status)
+{
+ int errno;
+ u8 real_status = status;
+ static int xlat[] = {
+ [WA_XFER_STATUS_SUCCESS] = 0,
+ [WA_XFER_STATUS_HALTED] = -EPIPE,
+ [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
+ [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
+ [WA_XFER_RESERVED] = EINVAL,
+ [WA_XFER_STATUS_NOT_FOUND] = 0,
+ [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
+ [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
+ [WA_XFER_STATUS_ABORTED] = -EINTR,
+ [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
+ [WA_XFER_INVALID_FORMAT] = EINVAL,
+ [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
+ [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
+ };
+ status &= 0x3f;
+
+ if (status == 0)
+ return 0;
+ if (status >= ARRAY_SIZE(xlat)) {
+ if (printk_ratelimit())
+ printk(KERN_ERR "%s(): BUG? "
+ "Unknown WA transfer status 0x%02x\n",
+ __func__, real_status);
+ return -EINVAL;
+ }
+ errno = xlat[status];
+ if (unlikely(errno > 0)) {
+ if (printk_ratelimit())
+ printk(KERN_ERR "%s(): BUG? "
+ "Inconsistent WA status: 0x%02x\n",
+ __func__, real_status);
+ errno = -errno;
+ }
+ return errno;
+}
+
+/*
+ * Process an xfer result completion message
+ *
+ * inbound transfers: need to schedule a DTI read
+ *
+ * FIXME: this function needs to be broken up into parts
+ */
+static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
+{
+ int result;
+ struct device *dev = &wa->usb_iface->dev;
+ unsigned long flags;
+ u8 seg_idx;
+ struct wa_seg *seg;
+ struct wa_rpipe *rpipe;
+ struct wa_xfer_result *xfer_result = wa->xfer_result;
+ u8 done = 0;
+ u8 usb_status;
+ unsigned rpipe_ready = 0;
+
+ d_fnstart(3, dev, "(wa %p xfer %p)\n", wa, xfer);
+ spin_lock_irqsave(&xfer->lock, flags);
+ seg_idx = xfer_result->bTransferSegment & 0x7f;
+ if (unlikely(seg_idx >= xfer->segs))
+ goto error_bad_seg;
+ seg = xfer->seg[seg_idx];
+ rpipe = xfer->ep->hcpriv;
+ usb_status = xfer_result->bTransferStatus;
+ d_printf(2, dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
+ xfer, seg_idx, usb_status, seg->status);
+ if (seg->status == WA_SEG_ABORTED
+ || seg->status == WA_SEG_ERROR) /* already handled */
+ goto segment_aborted;
+	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
+ seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
+ if (seg->status != WA_SEG_PENDING) {
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
+ xfer, seg_idx, seg->status);
+ seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
+ }
+ if (usb_status & 0x80) {
+ seg->result = wa_xfer_status_to_errno(usb_status);
+ dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
+ xfer, seg->index, usb_status);
+ goto error_complete;
+ }
+ /* FIXME: we ignore warnings, tally them for stats */
+ if (usb_status & 0x40) /* Warning?... */
+ usb_status = 0; /* ... pass */
+ if (xfer->is_inbound) { /* IN data phase: read to buffer */
+ seg->status = WA_SEG_DTI_PENDING;
+ BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+ if (xfer->is_dma) {
+ wa->buf_in_urb->transfer_dma =
+ xfer->urb->transfer_dma
+ + seg_idx * xfer->seg_size;
+ wa->buf_in_urb->transfer_flags
+ |= URB_NO_TRANSFER_DMA_MAP;
+ } else {
+ wa->buf_in_urb->transfer_buffer =
+ xfer->urb->transfer_buffer
+ + seg_idx * xfer->seg_size;
+ wa->buf_in_urb->transfer_flags
+ &= ~URB_NO_TRANSFER_DMA_MAP;
+ }
+ wa->buf_in_urb->transfer_buffer_length =
+ le32_to_cpu(xfer_result->dwTransferLength);
+ wa->buf_in_urb->context = seg;
+ result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
+ if (result < 0)
+ goto error_submit_buf_in;
+ } else {
+ /* OUT data phase, complete it -- */
+ seg->status = WA_SEG_DONE;
+ seg->result = le32_to_cpu(xfer_result->dwTransferLength);
+ xfer->segs_done++;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_is_done(xfer);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ d_fnend(3, dev, "(wa %p xfer %p) = void\n", wa, xfer);
+ return;
+
+
+error_submit_buf_in:
+ if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "DTI: URB max acceptable errors "
+ "exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
+ xfer, seg_idx, result);
+ seg->result = result;
+error_complete:
+ seg->status = WA_SEG_ERROR;
+ xfer->segs_done++;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ __wa_xfer_abort(xfer);
+ done = __wa_xfer_is_done(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ d_fnend(3, dev, "(wa %p xfer %p) = void [segment/DTI-submit error]\n",
+ wa, xfer);
+ return;
+
+
+error_bad_seg:
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_urb_dequeue(wa, xfer->urb);
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
+ if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "DTI: URB max acceptable errors "
+ "exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ d_fnend(3, dev, "(wa %p xfer %p) = void [bad seg]\n", wa, xfer);
+ return;
+
+
+segment_aborted:
+ /* nothing to do, as the aborter did the completion */
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ d_fnend(3, dev, "(wa %p xfer %p) = void [segment aborted]\n",
+ wa, xfer);
+ return;
+
+}
+
+/*
+ * Callback for the IN data phase
+ *
+ * If successful, transition state; otherwise, take note of the
+ * error, mark this segment done and try completion.
+ *
+ * Note we don't access the transfer until we are sure it hasn't been
+ * cancelled (ECONNRESET, ENOENT), as in that case seg->xfer could
+ * already be gone.
+ */
+static void wa_buf_in_cb(struct urb *urb)
+{
+ struct wa_seg *seg = urb->context;
+ struct wa_xfer *xfer = seg->xfer;
+ struct wahc *wa;
+ struct device *dev;
+ struct wa_rpipe *rpipe;
+ unsigned rpipe_ready;
+ unsigned long flags;
+ u8 done = 0;
+
+ d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
+ switch (urb->status) {
+ case 0:
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ rpipe = xfer->ep->hcpriv;
+ d_printf(2, dev, "xfer %p#%u: data in done (%zu bytes)\n",
+ xfer, seg->index, (size_t)urb->actual_length);
+ seg->status = WA_SEG_DONE;
+ seg->result = urb->actual_length;
+ xfer->segs_done++;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_is_done(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ break;
+ case -ECONNRESET: /* URB unlinked; no need to do anything */
+ case -ENOENT: /* as it was done by the who unlinked us */
+ break;
+ default: /* Other errors ... */
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ rpipe = xfer->ep->hcpriv;
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p#%u: data in error %d\n",
+ xfer, seg->index, urb->status);
+ if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)){
+ dev_err(dev, "DTO: URB max acceptable errors "
+ "exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ seg->status = WA_SEG_ERROR;
+ seg->result = urb->status;
+ xfer->segs_done++;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ __wa_xfer_abort(xfer);
+ done = __wa_xfer_is_done(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ }
+ d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
+}
+
+/*
+ * Handle an incoming transfer result buffer
+ *
+ * Given a transfer result buffer, it completes the transfer (possibly
+ * scheduling a buffer-in read) and then resubmits the DTI URB for a
+ * new transfer result read.
+ *
+ *
+ * The xfer_result DTI URB state machine
+ *
+ * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
+ *
+ * We start in OFF mode, the first xfer_result notification [through
+ * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
+ * read.
+ *
+ * We receive a buffer -- if it is not an xfer_result, we complain and
+ * repost the DTI-URB. If it is an xfer_result then we do the xfer seg
+ * request accounting. If it is an IN segment, we move to RBI and post
+ * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
+ * repost the DTI-URB and move to RXR state. If there was no IN
+ * segment, it will repost the DTI-URB.
+ *
+ * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
+ * errors) in the URBs.
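+ *
+ * Roughly, as a sketch:
+ *
+ *	OFF --first xfer_result notification--> RXR
+ *	RXR --xfer_result for an IN segment---> RBI  (post BUF-IN-URB)
+ *	RBI --buf-in callback-----------------> RXR  (repost DTI-URB)
+ *	RXR --non-IN xfer_result--------------> RXR  (repost DTI-URB)
+ *	any --ENOENT/ESHUTDOWN/too many errors> OFF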
+ */
+static void wa_xfer_result_cb(struct urb *urb)
+{
+ int result;
+ struct wahc *wa = urb->context;
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_xfer_result *xfer_result;
+ u32 xfer_id;
+ struct wa_xfer *xfer;
+ u8 usb_status;
+
+ d_fnstart(3, dev, "(%p)\n", wa);
+ BUG_ON(wa->dti_urb != urb);
+ switch (wa->dti_urb->status) {
+ case 0:
+ /* We have a xfer result buffer; check it */
+ d_printf(2, dev, "DTI: xfer result %d bytes at %p\n",
+ urb->actual_length, urb->transfer_buffer);
+ d_dump(3, dev, urb->transfer_buffer, urb->actual_length);
+ if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
+ dev_err(dev, "DTI Error: xfer result--bad size "
+ "xfer result (%d bytes vs %zu needed)\n",
+ urb->actual_length, sizeof(*xfer_result));
+ break;
+ }
+ xfer_result = wa->xfer_result;
+ if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
+ dev_err(dev, "DTI Error: xfer result--"
+ "bad header length %u\n",
+ xfer_result->hdr.bLength);
+ break;
+ }
+ if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
+ dev_err(dev, "DTI Error: xfer result--"
+ "bad header type 0x%02x\n",
+ xfer_result->hdr.bNotifyType);
+ break;
+ }
+ usb_status = xfer_result->bTransferStatus & 0x3f;
+ if (usb_status == WA_XFER_STATUS_ABORTED
+ || usb_status == WA_XFER_STATUS_NOT_FOUND)
+ /* taken care of already */
+ break;
+ xfer_id = xfer_result->dwTransferID;
+ xfer = wa_xfer_get_by_id(wa, xfer_id);
+ if (xfer == NULL) {
+ /* FIXME: transaction might have been cancelled */
+ dev_err(dev, "DTI Error: xfer result--"
+ "unknown xfer 0x%08x (status 0x%02x)\n",
+ xfer_id, usb_status);
+ break;
+ }
+ wa_xfer_result_chew(wa, xfer);
+ wa_xfer_put(xfer);
+ break;
+ case -ENOENT: /* (we killed the URB)...so, no broadcast */
+ case -ESHUTDOWN: /* going away! */
+ dev_dbg(dev, "DTI: going down! %d\n", urb->status);
+ goto out;
+ default:
+ /* Unknown error */
+ if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "DTI: URB max acceptable errors "
+ "exceeded, resetting device\n");
+ wa_reset_all(wa);
+ goto out;
+ }
+ if (printk_ratelimit())
+ dev_err(dev, "DTI: URB error %d\n", urb->status);
+ break;
+ }
+ /* Resubmit the DTI URB */
+ result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
+ "resetting\n", result);
+ wa_reset_all(wa);
+ }
+out:
+ d_fnend(3, dev, "(%p) = void\n", wa);
+ return;
+}
+
+/*
+ * Transfer complete notification
+ *
+ * Called from the notif.c code. We get a notification on EP2 saying
+ * that some endpoint has some transfer result data available. We are
+ * about to read it.
+ *
+ * To speed up things, we always have a URB reading the DTI URB; we
+ * don't really set it up and start it until the first xfer complete
+ * notification arrives, which is what we do here.
+ *
+ * Follow up in wa_xfer_result_cb(), as that's where the whole state
+ * machine starts.
+ *
+ * So here we just initialize the DTI URB for reading transfer result
+ * notifications and also the buffer-in URB, for reading buffers. Then
+ * we just submit the DTI URB.
+ *
+ * @wa shall be referenced
+ */
+void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
+{
+ int result;
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_notif_xfer *notif_xfer;
+ const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
+
+ d_fnstart(4, dev, "(%p, %p)\n", wa, notif_hdr);
+ notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
+ BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
+
+ if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
+ /* FIXME: hardcoded limitation, adapt */
+ dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
+ notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
+ goto error;
+ }
+ if (wa->dti_urb != NULL) /* DTI URB already started */
+ goto out;
+
+ wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (wa->dti_urb == NULL) {
+ dev_err(dev, "Can't allocate DTI URB\n");
+ goto error_dti_urb_alloc;
+ }
+ usb_fill_bulk_urb(
+ wa->dti_urb, wa->usb_dev,
+ usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
+ wa->xfer_result, wa->xfer_result_size,
+ wa_xfer_result_cb, wa);
+
+ wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (wa->buf_in_urb == NULL) {
+ dev_err(dev, "Can't allocate BUF-IN URB\n");
+ goto error_buf_in_urb_alloc;
+ }
+ usb_fill_bulk_urb(
+ wa->buf_in_urb, wa->usb_dev,
+ usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
+ NULL, 0, wa_buf_in_cb, wa);
+ result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
+ "resetting\n", result);
+ goto error_dti_urb_submit;
+ }
+out:
+ d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr);
+ return;
+
+error_dti_urb_submit:
+ usb_put_urb(wa->buf_in_urb);
+error_buf_in_urb_alloc:
+ usb_put_urb(wa->dti_urb);
+ wa->dti_urb = NULL;
+error_dti_urb_alloc:
+error:
+ wa_reset_all(wa);
+ d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr);
+ return;
+}
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
new file mode 100644
index 00000000000..07c63a31c79
--- /dev/null
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -0,0 +1,418 @@
+/*
+ * Wireless USB Host Controller
+ * sysfs glue, wusbcore module support and life cycle management
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Creation/destruction of wusbhc is split in two parts: the part that
+ * doesn't require the HCD to be added (wusbhc_{create,destroy}) and
+ * the one that does (phase B, wusbhc_b_{create,destroy}).
+ *
+ * This is so because usb_add_hcd() will start the HC, and thus, all
+ * the HC specific stuff has to be already initialiazed (like sysfs
+ * thingies).
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include "wusbhc.h"
+
+/**
+ * Extract the wusbhc that corresponds to a USB Host Controller class device
+ *
+ * WARNING! Apply only if @dev is that of a
+ * wusbhc.usb_hcd.self->class_dev; otherwise, you lose.
+ */
+static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev)
+{
+ struct usb_bus *usb_bus = dev_get_drvdata(dev);
+ struct usb_hcd *usb_hcd = bus_to_hcd(usb_bus);
+ return usb_hcd_to_wusbhc(usb_hcd);
+}
+
+/*
+ * Show & store the current WUSB trust timeout
+ *
+ * We don't do locking--it is an 'atomic' value.
+ *
+ * The units we store/show are always MILLISECONDS; the value is only
+ * converted to jiffies when (re)arming the keepalive timer.
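+ *
+ * e.g. "echo 4000 > wusb_trust_timeout" sets a 4 second trust timeout
+ * (the value is just an example).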
+ */
+static ssize_t wusb_trust_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", wusbhc->trust_timeout);
+}
+
+static ssize_t wusb_trust_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+ ssize_t result = -ENOSYS;
+ unsigned trust_timeout;
+
+ result = sscanf(buf, "%u", &trust_timeout);
+ if (result != 1) {
+ result = -EINVAL;
+ goto out;
+ }
+ /* FIXME: maybe we should check for range validity? */
+ wusbhc->trust_timeout = trust_timeout;
+ cancel_delayed_work(&wusbhc->keep_alive_timer);
+ flush_workqueue(wusbd);
+ queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
+ (trust_timeout * CONFIG_HZ)/1000/2);
+out:
+ return result < 0 ? result : size;
+}
+static DEVICE_ATTR(wusb_trust_timeout, 0644, wusb_trust_timeout_show,
+ wusb_trust_timeout_store);
+
+/*
+ * Show & store the current WUSB CHID
+ */
+static ssize_t wusb_chid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+ ssize_t result = 0;
+
+ if (wusbhc->wuie_host_info != NULL)
+ result += ckhdid_printf(buf, PAGE_SIZE,
+ &wusbhc->wuie_host_info->CHID);
+ return result;
+}
+
+/*
+ * Store a new CHID
+ *
+ * This will (FIXME) trigger many changes.
+ *
+ * - Send an all zeros CHID and it will stop the controller
+ * - Send a non-zero CHID and it will start it (unless it was already
+ *   started, in which case it will just change the CHID,
+ *   disconnecting all devices first).
+ *
+ * So first we scan the MMC we are sent and then we act on it. We
+ * read it in the same format as we print it, an ASCII string of 16
+ * hex bytes.
+ *
+ * See wusbhc_chid_set() for more info.
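+ *
+ * e.g. (the CHID value itself is just an example):
+ *
+ *   echo "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" \
+ *       > /sys/class/usb_host/usb_hostX/wusb_chid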
+ */
+static ssize_t wusb_chid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+ struct wusb_ckhdid chid;
+ ssize_t result;
+
+ result = sscanf(buf,
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx\n",
+ &chid.data[0] , &chid.data[1] ,
+ &chid.data[2] , &chid.data[3] ,
+ &chid.data[4] , &chid.data[5] ,
+ &chid.data[6] , &chid.data[7] ,
+ &chid.data[8] , &chid.data[9] ,
+ &chid.data[10], &chid.data[11],
+ &chid.data[12], &chid.data[13],
+ &chid.data[14], &chid.data[15]);
+ if (result != 16) {
+		dev_err(dev, "Unrecognized CHID (need 16 space-separated "
+			"hex bytes): %d\n", (int)result);
+ return -EINVAL;
+ }
+ result = wusbhc_chid_set(wusbhc, &chid);
+ return result < 0 ? result : size;
+}
+static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store);
+
+/* Group all the WUSBHC attributes */
+static struct attribute *wusbhc_attrs[] = {
+ &dev_attr_wusb_trust_timeout.attr,
+ &dev_attr_wusb_chid.attr,
+ NULL,
+};
+
+static struct attribute_group wusbhc_attr_group = {
+ .name = NULL, /* we want them in the same directory */
+ .attrs = wusbhc_attrs,
+};
+
+/*
+ * Create a wusbhc instance
+ *
+ * NOTEs:
+ *
+ * - assumes *wusbhc has been zeroed and wusbhc->usb_hcd has been
+ * initialized but not added.
+ *
+ * - fill out ports_max, mmcies_max and mmcie_{add,rm} before calling.
+ *
+ * - fill out wusbhc->uwb_rc and refcount it before calling
+ * - fill out the wusbhc->sec_modes array
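+ *
+ * A typical call sequence from a WUSB HC driver looks roughly like
+ * this (a sketch; 'foo' stands for a hypothetical driver private
+ * struct embedding a struct wusbhc):
+ *
+ *	wusbhc_create(&foo->wusbhc);
+ *	usb_add_hcd(&foo->wusbhc.usb_hcd, irq, irqflags);
+ *	wusbhc_b_create(&foo->wusbhc);
+ *	...
+ *	wusbhc_b_destroy(&foo->wusbhc);
+ *	usb_remove_hcd(&foo->wusbhc.usb_hcd);
+ *	wusbhc_destroy(&foo->wusbhc);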
+ */
+int wusbhc_create(struct wusbhc *wusbhc)
+{
+ int result = 0;
+
+ wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS;
+ mutex_init(&wusbhc->mutex);
+ result = wusbhc_mmcie_create(wusbhc);
+ if (result < 0)
+ goto error_mmcie_create;
+ result = wusbhc_devconnect_create(wusbhc);
+ if (result < 0)
+ goto error_devconnect_create;
+ result = wusbhc_rh_create(wusbhc);
+ if (result < 0)
+ goto error_rh_create;
+ result = wusbhc_sec_create(wusbhc);
+ if (result < 0)
+ goto error_sec_create;
+ return 0;
+
+error_sec_create:
+ wusbhc_rh_destroy(wusbhc);
+error_rh_create:
+ wusbhc_devconnect_destroy(wusbhc);
+error_devconnect_create:
+ wusbhc_mmcie_destroy(wusbhc);
+error_mmcie_create:
+ return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_create);
+
+static inline struct kobject *wusbhc_kobj(struct wusbhc *wusbhc)
+{
+ return &wusbhc->usb_hcd.self.controller->kobj;
+}
+
+/*
+ * Phase B of a wusbhc instance creation
+ *
+ * Creates fields that depend on wusbhc->usb_hcd having been
+ * added. This is where we create the sysfs files in
+ * /sys/class/usb_host/usb_hostX/.
+ *
+ * NOTE: Assumes wusbhc->usb_hcd has been already added by the upper
+ * layer (hwahc or whci)
+ */
+int wusbhc_b_create(struct wusbhc *wusbhc)
+{
+ int result = 0;
+ struct device *dev = wusbhc->usb_hcd.self.controller;
+
+ result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
+ if (result < 0) {
+ dev_err(dev, "Cannot register WUSBHC attributes: %d\n", result);
+ goto error_create_attr_group;
+ }
+
+ result = wusbhc_pal_register(wusbhc);
+ if (result < 0)
+ goto error_pal_register;
+ return 0;
+
+error_pal_register:
+ sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
+error_create_attr_group:
+ return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_b_create);
+
+void wusbhc_b_destroy(struct wusbhc *wusbhc)
+{
+ wusbhc_pal_unregister(wusbhc);
+ sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
+}
+EXPORT_SYMBOL_GPL(wusbhc_b_destroy);
+
+void wusbhc_destroy(struct wusbhc *wusbhc)
+{
+ wusbhc_sec_destroy(wusbhc);
+ wusbhc_rh_destroy(wusbhc);
+ wusbhc_devconnect_destroy(wusbhc);
+ wusbhc_mmcie_destroy(wusbhc);
+}
+EXPORT_SYMBOL_GPL(wusbhc_destroy);
+
+struct workqueue_struct *wusbd;
+EXPORT_SYMBOL_GPL(wusbd);
+
+/*
+ * WUSB Cluster ID allocation map
+ *
+ * Each WUSB bus in a channel is identified with a Cluster Id in the
+ * unauth address space (WUSB1.0[4.3]). We take the range 0xe0 to 0xff
+ * (that's space for 31 WUSB controllers, as 0xff can't be taken). We
+ * start taking from 0xff, 0xfe, 0xfd... (hence the += or -= 0xff).
+ *
+ * For each one we take, we pin it in the bitmap
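+ *
+ * e.g. bit 0 of the bitmap maps to cluster ID 0xff, bit 1 to 0xfe,
+ * bit 3 to 0xfc and so on (ID = 0xff - bit number).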
+ */
+#define CLUSTER_IDS 32
+static DECLARE_BITMAP(wusb_cluster_id_table, CLUSTER_IDS);
+static DEFINE_SPINLOCK(wusb_cluster_ids_lock);
+
+/*
+ * Get a WUSB Cluster ID
+ *
+ * Need to release with wusb_cluster_id_put() when done w/ it.
+ */
+/* FIXME: coordinate with the choose_address() from the USB stack */
+/* we want to leave the top of the 128 range for cluster addresses and
+ * the bottom for device addresses (as we map them one on one with
+ * ports). */
+u8 wusb_cluster_id_get(void)
+{
+ u8 id;
+ spin_lock(&wusb_cluster_ids_lock);
+ id = find_first_zero_bit(wusb_cluster_id_table, CLUSTER_IDS);
+	if (id >= CLUSTER_IDS) {
+ id = 0;
+ goto out;
+ }
+ set_bit(id, wusb_cluster_id_table);
+ id = (u8) 0xff - id;
+out:
+ spin_unlock(&wusb_cluster_ids_lock);
+ return id;
+
+}
+EXPORT_SYMBOL_GPL(wusb_cluster_id_get);
+
+/*
+ * Release a WUSB Cluster ID
+ *
+ * Obtained it with wusb_cluster_id_get()
+ */
+void wusb_cluster_id_put(u8 id)
+{
+ id = 0xff - id;
+ BUG_ON(id >= CLUSTER_IDS);
+ spin_lock(&wusb_cluster_ids_lock);
+ WARN_ON(!test_bit(id, wusb_cluster_id_table));
+ clear_bit(id, wusb_cluster_id_table);
+ spin_unlock(&wusb_cluster_ids_lock);
+}
+EXPORT_SYMBOL_GPL(wusb_cluster_id_put);
+
+/**
+ * wusbhc_giveback_urb - return an URB to the USB core
+ * @wusbhc: the host controller the URB is from.
+ * @urb: the URB.
+ * @status: the URB's status.
+ *
+ * Return an URB to the USB core doing some additional WUSB specific
+ * processing.
+ *
+ * - After a successful transfer, update the trust timeout timestamp
+ * for the WUSB device.
+ *
+ * - [WUSB] sections 4.13 and 7.5.1 specify that the stop retransmission
+ * condition for the WCONNECTACK_IE is that the host has observed
+ * the associated device responding to a control transfer.
+ */
+void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)
+{
+ struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
+
+ if (status == 0) {
+ wusb_dev->entry_ts = jiffies;
+
+		/* wusbhc_devconnect_acked() can't be called from
+ atomic context so defer it to a work queue. */
+ if (!list_empty(&wusb_dev->cack_node))
+ queue_work(wusbd, &wusb_dev->devconnect_acked_work);
+ }
+
+ usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status);
+}
+EXPORT_SYMBOL_GPL(wusbhc_giveback_urb);
+
+/**
+ * wusbhc_reset_all - reset the HC hardware
+ * @wusbhc: the host controller to reset.
+ *
+ * Request a full hardware reset of the chip. This will also reset
+ * the radio controller and any other PALs.
+ */
+void wusbhc_reset_all(struct wusbhc *wusbhc)
+{
+ uwb_rc_reset_all(wusbhc->uwb_rc);
+}
+EXPORT_SYMBOL_GPL(wusbhc_reset_all);
+
+static struct notifier_block wusb_usb_notifier = {
+ .notifier_call = wusb_usb_ncb,
+ .priority = INT_MAX /* Need to be called first of all */
+};
+
+static int __init wusbcore_init(void)
+{
+ int result;
+ result = wusb_crypto_init();
+ if (result < 0)
+ goto error_crypto_init;
+ /* WQ is singlethread because we need to serialize notifications */
+ wusbd = create_singlethread_workqueue("wusbd");
+ if (wusbd == NULL) {
+ result = -ENOMEM;
+ printk(KERN_ERR "WUSB-core: Cannot create wusbd workqueue\n");
+ goto error_wusbd_create;
+ }
+ usb_register_notify(&wusb_usb_notifier);
+ bitmap_zero(wusb_cluster_id_table, CLUSTER_IDS);
+ set_bit(0, wusb_cluster_id_table); /* reserve Cluster ID 0xff */
+ return 0;
+
+error_wusbd_create:
+ wusb_crypto_exit();
+error_crypto_init:
+ return result;
+
+}
+module_init(wusbcore_init);
+
+static void __exit wusbcore_exit(void)
+{
+ clear_bit(0, wusb_cluster_id_table);
+ if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) {
+ char buf[256];
+ bitmap_scnprintf(buf, sizeof(buf), wusb_cluster_id_table,
+ CLUSTER_IDS);
+ printk(KERN_ERR "BUG: WUSB Cluster IDs not released "
+ "on exit: %s\n", buf);
+ WARN_ON(1);
+ }
+ usb_unregister_notify(&wusb_usb_notifier);
+ destroy_workqueue(wusbd);
+ wusb_crypto_exit();
+}
+module_exit(wusbcore_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Wireless USB core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
new file mode 100644
index 00000000000..d0c132434f1
--- /dev/null
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -0,0 +1,495 @@
+/*
+ * Wireless USB Host Controller
+ * Common infrastructure for WHCI and HWA WUSB-HC drivers
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This driver implements parts common to all Wireless USB Host
+ * Controllers (struct wusbhc, embedding a struct usb_hcd) and is used
+ * by:
+ *
+ * - hwahc: HWA, USB-dongle that implements a Wireless USB host
+ * controller, (Wireless USB 1.0 Host-Wire-Adapter specification).
+ *
+ * - whci: WHCI, a PCI card with a wireless host controller
+ * (Wireless Host Controller Interface 1.0 specification).
+ *
+ * Check out the Design-overview.txt file in the source documentation
+ * for other details on the implementation.
+ *
+ * Main blocks:
+ *
+ * rh Root Hub emulation (part of the HCD glue)
+ *
+ * devconnect Handle all the issues related to device connection,
+ * authentication, disconnection, timeout, resetting,
+ * keepalives, etc.
+ *
+ * mmc MMC IE broadcasting handling
+ *
+ * A host controller driver just initializes its own state and, as part
+ * of that, creates a 'struct wusbhc' instance that handles all the
+ * common WUSB mechanisms. It links in the function ops that are specific
+ * to it and then registers the host controller. Ready to run.
+ */
+
+#ifndef __WUSBHC_H__
+#define __WUSBHC_H__
+
+#include <linux/usb.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+/* FIXME: Yes, I know: BAD--it's not my fault the USB HC iface is not
+ * public */
+#include <linux/../../drivers/usb/core/hcd.h>
+#include <linux/uwb.h>
+#include <linux/usb/wusb.h>
+
+
+/**
+ * Wireless USB device
+ *
+ * Describe a WUSB device connected to the cluster. This struct
+ * belongs to the 'struct wusb_port' it is attached to, and that port is
+ * responsible for setting and clearing the pointer to it.
+ *
+ * Note this "complements" the 'struct usb_device' that the usb_hcd
+ * keeps for each connected USB device. However, it carries information
+ * that is not available there (there is no hcpriv ptr in it!) *and*,
+ * most importantly, its life cycle is different. It is created
+ * as soon as we get a DN_Connect (connect request notification) from
+ * the device through the WUSB host controller; the USB stack doesn't
+ * create the device until we authenticate it. FIXME: this will
+ * change.
+ *
+ * @bos: This is allocated when the BOS descriptors are read from
+ * the device and freed upon the wusb_dev struct dying.
+ * @wusb_cap_descr: points into @bos, and has been verified to be size
+ * safe.
+ */
+struct wusb_dev {
+ struct kref refcnt;
+ struct wusbhc *wusbhc;
+ struct list_head cack_node; /* Connect-Ack list */
+ u8 port_idx;
+ u8 addr;
+ u8 beacon_type:4;
+ struct usb_encryption_descriptor ccm1_etd;
+ struct wusb_ckhdid cdid;
+ unsigned long entry_ts;
+ struct usb_bos_descriptor *bos;
+ struct usb_wireless_cap_descriptor *wusb_cap_descr;
+ struct uwb_mas_bm availability;
+ struct work_struct devconnect_acked_work;
+ struct urb *set_gtk_urb;
+ struct usb_ctrlrequest *set_gtk_req;
+ struct usb_device *usb_dev;
+};
+
+#define WUSB_DEV_ADDR_UNAUTH 0x80
+
+static inline void wusb_dev_init(struct wusb_dev *wusb_dev)
+{
+ kref_init(&wusb_dev->refcnt);
+ /* no need to init the cack_node */
+}
+
+extern void wusb_dev_destroy(struct kref *_wusb_dev);
+
+static inline struct wusb_dev *wusb_dev_get(struct wusb_dev *wusb_dev)
+{
+ kref_get(&wusb_dev->refcnt);
+ return wusb_dev;
+}
+
+static inline void wusb_dev_put(struct wusb_dev *wusb_dev)
+{
+ kref_put(&wusb_dev->refcnt, wusb_dev_destroy);
+}
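+
+/*
+ * Illustrative sketch only (not part of this header): a caller that wants
+ * to use a wusb_dev outside wusbhc->mutex holds a reference while doing so:
+ *
+ *	struct wusb_dev *wusb_dev;
+ *
+ *	mutex_lock(&wusbhc->mutex);
+ *	wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
+ *	if (wusb_dev)
+ *		wusb_dev_get(wusb_dev);
+ *	mutex_unlock(&wusbhc->mutex);
+ *	if (wusb_dev) {
+ *		// ... use wusb_dev ...
+ *		wusb_dev_put(wusb_dev);
+ *	}
+ */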
+
+/**
+ * Wireless USB Host Controller root hub "fake" ports
+ * (state and device information)
+ *
+ * Wireless USB is wireless, so there are no ports; but we
+ * fake'em. Each RC can connect a maximum number of devices at the same
+ * time (given by bNumPorts in the Wireless Adapter descriptor or by the
+ * WHCI capabilities), kept in wusbhc->ports_max.
+ *
+ * See rh.c for more information.
+ *
+ * The @status and @change use the same bits as in USB2.0[11.24.2.7],
+ * so we don't have to do much when getting the port's status.
+ *
+ * WUSB1.0[7.1], USB2.0[11.24.2.7.1,fig 11-10],
+ * include/linux/usb_ch9.h (#define USB_PORT_STAT_*)
+ */
+struct wusb_port {
+ u16 status;
+ u16 change;
+ struct wusb_dev *wusb_dev; /* connected device's info */
+ unsigned reset_count;
+ u32 ptk_tkid;
+};
+
+/**
+ * WUSB Host Controller specifics
+ *
+ * All fields that are common to all Wireless USB controller types
+ * (HWA and WHCI) are grouped here. Host Controller
+ * functions/operations that only deal with general Wireless USB HC
+ * issues use this data type to refer to the host.
+ *
+ * @usb_hcd Instantiation of a USB host controller
+ * (initialized by the upper layer [HWA-HC or WHCI]).
+ *
+ * @dev Device that implements this; initialized by the
+ * upper layer (HWA-HC, WHCI...); this device should
+ * have a refcount.
+ *
+ * @trust_timeout After this time without hearing any activity from
+ * a device, we consider the device gone and we have to
+ * re-authenticate it.
+ *
+ * Can be accessed without locking--however, read it into a
+ * local variable and then use that copy.
+ *
+ * @chid WUSB Cluster Host ID: this is supposed to be a
+ * unique value that doesn't change across reboots (so
+ * that your devices do not require re-association).
+ *
+ * Read/Write protected by @mutex
+ *
+ * @dev_info This array has ports_max elements. It is used to
+ * give the HC information about the WUSB devices (see
+ * 'struct wusb_dev_info').
+ *
+ * For HWA we need to allocate it on the heap; for WHCI it
+ * needs to be permanently mapped, so we keep it for
+ * both and make it easy. Call wusbhc->dev_info_set()
+ * to update an entry.
+ *
+ * @ports_max Number of simultaneous device connections (fake
+ * ports) this HC will take. Read-only.
+ *
+ * @port Array of port status for each fake root port. Guaranteed to
+ * always be the same length during device existence
+ * [this allows for some unlocked but referenced reading].
+ *
+ * @mmcies_max Max number of Information Elements this HC can send
+ * in its MMC. Read-only.
+ *
+ * @mmcie_add HC specific operation (WHCI or HWA) for adding an
+ * MMCIE.
+ *
+ * @mmcie_rm HC specific operation (WHCI or HWA) for removing an
+ * MMCIE.
+ *
+ * @enc_types Array which describes the encryption methods
+ * supported by the host as described in WUSB1.0 --
+ * one entry per supported method. As of WUSB1.0 there
+ * are only four methods; we make space for eight just in
+ * case they decide to add some more (and pray they do
+ * it in sequential order). If 'enc_types[enc_method]
+ * != 0', then it is supported by the host. enc_method
+ * is USB_ENC_TYPE*.
+ *
+ * @set_ptk: Set the PTK and enable encryption for a device. Or, if
+ * the supplied key is NULL, disable encryption for that
+ * device.
+ *
+ * @set_gtk: Set the GTK to be used for all future broadcast packets
+ * (i.e., MMCs). With some hardware, setting the GTK may start
+ * MMC transmission.
+ *
+ * NOTE:
+ *
+ * - If wusb_dev->usb_dev is not NULL, then usb_dev is valid
+ * (wusb_dev has a refcount on it). Likewise, if usb_dev->wusb_dev
+ * is not NULL, usb_dev->wusb_dev is valid (usb_dev keeps a
+ * refcount on it).
+ *
+ * Most of the time, when you need to use it, it will be non-NULL,
+ * so there is no real need to check for it (wusb_dev will
+ * disappear before usb_dev).
+ *
+ * - The following fields need to be filled out before calling
+ * wusbhc_create(): ports_max, mmcies_max, mmcie_{add,rm}.
+ *
+ * - there is no wusbhc_init() method, we do everything in
+ * wusbhc_create().
+ *
+ * - Creation is done in two phases, wusbhc_create() and
+ * wusbhc_b_create(); the "b" phase covers the parts that must run
+ * after calling usb_add_hcd(&wusbhc->usb_hcd, ...); see the usage
+ * sketch after the function prototypes below.
+ */
+struct wusbhc {
+ struct usb_hcd usb_hcd; /* HAS TO BE 1st */
+ struct device *dev;
+ struct uwb_rc *uwb_rc;
+ struct uwb_pal pal;
+
+ unsigned trust_timeout; /* in jiffies */
+ struct wuie_host_info *wuie_host_info; /* Includes CHID */
+
+ struct mutex mutex; /* locks everything else */
+ u16 cluster_id; /* Wireless USB Cluster ID */
+ struct wusb_port *port; /* Fake port status handling */
+ struct wusb_dev_info *dev_info; /* for Set Device Info mgmt */
+ u8 ports_max;
+ unsigned active:1; /* currently xmit'ing MMCs */
+ struct wuie_keep_alive keep_alive_ie; /* protected by mutex */
+ struct delayed_work keep_alive_timer;
+ struct list_head cack_list; /* Connect acknowledging */
+ size_t cack_count; /* protected by 'mutex' */
+ struct wuie_connect_ack cack_ie;
+ struct uwb_rsv *rsv; /* cluster bandwidth reservation */
+
+ struct mutex mmcie_mutex; /* MMC WUIE handling */
+ struct wuie_hdr **mmcie; /* WUIE array */
+ u8 mmcies_max;
+ /* FIXME: make wusbhc_ops? */
+ int (*start)(struct wusbhc *wusbhc);
+ void (*stop)(struct wusbhc *wusbhc);
+ int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+ u8 handle, struct wuie_hdr *wuie);
+ int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle);
+ int (*dev_info_set)(struct wusbhc *, struct wusb_dev *wusb_dev);
+ int (*bwa_set)(struct wusbhc *wusbhc, s8 stream_index,
+ const struct uwb_mas_bm *);
+ int (*set_ptk)(struct wusbhc *wusbhc, u8 port_idx,
+ u32 tkid, const void *key, size_t key_size);
+ int (*set_gtk)(struct wusbhc *wusbhc,
+ u32 tkid, const void *key, size_t key_size);
+ int (*set_num_dnts)(struct wusbhc *wusbhc, u8 interval, u8 slots);
+
+ struct {
+ struct usb_key_descriptor descr;
+ u8 data[16]; /* GTK key data */
+ } __attribute__((packed)) gtk;
+ u8 gtk_index;
+ u32 gtk_tkid;
+ struct work_struct gtk_rekey_done_work;
+ int pending_set_gtks;
+
+ struct usb_encryption_descriptor *ccm1_etd;
+};
+
+#define usb_hcd_to_wusbhc(u) container_of((u), struct wusbhc, usb_hcd)
+
+
+extern int wusbhc_create(struct wusbhc *);
+extern int wusbhc_b_create(struct wusbhc *);
+extern void wusbhc_b_destroy(struct wusbhc *);
+extern void wusbhc_destroy(struct wusbhc *);
+extern int wusb_dev_sysfs_add(struct wusbhc *, struct usb_device *,
+ struct wusb_dev *);
+extern void wusb_dev_sysfs_rm(struct wusb_dev *);
+extern int wusbhc_sec_create(struct wusbhc *);
+extern int wusbhc_sec_start(struct wusbhc *);
+extern void wusbhc_sec_stop(struct wusbhc *);
+extern void wusbhc_sec_destroy(struct wusbhc *);
+extern void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb,
+ int status);
+void wusbhc_reset_all(struct wusbhc *wusbhc);
+
+int wusbhc_pal_register(struct wusbhc *wusbhc);
+void wusbhc_pal_unregister(struct wusbhc *wusbhc);
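+
+/*
+ * Illustrative sketch only (an assumption about typical use, not lifted
+ * from a real driver): the two-phase creation described in the struct
+ * wusbhc notes above roughly maps to this sequence in a HC driver's
+ * probe path ("my_mmcie_add"/"my_mmcie_rm" are hypothetical callbacks):
+ *
+ *	wusbhc->ports_max  = ...;	// from the hardware's capabilities
+ *	wusbhc->mmcies_max = ...;
+ *	wusbhc->mmcie_add  = my_mmcie_add;
+ *	wusbhc->mmcie_rm   = my_mmcie_rm;
+ *	result = wusbhc_create(wusbhc);
+ *	if (result < 0)
+ *		goto error_wusbhc_create;
+ *	result = usb_add_hcd(&wusbhc->usb_hcd, irq, IRQF_SHARED);
+ *	if (result < 0)
+ *		goto error_add_hcd;
+ *	result = wusbhc_b_create(wusbhc);
+ *	if (result < 0)
+ *		goto error_wusbhc_b_create;
+ *
+ * Teardown runs in reverse: wusbhc_b_destroy(), usb_remove_hcd(),
+ * wusbhc_destroy().
+ */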
+
+/*
+ * Return @usb_dev's @usb_hcd (properly referenced) or NULL if gone
+ *
+ * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
+ *
+ * This is a safe assumption as @usb_dev->bus is referenced all the
+ * time during the @usb_dev life cycle.
+ */
+static inline struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
+{
+ struct usb_hcd *usb_hcd;
+ usb_hcd = container_of(usb_dev->bus, struct usb_hcd, self);
+ return usb_get_hcd(usb_hcd);
+}
+
+/*
+ * Increment the reference count on a wusbhc.
+ *
+ * @wusbhc's life cycle is identical to that of the underlying usb_hcd.
+ */
+static inline struct wusbhc *wusbhc_get(struct wusbhc *wusbhc)
+{
+ return usb_get_hcd(&wusbhc->usb_hcd) ? wusbhc : NULL;
+}
+
+/*
+ * Return the wusbhc associated with a @usb_dev
+ *
+ * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
+ *
+ * @returns: wusbhc for @usb_dev; NULL if the @usb_dev is being torn down.
+ * WARNING: referenced at the usb_hcd level, unlocked
+ *
+ * FIXME: move offline
+ */
+static inline struct wusbhc *wusbhc_get_by_usb_dev(struct usb_device *usb_dev)
+{
+ struct usb_hcd *usb_hcd;
+ if (usb_dev->devnum > 1 && !usb_dev->wusb) {
+ /* everything but root hubs must be a WUSB device */
+ dev_err(&usb_dev->dev, "devnum %d wusb %d\n", usb_dev->devnum,
+ usb_dev->wusb);
+ BUG_ON(usb_dev->devnum > 1 && !usb_dev->wusb);
+ }
+ usb_hcd = usb_hcd_get_by_usb_dev(usb_dev);
+ if (usb_hcd == NULL)
+ return NULL;
+ BUG_ON(usb_hcd->wireless == 0);
+ return usb_hcd_to_wusbhc(usb_hcd);
+}
+
+
+static inline void wusbhc_put(struct wusbhc *wusbhc)
+{
+ usb_put_hcd(&wusbhc->usb_hcd);
+}
+
+int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid);
+void wusbhc_stop(struct wusbhc *wusbhc);
+extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *);
+
+/* Device connect handling */
+extern int wusbhc_devconnect_create(struct wusbhc *);
+extern void wusbhc_devconnect_destroy(struct wusbhc *);
+extern int wusbhc_devconnect_start(struct wusbhc *wusbhc,
+ const struct wusb_ckhdid *chid);
+extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc);
+extern int wusbhc_devconnect_auth(struct wusbhc *, u8);
+extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr,
+ struct wusb_dn_hdr *dn_hdr, size_t size);
+extern int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port);
+extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port);
+extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
+ void *priv);
+extern int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
+ u8 addr);
+
+/* Wireless USB fake Root Hub methods */
+extern int wusbhc_rh_create(struct wusbhc *);
+extern void wusbhc_rh_destroy(struct wusbhc *);
+
+extern int wusbhc_rh_status_data(struct usb_hcd *, char *);
+extern int wusbhc_rh_control(struct usb_hcd *, u16, u16, u16, char *, u16);
+extern int wusbhc_rh_suspend(struct usb_hcd *);
+extern int wusbhc_rh_resume(struct usb_hcd *);
+extern int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned);
+
+/* MMC handling */
+extern int wusbhc_mmcie_create(struct wusbhc *);
+extern void wusbhc_mmcie_destroy(struct wusbhc *);
+extern int wusbhc_mmcie_set(struct wusbhc *, u8 interval, u8 repeat_cnt,
+ struct wuie_hdr *);
+extern void wusbhc_mmcie_rm(struct wusbhc *, struct wuie_hdr *);
+
+/* Bandwidth reservation */
+int wusbhc_rsv_establish(struct wusbhc *wusbhc);
+void wusbhc_rsv_terminate(struct wusbhc *wusbhc);
+
+/*
+ * I've always said
+ * I wanted a wedding in a church...
+ *
+ * but lately I've been thinking about
+ * the Botanical Gardens.
+ *
+ * We could do it by the tulips.
+ * It'll be beautiful
+ *
+ * --Security!
+ */
+extern int wusb_dev_sec_add(struct wusbhc *, struct usb_device *,
+ struct wusb_dev *);
+extern void wusb_dev_sec_rm(struct wusb_dev *) ;
+extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *,
+ struct wusb_ckhdid *ck);
+void wusbhc_gtk_rekey(struct wusbhc *wusbhc);
+
+
+/* WUSB Cluster ID handling */
+extern u8 wusb_cluster_id_get(void);
+extern void wusb_cluster_id_put(u8);
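+
+/*
+ * Illustrative usage (sketch only): a host controller grabs a Cluster ID
+ * when it starts and releases it when it stops. wusb_cluster_id_get()
+ * returns 0 when no IDs are left (the error code below is just an
+ * example choice):
+ *
+ *	wusbhc->cluster_id = wusb_cluster_id_get();
+ *	if (wusbhc->cluster_id == 0)
+ *		return -ENOSPC;
+ *	// ... beacon, accept device connections, etc ...
+ *	wusb_cluster_id_put(wusbhc->cluster_id);
+ */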
+
+/*
+ * wusb_port_by_idx - return the port associated with a zero-based port index
+ *
+ * NOTE: valid without locking as long as wusbhc is referenced (as the
+ * number of ports doesn't change). The data pointed to has to
+ * be verified though :)
+ */
+static inline struct wusb_port *wusb_port_by_idx(struct wusbhc *wusbhc,
+ u8 port_idx)
+{
+ return &wusbhc->port[port_idx];
+}
+
+/*
+ * wusb_port_no_to_idx - Convert port number (per usb_dev->portnum) to
+ * a port_idx.
+ *
+ * USB stack port numbers are 1-based!!
+ *
+ * NOTE: only valid for WUSB devices!!!
+ */
+static inline u8 wusb_port_no_to_idx(u8 port_no)
+{
+ return port_no - 1;
+}
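+
+/*
+ * Illustrative sketch only (not part of this header): going from a
+ * connected WUSB usb_device to its fake-port state combines the two
+ * helpers above:
+ *
+ *	struct wusb_port *port;
+ *
+ *	port = wusb_port_by_idx(wusbhc,
+ *				wusb_port_no_to_idx(usb_dev->portnum));
+ *	if (port->wusb_dev) {
+ *		// e.g. inspect port->status / port->change
+ *	}
+ */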
+
+extern struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *,
+ struct usb_device *);
+
+/*
+ * Return a referenced wusb_dev given a @usb_dev
+ *
+ * Returns NULL if the usb_dev is being torn down.
+ *
+ * FIXME: move offline
+ */
+static inline
+struct wusb_dev *wusb_dev_get_by_usb_dev(struct usb_device *usb_dev)
+{
+ struct wusbhc *wusbhc;
+ struct wusb_dev *wusb_dev;
+ wusbhc = wusbhc_get_by_usb_dev(usb_dev);
+ if (wusbhc == NULL)
+ return NULL;
+ mutex_lock(&wusbhc->mutex);
+ wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
+ mutex_unlock(&wusbhc->mutex);
+ wusbhc_put(wusbhc);
+ return wusb_dev;
+}
+
+/* Misc */
+
+extern struct workqueue_struct *wusbd;
+#endif /* #ifndef __WUSBHC_H__ */
diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig
new file mode 100644
index 00000000000..ca783127af3
--- /dev/null
+++ b/drivers/uwb/Kconfig
@@ -0,0 +1,90 @@
+#
+# UWB device configuration
+#
+
+menuconfig UWB
+ tristate "Ultra Wideband devices (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on PCI
+ default n
+ help
+ UWB is a high-bandwidth, low-power, point-to-point radio
+ technology using a wide spectrum (3.1-10.6GHz). It is
+ optimized for in-room use (480Mbps at 2 meters, 110Mbps at
+ 10m). It serves as the transport layer for other protocols,
+ such as Wireless USB (WUSB), IP (WLP) and upcoming
+ Bluetooth and 1394.
+
+ The topology is peer to peer; however, higher level
+ protocols (such as WUSB) might impose a master/slave
+ relationship.
+
+ Say Y here if your computer has USB- or PCI-based UWB radio
+ controllers. You will also need to enable the radio controller
+ drivers below. It is OK to select all of them, no harm done.
+
+ For more help check the UWB and WUSB related files in
+ <file:Documentation/usb/>.
+
+ To compile the UWB stack as a module, choose M here.
+
+if UWB
+
+config UWB_HWA
+ tristate "UWB Radio Control driver for WUSB-compliant USB dongles (HWA)"
+ depends on USB
+ help
+ This driver enables the radio controller for HWA USB
+ devices. HWA stands for Host Wire Adapter, and it is a UWB
+ Radio Controller connected to your system via USB. Most of
+ them also come with a Wireless USB host controller.
+
+ To compile this driver select Y (built in) or M (module). It
+ is safe to select any even if you do not have the hardware.
+
+config UWB_WHCI
+ tristate "UWB Radio Control driver for WHCI-compliant cards"
+ depends on PCI
+ help
+ This driver enables the radio controller for WHCI cards.
+
+ WHCI is a specification developed by Intel
+ (http://www.intel.com/technology/comms/wusb/whci.htm) much
+ in the spirit of USB's EHCI, but for UWB and Wireless USB
+ radio/host controllers connected via memory mapping (e.g.
+ PCI). Most of these cards also come with a Wireless USB host
+ controller.
+
+ To compile this driver select Y (built in) or M (module). It
+ is safe to select any even if you do not have the hardware.
+
+config UWB_WLP
+ tristate "Support WiMedia Link Protocol (Ethernet/IP over UWB)"
+ depends on UWB && NET
+ help
+ This is a common library for drivers that implement
+ networking over UWB.
+
+config UWB_I1480U
+ tristate "Support for Intel Wireless UWB Link 1480 HWA"
+ depends on UWB_HWA
+ select FW_LOADER
+ help
+ This driver enables support for the i1480 when connected via
+ USB. It consists of a firmware uploader that will enable it
+ to behave as an HWA device.
+
+ To compile this driver select Y (built in) or M (module). It
+ is safe to select any even if you do not have the hardware.
+
+config UWB_I1480U_WLP
+ tristate "Support for Intel Wireless UWB Link 1480 HWA's WLP interface"
+ depends on UWB_I1480U && UWB_WLP && NET
+ help
+ This driver enables WLP support for the i1480 when connected via
+ USB. WLP is the WiMedia Link Protocol, or IP over UWB.
+
+ To compile this driver select Y (built in) or M (module). It
+ is safe to select any even if you don't have the hardware.
+
+endif # UWB
diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile
new file mode 100644
index 00000000000..257e6908304
--- /dev/null
+++ b/drivers/uwb/Makefile
@@ -0,0 +1,29 @@
+obj-$(CONFIG_UWB) += uwb.o
+obj-$(CONFIG_UWB_WLP) += wlp/
+obj-$(CONFIG_UWB_WHCI) += umc.o whci.o whc-rc.o
+obj-$(CONFIG_UWB_HWA) += hwa-rc.o
+obj-$(CONFIG_UWB_I1480U) += i1480/
+
+uwb-objs := \
+ address.o \
+ beacon.o \
+ driver.o \
+ drp.o \
+ drp-avail.o \
+ drp-ie.o \
+ est.o \
+ ie.o \
+ lc-dev.o \
+ lc-rc.o \
+ neh.o \
+ pal.o \
+ reset.o \
+ rsv.o \
+ scan.o \
+ uwb-debug.o \
+ uwbd.o
+
+umc-objs := \
+ umc-bus.o \
+ umc-dev.o \
+ umc-drv.o
diff --git a/drivers/uwb/address.c b/drivers/uwb/address.c
new file mode 100644
index 00000000000..1664ae5f170
--- /dev/null
+++ b/drivers/uwb/address.c
@@ -0,0 +1,374 @@
+/*
+ * Ultra Wide Band
+ * Address management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/random.h>
+#include <linux/etherdevice.h>
+#include <linux/uwb/debug.h>
+#include "uwb-internal.h"
+
+
+/** Device Address Management command */
+struct uwb_rc_cmd_dev_addr_mgmt {
+ struct uwb_rccb rccb;
+ u8 bmOperationType;
+ u8 baAddr[6];
+} __attribute__((packed));
+
+
+/**
+ * Low level command for setting/getting UWB radio's addresses
+ *
+ * @rc: UWB Radio Controller instance
+ * @bmOperationType:
+ * Set/get, MAC/DEV (see WUSB1.0[8.6.2.2])
+ * @baAddr: address buffer--assumed to have enough data to hold
+ * the address type requested.
+ * @reply: Pointer to reply buffer (can be stack allocated)
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * @cmd has to be dynamically allocated because USB cannot handle stack
+ * or vmalloc buffers on some host architectures (they must be DMA-able).
+ */
+static
+int uwb_rc_dev_addr_mgmt(struct uwb_rc *rc,
+ u8 bmOperationType, const u8 *baAddr,
+ struct uwb_rc_evt_dev_addr_mgmt *reply)
+{
+ int result;
+ struct uwb_rc_cmd_dev_addr_mgmt *cmd;
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_kzalloc;
+ cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
+ cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_DEV_ADDR_MGMT);
+ cmd->bmOperationType = bmOperationType;
+ if (baAddr) {
+ size_t size = 0;
+ switch (bmOperationType >> 1) {
+ case 0: size = 2; break;
+ case 1: size = 6; break;
+ default: BUG();
+ }
+ memcpy(cmd->baAddr, baAddr, size);
+ }
+ reply->rceb.bEventType = UWB_RC_CET_GENERAL;
+ reply->rceb.wEvent = UWB_RC_CMD_DEV_ADDR_MGMT;
+ result = uwb_rc_cmd(rc, "DEV-ADDR-MGMT",
+ &cmd->rccb, sizeof(*cmd),
+ &reply->rceb, sizeof(*reply));
+ if (result < 0)
+ goto error_cmd;
+ if (result < sizeof(*reply)) {
+ dev_err(&rc->uwb_dev.dev,
+ "DEV-ADDR-MGMT: not enough data replied: "
+ "%d vs %zu bytes needed\n", result, sizeof(*reply));
+ result = -ENOMSG;
+ } else if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
+ dev_err(&rc->uwb_dev.dev,
+ "DEV-ADDR-MGMT: command execution failed: %s (%d)\n",
+ uwb_rc_strerror(reply->bResultCode),
+ reply->bResultCode);
+ result = -EIO;
+ } else
+ result = 0;
+error_cmd:
+ kfree(cmd);
+error_kzalloc:
+ return result;
+}
+
+
+/**
+ * Set the UWB RC MAC or device address.
+ *
+ * @rc: UWB Radio Controller
+ * @_addr: Pointer to address to write [assumed to be either a
+ * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *'].
+ * @type: Type of address to set (UWB_ADDR_DEV or UWB_ADDR_MAC).
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Some anal retentivity here: even if both 'struct
+ * uwb_{dev,mac}_addr' have the actual byte array at the same offset
+ * and I could just pass _addr to uwb_rc_dev_addr_mgmt(), I prefer
+ * to use some syntactic sugar in case someday we decide to change the
+ * format of the structs. The compiler will optimize it out anyway.
+ */
+static int uwb_rc_addr_set(struct uwb_rc *rc,
+ const void *_addr, enum uwb_addr_type type)
+{
+ int result;
+ u8 bmOperationType = 0x1; /* Set address */
+ const struct uwb_dev_addr *dev_addr = _addr;
+ const struct uwb_mac_addr *mac_addr = _addr;
+ struct uwb_rc_evt_dev_addr_mgmt reply;
+ const u8 *baAddr;
+
+ result = -EINVAL;
+ switch (type) {
+ case UWB_ADDR_DEV:
+ baAddr = dev_addr->data;
+ break;
+ case UWB_ADDR_MAC:
+ baAddr = mac_addr->data;
+ bmOperationType |= 0x2;
+ break;
+ default:
+ return result;
+ }
+ return uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &reply);
+}
+
+
+/**
+ * Get the UWB radio's MAC or device address.
+ *
+ * @rc: UWB Radio Controller
+ * @_addr: Where to write the address data [assumed to be either a
+ * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *'].
+ * @type: Type of address to get (UWB_ADDR_DEV or UWB_ADDR_MAC).
+ * @returns: 0 if ok (and *_addr set), < 0 errno code on error.
+ *
+ * See comment in uwb_rc_addr_set() about anal retentivity in the
+ * type handling of the address variables.
+ */
+static int uwb_rc_addr_get(struct uwb_rc *rc,
+ void *_addr, enum uwb_addr_type type)
+{
+ int result;
+ u8 bmOperationType = 0x0; /* Get address */
+ struct uwb_rc_evt_dev_addr_mgmt evt;
+ struct uwb_dev_addr *dev_addr = _addr;
+ struct uwb_mac_addr *mac_addr = _addr;
+ u8 *baAddr;
+
+ result = -EINVAL;
+ switch (type) {
+ case UWB_ADDR_DEV:
+ baAddr = dev_addr->data;
+ break;
+ case UWB_ADDR_MAC:
+ bmOperationType |= 0x2;
+ baAddr = mac_addr->data;
+ break;
+ default:
+ return result;
+ }
+ result = uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &evt);
+ if (result == 0)
+ switch (type) {
+ case UWB_ADDR_DEV:
+ memcpy(&dev_addr->data, evt.baAddr,
+ sizeof(dev_addr->data));
+ break;
+ case UWB_ADDR_MAC:
+ memcpy(&mac_addr->data, evt.baAddr,
+ sizeof(mac_addr->data));
+ break;
+ default: /* shut gcc up */
+ BUG();
+ }
+ return result;
+}
+
+
+/** Get @rc's MAC address to @addr */
+int uwb_rc_mac_addr_get(struct uwb_rc *rc,
+ struct uwb_mac_addr *addr) {
+ return uwb_rc_addr_get(rc, addr, UWB_ADDR_MAC);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_mac_addr_get);
+
+
+/** Get @rc's device address to @addr */
+int uwb_rc_dev_addr_get(struct uwb_rc *rc,
+ struct uwb_dev_addr *addr) {
+ return uwb_rc_addr_get(rc, addr, UWB_ADDR_DEV);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_dev_addr_get);
+
+
+/** Set @rc's MAC address to @addr */
+int uwb_rc_mac_addr_set(struct uwb_rc *rc,
+ const struct uwb_mac_addr *addr)
+{
+ int result = -EINVAL;
+ mutex_lock(&rc->uwb_dev.mutex);
+ result = uwb_rc_addr_set(rc, addr, UWB_ADDR_MAC);
+ mutex_unlock(&rc->uwb_dev.mutex);
+ return result;
+}
+
+
+/** Set @rc's device address to @addr */
+int uwb_rc_dev_addr_set(struct uwb_rc *rc,
+ const struct uwb_dev_addr *addr)
+{
+ int result = -EINVAL;
+ mutex_lock(&rc->uwb_dev.mutex);
+ result = uwb_rc_addr_set(rc, addr, UWB_ADDR_DEV);
+ rc->uwb_dev.dev_addr = *addr;
+ mutex_unlock(&rc->uwb_dev.mutex);
+ return result;
+}
+
+/* Returns !0 if given address is already assigned to device. */
+int __uwb_mac_addr_assigned_check(struct device *dev, void *_addr)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_mac_addr *addr = _addr;
+
+ if (!uwb_mac_addr_cmp(addr, &uwb_dev->mac_addr))
+ return !0;
+ return 0;
+}
+
+/* Returns !0 if given address is already assigned to device. */
+int __uwb_dev_addr_assigned_check(struct device *dev, void *_addr)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_dev_addr *addr = _addr;
+ if (!uwb_dev_addr_cmp(addr, &uwb_dev->dev_addr))
+ return !0;
+ return 0;
+}
+
+/**
+ * uwb_rc_dev_addr_assign - assign a generated DevAddr to a radio controller
+ * @rc: the (local) radio controller device requiring a new DevAddr
+ *
+ * A new DevAddr is required when:
+ * - first setting up a radio controller
+ * - if the hardware reports a DevAddr conflict
+ *
+ * The DevAddr is randomly generated in the generated DevAddr range
+ * [0x100, 0xfeff]. The number of devices in a beacon group is limited
+ * by mMaxBPLength (96) so this address space will never be exhausted.
+ *
+ * [ECMA-368] 17.1.1, 17.16.
+ */
+int uwb_rc_dev_addr_assign(struct uwb_rc *rc)
+{
+ struct uwb_dev_addr new_addr;
+
+ do {
+ get_random_bytes(new_addr.data, sizeof(new_addr.data));
+ } while (new_addr.data[0] == 0x00 || new_addr.data[0] == 0xff
+ || __uwb_dev_addr_assigned(rc, &new_addr));
+
+ return uwb_rc_dev_addr_set(rc, &new_addr);
+}
+
+/**
+ * uwbd_evt_handle_rc_dev_addr_conflict - handle a DEV_ADDR_CONFLICT event
+ * @evt: the DEV_ADDR_CONFLICT notification from the radio controller
+ *
+ * A new (non-conflicting) DevAddr is assigned to the radio controller.
+ *
+ * [ECMA-368] 17.1.1.1.
+ */
+int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt)
+{
+ struct uwb_rc *rc = evt->rc;
+
+ return uwb_rc_dev_addr_assign(rc);
+}
+
+/*
+ * Print the 48-bit EUI MAC address of the radio controller when
+ * reading /sys/class/uwb_rc/XX/mac_address
+ */
+static ssize_t uwb_rc_mac_addr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_rc *rc = uwb_dev->rc;
+ struct uwb_mac_addr addr;
+ ssize_t result;
+
+ mutex_lock(&rc->uwb_dev.mutex);
+ result = uwb_rc_addr_get(rc, &addr, UWB_ADDR_MAC);
+ mutex_unlock(&rc->uwb_dev.mutex);
+ if (result >= 0) {
+ result = uwb_mac_addr_print(buf, UWB_ADDR_STRSIZE, &addr);
+ buf[result++] = '\n';
+ }
+ return result;
+}
+
+/*
+ * Parse a 48 bit address written to /sys/class/uwb_rc/XX/mac_address
+ * and if correct, set it.
+ */
+static ssize_t uwb_rc_mac_addr_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_rc *rc = uwb_dev->rc;
+ struct uwb_mac_addr addr;
+ ssize_t result;
+
+ result = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx\n",
+ &addr.data[0], &addr.data[1], &addr.data[2],
+ &addr.data[3], &addr.data[4], &addr.data[5]);
+ if (result != 6) {
+ result = -EINVAL;
+ goto out;
+ }
+ if (is_multicast_ether_addr(addr.data)) {
+ dev_err(&rc->uwb_dev.dev, "refusing to set multicast "
+ "MAC address %s\n", buf);
+ result = -EINVAL;
+ goto out;
+ }
+ result = uwb_rc_mac_addr_set(rc, &addr);
+ if (result == 0)
+ rc->uwb_dev.mac_addr = addr;
+out:
+ return result < 0 ? result : size;
+}
+DEVICE_ATTR(mac_address, S_IRUGO | S_IWUSR, uwb_rc_mac_addr_show, uwb_rc_mac_addr_store);
+
+/** Print @addr to @buf, @return bytes written */
+size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr,
+ int type)
+{
+ size_t result;
+ if (type)
+ result = scnprintf(buf, buf_size,
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ addr[0], addr[1], addr[2],
+ addr[3], addr[4], addr[5]);
+ else
+ result = scnprintf(buf, buf_size, "%02x:%02x",
+ addr[1], addr[0]);
+ return result;
+}
+EXPORT_SYMBOL_GPL(__uwb_addr_print);
diff --git a/drivers/uwb/beacon.c b/drivers/uwb/beacon.c
new file mode 100644
index 00000000000..46b18eec502
--- /dev/null
+++ b/drivers/uwb/beacon.c
@@ -0,0 +1,642 @@
+/*
+ * Ultra Wide Band
+ * Beacon management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kdev_t.h>
+#include "uwb-internal.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/** Start Beaconing command structure */
+struct uwb_rc_cmd_start_beacon {
+ struct uwb_rccb rccb;
+ __le16 wBPSTOffset;
+ u8 bChannelNumber;
+} __attribute__((packed));
+
+
+static int uwb_rc_start_beacon(struct uwb_rc *rc, u16 bpst_offset, u8 channel)
+{
+ int result;
+ struct uwb_rc_cmd_start_beacon *cmd;
+ struct uwb_rc_evt_confirm reply;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+ cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
+ cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_START_BEACON);
+ cmd->wBPSTOffset = cpu_to_le16(bpst_offset);
+ cmd->bChannelNumber = channel;
+ reply.rceb.bEventType = UWB_RC_CET_GENERAL;
+ reply.rceb.wEvent = UWB_RC_CMD_START_BEACON;
+ result = uwb_rc_cmd(rc, "START-BEACON", &cmd->rccb, sizeof(*cmd),
+ &reply.rceb, sizeof(reply));
+ if (result < 0)
+ goto error_cmd;
+ if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+ dev_err(&rc->uwb_dev.dev,
+ "START-BEACON: command execution failed: %s (%d)\n",
+ uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
+ result = -EIO;
+ }
+error_cmd:
+ kfree(cmd);
+ return result;
+}
+
+static int uwb_rc_stop_beacon(struct uwb_rc *rc)
+{
+ int result;
+ struct uwb_rccb *cmd;
+ struct uwb_rc_evt_confirm reply;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+ cmd->bCommandType = UWB_RC_CET_GENERAL;
+ cmd->wCommand = cpu_to_le16(UWB_RC_CMD_STOP_BEACON);
+ reply.rceb.bEventType = UWB_RC_CET_GENERAL;
+ reply.rceb.wEvent = UWB_RC_CMD_STOP_BEACON;
+ result = uwb_rc_cmd(rc, "STOP-BEACON", cmd, sizeof(*cmd),
+ &reply.rceb, sizeof(reply));
+ if (result < 0)
+ goto error_cmd;
+ if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+ dev_err(&rc->uwb_dev.dev,
+ "STOP-BEACON: command execution failed: %s (%d)\n",
+ uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
+ result = -EIO;
+ }
+error_cmd:
+ kfree(cmd);
+ return result;
+}
+
+/*
+ * Start/stop beacons
+ *
+ * @rc: UWB Radio Controller to operate on
+ * @channel: UWB channel on which to beacon (WUSB[table
+ * 5-12]). If -1, stop beaconing.
+ * @bpst_offset: Beacon Period Start Time offset; FIXME-do zero
+ *
+ * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
+ * of a SET IE command after the device sent the first beacon that includes
+ * the IEs specified in the SET IE command. So, after we start beaconing we
+ * check if there is anything in the IE cache and call the SET IE command
+ * if needed.
+ */
+int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset)
+{
+ int result;
+ struct device *dev = &rc->uwb_dev.dev;
+
+ mutex_lock(&rc->uwb_dev.mutex);
+ if (channel < 0)
+ channel = -1;
+ if (channel == -1)
+ result = uwb_rc_stop_beacon(rc);
+ else {
+ /* channel >= 0...dah */
+ result = uwb_rc_start_beacon(rc, bpst_offset, channel);
+ if (result < 0)
+ goto out_up;
+ if (le16_to_cpu(rc->ies->wIELength) > 0) {
+ result = uwb_rc_set_ie(rc, rc->ies);
+ if (result < 0) {
+ dev_err(dev, "Cannot set new IE on device: "
+ "%d\n", result);
+ result = uwb_rc_stop_beacon(rc);
+ channel = -1;
+ bpst_offset = 0;
+ } else
+ result = 0;
+ }
+ }
+
+ if (result < 0)
+ goto out_up;
+ rc->beaconing = channel;
+
+ uwb_notify(rc, NULL, uwb_bg_joined(rc) ? UWB_NOTIF_BG_JOIN : UWB_NOTIF_BG_LEAVE);
+
+out_up:
+ mutex_unlock(&rc->uwb_dev.mutex);
+ return result;
+}
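+
+/*
+ * Illustrative usage (sketch only; the channel number is just an example,
+ * see WUSB[table 5-12] for valid values):
+ *
+ *	result = uwb_rc_beacon(rc, 9, 0);	// start beaconing on channel 9
+ *	...
+ *	result = uwb_rc_beacon(rc, -1, 0);	// stop beaconing
+ */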
+
+/*
+ * Beacon cache
+ *
+ * The purpose of this is to speed up the lookup of beacon information
+ * when a new beacon arrives. The UWB Daemon also uses it to keep
+ * track of which devices are within radio range and which are not. When a
+ * device's beacon stays present for more than a certain amount of
+ * time, it is considered a new, usable device. When a beacon ceases
+ * to be received for a certain amount of time, it is considered that
+ * the device is gone.
+ *
+ * FIXME: use an allocator for the entries
+ * FIXME: use something faster for search than a list
+ */
+
+struct uwb_beca uwb_beca = {
+ .list = LIST_HEAD_INIT(uwb_beca.list),
+ .mutex = __MUTEX_INITIALIZER(uwb_beca.mutex)
+};
+
+
+void uwb_bce_kfree(struct kref *_bce)
+{
+ struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt);
+
+ kfree(bce->be);
+ kfree(bce);
+}
+
+
+/* Find a beacon by dev addr in the cache */
+static
+struct uwb_beca_e *__uwb_beca_find_bydev(const struct uwb_dev_addr *dev_addr)
+{
+ struct uwb_beca_e *bce, *next;
+ list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
+ d_printf(6, NULL, "looking for addr %02x:%02x in %02x:%02x\n",
+ dev_addr->data[0], dev_addr->data[1],
+ bce->dev_addr.data[0], bce->dev_addr.data[1]);
+ if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr)))
+ goto out;
+ }
+ bce = NULL;
+out:
+ return bce;
+}
+
+/* Find a beacon by dev addr in the cache */
+static
+struct uwb_beca_e *__uwb_beca_find_bymac(const struct uwb_mac_addr *mac_addr)
+{
+ struct uwb_beca_e *bce, *next;
+ list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
+ if (!memcmp(bce->mac_addr, mac_addr->data,
+ sizeof(struct uwb_mac_addr)))
+ goto out;
+ }
+ bce = NULL;
+out:
+ return bce;
+}
+
+/**
+ * uwb_dev_get_by_devaddr - get a UWB device with a specific DevAddr
+ * @rc: the radio controller that saw the device
+ * @devaddr: DevAddr of the UWB device to find
+ *
+ * There may be more than one matching device (in the case of a
+ * DevAddr conflict), but only the first one is returned.
+ */
+struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
+ const struct uwb_dev_addr *devaddr)
+{
+ struct uwb_dev *found = NULL;
+ struct uwb_beca_e *bce;
+
+ mutex_lock(&uwb_beca.mutex);
+ bce = __uwb_beca_find_bydev(devaddr);
+ if (bce)
+ found = uwb_dev_try_get(rc, bce->uwb_dev);
+ mutex_unlock(&uwb_beca.mutex);
+
+ return found;
+}
+
+/**
+ * uwb_dev_get_by_macaddr - get a UWB device with a specific EUI-48
+ * @rc: the radio controller that saw the device
+ * @macaddr: EUI-48 of the UWB device to find
+ */
+struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
+ const struct uwb_mac_addr *macaddr)
+{
+ struct uwb_dev *found = NULL;
+ struct uwb_beca_e *bce;
+
+ mutex_lock(&uwb_beca.mutex);
+ bce = __uwb_beca_find_bymac(macaddr);
+ if (bce)
+ found = uwb_dev_try_get(rc, bce->uwb_dev);
+ mutex_unlock(&uwb_beca.mutex);
+
+ return found;
+}
+
+/* Initialize a beacon cache entry */
+static void uwb_beca_e_init(struct uwb_beca_e *bce)
+{
+ mutex_init(&bce->mutex);
+ kref_init(&bce->refcnt);
+ stats_init(&bce->lqe_stats);
+ stats_init(&bce->rssi_stats);
+}
+
+/*
+ * Add a beacon to the cache
+ *
+ * @be: Beacon event information
+ * @bf: Beacon frame (part of @be, really)
+ * @ts_jiffies: Timestamp (in jiffies) when the beacon was received
+ */
+struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be,
+ struct uwb_beacon_frame *bf,
+ unsigned long ts_jiffies)
+{
+ struct uwb_beca_e *bce;
+
+ bce = kzalloc(sizeof(*bce), GFP_KERNEL);
+ if (bce == NULL)
+ return NULL;
+ uwb_beca_e_init(bce);
+ bce->ts_jiffies = ts_jiffies;
+ bce->uwb_dev = NULL;
+ list_add(&bce->node, &uwb_beca.list);
+ return bce;
+}
+
+/*
+ * Wipe out beacon entries that became stale
+ *
+ * Remove associated devices too.
+ */
+void uwb_beca_purge(void)
+{
+ struct uwb_beca_e *bce, *next;
+ unsigned long expires;
+
+ mutex_lock(&uwb_beca.mutex);
+ list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
+ expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms);
+ if (time_after(jiffies, expires)) {
+ uwbd_dev_offair(bce);
+ list_del(&bce->node);
+ uwb_bce_put(bce);
+ }
+ }
+ mutex_unlock(&uwb_beca.mutex);
+}
+
+/* Clean up the whole beacon cache. Called on shutdown */
+void uwb_beca_release(void)
+{
+ struct uwb_beca_e *bce, *next;
+ mutex_lock(&uwb_beca.mutex);
+ list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
+ list_del(&bce->node);
+ uwb_bce_put(bce);
+ }
+ mutex_unlock(&uwb_beca.mutex);
+}
+
+static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be,
+ struct uwb_beacon_frame *bf)
+{
+ char macbuf[UWB_ADDR_STRSIZE];
+ char devbuf[UWB_ADDR_STRSIZE];
+ char dstbuf[UWB_ADDR_STRSIZE];
+
+ uwb_mac_addr_print(macbuf, sizeof(macbuf), &bf->Device_Identifier);
+ uwb_dev_addr_print(devbuf, sizeof(devbuf), &bf->hdr.SrcAddr);
+ uwb_dev_addr_print(dstbuf, sizeof(dstbuf), &bf->hdr.DestAddr);
+ dev_info(&rc->uwb_dev.dev,
+ "BEACON from %s to %s (ch%u offset %u slot %u MAC %s)\n",
+ devbuf, dstbuf, be->bChannelNumber, be->wBPSTOffset,
+ bf->Beacon_Slot_Number, macbuf);
+}
+
+/*
+ * @bce: beacon cache entry, referenced
+ */
+ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce,
+ char *buf, size_t size)
+{
+ ssize_t result = 0;
+ struct uwb_rc_evt_beacon *be;
+ struct uwb_beacon_frame *bf;
+ struct uwb_buf_ctx ctx = {
+ .buf = buf,
+ .bytes = 0,
+ .size = size
+ };
+
+ mutex_lock(&bce->mutex);
+ be = bce->be;
+ if (be == NULL)
+ goto out;
+ bf = (void *) be->BeaconInfo;
+ uwb_ie_for_each(uwb_dev, uwb_ie_dump_hex, &ctx,
+ bf->IEData, be->wBeaconInfoLength - sizeof(*bf));
+ result = ctx.bytes;
+out:
+ mutex_unlock(&bce->mutex);
+ return result;
+}
+
+/*
+ * Verify that the beacon event, frame and IEs are ok
+ */
+static int uwb_verify_beacon(struct uwb_rc *rc, struct uwb_event *evt,
+ struct uwb_rc_evt_beacon *be)
+{
+ int result = -EINVAL;
+ struct uwb_beacon_frame *bf;
+ struct device *dev = &rc->uwb_dev.dev;
+
+ /* Is there enough data to decode a beacon frame? */
+ if (evt->notif.size < sizeof(*be) + sizeof(*bf)) {
+ dev_err(dev, "BEACON event: Not enough data to decode "
+ "(%zu vs %zu bytes needed)\n", evt->notif.size,
+ sizeof(*be) + sizeof(*bf));
+ goto error;
+ }
+ /* FIXME: make sure beacon frame IEs are fine and that the whole thing
+ * is consistent */
+ result = 0;
+error:
+ return result;
+}
+
+/*
+ * Handle UWB_RC_EVT_BEACON events
+ *
+ * We check the beacon cache to see how the received beacon fares. If
+ * it is already there, we refresh the timestamp. If not, we create a new
+ * entry.
+ *
+ * According to the WHCI and WUSB specs, only one beacon frame is
+ * allowed per notification block, so we don't bother about scanning
+ * for more.
+ */
+int uwbd_evt_handle_rc_beacon(struct uwb_event *evt)
+{
+ int result = -EINVAL;
+ struct uwb_rc *rc;
+ struct uwb_rc_evt_beacon *be;
+ struct uwb_beacon_frame *bf;
+ struct uwb_beca_e *bce;
+ unsigned long last_ts;
+
+ rc = evt->rc;
+ be = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon, rceb);
+ result = uwb_verify_beacon(rc, evt, be);
+ if (result < 0)
+ return result;
+
+ /* FIXME: handle alien beacons. */
+ if (be->bBeaconType == UWB_RC_BEACON_TYPE_OL_ALIEN ||
+ be->bBeaconType == UWB_RC_BEACON_TYPE_NOL_ALIEN) {
+ return -ENOSYS;
+ }
+
+ bf = (struct uwb_beacon_frame *) be->BeaconInfo;
+
+ /*
+ * Drop beacons from devices with a NULL EUI-48 -- they cannot
+ * be uniquely identified.
+ *
+ * It's expected that these will all be WUSB devices and they
+ * have a WUSB specific connection method so ignoring them
+ * here shouldn't be a problem.
+ */
+ if (uwb_mac_addr_bcast(&bf->Device_Identifier))
+ return 0;
+
+ mutex_lock(&uwb_beca.mutex);
+ bce = __uwb_beca_find_bymac(&bf->Device_Identifier);
+ if (bce == NULL) {
+ /* Not in there, a new device is pinging */
+ uwb_beacon_print(evt->rc, be, bf);
+ bce = __uwb_beca_add(be, bf, evt->ts_jiffies);
+ if (bce == NULL) {
+ mutex_unlock(&uwb_beca.mutex);
+ return -ENOMEM;
+ }
+ }
+ mutex_unlock(&uwb_beca.mutex);
+
+ mutex_lock(&bce->mutex);
+ /* purge old beacon data */
+ kfree(bce->be);
+
+ last_ts = bce->ts_jiffies;
+
+ /* Update commonly used fields */
+ bce->ts_jiffies = evt->ts_jiffies;
+ bce->be = be;
+ bce->dev_addr = bf->hdr.SrcAddr;
+ bce->mac_addr = &bf->Device_Identifier;
+ be->wBPSTOffset = le16_to_cpu(be->wBPSTOffset);
+ be->wBeaconInfoLength = le16_to_cpu(be->wBeaconInfoLength);
+ stats_add_sample(&bce->lqe_stats, be->bLQI - 7);
+ stats_add_sample(&bce->rssi_stats, be->bRSSI + 18);
+
+ /*
+ * This might be a beacon from a new device.
+ */
+ if (bce->uwb_dev == NULL)
+ uwbd_dev_onair(evt->rc, bce);
+
+ mutex_unlock(&bce->mutex);
+
+ return 1; /* we keep the event data */
+}
+
+/*
+ * Handle UWB_RC_EVT_BEACON_SIZE events
+ *
+ * XXXXX
+ */
+int uwbd_evt_handle_rc_beacon_size(struct uwb_event *evt)
+{
+ int result = -EINVAL;
+ struct device *dev = &evt->rc->uwb_dev.dev;
+ struct uwb_rc_evt_beacon_size *bs;
+
+ /* Is there enough data to decode the event? */
+ if (evt->notif.size < sizeof(*bs)) {
+ dev_err(dev, "BEACON SIZE notification: Not enough data to "
+ "decode (%zu vs %zu bytes needed)\n",
+ evt->notif.size, sizeof(*bs));
+ goto error;
+ }
+ bs = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon_size, rceb);
+ if (0)
+ dev_info(dev, "Beacon size changed to %u bytes "
+ "(FIXME: action?)\n", le16_to_cpu(bs->wNewBeaconSize));
+ else {
+ /* temporary hack until we do something with this message... */
+ static unsigned count;
+ if (++count % 1000 == 0)
+ dev_info(dev, "Beacon size changed %u times "
+ "(FIXME: action?)\n", count);
+ }
+ result = 0;
+error:
+ return result;
+}
+
+/**
+ * uwbd_evt_handle_rc_bp_slot_change - handle a BP_SLOT_CHANGE event
+ * @evt: the BP_SLOT_CHANGE notification from the radio controller
+ *
+ * If the event indicates that no beacon period slots were available
+ * then the radio controller has transitioned to a non-beaconing state.
+ * Otherwise, simply save the current beacon slot.
+ */
+int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *evt)
+{
+ struct uwb_rc *rc = evt->rc;
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_rc_evt_bp_slot_change *bpsc;
+
+ if (evt->notif.size < sizeof(*bpsc)) {
+ dev_err(dev, "BP SLOT CHANGE event: Not enough data\n");
+ return -EINVAL;
+ }
+ bpsc = container_of(evt->notif.rceb, struct uwb_rc_evt_bp_slot_change, rceb);
+
+ mutex_lock(&rc->uwb_dev.mutex);
+ if (uwb_rc_evt_bp_slot_change_no_slot(bpsc)) {
+ dev_info(dev, "stopped beaconing: No free slots in BP\n");
+ rc->beaconing = -1;
+ } else
+ rc->uwb_dev.beacon_slot = uwb_rc_evt_bp_slot_change_slot_num(bpsc);
+ mutex_unlock(&rc->uwb_dev.mutex);
+
+ return 0;
+}
+
+/**
+ * Handle UWB_RC_EVT_BPOIE_CHANGE events
+ *
+ * XXXXX
+ */
+struct uwb_ie_bpo {
+ struct uwb_ie_hdr hdr;
+ u8 bp_length;
+ u8 data[];
+} __attribute__((packed));
+
+int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *evt)
+{
+ int result = -EINVAL;
+ struct device *dev = &evt->rc->uwb_dev.dev;
+ struct uwb_rc_evt_bpoie_change *bpoiec;
+ struct uwb_ie_bpo *bpoie;
+ static unsigned count; /* FIXME: this is a temp hack */
+ size_t iesize;
+
+ /* Is there enough data to decode it? */
+ if (evt->notif.size < sizeof(*bpoiec)) {
+ dev_err(dev, "BPOIEC notification: Not enough data to "
+ "decode (%zu vs %zu bytes needed)\n",
+ evt->notif.size, sizeof(*bpoiec));
+ goto error;
+ }
+ bpoiec = container_of(evt->notif.rceb, struct uwb_rc_evt_bpoie_change, rceb);
+ iesize = le16_to_cpu(bpoiec->wBPOIELength);
+ if (iesize < sizeof(*bpoie)) {
+ dev_err(dev, "BPOIEC notification: Not enough IE data to "
+ "decode (%zu vs %zu bytes needed)\n",
+ iesize, sizeof(*bpoie));
+ goto error;
+ }
+ if (++count % 1000 == 0) /* Lame placeholder */
+ dev_info(dev, "BPOIE: %u changes received\n", count);
+ /*
+ * FIXME: At this point we should go over all the IEs in the
+ * bpoiec->BPOIE array and act on each.
+ */
+ result = 0;
+error:
+ return result;
+}
+
+/**
+ * uwb_bg_joined - is the RC in a beacon group?
+ * @rc: the radio controller
+ *
+ * Returns true if the radio controller is in a beacon group (even if
+ * it's the sole member).
+ */
+int uwb_bg_joined(struct uwb_rc *rc)
+{
+ return rc->beaconing != -1;
+}
+EXPORT_SYMBOL_GPL(uwb_bg_joined);
+
+/*
+ * Print beaconing state.
+ */
+static ssize_t uwb_rc_beacon_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_rc *rc = uwb_dev->rc;
+ ssize_t result;
+
+ mutex_lock(&rc->uwb_dev.mutex);
+ result = sprintf(buf, "%d\n", rc->beaconing);
+ mutex_unlock(&rc->uwb_dev.mutex);
+ return result;
+}
+
+/*
+ * Start beaconing on the specified channel, or stop beaconing.
+ *
+ * The BPST offset of when to start searching for a beacon group to
+ * join may be specified.
+ */
+static ssize_t uwb_rc_beacon_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_rc *rc = uwb_dev->rc;
+ int channel;
+ unsigned bpst_offset = 0;
+ ssize_t result = -EINVAL;
+
+ result = sscanf(buf, "%d %u\n", &channel, &bpst_offset);
+ if (result >= 1)
+ result = uwb_rc_beacon(rc, channel, bpst_offset);
+
+ return result < 0 ? result : size;
+}
+DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, uwb_rc_beacon_show, uwb_rc_beacon_store);
diff --git a/drivers/uwb/driver.c b/drivers/uwb/driver.c
new file mode 100644
index 00000000000..521cdeb8497
--- /dev/null
+++ b/drivers/uwb/driver.c
@@ -0,0 +1,144 @@
+/*
+ * Ultra Wide Band
+ * Driver initialization, etc
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * Life cycle: FIXME: explain
+ *
+ * UWB radio controller:
+ *
+ * 1. alloc a uwb_rc, zero it
+ * 2. call uwb_rc_init() on it to set it up + ops (won't do any
+ * kind of allocation)
+ * 3. register (now it is owned by the UWB stack--deregister before
+ * freeing/destroying).
+ * 4. It lives on its own now (the UWB stack handles it)--when it
+ * disconnects, call unregister()
+ * 5. free it.
+ *
+ * Make sure you have a reference to the uwb_rc before calling
+ * any of the UWB API functions.
+ *
+ * TODO:
+ *
+ * 1. Locking and life cycle management are still crappy. All entry
+ * points to the UWB HCD API assume you have a reference on the
+ * uwb_rc structure and that it won't go away. They mutex lock it
+ * before doing anything.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kdev_t.h>
+#include <linux/random.h>
+#include <linux/uwb/debug.h>
+#include "uwb-internal.h"
+
+
+/* UWB stack attributes (or 'global' constants) */
+
+
+/**
+ * If a beacon disappears for longer than this, then we consider the
+ * device that was represented by that beacon to be gone.
+ *
+ * ECMA-368[17.2.3, last para] establishes that a device must not
+ * consider a device to be its neighbour if it doesn't receive a beacon
+ * for more than mMaxLostBeacons. mMaxLostBeacons is defined in
+ * ECMA-368[17.16] as 3; because we can get only one beacon per
+ * superframe, that'd be 3 * 65ms = 195 ~ 200 ms. Let's give it time
+ * for jitter and stuff and make it 500 ms.
+ */
+unsigned long beacon_timeout_ms = 500;
+
+static
+ssize_t beacon_timeout_ms_show(struct class *class, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", beacon_timeout_ms);
+}
+
+static
+ssize_t beacon_timeout_ms_store(struct class *class,
+ const char *buf, size_t size)
+{
+ unsigned long bt;
+ ssize_t result;
+ result = sscanf(buf, "%lu", &bt);
+ if (result != 1)
+ return -EINVAL;
+ beacon_timeout_ms = bt;
+ return size;
+}
+
+static struct class_attribute uwb_class_attrs[] = {
+ __ATTR(beacon_timeout_ms, S_IWUSR | S_IRUGO,
+ beacon_timeout_ms_show, beacon_timeout_ms_store),
+ __ATTR_NULL,
+};
+
+/** Device model classes */
+struct class uwb_rc_class = {
+ .name = "uwb_rc",
+ .class_attrs = uwb_class_attrs,
+};
+
+
+static int __init uwb_subsys_init(void)
+{
+ int result = 0;
+
+ result = uwb_est_create();
+ if (result < 0) {
+ printk(KERN_ERR "uwb: Can't initialize EST subsystem\n");
+ goto error_est_init;
+ }
+
+ result = class_register(&uwb_rc_class);
+ if (result < 0)
+ goto error_uwb_rc_class_register;
+ uwbd_start();
+ uwb_dbg_init();
+ return 0;
+
+error_uwb_rc_class_register:
+ uwb_est_destroy();
+error_est_init:
+ return result;
+}
+module_init(uwb_subsys_init);
+
+static void __exit uwb_subsys_exit(void)
+{
+ uwb_dbg_exit();
+ uwbd_stop();
+ class_unregister(&uwb_rc_class);
+ uwb_est_destroy();
+ return;
+}
+module_exit(uwb_subsys_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Ultra Wide Band core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/drp-avail.c b/drivers/uwb/drp-avail.c
new file mode 100644
index 00000000000..3febd855280
--- /dev/null
+++ b/drivers/uwb/drp-avail.c
@@ -0,0 +1,288 @@
+/*
+ * Ultra Wide Band
+ * DRP availability management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Manage DRP Availability (the MAS available for DRP
+ * reservations). Thus:
+ *
+ * - Handle DRP Availability Change notifications
+ *
+ * - Allow the reservation manager to indicate MAS reserved/released
+ * by local (owned by/targeted at the radio controller)
+ * reservations.
+ *
+ * - Based on the two sources above, generate a DRP Availability IE to
+ * be included in the beacon.
+ *
+ * See also the documentation for struct uwb_drp_avail.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/bitmap.h>
+#include "uwb-internal.h"
+
+/**
+ * uwb_drp_avail_init - initialize an RC's MAS availability
+ *
+ * All MAS are available initially. The RC will inform us which
+ * slots are used for the BP (it may change in size).
+ */
+void uwb_drp_avail_init(struct uwb_rc *rc)
+{
+ bitmap_fill(rc->drp_avail.global, UWB_NUM_MAS);
+ bitmap_fill(rc->drp_avail.local, UWB_NUM_MAS);
+ bitmap_fill(rc->drp_avail.pending, UWB_NUM_MAS);
+}
+
+/*
+ * Determine MAS available for new local reservations.
+ *
+ * avail = global & local & pending
+ */
+static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
+{
+ bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
+ bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS);
+}
+
+/**
+ * uwb_drp_avail_reserve_pending - reserve MAS for a new reservation
+ * @rc: the radio controller
+ * @mas: the MAS to reserve
+ *
+ * Returns 0 on success, or -EBUSY if the MAS requested aren't available.
+ */
+int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas)
+{
+ struct uwb_mas_bm avail;
+
+ uwb_drp_available(rc, &avail);
+ if (!bitmap_subset(mas->bm, avail.bm, UWB_NUM_MAS))
+ return -EBUSY;
+
+ bitmap_andnot(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
+ return 0;
+}
+
+/**
+ * uwb_drp_avail_reserve - reserve MAS for an established reservation
+ * @rc: the radio controller
+ * @mas: the MAS to reserve
+ */
+void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas)
+{
+ bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
+ bitmap_andnot(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
+ rc->drp_avail.ie_valid = false;
+}
+
+/**
+ * uwb_drp_avail_release - release MAS from a pending or established reservation
+ * @rc: the radio controller
+ * @mas: the MAS to release
+ */
+void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas)
+{
+ bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
+ bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
+ rc->drp_avail.ie_valid = false;
+}
+
+/**
+ * uwb_drp_avail_ie_update - update the DRP Availability IE
+ * @rc: the radio controller
+ *
+ * avail = global & local
+ */
+void uwb_drp_avail_ie_update(struct uwb_rc *rc)
+{
+ struct uwb_mas_bm avail;
+
+ bitmap_and(avail.bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
+
+ rc->drp_avail.ie.hdr.element_id = UWB_IE_DRP_AVAILABILITY;
+ rc->drp_avail.ie.hdr.length = UWB_NUM_MAS / 8;
+ uwb_mas_bm_copy_le(rc->drp_avail.ie.bmp, &avail);
+ rc->drp_avail.ie_valid = true;
+}
+
+/**
+ * Create an unsigned long from a buffer containing a byte stream.
+ *
+ * @array: pointer to buffer
+ * @itr: index of buffer from where we start
+ * @len: the number of bytes to convert, starting at @itr. The
+ *       buffer's remaining size may not be an exact multiple of
+ *       sizeof(unsigned long); @len will be sizeof(unsigned long)
+ *       or smaller (BUG if not). If it is smaller, the remaining
+ *       space of the result is padded with zeroes.
+ */
+static
+unsigned long get_val(u8 *array, size_t itr, size_t len)
+{
+ unsigned long val = 0;
+ size_t top = itr + len;
+
+ BUG_ON(len > sizeof(val));
+
+ while (itr < top) {
+ val <<= 8;
+ val |= array[top - 1];
+ top--;
+ }
+ val <<= 8 * (sizeof(val) - len); /* padding */
+ return val;
+}
+
+/**
+ * Initialize bitmap from data buffer.
+ *
+ * The bitmap to be converted could come from an IE, for example a
+ * DRP Availability IE.
+ * From ECMA-368 1.0 [16.8.7]: "
+ * octets: 1 1 N * (0 to 32)
+ * Element ID Length (=N) DRP Availability Bitmap
+ *
+ * The DRP Availability Bitmap field is up to 256 bits long, one
+ * bit for each MAS in the superframe, where the least-significant
+ * bit of the field corresponds to the first MAS in the superframe
+ * and successive bits correspond to successive MASs."
+ *
+ * The DRP Availability bitmap is in octets from 0 to 32, so octet
+ * 32 contains bits for MAS 1-8, etc. If the bitmap is smaller than 32
+ * octets, the bits in octets not included at the end of the bitmap are
+ * treated as zero. In this case (when the bitmap is smaller than 32
+ * octets) the MAS represented range from MAS 1 to MAS (size of bitmap)
+ * with the last octet still containing bits for MAS 1-8, etc.
+ *
+ * For example:
+ * F00F0102 03040506 0708090A 0B0C0D0E 0F010203
+ * ^^^^
+ * ||||
+ * ||||
+ * |||\LSB of byte is MAS 9
+ * ||\MSB of byte is MAS 16
+ * |\LSB of first byte is MAS 1
+ * \ MSB of byte is MAS 8
+ *
+ * An example of this encoding can be found in ECMA-368 Annex-D [Table D.11]
+ *
+ * The resulting bitmap will have the following mapping:
+ * bit position 0 == MAS 1
+ * bit position 1 == MAS 2
+ * ...
+ * bit position (UWB_NUM_MAS - 1) == MAS UWB_NUM_MAS
+ *
+ * @bmp_itr: pointer to bitmap (can be declared with DECLARE_BITMAP)
+ * @buffer: pointer to buffer containing bitmap data in big endian
+ * format (MSB first)
+ * @buffer_size: number of bytes with which bitmap should be initialized
+ */
+static
+void buffer_to_bmp(unsigned long *bmp_itr, void *_buffer,
+ size_t buffer_size)
+{
+ u8 *buffer = _buffer;
+ size_t itr, len;
+ unsigned long val;
+
+ itr = 0;
+ while (itr < buffer_size) {
+ len = buffer_size - itr >= sizeof(val) ?
+ sizeof(val) : buffer_size - itr;
+ val = get_val(buffer, itr, len);
+ bmp_itr[itr / sizeof(val)] = val;
+ itr += sizeof(val);
+ }
+}
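+
+#if 0 /* Illustrative sketch only -- not part of this driver. */
+/*
+ * Standalone example of the ECMA-368 mapping quoted above: the
+ * least-significant bit of the DRP Availability Bitmap corresponds to
+ * the first MAS and successive bits to successive MAS; octets that
+ * were not transmitted are treated as zero.  ex_test_mas() is a
+ * hypothetical plain-C helper (no kernel bitmap code), indexing MAS
+ * from 0.
+ */
+#include <stdint.h>
+#include <stddef.h>
+
+/* Return the availability bit for the MAS with 0-based index @mas. */
+static int ex_test_mas(const uint8_t *bmp, size_t bmp_len, unsigned mas)
+{
+	if (mas / 8 >= bmp_len)
+		return 0;	/* octet not included in the IE */
+	return (bmp[mas / 8] >> (mas % 8)) & 1;
+}
+#endif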
+
+
+/**
+ * Extract DRP Availability bitmap from the notification.
+ *
+ * The notification that comes in contains a bitmap of (UWB_NUM_MAS / 8)
+ * bytes. We convert that to our internal representation.
+ */
+static
+int uwbd_evt_get_drp_avail(struct uwb_event *evt, unsigned long *bmp)
+{
+ struct device *dev = &evt->rc->uwb_dev.dev;
+ struct uwb_rc_evt_drp_avail *drp_evt;
+ int result = -EINVAL;
+
+ /* Is there enough data to decode the event? */
+ if (evt->notif.size < sizeof(*drp_evt)) {
+ dev_err(dev, "DRP Availability Change: Not enough "
+ "data to decode event [%zu bytes, %zu "
+ "needed]\n", evt->notif.size, sizeof(*drp_evt));
+ goto error;
+ }
+ drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp_avail, rceb);
+ buffer_to_bmp(bmp, drp_evt->bmp, UWB_NUM_MAS/8);
+ result = 0;
+error:
+ return result;
+}
+
+
+/**
+ * Process an incoming DRP Availability notification.
+ *
+ * @evt: Event information (packs the actual event data, which
+ * radio controller it came to, etc).
+ *
+ * @returns: 0 on success (so uwbd() frees the event buffer), < 0
+ * on error.
+ *
+ * According to ECMA-368 1.0 [16.8.7], bits set to ONE indicate that
+ * the MAS slot is available, bits set to ZERO indicate that the slot
+ * is busy.
+ *
+ * The bitmap is therefore copied as-is into drp_avail.global (1 == available).
+ *
+ * The notification only marks non-availability based on the BP and
+ * received DRP IEs that are not for this radio controller. A copy of
+ * this bitmap is needed to generate the real availability (which
+ * includes local and pending reservations).
+ *
+ * The DRP Availability IE that this radio controller emits will need
+ * to be updated.
+ */
+int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt)
+{
+ int result;
+ struct uwb_rc *rc = evt->rc;
+ DECLARE_BITMAP(bmp, UWB_NUM_MAS);
+
+ result = uwbd_evt_get_drp_avail(evt, bmp);
+ if (result < 0)
+ return result;
+
+ mutex_lock(&rc->rsvs_mutex);
+ bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS);
+ rc->drp_avail.ie_valid = false;
+ mutex_unlock(&rc->rsvs_mutex);
+
+ uwb_rsv_sched_update(rc);
+
+ return 0;
+}
diff --git a/drivers/uwb/drp-ie.c b/drivers/uwb/drp-ie.c
new file mode 100644
index 00000000000..882724c5f12
--- /dev/null
+++ b/drivers/uwb/drp-ie.c
@@ -0,0 +1,232 @@
+/*
+ * UWB DRP IE management.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/uwb.h>
+
+#include "uwb-internal.h"
+
+/*
+ * Allocate a DRP IE.
+ *
+ * To save having to free/allocate a DRP IE when its MAS changes,
+ * enough memory is allocated for the maximum number of DRP
+ * allocation fields. This gives an overhead per reservation of up to
+ * (UWB_NUM_ZONES - 1) * 4 = 60 octets.
+ */
+static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
+{
+ struct uwb_ie_drp *drp_ie;
+ unsigned tiebreaker;
+
+ drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
+ UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
+ GFP_KERNEL);
+ if (drp_ie) {
+ drp_ie->hdr.element_id = UWB_IE_DRP;
+
+ get_random_bytes(&tiebreaker, sizeof(unsigned));
+ uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1);
+ }
+ return drp_ie;
+}
+
+
+/*
+ * Fill a DRP IE's allocation fields from a MAS bitmap.
+ */
+static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
+ struct uwb_mas_bm *mas)
+{
+ int z, i, num_fields = 0, next = 0;
+ struct uwb_drp_alloc *zones;
+ __le16 current_bmp;
+ DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
+ DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);
+
+ zones = drp_ie->allocs;
+
+ bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);
+
+ /* Determine unique MAS bitmaps in zones from bitmap. */
+ for (z = 0; z < UWB_NUM_ZONES; z++) {
+ bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
+ if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
+ bool found = false;
+ current_bmp = (__le16) *tmp_mas_bm;
+ for (i = 0; i < next; i++) {
+ if (current_bmp == zones[i].mas_bm) {
+ zones[i].zone_bm |= 1 << z;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ num_fields++;
+ zones[next].zone_bm = 1 << z;
+ zones[next].mas_bm = current_bmp;
+ next++;
+ }
+ }
+ bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
+ }
+
+ /* Store in format ready for transmission (le16). */
+ for (i = 0; i < num_fields; i++) {
+ drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
+ drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
+ }
+
+ drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
+ + num_fields * sizeof(struct uwb_drp_alloc);
+}
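+
+#if 0 /* Illustrative sketch only -- not part of this driver. */
+/*
+ * Standalone sketch of the compression done above: the MAS bitmap is
+ * viewed as 16 zones of 16 MAS each, and zones sharing an identical
+ * 16-bit MAS pattern are folded into one (zone bitmap, MAS bitmap)
+ * allocation field.  Hypothetical types in plain C; the real code
+ * works on a struct uwb_mas_bm and emits little-endian fields.
+ */
+#include <stdint.h>
+
+#define EX_NUM_ZONES	16
+
+struct ex_alloc {
+	uint16_t zone_bm;
+	uint16_t mas_bm;
+};
+
+/* @zone_mas[z] is the 16-bit MAS pattern of zone z; returns how many
+ * allocation fields were written to @allocs (at most EX_NUM_ZONES). */
+static int ex_compress(const uint16_t zone_mas[EX_NUM_ZONES],
+		       struct ex_alloc allocs[EX_NUM_ZONES])
+{
+	int z, i, n = 0;
+
+	for (z = 0; z < EX_NUM_ZONES; z++) {
+		if (zone_mas[z] == 0)
+			continue;
+		for (i = 0; i < n; i++)
+			if (allocs[i].mas_bm == zone_mas[z])
+				break;
+		if (i < n) {
+			allocs[i].zone_bm |= 1 << z;	/* existing pattern */
+		} else {
+			allocs[n].zone_bm = 1 << z;	/* new pattern */
+			allocs[n].mas_bm = zone_mas[z];
+			n++;
+		}
+	}
+	return n;
+}
+#endif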
+
+/**
+ * uwb_drp_ie_update - update a reservation's DRP IE
+ * @rsv: the reservation
+ */
+int uwb_drp_ie_update(struct uwb_rsv *rsv)
+{
+ struct device *dev = &rsv->rc->uwb_dev.dev;
+ struct uwb_ie_drp *drp_ie;
+ int reason_code, status;
+
+ switch (rsv->state) {
+ case UWB_RSV_STATE_NONE:
+ kfree(rsv->drp_ie);
+ rsv->drp_ie = NULL;
+ return 0;
+ case UWB_RSV_STATE_O_INITIATED:
+ reason_code = UWB_DRP_REASON_ACCEPTED;
+ status = 0;
+ break;
+ case UWB_RSV_STATE_O_PENDING:
+ reason_code = UWB_DRP_REASON_ACCEPTED;
+ status = 0;
+ break;
+ case UWB_RSV_STATE_O_MODIFIED:
+ reason_code = UWB_DRP_REASON_MODIFIED;
+ status = 1;
+ break;
+ case UWB_RSV_STATE_O_ESTABLISHED:
+ reason_code = UWB_DRP_REASON_ACCEPTED;
+ status = 1;
+ break;
+ case UWB_RSV_STATE_T_ACCEPTED:
+ reason_code = UWB_DRP_REASON_ACCEPTED;
+ status = 1;
+ break;
+ case UWB_RSV_STATE_T_DENIED:
+ reason_code = UWB_DRP_REASON_DENIED;
+ status = 0;
+ break;
+ default:
+ dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state);
+ return -EINVAL;
+ }
+
+ if (rsv->drp_ie == NULL) {
+ rsv->drp_ie = uwb_drp_ie_alloc();
+ if (rsv->drp_ie == NULL)
+ return -ENOMEM;
+ }
+ drp_ie = rsv->drp_ie;
+
+ uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv));
+ uwb_ie_drp_set_status(drp_ie, status);
+ uwb_ie_drp_set_reason_code(drp_ie, reason_code);
+ uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
+ uwb_ie_drp_set_type(drp_ie, rsv->type);
+
+ if (uwb_rsv_is_owner(rsv)) {
+ switch (rsv->target.type) {
+ case UWB_RSV_TARGET_DEV:
+ drp_ie->dev_addr = rsv->target.dev->dev_addr;
+ break;
+ case UWB_RSV_TARGET_DEVADDR:
+ drp_ie->dev_addr = rsv->target.devaddr;
+ break;
+ }
+ } else
+ drp_ie->dev_addr = rsv->owner->dev_addr;
+
+ uwb_drp_ie_from_bm(drp_ie, &rsv->mas);
+
+ rsv->ie_valid = true;
+ return 0;
+}
+
+/*
+ * Set MAS bits from given MAS bitmap in a single zone of large bitmap.
+ *
+ * We are given a zone id and the MAS bitmap of bits that need to be set in
+ * this zone. Note that this zone may already have bits set and this only
+ * adds settings - we cannot simply assign the MAS bitmap contents to the
+ * zone contents. We iterate over the bits (MAS) in the zone and set the
+ * bits that are set in the given MAS bitmap.
+ */
+static
+void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm)
+{
+ int mas;
+ u16 mas_mask;
+
+ for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
+ mas_mask = 1 << mas;
+ if (mas_bm & mas_mask)
+			set_bit(zone * UWB_MAS_PER_ZONE + mas, bm->bm);
+ }
+}
+
+/**
+ * uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap
+ * @mas: MAS bitmap that will be populated to correspond to the
+ * allocation fields in the DRP IE
+ * @drp_ie: the DRP IE that contains the allocation fields.
+ *
+ * The input format is an array of MAS allocation fields (16 bit Zone
+ * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section
+ * 16.8.6. The output is a full 256 bit MAS bitmap.
+ *
+ * We go over all the allocation fields, for each allocation field we
+ * know which zones are impacted. We iterate over all the zones
+ * impacted and call a function that will set the correct MAS bits in
+ * each zone.
+ */
+void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
+{
+ int numallocs = (drp_ie->hdr.length - 4) / 4;
+ const struct uwb_drp_alloc *alloc;
+ int cnt;
+ u16 zone_bm, mas_bm;
+ u8 zone;
+ u16 zone_mask;
+
+ for (cnt = 0; cnt < numallocs; cnt++) {
+ alloc = &drp_ie->allocs[cnt];
+ zone_bm = le16_to_cpu(alloc->zone_bm);
+ mas_bm = le16_to_cpu(alloc->mas_bm);
+ for (zone = 0; zone < UWB_NUM_ZONES; zone++) {
+ zone_mask = 1 << zone;
+ if (zone_bm & zone_mask)
+ uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm);
+ }
+ }
+}
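+
+#if 0 /* Illustrative sketch only -- not part of this driver. */
+/*
+ * Standalone sketch of the expansion described above: each allocation
+ * field names a set of zones (16-bit zone bitmap) and the MAS used in
+ * every named zone (16-bit MAS bitmap).  Here the full 256-MAS map is
+ * a hypothetical plain byte array rather than a kernel bitmap.
+ */
+#include <stdint.h>
+
+#define EX_NUM_ZONES	16
+#define EX_MAS_PER_ZONE	16
+
+static void ex_alloc_to_map(uint8_t map[EX_NUM_ZONES * EX_MAS_PER_ZONE],
+			    uint16_t zone_bm, uint16_t mas_bm)
+{
+	int zone, mas;
+
+	for (zone = 0; zone < EX_NUM_ZONES; zone++) {
+		if (!(zone_bm & (1 << zone)))
+			continue;
+		for (mas = 0; mas < EX_MAS_PER_ZONE; mas++)
+			if (mas_bm & (1 << mas))
+				map[zone * EX_MAS_PER_ZONE + mas] = 1;
+	}
+}
+#endif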
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
new file mode 100644
index 00000000000..c0b1e5e2bd0
--- /dev/null
+++ b/drivers/uwb/drp.c
@@ -0,0 +1,461 @@
+/*
+ * Ultra Wide Band
+ * Dynamic Reservation Protocol handling
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+#include "uwb-internal.h"
+
+/**
+ * Construct and send the SET DRP IE
+ *
+ * @rc: UWB Host controller
+ * @returns: >= 0 number of bytes still available in the beacon
+ * < 0 errno code on error.
+ *
+ * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
+ * device to include in its beacon at the same time. We thus have to
+ * traverse all reservations and include the DRP IEs of all PENDING
+ * and NEGOTIATED reservations in a SET DRP command for transmission.
+ *
+ * A DRP Availability IE is appended.
+ *
+ * rc->uwb_dev.mutex is held
+ *
+ * FIXME We currently ignore the returned value indicating the remaining space
+ * in beacon. This could be used to deny reservation requests earlier if
+ * determined that they would cause the beacon space to be exceeded.
+ */
+static
+int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc)
+{
+ int result;
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_rc_cmd_set_drp_ie *cmd;
+ struct uwb_rc_evt_set_drp_ie reply;
+ struct uwb_rsv *rsv;
+ int num_bytes = 0;
+ u8 *IEDataptr;
+
+ result = -ENOMEM;
+ /* First traverse all reservations to determine memory needed. */
+ list_for_each_entry(rsv, &rc->reservations, rc_node) {
+ if (rsv->drp_ie != NULL)
+ num_bytes += rsv->drp_ie->hdr.length + 2;
+ }
+ num_bytes += sizeof(rc->drp_avail.ie);
+ cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
+ if (cmd == NULL)
+ goto error;
+ cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
+ cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
+ cmd->wIELength = num_bytes;
+ IEDataptr = (u8 *)&cmd->IEData[0];
+
+ /* Next traverse all reservations to place IEs in allocated memory. */
+ list_for_each_entry(rsv, &rc->reservations, rc_node) {
+ if (rsv->drp_ie != NULL) {
+ memcpy(IEDataptr, rsv->drp_ie,
+ rsv->drp_ie->hdr.length + 2);
+ IEDataptr += rsv->drp_ie->hdr.length + 2;
+ }
+ }
+ memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
+
+ reply.rceb.bEventType = UWB_RC_CET_GENERAL;
+ reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE;
+ result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb,
+ sizeof(*cmd) + num_bytes, &reply.rceb,
+ sizeof(reply));
+ if (result < 0)
+ goto error_cmd;
+ result = le16_to_cpu(reply.wRemainingSpace);
+ if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+ dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution "
+ "failed: %s (%d). RemainingSpace in beacon "
+ "= %d\n", uwb_rc_strerror(reply.bResultCode),
+ reply.bResultCode, result);
+ result = -EIO;
+ } else {
+ dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon "
+ "= %d.\n", result);
+ result = 0;
+ }
+error_cmd:
+ kfree(cmd);
+error:
+ return result;
+
+}
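+
+#if 0 /* Illustrative sketch only -- not part of this driver. */
+/*
+ * Standalone sketch of the payload layout built above: every IE is
+ * copied verbatim (its 2-byte header plus hdr.length octets of body)
+ * back to back into one buffer, which becomes the SET-DRP-IE command
+ * payload.  ex_append_ie() and struct ex_ie_hdr are hypothetical; the
+ * IE body is assumed to follow the header contiguously in memory, as
+ * it does for the driver's DRP IEs.
+ */
+#include <stdint.h>
+#include <stddef.h>
+#include <string.h>
+
+struct ex_ie_hdr {
+	uint8_t element_id;
+	uint8_t length;		/* body length; header not included */
+};
+
+/* Append one IE at offset *used; returns 0, or -1 if it does not fit. */
+static int ex_append_ie(uint8_t *buf, size_t buf_size, size_t *used,
+			const void *ie)
+{
+	const struct ex_ie_hdr *hdr = ie;
+	size_t ie_size = sizeof(*hdr) + hdr->length;
+
+	if (*used + ie_size > buf_size)
+		return -1;
+	memcpy(buf + *used, ie, ie_size);
+	*used += ie_size;
+	return 0;
+}
+#endif
+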
+/**
+ * Send all DRP IEs associated with this host
+ *
+ * @returns: >= 0 number of bytes still available in the beacon
+ * < 0 errno code on error.
+ *
+ * As per the protocol we obtain the host controller device lock to access
+ * bandwidth structures.
+ */
+int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
+{
+ int result;
+
+ mutex_lock(&rc->uwb_dev.mutex);
+ result = uwb_rc_gen_send_drp_ie(rc);
+ mutex_unlock(&rc->uwb_dev.mutex);
+ return result;
+}
+
+void uwb_drp_handle_timeout(struct uwb_rsv *rsv)
+{
+ struct device *dev = &rsv->rc->uwb_dev.dev;
+
+ dev_dbg(dev, "reservation timeout in state %s (%d)\n",
+ uwb_rsv_state_str(rsv->state), rsv->state);
+
+ switch (rsv->state) {
+ case UWB_RSV_STATE_O_INITIATED:
+ if (rsv->is_multicast) {
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
+ return;
+ }
+ break;
+ case UWB_RSV_STATE_O_ESTABLISHED:
+ if (rsv->is_multicast)
+ return;
+ break;
+ default:
+ break;
+ }
+ uwb_rsv_remove(rsv);
+}
+
+/*
+ * Based on the DRP IE, transition a target reservation to a new
+ * state.
+ */
+static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
+ struct uwb_ie_drp *drp_ie)
+{
+ struct device *dev = &rc->uwb_dev.dev;
+ int status;
+ enum uwb_drp_reason reason_code;
+
+ status = uwb_ie_drp_status(drp_ie);
+ reason_code = uwb_ie_drp_reason_code(drp_ie);
+
+ if (status) {
+ switch (reason_code) {
+ case UWB_DRP_REASON_ACCEPTED:
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
+ break;
+ case UWB_DRP_REASON_MODIFIED:
+ dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
+ reason_code, status);
+ break;
+ default:
+ dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
+ reason_code, status);
+ }
+ } else {
+ switch (reason_code) {
+ case UWB_DRP_REASON_ACCEPTED:
+ /* New reservations are handled in uwb_rsv_find(). */
+ break;
+ case UWB_DRP_REASON_DENIED:
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+ break;
+ case UWB_DRP_REASON_CONFLICT:
+ case UWB_DRP_REASON_MODIFIED:
+ dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
+ reason_code, status);
+ break;
+ default:
+ dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
+ reason_code, status);
+ }
+ }
+}
+
+/*
+ * Based on the DRP IE, transition an owner reservation to a new
+ * state.
+ */
+static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
+ struct uwb_ie_drp *drp_ie)
+{
+ struct device *dev = &rc->uwb_dev.dev;
+ int status;
+ enum uwb_drp_reason reason_code;
+
+ status = uwb_ie_drp_status(drp_ie);
+ reason_code = uwb_ie_drp_reason_code(drp_ie);
+
+ if (status) {
+ switch (reason_code) {
+ case UWB_DRP_REASON_ACCEPTED:
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
+ break;
+ case UWB_DRP_REASON_MODIFIED:
+ dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
+ reason_code, status);
+ break;
+ default:
+ dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
+ reason_code, status);
+ }
+ } else {
+ switch (reason_code) {
+ case UWB_DRP_REASON_PENDING:
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
+ break;
+ case UWB_DRP_REASON_DENIED:
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+ break;
+ case UWB_DRP_REASON_CONFLICT:
+ case UWB_DRP_REASON_MODIFIED:
+ dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
+ reason_code, status);
+ break;
+ default:
+ dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
+ reason_code, status);
+ }
+ }
+}
+
+/*
+ * Process a received DRP IE, it's either for a reservation owned by
+ * the RC or targeted at it (or it's for a WUSB cluster reservation).
+ */
+static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src,
+ struct uwb_ie_drp *drp_ie)
+{
+ struct uwb_rsv *rsv;
+
+ rsv = uwb_rsv_find(rc, src, drp_ie);
+ if (!rsv) {
+ /*
+ * No reservation? It's either for a recently
+ * terminated reservation; or the DRP IE couldn't be
+ * processed (e.g., an invalid IE or out of memory).
+ */
+ return;
+ }
+
+ /*
+ * Do nothing with DRP IEs for reservations that have been
+ * terminated.
+ */
+ if (rsv->state == UWB_RSV_STATE_NONE) {
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+ return;
+ }
+
+ if (uwb_ie_drp_owner(drp_ie))
+ uwb_drp_process_target(rc, rsv, drp_ie);
+ else
+ uwb_drp_process_owner(rc, rsv, drp_ie);
+}
+
+
+/*
+ * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
+ * from a device.
+ */
+static
+void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
+ size_t ielen, struct uwb_dev *src_dev)
+{
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_ie_hdr *ie_hdr;
+ void *ptr;
+
+ ptr = drp_evt->ie_data;
+ for (;;) {
+ ie_hdr = uwb_ie_next(&ptr, &ielen);
+ if (!ie_hdr)
+ break;
+
+ switch (ie_hdr->element_id) {
+ case UWB_IE_DRP_AVAILABILITY:
+ /* FIXME: does something need to be done with this? */
+ break;
+ case UWB_IE_DRP:
+ uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr);
+ break;
+ default:
+ dev_warn(dev, "unexpected IE in DRP notification\n");
+ break;
+ }
+ }
+
+ if (ielen > 0)
+ dev_warn(dev, "%d octets remaining in DRP notification\n",
+ (int)ielen);
+}
+
+
+/*
+ * Go through all the DRP IEs and find the ones that conflict with our
+ * reservations.
+ *
+ * FIXME: must resolve the conflict according to the rules in
+ * [ECMA-368].
+ */
+static
+void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
+ size_t ielen, struct uwb_dev *src_dev)
+{
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_ie_hdr *ie_hdr;
+ struct uwb_ie_drp *drp_ie;
+ void *ptr;
+
+ ptr = drp_evt->ie_data;
+ for (;;) {
+ ie_hdr = uwb_ie_next(&ptr, &ielen);
+ if (!ie_hdr)
+ break;
+
+ drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr);
+
+ /* FIXME: check if this DRP IE conflicts. */
+ }
+
+ if (ielen > 0)
+ dev_warn(dev, "%d octets remaining in DRP notification\n",
+ (int)ielen);
+}
+
+
+/*
+ * Terminate all reservations owned by, or targeted at, 'uwb_dev'.
+ */
+static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev)
+{
+ struct uwb_rsv *rsv;
+
+ list_for_each_entry(rsv, &rc->reservations, rc_node) {
+ if (rsv->owner == uwb_dev
+ || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev))
+ uwb_rsv_remove(rsv);
+ }
+}
+
+
+/**
+ * uwbd_evt_handle_rc_drp - handle a DRP_IE event
+ * @evt: the DRP_IE event from the radio controller
+ *
+ * This processes DRP notifications from the radio controller, either
+ * initiating a new reservation or transitioning an existing
+ * reservation into a different state.
+ *
+ * DRP notifications can occur for three different reasons:
+ *
+ * - UWB_DRP_NOTIF_DRP_IE_RCVD: one or more DRP IEs with the RC as
+ *   the target or source have been received.
+ *
+ * These DRP IEs could be new or for an existing reservation.
+ *
+ * If the DRP IE for an existing reservation ceases to be
+ * received for at least mMaxLostBeacons, the reservation should be
+ * considered to be terminated. Note that the TERMINATE reason (see
+ * below) may not always be signalled (e.g., the remote device has
+ * two or more reservations established with the RC).
+ *
+ * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
+ * group conflict with the RC's reservations.
+ *
+ * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
+ * from a device (i.e., it's terminated all reservations).
+ *
+ * Only the software state of the reservations is changed; the setting
+ * of the radio controller's DRP IEs is done after all the events in
+ * an event buffer are processed. This saves waiting multiple times
+ * for the SET_DRP_IE command to complete.
+ */
+int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
+{
+ struct device *dev = &evt->rc->uwb_dev.dev;
+ struct uwb_rc *rc = evt->rc;
+ struct uwb_rc_evt_drp *drp_evt;
+ size_t ielength, bytes_left;
+ struct uwb_dev_addr src_addr;
+ struct uwb_dev *src_dev;
+ int reason;
+
+ /* Is there enough data to decode the event (and any IEs in
+ its payload)? */
+ if (evt->notif.size < sizeof(*drp_evt)) {
+ dev_err(dev, "DRP event: Not enough data to decode event "
+ "[%zu bytes left, %zu needed]\n",
+ evt->notif.size, sizeof(*drp_evt));
+ return 0;
+ }
+ bytes_left = evt->notif.size - sizeof(*drp_evt);
+ drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
+ ielength = le16_to_cpu(drp_evt->ie_length);
+ if (bytes_left != ielength) {
+ dev_err(dev, "DRP event: Not enough data in payload [%zu"
+ "bytes left, %zu declared in the event]\n",
+ bytes_left, ielength);
+ return 0;
+ }
+
+ memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
+ src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
+ if (!src_dev) {
+ /*
+ * A DRP notification from an unrecognized device.
+ *
+ * This is probably from a WUSB device that doesn't
+ * have an EUI-48 and therefore doesn't show up in the
+ * UWB device database. It's safe to simply ignore
+ * these.
+ */
+ return 0;
+ }
+
+ mutex_lock(&rc->rsvs_mutex);
+
+ reason = uwb_rc_evt_drp_reason(drp_evt);
+
+ switch (reason) {
+ case UWB_DRP_NOTIF_DRP_IE_RCVD:
+ uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
+ break;
+ case UWB_DRP_NOTIF_CONFLICT:
+ uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev);
+ break;
+ case UWB_DRP_NOTIF_TERMINATE:
+ uwb_drp_terminate_all(rc, src_dev);
+ break;
+ default:
+ dev_warn(dev, "ignored DRP event with reason code: %d\n", reason);
+ break;
+ }
+
+ mutex_unlock(&rc->rsvs_mutex);
+
+ uwb_dev_put(src_dev);
+ return 0;
+}
diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c
new file mode 100644
index 00000000000..5fe566b7c84
--- /dev/null
+++ b/drivers/uwb/est.c
@@ -0,0 +1,477 @@
+/*
+ * Ultra Wide Band Radio Control
+ * Event Size Tables management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * Infrastructure, code and data tables for guessing the size of
+ * events received on the notification endpoints of UWB radio
+ * controllers.
+ *
+ * You define a table of events and for each, its size and how to get
+ * the extra size.
+ *
+ * ENTRY POINTS:
+ *
+ * uwb_est_{init/destroy}(): To initialize/release the EST subsystem.
+ *
+ * uwb_est_[u]register(): To un/register event size tables
+ * uwb_est_grow()
+ *
+ * uwb_est_find_size(): Get the size of an event
+ * uwb_est_get_size()
+ */
+#include <linux/spinlock.h>
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+#include "uwb-internal.h"
+
+
+struct uwb_est {
+ u16 type_event_high;
+ u16 vendor, product;
+ u8 entries;
+ const struct uwb_est_entry *entry;
+};
+
+
+static struct uwb_est *uwb_est;
+static u8 uwb_est_size;
+static u8 uwb_est_used;
+static DEFINE_RWLOCK(uwb_est_lock);
+
+/**
+ * WUSB Standard Event Size Table, HWA-RC interface
+ *
+ * Sizes for events and notifications type 0 (general), high nibble 0.
+ */
+static
+struct uwb_est_entry uwb_est_00_00xx[] = {
+ [UWB_RC_EVT_IE_RCV] = {
+ .size = sizeof(struct uwb_rc_evt_ie_rcv),
+ .offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength),
+ },
+ [UWB_RC_EVT_BEACON] = {
+ .size = sizeof(struct uwb_rc_evt_beacon),
+ .offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength),
+ },
+ [UWB_RC_EVT_BEACON_SIZE] = {
+ .size = sizeof(struct uwb_rc_evt_beacon_size),
+ },
+ [UWB_RC_EVT_BPOIE_CHANGE] = {
+ .size = sizeof(struct uwb_rc_evt_bpoie_change),
+ .offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change,
+ wBPOIELength),
+ },
+ [UWB_RC_EVT_BP_SLOT_CHANGE] = {
+ .size = sizeof(struct uwb_rc_evt_bp_slot_change),
+ },
+ [UWB_RC_EVT_BP_SWITCH_IE_RCV] = {
+ .size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv),
+ .offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength),
+ },
+ [UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
+ .size = sizeof(struct uwb_rc_evt_dev_addr_conflict),
+ },
+ [UWB_RC_EVT_DRP_AVAIL] = {
+ .size = sizeof(struct uwb_rc_evt_drp_avail)
+ },
+ [UWB_RC_EVT_DRP] = {
+ .size = sizeof(struct uwb_rc_evt_drp),
+ .offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length),
+ },
+ [UWB_RC_EVT_BP_SWITCH_STATUS] = {
+ .size = sizeof(struct uwb_rc_evt_bp_switch_status),
+ },
+ [UWB_RC_EVT_CMD_FRAME_RCV] = {
+ .size = sizeof(struct uwb_rc_evt_cmd_frame_rcv),
+ .offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength),
+ },
+ [UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = {
+ .size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv),
+ .offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength),
+ },
+ [UWB_RC_CMD_CHANNEL_CHANGE] = {
+ .size = sizeof(struct uwb_rc_evt_confirm),
+ },
+ [UWB_RC_CMD_DEV_ADDR_MGMT] = {
+ .size = sizeof(struct uwb_rc_evt_dev_addr_mgmt) },
+ [UWB_RC_CMD_GET_IE] = {
+ .size = sizeof(struct uwb_rc_evt_get_ie),
+ .offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength),
+ },
+ [UWB_RC_CMD_RESET] = {
+ .size = sizeof(struct uwb_rc_evt_confirm),
+ },
+ [UWB_RC_CMD_SCAN] = {
+ .size = sizeof(struct uwb_rc_evt_confirm),
+ },
+ [UWB_RC_CMD_SET_BEACON_FILTER] = {
+ .size = sizeof(struct uwb_rc_evt_confirm),
+ },
+ [UWB_RC_CMD_SET_DRP_IE] = {
+ .size = sizeof(struct uwb_rc_evt_set_drp_ie),
+ },
+ [UWB_RC_CMD_SET_IE] = {
+ .size = sizeof(struct uwb_rc_evt_set_ie),
+ },
+ [UWB_RC_CMD_SET_NOTIFICATION_FILTER] = {
+ .size = sizeof(struct uwb_rc_evt_confirm),
+ },
+ [UWB_RC_CMD_SET_TX_POWER] = {
+ .size = sizeof(struct uwb_rc_evt_confirm),
+ },
+ [UWB_RC_CMD_SLEEP] = {
+ .size = sizeof(struct uwb_rc_evt_confirm),
+ },
+ [UWB_RC_CMD_START_BEACON] = {
+ .size = sizeof(struct uwb_rc_evt_confirm),
+ },
+ [UWB_RC_CMD_STOP_BEACON] = {
+ .size = sizeof(struct uwb_rc_evt_confirm),
+ },
+ [UWB_RC_CMD_BP_MERGE] = {
+ .size = sizeof(struct uwb_rc_evt_confirm),
+ },
+ [UWB_RC_CMD_SEND_COMMAND_FRAME] = {
+ .size = sizeof(struct uwb_rc_evt_confirm),
+ },
+ [UWB_RC_CMD_SET_ASIE_NOTIF] = {
+ .size = sizeof(struct uwb_rc_evt_confirm),
+ },
+};
+
+static
+struct uwb_est_entry uwb_est_01_00xx[] = {
+ [UWB_RC_DAA_ENERGY_DETECTED] = {
+ .size = sizeof(struct uwb_rc_evt_daa_energy_detected),
+ },
+ [UWB_RC_SET_DAA_ENERGY_MASK] = {
+ .size = sizeof(struct uwb_rc_evt_set_daa_energy_mask),
+ },
+ [UWB_RC_SET_NOTIFICATION_FILTER_EX] = {
+ .size = sizeof(struct uwb_rc_evt_set_notification_filter_ex),
+ },
+};
+
+/**
+ * Initialize the EST subsystem
+ *
+ * Register the standard tables also.
+ *
+ * FIXME: tag init
+ */
+int uwb_est_create(void)
+{
+ int result;
+
+ uwb_est_size = 2;
+ uwb_est_used = 0;
+ uwb_est = kzalloc(uwb_est_size * sizeof(uwb_est[0]), GFP_KERNEL);
+ if (uwb_est == NULL)
+ return -ENOMEM;
+
+ result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff,
+ uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx));
+ if (result < 0)
+ goto out;
+ result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff,
+ uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx));
+out:
+ return result;
+}
+
+
+/** Clean it up */
+void uwb_est_destroy(void)
+{
+ kfree(uwb_est);
+ uwb_est = NULL;
+ uwb_est_size = uwb_est_used = 0;
+}
+
+
+/**
+ * Double the capacity of the EST table
+ *
+ * @returns 0 if ok, < 0 errno on error.
+ */
+static
+int uwb_est_grow(void)
+{
+ size_t actual_size = uwb_est_size * sizeof(uwb_est[0]);
+ void *new = kmalloc(2 * actual_size, GFP_ATOMIC);
+ if (new == NULL)
+ return -ENOMEM;
+ memcpy(new, uwb_est, actual_size);
+ memset(new + actual_size, 0, actual_size);
+ kfree(uwb_est);
+ uwb_est = new;
+ uwb_est_size *= 2;
+ return 0;
+}
+
+
+/**
+ * Register an event size table
+ *
+ * Makes room for it if the table is full, and then inserts it in the
+ * right position (entries are sorted by type, event_high, vendor and
+ * then product).
+ *
+ * @vendor: vendor code for matching against the device (0x0000 and
+ * 0xffff mean any); use 0x0000 to force all to match without
+ * checking possible vendor specific ones, 0xffff to match
+ * after checking vendor specific ones.
+ *
+ * @product: product code from that vendor; same matching rules, use
+ * 0x0000 for not allowing vendor specific matches, 0xffff
+ * for allowing.
+ *
+ * This arrangement just makes the tables sort differently. Because the
+ * table is sorted by growing type-event_high-vendor-product, a zero
+ * vendor will match before a 0x456a vendor, which will match
+ * before a 0xffff vendor.
+ *
+ * @returns 0 if ok, < 0 errno on error (e.g. -ENOMEM if the table could not be grown).
+ */
+/* FIXME: add bus type to vendor/product code */
+int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product,
+ const struct uwb_est_entry *entry, size_t entries)
+{
+ unsigned long flags;
+ unsigned itr;
+ u16 type_event_high;
+ int result = 0;
+
+ write_lock_irqsave(&uwb_est_lock, flags);
+ if (uwb_est_used == uwb_est_size) {
+ result = uwb_est_grow();
+ if (result < 0)
+ goto out;
+ }
+ /* Find the right spot to insert it in */
+ type_event_high = type << 8 | event_high;
+	for (itr = 0; itr < uwb_est_used; itr++)
+		if (uwb_est[itr].type_event_high > type_event_high
+		    || (uwb_est[itr].type_event_high == type_event_high
+			&& (uwb_est[itr].vendor > vendor
+			    || (uwb_est[itr].vendor == vendor
+				&& uwb_est[itr].product >= product))))
+			break;
+
+ /* Shift others to make room for the new one? */
+	if (itr < uwb_est_used)
+		memmove(&uwb_est[itr+1], &uwb_est[itr],
+			(uwb_est_used - itr) * sizeof(uwb_est[0]));
+ uwb_est[itr].type_event_high = type << 8 | event_high;
+ uwb_est[itr].vendor = vendor;
+ uwb_est[itr].product = product;
+ uwb_est[itr].entry = entry;
+ uwb_est[itr].entries = entries;
+ uwb_est_used++;
+out:
+ write_unlock_irqrestore(&uwb_est_lock, flags);
+ return result;
+}
+EXPORT_SYMBOL_GPL(uwb_est_register);
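+
+#if 0 /* Illustrative sketch only -- not part of this driver. */
+/*
+ * Standalone sketch of the ordering used above: entries are kept
+ * sorted by the (type:event_high, vendor, product) key, compared
+ * lexicographically, so that a 0x0000 vendor sorts before a specific
+ * vendor, which sorts before the 0xffff wildcard.  ex_key_cmp() is a
+ * hypothetical helper.
+ */
+#include <stdint.h>
+
+struct ex_est_key {
+	uint16_t type_event_high;	/* type << 8 | event_high */
+	uint16_t vendor;
+	uint16_t product;
+};
+
+static int ex_key_cmp(const struct ex_est_key *a, const struct ex_est_key *b)
+{
+	if (a->type_event_high != b->type_event_high)
+		return a->type_event_high < b->type_event_high ? -1 : 1;
+	if (a->vendor != b->vendor)
+		return a->vendor < b->vendor ? -1 : 1;
+	if (a->product != b->product)
+		return a->product < b->product ? -1 : 1;
+	return 0;
+}
+#endif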
+
+
+/**
+ * Unregister an event size table
+ *
+ * This just removes the specified entry and moves the ones after it
+ * to fill in the gap. This is needed to keep the list sorted; no
+ * reallocation is done to reduce the size of the table.
+ *
+ * We unregister by all the data we used to register instead of by
+ * pointer to the @entry array because we might have used the same
+ * table for a bunch of IDs (for example).
+ *
+ * @returns 0 if ok, < 0 errno on error (-ENOENT if not found).
+ */
+int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product,
+ const struct uwb_est_entry *entry, size_t entries)
+{
+ unsigned long flags;
+ unsigned itr;
+ struct uwb_est est_cmp = {
+ .type_event_high = type << 8 | event_high,
+ .vendor = vendor,
+ .product = product,
+ .entry = entry,
+ .entries = entries
+ };
+ write_lock_irqsave(&uwb_est_lock, flags);
+ for (itr = 0; itr < uwb_est_used; itr++)
+ if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp)))
+ goto found;
+ write_unlock_irqrestore(&uwb_est_lock, flags);
+ return -ENOENT;
+
+found:
+	if (itr < uwb_est_used - 1)	/* Not last one? move ones above */
+		memmove(&uwb_est[itr], &uwb_est[itr+1],
+			(uwb_est_used - itr - 1) * sizeof(uwb_est[0]));
+ uwb_est_used--;
+ write_unlock_irqrestore(&uwb_est_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uwb_est_unregister);
+
+
+/**
+ * Get the size of an event from a table
+ *
+ * @rceb: pointer to the buffer with the event
+ * @rceb_size: size of the area pointed to by @rceb in bytes.
+ * @returns: > 0 Size of the event
+ * -ENOSPC An area big enough was not provided to look
+ * ahead into the event's guts and guess the size.
+ * -ENOENT Unknown event code (wEvent).
+ *
+ * This will look at the received RCEB and guess what is the total
+ * size. For variable sized events, it will look further ahead into
+ * their length field to see how much data should be read.
+ *
+ * Note this size is *not* final--the neh (Notification/Event Handle)
+ * might specify an extra size to add.
+ */
+static
+ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est,
+ u8 event_low, const struct uwb_rceb *rceb,
+ size_t rceb_size)
+{
+ unsigned offset;
+ ssize_t size;
+ struct device *dev = &uwb_rc->uwb_dev.dev;
+ const struct uwb_est_entry *entry;
+
+ size = -ENOENT;
+ if (event_low >= est->entries) { /* in range? */
+ dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u out of range\n",
+ est, est->type_event_high, est->vendor, est->product,
+ est->entries, event_low);
+ goto out;
+ }
+ size = -ENOENT;
+ entry = &est->entry[event_low];
+ if (entry->size == 0 && entry->offset == 0) { /* unknown? */
+ dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u unknown\n",
+ est, est->type_event_high, est->vendor, est->product,
+ est->entries, event_low);
+ goto out;
+ }
+ offset = entry->offset; /* extra fries with that? */
+ if (offset == 0)
+ size = entry->size;
+ else {
+		/* Oops, got an extra size field at 'offset'--read it */
+ const void *ptr = rceb;
+ size_t type_size = 0;
+ offset--;
+ size = -ENOSPC; /* enough data for more? */
+ switch (entry->type) {
+ case UWB_EST_16: type_size = sizeof(__le16); break;
+ case UWB_EST_8: type_size = sizeof(u8); break;
+ default: BUG();
+ }
+ if (offset + type_size > rceb_size) {
+ dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: "
+ "not enough data to read extra size\n",
+ est, est->type_event_high, est->vendor,
+ est->product, est->entries);
+ goto out;
+ }
+ size = entry->size;
+ ptr += offset;
+ switch (entry->type) {
+ case UWB_EST_16: size += le16_to_cpu(*(__le16 *)ptr); break;
+ case UWB_EST_8: size += *(u8 *)ptr; break;
+ default: BUG();
+ }
+ }
+out:
+ return size;
+}
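+
+#if 0 /* Illustrative sketch only -- not part of this driver. */
+/*
+ * Standalone sketch of the variable-size handling above: the total
+ * event size is the fixed part plus a little-endian 16-bit length
+ * field stored @offset bytes into the event.  ex_event_size() is a
+ * hypothetical plain-C helper; the driver additionally supports 8-bit
+ * length fields (UWB_EST_8).
+ */
+#include <stdint.h>
+#include <stddef.h>
+
+/* Returns the total event size, or -1 if @buf_len is too small to
+ * read the 16-bit length field at @offset. */
+static long ex_event_size(const uint8_t *buf, size_t buf_len,
+			  size_t fixed_size, size_t offset)
+{
+	uint16_t extra;
+
+	if (offset + sizeof(extra) > buf_len)
+		return -1;
+	extra = (uint16_t)buf[offset] | ((uint16_t)buf[offset + 1] << 8);
+	return (long)(fixed_size + extra);
+}
+#endif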
+
+
+/**
+ * Guesses the size of a WA event
+ *
+ * @rceb: pointer to the buffer with the event
+ * @rceb_size: size of the area pointed to by @rceb in bytes.
+ * @returns: > 0 Size of the event
+ * -ENOSPC An area big enough was not provided to look
+ * ahead into the event's guts and guess the size.
+ * -ENOENT Unknown event code (wEvent).
+ *
+ * This will look at the received RCEB and guess what is the total
+ * size by checking all the tables registered with
+ * uwb_est_register(). For variable sized events, it will look further
+ * ahead into their length field to see how much data should be read.
+ *
+ * Note this size is *not* final--the neh (Notification/Event Handle)
+ * might specify an extra size to add or replace.
+ */
+ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
+ size_t rceb_size)
+{
+ /* FIXME: add vendor/product data */
+ ssize_t size;
+ struct device *dev = &rc->uwb_dev.dev;
+ unsigned long flags;
+ unsigned itr;
+ u16 type_event_high, event;
+ u8 *ptr = (u8 *) rceb;
+
+ read_lock_irqsave(&uwb_est_lock, flags);
+ d_printf(2, dev, "Size query for event 0x%02x/%04x/%02x,"
+ " buffer size %ld\n",
+ (unsigned) rceb->bEventType,
+ (unsigned) le16_to_cpu(rceb->wEvent),
+ (unsigned) rceb->bEventContext,
+ (long) rceb_size);
+ size = -ENOSPC;
+ if (rceb_size < sizeof(*rceb))
+ goto out;
+ event = le16_to_cpu(rceb->wEvent);
+ type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8;
+ for (itr = 0; itr < uwb_est_used; itr++) {
+ d_printf(3, dev, "Checking EST 0x%04x/%04x/%04x\n",
+ uwb_est[itr].type_event_high, uwb_est[itr].vendor,
+ uwb_est[itr].product);
+ if (uwb_est[itr].type_event_high != type_event_high)
+ continue;
+ size = uwb_est_get_size(rc, &uwb_est[itr],
+ event & 0x00ff, rceb, rceb_size);
+ /* try more tables that might handle the same type */
+ if (size != -ENOENT)
+ goto out;
+ }
+ dev_dbg(dev, "event 0x%02x/%04x/%02x: no handlers available; "
+ "RCEB %02x %02x %02x %02x\n",
+ (unsigned) rceb->bEventType,
+ (unsigned) le16_to_cpu(rceb->wEvent),
+ (unsigned) rceb->bEventContext,
+ ptr[0], ptr[1], ptr[2], ptr[3]);
+ size = -ENOENT;
+out:
+ read_unlock_irqrestore(&uwb_est_lock, flags);
+ return size;
+}
+EXPORT_SYMBOL_GPL(uwb_est_find_size);
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
new file mode 100644
index 00000000000..3d26fa0f8ae
--- /dev/null
+++ b/drivers/uwb/hwa-rc.c
@@ -0,0 +1,926 @@
+/*
+ * WUSB Host Wire Adapter: Radio Control Interface (WUSB[8.6])
+ * Radio Control command/event transport
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Initialize the Radio Control interface Driver.
+ *
+ * For each device probed, creates an 'struct hwarc' which contains
+ * just the representation of the UWB Radio Controller, and the logic
+ * for reading notifications and passing them to the UWB Core.
+ *
+ * So we initialize all of those, register the UWB Radio Controller
+ * and setup the notification/event handle to pipe the notifications
+ * to the UWB management Daemon.
+ *
+ * Command and event filtering.
+ *
+ * This is the driver for the Radio Control Interface described in WUSB
+ * 1.0. The core UWB module assumes that all drivers are compliant to the
+ * WHCI 0.95 specification. We thus create a filter that parses all
+ * incoming messages from the (WUSB 1.0) device and manipulate them to
+ * conform to the WHCI 0.95 specification. Similarly, outgoing messages
+ * are parsed and manipulated to conform to the WUSB 1.0 compliant messages
+ * that the device expects. Only a few messages are affected:
+ * Affected events:
+ * UWB_RC_EVT_BEACON
+ * UWB_RC_EVT_BP_SLOT_CHANGE
+ * UWB_RC_EVT_DRP_AVAIL
+ * UWB_RC_EVT_DRP
+ * Affected commands:
+ * UWB_RC_CMD_SCAN
+ * UWB_RC_CMD_SET_DRP_IE
+ *
+ *
+ *
+ */
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/wusb.h>
+#include <linux/usb/wusb-wa.h>
+#include <linux/uwb.h>
+#include "uwb-internal.h"
+#define D_LOCAL 1
+#include <linux/uwb/debug.h>
+
+/* The device uses commands and events from the WHCI specification, although
+ * reporting itself as WUSB compliant. */
+#define WUSB_QUIRK_WHCI_CMD_EVT 0x01
+
+/**
+ * Descriptor for an instance of the UWB Radio Control Driver that
+ * attaches to the RCI interface of the Host Wired Adapter.
+ *
+ * Unless there is a lock specific to the 'data members', all access
+ * is protected by uwb_rc->mutex.
+ *
+ * The NEEP (Notification/Event EndPoint) URB (@neep_urb) writes to
+ * @rd_buffer. Note there is no locking because it is perfectly (heh!)
+ * serialized--probe() submits an URB, callback is called, processes
+ * the data (synchronously), submits another URB, and so on. There is
+ * no concurrent access to the buffer.
+ */
+struct hwarc {
+ struct usb_device *usb_dev;
+ struct usb_interface *usb_iface;
+ struct uwb_rc *uwb_rc; /* UWB host controller */
+ struct urb *neep_urb; /* Notification endpoint handling */
+ struct edc neep_edc;
+ void *rd_buffer; /* NEEP read buffer */
+};
+
+
+/* Beacon received notification (WUSB 1.0 [8.6.3.2]) */
+struct uwb_rc_evt_beacon_WUSB_0100 {
+ struct uwb_rceb rceb;
+ u8 bChannelNumber;
+ __le16 wBPSTOffset;
+ u8 bLQI;
+ u8 bRSSI;
+ __le16 wBeaconInfoLength;
+ u8 BeaconInfo[];
+} __attribute__((packed));
+
+/**
+ * Filter WUSB 1.0 BEACON RCV notification to be WHCI 0.95
+ *
+ * @header: the incoming event
+ * @buf_size: size of buffer containing incoming event
+ * @new_size: size of event after filtering completed
+ *
+ * The WHCI 0.95 spec has a "Beacon Type" field. This value is unknown at
+ * the time we receive the beacon from WUSB so we just set it to
+ * UWB_RC_BEACON_TYPE_NEIGHBOR as a default.
+ * The solution below allocates memory upon receipt of every beacon from a
+ * WUSB device. This will deteriorate performance. What is the right way to
+ * do this?
+ */
+static
+int hwarc_filter_evt_beacon_WUSB_0100(struct uwb_rc *rc,
+ struct uwb_rceb **header,
+ const size_t buf_size,
+ size_t *new_size)
+{
+ struct uwb_rc_evt_beacon_WUSB_0100 *be;
+ struct uwb_rc_evt_beacon *newbe;
+ size_t bytes_left, ielength;
+ struct device *dev = &rc->uwb_dev.dev;
+
+ be = container_of(*header, struct uwb_rc_evt_beacon_WUSB_0100, rceb);
+ bytes_left = buf_size;
+ if (bytes_left < sizeof(*be)) {
+ dev_err(dev, "Beacon Received Notification: Not enough data "
+ "to decode for filtering (%zu vs %zu bytes needed)\n",
+ bytes_left, sizeof(*be));
+ return -EINVAL;
+ }
+ bytes_left -= sizeof(*be);
+ ielength = le16_to_cpu(be->wBeaconInfoLength);
+ if (bytes_left < ielength) {
+ dev_err(dev, "Beacon Received Notification: Not enough data "
+ "to decode IEs (%zu vs %zu bytes needed)\n",
+ bytes_left, ielength);
+ return -EINVAL;
+ }
+ newbe = kzalloc(sizeof(*newbe) + ielength, GFP_ATOMIC);
+ if (newbe == NULL)
+ return -ENOMEM;
+ newbe->rceb = be->rceb;
+ newbe->bChannelNumber = be->bChannelNumber;
+ newbe->bBeaconType = UWB_RC_BEACON_TYPE_NEIGHBOR;
+ newbe->wBPSTOffset = be->wBPSTOffset;
+ newbe->bLQI = be->bLQI;
+ newbe->bRSSI = be->bRSSI;
+ newbe->wBeaconInfoLength = be->wBeaconInfoLength;
+ memcpy(newbe->BeaconInfo, be->BeaconInfo, ielength);
+ *header = &newbe->rceb;
+ *new_size = sizeof(*newbe) + ielength;
+ return 1; /* calling function will free memory */
+}
+
+
+/* DRP Availability change notification (WUSB 1.0 [8.6.3.8]) */
+struct uwb_rc_evt_drp_avail_WUSB_0100 {
+ struct uwb_rceb rceb;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/**
+ * Filter WUSB 1.0 DRP AVAILABILITY CHANGE notification to be WHCI 0.95
+ *
+ * @header: the incoming event
+ * @buf_size: size of buffer containing incoming event
+ * @new_size: size of event after filtering completed
+ */
+static
+int hwarc_filter_evt_drp_avail_WUSB_0100(struct uwb_rc *rc,
+ struct uwb_rceb **header,
+ const size_t buf_size,
+ size_t *new_size)
+{
+ struct uwb_rc_evt_drp_avail_WUSB_0100 *da;
+ struct uwb_rc_evt_drp_avail *newda;
+ struct uwb_ie_hdr *ie_hdr;
+ size_t bytes_left, ielength;
+ struct device *dev = &rc->uwb_dev.dev;
+
+
+ da = container_of(*header, struct uwb_rc_evt_drp_avail_WUSB_0100, rceb);
+ bytes_left = buf_size;
+ if (bytes_left < sizeof(*da)) {
+ dev_err(dev, "Not enough data to decode DRP Avail "
+ "Notification for filtering. Expected %zu, "
+ "received %zu.\n", (size_t)sizeof(*da), bytes_left);
+ return -EINVAL;
+ }
+ bytes_left -= sizeof(*da);
+ ielength = le16_to_cpu(da->wIELength);
+ if (bytes_left < ielength) {
+ dev_err(dev, "DRP Avail Notification filter: IE length "
+ "[%zu bytes] does not match actual length "
+ "[%zu bytes].\n", ielength, bytes_left);
+ return -EINVAL;
+ }
+ if (ielength < sizeof(*ie_hdr)) {
+ dev_err(dev, "DRP Avail Notification filter: Not enough "
+ "data to decode IE [%zu bytes, %zu needed]\n",
+ ielength, sizeof(*ie_hdr));
+ return -EINVAL;
+ }
+ ie_hdr = (void *) da->IEData;
+ if (ie_hdr->length > 32) {
+ dev_err(dev, "DRP Availability Change event has unexpected "
+ "length for filtering. Expected < 32 bytes, "
+ "got %zu bytes.\n", (size_t)ie_hdr->length);
+ return -EINVAL;
+ }
+ newda = kzalloc(sizeof(*newda), GFP_ATOMIC);
+ if (newda == NULL)
+ return -ENOMEM;
+ newda->rceb = da->rceb;
+ memcpy(newda->bmp, (u8 *) ie_hdr + sizeof(*ie_hdr), ie_hdr->length);
+ *header = &newda->rceb;
+ *new_size = sizeof(*newda);
+ return 1; /* calling function will free memory */
+}
+
+
+/* DRP notification (WUSB 1.0 [8.6.3.9]) */
+struct uwb_rc_evt_drp_WUSB_0100 {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr wSrcAddr;
+ u8 bExplicit;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/**
+ * Filter WUSB 1.0 DRP Notification to be WHCI 0.95
+ *
+ * @header: the incoming event
+ * @buf_size: size of buffer containing incoming event
+ * @new_size: size of event after filtering completed
+ *
+ * It is hard to manage DRP reservations without having a Reason code.
+ * Unfortunately there is none in the WUSB spec. We just set the default to
+ * DRP IE RECEIVED.
+ * We do not currently use the bBeaconSlotNumber value, so we set this to
+ * zero for now.
+ */
+static
+int hwarc_filter_evt_drp_WUSB_0100(struct uwb_rc *rc,
+ struct uwb_rceb **header,
+ const size_t buf_size,
+ size_t *new_size)
+{
+ struct uwb_rc_evt_drp_WUSB_0100 *drpev;
+ struct uwb_rc_evt_drp *newdrpev;
+ size_t bytes_left, ielength;
+ struct device *dev = &rc->uwb_dev.dev;
+
+ drpev = container_of(*header, struct uwb_rc_evt_drp_WUSB_0100, rceb);
+ bytes_left = buf_size;
+ if (bytes_left < sizeof(*drpev)) {
+ dev_err(dev, "Not enough data to decode DRP Notification "
+ "for filtering. Expected %zu, received %zu.\n",
+ (size_t)sizeof(*drpev), bytes_left);
+ return -EINVAL;
+ }
+ ielength = le16_to_cpu(drpev->wIELength);
+ bytes_left -= sizeof(*drpev);
+ if (bytes_left < ielength) {
+ dev_err(dev, "DRP Notification filter: header length [%zu "
+ "bytes] does not match actual length [%zu "
+ "bytes].\n", ielength, bytes_left);
+ return -EINVAL;
+ }
+ newdrpev = kzalloc(sizeof(*newdrpev) + ielength, GFP_ATOMIC);
+ if (newdrpev == NULL)
+ return -ENOMEM;
+ newdrpev->rceb = drpev->rceb;
+ newdrpev->src_addr = drpev->wSrcAddr;
+ newdrpev->reason = UWB_DRP_NOTIF_DRP_IE_RCVD;
+ newdrpev->beacon_slot_number = 0;
+ newdrpev->ie_length = drpev->wIELength;
+ memcpy(newdrpev->ie_data, drpev->IEData, ielength);
+ *header = &newdrpev->rceb;
+ *new_size = sizeof(*newdrpev) + ielength;
+ return 1; /* calling function will free memory */
+}
+
+
+/* Scan Command (WUSB 1.0 [8.6.2.5]) */
+struct uwb_rc_cmd_scan_WUSB_0100 {
+ struct uwb_rccb rccb;
+ u8 bChannelNumber;
+ u8 bScanState;
+} __attribute__((packed));
+
+/**
+ * Filter WHCI 0.95 SCAN command to be WUSB 1.0 SCAN command
+ *
+ * @header: command sent to device (compliant to WHCI 0.95)
+ * @size: size of command sent to device
+ *
+ * We only reduce the size by two bytes because the WUSB 1.0 scan command
+ * does not have the last field (wStarttime). Also, make sure we don't send
+ * the device an unexpected scan type.
+ */
+static
+int hwarc_filter_cmd_scan_WUSB_0100(struct uwb_rc *rc,
+ struct uwb_rccb **header,
+ size_t *size)
+{
+ struct uwb_rc_cmd_scan *sc;
+
+ sc = container_of(*header, struct uwb_rc_cmd_scan, rccb);
+
+ if (sc->bScanState == UWB_SCAN_ONLY_STARTTIME)
+ sc->bScanState = UWB_SCAN_ONLY;
+ /* Don't send the last two bytes. */
+ *size -= 2;
+ return 0;
+}
+
+
+/* SET DRP IE command (WUSB 1.0 [8.6.2.7]) */
+struct uwb_rc_cmd_set_drp_ie_WUSB_0100 {
+ struct uwb_rccb rccb;
+ u8 bExplicit;
+ __le16 wIELength;
+ struct uwb_ie_drp IEData[];
+} __attribute__((packed));
+
+/**
+ * Filter WHCI 0.95 SET DRP IE command to be WUSB 1.0 SET DRP IE command
+ *
+ * @header: command sent to device (compliant to WHCI 0.95)
+ * @size: size of command sent to device
+ *
+ * WUSB has an extra bExplicit field - we assume always explicit
+ * negotiation so this field is set. The command expected by the device is
+ * thus larger than the one prepared by the driver so we need to
+ * reallocate memory to accommodate this.
+ * We trust the driver to send us the correct data so no checking is done
+ * on incoming data - even though it is variable length.
+ */
+static
+int hwarc_filter_cmd_set_drp_ie_WUSB_0100(struct uwb_rc *rc,
+ struct uwb_rccb **header,
+ size_t *size)
+{
+ struct uwb_rc_cmd_set_drp_ie *orgcmd;
+ struct uwb_rc_cmd_set_drp_ie_WUSB_0100 *cmd;
+ size_t ielength;
+
+ orgcmd = container_of(*header, struct uwb_rc_cmd_set_drp_ie, rccb);
+ ielength = le16_to_cpu(orgcmd->wIELength);
+ cmd = kzalloc(sizeof(*cmd) + ielength, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+ cmd->rccb = orgcmd->rccb;
+ cmd->bExplicit = 0;
+ cmd->wIELength = orgcmd->wIELength;
+ memcpy(cmd->IEData, orgcmd->IEData, ielength);
+ *header = &cmd->rccb;
+ *size = sizeof(*cmd) + ielength;
+ return 1; /* calling function will free memory */
+}
+
+
+/**
+ * Filter data from WHCI driver to WUSB device
+ *
+ * @header: WHCI 0.95 compliant command from driver
+ * @size: length of command
+ *
+ * The routine managing commands to the device (uwb_rc_cmd()) will call the
+ * filtering function pointer (if it exists) before it passes any data to
+ * the device. At this time the command has been formatted according to
+ * WHCI 0.95 and is ready to be sent to the device.
+ *
+ * The filter function will be provided with the current command and its
+ * length. The function will manipulate the command if necessary and
+ * potentially reallocate memory for a command that needs more memory than
+ * the given command. If new memory was allocated the function will return 1
+ * to indicate to the calling function that the memory needs to be freed
+ * when it is no longer needed. The size will contain the new length of the
+ * command.
+ * If memory has not been allocated we rely on the original mechanisms to
+ * free the memory of the command - even when we reduce the value of size.
+ */
+static
+int hwarc_filter_cmd_WUSB_0100(struct uwb_rc *rc, struct uwb_rccb **header,
+ size_t *size)
+{
+ int result;
+ struct uwb_rccb *rccb = *header;
+ int cmd = le16_to_cpu(rccb->wCommand);
+ switch (cmd) {
+ case UWB_RC_CMD_SCAN:
+ result = hwarc_filter_cmd_scan_WUSB_0100(rc, header, size);
+ break;
+ case UWB_RC_CMD_SET_DRP_IE:
+ result = hwarc_filter_cmd_set_drp_ie_WUSB_0100(rc, header, size);
+ break;
+ default:
+ result = -ENOANO;
+ break;
+ }
+ return result;
+}
+
+
+/**
+ * Filter data from WHCI driver to WUSB device
+ *
+ * @header: WHCI 0.95 compliant command from driver
+ * @size: length of command
+ *
+ * Filter commands based on which protocol the device supports. The WUSB
+ * errata should be the same as WHCI 0.95 so we do not filter that here -
+ * only WUSB 1.0.
+ */
+static
+int hwarc_filter_cmd(struct uwb_rc *rc, struct uwb_rccb **header,
+ size_t *size)
+{
+ int result = -ENOANO;
+ if (rc->version == 0x0100)
+ result = hwarc_filter_cmd_WUSB_0100(rc, header, size);
+ return result;
+}
+
+
+/**
+ * Compute return value as sum of incoming value and value at given offset
+ *
+ * @rceb: event for which we compute the size, it contains a variable
+ * length field.
+ * @core_size: size of the "non variable" part of the event
+ * @offset: place in event where the length of the variable part is stored
+ * @buf_size: total length of buffer in which event arrived - we need to make
+ * sure we read the offset in memory that is still part of the event
+ */
+static
+ssize_t hwarc_get_event_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
+ size_t core_size, size_t offset,
+ const size_t buf_size)
+{
+ ssize_t size = -ENOSPC;
+ const void *ptr = rceb;
+ size_t type_size = sizeof(__le16);
+ struct device *dev = &rc->uwb_dev.dev;
+
+	if (offset + type_size > buf_size) {
+ dev_err(dev, "Not enough data to read extra size of event "
+ "0x%02x/%04x/%02x, only got %zu bytes.\n",
+ rceb->bEventType, le16_to_cpu(rceb->wEvent),
+ rceb->bEventContext, buf_size);
+ goto out;
+ }
+ ptr += offset;
+ size = core_size + le16_to_cpu(*(__le16 *)ptr);
+out:
+ return size;
+}
+
+
+/* Beacon slot change notification (WUSB 1.0 [8.6.3.5]) */
+struct uwb_rc_evt_bp_slot_change_WUSB_0100 {
+ struct uwb_rceb rceb;
+ u8 bSlotNumber;
+} __attribute__((packed));
+
+
+/**
+ * Filter data from WUSB device to WHCI driver
+ *
+ * @header: incoming event
+ * @buf_size: size of buffer in which event arrived
+ * @_real_size: actual size of the event in the buffer
+ * @_new_size: size of the event after filtering
+ *
+ * We don't know how the buffer is constructed - there may be more than one
+ * event in it so buffer length does not determine event length. We first
+ * determine the expected size of the incoming event. This value is passed
+ * back only if the actual filtering succeeded (so we know the computed
+ * expected size is correct). This value will be zero if
+ * the event did not need any filtering.
+ *
+ * WHCI interprets the BP Slot Change event's data differently than
+ * WUSB. The event sizes are exactly the same. The data field
+ * indicates the new beacon slot in which a RC is transmitting its
+ * beacon. The maximum value of this is 96 (wMacBPLength ECMA-368
+ * 17.16 (Table 117)). We thus know that the WUSB value will not set
+ * the bit bNoSlot, so we don't really do anything (placeholder).
+ */
+static
+int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header,
+ const size_t buf_size, size_t *_real_size,
+ size_t *_new_size)
+{
+ int result = -ENOANO;
+ struct uwb_rceb *rceb = *header;
+ int event = le16_to_cpu(rceb->wEvent);
+	ssize_t event_size;
+ size_t core_size, offset;
+
+ if (rceb->bEventType != UWB_RC_CET_GENERAL)
+ goto out;
+ switch (event) {
+ case UWB_RC_EVT_BEACON:
+ core_size = sizeof(struct uwb_rc_evt_beacon_WUSB_0100);
+ offset = offsetof(struct uwb_rc_evt_beacon_WUSB_0100,
+ wBeaconInfoLength);
+ event_size = hwarc_get_event_size(rc, rceb, core_size,
+ offset, buf_size);
+ if (event_size < 0)
+ goto out;
+ *_real_size = event_size;
+ result = hwarc_filter_evt_beacon_WUSB_0100(rc, header,
+ buf_size, _new_size);
+ break;
+ case UWB_RC_EVT_BP_SLOT_CHANGE:
+ *_new_size = *_real_size =
+ sizeof(struct uwb_rc_evt_bp_slot_change_WUSB_0100);
+ result = 0;
+ break;
+
+ case UWB_RC_EVT_DRP_AVAIL:
+ core_size = sizeof(struct uwb_rc_evt_drp_avail_WUSB_0100);
+ offset = offsetof(struct uwb_rc_evt_drp_avail_WUSB_0100,
+ wIELength);
+ event_size = hwarc_get_event_size(rc, rceb, core_size,
+ offset, buf_size);
+ if (event_size < 0)
+ goto out;
+ *_real_size = event_size;
+ result = hwarc_filter_evt_drp_avail_WUSB_0100(
+ rc, header, buf_size, _new_size);
+ break;
+
+ case UWB_RC_EVT_DRP:
+ core_size = sizeof(struct uwb_rc_evt_drp_WUSB_0100);
+ offset = offsetof(struct uwb_rc_evt_drp_WUSB_0100, wIELength);
+ event_size = hwarc_get_event_size(rc, rceb, core_size,
+ offset, buf_size);
+ if (event_size < 0)
+ goto out;
+ *_real_size = event_size;
+ result = hwarc_filter_evt_drp_WUSB_0100(rc, header,
+ buf_size, _new_size);
+ break;
+
+ default:
+ break;
+ }
+out:
+ return result;
+}
+
+/**
+ * Filter data from WUSB device to WHCI driver
+ *
+ * @header: incoming event
+ * @buf_size: size of buffer in which event arrived
+ * @_real_size: actual size of the event in the buffer
+ * @_new_size: size of the event after filtering
+ *
+ * Filter events based on which protocol the device supports. The WUSB
+ * errata should be the same as WHCI 0.95 so we do not filter that here -
+ * only WUSB 1.0.
+ *
+ * If we don't handle it, we return -ENOANO (why the weird error code?
+ * well, so if I get it, I can pinpoint in the code that raised
+ * it...after all, not too many places use the higher error codes).
+ */
+static
+int hwarc_filter_event(struct uwb_rc *rc, struct uwb_rceb **header,
+ const size_t buf_size, size_t *_real_size,
+ size_t *_new_size)
+{
+ int result = -ENOANO;
+ if (rc->version == 0x0100)
+ result = hwarc_filter_event_WUSB_0100(
+ rc, header, buf_size, _real_size, _new_size);
+ return result;
+}
+
+
+/**
+ * Execute a UWB RC command on the HWA
+ *
+ * @uwb_rc: Instance of a Radio Controller that is a HWA
+ * @cmd: Buffer containing the RCCB and payload to execute
+ * @cmd_size: Size of the command buffer.
+ *
+ * NOTE: rc's mutex has to be locked
+ */
+static
+int hwarc_cmd(struct uwb_rc *uwb_rc, const struct uwb_rccb *cmd, size_t cmd_size)
+{
+ struct hwarc *hwarc = uwb_rc->priv;
+ return usb_control_msg(
+ hwarc->usb_dev, usb_sndctrlpipe(hwarc->usb_dev, 0),
+ WA_EXEC_RC_CMD, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, hwarc->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ (void *) cmd, cmd_size, 100 /* FIXME: this is totally arbitrary */);
+}
+
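+/* uwb_rc reset op: reset the whole USB device to reset the radio controller */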
+static
+int hwarc_reset(struct uwb_rc *uwb_rc)
+{
+ struct hwarc *hwarc = uwb_rc->priv;
+ return usb_reset_device(hwarc->usb_dev);
+}
+
+/**
+ * Callback for the notification and event endpoint
+ *
+ * Checks that everything is fine and then passes the read data to
+ * the notification/event handling mechanism (neh).
+ */
+static
+void hwarc_neep_cb(struct urb *urb)
+{
+ struct hwarc *hwarc = urb->context;
+ struct usb_interface *usb_iface = hwarc->usb_iface;
+ struct device *dev = &usb_iface->dev;
+ int result;
+
+ switch (result = urb->status) {
+ case 0:
+ d_printf(3, dev, "NEEP: receive stat %d, %zu bytes\n",
+ urb->status, (size_t)urb->actual_length);
+ uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer,
+ urb->actual_length);
+ break;
+ case -ECONNRESET: /* Not an error, but a controlled situation; */
+ case -ENOENT: /* (we killed the URB)...so, no broadcast */
+ d_printf(2, dev, "NEEP: URB reset/noent %d\n", urb->status);
+ goto out;
+ case -ESHUTDOWN: /* going away! */
+ d_printf(2, dev, "NEEP: URB down %d\n", urb->status);
+ goto out;
+ default: /* On general errors, retry unless it gets ugly */
+ if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME))
+ goto error_exceeded;
+ dev_err(dev, "NEEP: URB error %d\n", urb->status);
+ }
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ d_printf(3, dev, "NEEP: submit %d\n", result);
+ if (result < 0) {
+ dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n",
+ result);
+ goto error;
+ }
+out:
+ return;
+
+error_exceeded:
+ dev_err(dev, "NEEP: URB max acceptable errors "
+ "exceeded, resetting device\n");
+error:
+ uwb_rc_neh_error(hwarc->uwb_rc, result);
+ uwb_rc_reset_all(hwarc->uwb_rc);
+ return;
+}
+
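+/*
+ * Initialize driver-private hwarc state; currently this only resets
+ * the error density counter that hwarc_neep_cb() uses to decide when
+ * too many URB errors have occurred.
+ */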
+static void hwarc_init(struct hwarc *hwarc)
+{
+ edc_init(&hwarc->neep_edc);
+}
+
+/**
+ * Initialize the notification/event endpoint
+ *
+ * Note this is effectively a parallel thread; it knows that
+ * hwarc->uwb_rc always exists because the existence of a 'hwarc'
+ * means that there is a reference on the hwarc->uwb_rc (see
+ * _probe()), and thus _neep_cb() can execute safely.
+ */
+static int hwarc_neep_init(struct uwb_rc *rc)
+{
+ struct hwarc *hwarc = rc->priv;
+ struct usb_interface *iface = hwarc->usb_iface;
+ struct usb_device *usb_dev = interface_to_usbdev(iface);
+ struct device *dev = &iface->dev;
+ int result;
+ struct usb_endpoint_descriptor *epd;
+
+ epd = &iface->cur_altsetting->endpoint[0].desc;
+ hwarc->rd_buffer = (void *) __get_free_page(GFP_KERNEL);
+ if (hwarc->rd_buffer == NULL) {
+ dev_err(dev, "Unable to allocate notification's read buffer\n");
+ goto error_rd_buffer;
+ }
+ hwarc->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (hwarc->neep_urb == NULL) {
+ dev_err(dev, "Unable to allocate notification URB\n");
+ goto error_urb_alloc;
+ }
+ usb_fill_int_urb(hwarc->neep_urb, usb_dev,
+ usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
+ hwarc->rd_buffer, PAGE_SIZE,
+ hwarc_neep_cb, hwarc, epd->bInterval);
+ result = usb_submit_urb(hwarc->neep_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "Cannot submit notification URB: %d\n", result);
+ goto error_neep_submit;
+ }
+ return 0;
+
+error_neep_submit:
+ usb_free_urb(hwarc->neep_urb);
+error_urb_alloc:
+ free_page((unsigned long)hwarc->rd_buffer);
+error_rd_buffer:
+ return -ENOMEM;
+}
+
+
+/** Clean up all the notification endpoint resources */
+static void hwarc_neep_release(struct uwb_rc *rc)
+{
+ struct hwarc *hwarc = rc->priv;
+
+ usb_kill_urb(hwarc->neep_urb);
+ usb_free_urb(hwarc->neep_urb);
+ free_page((unsigned long)hwarc->rd_buffer);
+}
+
+/**
+ * Get the version from class-specific descriptor
+ *
+ * NOTE: this descriptor comes bundled with the big configuration
+ * descriptor, along with the interface and endpoint descriptors, so
+ * we just look for it in the cached copy kept by the USB stack.
+ *
+ * NOTE2: We convert LE fields to CPU order.
+ */
+static int hwarc_get_version(struct uwb_rc *rc)
+{
+ int result;
+
+ struct hwarc *hwarc = rc->priv;
+ struct uwb_rc_control_intf_class_desc *descr;
+ struct device *dev = &rc->uwb_dev.dev;
+ struct usb_device *usb_dev = hwarc->usb_dev;
+ char *itr;
+ struct usb_descriptor_header *hdr;
+ size_t itr_size, actconfig_idx;
+ u16 version;
+
+ /* pointer subtraction already yields the configuration index */
+ actconfig_idx = usb_dev->actconfig - usb_dev->config;
+ itr = usb_dev->rawdescriptors[actconfig_idx];
+ itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
+ while (itr_size >= sizeof(*hdr)) {
+ hdr = (struct usb_descriptor_header *) itr;
+ d_printf(3, dev, "Extra device descriptor: "
+ "type %02x/%u bytes @ %zu (%zu left)\n",
+ hdr->bDescriptorType, hdr->bLength,
+ (itr - usb_dev->rawdescriptors[actconfig_idx]),
+ itr_size);
+ if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL)
+ goto found;
+ itr += hdr->bLength;
+ itr_size -= hdr->bLength;
+ }
+ dev_err(dev, "cannot find Radio Control Interface Class descriptor\n");
+ return -ENODEV;
+
+found:
+ result = -EINVAL;
+ if (hdr->bLength > itr_size) { /* is it available? */
+ dev_err(dev, "incomplete Radio Control Interface Class "
+ "descriptor (%zu bytes left, %u needed)\n",
+ itr_size, hdr->bLength);
+ goto error;
+ }
+ if (hdr->bLength < sizeof(*descr)) {
+ dev_err(dev, "short Radio Control Interface Class "
+ "descriptor\n");
+ goto error;
+ }
+ descr = (struct uwb_rc_control_intf_class_desc *) hdr;
+ /* Make LE fields CPU order */
+ version = __le16_to_cpu(descr->bcdRCIVersion);
+ if (version != 0x0100) {
+ dev_err(dev, "Device reports protocol version 0x%04x. We "
+ "do not support that. \n", version);
+ result = -EINVAL;
+ goto error;
+ }
+ rc->version = version;
+ d_printf(3, dev, "Device supports WUSB protocol version 0x%04x \n",
+ rc->version);
+ result = 0;
+error:
+ return result;
+}
+
+/*
+ * By creating a 'uwb_rc', we have a reference on it -- that reference
+ * is the one we drop when we disconnect.
+ *
+ * No need to switch altsettings; according to WUSB1.0[8.6.1.1], there
+ * is only one altsetting allowed.
+ */
+static int hwarc_probe(struct usb_interface *iface,
+ const struct usb_device_id *id)
+{
+ int result;
+ struct uwb_rc *uwb_rc;
+ struct hwarc *hwarc;
+ struct device *dev = &iface->dev;
+
+ result = -ENOMEM;
+ uwb_rc = uwb_rc_alloc();
+ if (uwb_rc == NULL) {
+ dev_err(dev, "unable to allocate RC instance\n");
+ goto error_rc_alloc;
+ }
+ hwarc = kzalloc(sizeof(*hwarc), GFP_KERNEL);
+ if (hwarc == NULL) {
+ dev_err(dev, "unable to allocate HWA RC instance\n");
+ goto error_alloc;
+ }
+ hwarc_init(hwarc);
+ hwarc->usb_dev = usb_get_dev(interface_to_usbdev(iface));
+ hwarc->usb_iface = usb_get_intf(iface);
+ hwarc->uwb_rc = uwb_rc;
+
+ uwb_rc->owner = THIS_MODULE;
+ uwb_rc->start = hwarc_neep_init;
+ uwb_rc->stop = hwarc_neep_release;
+ uwb_rc->cmd = hwarc_cmd;
+ uwb_rc->reset = hwarc_reset;
+ if (id->driver_info & WUSB_QUIRK_WHCI_CMD_EVT) {
+ uwb_rc->filter_cmd = NULL;
+ uwb_rc->filter_event = NULL;
+ } else {
+ uwb_rc->filter_cmd = hwarc_filter_cmd;
+ uwb_rc->filter_event = hwarc_filter_event;
+ }
+
+ result = uwb_rc_add(uwb_rc, dev, hwarc);
+ if (result < 0)
+ goto error_rc_add;
+ result = hwarc_get_version(uwb_rc);
+ if (result < 0) {
+ dev_err(dev, "cannot retrieve version of RC \n");
+ goto error_get_version;
+ }
+ usb_set_intfdata(iface, hwarc);
+ return 0;
+
+error_get_version:
+ uwb_rc_rm(uwb_rc);
+error_rc_add:
+ usb_put_intf(iface);
+ usb_put_dev(hwarc->usb_dev);
+error_alloc:
+ uwb_rc_put(uwb_rc);
+error_rc_alloc:
+ return result;
+}
+
+static void hwarc_disconnect(struct usb_interface *iface)
+{
+ struct hwarc *hwarc = usb_get_intfdata(iface);
+ struct uwb_rc *uwb_rc = hwarc->uwb_rc;
+
+ usb_set_intfdata(hwarc->usb_iface, NULL);
+ uwb_rc_rm(uwb_rc);
+ usb_put_intf(hwarc->usb_iface);
+ usb_put_dev(hwarc->usb_dev);
+ d_printf(1, &hwarc->usb_iface->dev, "freed hwarc %p\n", hwarc);
+ kfree(hwarc);
+ uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */
+}
+
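+/*
+ * Devices flagged with WUSB_QUIRK_WHCI_CMD_EVT already speak WHCI-style
+ * commands and events, so hwarc_probe() does not install the WUSB 1.0
+ * filter callbacks for them.
+ */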
+/** USB device IDs that we handle */
+static struct usb_device_id hwarc_id_table[] = {
+ /* D-Link DUB-1210 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02),
+ .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
+ /* Intel i1480 (using firmware 1.3PA2-20070828) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x8086, 0x0c3b, 0xe0, 0x01, 0x02),
+ .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
+ /* Generic match for the Radio Control interface */
+ { USB_INTERFACE_INFO(0xe0, 0x01, 0x02), },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, hwarc_id_table);
+
+static struct usb_driver hwarc_driver = {
+ .name = "hwa-rc",
+ .probe = hwarc_probe,
+ .disconnect = hwarc_disconnect,
+ .id_table = hwarc_id_table,
+};
+
+static int __init hwarc_driver_init(void)
+{
+ int result;
+ result = usb_register(&hwarc_driver);
+ if (result < 0)
+ printk(KERN_ERR "HWA-RC: Cannot register USB driver: %d\n",
+ result);
+ return result;
+
+}
+module_init(hwarc_driver_init);
+
+static void __exit hwarc_driver_exit(void)
+{
+ usb_deregister(&hwarc_driver);
+}
+module_exit(hwarc_driver_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Host Wireless Adapter Radio Control Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/Makefile b/drivers/uwb/i1480/Makefile
new file mode 100644
index 00000000000..212bbc7d4c3
--- /dev/null
+++ b/drivers/uwb/i1480/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_UWB_I1480U) += dfu/ i1480-est.o
+obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp/
diff --git a/drivers/uwb/i1480/dfu/Makefile b/drivers/uwb/i1480/dfu/Makefile
new file mode 100644
index 00000000000..bd1b9f25424
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_UWB_I1480U) += i1480-dfu-usb.o
+
+i1480-dfu-usb-objs := \
+ dfu.o \
+ mac.o \
+ phy.o \
+ usb.o
+
+
diff --git a/drivers/uwb/i1480/dfu/dfu.c b/drivers/uwb/i1480/dfu/dfu.c
new file mode 100644
index 00000000000..9097b3b3038
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/dfu.c
@@ -0,0 +1,217 @@
+/*
+ * Intel Wireless UWB Link 1480
+ * Main driver
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Common code for firmware upload used by the USB and PCI version;
+ * i1480_fw_upload() takes a device descriptor and uses the function
+ * pointers it provides to upload firmware and prepare the PHY.
+ *
+ * As well, provides common functions used by the rest of the code.
+ */
+#include "i1480-dfu.h"
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/uwb.h>
+#include <linux/random.h>
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/**
+ * i1480_rceb_check - Check RCEB for expected field values
+ * @i1480: pointer to device for which RCEB is being checked
+ * @rceb: RCEB being checked
+ * @cmd: which command the RCEB is related to
+ * @context: expected context
+ * @expected_type: expected event type
+ * @expected_event: expected event
+ *
+ * If @cmd is NULL, do not print error messages, but still return an error
+ * code.
+ *
+ * Return 0 if @rceb matches the expected values, -EINVAL otherwise.
+ */
+int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb,
+ const char *cmd, u8 context, u8 expected_type,
+ unsigned expected_event)
+{
+ int result = 0;
+ struct device *dev = i1480->dev;
+ if (rceb->bEventContext != context) {
+ if (cmd)
+ dev_err(dev, "%s: unexpected context id 0x%02x "
+ "(expected 0x%02x)\n", cmd,
+ rceb->bEventContext, context);
+ result = -EINVAL;
+ }
+ if (rceb->bEventType != expected_type) {
+ if (cmd)
+ dev_err(dev, "%s: unexpected event type 0x%02x "
+ "(expected 0x%02x)\n", cmd,
+ rceb->bEventType, expected_type);
+ result = -EINVAL;
+ }
+ if (le16_to_cpu(rceb->wEvent) != expected_event) {
+ if (cmd)
+ dev_err(dev, "%s: unexpected event 0x%04x "
+ "(expected 0x%04x)\n", cmd,
+ le16_to_cpu(rceb->wEvent), expected_event);
+ result = -EINVAL;
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(i1480_rceb_check);
+
+
+/**
+ * Execute a Radio Control Command
+ *
+ * Command data has to be in i1480->cmd_buf.
+ *
+ * @returns 0 if the expected reply event was received and verified, or
+ * a < 0 errno code on error.
+ */
+ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size,
+ size_t reply_size)
+{
+ ssize_t result;
+ struct uwb_rceb *reply = i1480->evt_buf;
+ struct uwb_rccb *cmd = i1480->cmd_buf;
+ u16 expected_event = reply->wEvent;
+ u8 expected_type = reply->bEventType;
+ u8 context;
+
+ d_fnstart(3, i1480->dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size);
+ init_completion(&i1480->evt_complete);
+ i1480->evt_result = -EINPROGRESS;
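+ /*
+ * Tag the command with a random context ID (avoiding 0x00 and
+ * 0xff) so the reply event can be matched to this command.
+ */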
+ do {
+ get_random_bytes(&context, 1);
+ } while (context == 0x00 || context == 0xff);
+ cmd->bCommandContext = context;
+ result = i1480->cmd(i1480, cmd_name, cmd_size);
+ if (result < 0)
+ goto error;
+ /* wait for the callback to report an event was received */
+ result = wait_for_completion_interruptible_timeout(
+ &i1480->evt_complete, HZ);
+ if (result == 0) {
+ result = -ETIMEDOUT;
+ goto error;
+ }
+ if (result < 0)
+ goto error;
+ result = i1480->evt_result;
+ if (result < 0) {
+ dev_err(i1480->dev, "%s: command reply reception failed: %zd\n",
+ cmd_name, result);
+ goto error;
+ }
+ /*
+ * Firmware versions >= 1.4.12224 for IOGear GUWA100U generate a
+ * spurious notification after firmware is downloaded. So check whether
+ * the received RCEB is such a notification before assuming that the
+ * command has failed.
+ */
+ if (i1480_rceb_check(i1480, i1480->evt_buf, NULL,
+ 0, 0xfd, 0x0022) == 0) {
+ /* Now wait for the actual RCEB for this command. */
+ result = i1480->wait_init_done(i1480);
+ if (result < 0)
+ goto error;
+ result = i1480->evt_result;
+ }
+ if (result != reply_size) {
+ dev_err(i1480->dev, "%s returned only %zu bytes, %zu expected\n",
+ cmd_name, result, reply_size);
+ result = -EINVAL;
+ goto error;
+ }
+ /* Verify we got the right event in response */
+ result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context,
+ expected_type, expected_event);
+error:
+ d_fnend(3, i1480->dev, "(%p, %s, %zu) = %zd\n",
+ i1480, cmd_name, cmd_size, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(i1480_cmd);
+
+
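+/*
+ * Print the device's U and L state words (read from 0x80080000) to the
+ * log; used as a diagnostic when a firmware upload stage fails.
+ */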
+static
+int i1480_print_state(struct i1480 *i1480)
+{
+ int result;
+ u32 *buf = (u32 *) i1480->cmd_buf;
+
+ result = i1480->read(i1480, 0x80080000, 2 * sizeof(*buf));
+ if (result < 0) {
+ dev_err(i1480->dev, "cannot read U & L states: %d\n", result);
+ goto error;
+ }
+ dev_info(i1480->dev, "state U 0x%08x, L 0x%08x\n", buf[0], buf[1]);
+error:
+ return result;
+}
+
+
+/*
+ * PCI probe, firmware uploader
+ *
+ * _mac_fw_upload() will call rc_setup(), which needs an rc_release().
+ */
+int i1480_fw_upload(struct i1480 *i1480)
+{
+ int result;
+
+ result = i1480_pre_fw_upload(i1480); /* PHY pre fw */
+ if (result < 0 && result != -ENOENT) {
+ i1480_print_state(i1480);
+ goto error;
+ }
+ result = i1480_mac_fw_upload(i1480); /* MAC fw */
+ if (result < 0) {
+ if (result == -ENOENT)
+ dev_err(i1480->dev, "Cannot locate MAC FW file '%s'\n",
+ i1480->mac_fw_name);
+ else
+ i1480_print_state(i1480);
+ goto error;
+ }
+ result = i1480_phy_fw_upload(i1480); /* PHY fw */
+ if (result < 0 && result != -ENOENT) {
+ i1480_print_state(i1480);
+ goto error_rc_release;
+ }
+ /*
+ * FIXME: find some reliable way to check whether firmware is running
+ * properly. Maybe use some standard request that has no side effects?
+ */
+ dev_info(i1480->dev, "firmware uploaded successfully\n");
+error_rc_release:
+ if (i1480->rc_release)
+ i1480->rc_release(i1480);
+ result = 0;
+error:
+ return result;
+}
+EXPORT_SYMBOL_GPL(i1480_fw_upload);
diff --git a/drivers/uwb/i1480/dfu/i1480-dfu.h b/drivers/uwb/i1480/dfu/i1480-dfu.h
new file mode 100644
index 00000000000..46f45e800f3
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/i1480-dfu.h
@@ -0,0 +1,260 @@
+/*
+ * i1480 Device Firmware Upload
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This driver is the firmware uploader for the Intel Wireless UWB
+ * Link 1480 device (both in the USB and PCI incarnations).
+ *
+ * The process is quite simple: we stop the device, write the firmware
+ * to its memory and then restart it. Wait for the device to let us
+ * know it is done booting firmware. Ready.
+ *
+ * We might have to upload PHY firmware before or after the MAC
+ * firmware; the PHY upload itself can be done in one of two ways
+ * (using a normal firmware image or through the MPI port).
+ *
+ * Because USB and PCI use common methods, we just make ops out of the
+ * common operations (read, write, wait_init_done and cmd) and
+ * implement them in usb.c and pci.c.
+ *
+ * The flow is (some parts omitted):
+ *
+ * i1480_{usb,pci}_probe() On enumerate/discovery
+ * i1480_fw_upload()
+ * i1480_pre_fw_upload()
+ * __mac_fw_upload()
+ * fw_hdrs_load()
+ * mac_fw_hdrs_push()
+ * i1480->write() [i1480_{usb,pci}_write()]
+ * i1480_fw_cmp()
+ * i1480->read() [i1480_{usb,pci}_read()]
+ * i1480_mac_fw_upload()
+ * __mac_fw_upload()
+ * i1480->rc_setup()
+ * i1480->wait_init_done()
+ * i1480_cmd_reset()
+ * i1480->cmd() [i1480_{usb,pci}_cmd()]
+ * ...
+ * i1480_phy_fw_upload()
+ * request_firmware()
+ * i1480_mpi_write()
+ * i1480->cmd() [i1480_{usb,pci}_cmd()]
+ *
+ * Once the probe function enumerates the device and uploads the
+ * firmware, we just exit with -ENODEV, as we don't really want to
+ * attach to the device.
+ */
+#ifndef __i1480_DFU_H__
+#define __i1480_DFU_H__
+
+#include <linux/uwb/spec.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+
+#define i1480_FW_UPLOAD_MODE_MASK (cpu_to_le32(0x00000018))
+
+#if i1480_FW > 0x00000302
+#define i1480_RCEB_EXTENDED
+#endif
+
+struct uwb_rccb;
+struct uwb_rceb;
+
+/*
+ * Common firmware upload handlers
+ *
+ * Normally you embed this struct in another one specific to your hw.
+ *
+ * @write Write to device's memory from buffer.
+ * @read Read from device's memory to i1480->evt_buf.
+ * @setup Setup device after basic firmware is uploaded
+ * @wait_init_done
+ * Wait for the device to send a notification saying init
+ * is done.
+ * @cmd FOP for issuing the command to the hardware. The
+ * command data is contained in i1480->cmd_buf and the size
+ * is supplied as an argument. The command reply is put
+ * in i1480->evt_buf and its size in i1480->evt_result (or,
+ * on error, a < 0 errno code).
+ *
+ * @cmd_buf Memory buffer used to send commands to the device.
+ * Allocated by the upper layers i1480_fw_upload().
+ * Size has to be @buf_size.
+ * @evt_buf Memory buffer used to place the async notifications
+ * received by the hw. Allocated by the upper layers
+ * i1480_fw_upload().
+ * Size has to be @buf_size.
+ * @evt_complete
+ * Low level driver uses this to notify code waiting for
+ * an event that the event has arrived and data is in
+ * i1480->evt_buf (and size/result in i1480->evt_result).
+ * @hw_rev
+ * Use this value to activate dfu code to support new revisions
+ * of hardware. i1480_init() sets this to a default value.
+ * It should be updated by the USB and PCI code.
+ */
+struct i1480 {
+ struct device *dev;
+
+ int (*write)(struct i1480 *, u32 addr, const void *, size_t);
+ int (*read)(struct i1480 *, u32 addr, size_t);
+ int (*rc_setup)(struct i1480 *);
+ void (*rc_release)(struct i1480 *);
+ int (*wait_init_done)(struct i1480 *);
+ int (*cmd)(struct i1480 *, const char *cmd_name, size_t cmd_size);
+ const char *pre_fw_name;
+ const char *mac_fw_name;
+ const char *mac_fw_name_deprecate; /* FIXME: Will go away */
+ const char *phy_fw_name;
+ u8 hw_rev;
+
+ size_t buf_size; /* size of both evt_buf and cmd_buf */
+ void *evt_buf, *cmd_buf;
+ ssize_t evt_result;
+ struct completion evt_complete;
+};
+
+static inline
+void i1480_init(struct i1480 *i1480)
+{
+ i1480->hw_rev = 1;
+ init_completion(&i1480->evt_complete);
+}
+
+extern int i1480_fw_upload(struct i1480 *);
+extern int i1480_pre_fw_upload(struct i1480 *);
+extern int i1480_mac_fw_upload(struct i1480 *);
+extern int i1480_phy_fw_upload(struct i1480 *);
+extern ssize_t i1480_cmd(struct i1480 *, const char *, size_t, size_t);
+extern int i1480_rceb_check(const struct i1480 *,
+ const struct uwb_rceb *, const char *, u8,
+ u8, unsigned);
+
+enum {
+ /* Vendor specific command type */
+ i1480_CET_VS1 = 0xfd,
+ /* i1480 commands */
+ i1480_CMD_SET_IP_MAS = 0x000e,
+ i1480_CMD_GET_MAC_PHY_INFO = 0x0003,
+ i1480_CMD_MPI_WRITE = 0x000f,
+ i1480_CMD_MPI_READ = 0x0010,
+ /* i1480 events */
+#if i1480_FW > 0x00000302
+ i1480_EVT_CONFIRM = 0x0002,
+ i1480_EVT_RM_INIT_DONE = 0x0101,
+ i1480_EVT_DEV_ADD = 0x0103,
+ i1480_EVT_DEV_RM = 0x0104,
+ i1480_EVT_DEV_ID_CHANGE = 0x0105,
+ i1480_EVT_GET_MAC_PHY_INFO = i1480_CMD_GET_MAC_PHY_INFO,
+#else
+ i1480_EVT_CONFIRM = 0x0002,
+ i1480_EVT_RM_INIT_DONE = 0x0101,
+ i1480_EVT_DEV_ADD = 0x0103,
+ i1480_EVT_DEV_RM = 0x0104,
+ i1480_EVT_DEV_ID_CHANGE = 0x0105,
+ i1480_EVT_GET_MAC_PHY_INFO = i1480_EVT_CONFIRM,
+#endif
+};
+
+
+struct i1480_evt_confirm {
+ struct uwb_rceb rceb;
+#ifdef i1480_RCEB_EXTENDED
+ __le16 wParamLength;
+#endif
+ u8 bResultCode;
+} __attribute__((packed));
+
+
+struct i1480_rceb {
+ struct uwb_rceb rceb;
+#ifdef i1480_RCEB_EXTENDED
+ __le16 wParamLength;
+#endif
+} __attribute__((packed));
+
+
+/**
+ * Get MAC & PHY Information confirm event structure
+ *
+ * Confirm event returned by the command.
+ */
+struct i1480_evt_confirm_GMPI {
+#if i1480_FW > 0x00000302
+ struct uwb_rceb rceb;
+ __le16 wParamLength;
+ __le16 status;
+ u8 mac_addr[6]; /* EUI-64 bit IEEE address [still 8 bytes?] */
+ u8 dev_addr[2];
+ __le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */
+ u8 hw_rev;
+ u8 phy_vendor;
+ u8 phy_rev; /* major = v >> 8; minor = v & 0xff */
+ __le16 mac_caps;
+ u8 phy_caps[3];
+ u8 key_stores;
+ __le16 mcast_addr_stores;
+ u8 sec_mode_supported;
+#else
+ struct uwb_rceb rceb;
+ u8 status;
+ u8 mac_addr[8]; /* EUI-64 bit IEEE address [still 8 bytes?] */
+ u8 dev_addr[2];
+ __le16 mac_fw_rev; /* major = v >> 8; minor = v & 0xff */
+ __le16 phy_fw_rev; /* major = v >> 8; minor = v & 0xff */
+ __le16 mac_caps;
+ u8 phy_caps;
+ u8 key_stores;
+ __le16 mcast_addr_stores;
+ u8 sec_mode_supported;
+#endif
+} __attribute__((packed));
+
+
+struct i1480_cmd_mpi_write {
+ struct uwb_rccb rccb;
+ __le16 size;
+ u8 data[];
+};
+
+
+struct i1480_cmd_mpi_read {
+ struct uwb_rccb rccb;
+ __le16 size;
+ struct {
+ u8 page, offset;
+ } __attribute__((packed)) data[];
+} __attribute__((packed));
+
+
+struct i1480_evt_mpi_read {
+ struct uwb_rceb rceb;
+#ifdef i1480_RCEB_EXTENDED
+ __le16 wParamLength;
+#endif
+ u8 bResultCode;
+ __le16 size;
+ struct {
+ u8 page, offset, value;
+ } __attribute__((packed)) data[];
+} __attribute__((packed));
+
+
+#endif /* #ifndef __i1480_DFU_H__ */
diff --git a/drivers/uwb/i1480/dfu/mac.c b/drivers/uwb/i1480/dfu/mac.c
new file mode 100644
index 00000000000..2e4d8f07c16
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/mac.c
@@ -0,0 +1,527 @@
+/*
+ * Intel Wireless UWB Link 1480
+ * MAC Firmware upload implementation
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Implementation of the code for parsing the firmware file (extract
+ * the headers and binary code chunks) in the fw_*() functions. The
+ * code to upload pre and mac firmwares is the same, so it uses a
+ * common entry point in __mac_fw_upload(), which uses the i1480
+ * function pointers to push the firmware to the device.
+ */
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/uwb.h>
+#include "i1480-dfu.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/*
+ * Descriptor for a continuous segment of MAC fw data
+ */
+struct fw_hdr {
+ unsigned long address;
+ size_t length;
+ const u32 *bin;
+ struct fw_hdr *next;
+};
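+
+/*
+ * Firmware blob layout as parsed by fw_hdr_load()/fw_hdrs_load() below:
+ * a sequence of chunks, each (2 + length) little-endian 32-bit words,
+ *
+ *   [address][length][<length> x u32 of payload][address][length]...
+ */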
+
+
+/* Free a chain of firmware headers */
+static
+void fw_hdrs_free(struct fw_hdr *hdr)
+{
+ struct fw_hdr *next;
+
+ while (hdr) {
+ next = hdr->next;
+ kfree(hdr);
+ hdr = next;
+ }
+}
+
+
+/* Fill a firmware header descriptor from a memory buffer */
+static
+int fw_hdr_load(struct i1480 *i1480, struct fw_hdr *hdr, unsigned hdr_cnt,
+ const char *_data, const u32 *data_itr, const u32 *data_top)
+{
+ size_t hdr_offset = (const char *) data_itr - _data;
+ size_t remaining_size = (void *) data_top - (void *) data_itr;
+ if (data_itr + 2 > data_top) {
+ dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in header at "
+ "offset %zu, limit %zu\n",
+ hdr_cnt, hdr_offset,
+ (const char *) data_itr + 2 - _data,
+ (const char *) data_top - _data);
+ return -EINVAL;
+ }
+ hdr->next = NULL;
+ hdr->address = le32_to_cpu(*data_itr++);
+ hdr->length = le32_to_cpu(*data_itr++);
+ hdr->bin = data_itr;
+ if (hdr->length > remaining_size) {
+ dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in data; "
+ "chunk too long (%zu bytes), only %zu left\n",
+ hdr_cnt, hdr_offset, hdr->length, remaining_size);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+/**
+ * Get a buffer where the firmware is supposed to be and create a
+ * chain of headers linking them together.
+ *
+ * @phdr: where to place the pointer to the first header (headers link
+ * to the next via the @hdr->next ptr); need to free the whole
+ * chain when done.
+ *
+ * @_data: Pointer to the data buffer.
+ *
+ * @data_size: Size of the data buffer (bytes); data size has to be a
+ * multiple of 4. Function will fail if not.
+ *
+ * Goes over the whole binary blob; reads the first chunk and creates
+ * a fw hdr from it (which points to where the data is in @_data and
+ * the length of the chunk); then goes on to the next chunk until
+ * done. Each header is linked to the next.
+ */
+static
+int fw_hdrs_load(struct i1480 *i1480, struct fw_hdr **phdr,
+ const char *_data, size_t data_size)
+{
+ int result;
+ unsigned hdr_cnt = 0;
+ u32 *data = (u32 *) _data, *data_itr, *data_top;
+ struct fw_hdr *hdr, **prev_hdr = phdr;
+
+ result = -EINVAL;
+ /* Check size is ok and pointer is aligned */
+ if (data_size % sizeof(u32) != 0)
+ goto error;
+ if ((unsigned long) _data % sizeof(u16) != 0)
+ goto error;
+ *phdr = NULL;
+ data_itr = data;
+ data_top = (u32 *) (_data + data_size);
+ while (data_itr < data_top) {
+ result = -ENOMEM;
+ hdr = kmalloc(sizeof(*hdr), GFP_KERNEL);
+ if (hdr == NULL) {
+ dev_err(i1480->dev, "Cannot allocate fw header "
+ "for chunk #%u\n", hdr_cnt);
+ goto error_alloc;
+ }
+ result = fw_hdr_load(i1480, hdr, hdr_cnt,
+ _data, data_itr, data_top);
+ if (result < 0)
+ goto error_load;
+ data_itr += 2 + hdr->length;
+ *prev_hdr = hdr;
+ prev_hdr = &hdr->next;
+ hdr_cnt++;
+ }
+ *prev_hdr = NULL;
+ return 0;
+
+error_load:
+ kfree(hdr);
+error_alloc:
+ fw_hdrs_free(*phdr);
+error:
+ return result;
+}
+
+
+/**
+ * Compares a chunk of fw with one in the device's memory
+ *
+ * @i1480: Device instance
+ * @hdr: Pointer to the firmware chunk
+ * @returns: 0 if equal, < 0 errno on error. If > 0, it is the offset
+ * where the difference was found (plus one).
+ *
+ * Kind of dirty and simplistic, but does the trick in both the PCI
+ * and USB version. We do a quick[er] memcmp(), and if it fails, we do
+ * a byte-by-byte to find the offset.
+ */
+static
+ssize_t i1480_fw_cmp(struct i1480 *i1480, struct fw_hdr *hdr)
+{
+ ssize_t result = 0;
+ u32 src_itr = 0, cnt;
+ size_t size = hdr->length*sizeof(hdr->bin[0]);
+ size_t chunk_size;
+ u8 *bin = (u8 *) hdr->bin;
+
+ while (size > 0) {
+ chunk_size = size < i1480->buf_size ? size : i1480->buf_size;
+ result = i1480->read(i1480, hdr->address + src_itr, chunk_size);
+ if (result < 0) {
+ dev_err(i1480->dev, "error reading for verification: "
+ "%zd\n", result);
+ goto error;
+ }
+ if (memcmp(i1480->cmd_buf, bin + src_itr, result)) {
+ u8 *buf = i1480->cmd_buf;
+ d_printf(2, i1480->dev,
+ "original data @ %p + %u, %zu bytes\n",
+ bin, src_itr, result);
+ d_dump(4, i1480->dev, bin + src_itr, result);
+ for (cnt = 0; cnt < result; cnt++)
+ if (bin[src_itr + cnt] != buf[cnt]) {
+ dev_err(i1480->dev, "byte failed at "
+ "src_itr %u cnt %u [0x%02x "
+ "vs 0x%02x]\n", src_itr, cnt,
+ bin[src_itr + cnt], buf[cnt]);
+ result = src_itr + cnt + 1;
+ goto cmp_failed;
+ }
+ }
+ src_itr += result;
+ size -= result;
+ }
+ result = 0;
+error:
+cmp_failed:
+ return result;
+}
+
+
+/**
+ * Writes firmware headers to the device.
+ *
+ * @i1480: Device instance
+ * @hdr: Processed firmware
+ * @returns: 0 if ok, < 0 errno on error.
+ */
+static
+int mac_fw_hdrs_push(struct i1480 *i1480, struct fw_hdr *hdr,
+ const char *fw_name, const char *fw_tag)
+{
+ struct device *dev = i1480->dev;
+ ssize_t result = 0;
+ struct fw_hdr *hdr_itr;
+ int verif_retry_count;
+
+ d_fnstart(3, dev, "(%p, %p)\n", i1480, hdr);
+ /* Now, header by header, push them to the hw */
+ for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) {
+ verif_retry_count = 0;
+retry:
+ dev_dbg(dev, "fw chunk (%zu @ 0x%08lx)\n",
+ hdr_itr->length * sizeof(hdr_itr->bin[0]),
+ hdr_itr->address);
+ result = i1480->write(i1480, hdr_itr->address, hdr_itr->bin,
+ hdr_itr->length*sizeof(hdr_itr->bin[0]));
+ if (result < 0) {
+ dev_err(dev, "%s fw '%s': write failed (%zuB @ 0x%lx):"
+ " %zd\n", fw_tag, fw_name,
+ hdr_itr->length * sizeof(hdr_itr->bin[0]),
+ hdr_itr->address, result);
+ break;
+ }
+ result = i1480_fw_cmp(i1480, hdr_itr);
+ if (result < 0) {
+ dev_err(dev, "%s fw '%s': verification read "
+ "failed (%zuB @ 0x%lx): %zd\n",
+ fw_tag, fw_name,
+ hdr_itr->length * sizeof(hdr_itr->bin[0]),
+ hdr_itr->address, result);
+ break;
+ }
+ if (result > 0) { /* Offset where it failed + 1 */
+ result--;
+ dev_err(dev, "%s fw '%s': WARNING: verification "
+ "failed at 0x%lx: retrying\n",
+ fw_tag, fw_name, hdr_itr->address + result);
+ if (++verif_retry_count < 3)
+ goto retry; /* write this block again! */
+ dev_err(dev, "%s fw '%s': verification failed at 0x%lx: "
+ "tried %d times\n", fw_tag, fw_name,
+ hdr_itr->address + result, verif_retry_count);
+ result = -EINVAL;
+ break;
+ }
+ }
+ d_fnend(3, dev, "(%zd)\n", result);
+ return result;
+}
+
+
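+/*
+ * Firmware upload mode is controlled by the i1480_FW_UPLOAD_MODE_MASK
+ * bits of a device register (0x800000c0, or 0x8000d0d4 for hw_rev > 1):
+ * the two helpers below clear those bits to enable upload mode and set
+ * them to disable it.
+ */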
+/** Puts the device in firmware upload mode. */
+static
+int mac_fw_upload_enable(struct i1480 *i1480)
+{
+ int result;
+ u32 reg = 0x800000c0;
+ u32 *buffer = (u32 *)i1480->cmd_buf;
+
+ if (i1480->hw_rev > 1)
+ reg = 0x8000d0d4;
+ result = i1480->read(i1480, reg, sizeof(u32));
+ if (result < 0)
+ goto error_cmd;
+ *buffer &= ~i1480_FW_UPLOAD_MODE_MASK;
+ result = i1480->write(i1480, reg, buffer, sizeof(u32));
+ if (result < 0)
+ goto error_cmd;
+ return 0;
+error_cmd:
+ dev_err(i1480->dev, "can't enable fw upload mode: %d\n", result);
+ return result;
+}
+
+
+/** Gets the device out of firmware upload mode. */
+static
+int mac_fw_upload_disable(struct i1480 *i1480)
+{
+ int result;
+ u32 reg = 0x800000c0;
+ u32 *buffer = (u32 *)i1480->cmd_buf;
+
+ if (i1480->hw_rev > 1)
+ reg = 0x8000d0d4;
+ result = i1480->read(i1480, reg, sizeof(u32));
+ if (result < 0)
+ goto error_cmd;
+ *buffer |= i1480_FW_UPLOAD_MODE_MASK;
+ result = i1480->write(i1480, reg, buffer, sizeof(u32));
+ if (result < 0)
+ goto error_cmd;
+ return 0;
+error_cmd:
+ dev_err(i1480->dev, "can't disable fw upload mode: %d\n", result);
+ return result;
+}
+
+
+
+/**
+ * Generic function for uploading a MAC firmware.
+ *
+ * @i1480: Device instance
+ * @fw_name: Name of firmware file to upload.
+ * @fw_tag: Name of the firmware type (for messages)
+ * [eg: MAC, PRE]
+ * @returns: 0 if ok, < 0 errno on error.
+ */
+static
+int __mac_fw_upload(struct i1480 *i1480, const char *fw_name,
+ const char *fw_tag)
+{
+ int result;
+ const struct firmware *fw;
+ struct fw_hdr *fw_hdrs;
+
+ d_fnstart(3, i1480->dev, "(%p, %s, %s)\n", i1480, fw_name, fw_tag);
+ result = request_firmware(&fw, fw_name, i1480->dev);
+ if (result < 0) /* Up to caller to complain on -ENOENT */
+ goto out;
+ d_printf(3, i1480->dev, "%s fw '%s': uploading\n", fw_tag, fw_name);
+ result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size);
+ if (result < 0) {
+ dev_err(i1480->dev, "%s fw '%s': failed to parse firmware "
+ "file: %d\n", fw_tag, fw_name, result);
+ goto out_release;
+ }
+ result = mac_fw_upload_enable(i1480);
+ if (result < 0)
+ goto out_hdrs_release;
+ result = mac_fw_hdrs_push(i1480, fw_hdrs, fw_name, fw_tag);
+ mac_fw_upload_disable(i1480);
+out_hdrs_release:
+ if (result >= 0)
+ dev_info(i1480->dev, "%s fw '%s': uploaded\n", fw_tag, fw_name);
+ else
+ dev_err(i1480->dev, "%s fw '%s': failed to upload (%d), "
+ "power cycle device\n", fw_tag, fw_name, result);
+ fw_hdrs_free(fw_hdrs);
+out_release:
+ release_firmware(fw);
+out:
+ d_fnend(3, i1480->dev, "(%p, %s, %s) = %d\n", i1480, fw_name, fw_tag,
+ result);
+ return result;
+}
+
+
+/**
+ * Upload a pre-PHY firmware
+ *
+ */
+int i1480_pre_fw_upload(struct i1480 *i1480)
+{
+ int result;
+ result = __mac_fw_upload(i1480, i1480->pre_fw_name, "PRE");
+ if (result == 0)
+ msleep(400);
+ return result;
+}
+
+
+/**
+ * Reset the MAC and PHY
+ *
+ * @i1480: Device's instance
+ * @returns: 0 if ok, < 0 errno code on error
+ *
+ * We put the command on kmalloc'ed memory as some arches cannot do
+ * USB from the stack. The reply event is copied from a staging buffer,
+ * so it can be on the stack. See WUSB1.0[8.6.2.4] for more details.
+ *
+ * We issue the reset to make sure the UWB controller reinits the PHY;
+ * this way we can know if the PHY init went ok.
+ */
+static
+int i1480_cmd_reset(struct i1480 *i1480)
+{
+ int result;
+ struct uwb_rccb *cmd = (void *) i1480->cmd_buf;
+ struct i1480_evt_reset {
+ struct uwb_rceb rceb;
+ u8 bResultCode;
+ } __attribute__((packed)) *reply = (void *) i1480->evt_buf;
+
+ result = -ENOMEM;
+ cmd->bCommandType = UWB_RC_CET_GENERAL;
+ cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
+ reply->rceb.bEventType = UWB_RC_CET_GENERAL;
+ reply->rceb.wEvent = UWB_RC_CMD_RESET;
+ result = i1480_cmd(i1480, "RESET", sizeof(*cmd), sizeof(*reply));
+ if (result < 0)
+ goto out;
+ if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
+ dev_err(i1480->dev, "RESET: command execution failed: %u\n",
+ reply->bResultCode);
+ result = -EIO;
+ }
+out:
+ return result;
+
+}
+
+
+/* Poll (up to ~1s) for the MAC FW to report it is running (0x55555555 at 0x80080000) */
+static
+int i1480_fw_is_running_q(struct i1480 *i1480)
+{
+ int cnt = 0;
+ int result;
+ u32 *val = (u32 *) i1480->cmd_buf;
+
+ d_fnstart(3, i1480->dev, "(i1480 %p)\n", i1480);
+ for (cnt = 0; cnt < 10; cnt++) {
+ msleep(100);
+ result = i1480->read(i1480, 0x80080000, 4);
+ if (result < 0) {
+ dev_err(i1480->dev, "Can't read 0x8008000: %d\n", result);
+ goto out;
+ }
+ if (*val == 0x55555555UL) /* fw running? cool */
+ goto out;
+ }
+ dev_err(i1480->dev, "Timed out waiting for fw to start\n");
+ result = -ETIMEDOUT;
+out:
+ d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result);
+ return result;
+
+}
+
+
+/**
+ * Upload MAC firmware, wait for it to start
+ *
+ * @i1480: Device instance
+ * The firmware file name is taken from i1480->mac_fw_name (with a
+ * fallback to the deprecated name).
+ *
+ * This has to be called after the pre fw has been uploaded (if
+ * there is any).
+ */
+int i1480_mac_fw_upload(struct i1480 *i1480)
+{
+ int result = 0, deprecated_name = 0;
+ struct i1480_rceb *rcebe = (void *) i1480->evt_buf;
+
+ d_fnstart(3, i1480->dev, "(%p)\n", i1480);
+ result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC");
+ if (result == -ENOENT) {
+ result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate,
+ "MAC");
+ deprecated_name = 1;
+ }
+ if (result < 0)
+ return result;
+ if (deprecated_name == 1)
+ dev_warn(i1480->dev,
+ "WARNING: firmware file name %s is deprecated, "
+ "please rename to %s\n",
+ i1480->mac_fw_name_deprecate, i1480->mac_fw_name);
+ result = i1480_fw_is_running_q(i1480);
+ if (result < 0)
+ goto error_fw_not_running;
+ result = i1480->rc_setup ? i1480->rc_setup(i1480) : 0;
+ if (result < 0) {
+ dev_err(i1480->dev, "Cannot setup after MAC fw upload: %d\n",
+ result);
+ goto error_setup;
+ }
+ result = i1480->wait_init_done(i1480); /* wait init'on */
+ if (result < 0) {
+ dev_err(i1480->dev, "MAC fw '%s': Initialization timed out "
+ "(%d)\n", i1480->mac_fw_name, result);
+ goto error_init_timeout;
+ }
+ /* verify we got the right initialization done event */
+ if (i1480->evt_result != sizeof(*rcebe)) {
+ dev_err(i1480->dev, "MAC fw '%s': initialization event returns "
+ "wrong size (%zd bytes vs %zu needed)\n",
+ i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe));
+ dump_bytes(i1480->dev, rcebe, min(i1480->evt_result, (ssize_t)32));
+ result = -EIO; /* don't return the wait result on a bad event */
+ goto error_size;
+ }
+ result = -EIO;
+ if (i1480_rceb_check(i1480, &rcebe->rceb, NULL, 0, i1480_CET_VS1,
+ i1480_EVT_RM_INIT_DONE) < 0) {
+ dev_err(i1480->dev, "wrong initialization event 0x%02x/%04x/%02x "
+ "received; expected 0x%02x/%04x/00\n",
+ rcebe->rceb.bEventType, le16_to_cpu(rcebe->rceb.wEvent),
+ rcebe->rceb.bEventContext, i1480_CET_VS1,
+ i1480_EVT_RM_INIT_DONE);
+ goto error_init_timeout;
+ }
+ result = i1480_cmd_reset(i1480);
+ if (result < 0)
+ dev_err(i1480->dev, "MAC fw '%s': MBOA reset failed (%d)\n",
+ i1480->mac_fw_name, result);
+error_fw_not_running:
+error_init_timeout:
+error_size:
+error_setup:
+ d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result);
+ return result;
+}
diff --git a/drivers/uwb/i1480/dfu/phy.c b/drivers/uwb/i1480/dfu/phy.c
new file mode 100644
index 00000000000..3b1a87de8e6
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/phy.c
@@ -0,0 +1,203 @@
+/*
+ * Intel Wireless UWB Link 1480
+ * PHY parameters upload
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Code for uploading the PHY parameters to the PHY through the UWB
+ * Radio Control interface.
+ *
+ * We just send the data through the MPI interface using HWA-like
+ * commands and then reset the PHY to make sure it is ok.
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/usb/wusb.h>
+#include "i1480-dfu.h"
+
+
+/**
+ * Write a value array to an address of the MPI interface
+ *
+ * @i1480: Device descriptor
+ * @data: Data array to write
+ * @size: Size of the data array
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * The data array is organized into pairs:
+ *
+ * ADDRESS VALUE
+ *
+ * ADDRESS is BE 16 bit unsigned, VALUE 8 bit unsigned. Size thus has
+ * to be a multiple of three.
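+ *
+ * For illustration only (made-up addresses and values), the 6-byte
+ * payload
+ *
+ *   00 06 12  00 07 34
+ *
+ * would write 0x12 to MPI address 0x0006 and 0x34 to address 0x0007.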
+ */
+static
+int i1480_mpi_write(struct i1480 *i1480, const void *data, size_t size)
+{
+ int result;
+ struct i1480_cmd_mpi_write *cmd = i1480->cmd_buf;
+ struct i1480_evt_confirm *reply = i1480->evt_buf;
+
+ BUG_ON(size > 480);
+ result = -ENOMEM;
+ cmd->rccb.bCommandType = i1480_CET_VS1;
+ cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_WRITE);
+ cmd->size = cpu_to_le16(size);
+ memcpy(cmd->data, data, size);
+ reply->rceb.bEventType = i1480_CET_VS1;
+ reply->rceb.wEvent = i1480_CMD_MPI_WRITE;
+ result = i1480_cmd(i1480, "MPI-WRITE", sizeof(*cmd) + size, sizeof(*reply));
+ if (result < 0)
+ goto out;
+ if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
+ dev_err(i1480->dev, "MPI-WRITE: command execution failed: %d\n",
+ reply->bResultCode);
+ result = -EIO;
+ }
+out:
+ return result;
+}
+
+
+/**
+ * Read a value array from an address of the MPI interface
+ *
+ * @i1480: Device descriptor
+ * @data: where to place the read array
+ * @srcaddr: Where to read from
+ * @size: Size of the data read array
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * The command data array is organized into pairs ADDR0 ADDR1..., and
+ * the returned data in ADDR0 VALUE0 ADDR1 VALUE1...
+ *
+ * We generate the command array to be a sequential read and then
+ * rearrange the result.
+ *
+ * We use the i1480->cmd_buf for the command, i1480->evt_buf for the reply.
+ *
+ * As the reply has to fit in 512 bytes (i1480->evt_buffer), the max amount
+ * of values we can read is (512 - sizeof(*reply)) / 3
+ */
+static
+int i1480_mpi_read(struct i1480 *i1480, u8 *data, u16 srcaddr, size_t size)
+{
+ int result;
+ struct i1480_cmd_mpi_read *cmd = i1480->cmd_buf;
+ struct i1480_evt_mpi_read *reply = i1480->evt_buf;
+ unsigned cnt;
+
+ memset(i1480->cmd_buf, 0x69, 512);
+ memset(i1480->evt_buf, 0x69, 512);
+
+ BUG_ON(size > (i1480->buf_size - sizeof(*reply)) / 3);
+ result = -ENOMEM;
+ cmd->rccb.bCommandType = i1480_CET_VS1;
+ cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_READ);
+ cmd->size = cpu_to_le16(3*size);
+ for (cnt = 0; cnt < size; cnt++) {
+ cmd->data[cnt].page = (srcaddr + cnt) >> 8;
+ cmd->data[cnt].offset = (srcaddr + cnt) & 0xff;
+ }
+ reply->rceb.bEventType = i1480_CET_VS1;
+ reply->rceb.wEvent = i1480_CMD_MPI_READ;
+ result = i1480_cmd(i1480, "MPI-READ", sizeof(*cmd) + 2*size,
+ sizeof(*reply) + 3*size);
+ if (result < 0)
+ goto out;
+ if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
+ dev_err(i1480->dev, "MPI-READ: command execution failed: %d\n",
+ reply->bResultCode);
+ result = -EIO;
+ }
+ for (cnt = 0; cnt < size; cnt++) {
+ if (reply->data[cnt].page != (srcaddr + cnt) >> 8)
+ dev_err(i1480->dev, "MPI-READ: page inconsistency at "
+ "index %u: expected 0x%02x, got 0x%02x\n", cnt,
+ (srcaddr + cnt) >> 8, reply->data[cnt].page);
+ if (reply->data[cnt].offset != ((srcaddr + cnt) & 0x00ff))
+ dev_err(i1480->dev, "MPI-READ: offset inconsistency at "
+ "index %u: expected 0x%02x, got 0x%02x\n", cnt,
+ (srcaddr + cnt) & 0x00ff,
+ reply->data[cnt].offset);
+ data[cnt] = reply->data[cnt].value;
+ }
+ result = 0;
+out:
+ return result;
+}
+
+
+/**
+ * Upload a PHY firmware, wait for it to start
+ *
+ * @i1480: Device instance
+ * The firmware file name is taken from i1480->phy_fw_name.
+ *
+ * We assume the MAC fw is up and running. This means we can use the
+ * MPI interface to write the PHY firmware. Once done, we issue an
+ * MBOA Reset, which will force the MAC to reset and reinitialize the
+ * PHY. If that works, we are ready to go.
+ *
+ * Max packet size for the MPI write is 512, so the max buffer is 480
+ * (which gives us 160 three-byte MSB/LSB/VAL triads of data).
+ */
+int i1480_phy_fw_upload(struct i1480 *i1480)
+{
+ int result;
+ const struct firmware *fw;
+ const char *data_itr, *data_top;
+ const size_t MAX_BLK_SIZE = 480; /* 160 triads */
+ size_t data_size;
+ u8 phy_stat;
+
+ result = request_firmware(&fw, i1480->phy_fw_name, i1480->dev);
+ if (result < 0)
+ goto out;
+ /* Loop writing data in chunks as big as possible until done. */
+ for (data_itr = fw->data, data_top = data_itr + fw->size;
+ data_itr < data_top; data_itr += MAX_BLK_SIZE) {
+ data_size = min(MAX_BLK_SIZE, (size_t) (data_top - data_itr));
+ result = i1480_mpi_write(i1480, data_itr, data_size);
+ if (result < 0)
+ goto error_mpi_write;
+ }
+ /* Read MPI page 0, offset 6; if 0, PHY was initialized correctly. */
+ result = i1480_mpi_read(i1480, &phy_stat, 0x0006, 1);
+ if (result < 0) {
+ dev_err(i1480->dev, "PHY: can't get status: %d\n", result);
+ goto error_mpi_status;
+ }
+ if (phy_stat != 0) {
+ result = -ENODEV;
+ dev_info(i1480->dev, "error, PHY not ready: %u\n", phy_stat);
+ goto error_phy_status;
+ }
+ dev_info(i1480->dev, "PHY fw '%s': uploaded\n", i1480->phy_fw_name);
+error_phy_status:
+error_mpi_status:
+error_mpi_write:
+ release_firmware(fw);
+ if (result < 0)
+ dev_err(i1480->dev, "PHY fw '%s': failed to upload (%d), "
+ "power cycle device\n", i1480->phy_fw_name, result);
+out:
+ return result;
+}
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
new file mode 100644
index 00000000000..98eeeff051a
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -0,0 +1,500 @@
+/*
+ * Intel Wireless UWB Link 1480
+ * USB SKU firmware upload implementation
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This driver will prepare the i1480 device to behave as a real
+ * Wireless USB HWA adaptor by uploading the firmware.
+ *
+ * When the device is connected or driver is loaded, i1480_usb_probe()
+ * is called--this will allocate and initialize the device structure,
+ * fill in the pointers to the common functions (read, write,
+ * wait_init_done and cmd for HWA command execution) and once that is
+ * done, call the common firmware uploading routine. Then clean up and
+ * return -ENODEV, as we don't attach to the device.
+ *
+ * The rest are the basic ops we implement that the fw upload code
+ * uses to do its job. All the ops in the common code are i1480->NAME,
+ * the functions are i1480_usb_NAME().
+ */
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/usb.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uwb.h>
+#include <linux/usb/wusb.h>
+#include <linux/usb/wusb-wa.h>
+#include "i1480-dfu.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+
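+/*
+ * USB-specific wrapper around the common struct i1480: keeps the USB
+ * device and interface references plus the URB used to read the
+ * notification/event endpoint (EP1).
+ */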
+struct i1480_usb {
+ struct i1480 i1480;
+ struct usb_device *usb_dev;
+ struct usb_interface *usb_iface;
+ struct urb *neep_urb; /* URB for reading from EP1 */
+};
+
+
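+/* Initialize the embedded common i1480 state */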
+static
+void i1480_usb_init(struct i1480_usb *i1480_usb)
+{
+ i1480_init(&i1480_usb->i1480);
+}
+
+
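+/*
+ * Take references on the USB device and interface, bind the driver
+ * data to the interface and allocate the URB used to read EP1.
+ */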
+static
+int i1480_usb_create(struct i1480_usb *i1480_usb, struct usb_interface *iface)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(iface);
+ int result = -ENOMEM;
+
+ i1480_usb->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */
+ i1480_usb->usb_iface = usb_get_intf(iface);
+ usb_set_intfdata(iface, i1480_usb); /* Bind the driver to iface0 */
+ i1480_usb->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (i1480_usb->neep_urb == NULL)
+ goto error;
+ return 0;
+
+error:
+ usb_set_intfdata(iface, NULL);
+ usb_put_intf(iface);
+ usb_put_dev(usb_dev);
+ return result;
+}
+
+
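+/* Undo i1480_usb_create(): kill and free the EP1 URB, drop the USB references */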
+static
+void i1480_usb_destroy(struct i1480_usb *i1480_usb)
+{
+ usb_kill_urb(i1480_usb->neep_urb);
+ usb_free_urb(i1480_usb->neep_urb);
+ usb_set_intfdata(i1480_usb->usb_iface, NULL);
+ usb_put_intf(i1480_usb->usb_iface);
+ usb_put_dev(i1480_usb->usb_dev);
+}
+
+
+/**
+ * Write a buffer to a memory address in the i1480 device
+ *
+ * @i1480: i1480 instance
+ * @memory_address:
+ * Address where to write the data buffer to.
+ * @buffer: Buffer to the data
+ * @size: Size of the buffer (has to be a multiple of 4; it is sent in
+ * chunks of at most i1480->buf_size).
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Data buffers to USB cannot be on the stack or in vmalloc'ed areas,
+ * so we copy the data to the local i1480 buffer before sending. In any
+ * case, there is a maximum size we can send per transfer, so the write
+ * is chunked.
+ */
+static
+int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
+ const void *buffer, size_t size)
+{
+ int result = 0;
+ struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
+ size_t buffer_size, itr = 0;
+
+ d_fnstart(3, i1480->dev, "(%p, 0x%08x, %p, %zu)\n",
+ i1480, memory_address, buffer, size);
+ BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
+ while (size > 0) {
+ buffer_size = size < i1480->buf_size ? size : i1480->buf_size;
+ memcpy(i1480->cmd_buf, buffer + itr, buffer_size);
+ result = usb_control_msg(
+ i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
+ 0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ cpu_to_le16(memory_address & 0xffff),
+ cpu_to_le16((memory_address >> 16) & 0xffff),
+ i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */);
+ if (result < 0)
+ break;
+ d_printf(3, i1480->dev,
+ "wrote @ 0x%08x %u bytes (of %zu bytes requested)\n",
+ memory_address, result, buffer_size);
+ d_dump(4, i1480->dev, i1480->cmd_buf, result);
+ itr += result;
+ memory_address += result;
+ size -= result;
+ }
+ d_fnend(3, i1480->dev, "(%p, 0x%08x, %p, %zu) = %d\n",
+ i1480, memory_address, buffer, size, result);
+ return result;
+}
+
+
+/**
+ * Read a block [max size 512] of the device's memory to @i1480's buffer.
+ *
+ * @i1480: i1480 instance
+ * @memory_address:
+ * Address where to read from.
+ * @size: Size to read. Smaller than or equal to 512.
+ * @returns: >= 0 number of bytes read if ok, < 0 errno code on error.
+ *
+ * NOTE: if the memory address or block is incorrect, you might get a
+ * stall or a different memory read. The caller has to verify the
+ * address and size of the data that was actually read back.
+ */
+static
+int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size)
+{
+ ssize_t result = 0, bytes = 0;
+ size_t itr, read_size = i1480->buf_size;
+ struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
+
+ d_fnstart(3, i1480->dev, "(%p, 0x%08x, %zu)\n",
+ i1480, addr, size);
+ BUG_ON(size > i1480->buf_size);
+ BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
+ BUG_ON(read_size > 512);
+
+ if (addr >= 0x8000d200 && addr < 0x8000d400) /* Yeah, HW quirk */
+ read_size = 4;
+
+ for (itr = 0; itr < size; itr += read_size) {
+ size_t itr_addr = addr + itr;
+ size_t itr_size = min(read_size, size - itr);
+ result = usb_control_msg(
+ i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0),
+ 0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ cpu_to_le16(itr_addr & 0xffff),
+ cpu_to_le16((itr_addr >> 16) & 0xffff),
+ i1480->cmd_buf + itr, itr_size,
+ 100 /* FIXME: arbitrary */);
+ if (result < 0) {
+ dev_err(i1480->dev, "%s: USB read error: %zd\n",
+ __func__, result);
+ goto out;
+ }
+ if (result != itr_size) {
+ dev_err(i1480->dev,
+ "%s: partial read: got only %zd bytes vs %zu expected\n",
+ __func__, result, itr_size);
+ result = -EIO;
+ goto out;
+ }
+ bytes += result;
+ }
+ result = bytes;
+out:
+ d_fnend(3, i1480->dev, "(%p, 0x%08x, %zu) = %zd\n",
+ i1480, addr, size, result);
+ if (result > 0)
+ d_dump(4, i1480->dev, i1480->cmd_buf, result);
+ return result;
+}
+
+
+/**
+ * Callback for reads on the notification/event endpoint
+ *
+ * Just stores the URB's actual length in i1480->evt_result and
+ * completes the waiter.
+ */
+static
+void i1480_usb_neep_cb(struct urb *urb)
+{
+ struct i1480 *i1480 = urb->context;
+ struct device *dev = i1480->dev;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ECONNRESET: /* Not an error, but a controlled situation; */
+ case -ENOENT: /* (we killed the URB)...so, no broadcast */
+ dev_dbg(dev, "NEEP: reset/noent %d\n", urb->status);
+ break;
+ case -ESHUTDOWN: /* going away! */
+ dev_dbg(dev, "NEEP: down %d\n", urb->status);
+ break;
+ default:
+ dev_err(dev, "NEEP: unknown status %d\n", urb->status);
+ break;
+ }
+ i1480->evt_result = urb->actual_length;
+ complete(&i1480->evt_complete);
+ return;
+}
+
+
+/**
+ * Wait for the MAC FW to initialize
+ *
+ * MAC FW sends a 0xfd/0101/00 notification to EP1 when done
+ * initializing. Get that notification into i1480->evt_buf; upper layer
+ * will verify it.
+ *
+ * Set i1480->evt_result with the result of getting the event, or its
+ * size (if successful).
+ *
+ * Delivers the data directly to i1480->evt_buf
+ */
+static
+int i1480_usb_wait_init_done(struct i1480 *i1480)
+{
+ int result;
+ struct device *dev = i1480->dev;
+ struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
+ struct usb_endpoint_descriptor *epd;
+
+ d_fnstart(3, dev, "(%p)\n", i1480);
+ init_completion(&i1480->evt_complete);
+ i1480->evt_result = -EINPROGRESS;
+ epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
+ usb_fill_int_urb(i1480_usb->neep_urb, i1480_usb->usb_dev,
+ usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
+ i1480->evt_buf, i1480->buf_size,
+ i1480_usb_neep_cb, i1480, epd->bInterval);
+ result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
+ if (result < 0) {
+ dev_err(dev, "init done: cannot submit NEEP read: %d\n",
+ result);
+ goto error_submit;
+ }
+ /* Wait for the USB callback to get the data */
+ result = wait_for_completion_interruptible_timeout(
+ &i1480->evt_complete, HZ);
+ if (result <= 0) {
+ result = result == 0 ? -ETIMEDOUT : result;
+ goto error_wait;
+ }
+ usb_kill_urb(i1480_usb->neep_urb);
+ d_fnend(3, dev, "(%p) = 0\n", i1480);
+ return 0;
+
+error_wait:
+ usb_kill_urb(i1480_usb->neep_urb);
+error_submit:
+ i1480->evt_result = result;
+ d_fnend(3, dev, "(%p) = %d\n", i1480, result);
+ return result;
+}
+
+
+/**
+ * Generic function for issuing commands to the i1480
+ *
+ * @i1480:      i1480 instance
+ * @cmd_name:   Name of the command (for error messages)
+ * @cmd_size:   Size of the command, already placed in @i1480->cmd_buf
+ * @returns:    >= 0 if ok (number of bytes of the command written),
+ *              < 0 errno code on error.
+ *
+ * Posts a read on the notification/event endpoint (the reply event will
+ * be delivered to @i1480->evt_buf by the NEEP callback) and then issues
+ * the command to the device over EP0.
+ */
+static
+int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size)
+{
+ int result;
+ struct device *dev = i1480->dev;
+ struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
+ struct usb_endpoint_descriptor *epd;
+ struct uwb_rccb *cmd = i1480->cmd_buf;
+ u8 iface_no;
+
+ d_fnstart(3, dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size);
+ /* Post a read on the notification & event endpoint */
+ iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+ epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
+ usb_fill_int_urb(
+ i1480_usb->neep_urb, i1480_usb->usb_dev,
+ usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
+ i1480->evt_buf, i1480->buf_size,
+ i1480_usb_neep_cb, i1480, epd->bInterval);
+ result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
+ if (result < 0) {
+ dev_err(dev, "%s: cannot submit NEEP read: %d\n",
+ cmd_name, result);
+ goto error_submit_ep1;
+ }
+ /* Now post the command on EP0 */
+ result = usb_control_msg(
+ i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
+ WA_EXEC_RC_CMD,
+ USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
+ 0, iface_no,
+ cmd, cmd_size,
+ 100 /* FIXME: this is totally arbitrary */);
+ if (result < 0) {
+ dev_err(dev, "%s: control request failed: %d\n",
+ cmd_name, result);
+ goto error_submit_ep0;
+ }
+ d_fnend(3, dev, "(%p, %s, %zu) = %d\n",
+ i1480, cmd_name, cmd_size, result);
+ return result;
+
+error_submit_ep0:
+ usb_kill_urb(i1480_usb->neep_urb);
+error_submit_ep1:
+ d_fnend(3, dev, "(%p, %s, %zu) = %d\n",
+ i1480, cmd_name, cmd_size, result);
+ return result;
+}
+
+
+/*
+ * Probe an i1480 device for uploading firmware.
+ *
+ * We attach only to interface #0, which is the radio control interface.
+ */
+static
+int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
+{
+ struct i1480_usb *i1480_usb;
+ struct i1480 *i1480;
+ struct device *dev = &iface->dev;
+ int result;
+
+ result = -ENODEV;
+ if (iface->cur_altsetting->desc.bInterfaceNumber != 0) {
+ dev_dbg(dev, "not attaching to iface %d\n",
+ iface->cur_altsetting->desc.bInterfaceNumber);
+ goto error;
+ }
+ if (iface->num_altsetting > 1
+ && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) {
+ /* Need altsetting #1 [HW QUIRK] or EP1 won't work */
+ result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
+ if (result < 0)
+ dev_warn(dev,
+ "can't set altsetting 1 on iface 0: %d\n",
+ result);
+ }
+
+ result = -ENOMEM;
+ i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
+ if (i1480_usb == NULL) {
+ dev_err(dev, "Unable to allocate instance\n");
+ goto error;
+ }
+ i1480_usb_init(i1480_usb);
+
+ i1480 = &i1480_usb->i1480;
+ i1480->buf_size = 512;
+ i1480->cmd_buf = kmalloc(2 * i1480->buf_size, GFP_KERNEL);
+ if (i1480->cmd_buf == NULL) {
+ dev_err(dev, "Cannot allocate transfer buffers\n");
+ result = -ENOMEM;
+ goto error_buf_alloc;
+ }
+ i1480->evt_buf = i1480->cmd_buf + i1480->buf_size;
+
+ result = i1480_usb_create(i1480_usb, iface);
+ if (result < 0) {
+ dev_err(dev, "Cannot create instance: %d\n", result);
+ goto error_create;
+ }
+
+	/* setup the fops and upload the firmware */
+ i1480->pre_fw_name = "i1480-pre-phy-0.0.bin";
+ i1480->mac_fw_name = "i1480-usb-0.0.bin";
+ i1480->mac_fw_name_deprecate = "ptc-0.0.bin";
+ i1480->phy_fw_name = "i1480-phy-0.0.bin";
+ i1480->dev = &iface->dev;
+ i1480->write = i1480_usb_write;
+ i1480->read = i1480_usb_read;
+ i1480->rc_setup = NULL;
+ i1480->wait_init_done = i1480_usb_wait_init_done;
+ i1480->cmd = i1480_usb_cmd;
+
+ result = i1480_fw_upload(&i1480_usb->i1480); /* the real thing */
+ if (result >= 0) {
+ usb_reset_device(i1480_usb->usb_dev);
+ result = -ENODEV; /* we don't want to bind to the iface */
+ }
+ i1480_usb_destroy(i1480_usb);
+error_create:
+ kfree(i1480->cmd_buf);
+error_buf_alloc:
+ kfree(i1480_usb);
+error:
+ return result;
+}
+
+#define i1480_USB_DEV(v, p) \
+{ \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE \
+ | USB_DEVICE_ID_MATCH_DEV_INFO \
+ | USB_DEVICE_ID_MATCH_INT_INFO, \
+ .idVendor = (v), \
+ .idProduct = (p), \
+ .bDeviceClass = 0xff, \
+ .bDeviceSubClass = 0xff, \
+ .bDeviceProtocol = 0xff, \
+ .bInterfaceClass = 0xff, \
+ .bInterfaceSubClass = 0xff, \
+ .bInterfaceProtocol = 0xff, \
+}
+
+
+/** USB device IDs that we handle */
+static struct usb_device_id i1480_usb_id_table[] = {
+ i1480_USB_DEV(0x8086, 0xdf3b),
+ i1480_USB_DEV(0x15a9, 0x0005),
+ i1480_USB_DEV(0x07d1, 0x3802),
+ i1480_USB_DEV(0x050d, 0x305a),
+ i1480_USB_DEV(0x3495, 0x3007),
+ {},
+};
+MODULE_DEVICE_TABLE(usb, i1480_usb_id_table);
+
+
+static struct usb_driver i1480_dfu_driver = {
+ .name = "i1480-dfu-usb",
+ .id_table = i1480_usb_id_table,
+ .probe = i1480_usb_probe,
+ .disconnect = NULL,
+};
+
+
+/*
+ * Initialize the i1480 DFU driver.
+ *
+ * We also need to register our function for guessing event sizes.
+ */
+static int __init i1480_dfu_driver_init(void)
+{
+ return usb_register(&i1480_dfu_driver);
+}
+module_init(i1480_dfu_driver_init);
+
+
+static void __exit i1480_dfu_driver_exit(void)
+{
+ usb_deregister(&i1480_dfu_driver);
+}
+module_exit(i1480_dfu_driver_exit);
+
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/i1480-est.c b/drivers/uwb/i1480/i1480-est.c
new file mode 100644
index 00000000000..7bf8c6febae
--- /dev/null
+++ b/drivers/uwb/i1480/i1480-est.c
@@ -0,0 +1,99 @@
+/*
+ * Intel Wireless UWB Link 1480
+ * Event Size tables for Wired Adaptors
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/uwb.h>
+#include "dfu/i1480-dfu.h"
+
+
+/** Event size table for wEvents 0x00XX */
+static struct uwb_est_entry i1480_est_fd00[] = {
+ /* Anybody expecting this response has to use
+ * neh->extra_size to specify the real size that will
+ * come back. */
+ [i1480_EVT_CONFIRM] = { .size = sizeof(struct i1480_evt_confirm) },
+ [i1480_CMD_SET_IP_MAS] = { .size = sizeof(struct i1480_evt_confirm) },
+#ifdef i1480_RCEB_EXTENDED
+ [0x09] = {
+ .size = sizeof(struct i1480_rceb),
+ .offset = 1 + offsetof(struct i1480_rceb, wParamLength),
+ },
+#endif
+};
+
+/** Event size table for wEvents 0x01XX */
+static struct uwb_est_entry i1480_est_fd01[] = {
+ [0xff & i1480_EVT_RM_INIT_DONE] = { .size = sizeof(struct i1480_rceb) },
+ [0xff & i1480_EVT_DEV_ADD] = { .size = sizeof(struct i1480_rceb) + 9 },
+ [0xff & i1480_EVT_DEV_RM] = { .size = sizeof(struct i1480_rceb) + 9 },
+ [0xff & i1480_EVT_DEV_ID_CHANGE] = {
+ .size = sizeof(struct i1480_rceb) + 2 },
+};
+
+static int i1480_est_init(void)
+{
+ int result = uwb_est_register(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
+ i1480_est_fd00,
+ ARRAY_SIZE(i1480_est_fd00));
+ if (result < 0) {
+ printk(KERN_ERR "Can't register EST table fd00: %d\n", result);
+ return result;
+ }
+ result = uwb_est_register(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b,
+ i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01));
+ if (result < 0) {
+ printk(KERN_ERR "Can't register EST table fd01: %d\n", result);
+ return result;
+ }
+ return 0;
+}
+module_init(i1480_est_init);
+
+static void i1480_est_exit(void)
+{
+ uwb_est_unregister(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
+ i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00));
+ uwb_est_unregister(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b,
+ i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01));
+}
+module_exit(i1480_est_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("i1480's Vendor Specific Event Size Tables");
+MODULE_LICENSE("GPL");
+
+/**
+ * USB device IDs that we handle
+ *
+ * [so we are loaded when this kind of device is connected]
+ */
+static struct usb_device_id i1480_est_id_table[] = {
+ { USB_DEVICE(0x8086, 0xdf3b), },
+ { USB_DEVICE(0x8086, 0x0c3b), },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, i1480_est_id_table);
diff --git a/drivers/uwb/i1480/i1480-wlp.h b/drivers/uwb/i1480/i1480-wlp.h
new file mode 100644
index 00000000000..18a8b0e4567
--- /dev/null
+++ b/drivers/uwb/i1480/i1480-wlp.h
@@ -0,0 +1,200 @@
+/*
+ * Intel 1480 Wireless UWB Link
+ * WLP specific definitions
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#ifndef __i1480_wlp_h__
+#define __i1480_wlp_h__
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/uwb.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
+
+/* New simplified header format? */
+#undef WLP_HDR_FMT_2 /* FIXME: rename */
+
+/**
+ * Values of the Delivery ID & Type field when PCA or DRP
+ *
+ * The Delivery ID & Type field in the WLP TX header indicates whether
+ * the frame is PCA or DRP. This is done based on the high level bit of
+ * this field.
+ * We use this constant to test if the traffic is PCA or DRP as follows:
+ * if (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)
+ * this is DRP traffic
+ * else
+ * this is PCA traffic
+ */
+enum deliver_id_type_bit {
+ WLP_DRP = 8,
+};
+
+/**
+ * WLP TX header
+ *
+ * Indicates UWB/WLP-specific transmission parameters for a network
+ * packet.
+ */
+struct wlp_tx_hdr {
+ /* dword 0 */
+ struct uwb_dev_addr dstaddr;
+ u8 key_index;
+ u8 mac_params;
+ /* dword 1 */
+ u8 phy_params;
+#ifndef WLP_HDR_FMT_2
+ u8 reserved;
+ __le16 oui01; /* FIXME: not so sure if __le16 or u8[2] */
+ /* dword 2 */
+ u8 oui2; /* if all LE, it could be merged */
+ __le16 prid;
+#endif
+} __attribute__((packed));
+
+static inline int wlp_tx_hdr_delivery_id_type(const struct wlp_tx_hdr *hdr)
+{
+ return hdr->mac_params & 0x0f;
+}
+
+static inline int wlp_tx_hdr_ack_policy(const struct wlp_tx_hdr *hdr)
+{
+ return (hdr->mac_params >> 4) & 0x07;
+}
+
+static inline int wlp_tx_hdr_rts_cts(const struct wlp_tx_hdr *hdr)
+{
+ return (hdr->mac_params >> 7) & 0x01;
+}
+
+static inline void wlp_tx_hdr_set_delivery_id_type(struct wlp_tx_hdr *hdr, int id)
+{
+ hdr->mac_params = (hdr->mac_params & ~0x0f) | id;
+}
+
+static inline void wlp_tx_hdr_set_ack_policy(struct wlp_tx_hdr *hdr,
+ enum uwb_ack_pol policy)
+{
+ hdr->mac_params = (hdr->mac_params & ~0x70) | (policy << 4);
+}
+
+static inline void wlp_tx_hdr_set_rts_cts(struct wlp_tx_hdr *hdr, int rts_cts)
+{
+ hdr->mac_params = (hdr->mac_params & ~0x80) | (rts_cts << 7);
+}
+
+static inline enum uwb_phy_rate wlp_tx_hdr_phy_rate(const struct wlp_tx_hdr *hdr)
+{
+ return hdr->phy_params & 0x0f;
+}
+
+static inline int wlp_tx_hdr_tx_power(const struct wlp_tx_hdr *hdr)
+{
+ return (hdr->phy_params >> 4) & 0x0f;
+}
+
+static inline void wlp_tx_hdr_set_phy_rate(struct wlp_tx_hdr *hdr, enum uwb_phy_rate rate)
+{
+ hdr->phy_params = (hdr->phy_params & ~0x0f) | rate;
+}
+
+static inline void wlp_tx_hdr_set_tx_power(struct wlp_tx_hdr *hdr, int pwr)
+{
+ hdr->phy_params = (hdr->phy_params & ~0xf0) | (pwr << 4);
+}
+
+
+/**
+ * WLP RX header
+ *
+ * Provides UWB/WLP-specific transmission data for a received
+ * network packet.
+ */
+struct wlp_rx_hdr {
+ /* dword 0 */
+ struct uwb_dev_addr dstaddr;
+ struct uwb_dev_addr srcaddr;
+ /* dword 1 */
+ u8 LQI;
+ s8 RSSI;
+ u8 reserved3;
+#ifndef WLP_HDR_FMT_2
+ u8 oui0;
+ /* dword 2 */
+ __le16 oui12;
+ __le16 prid;
+#endif
+} __attribute__((packed));
+
+
+/** User configurable options for WLP */
+struct wlp_options {
+ struct mutex mutex; /* access to user configurable options*/
+ struct wlp_tx_hdr def_tx_hdr; /* default tx hdr */
+ u8 pca_base_priority;
+ u8 bw_alloc; /*index into bw_allocs[] for PCA/DRP reservations*/
+};
+
+
+static inline
+void wlp_options_init(struct wlp_options *options)
+{
+ mutex_init(&options->mutex);
+ wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, UWB_ACK_INM);
+ wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, 1);
+ /* FIXME: default to phy caps */
+ wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, UWB_PHY_RATE_480);
+#ifndef WLP_HDR_FMT_2
+ options->def_tx_hdr.prid = cpu_to_le16(0x0000);
+#endif
+}
+
+
+/* sysfs helpers */
+
+extern ssize_t uwb_pca_base_priority_store(struct wlp_options *,
+ const char *, size_t);
+extern ssize_t uwb_pca_base_priority_show(const struct wlp_options *, char *);
+extern ssize_t uwb_bw_alloc_store(struct wlp_options *, const char *, size_t);
+extern ssize_t uwb_bw_alloc_show(const struct wlp_options *, char *);
+extern ssize_t uwb_ack_policy_store(struct wlp_options *,
+ const char *, size_t);
+extern ssize_t uwb_ack_policy_show(const struct wlp_options *, char *);
+extern ssize_t uwb_rts_cts_store(struct wlp_options *, const char *, size_t);
+extern ssize_t uwb_rts_cts_show(const struct wlp_options *, char *);
+extern ssize_t uwb_phy_rate_store(struct wlp_options *, const char *, size_t);
+extern ssize_t uwb_phy_rate_show(const struct wlp_options *, char *);
+
+
+/** Simple bandwidth allocation (temporary and too simple) */
+struct wlp_bw_allocs {
+ const char *name;
+ struct {
+ u8 mask, stream;
+ } tx, rx;
+};
+
+
+#endif /* #ifndef __i1480_wlp_h__ */
diff --git a/drivers/uwb/i1480/i1480u-wlp/Makefile b/drivers/uwb/i1480/i1480u-wlp/Makefile
new file mode 100644
index 00000000000..fe6709b8e68
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp.o
+
+i1480u-wlp-objs := \
+ lc.o \
+ netdev.o \
+ rx.o \
+ sysfs.o \
+ tx.o
diff --git a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
new file mode 100644
index 00000000000..5f1b2951bb8
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
@@ -0,0 +1,284 @@
+/*
+ * Intel 1480 Wireless UWB Link USB
+ * Header formats, constants, general internal interfaces
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This is not a standard interface.
+ *
+ * FIXME: docs
+ *
+ * i1480u-wlp is pretty simple: two endpoints, one for tx, one for
+ * rx. rx is polled. Network packets (ethernet, whatever) are wrapped
+ * in i1480 TX or RX headers (for sending over the air), and these
+ * packets are wrapped in UNTD headers (for sending to the WLP UWB
+ * controller).
+ *
+ * UNTD packets (UNTD hdr + i1480 hdr + network packet) cannot be
+ * bigger than i1480u_MAX_FRG_SIZE. When they are, the i1480 packet is
+ * broken into chunks/packets:
+ *
+ * UNTD-1st.hdr + i1480.hdr + payload
+ * UNTD-next.hdr + payload
+ * ...
+ * UNTD-last.hdr + payload
+ *
+ * so that each packet is smaller than or equal to i1480u_MAX_FRG_SIZE.
+ *
+ * All HW structures and bitmaps are little endian, so we need to play
+ * ugly tricks when defining bitfields. Hoping for the day GCC
+ * implements __attribute__((endian(1234))).
+ *
+ * FIXME: ROADMAP to the whole implementation
+ */
+
+#ifndef __i1480u_wlp_h__
+#define __i1480u_wlp_h__
+
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/uwb.h> /* struct uwb_rc, struct uwb_notifs_handler */
+#include <linux/wlp.h>
+#include "../i1480-wlp.h"
+
+#undef i1480u_FLOW_CONTROL /* Enable flow control code */
+
+/**
+ * Basic flow control
+ */
+enum {
+ i1480u_TX_INFLIGHT_MAX = 1000,
+ i1480u_TX_INFLIGHT_THRESHOLD = 100,
+};
+
+/** Maximum size of a transaction that we can tx/rx */
+enum {
+ /* Maximum packet size computed as follows: max UNTD header (8) +
+ * i1480 RX header (8) + max Ethernet header and payload (4096) +
+ * Padding added by skb_reserve (2) to make post Ethernet payload
+ * start on 16 byte boundary*/
+ i1480u_MAX_RX_PKT_SIZE = 4114,
+ i1480u_MAX_FRG_SIZE = 512,
+ i1480u_RX_BUFS = 9,
+};
+
+
+/**
+ * UNTD packet type
+ *
+ * We need to fragment any payload whose UNTD packet is going to be
+ * bigger than i1480u_MAX_FRG_SIZE.
+ */
+enum i1480u_pkt_type {
+ i1480u_PKT_FRAG_1ST = 0x1,
+ i1480u_PKT_FRAG_NXT = 0x0,
+ i1480u_PKT_FRAG_LST = 0x2,
+ i1480u_PKT_FRAG_CMP = 0x3
+};
+enum {
+ i1480u_PKT_NONE = 0x4,
+};
+
+/** USB Network Transfer Descriptor - common */
+struct untd_hdr {
+ u8 type;
+ __le16 len;
+} __attribute__((packed));
+
+static inline enum i1480u_pkt_type untd_hdr_type(const struct untd_hdr *hdr)
+{
+ return hdr->type & 0x03;
+}
+
+static inline int untd_hdr_rx_tx(const struct untd_hdr *hdr)
+{
+ return (hdr->type >> 2) & 0x01;
+}
+
+static inline void untd_hdr_set_type(struct untd_hdr *hdr, enum i1480u_pkt_type type)
+{
+ hdr->type = (hdr->type & ~0x03) | type;
+}
+
+static inline void untd_hdr_set_rx_tx(struct untd_hdr *hdr, int rx_tx)
+{
+ hdr->type = (hdr->type & ~0x04) | (rx_tx << 2);
+}
+
+
+/**
+ * USB Network Transfer Descriptor - Complete Packet
+ *
+ * This is for a packet that is smaller (header + payload) than
+ * i1480u_MAX_FRG_SIZE.
+ *
+ * @hdr.total_len is the size of the payload; the payload doesn't
+ * count this header nor the padding, but includes the size of i1480
+ * header.
+ */
+struct untd_hdr_cmp {
+ struct untd_hdr hdr;
+ u8 padding;
+} __attribute__((packed));
+
+
+/**
+ * USB Network Transfer Descriptor - First fragment
+ *
+ * @hdr.len is the size of the *whole packet* (excluding UNTD
+ * headers); @fragment_len is the size of the payload (excluding UNTD
+ * headers, but including i1480 headers).
+ */
+struct untd_hdr_1st {
+ struct untd_hdr hdr;
+ __le16 fragment_len;
+ u8 padding[3];
+} __attribute__((packed));
+
+
+/**
+ * USB Network Transfer Descriptor - Next / Last [Rest]
+ *
+ * @hdr.len is the size of the payload, not including headers.
+ */
+struct untd_hdr_rst {
+ struct untd_hdr hdr;
+ u8 padding;
+} __attribute__((packed));
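+
+/*
+ * Illustrative sketch only (not part of the driver): given the UNTD
+ * header layouts above, this is roughly how one could estimate how
+ * many UNTD fragments a packet (i1480 header + network payload,
+ * @pkt_size bytes) will need, per the fragmentation scheme described
+ * at the top of this file. The real TX path does the equivalent
+ * bookkeeping when it builds its URB chain.
+ */
+static inline unsigned i1480u_frg_count_sketch(size_t pkt_size)
+{
+	size_t room_1st = i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_1st);
+	size_t room_rst = i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_rst);
+
+	if (pkt_size + sizeof(struct untd_hdr_cmp) <= i1480u_MAX_FRG_SIZE)
+		return 1;	/* fits in a single "complete" UNTD packet */
+	/* one first fragment plus as many "rest" fragments as needed */
+	return 1 + DIV_ROUND_UP(pkt_size - room_1st, room_rst);
+}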
+
+
+/**
+ * Transmission context
+ *
+ * Wraps all the stuff needed to track a pending/active tx
+ * operation.
+ */
+struct i1480u_tx {
+ struct list_head list_node;
+ struct i1480u *i1480u;
+ struct urb *urb;
+
+ struct sk_buff *skb;
+ struct wlp_tx_hdr *wlp_tx_hdr;
+
+ void *buf; /* if NULL, no new buf was used */
+ size_t buf_size;
+};
+
+/**
+ * Basic flow control
+ *
+ * "count" tracks how many TX URBs are outstanding. Only "max" TX URBs
+ * are allowed to be outstanding at once; when that value is reached the
+ * queue is stopped, and it is restarted once the number of outstanding
+ * URBs drops back to "threshold".
+ * "restart_count" counts how many times the TX queue needed to be
+ * restarted after "max" was exceeded and "threshold" was reached again.
+ * The timestamp "restart_ts" keeps track of when the counter was last
+ * queried (see sysfs handling of file wlp_tx_inflight).
+ */
+struct i1480u_tx_inflight {
+ atomic_t count;
+ unsigned long max;
+ unsigned long threshold;
+ unsigned long restart_ts;
+ atomic_t restart_count;
+};
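+
+/*
+ * Illustrative sketch only (the helper below is hypothetical, not part
+ * of the driver): how the TX path could apply the policy described
+ * above -- bump "count" for every URB submitted and stop the queue once
+ * "max" outstanding URBs is reached; the URB-completion path would then
+ * restart the queue (and bump "restart_count") once "count" drops back
+ * to "threshold".
+ */
+static inline
+void i1480u_tx_inflight_inc_sketch(struct i1480u_tx_inflight *inflight,
+				   struct net_device *net_dev)
+{
+	if (atomic_inc_return(&inflight->count) >= inflight->max)
+		netif_stop_queue(net_dev);	/* too many URBs in flight */
+}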
+
+/**
+ * Instance of an i1480u WLP interface
+ *
+ * Keeps references to the USB device that wraps it, as well as its
+ * interface and associated UWB host controller. It also keeps a link
+ * to the netdevice for integration into the networking stack.
+ * We maintain separate error histories for the tx and rx endpoints
+ * because the implementation does not rely on locking - having one
+ * shared structure between endpoints may cause problems. Adding locking
+ * to the implementation would cost more than adding a separate
+ * structure.
+ */
+struct i1480u {
+ struct usb_device *usb_dev;
+ struct usb_interface *usb_iface;
+ struct net_device *net_dev;
+
+ spinlock_t lock;
+ struct net_device_stats stats;
+
+ /* RX context handling */
+ struct sk_buff *rx_skb;
+ struct uwb_dev_addr rx_srcaddr;
+ size_t rx_untd_pkt_size;
+ struct i1480u_rx_buf {
+ struct i1480u *i1480u; /* back pointer */
+ struct urb *urb;
+ struct sk_buff *data; /* i1480u_MAX_RX_PKT_SIZE each */
+ } rx_buf[i1480u_RX_BUFS]; /* N bufs */
+
+ spinlock_t tx_list_lock; /* TX context */
+ struct list_head tx_list;
+ u8 tx_stream;
+
+ struct stats lqe_stats, rssi_stats; /* radio statistics */
+
+ /* Options we can set from sysfs */
+ struct wlp_options options;
+ struct uwb_notifs_handler uwb_notifs_handler;
+ struct edc tx_errors;
+ struct edc rx_errors;
+ struct wlp wlp;
+#ifdef i1480u_FLOW_CONTROL
+ struct urb *notif_urb;
+ struct edc notif_edc; /* error density counter */
+ u8 notif_buffer[1];
+#endif
+ struct i1480u_tx_inflight tx_inflight;
+};
+
+/* Internal interfaces */
+extern void i1480u_rx_cb(struct urb *urb);
+extern int i1480u_rx_setup(struct i1480u *);
+extern void i1480u_rx_release(struct i1480u *);
+extern void i1480u_tx_release(struct i1480u *);
+extern int i1480u_xmit_frame(struct wlp *, struct sk_buff *,
+ struct uwb_dev_addr *);
+extern void i1480u_stop_queue(struct wlp *);
+extern void i1480u_start_queue(struct wlp *);
+extern int i1480u_sysfs_setup(struct i1480u *);
+extern void i1480u_sysfs_release(struct i1480u *);
+
+/* netdev interface */
+extern int i1480u_open(struct net_device *);
+extern int i1480u_stop(struct net_device *);
+extern int i1480u_hard_start_xmit(struct sk_buff *, struct net_device *);
+extern void i1480u_tx_timeout(struct net_device *);
+extern int i1480u_set_config(struct net_device *, struct ifmap *);
+extern struct net_device_stats *i1480u_get_stats(struct net_device *);
+extern int i1480u_change_mtu(struct net_device *, int);
+extern void i1480u_uwb_notifs_cb(void *, struct uwb_dev *, enum uwb_notifs);
+
+/* bandwidth allocation callback */
+extern void i1480u_bw_alloc_cb(struct uwb_rsv *);
+
+/* Sys FS */
+extern struct attribute_group i1480u_wlp_attr_group;
+
+#endif /* #ifndef __i1480u_wlp_h__ */
diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c
new file mode 100644
index 00000000000..737d60cd5b7
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/lc.c
@@ -0,0 +1,421 @@
+/*
+ * WUSB Wire Adapter: WLP interface
+ * Driver for the Linux Network stack.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * This implements a very simple network driver for the WLP USB
+ * device that is associated to a UWB (Ultra Wide Band) host.
+ *
+ * This is seen as an interface of a composite device. Once the UWB
+ * host has an association to another WLP capable device, the
+ * networking interface (aka WLP) can start to send packets back and
+ * forth.
+ *
+ * Limitations:
+ *
+ * - Hand cranked; can't ifup the interface until there is an association
+ *
+ * - BW allocation very simplistic [see i1480u_mas_set() and callees].
+ *
+ *
+ * ROADMAP:
+ *
+ * ENTRY POINTS (driver model):
+ *
+ * i1480u_driver_{exit,init}(): initialization of the driver.
+ *
+ * i1480u_probe(): called by the driver code when a device
+ * matching 'i1480u_id_table' is connected.
+ *
+ * This allocs a netdev instance, inits with
+ * i1480u_add(), then registers_netdev().
+ * i1480u_init()
+ * i1480u_add()
+ *
+ * i1480u_disconnect(): device has been disconnected/module
+ * is being removed.
+ * i1480u_rm()
+ */
+#include <linux/version.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/uwb/debug.h>
+#include "i1480u-wlp.h"
+
+
+
+static inline
+void i1480u_init(struct i1480u *i1480u)
+{
+ /* nothing so far... doesn't it suck? */
+ spin_lock_init(&i1480u->lock);
+ INIT_LIST_HEAD(&i1480u->tx_list);
+ spin_lock_init(&i1480u->tx_list_lock);
+ wlp_options_init(&i1480u->options);
+ edc_init(&i1480u->tx_errors);
+ edc_init(&i1480u->rx_errors);
+#ifdef i1480u_FLOW_CONTROL
+ edc_init(&i1480u->notif_edc);
+#endif
+ stats_init(&i1480u->lqe_stats);
+ stats_init(&i1480u->rssi_stats);
+ wlp_init(&i1480u->wlp);
+}
+
+/**
+ * Fill WLP device information structure
+ *
+ * The structure will contain a few character arrays, each ending with a
+ * null terminated string. Each string has to fit (excluding terminating
+ * character) into a specified range obtained from the WLP substack.
+ *
+ * It is still not clear exactly how this device information should be
+ * obtained. Until we find out, we use the USB device descriptor as a
+ * fallback; some information elements have intuitive mappings, others do not.
+ */
+static
+void i1480u_fill_device_info(struct wlp *wlp, struct wlp_device_info *dev_info)
+{
+ struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
+ struct usb_device *usb_dev = i1480u->usb_dev;
+ /* Treat device name and model name the same */
+ if (usb_dev->descriptor.iProduct) {
+ usb_string(usb_dev, usb_dev->descriptor.iProduct,
+ dev_info->name, sizeof(dev_info->name));
+ usb_string(usb_dev, usb_dev->descriptor.iProduct,
+ dev_info->model_name, sizeof(dev_info->model_name));
+ }
+ if (usb_dev->descriptor.iManufacturer)
+ usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
+ dev_info->manufacturer,
+ sizeof(dev_info->manufacturer));
+ scnprintf(dev_info->model_nr, sizeof(dev_info->model_nr), "%04x",
+ __le16_to_cpu(usb_dev->descriptor.bcdDevice));
+ if (usb_dev->descriptor.iSerialNumber)
+ usb_string(usb_dev, usb_dev->descriptor.iSerialNumber,
+ dev_info->serial, sizeof(dev_info->serial));
+ /* FIXME: where should we obtain category? */
+ dev_info->prim_dev_type.category = cpu_to_le16(WLP_DEV_CAT_OTHER);
+ /* FIXME: Complete OUI and OUIsubdiv attributes */
+}
+
+#ifdef i1480u_FLOW_CONTROL
+/**
+ * Callback for the notification endpoint
+ *
+ * This mostly controls the xon/xoff protocol. In case of hard error,
+ * we stop the queue. If not, we always retry.
+ */
+static
+void i1480u_notif_cb(struct urb *urb, struct pt_regs *regs)
+{
+ struct i1480u *i1480u = urb->context;
+ struct usb_interface *usb_iface = i1480u->usb_iface;
+ struct device *dev = &usb_iface->dev;
+ int result;
+
+ switch (urb->status) {
+ case 0: /* Got valid data, do xon/xoff */
+ switch (i1480u->notif_buffer[0]) {
+ case 'N':
+ dev_err(dev, "XOFF STOPPING queue at %lu\n", jiffies);
+ netif_stop_queue(i1480u->net_dev);
+ break;
+ case 'A':
+ dev_err(dev, "XON STARTING queue at %lu\n", jiffies);
+ netif_start_queue(i1480u->net_dev);
+ break;
+ default:
+ dev_err(dev, "NEP: unknown data 0x%02hhx\n",
+ i1480u->notif_buffer[0]);
+ }
+ break;
+ case -ECONNRESET: /* Controlled situation ... */
+ case -ENOENT: /* we killed the URB... */
+ dev_err(dev, "NEP: URB reset/noent %d\n", urb->status);
+ goto error;
+ case -ESHUTDOWN: /* going away! */
+ dev_err(dev, "NEP: URB down %d\n", urb->status);
+ goto error;
+ default: /* Retry unless it gets ugly */
+ if (edc_inc(&i1480u->notif_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "NEP: URB max acceptable errors "
+ "exceeded; resetting device\n");
+ goto error_reset;
+ }
+ dev_err(dev, "NEP: URB error %d\n", urb->status);
+ break;
+ }
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "NEP: Can't resubmit URB: %d; resetting device\n",
+ result);
+ goto error_reset;
+ }
+ return;
+
+error_reset:
+	wlp_reset_all(&i1480u->wlp);
+error:
+ netif_stop_queue(i1480u->net_dev);
+ return;
+}
+#endif
+
+static
+int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface)
+{
+ int result = -ENODEV;
+ struct wlp *wlp = &i1480u->wlp;
+ struct usb_device *usb_dev = interface_to_usbdev(iface);
+ struct net_device *net_dev = i1480u->net_dev;
+ struct uwb_rc *rc;
+ struct uwb_dev *uwb_dev;
+#ifdef i1480u_FLOW_CONTROL
+ struct usb_endpoint_descriptor *epd;
+#endif
+
+ i1480u->usb_dev = usb_get_dev(usb_dev);
+ i1480u->usb_iface = iface;
+ rc = uwb_rc_get_by_grandpa(&i1480u->usb_dev->dev);
+ if (rc == NULL) {
+ dev_err(&iface->dev, "Cannot get associated UWB Radio "
+ "Controller\n");
+ goto out;
+ }
+ wlp->xmit_frame = i1480u_xmit_frame;
+ wlp->fill_device_info = i1480u_fill_device_info;
+ wlp->stop_queue = i1480u_stop_queue;
+ wlp->start_queue = i1480u_start_queue;
+ result = wlp_setup(wlp, rc);
+ if (result < 0) {
+ dev_err(&iface->dev, "Cannot setup WLP\n");
+ goto error_wlp_setup;
+ }
+ result = 0;
+ ether_setup(net_dev); /* make it an etherdevice */
+ uwb_dev = &rc->uwb_dev;
+ /* FIXME: hookup address change notifications? */
+
+ memcpy(net_dev->dev_addr, uwb_dev->mac_addr.data,
+ sizeof(net_dev->dev_addr));
+
+ net_dev->hard_header_len = sizeof(struct untd_hdr_cmp)
+ + sizeof(struct wlp_tx_hdr)
+ + WLP_DATA_HLEN
+ + ETH_HLEN;
+ net_dev->mtu = 3500;
+ net_dev->tx_queue_len = 20; /* FIXME: maybe use 1000? */
+
+/* net_dev->flags &= ~IFF_BROADCAST; FIXME: BUG in firmware */
+ /* FIXME: multicast disabled */
+ net_dev->flags &= ~IFF_MULTICAST;
+ net_dev->features &= ~NETIF_F_SG;
+ net_dev->features &= ~NETIF_F_FRAGLIST;
+ /* All NETIF_F_*_CSUM disabled */
+ net_dev->features |= NETIF_F_HIGHDMA;
+ net_dev->watchdog_timeo = 5*HZ; /* FIXME: a better default? */
+
+ net_dev->open = i1480u_open;
+ net_dev->stop = i1480u_stop;
+ net_dev->hard_start_xmit = i1480u_hard_start_xmit;
+ net_dev->tx_timeout = i1480u_tx_timeout;
+ net_dev->get_stats = i1480u_get_stats;
+ net_dev->set_config = i1480u_set_config;
+ net_dev->change_mtu = i1480u_change_mtu;
+
+#ifdef i1480u_FLOW_CONTROL
+ /* Notification endpoint setup (submitted when we open the device) */
+ i1480u->notif_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (i1480u->notif_urb == NULL) {
+ dev_err(&iface->dev, "Unable to allocate notification URB\n");
+ result = -ENOMEM;
+ goto error_urb_alloc;
+ }
+ epd = &iface->cur_altsetting->endpoint[0].desc;
+ usb_fill_int_urb(i1480u->notif_urb, usb_dev,
+ usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
+ i1480u->notif_buffer, sizeof(i1480u->notif_buffer),
+ i1480u_notif_cb, i1480u, epd->bInterval);
+
+#endif
+
+ i1480u->tx_inflight.max = i1480u_TX_INFLIGHT_MAX;
+ i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD;
+ i1480u->tx_inflight.restart_ts = jiffies;
+ usb_set_intfdata(iface, i1480u);
+ return result;
+
+#ifdef i1480u_FLOW_CONTROL
+error_urb_alloc:
+#endif
+ wlp_remove(wlp);
+error_wlp_setup:
+ uwb_rc_put(rc);
+out:
+ usb_put_dev(i1480u->usb_dev);
+ return result;
+}
+
+static void i1480u_rm(struct i1480u *i1480u)
+{
+ struct uwb_rc *rc = i1480u->wlp.rc;
+ usb_set_intfdata(i1480u->usb_iface, NULL);
+#ifdef i1480u_FLOW_CONTROL
+ usb_kill_urb(i1480u->notif_urb);
+ usb_free_urb(i1480u->notif_urb);
+#endif
+ wlp_remove(&i1480u->wlp);
+ uwb_rc_put(rc);
+ usb_put_dev(i1480u->usb_dev);
+}
+
+/** Just setup @net_dev's i1480u private data */
+static void i1480u_netdev_setup(struct net_device *net_dev)
+{
+ struct i1480u *i1480u = netdev_priv(net_dev);
+ /* Initialize @i1480u */
+ memset(i1480u, 0, sizeof(*i1480u));
+ i1480u_init(i1480u);
+}
+
+/**
+ * Probe a i1480u interface and register it
+ *
+ * @iface: USB interface to link to
+ * @id: USB class/subclass/protocol id
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Does basic housekeeping stuff and then allocs a netdev with space
+ * for the i1480u data. Initializes, registers in i1480u, registers in
+ * netdev, ready to go.
+ */
+static int i1480u_probe(struct usb_interface *iface,
+ const struct usb_device_id *id)
+{
+ int result;
+ struct net_device *net_dev;
+ struct device *dev = &iface->dev;
+ struct i1480u *i1480u;
+
+ /* Allocate instance [calls i1480u_netdev_setup() on it] */
+ result = -ENOMEM;
+ net_dev = alloc_netdev(sizeof(*i1480u), "wlp%d", i1480u_netdev_setup);
+ if (net_dev == NULL) {
+ dev_err(dev, "no memory for network device instance\n");
+ goto error_alloc_netdev;
+ }
+ SET_NETDEV_DEV(net_dev, dev);
+ i1480u = netdev_priv(net_dev);
+ i1480u->net_dev = net_dev;
+ result = i1480u_add(i1480u, iface); /* Now setup all the wlp stuff */
+ if (result < 0) {
+ dev_err(dev, "cannot add i1480u device: %d\n", result);
+ goto error_i1480u_add;
+ }
+ result = register_netdev(net_dev); /* Okey dokey, bring it up */
+ if (result < 0) {
+ dev_err(dev, "cannot register network device: %d\n", result);
+ goto error_register_netdev;
+ }
+	result = i1480u_sysfs_setup(i1480u);
+ if (result < 0)
+ goto error_sysfs_init;
+ return 0;
+
+error_sysfs_init:
+ unregister_netdev(net_dev);
+error_register_netdev:
+ i1480u_rm(i1480u);
+error_i1480u_add:
+ free_netdev(net_dev);
+error_alloc_netdev:
+ return result;
+}
+
+
+/**
+ * Disconnect an i1480u from the system.
+ *
+ * i1480u_stop() has been called before, so all the rx and tx contexts
+ * have been taken down already. Make sure the queue is stopped,
+ * unregister netdev and i1480u, free and kill.
+ */
+static void i1480u_disconnect(struct usb_interface *iface)
+{
+ struct i1480u *i1480u;
+ struct net_device *net_dev;
+
+ i1480u = usb_get_intfdata(iface);
+ net_dev = i1480u->net_dev;
+ netif_stop_queue(net_dev);
+#ifdef i1480u_FLOW_CONTROL
+ usb_kill_urb(i1480u->notif_urb);
+#endif
+ i1480u_sysfs_release(i1480u);
+ unregister_netdev(net_dev);
+ i1480u_rm(i1480u);
+ free_netdev(net_dev);
+}
+
+static struct usb_device_id i1480u_id_table[] = {
+ {
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE \
+ | USB_DEVICE_ID_MATCH_DEV_INFO \
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x8086,
+ .idProduct = 0x0c3b,
+ .bDeviceClass = 0xef,
+ .bDeviceSubClass = 0x02,
+ .bDeviceProtocol = 0x02,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(usb, i1480u_id_table);
+
+static struct usb_driver i1480u_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = i1480u_probe,
+ .disconnect = i1480u_disconnect,
+ .id_table = i1480u_id_table,
+};
+
+static int __init i1480u_driver_init(void)
+{
+ return usb_register(&i1480u_driver);
+}
+module_init(i1480u_driver_init);
+
+
+static void __exit i1480u_driver_exit(void)
+{
+ usb_deregister(&i1480u_driver);
+}
+module_exit(i1480u_driver_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("i1480 Wireless UWB Link WLP networking for USB");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c
new file mode 100644
index 00000000000..8802ac43d87
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c
@@ -0,0 +1,368 @@
+/*
+ * WUSB Wire Adapter: WLP interface
+ * Driver for the Linux Network stack.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * Implementation of the netdevice linkage (except tx and rx related stuff).
+ *
+ * ROADMAP:
+ *
+ * ENTRY POINTS (Net device):
+ *
+ * i1480u_open(): Called when we ifconfig up the interface;
+ * associates to a UWB host controller, reserves
+ * bandwidth (MAS), sets up RX USB URB and starts
+ * the queue.
+ *
+ * i1480u_stop(): Called when we ifconfig down a interface;
+ * reverses _open().
+ *
+ * i1480u_set_config():
+ */
+
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/uwb/debug.h>
+#include "i1480u-wlp.h"
+
+struct i1480u_cmd_set_ip_mas {
+ struct uwb_rccb rccb;
+ struct uwb_dev_addr addr;
+ u8 stream;
+ u8 owner;
+ u8 type; /* enum uwb_drp_type */
+ u8 baMAS[32];
+} __attribute__((packed));
+
+
+static
+int i1480u_set_ip_mas(
+ struct uwb_rc *rc,
+ const struct uwb_dev_addr *dstaddr,
+ u8 stream, u8 owner, u8 type, unsigned long *mas)
+{
+
+ int result;
+ struct i1480u_cmd_set_ip_mas *cmd;
+ struct uwb_rc_evt_confirm reply;
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_kzalloc;
+ cmd->rccb.bCommandType = 0xfd;
+ cmd->rccb.wCommand = cpu_to_le16(0x000e);
+ cmd->addr = *dstaddr;
+ cmd->stream = stream;
+ cmd->owner = owner;
+ cmd->type = type;
+ if (mas == NULL)
+ memset(cmd->baMAS, 0x00, sizeof(cmd->baMAS));
+ else
+ memcpy(cmd->baMAS, mas, sizeof(cmd->baMAS));
+ reply.rceb.bEventType = 0xfd;
+ reply.rceb.wEvent = cpu_to_le16(0x000e);
+ result = uwb_rc_cmd(rc, "SET-IP-MAS", &cmd->rccb, sizeof(*cmd),
+ &reply.rceb, sizeof(reply));
+ if (result < 0)
+ goto error_cmd;
+	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+ dev_err(&rc->uwb_dev.dev,
+ "SET-IP-MAS: command execution failed: %d\n",
+ reply.bResultCode);
+ result = -EIO;
+ }
+error_cmd:
+ kfree(cmd);
+error_kzalloc:
+ return result;
+}
+
+/*
+ * Inform a WLP interface of a MAS reservation
+ *
+ * @rc is assumed refcnted.
+ */
+/* FIXME: detect if remote device is WLP capable? */
+static int i1480u_mas_set_dev(struct uwb_dev *uwb_dev, struct uwb_rc *rc,
+ u8 stream, u8 owner, u8 type, unsigned long *mas)
+{
+ int result = 0;
+ struct device *dev = &rc->uwb_dev.dev;
+
+ result = i1480u_set_ip_mas(rc, &uwb_dev->dev_addr, stream, owner,
+ type, mas);
+ if (result < 0) {
+ char rcaddrbuf[UWB_ADDR_STRSIZE], devaddrbuf[UWB_ADDR_STRSIZE];
+ uwb_dev_addr_print(rcaddrbuf, sizeof(rcaddrbuf),
+ &rc->uwb_dev.dev_addr);
+ uwb_dev_addr_print(devaddrbuf, sizeof(devaddrbuf),
+ &uwb_dev->dev_addr);
+ dev_err(dev, "Set IP MAS (%s to %s) failed: %d\n",
+ rcaddrbuf, devaddrbuf, result);
+ }
+ return result;
+}
+
+/**
+ * Called by bandwidth allocator when change occurs in reservation.
+ *
+ * @rsv: The reservation that is being established, modified, or
+ * terminated.
+ *
+ * When a reservation is established, modified, or terminated the upper layer
+ * (WLP here) needs to set/update the currently available Media Access
+ * Slots that can be used for IP traffic.
+ *
+ * Our action taken during failure depends on how the reservation is being
+ * changed:
+ * - if reservation is being established we do nothing if we cannot set the
+ * new MAS to be used
+ * - if reservation is being terminated we revert back to PCA whether the
+ * SET IP MAS command succeeds or not.
+ */
+void i1480u_bw_alloc_cb(struct uwb_rsv *rsv)
+{
+ int result = 0;
+ struct i1480u *i1480u = rsv->pal_priv;
+ struct device *dev = &i1480u->usb_iface->dev;
+ struct uwb_dev *target_dev = rsv->target.dev;
+ struct uwb_rc *rc = i1480u->wlp.rc;
+ u8 stream = rsv->stream;
+ int type = rsv->type;
+ int is_owner = rsv->owner == &rc->uwb_dev;
+ unsigned long *bmp = rsv->mas.bm;
+
+ dev_err(dev, "WLP callback called - sending set ip mas\n");
+ /*user cannot change options while setting configuration*/
+ mutex_lock(&i1480u->options.mutex);
+ switch (rsv->state) {
+ case UWB_RSV_STATE_T_ACCEPTED:
+ case UWB_RSV_STATE_O_ESTABLISHED:
+ result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner,
+ type, bmp);
+ if (result < 0) {
+ dev_err(dev, "MAS reservation failed: %d\n", result);
+ goto out;
+ }
+ if (is_owner) {
+ wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr,
+ WLP_DRP | stream);
+ wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 0);
+ }
+ break;
+ case UWB_RSV_STATE_NONE:
+ /* revert back to PCA */
+ result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner,
+ type, bmp);
+ if (result < 0)
+ dev_err(dev, "MAS reservation failed: %d\n", result);
+ /* Revert to PCA even though SET IP MAS failed. */
+ wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr,
+ i1480u->options.pca_base_priority);
+ wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 1);
+ break;
+ default:
+ dev_err(dev, "unexpected WLP reservation state: %s (%d).\n",
+ uwb_rsv_state_str(rsv->state), rsv->state);
+ break;
+ }
+out:
+ mutex_unlock(&i1480u->options.mutex);
+ return;
+}
+
+/**
+ *
+ * Called on 'ifconfig up'
+ */
+int i1480u_open(struct net_device *net_dev)
+{
+ int result;
+ struct i1480u *i1480u = netdev_priv(net_dev);
+ struct wlp *wlp = &i1480u->wlp;
+ struct uwb_rc *rc;
+ struct device *dev = &i1480u->usb_iface->dev;
+
+ rc = wlp->rc;
+ result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */
+ if (result < 0)
+ goto error_rx_setup;
+ netif_wake_queue(net_dev);
+#ifdef i1480u_FLOW_CONTROL
+	result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);
+ if (result < 0) {
+ dev_err(dev, "Can't submit notification URB: %d\n", result);
+ goto error_notif_urb_submit;
+ }
+#endif
+ i1480u->uwb_notifs_handler.cb = i1480u_uwb_notifs_cb;
+ i1480u->uwb_notifs_handler.data = i1480u;
+ if (uwb_bg_joined(rc))
+ netif_carrier_on(net_dev);
+ else
+ netif_carrier_off(net_dev);
+ uwb_notifs_register(rc, &i1480u->uwb_notifs_handler);
+ /* Interface is up with an address, now we can create WSS */
+ result = wlp_wss_setup(net_dev, &wlp->wss);
+ if (result < 0) {
+		dev_err(dev, "Can't create WSS: %d\n", result);
+ goto error_notif_deregister;
+ }
+ return 0;
+error_notif_deregister:
+ uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler);
+#ifdef i1480u_FLOW_CONTROL
+error_notif_urb_submit:
+#endif
+ netif_stop_queue(net_dev);
+ i1480u_rx_release(i1480u);
+error_rx_setup:
+ return result;
+}
+
+
+/**
+ * Called on 'ifconfig down'
+ */
+int i1480u_stop(struct net_device *net_dev)
+{
+ struct i1480u *i1480u = netdev_priv(net_dev);
+ struct wlp *wlp = &i1480u->wlp;
+ struct uwb_rc *rc = wlp->rc;
+
+ BUG_ON(wlp->rc == NULL);
+ wlp_wss_remove(&wlp->wss);
+ uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler);
+ netif_carrier_off(net_dev);
+#ifdef i1480u_FLOW_CONTROL
+ usb_kill_urb(i1480u->notif_urb);
+#endif
+ netif_stop_queue(net_dev);
+ i1480u_rx_release(i1480u);
+ i1480u_tx_release(i1480u);
+ return 0;
+}
+
+
+/** Report statistics */
+struct net_device_stats *i1480u_get_stats(struct net_device *net_dev)
+{
+ struct i1480u *i1480u = netdev_priv(net_dev);
+ return &i1480u->stats;
+}
+
+
+/**
+ *
+ * Change the interface config--we probably don't have to do anything.
+ */
+int i1480u_set_config(struct net_device *net_dev, struct ifmap *map)
+{
+ int result;
+ struct i1480u *i1480u = netdev_priv(net_dev);
+ BUG_ON(i1480u->wlp.rc == NULL);
+ result = 0;
+ return result;
+}
+
+/**
+ * Change the MTU of the interface
+ */
+int i1480u_change_mtu(struct net_device *net_dev, int mtu)
+{
+ static union {
+ struct wlp_tx_hdr tx;
+ struct wlp_rx_hdr rx;
+ } i1480u_all_hdrs;
+
+ if (mtu < ETH_HLEN) /* We encap eth frames */
+ return -ERANGE;
+ if (mtu > 4000 - sizeof(i1480u_all_hdrs))
+ return -ERANGE;
+ net_dev->mtu = mtu;
+ return 0;
+}
+
+
+/**
+ * Callback function to handle events from UWB
+ *
+ * When we see other devices we know the carrier is ok; if we are the
+ * only device in the beacon group we set the carrier state to off.
+ */
+void i1480u_uwb_notifs_cb(void *data, struct uwb_dev *uwb_dev,
+ enum uwb_notifs event)
+{
+ struct i1480u *i1480u = data;
+ struct net_device *net_dev = i1480u->net_dev;
+ struct device *dev = &i1480u->usb_iface->dev;
+ switch (event) {
+ case UWB_NOTIF_BG_JOIN:
+ netif_carrier_on(net_dev);
+ dev_info(dev, "Link is up\n");
+ break;
+ case UWB_NOTIF_BG_LEAVE:
+ netif_carrier_off(net_dev);
+ dev_info(dev, "Link is down\n");
+ break;
+ default:
+ dev_err(dev, "don't know how to handle event %d from uwb\n",
+ event);
+ }
+}
+
+/**
+ * Stop the network queue
+ *
+ * Enables the WLP substack to stop the network queue. We also set the
+ * flow control threshold to zero at this time to prevent the flow
+ * control from restarting the queue.
+ *
+ * We are losing the current threshold value here ... FIXME?
+ */
+void i1480u_stop_queue(struct wlp *wlp)
+{
+ struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
+ struct net_device *net_dev = i1480u->net_dev;
+ i1480u->tx_inflight.threshold = 0;
+ netif_stop_queue(net_dev);
+}
+
+/**
+ * Start the network queue
+ *
+ * Enables the WLP substack to start the network queue. Also re-enables
+ * the flow control to manage the queue again.
+ *
+ * We re-enable the flow control by storing the default threshold in the
+ * flow control threshold. This means that if the user modified the
+ * threshold before the queue was stopped and restarted that information
+ * will be lost. FIXME?
+ */
+void i1480u_start_queue(struct wlp *wlp)
+{
+ struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
+ struct net_device *net_dev = i1480u->net_dev;
+ i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD;
+ netif_start_queue(net_dev);
+}
diff --git a/drivers/uwb/i1480/i1480u-wlp/rx.c b/drivers/uwb/i1480/i1480u-wlp/rx.c
new file mode 100644
index 00000000000..9fc035354a7
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/rx.c
@@ -0,0 +1,486 @@
+/*
+ * WUSB Wire Adapter: WLP interface
+ * Driver for the Linux Network stack.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * i1480u's RX handling is simple. i1480u will send the received
+ * network packets broken up in fragments; 1 to N fragments make a
+ * packet, we assemble them together and deliver the packet with netif_rx().
+ *
+ * Because each USB transfer is a *single* fragment (except when the
+ * transfer contains a first fragment), each URB thus called back
+ * contains one or two fragments. So we queue N URBs, each with its own
+ * fragment buffer. When a URB is done, we process it (adding to the
+ * current skb from the fragment buffer until complete). Once
+ * processed, we requeue the URB. There is always a bunch of URBs
+ * ready to take data, so the gap between transfers should be minimal.
+ *
+ * An URB's transfer buffer is the data field of a socket buffer. This
+ * reduces copying as data can be passed directly to network layer. If a
+ * complete packet or 1st fragment is received the URB's transfer buffer is
+ * taken away from it and used to send data to the network layer. In this
+ * case a new transfer buffer is allocated to the URB before being requeued.
+ * If a "NEXT" or "LAST" fragment is received, the fragment content is
+ * appended to the RX packet under construction and the transfer buffer
+ * is reused. To be able to use this buffer to assemble complete packets
+ * we set each buffer's size to that of the MAX ethernet packet that can
+ * be received. There is thus room for improvement in memory usage.
+ *
+ * When the max tx fragment size increases, we should be able to read
+ * data into the skbs directly with very simple code.
+ *
+ * ROADMAP:
+ *
+ * ENTRY POINTS:
+ *
+ * i1480u_rx_setup(): setup RX context [from i1480u_open()]
+ *
+ * i1480u_rx_release(): release RX context [from i1480u_stop()]
+ *
+ * i1480u_rx_cb(): called when the RX USB URB receives a
+ * packet. It removes the header and pushes it up
+ * the Linux netdev stack with netif_rx().
+ *
+ * i1480u_rx_buffer()
+ * i1480u_drop() and i1480u_fix()
+ * i1480u_skb_deliver
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include "i1480u-wlp.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+
+/**
+ * Setup the RX context
+ *
+ * Each URB is provided with a transfer_buffer that is the data field
+ * of a new socket buffer.
+ */
+int i1480u_rx_setup(struct i1480u *i1480u)
+{
+ int result, cnt;
+ struct device *dev = &i1480u->usb_iface->dev;
+ struct net_device *net_dev = i1480u->net_dev;
+ struct usb_endpoint_descriptor *epd;
+ struct sk_buff *skb;
+
+ /* Alloc RX stuff */
+ i1480u->rx_skb = NULL; /* not in process of receiving packet */
+ result = -ENOMEM;
+ epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
+ for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
+ struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
+ rx_buf->i1480u = i1480u;
+ skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
+ if (!skb) {
+ dev_err(dev,
+ "RX: cannot allocate RX buffer %d\n", cnt);
+ result = -ENOMEM;
+ goto error;
+ }
+ skb->dev = net_dev;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_reserve(skb, 2);
+ rx_buf->data = skb;
+ rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (unlikely(rx_buf->urb == NULL)) {
+ dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
+ result = -ENOMEM;
+ goto error;
+ }
+ usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
+ usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
+ rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
+ i1480u_rx_cb, rx_buf);
+ result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
+ if (unlikely(result < 0)) {
+ dev_err(dev, "RX: cannot submit URB %d: %d\n",
+ cnt, result);
+ goto error;
+ }
+ }
+ return 0;
+
+error:
+ i1480u_rx_release(i1480u);
+ return result;
+}
+
+
+/** Release resources associated to the rx context */
+void i1480u_rx_release(struct i1480u *i1480u)
+{
+ int cnt;
+ for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
+ if (i1480u->rx_buf[cnt].data)
+ dev_kfree_skb(i1480u->rx_buf[cnt].data);
+ if (i1480u->rx_buf[cnt].urb) {
+ usb_kill_urb(i1480u->rx_buf[cnt].urb);
+ usb_free_urb(i1480u->rx_buf[cnt].urb);
+ }
+ }
+ if (i1480u->rx_skb != NULL)
+ dev_kfree_skb(i1480u->rx_skb);
+}
+
+static
+void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
+{
+ int cnt;
+ for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
+ if (i1480u->rx_buf[cnt].urb)
+ usb_unlink_urb(i1480u->rx_buf[cnt].urb);
+ }
+}
+
+/** Fix an out-of-sequence packet */
+#define i1480u_fix(i1480u, msg...) \
+do { \
+ if (printk_ratelimit()) \
+ dev_err(&i1480u->usb_iface->dev, msg); \
+ dev_kfree_skb_irq(i1480u->rx_skb); \
+ i1480u->rx_skb = NULL; \
+ i1480u->rx_untd_pkt_size = 0; \
+} while (0)
+
+
+/** Drop an out-of-sequence packet */
+#define i1480u_drop(i1480u, msg...) \
+do { \
+ if (printk_ratelimit()) \
+ dev_err(&i1480u->usb_iface->dev, msg); \
+ i1480u->stats.rx_dropped++; \
+} while (0)
+
+
+
+
+/** Finalizes setting up the SKB and delivers it
+ *
+ * We first pass the incoming frame to the WLP substack for verification.
+ * It may also be a WLP association frame, in which case WLP will take
+ * over the processing. If WLP does not take it over it will still verify
+ * it; if the frame is invalid the skb will be freed by WLP and we will
+ * not continue parsing.
+ */
+static
+void i1480u_skb_deliver(struct i1480u *i1480u)
+{
+ int should_parse;
+ struct net_device *net_dev = i1480u->net_dev;
+ struct device *dev = &i1480u->usb_iface->dev;
+
+ d_printf(6, dev, "RX delivered pre skb(%p), %u bytes\n",
+ i1480u->rx_skb, i1480u->rx_skb->len);
+ d_dump(7, dev, i1480u->rx_skb->data, i1480u->rx_skb->len);
+ should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
+ &i1480u->rx_srcaddr);
+ if (!should_parse)
+ goto out;
+ i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
+ d_printf(5, dev, "RX delivered skb(%p), %u bytes\n",
+ i1480u->rx_skb, i1480u->rx_skb->len);
+ d_dump(7, dev, i1480u->rx_skb->data,
+ i1480u->rx_skb->len > 72 ? 72 : i1480u->rx_skb->len);
+ i1480u->stats.rx_packets++;
+ i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size;
+ net_dev->last_rx = jiffies;
+ /* FIXME: flow control: check netif_rx() retval */
+
+ netif_rx(i1480u->rx_skb); /* deliver */
+out:
+ i1480u->rx_skb = NULL;
+ i1480u->rx_untd_pkt_size = 0;
+}
+
+
+/**
+ * Process a buffer of data received from the USB RX endpoint
+ *
+ * First fragment arrives with next or last fragment. All other fragments
+ * arrive alone.
+ *
+ * /me hates long functions.
+ */
+static
+void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
+{
+ unsigned pkt_completed = 0; /* !0 when we got all pkt fragments */
+ size_t untd_hdr_size, untd_frg_size;
+ size_t i1480u_hdr_size;
+ struct wlp_rx_hdr *i1480u_hdr = NULL;
+
+ struct i1480u *i1480u = rx_buf->i1480u;
+ struct sk_buff *skb = rx_buf->data;
+ int size_left = rx_buf->urb->actual_length;
+ void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
+ struct untd_hdr *untd_hdr;
+
+ struct net_device *net_dev = i1480u->net_dev;
+ struct device *dev = &i1480u->usb_iface->dev;
+ struct sk_buff *new_skb;
+
+#if 0
+ dev_fnstart(dev,
+ "(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left);
+ dev_err(dev, "RX packet, %zu bytes\n", size_left);
+ dump_bytes(dev, ptr, size_left);
+#endif
+ i1480u_hdr_size = sizeof(struct wlp_rx_hdr);
+
+ while (size_left > 0) {
+ if (pkt_completed) {
+			i1480u_drop(i1480u, "RX: fragment follows completed "
+ "packet in same buffer. Dropping\n");
+ break;
+ }
+ untd_hdr = ptr;
+ if (size_left < sizeof(*untd_hdr)) { /* Check the UNTD header */
+ i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
+ goto out;
+ }
+ if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) { /* Paranoia: TX set? */
+ i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
+ goto out;
+ }
+ switch (untd_hdr_type(untd_hdr)) { /* Check the UNTD header type */
+ case i1480u_PKT_FRAG_1ST: {
+ struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
+ dev_dbg(dev, "1st fragment\n");
+ untd_hdr_size = sizeof(struct untd_hdr_1st);
+ if (i1480u->rx_skb != NULL)
+ i1480u_fix(i1480u, "RX: 1st fragment out of "
+ "sequence! Fixing\n");
+ if (size_left < untd_hdr_size + i1480u_hdr_size) {
+ i1480u_drop(i1480u, "RX: short 1st fragment! "
+ "Dropping\n");
+ goto out;
+ }
+ i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
+ - i1480u_hdr_size;
+ untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
+ if (size_left < untd_hdr_size + untd_frg_size) {
+ i1480u_drop(i1480u,
+ "RX: short payload! Dropping\n");
+ goto out;
+ }
+ i1480u->rx_skb = skb;
+ i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
+ i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
+ skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
+ skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
+ stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
+ stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
+ rx_buf->data = NULL; /* need to create new buffer */
+ break;
+ }
+ case i1480u_PKT_FRAG_NXT: {
+ dev_dbg(dev, "nxt fragment\n");
+ untd_hdr_size = sizeof(struct untd_hdr_rst);
+ if (i1480u->rx_skb == NULL) {
+ i1480u_drop(i1480u, "RX: next fragment out of "
+ "sequence! Dropping\n");
+ goto out;
+ }
+ if (size_left < untd_hdr_size) {
+ i1480u_drop(i1480u, "RX: short NXT fragment! "
+ "Dropping\n");
+ goto out;
+ }
+ untd_frg_size = le16_to_cpu(untd_hdr->len);
+ if (size_left < untd_hdr_size + untd_frg_size) {
+ i1480u_drop(i1480u,
+ "RX: short payload! Dropping\n");
+ goto out;
+ }
+ memmove(skb_put(i1480u->rx_skb, untd_frg_size),
+ ptr + untd_hdr_size, untd_frg_size);
+ break;
+ }
+ case i1480u_PKT_FRAG_LST: {
+ dev_dbg(dev, "Lst fragment\n");
+ untd_hdr_size = sizeof(struct untd_hdr_rst);
+ if (i1480u->rx_skb == NULL) {
+ i1480u_drop(i1480u, "RX: last fragment out of "
+ "sequence! Dropping\n");
+ goto out;
+ }
+ if (size_left < untd_hdr_size) {
+ i1480u_drop(i1480u, "RX: short LST fragment! "
+ "Dropping\n");
+ goto out;
+ }
+ untd_frg_size = le16_to_cpu(untd_hdr->len);
+ if (size_left < untd_frg_size + untd_hdr_size) {
+ i1480u_drop(i1480u,
+ "RX: short payload! Dropping\n");
+ goto out;
+ }
+ memmove(skb_put(i1480u->rx_skb, untd_frg_size),
+ ptr + untd_hdr_size, untd_frg_size);
+ pkt_completed = 1;
+ break;
+ }
+ case i1480u_PKT_FRAG_CMP: {
+ dev_dbg(dev, "cmp fragment\n");
+ untd_hdr_size = sizeof(struct untd_hdr_cmp);
+ if (i1480u->rx_skb != NULL)
+ i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
+ " fragment!\n");
+ if (size_left < untd_hdr_size + i1480u_hdr_size) {
+ i1480u_drop(i1480u, "RX: short CMP fragment! "
+ "Dropping\n");
+ goto out;
+ }
+ i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
+ untd_frg_size = i1480u->rx_untd_pkt_size;
+ if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
+ i1480u_drop(i1480u,
+ "RX: short payload! Dropping\n");
+ goto out;
+ }
+ i1480u->rx_skb = skb;
+ i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
+ i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
+ stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
+ stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
+ skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
+ skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
+ rx_buf->data = NULL; /* for hand off skb to network stack */
+ pkt_completed = 1;
+ i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
+ break;
+ }
+ default:
+ i1480u_drop(i1480u, "RX: unknown packet type %u! "
+ "Dropping\n", untd_hdr_type(untd_hdr));
+ goto out;
+ }
+ size_left -= untd_hdr_size + untd_frg_size;
+ if (size_left > 0)
+ ptr += untd_hdr_size + untd_frg_size;
+ }
+ if (pkt_completed)
+ i1480u_skb_deliver(i1480u);
+out:
+ /* recreate needed RX buffers */
+ if (rx_buf->data == NULL) {
+ /* buffer is being used to receive packet, create new */
+ new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
+ if (!new_skb) {
+ if (printk_ratelimit())
+ dev_err(dev,
+ "RX: cannot allocate RX buffer\n");
+ } else {
+ new_skb->dev = net_dev;
+ new_skb->ip_summed = CHECKSUM_NONE;
+ skb_reserve(new_skb, 2);
+ rx_buf->data = new_skb;
+ }
+ }
+ return;
+}
+
+
+/**
+ * Called when an RX URB has finished receiving or has found some kind
+ * of error condition.
+ *
+ * LIMITATIONS:
+ *
+ * - We read USB-transfers, each transfer contains a SINGLE fragment
+ * (can contain a complete packet, or a 1st, next, or last fragment
+ * of a packet).
+ * Looks like a transfer can contain more than one fragment (07/18/06)
+ *
+ * - Each transfer buffer is the size of the maximum packet size (minus
+ * headroom), i1480u_MAX_PKT_SIZE - 2
+ *
+ * - We always read the full USB-transfer, no partials.
+ *
+ * - Each transfer is read directly into a skb. This skb will be used to
+ * send data to the upper layers if it is the first fragment or a complete
+ * packet. In the other cases the data will be copied from the skb to
+ * another skb that is being prepared for the upper layers from a prev
+ * first fragment.
+ *
+ * It is simply too much of a pain. Gosh, there should be a unified
+ * SG infrastructure for *everything* [so that I could declare a SG
+ * buffer, pass it to USB for receiving, append some space to it if
+ * I wish, receive more until I have the whole chunk, adapt
+ * pointers on each fragment to remove hardware headers and then
+ * attach that to an skbuff and netif_rx()].
+ */
+void i1480u_rx_cb(struct urb *urb)
+{
+ int result;
+ int do_parse_buffer = 1;
+ struct i1480u_rx_buf *rx_buf = urb->context;
+ struct i1480u *i1480u = rx_buf->i1480u;
+ struct device *dev = &i1480u->usb_iface->dev;
+ unsigned long flags;
+ u8 rx_buf_idx = rx_buf - i1480u->rx_buf;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ECONNRESET: /* Not an error, but a controlled situation; */
+ case -ENOENT: /* (we killed the URB)...so, no broadcast */
+ case -ESHUTDOWN: /* going away! */
+ dev_err(dev, "RX URB[%u]: goind down %d\n",
+ rx_buf_idx, urb->status);
+ goto error;
+ default:
+ dev_err(dev, "RX URB[%u]: unknown status %d\n",
+ rx_buf_idx, urb->status);
+ if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "RX: max acceptable errors exceeded,"
+ " resetting device.\n");
+ i1480u_rx_unlink_urbs(i1480u);
+ wlp_reset_all(&i1480u->wlp);
+ goto error;
+ }
+ do_parse_buffer = 0;
+ break;
+ }
+ spin_lock_irqsave(&i1480u->lock, flags);
+ /* chew the data fragments, extract network packets */
+ if (do_parse_buffer) {
+ i1480u_rx_buffer(rx_buf);
+ if (rx_buf->data) {
+ rx_buf->urb->transfer_buffer = rx_buf->data->data;
+ result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "RX URB[%u]: cannot submit %d\n",
+ rx_buf_idx, result);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&i1480u->lock, flags);
+error:
+ return;
+}
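+
+/*
+ * For reference, a rough sketch of how one of these RX URBs would be
+ * primed before the first completion lands here; the real allocation and
+ * submission is presumably done in the RX setup path earlier in this file
+ * (not shown in this excerpt), and the endpoint index and buffer length
+ * used below are illustrative assumptions:
+ *
+ *	struct usb_endpoint_descriptor *epd =
+ *		&i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
+ *	rx_buf->data = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
+ *	skb_reserve(rx_buf->data, 2);
+ *	usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
+ *		usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
+ *		rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
+ *		i1480u_rx_cb, rx_buf);
+ *	usb_submit_urb(rx_buf->urb, GFP_KERNEL);
+ */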
+
diff --git a/drivers/uwb/i1480/i1480u-wlp/sysfs.c b/drivers/uwb/i1480/i1480u-wlp/sysfs.c
new file mode 100644
index 00000000000..a1d8ca6ac93
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/sysfs.c
@@ -0,0 +1,408 @@
+/*
+ * WUSB Wire Adapter: WLP interface
+ * Sysfs interfaces
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/uwb/debug.h>
+#include <linux/device.h>
+#include "i1480u-wlp.h"
+
+
+/**
+ *
+ * @dev: Class device from the net_device; assumed refcnted.
+ *
+ * Yes, I don't lock--we assume it is refcounted and I am getting a
+ * single byte value that is kind of atomic to read.
+ */
+ssize_t uwb_phy_rate_show(const struct wlp_options *options, char *buf)
+{
+ return sprintf(buf, "%u\n",
+ wlp_tx_hdr_phy_rate(&options->def_tx_hdr));
+}
+EXPORT_SYMBOL_GPL(uwb_phy_rate_show);
+
+
+ssize_t uwb_phy_rate_store(struct wlp_options *options,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ unsigned rate;
+
+ result = sscanf(buf, "%u\n", &rate);
+ if (result != 1) {
+ result = -EINVAL;
+ goto out;
+ }
+ result = -EINVAL;
+ if (rate >= UWB_PHY_RATE_INVALID)
+ goto out;
+ wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, rate);
+ result = 0;
+out:
+ return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(uwb_phy_rate_store);
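+
+/*
+ * From userspace these show up as plain sysfs files under the network
+ * device; e.g., assuming the interface came up as wlp0 (the name is just
+ * an example):
+ *
+ *	cat /sys/class/net/wlp0/uwb_phy_rate
+ *	echo <rate-index> > /sys/class/net/wlp0/uwb_phy_rate
+ */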
+
+
+ssize_t uwb_rts_cts_show(const struct wlp_options *options, char *buf)
+{
+ return sprintf(buf, "%u\n",
+ wlp_tx_hdr_rts_cts(&options->def_tx_hdr));
+}
+EXPORT_SYMBOL_GPL(uwb_rts_cts_show);
+
+
+ssize_t uwb_rts_cts_store(struct wlp_options *options,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ unsigned value;
+
+ result = sscanf(buf, "%u\n", &value);
+ if (result != 1) {
+ result = -EINVAL;
+ goto out;
+ }
+ result = -EINVAL;
+ wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, !!value);
+ result = 0;
+out:
+ return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(uwb_rts_cts_store);
+
+
+ssize_t uwb_ack_policy_show(const struct wlp_options *options, char *buf)
+{
+ return sprintf(buf, "%u\n",
+ wlp_tx_hdr_ack_policy(&options->def_tx_hdr));
+}
+EXPORT_SYMBOL_GPL(uwb_ack_policy_show);
+
+
+ssize_t uwb_ack_policy_store(struct wlp_options *options,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ unsigned value;
+
+ result = sscanf(buf, "%u\n", &value);
+ if (result != 1 || value > UWB_ACK_B_REQ) {
+ result = -EINVAL;
+ goto out;
+ }
+ wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, value);
+ result = 0;
+out:
+ return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(uwb_ack_policy_store);
+
+
+/**
+ * Show the PCA base priority.
+ *
+ * We can access without locking, as the value is (for now) orthogonal
+ * to other values.
+ */
+ssize_t uwb_pca_base_priority_show(const struct wlp_options *options,
+ char *buf)
+{
+ return sprintf(buf, "%u\n",
+ options->pca_base_priority);
+}
+EXPORT_SYMBOL_GPL(uwb_pca_base_priority_show);
+
+
+/**
+ * Set the PCA base priority.
+ *
+ * We can access without locking, as the value is (for now) orthogonal
+ * to other values.
+ */
+ssize_t uwb_pca_base_priority_store(struct wlp_options *options,
+ const char *buf, size_t size)
+{
+ ssize_t result = -EINVAL;
+ u8 pca_base_priority;
+
+ result = sscanf(buf, "%hhu\n", &pca_base_priority);
+ if (result != 1) {
+ result = -EINVAL;
+ goto out;
+ }
+ result = -EINVAL;
+ if (pca_base_priority >= 8)
+ goto out;
+ options->pca_base_priority = pca_base_priority;
+ /* Update TX header if we are currently using PCA. */
+ if (result >= 0 && (wlp_tx_hdr_delivery_id_type(&options->def_tx_hdr) & WLP_DRP) == 0)
+ wlp_tx_hdr_set_delivery_id_type(&options->def_tx_hdr, options->pca_base_priority);
+ result = 0;
+out:
+ return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(uwb_pca_base_priority_store);
+
+/**
+ * Show current inflight values
+ *
+ * Will print the current MAX and THRESHOLD values for the basic flow
+ * control. In addition it will report how many times the TX queue needed
+ * to be restarted since the last time this query was made.
+ */
+static ssize_t wlp_tx_inflight_show(struct i1480u_tx_inflight *inflight,
+ char *buf)
+{
+ ssize_t result;
+ unsigned long sec_elapsed = (jiffies - inflight->restart_ts)/HZ;
+ unsigned long restart_count = atomic_read(&inflight->restart_count);
+
+ result = scnprintf(buf, PAGE_SIZE, "%lu %lu %d %lu %lu %lu\n"
+ "#read: threshold max inflight_count restarts "
+ "seconds restarts/sec\n"
+ "#write: threshold max\n",
+ inflight->threshold, inflight->max,
+ atomic_read(&inflight->count),
+ restart_count, sec_elapsed,
+ sec_elapsed == 0 ? 0 : restart_count/sec_elapsed);
+ inflight->restart_ts = jiffies;
+ atomic_set(&inflight->restart_count, 0);
+ return result;
+}
+
+static
+ssize_t wlp_tx_inflight_store(struct i1480u_tx_inflight *inflight,
+ const char *buf, size_t size)
+{
+ unsigned long in_threshold, in_max;
+ ssize_t result;
+ result = sscanf(buf, "%lu %lu", &in_threshold, &in_max);
+ if (result != 2)
+ return -EINVAL;
+ if (in_max <= in_threshold)
+ return -EINVAL;
+ inflight->max = in_max;
+ inflight->threshold = in_threshold;
+ return size;
+}
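+
+/*
+ * So, for example, writing "3 8" to the wlp_tx_inflight attribute sets
+ * threshold = 3 and max = 8 (the write is rejected unless max >
+ * threshold), and a read returns the current threshold/max plus the
+ * restart statistics in the self-describing format built above.
+ */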
+/*
+ * Glue (or function adaptors) for accessing info on sysfs
+ *
+ * [we need this indirection because the PCI driver does almost the
+ * same]
+ *
+ * Linux 2.6.21 changed how 'struct netdevice' does attributes (from
+ * having a 'struct class_dev' to having a 'struct device'). That is
+ * quite a pain.
+ *
+ * So we try to abstract that here. i1480u_SHOW() and i1480u_STORE()
+ * create adaptors for extracting the 'struct i1480u' from a 'struct
+ * dev' and calling a function for doing a sysfs operation (as we have
+ * them factorized already). i1480u_ATTR creates the attribute file
+ * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a
+ * class_device_attr_NAME or device_attr_NAME (for group registration).
+ */
+#include <linux/version.h>
+
+#define i1480u_SHOW(name, fn, param) \
+static ssize_t i1480u_show_##name(struct device *dev, \
+ struct device_attribute *attr,\
+ char *buf) \
+{ \
+ struct i1480u *i1480u = netdev_priv(to_net_dev(dev)); \
+ return fn(&i1480u->param, buf); \
+}
+
+#define i1480u_STORE(name, fn, param) \
+static ssize_t i1480u_store_##name(struct device *dev, \
+ struct device_attribute *attr,\
+ const char *buf, size_t size)\
+{ \
+ struct i1480u *i1480u = netdev_priv(to_net_dev(dev)); \
+ return fn(&i1480u->param, buf, size); \
+}
+
+#define i1480u_ATTR(name, perm) static DEVICE_ATTR(name, perm, \
+ i1480u_show_##name,\
+ i1480u_store_##name)
+
+#define i1480u_ATTR_SHOW(name) static DEVICE_ATTR(name, \
+ S_IRUGO, \
+ i1480u_show_##name, NULL)
+
+#define i1480u_ATTR_NAME(a) (dev_attr_##a)
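+
+/*
+ * So, for example, i1480u_SHOW(uwb_phy_rate, uwb_phy_rate_show, options)
+ * expands to an i1480u_show_uwb_phy_rate() wrapper that digs the struct
+ * i1480u out of the net_device behind @dev and forwards to
+ * uwb_phy_rate_show(&i1480u->options, buf); i1480u_ATTR(uwb_phy_rate,
+ * S_IRUGO | S_IWUSR) then wires both wrappers into dev_attr_uwb_phy_rate.
+ */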
+
+
+/*
+ * Sysfs adaptors
+ */
+i1480u_SHOW(uwb_phy_rate, uwb_phy_rate_show, options);
+i1480u_STORE(uwb_phy_rate, uwb_phy_rate_store, options);
+i1480u_ATTR(uwb_phy_rate, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(uwb_rts_cts, uwb_rts_cts_show, options);
+i1480u_STORE(uwb_rts_cts, uwb_rts_cts_store, options);
+i1480u_ATTR(uwb_rts_cts, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(uwb_ack_policy, uwb_ack_policy_show, options);
+i1480u_STORE(uwb_ack_policy, uwb_ack_policy_store, options);
+i1480u_ATTR(uwb_ack_policy, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(uwb_pca_base_priority, uwb_pca_base_priority_show, options);
+i1480u_STORE(uwb_pca_base_priority, uwb_pca_base_priority_store, options);
+i1480u_ATTR(uwb_pca_base_priority, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_eda, wlp_eda_show, wlp);
+i1480u_STORE(wlp_eda, wlp_eda_store, wlp);
+i1480u_ATTR(wlp_eda, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_uuid, wlp_uuid_show, wlp);
+i1480u_STORE(wlp_uuid, wlp_uuid_store, wlp);
+i1480u_ATTR(wlp_uuid, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_name, wlp_dev_name_show, wlp);
+i1480u_STORE(wlp_dev_name, wlp_dev_name_store, wlp);
+i1480u_ATTR(wlp_dev_name, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_manufacturer, wlp_dev_manufacturer_show, wlp);
+i1480u_STORE(wlp_dev_manufacturer, wlp_dev_manufacturer_store, wlp);
+i1480u_ATTR(wlp_dev_manufacturer, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_model_name, wlp_dev_model_name_show, wlp);
+i1480u_STORE(wlp_dev_model_name, wlp_dev_model_name_store, wlp);
+i1480u_ATTR(wlp_dev_model_name, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_model_nr, wlp_dev_model_nr_show, wlp);
+i1480u_STORE(wlp_dev_model_nr, wlp_dev_model_nr_store, wlp);
+i1480u_ATTR(wlp_dev_model_nr, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_serial, wlp_dev_serial_show, wlp);
+i1480u_STORE(wlp_dev_serial, wlp_dev_serial_store, wlp);
+i1480u_ATTR(wlp_dev_serial, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_prim_category, wlp_dev_prim_category_show, wlp);
+i1480u_STORE(wlp_dev_prim_category, wlp_dev_prim_category_store, wlp);
+i1480u_ATTR(wlp_dev_prim_category, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_prim_OUI, wlp_dev_prim_OUI_show, wlp);
+i1480u_STORE(wlp_dev_prim_OUI, wlp_dev_prim_OUI_store, wlp);
+i1480u_ATTR(wlp_dev_prim_OUI, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_show, wlp);
+i1480u_STORE(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_store, wlp);
+i1480u_ATTR(wlp_dev_prim_OUI_sub, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_dev_prim_subcat, wlp_dev_prim_subcat_show, wlp);
+i1480u_STORE(wlp_dev_prim_subcat, wlp_dev_prim_subcat_store, wlp);
+i1480u_ATTR(wlp_dev_prim_subcat, S_IRUGO | S_IWUSR);
+
+i1480u_SHOW(wlp_neighborhood, wlp_neighborhood_show, wlp);
+i1480u_ATTR_SHOW(wlp_neighborhood);
+
+i1480u_SHOW(wss_activate, wlp_wss_activate_show, wlp.wss);
+i1480u_STORE(wss_activate, wlp_wss_activate_store, wlp.wss);
+i1480u_ATTR(wss_activate, S_IRUGO | S_IWUSR);
+
+/*
+ * Show the (min, max, avg) Line Quality Estimate (LQE, in dB) as over
+ * the last 256 received WLP frames (ECMA-368 13.3).
+ *
+ * [the -7dB that have to be subtracted from the LQI to make the LQE
+ * are already taken into account].
+ */
+i1480u_SHOW(wlp_lqe, stats_show, lqe_stats);
+i1480u_STORE(wlp_lqe, stats_store, lqe_stats);
+i1480u_ATTR(wlp_lqe, S_IRUGO | S_IWUSR);
+
+/*
+ * Show the Receive Signal Strength Indicator averaged over all the
+ * received WLP frames (ECMA-368 13.3). It is still not clear what
+ * this value is, but it is kind of a percentage of the signal strength
+ * at the antenna.
+ */
+i1480u_SHOW(wlp_rssi, stats_show, rssi_stats);
+i1480u_STORE(wlp_rssi, stats_store, rssi_stats);
+i1480u_ATTR(wlp_rssi, S_IRUGO | S_IWUSR);
+
+/**
+ * We maintain a basic flow control counter: "count" tracks how many TX
+ * URBs are outstanding. Only "max" TX URBs are allowed to be outstanding;
+ * once that limit is reached the queue is stopped, and it is restarted
+ * when the number of outstanding URBs drops back to "threshold".
+ */
+i1480u_SHOW(wlp_tx_inflight, wlp_tx_inflight_show, tx_inflight);
+i1480u_STORE(wlp_tx_inflight, wlp_tx_inflight_store, tx_inflight);
+i1480u_ATTR(wlp_tx_inflight, S_IRUGO | S_IWUSR);
+
+static struct attribute *i1480u_attrs[] = {
+ &i1480u_ATTR_NAME(uwb_phy_rate).attr,
+ &i1480u_ATTR_NAME(uwb_rts_cts).attr,
+ &i1480u_ATTR_NAME(uwb_ack_policy).attr,
+ &i1480u_ATTR_NAME(uwb_pca_base_priority).attr,
+ &i1480u_ATTR_NAME(wlp_lqe).attr,
+ &i1480u_ATTR_NAME(wlp_rssi).attr,
+ &i1480u_ATTR_NAME(wlp_eda).attr,
+ &i1480u_ATTR_NAME(wlp_uuid).attr,
+ &i1480u_ATTR_NAME(wlp_dev_name).attr,
+ &i1480u_ATTR_NAME(wlp_dev_manufacturer).attr,
+ &i1480u_ATTR_NAME(wlp_dev_model_name).attr,
+ &i1480u_ATTR_NAME(wlp_dev_model_nr).attr,
+ &i1480u_ATTR_NAME(wlp_dev_serial).attr,
+ &i1480u_ATTR_NAME(wlp_dev_prim_category).attr,
+ &i1480u_ATTR_NAME(wlp_dev_prim_OUI).attr,
+ &i1480u_ATTR_NAME(wlp_dev_prim_OUI_sub).attr,
+ &i1480u_ATTR_NAME(wlp_dev_prim_subcat).attr,
+ &i1480u_ATTR_NAME(wlp_neighborhood).attr,
+ &i1480u_ATTR_NAME(wss_activate).attr,
+ &i1480u_ATTR_NAME(wlp_tx_inflight).attr,
+ NULL,
+};
+
+static struct attribute_group i1480u_attr_group = {
+ .name = NULL, /* we want them in the same directory */
+ .attrs = i1480u_attrs,
+};
+
+int i1480u_sysfs_setup(struct i1480u *i1480u)
+{
+ int result;
+ struct device *dev = &i1480u->usb_iface->dev;
+ result = sysfs_create_group(&i1480u->net_dev->dev.kobj,
+ &i1480u_attr_group);
+ if (result < 0)
+ dev_err(dev, "cannot initialize sysfs attributes: %d\n",
+ result);
+ return result;
+}
+
+
+void i1480u_sysfs_release(struct i1480u *i1480u)
+{
+ sysfs_remove_group(&i1480u->net_dev->dev.kobj,
+ &i1480u_attr_group);
+}
diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c
new file mode 100644
index 00000000000..3426bfb6824
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/tx.c
@@ -0,0 +1,632 @@
+/*
+ * WUSB Wire Adapter: WLP interface
+ * Deal with TX (massaging data to transmit, handling it)
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Transmission engine. Get an skb, create from that a WLP transmit
+ * context, add a WLP TX header (which we keep prefilled in the
+ * device's instance), fill out the target-specific fields and
+ * fire it.
+ *
+ * ROADMAP:
+ *
+ * Entry points:
+ *
+ * i1480u_tx_release(): called by i1480u_disconnect() to release
+ * pending tx contexts.
+ *
+ * i1480u_tx_cb(): callback for TX contexts (USB URBs)
+ * i1480u_tx_destroy():
+ *
+ * i1480u_tx_timeout(): called for timeout handling from the
+ * network stack.
+ *
+ * i1480u_hard_start_xmit(): called for transmitting an skb from
+ * the network stack. Will interact with WLP
+ * substack to verify and prepare frame.
+ * i1480u_xmit_frame(): actual transmission on hardware
+ *
+ * i1480u_tx_create() Creates TX context
+ * i1480u_tx_create_1() For packets in 1 fragment
+ * i1480u_tx_create_n() For packets in >1 fragments
+ *
+ * TODO:
+ *
+ * - FIXME: rewrite using usb_sg_*(), add asynch support to
+ * usb_sg_*(). It might not make too much sense as most of
+ * the time the MTU will be smaller than one page...
+ */
+
+#include "i1480u-wlp.h"
+#define D_LOCAL 5
+#include <linux/uwb/debug.h>
+
+enum {
+ /* This is only for Next and Last TX packets */
+ i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
+ - sizeof(struct untd_hdr_rst),
+};
+
+/** Free resources allocated to a i1480u tx context. */
+static
+void i1480u_tx_free(struct i1480u_tx *wtx)
+{
+ kfree(wtx->buf);
+ if (wtx->skb)
+ dev_kfree_skb_irq(wtx->skb);
+ usb_free_urb(wtx->urb);
+ kfree(wtx);
+}
+
+static
+void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&i1480u->tx_list_lock, flags); /* not active any more */
+ list_del(&wtx->list_node);
+ i1480u_tx_free(wtx);
+ spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
+}
+
+static
+void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
+{
+ unsigned long flags;
+ struct i1480u_tx *wtx, *next;
+
+ spin_lock_irqsave(&i1480u->tx_list_lock, flags);
+ list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
+ usb_unlink_urb(wtx->urb);
+ }
+ spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
+}
+
+
+/**
+ * Callback for a completed tx USB URB.
+ *
+ * TODO:
+ *
+ * - FIXME: recover errors more gracefully
+ * - FIXME: handle NAKs (I dont think they come here) for flow ctl
+ */
+static
+void i1480u_tx_cb(struct urb *urb)
+{
+ struct i1480u_tx *wtx = urb->context;
+ struct i1480u *i1480u = wtx->i1480u;
+ struct net_device *net_dev = i1480u->net_dev;
+ struct device *dev = &i1480u->usb_iface->dev;
+ unsigned long flags;
+
+ switch (urb->status) {
+ case 0:
+ spin_lock_irqsave(&i1480u->lock, flags);
+ i1480u->stats.tx_packets++;
+ i1480u->stats.tx_bytes += urb->actual_length;
+ spin_unlock_irqrestore(&i1480u->lock, flags);
+ break;
+ case -ECONNRESET: /* Not an error, but a controlled situation; */
+ case -ENOENT: /* (we killed the URB)...so, no broadcast */
+ dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status);
+ netif_stop_queue(net_dev);
+ break;
+ case -ESHUTDOWN: /* going away! */
+ dev_dbg(dev, "notif endp: down %d\n", urb->status);
+ netif_stop_queue(net_dev);
+ break;
+ default:
+ dev_err(dev, "TX: unknown URB status %d\n", urb->status);
+ if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "TX: max acceptable errors exceeded."
+ "Reset device.\n");
+ netif_stop_queue(net_dev);
+ i1480u_tx_unlink_urbs(i1480u);
+ wlp_reset_all(&i1480u->wlp);
+ }
+ break;
+ }
+ i1480u_tx_destroy(i1480u, wtx);
+ if (atomic_dec_return(&i1480u->tx_inflight.count)
+ <= i1480u->tx_inflight.threshold
+ && netif_queue_stopped(net_dev)
+ && i1480u->tx_inflight.threshold != 0) {
+ if (d_test(2) && printk_ratelimit())
+ d_printf(2, dev, "Restart queue. \n");
+ netif_start_queue(net_dev);
+ atomic_inc(&i1480u->tx_inflight.restart_count);
+ }
+ return;
+}
+
+
+/**
+ * Given a buffer that doesn't fit in a single fragment, create an
+ * scatter/gather structure for delivery to the USB pipe.
+ *
+ * Implements functionality of i1480u_tx_create().
+ *
+ * @wtx: tx descriptor
+ * @skb: skb to send
+ * @gfp_mask: gfp allocation mask
+ * @returns: Pointer to @wtx if ok, NULL on error.
+ *
+ * Sorry, TOO LONG a function, but breaking it up is kind of hard
+ *
+ * This will break the buffer in chunks smaller than
+ * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
+ * to each:
+ *
+ * 1st header \
+ * i1480 tx header | fragment 1
+ * fragment data /
+ * nxt header \ fragment 2
+ * fragment data /
+ * ..
+ * ..
+ * last header \ fragment 3
+ * last fragment data /
+ *
+ * This does not fill the i1480 TX header, it is left up to the
+ * caller to do that; you can get it from @wtx->wlp_tx_hdr.
+ *
+ * This function consumes the skb unless there is an error.
+ */
+static
+int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
+ gfp_t gfp_mask)
+{
+ int result;
+ void *pl;
+ size_t pl_size;
+
+ void *pl_itr, *buf_itr;
+ size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
+ struct untd_hdr_1st *untd_hdr_1st;
+ struct wlp_tx_hdr *wlp_tx_hdr;
+ struct untd_hdr_rst *untd_hdr_rst;
+
+ wtx->skb = NULL;
+ pl = skb->data;
+ pl_itr = pl;
+ pl_size = skb->len;
+ pl_size_left = pl_size; /* payload size */
+ /* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
+ * the headers */
+ pl_size_1st = i1480u_MAX_FRG_SIZE
+ - sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
+ BUG_ON(pl_size_1st > pl_size);
+ pl_size_left -= pl_size_1st;
+ /* The rest have a smaller header (no i1480 TX header). We
+ * need to break up the payload in blocks smaller than
+ * i1480u_MAX_PL_SIZE (payload excluding header). */
+ frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
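+ /*
+ * Worked example with made-up sizes (the real values live in
+ * i1480u-wlp.h and are not shown in this hunk): if
+ * i1480u_MAX_FRG_SIZE were 512, untd_hdr_1st 8 bytes, wlp_tx_hdr
+ * 16 bytes and untd_hdr_rst 4 bytes, a 2000 byte skb would give
+ * pl_size_1st = 488, pl_size_left = 1512, i1480u_MAX_PL_SIZE = 508
+ * and frgs = 3, for a wtx->buf_size of 8 + 16 + 3 * 4 + 2000.
+ */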
+ /* Allocate space for the new buffer. In this new buffer we'll
+ * place the headers followed by the data fragment, headers,
+ * data fragments, etc..
+ */
+ result = -ENOMEM;
+ wtx->buf_size = sizeof(*untd_hdr_1st)
+ + sizeof(*wlp_tx_hdr)
+ + frgs * sizeof(*untd_hdr_rst)
+ + pl_size;
+ wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
+ if (wtx->buf == NULL)
+ goto error_buf_alloc;
+
+ buf_itr = wtx->buf; /* We got the space, let's fill it up */
+ /* Fill 1st fragment */
+ untd_hdr_1st = buf_itr;
+ buf_itr += sizeof(*untd_hdr_1st);
+ untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
+ untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
+ untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
+ untd_hdr_1st->fragment_len =
+ cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
+ memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
+ /* Set up i1480 header info */
+ wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
+ buf_itr += sizeof(*wlp_tx_hdr);
+ /* Copy the first fragment */
+ memcpy(buf_itr, pl_itr, pl_size_1st);
+ pl_itr += pl_size_1st;
+ buf_itr += pl_size_1st;
+
+ /* Now do each remaining fragment */
+ result = -EINVAL;
+ while (pl_size_left > 0) {
+ d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n",
+ pl_size_left, buf_itr - wtx->buf);
+ if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
+ > wtx->buf_size) {
+ printk(KERN_ERR "BUG: no space for header\n");
+ goto error_bug;
+ }
+ d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n",
+ pl_size_left, buf_itr - wtx->buf);
+ untd_hdr_rst = buf_itr;
+ buf_itr += sizeof(*untd_hdr_rst);
+ if (pl_size_left > i1480u_MAX_PL_SIZE) {
+ frg_pl_size = i1480u_MAX_PL_SIZE;
+ untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
+ } else {
+ frg_pl_size = pl_size_left;
+ untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
+ }
+ d_printf(5, NULL,
+ "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
+ pl_size_left, buf_itr - wtx->buf, frg_pl_size);
+ untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
+ untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
+ untd_hdr_rst->padding = 0;
+ if (buf_itr + frg_pl_size - wtx->buf
+ > wtx->buf_size) {
+ printk(KERN_ERR "BUG: no space for payload\n");
+ goto error_bug;
+ }
+ memcpy(buf_itr, pl_itr, frg_pl_size);
+ buf_itr += frg_pl_size;
+ pl_itr += frg_pl_size;
+ pl_size_left -= frg_pl_size;
+ d_printf(5, NULL,
+ "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
+ pl_size_left, buf_itr - wtx->buf, frg_pl_size);
+ }
+ dev_kfree_skb_irq(skb);
+ return 0;
+
+error_bug:
+ printk(KERN_ERR
+ "BUG: skb %u bytes\n"
+ "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
+ "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
+ skb->len,
+ frg_pl_size, i1480u_MAX_FRG_SIZE,
+ buf_itr - wtx->buf, wtx->buf_size, pl_size_left);
+
+ kfree(wtx->buf);
+error_buf_alloc:
+ return result;
+}
+
+
+/**
+ * Given a buffer that fits in a single fragment, fill out a @wtx
+ * struct for transmitting it down the USB pipe.
+ *
+ * Uses the fact that we have space reserved in front of the skbuff
+ * for hardware headers :]
+ *
+ * This does not fill the i1480 TX header, it is left up to the
+ * caller to do that; you can get it from @wtx->wlp_tx_hdr.
+ *
+ * @wtx: tx descriptor
+ * @skb: skb whose data is the payload to send
+ * @gfp_mask: gfp allocation mask
+ *
+ * This function does not consume the @skb.
+ */
+static
+int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
+ gfp_t gfp_mask)
+{
+ struct untd_hdr_cmp *untd_hdr_cmp;
+ struct wlp_tx_hdr *wlp_tx_hdr;
+
+ wtx->buf = NULL;
+ wtx->skb = skb;
+ BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
+ wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
+ wtx->wlp_tx_hdr = wlp_tx_hdr;
+ BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
+ untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));
+
+ untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
+ untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
+ untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
+ untd_hdr_cmp->padding = 0;
+ return 0;
+}
+
+
+/**
+ * Given a skb to transmit, massage it to become palatable for the TX pipe
+ *
+ * This will break the buffer in chunks smaller than
+ * i1480u_MAX_FRG_SIZE and add proper headers to each.
+ *
+ * 1st header \
+ * i1480 tx header | fragment 1
+ * fragment data /
+ * nxt header \ fragment 2
+ * fragment data /
+ * ..
+ * ..
+ * last header \ fragment 3
+ * last fragment data /
+ *
+ * Each fragment will be always smaller or equal to i1480u_MAX_FRG_SIZE.
+ *
+ * If the first fragment is smaller than i1480u_MAX_FRG_SIZE, then the
+ * following is composed:
+ *
+ * complete header \
+ * i1480 tx header | single fragment
+ * packet data /
+ *
+ * We were going to use s/g support, but because the interface is
+ * synch and at the end there is plenty of overhead to do it, it
+ * didn't seem worth it for data that is going to be smaller than
+ * one page.
+ */
+static
+struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
+ struct sk_buff *skb, gfp_t gfp_mask)
+{
+ int result;
+ struct usb_endpoint_descriptor *epd;
+ int usb_pipe;
+ unsigned long flags;
+
+ struct i1480u_tx *wtx;
+ const size_t pl_max_size =
+ i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
+ - sizeof(struct wlp_tx_hdr);
+
+ wtx = kmalloc(sizeof(*wtx), gfp_mask);
+ if (wtx == NULL)
+ goto error_wtx_alloc;
+ wtx->urb = usb_alloc_urb(0, gfp_mask);
+ if (wtx->urb == NULL)
+ goto error_urb_alloc;
+ epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
+ usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
+ /* Fits in a single complete packet or need to split? */
+ if (skb->len > pl_max_size) {
+ result = i1480u_tx_create_n(wtx, skb, gfp_mask);
+ if (result < 0)
+ goto error_create;
+ usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
+ wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
+ } else {
+ result = i1480u_tx_create_1(wtx, skb, gfp_mask);
+ if (result < 0)
+ goto error_create;
+ usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
+ skb->data, skb->len, i1480u_tx_cb, wtx);
+ }
+ spin_lock_irqsave(&i1480u->tx_list_lock, flags);
+ list_add(&wtx->list_node, &i1480u->tx_list);
+ spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
+ return wtx;
+
+error_create:
+ kfree(wtx->urb);
+error_urb_alloc:
+ kfree(wtx);
+error_wtx_alloc:
+ return NULL;
+}
+
+/**
+ * Actual fragmentation and transmission of frame
+ *
+ * @wlp: WLP substack data structure
+ * @skb: To be transmitted
+ * @dst: Device address of destination
+ * @returns: 0 on success, <0 on failure
+ *
+ * This function can also be called directly (not just from
+ * hard_start_xmit), so we also check here if the interface is up before
+ * sending anything.
+ */
+int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
+ struct uwb_dev_addr *dst)
+{
+ int result = -ENXIO;
+ struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
+ struct device *dev = &i1480u->usb_iface->dev;
+ struct net_device *net_dev = i1480u->net_dev;
+ struct i1480u_tx *wtx;
+ struct wlp_tx_hdr *wlp_tx_hdr;
+ static unsigned char dev_bcast[2] = { 0xff, 0xff };
+#if 0
+ int lockup = 50;
+#endif
+
+ d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
+ net_dev);
+ BUG_ON(i1480u->wlp.rc == NULL);
+ if ((net_dev->flags & IFF_UP) == 0)
+ goto out;
+ result = -EBUSY;
+ if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
+ if (d_test(2) && printk_ratelimit())
+ d_printf(2, dev, "Max frames in flight "
+ "stopping queue.\n");
+ netif_stop_queue(net_dev);
+ goto error_max_inflight;
+ }
+ result = -ENOMEM;
+ wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
+ if (unlikely(wtx == NULL)) {
+ if (printk_ratelimit())
+ dev_err(dev, "TX: no memory for WLP TX URB,"
+ "dropping packet (in flight %d)\n",
+ atomic_read(&i1480u->tx_inflight.count));
+ netif_stop_queue(net_dev);
+ goto error_wtx_alloc;
+ }
+ wtx->i1480u = i1480u;
+ /* Fill out the i1480 header; @i1480u->def_tx_hdr read without
+ * locking. We do so because they are kind of orthogonal to
+ * each other (and thus not changed in an atomic batch).
+ * The ETH header is right after the WLP TX header. */
+ wlp_tx_hdr = wtx->wlp_tx_hdr;
+ *wlp_tx_hdr = i1480u->options.def_tx_hdr;
+ wlp_tx_hdr->dstaddr = *dst;
+ if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
+ && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
+ /* Broadcast message directed to DRP host. Send as best effort
+ * on PCA. */
+ wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority);
+ }
+
+#if 0
+ dev_info(dev, "TX delivering skb -> USB, %zu bytes\n", skb->len);
+ dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len);
+#endif
+#if 0
+ /* simulates a device lockup after every lockup# packets */
+ if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) {
+ /* Simulate a dropped transmit interrupt */
+ net_dev->trans_start = jiffies;
+ netif_stop_queue(net_dev);
+ dev_err(dev, "Simulate lockup at %ld\n", jiffies);
+ return result;
+ }
+#endif
+
+ result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */
+ if (result < 0) {
+ dev_err(dev, "TX: cannot submit URB: %d\n", result);
+ /* We leave the freeing of skb to calling function */
+ wtx->skb = NULL;
+ goto error_tx_urb_submit;
+ }
+ atomic_inc(&i1480u->tx_inflight.count);
+ net_dev->trans_start = jiffies;
+ d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
+ net_dev, result);
+ return result;
+
+error_tx_urb_submit:
+ i1480u_tx_destroy(i1480u, wtx);
+error_wtx_alloc:
+error_max_inflight:
+out:
+ d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
+ net_dev, result);
+ return result;
+}
+
+
+/**
+ * Transmit an skb. Called when an skb has to be transmitted.
+ *
+ * The skb is first passed to WLP substack to ensure this is a valid
+ * frame. If valid the device address of destination will be filled and
+ * the WLP header prepended to the skb. If this step fails we fake sending
+ * the frame, if we return an error the network stack will just keep trying.
+ *
+ * Broadcast frames inside a WSS need to be treated specially as multicast is
+ * not supported. A broadcast frame is sent as unicast to each member of the
+ * WSS - this is done by the WLP substack when it finds a broadcast frame.
+ * So, we test if the WLP substack took over the skb and only transmit it
+ * if it has not (been taken over).
+ *
+ * @net_dev->xmit_lock is held
+ */
+int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+{
+ int result;
+ struct i1480u *i1480u = netdev_priv(net_dev);
+ struct device *dev = &i1480u->usb_iface->dev;
+ struct uwb_dev_addr dst;
+
+ d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
+ net_dev);
+ BUG_ON(i1480u->wlp.rc == NULL);
+ if ((net_dev->flags & IFF_UP) == 0)
+ goto error;
+ result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
+ if (result < 0) {
+ dev_err(dev, "WLP verification of TX frame failed (%d). "
+ "Dropping packet.\n", result);
+ goto error;
+ } else if (result == 1) {
+ d_printf(6, dev, "WLP will transmit frame. \n");
+ /* trans_start time will be set when WLP actually transmits
+ * the frame */
+ goto out;
+ }
+ d_printf(6, dev, "Transmitting frame. \n");
+ result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
+ if (result < 0) {
+ dev_err(dev, "Frame TX failed (%d).\n", result);
+ goto error;
+ }
+ d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
+ net_dev, result);
+ return NETDEV_TX_OK;
+error:
+ dev_kfree_skb_any(skb);
+ i1480u->stats.tx_dropped++;
+out:
+ d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
+ net_dev, result);
+ return NETDEV_TX_OK;
+}
+
+
+/**
+ * Called when a pkt transmission doesn't complete in a reasonable period
+ * Device reset may sleep - do it outside of interrupt context (delayed)
+ */
+void i1480u_tx_timeout(struct net_device *net_dev)
+{
+ struct i1480u *i1480u = netdev_priv(net_dev);
+
+ wlp_reset_all(&i1480u->wlp);
+}
+
+
+void i1480u_tx_release(struct i1480u *i1480u)
+{
+ unsigned long flags;
+ struct i1480u_tx *wtx, *next;
+ int count = 0, empty;
+
+ spin_lock_irqsave(&i1480u->tx_list_lock, flags);
+ list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
+ count++;
+ usb_unlink_urb(wtx->urb);
+ }
+ spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
+ count = count * 10; /* wait 200ms per unlinked urb (intervals of 20ms) */
+ /*
+ * We don't like this solution too much (dirty as it is), but
+ * it is cheaper than putting a refcount on each i1480u_tx and
+ * waiting for all of them to go away...
+ *
+ * Called when no more packets can be added to tx_list
+ * so we can wait for it to be empty.
+ */
+ while (1) {
+ spin_lock_irqsave(&i1480u->tx_list_lock, flags);
+ empty = list_empty(&i1480u->tx_list);
+ spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
+ if (empty)
+ break;
+ count--;
+ BUG_ON(count == 0);
+ msleep(20);
+ }
+}
diff --git a/drivers/uwb/ie.c b/drivers/uwb/ie.c
new file mode 100644
index 00000000000..cf6f3d152b9
--- /dev/null
+++ b/drivers/uwb/ie.c
@@ -0,0 +1,541 @@
+/*
+ * Ultra Wide Band
+ * Information Element Handling
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include "uwb-internal.h"
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/**
+ * uwb_ie_next - get the next IE in a buffer
+ * @ptr: start of the buffer containing the IE data
+ * @len: length of the buffer
+ *
+ * Both @ptr and @len are updated so subsequent calls to uwb_ie_next()
+ * will get the next IE.
+ *
+ * NULL is returned (and @ptr and @len will not be updated) if there
+ * are no more IEs in the buffer or the buffer is too short.
+ */
+struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len)
+{
+ struct uwb_ie_hdr *hdr;
+ size_t ie_len;
+
+ if (*len < sizeof(struct uwb_ie_hdr))
+ return NULL;
+
+ hdr = *ptr;
+ ie_len = sizeof(struct uwb_ie_hdr) + hdr->length;
+
+ if (*len < ie_len)
+ return NULL;
+
+ *ptr += ie_len;
+ *len -= ie_len;
+
+ return hdr;
+}
+EXPORT_SYMBOL_GPL(uwb_ie_next);
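+
+/*
+ * Typical use, sketched (the consumer function is hypothetical):
+ *
+ *	void *ptr = buf;
+ *	size_t len = buf_len;
+ *	struct uwb_ie_hdr *ie;
+ *
+ *	while ((ie = uwb_ie_next(&ptr, &len)) != NULL)
+ *		handle_one_ie(ie);
+ */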
+
+/**
+ * Get the IEs that a radio controller is sending in its beacon
+ *
+ * @uwb_rc: UWB Radio Controller
+ * @returns: Size read from the system
+ *
+ * We don't need to lock the uwb_rc's mutex because we don't modify
+ * anything. Once done with the iedata buffer, call
+ * uwb_rc_ie_release(iedata). Don't call kfree on it.
+ */
+ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie)
+{
+ ssize_t result;
+ struct device *dev = &uwb_rc->uwb_dev.dev;
+ struct uwb_rccb *cmd = NULL;
+ struct uwb_rceb *reply = NULL;
+ struct uwb_rc_evt_get_ie *get_ie;
+
+ d_fnstart(3, dev, "(%p, %p)\n", uwb_rc, pget_ie);
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_kzalloc;
+ cmd->bCommandType = UWB_RC_CET_GENERAL;
+ cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE);
+ result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd),
+ UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE,
+ &reply);
+ if (result < 0)
+ goto error_cmd;
+ get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb);
+ if (result < sizeof(*get_ie)) {
+ dev_err(dev, "not enough data returned for decoding GET IE "
+ "(%zu bytes received vs %zu needed)\n",
+ result, sizeof(*get_ie));
+ result = -EINVAL;
+ } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) {
+ dev_err(dev, "not enough data returned for decoding GET IE "
+ "payload (%zu bytes received vs %zu needed)\n", result,
+ sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength));
+ result = -EINVAL;
+ } else
+ *pget_ie = get_ie;
+error_cmd:
+ kfree(cmd);
+error_kzalloc:
+ d_fnend(3, dev, "(%p, %p) = %d\n", uwb_rc, pget_ie, (int)result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_get_ie);
+
+
+/*
+ * Given a pointer to an IE, print its header and payload in hex followed
+ * by a new line
+ *
+ * @ie_hdr: pointer to the IE header. Length is in there, and it is
+ * guaranteed that the ie_hdr->length bytes following it are
+ * safely accessible.
+ *
+ * @_ctx: context data passed from uwb_ie_for_each(), a struct uwb_buf_ctx
+ */
+int uwb_ie_dump_hex(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr,
+ size_t offset, void *_ctx)
+{
+ struct uwb_buf_ctx *ctx = _ctx;
+ const u8 *pl = (void *)(ie_hdr + 1);
+ u8 pl_itr;
+
+ ctx->bytes += scnprintf(ctx->buf + ctx->bytes, ctx->size - ctx->bytes,
+ "%02x %02x ", (unsigned) ie_hdr->element_id,
+ (unsigned) ie_hdr->length);
+ pl_itr = 0;
+ while (pl_itr < ie_hdr->length && ctx->bytes < ctx->size)
+ ctx->bytes += scnprintf(ctx->buf + ctx->bytes,
+ ctx->size - ctx->bytes,
+ "%02x ", (unsigned) pl[pl_itr++]);
+ if (ctx->bytes < ctx->size)
+ ctx->buf[ctx->bytes++] = '\n';
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uwb_ie_dump_hex);
+
+
+/**
+ * Verify that a pointer in a buffer points to valid IE
+ *
+ * @start: pointer to start of buffer in which IE appears
+ * @itr: pointer to IE inside buffer that will be verified
+ * @top: pointer to end of buffer
+ *
+ * @returns: 0 if IE is valid, <0 otherwise
+ *
+ * Verification involves checking that the buffer can contain a
+ * header and the amount of data reported in the IE header can be found in
+ * the buffer.
+ */
+static
+int uwb_rc_ie_verify(struct uwb_dev *uwb_dev, const void *start,
+ const void *itr, const void *top)
+{
+ struct device *dev = &uwb_dev->dev;
+ const struct uwb_ie_hdr *ie_hdr;
+
+ if (top - itr < sizeof(*ie_hdr)) {
+ dev_err(dev, "Bad IE: no data to decode header "
+ "(%zu bytes left vs %zu needed) at offset %zu\n",
+ top - itr, sizeof(*ie_hdr), itr - start);
+ return -EINVAL;
+ }
+ ie_hdr = itr;
+ itr += sizeof(*ie_hdr);
+ if (top - itr < ie_hdr->length) {
+ dev_err(dev, "Bad IE: not enough data for payload "
+ "(%zu bytes left vs %zu needed) at offset %zu\n",
+ top - itr, (size_t)ie_hdr->length,
+ (void *)ie_hdr - start);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+/**
+ * Walk a buffer filled with consecutive IEs
+ *
+ * @uwb_dev: UWB device these IEs belong to (for err messages mainly)
+ *
+ * @fn: function to call with each IE; if it returns 0, we keep
+ * traversing the buffer. If it returns !0, we'll stop and return
+ * that value.
+ *
+ * @data: pointer passed to @fn
+ *
+ * @buf: buffer where the consecutive IEs are located
+ *
+ * @size: size of @buf
+ *
+ * Each IE is checked for basic correctness (there is space left for
+ * the header and the payload). If that test is failed, we stop
+ * processing. For every good IE, @fn is called.
+ */
+ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data,
+ const void *buf, size_t size)
+{
+ ssize_t result = 0;
+ const struct uwb_ie_hdr *ie_hdr;
+ const void *itr = buf, *top = itr + size;
+
+ while (itr < top) {
+ if (uwb_rc_ie_verify(uwb_dev, buf, itr, top) != 0)
+ break;
+ ie_hdr = itr;
+ itr += sizeof(*ie_hdr) + ie_hdr->length;
+ result = fn(uwb_dev, ie_hdr, itr - buf, data);
+ if (result != 0)
+ break;
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(uwb_ie_for_each);
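+
+/*
+ * The callback signature matches uwb_ie_f; see __acc_size() further down
+ * in this file for a minimal in-tree example that just accumulates the
+ * total size of the IEs it is handed.
+ */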
+
+
+/**
+ * Replace all IEs currently being transmitted by a device
+ *
+ * @rc: UWB radio controller
+ * @cmd: pointer to the SET-IE command with the IEs to set
+ */
+int uwb_rc_set_ie(struct uwb_rc *rc, struct uwb_rc_cmd_set_ie *cmd)
+{
+ int result;
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_rc_evt_set_ie reply;
+
+ reply.rceb.bEventType = UWB_RC_CET_GENERAL;
+ reply.rceb.wEvent = UWB_RC_CMD_SET_IE;
+ result = uwb_rc_cmd(rc, "SET-IE", &cmd->rccb,
+ sizeof(*cmd) + le16_to_cpu(cmd->wIELength),
+ &reply.rceb, sizeof(reply));
+ if (result < 0)
+ goto error_cmd;
+ else if (result != sizeof(reply)) {
+ dev_err(dev, "SET-IE: not enough data to decode reply "
+ "(%d bytes received vs %zu needed)\n",
+ result, sizeof(reply));
+ result = -EIO;
+ } else if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+ dev_err(dev, "SET-IE: command execution failed: %s (%d)\n",
+ uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
+ result = -EIO;
+ } else
+ result = 0;
+error_cmd:
+ return result;
+}
+
+/**
+ * Determine by IE id if IE is host settable
+ * WUSB 1.0 [8.6.2.8 Table 8.85]
+ *
+ * EXCEPTION:
+ * All but UWB_IE_WLP appear in Table 8.85 of WUSB 1.0. Setting this IE
+ * is required for the WLP substack to perform association with its WSS so
+ * we hope that the WUSB spec will be changed to reflect this.
+ */
+static
+int uwb_rc_ie_is_host_settable(enum uwb_ie element_id)
+{
+ if (element_id == UWB_PCA_AVAILABILITY ||
+ element_id == UWB_BP_SWITCH_IE ||
+ element_id == UWB_MAC_CAPABILITIES_IE ||
+ element_id == UWB_PHY_CAPABILITIES_IE ||
+ element_id == UWB_APP_SPEC_PROBE_IE ||
+ element_id == UWB_IDENTIFICATION_IE ||
+ element_id == UWB_MASTER_KEY_ID_IE ||
+ element_id == UWB_IE_WLP ||
+ element_id == UWB_APP_SPEC_IE)
+ return 1;
+ return 0;
+}
+
+
+/**
+ * Extract Host Settable IEs from IE
+ *
+ * @ie_data: pointer to buffer containing all IEs
+ * @size: size of buffer
+ *
+ * @returns: length of buffer that only includes host settable IEs
+ *
+ * Given a buffer of IEs we move all Host Settable IEs to front of buffer
+ * by overwriting the IEs that are not Host Settable.
+ * Buffer length is adjusted accordingly.
+ */
+static
+ssize_t uwb_rc_parse_host_settable_ie(struct uwb_dev *uwb_dev,
+ void *ie_data, size_t size)
+{
+ size_t new_len = size;
+ struct uwb_ie_hdr *ie_hdr;
+ size_t ie_length;
+ void *itr = ie_data, *top = itr + size;
+
+ while (itr < top) {
+ if (uwb_rc_ie_verify(uwb_dev, ie_data, itr, top) != 0)
+ break;
+ ie_hdr = itr;
+ ie_length = sizeof(*ie_hdr) + ie_hdr->length;
+ if (uwb_rc_ie_is_host_settable(ie_hdr->element_id)) {
+ itr += ie_length;
+ } else {
+ memmove(itr, itr + ie_length, top - (itr + ie_length));
+ new_len -= ie_length;
+ top -= ie_length;
+ }
+ }
+ return new_len;
+}
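+
+/*
+ * For example, an input buffer laid out as [A settable][B not
+ * settable][C settable] comes back compacted as [A][C], with the
+ * returned length shrunk by the size of B (header plus payload).
+ */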
+
+
+/* Initialize the IE management subsystem */
+void uwb_rc_ie_init(struct uwb_rc *uwb_rc)
+{
+ mutex_init(&uwb_rc->ies_mutex);
+}
+
+
+/**
+ * Set up cache for host settable IEs currently being transmitted
+ *
+ * First we just call GET-IE to get the current IEs being transmitted
+ * (or we work around it and pretend we did) and (because the format is
+ * the same) reuse that as the IE cache (with the command prefix, as
+ * explained in 'struct uwb_rc').
+ *
+ * @returns: size of cache created
+ */
+ssize_t uwb_rc_ie_setup(struct uwb_rc *uwb_rc)
+{
+ struct device *dev = &uwb_rc->uwb_dev.dev;
+ ssize_t result;
+ size_t capacity;
+ struct uwb_rc_evt_get_ie *ie_info;
+
+ d_fnstart(3, dev, "(%p)\n", uwb_rc);
+ mutex_lock(&uwb_rc->ies_mutex);
+ result = uwb_rc_get_ie(uwb_rc, &ie_info);
+ if (result < 0)
+ goto error_get_ie;
+ capacity = result;
+ d_printf(5, dev, "Got IEs %zu bytes (%zu long at %p)\n", result,
+ (size_t)le16_to_cpu(ie_info->wIELength), ie_info);
+
+ /* Remove IEs that host should not set. */
+ result = uwb_rc_parse_host_settable_ie(&uwb_rc->uwb_dev,
+ ie_info->IEData, le16_to_cpu(ie_info->wIELength));
+ if (result < 0)
+ goto error_parse;
+ d_printf(5, dev, "purged non-settable IEs to %zu bytes\n", result);
+ uwb_rc->ies = (void *) ie_info;
+ uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL;
+ uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE);
+ uwb_rc->ies_capacity = capacity;
+ d_printf(5, dev, "IE cache at %p %zu bytes, %zu capacity\n",
+ ie_info, result, capacity);
+ result = 0;
+error_parse:
+error_get_ie:
+ mutex_unlock(&uwb_rc->ies_mutex);
+ d_fnend(3, dev, "(%p) = %zu\n", uwb_rc, result);
+ return result;
+}
+
+
+/* Cleanup the whole IE management subsystem */
+void uwb_rc_ie_release(struct uwb_rc *uwb_rc)
+{
+ kfree(uwb_rc->ies);
+ uwb_rc->ies = NULL;
+ uwb_rc->ies_capacity = 0;
+}
+
+
+static
+int __acc_size(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr,
+ size_t offset, void *_ctx)
+{
+ size_t *acc_size = _ctx;
+ *acc_size += sizeof(*ie_hdr) + ie_hdr->length;
+ d_printf(6, &uwb_dev->dev, "new acc size %zu\n", *acc_size);
+ return 0;
+}
+
+
+/**
+ * Add a new IE to IEs currently being transmitted by device
+ *
+ * @ies: the buffer containing the new IE or IEs to be added to
+ * the device's beacon. The buffer will be verified for
+ * consistency (meaning the headers should be right) and
+ * consistent with the buffer size.
+ * @size: size of @ies (in bytes, total buffer size)
+ * @returns: 0 if ok, <0 errno code on error
+ *
+ * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
+ * after the device sent the first beacon that includes the IEs specified
+ * in the SET IE command. We thus cannot send this command if the device is
+ * not beaconing. Instead, a SET IE command will be sent later right after
+ * we start beaconing.
+ *
+ * Setting an IE on the device will overwrite all current IEs in device. So
+ * we take the current IEs being transmitted by the device, append the
+ * new one, and call SET IE with all the IEs needed.
+ *
+ * The local IE cache will only be updated with the new IE if SET IE
+ * completed successfully.
+ */
+int uwb_rc_ie_add(struct uwb_rc *uwb_rc,
+ const struct uwb_ie_hdr *ies, size_t size)
+{
+ int result = 0;
+ struct device *dev = &uwb_rc->uwb_dev.dev;
+ struct uwb_rc_cmd_set_ie *new_ies;
+ size_t ies_size, total_size, acc_size = 0;
+
+ if (uwb_rc->ies == NULL)
+ return -ESHUTDOWN;
+ uwb_ie_for_each(&uwb_rc->uwb_dev, __acc_size, &acc_size, ies, size);
+ if (acc_size != size) {
+ dev_err(dev, "BUG: bad IEs, misconstructed headers "
+ "[%zu bytes reported vs %zu calculated]\n",
+ size, acc_size);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ mutex_lock(&uwb_rc->ies_mutex);
+ ies_size = le16_to_cpu(uwb_rc->ies->wIELength);
+ total_size = sizeof(*uwb_rc->ies) + ies_size;
+ if (total_size + size > uwb_rc->ies_capacity) {
+ d_printf(4, dev, "Reallocating IE cache from %p capacity %zu "
+ "to capacity %zu\n", uwb_rc->ies, uwb_rc->ies_capacity,
+ total_size + size);
+ new_ies = kzalloc(total_size + size, GFP_KERNEL);
+ if (new_ies == NULL) {
+ dev_err(dev, "No memory for adding new IE\n");
+ result = -ENOMEM;
+ goto error_alloc;
+ }
+ memcpy(new_ies, uwb_rc->ies, total_size);
+ uwb_rc->ies_capacity = total_size + size;
+ kfree(uwb_rc->ies);
+ uwb_rc->ies = new_ies;
+ d_printf(4, dev, "New IE cache at %p capacity %zu\n",
+ uwb_rc->ies, uwb_rc->ies_capacity);
+ }
+ memcpy((void *)uwb_rc->ies + total_size, ies, size);
+ uwb_rc->ies->wIELength = cpu_to_le16(ies_size + size);
+ if (uwb_rc->beaconing != -1) {
+ result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies);
+ if (result < 0) {
+ dev_err(dev, "Cannot set new IE on device: %d\n",
+ result);
+ uwb_rc->ies->wIELength = cpu_to_le16(ies_size);
+ } else
+ result = 0;
+ }
+ d_printf(4, dev, "IEs now occupy %hu bytes of %zu capacity at %p\n",
+ le16_to_cpu(uwb_rc->ies->wIELength), uwb_rc->ies_capacity,
+ uwb_rc->ies);
+error_alloc:
+ mutex_unlock(&uwb_rc->ies_mutex);
+ return result;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_ie_add);
+
+
+/*
+ * Remove an IE from internal cache
+ *
+ * We are dealing with our internal IE cache so no need to verify that the
+ * IEs are valid (it has been done already).
+ *
+ * Should be called with ies_mutex held
+ *
+ * We do not break out once an IE is found in the cache. It is currently
+ * possible to have more than one IE with the same ID included in the
+ * beacon. We don't reallocate, we just mark the size smaller.
+ */
+static
+int uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove)
+{
+ struct uwb_ie_hdr *ie_hdr;
+ size_t new_len = le16_to_cpu(uwb_rc->ies->wIELength);
+ void *itr = uwb_rc->ies->IEData;
+ void *top = itr + new_len;
+
+ while (itr < top) {
+ ie_hdr = itr;
+ if (ie_hdr->element_id != to_remove) {
+ itr += sizeof(*ie_hdr) + ie_hdr->length;
+ } else {
+ int ie_length;
+ ie_length = sizeof(*ie_hdr) + ie_hdr->length;
+ if (top - itr != ie_length)
+ memmove(itr, itr + ie_length, top - (itr + ie_length));
+ top -= ie_length;
+ new_len -= ie_length;
+ }
+ }
+ uwb_rc->ies->wIELength = cpu_to_le16(new_len);
+ return 0;
+}
+
+
+/**
+ * Remove an IE currently being transmitted by device
+ *
+ * @element_id: id of IE to be removed from device's beacon
+ */
+int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id)
+{
+ struct device *dev = &uwb_rc->uwb_dev.dev;
+ int result;
+
+ if (uwb_rc->ies == NULL)
+ return -ESHUTDOWN;
+ mutex_lock(&uwb_rc->ies_mutex);
+ result = uwb_rc_ie_cache_rm(uwb_rc, element_id);
+ if (result < 0)
+ dev_err(dev, "Cannot remove IE from cache.\n");
+ if (uwb_rc->beaconing != -1) {
+ result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies);
+ if (result < 0)
+ dev_err(dev, "Cannot set new IE on device.\n");
+ }
+ mutex_unlock(&uwb_rc->ies_mutex);
+ return result;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_ie_rm);
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c
new file mode 100644
index 00000000000..15f856c9689
--- /dev/null
+++ b/drivers/uwb/lc-dev.c
@@ -0,0 +1,492 @@
+/*
+ * Ultra Wide Band
+ * Life cycle of devices
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kdev_t.h>
+#include <linux/random.h>
+#include "uwb-internal.h"
+
+#define D_LOCAL 1
+#include <linux/uwb/debug.h>
+
+
+/* We initialize addresses to 0xff (invalid, as it is bcast) */
+static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr)
+{
+ memset(&addr->data, 0xff, sizeof(addr->data));
+}
+
+static inline void uwb_mac_addr_init(struct uwb_mac_addr *addr)
+{
+ memset(&addr->data, 0xff, sizeof(addr->data));
+}
+
+/* @returns !0 if a device @addr is a broadcast address */
+static inline int uwb_dev_addr_bcast(const struct uwb_dev_addr *addr)
+{
+ static const struct uwb_dev_addr bcast = { .data = { 0xff, 0xff } };
+ return !uwb_dev_addr_cmp(addr, &bcast);
+}
+
+/*
+ * Add callback @new to be called when an event occurs in @rc.
+ */
+int uwb_notifs_register(struct uwb_rc *rc, struct uwb_notifs_handler *new)
+{
+ if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
+ return -ERESTARTSYS;
+ list_add(&new->list_node, &rc->notifs_chain.list);
+ mutex_unlock(&rc->notifs_chain.mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uwb_notifs_register);
+
+/*
+ * Remove event handler (callback)
+ */
+int uwb_notifs_deregister(struct uwb_rc *rc, struct uwb_notifs_handler *entry)
+{
+ if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
+ return -ERESTARTSYS;
+ list_del(&entry->list_node);
+ mutex_unlock(&rc->notifs_chain.mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uwb_notifs_deregister);
+
+/*
+ * Notify all event handlers of a given event on @rc
+ *
+ * We are called with a valid reference to the device, or with NULL if
+ * the event is not tied to a particular device (e.g., a BG join event).
+ */
+void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event)
+{
+ struct uwb_notifs_handler *handler;
+ if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
+ return;
+ if (!list_empty(&rc->notifs_chain.list)) {
+ list_for_each_entry(handler, &rc->notifs_chain.list, list_node) {
+ handler->cb(handler->data, uwb_dev, event);
+ }
+ }
+ mutex_unlock(&rc->notifs_chain.mutex);
+}
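+
+/*
+ * Illustrative sketch (not part of this patch): registering a
+ * notification handler. Only the fields touched above (cb, data,
+ * list_node) are taken from this file; the callback prototype is
+ * inferred from the call in uwb_notify() (the exact typedef lives in
+ * the uwb headers, a void return is assumed here) and the 'my_*'
+ * names are made up.
+ *
+ *	static void my_cb(void *data, struct uwb_dev *uwb_dev,
+ *			  enum uwb_notifs event)
+ *	{
+ *		if (event == UWB_NOTIF_ONAIR)
+ *			pr_info("a UWB device appeared\n");
+ *	}
+ *
+ *	static struct uwb_notifs_handler my_handler = {
+ *		.cb   = my_cb,
+ *		.data = NULL,
+ *	};
+ *
+ *	uwb_notifs_register(rc, &my_handler);
+ *	...
+ *	uwb_notifs_deregister(rc, &my_handler);
+ */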
+
+/*
+ * Release the backing device of a uwb_dev that has been dynamically allocated.
+ */
+static void uwb_dev_sys_release(struct device *dev)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+
+ d_fnstart(4, NULL, "(dev %p uwb_dev %p)\n", dev, uwb_dev);
+ uwb_bce_put(uwb_dev->bce);
+ d_printf(0, &uwb_dev->dev, "uwb_dev %p freed\n", uwb_dev);
+ memset(uwb_dev, 0x69, sizeof(*uwb_dev));
+ kfree(uwb_dev);
+ d_fnend(4, NULL, "(dev %p uwb_dev %p) = void\n", dev, uwb_dev);
+}
+
+/*
+ * Initialize a UWB device instance
+ *
+ * Alloc, zero and call this function.
+ */
+void uwb_dev_init(struct uwb_dev *uwb_dev)
+{
+ mutex_init(&uwb_dev->mutex);
+ device_initialize(&uwb_dev->dev);
+ uwb_dev->dev.release = uwb_dev_sys_release;
+ uwb_dev_addr_init(&uwb_dev->dev_addr);
+ uwb_mac_addr_init(&uwb_dev->mac_addr);
+ bitmap_fill(uwb_dev->streams, UWB_NUM_GLOBAL_STREAMS);
+}
+
+static ssize_t uwb_dev_EUI_48_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ char addr[UWB_ADDR_STRSIZE];
+
+ uwb_mac_addr_print(addr, sizeof(addr), &uwb_dev->mac_addr);
+ return sprintf(buf, "%s\n", addr);
+}
+static DEVICE_ATTR(EUI_48, S_IRUGO, uwb_dev_EUI_48_show, NULL);
+
+static ssize_t uwb_dev_DevAddr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ char addr[UWB_ADDR_STRSIZE];
+
+ uwb_dev_addr_print(addr, sizeof(addr), &uwb_dev->dev_addr);
+ return sprintf(buf, "%s\n", addr);
+}
+static DEVICE_ATTR(DevAddr, S_IRUGO, uwb_dev_DevAddr_show, NULL);
+
+/*
+ * Show the BPST of this device.
+ *
+ * Calculated from the receive time of the device's beacon and its
+ * slot number.
+ */
+static ssize_t uwb_dev_BPST_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_beca_e *bce;
+ struct uwb_beacon_frame *bf;
+ u16 bpst;
+
+ bce = uwb_dev->bce;
+ mutex_lock(&bce->mutex);
+ bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo;
+ bpst = bce->be->wBPSTOffset
+ - (u16)(bf->Beacon_Slot_Number * UWB_BEACON_SLOT_LENGTH_US);
+ mutex_unlock(&bce->mutex);
+
+ return sprintf(buf, "%d\n", bpst);
+}
+static DEVICE_ATTR(BPST, S_IRUGO, uwb_dev_BPST_show, NULL);
+
+/*
+ * Show the IEs a device is beaconing
+ *
+ * We need to access the beacon cache, so we just lock it really
+ * quick, print the IEs and unlock.
+ *
+ * We have a reference on the cache entry, so that should be
+ * quite safe.
+ */
+static ssize_t uwb_dev_IEs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+
+ return uwb_bce_print_IEs(uwb_dev, uwb_dev->bce, buf, PAGE_SIZE);
+}
+static DEVICE_ATTR(IEs, S_IRUGO, uwb_dev_IEs_show, NULL);
+
+static ssize_t uwb_dev_LQE_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_beca_e *bce = uwb_dev->bce;
+ size_t result;
+
+ mutex_lock(&bce->mutex);
+ result = stats_show(&uwb_dev->bce->lqe_stats, buf);
+ mutex_unlock(&bce->mutex);
+ return result;
+}
+
+static ssize_t uwb_dev_LQE_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_beca_e *bce = uwb_dev->bce;
+ ssize_t result;
+
+ mutex_lock(&bce->mutex);
+ result = stats_store(&uwb_dev->bce->lqe_stats, buf, size);
+ mutex_unlock(&bce->mutex);
+ return result;
+}
+static DEVICE_ATTR(LQE, S_IRUGO | S_IWUSR, uwb_dev_LQE_show, uwb_dev_LQE_store);
+
+static ssize_t uwb_dev_RSSI_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_beca_e *bce = uwb_dev->bce;
+ size_t result;
+
+ mutex_lock(&bce->mutex);
+ result = stats_show(&uwb_dev->bce->rssi_stats, buf);
+ mutex_unlock(&bce->mutex);
+ return result;
+}
+
+static ssize_t uwb_dev_RSSI_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_beca_e *bce = uwb_dev->bce;
+ ssize_t result;
+
+ mutex_lock(&bce->mutex);
+ result = stats_store(&uwb_dev->bce->rssi_stats, buf, size);
+ mutex_unlock(&bce->mutex);
+ return result;
+}
+static DEVICE_ATTR(RSSI, S_IRUGO | S_IWUSR, uwb_dev_RSSI_show, uwb_dev_RSSI_store);
+
+
+static struct attribute *dev_attrs[] = {
+ &dev_attr_EUI_48.attr,
+ &dev_attr_DevAddr.attr,
+ &dev_attr_BPST.attr,
+ &dev_attr_IEs.attr,
+ &dev_attr_LQE.attr,
+ &dev_attr_RSSI.attr,
+ NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+ .attrs = dev_attrs,
+};
+
+static struct attribute_group *groups[] = {
+ &dev_attr_group,
+ NULL,
+};
+
+/**
+ * Device SYSFS registration
+ *
+ *
+ */
+static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev)
+{
+ int result;
+ struct device *dev;
+
+ d_fnstart(4, NULL, "(uwb_dev %p parent_dev %p)\n", uwb_dev, parent_dev);
+ BUG_ON(parent_dev == NULL);
+
+ dev = &uwb_dev->dev;
+ /* Device sysfs files are only useful for neighbor devices, not
+ for local radio controllers. */
+ if (&uwb_dev->rc->uwb_dev != uwb_dev)
+ dev->groups = groups;
+ dev->parent = parent_dev;
+ dev_set_drvdata(dev, uwb_dev);
+
+ result = device_add(dev);
+ d_fnend(4, NULL, "(uwb_dev %p parent_dev %p) = %d\n", uwb_dev, parent_dev, result);
+ return result;
+}
+
+
+static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev)
+{
+ d_fnstart(4, NULL, "(uwb_dev %p)\n", uwb_dev);
+ dev_set_drvdata(&uwb_dev->dev, NULL);
+ device_del(&uwb_dev->dev);
+ d_fnend(4, NULL, "(uwb_dev %p) = void\n", uwb_dev);
+}
+
+
+/**
+ * Register and initialize a new UWB device
+ *
+ * Did you call uwb_dev_init() on it?
+ *
+ * @parent_rc: is the parent radio controller who has the link to the
+ * device. When registering the UWB device that is a UWB
+ * Radio Controller, we point back to it.
+ *
+ * If registering the device that is part of a radio controller, the
+ * caller has set rc->uwb_dev.dev. Otherwise it is to be left NULL--a
+ * new one will be allocated.
+ */
+int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
+ struct uwb_rc *parent_rc)
+{
+ int result;
+ struct device *dev;
+
+ BUG_ON(uwb_dev == NULL);
+ BUG_ON(parent_dev == NULL);
+ BUG_ON(parent_rc == NULL);
+
+ mutex_lock(&uwb_dev->mutex);
+ dev = &uwb_dev->dev;
+ uwb_dev->rc = parent_rc;
+ result = __uwb_dev_sys_add(uwb_dev, parent_dev);
+ if (result < 0)
+ printk(KERN_ERR "UWB: unable to register dev %s with sysfs: %d\n",
+ dev_name(dev), result);
+ mutex_unlock(&uwb_dev->mutex);
+ return result;
+}
+
+
+void uwb_dev_rm(struct uwb_dev *uwb_dev)
+{
+ mutex_lock(&uwb_dev->mutex);
+ __uwb_dev_sys_rm(uwb_dev);
+ mutex_unlock(&uwb_dev->mutex);
+}
+
+
+static
+int __uwb_dev_try_get(struct device *dev, void *__target_uwb_dev)
+{
+ struct uwb_dev *target_uwb_dev = __target_uwb_dev;
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ if (uwb_dev == target_uwb_dev) {
+ uwb_dev_get(uwb_dev);
+ return 1;
+ } else
+ return 0;
+}
+
+
+/**
+ * Given a UWB device descriptor, validate and refcount it
+ *
+ * @returns NULL if the device does not exist or is quiescing; the ptr to
+ * it otherwise.
+ */
+struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev)
+{
+ if (uwb_dev_for_each(rc, __uwb_dev_try_get, uwb_dev))
+ return uwb_dev;
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(uwb_dev_try_get);
+
+
+/**
+ * Remove a device from the system [grunt for other functions]
+ */
+int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc)
+{
+ struct device *dev = &uwb_dev->dev;
+ char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
+
+ d_fnstart(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p)\n", dev, uwb_dev, rc);
+ uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr);
+ uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr);
+ dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n",
+ macbuf, devbuf,
+ rc ? rc->uwb_dev.dev.parent->bus->name : "n/a",
+ rc ? dev_name(rc->uwb_dev.dev.parent) : "");
+ uwb_dev_rm(uwb_dev);
+ uwb_dev_put(uwb_dev); /* for the creation in _onair() */
+ d_fnend(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p) = 0\n", dev, uwb_dev, rc);
+ return 0;
+}
+
+
+/**
+ * A device went off the air, clean up after it!
+ *
+ * This is called by the UWB Daemon (through the beacon purge function
+ * uwb_bcn_cache_purge) when it is detected that a device has been in
+ * radio silence for a while.
+ *
+ * If this device is actually a local radio controller we don't need
+ * to go through the offair process, as it is not registered as that.
+ *
+ * NOTE: uwb_bcn_cache.mutex is held!
+ */
+void uwbd_dev_offair(struct uwb_beca_e *bce)
+{
+ struct uwb_dev *uwb_dev;
+
+ uwb_dev = bce->uwb_dev;
+ if (uwb_dev) {
+ uwb_notify(uwb_dev->rc, uwb_dev, UWB_NOTIF_OFFAIR);
+ __uwb_dev_offair(uwb_dev, uwb_dev->rc);
+ }
+}
+
+
+/**
+ * A device went on the air, start it up!
+ *
+ * This is called by the UWB Daemon when it is detected that a device
+ * has popped up in the radio range of the radio controller.
+ *
+ * It just creates the device, plugs in its beacon cache entry and
+ * notifies the listeners; that's it.
+ *
+ *
+ * NOTE: uwb_beca.mutex is held, bce->mutex is held
+ */
+void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
+{
+ int result;
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_dev *uwb_dev;
+ char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
+
+ uwb_mac_addr_print(macbuf, sizeof(macbuf), bce->mac_addr);
+ uwb_dev_addr_print(devbuf, sizeof(devbuf), &bce->dev_addr);
+ uwb_dev = kzalloc(sizeof(struct uwb_dev), GFP_KERNEL);
+ if (uwb_dev == NULL) {
+ dev_err(dev, "new device %s: Cannot allocate memory\n",
+ macbuf);
+ return;
+ }
+ uwb_dev_init(uwb_dev); /* This sets refcnt to one, we own it */
+ uwb_dev->mac_addr = *bce->mac_addr;
+ uwb_dev->dev_addr = bce->dev_addr;
+ dev_set_name(&uwb_dev->dev, macbuf);
+ result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
+ if (result < 0) {
+ dev_err(dev, "new device %s: cannot instantiate device\n",
+ macbuf);
+ goto error_dev_add;
+ }
+ /* plug the beacon cache */
+ bce->uwb_dev = uwb_dev;
+ uwb_dev->bce = bce;
+ uwb_bce_get(bce); /* released in uwb_dev_sys_release() */
+ dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
+ macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name,
+ dev_name(rc->uwb_dev.dev.parent));
+ uwb_notify(rc, uwb_dev, UWB_NOTIF_ONAIR);
+ return;
+
+error_dev_add:
+ kfree(uwb_dev);
+ return;
+}
+
+/**
+ * Iterate over the list of UWB devices, calling a @function on each
+ *
+ * See the docs for device_for_each_child().
+ *
+ * @rc: radio controller for the devices.
+ * @function: function to call.
+ * @priv: data to pass to @function.
+ * @returns: 0 if no invocation of @function returned a value
+ * different from zero; that value otherwise.
+ */
+int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f function, void *priv)
+{
+ return device_for_each_child(&rc->uwb_dev.dev, priv, function);
+}
+EXPORT_SYMBOL_GPL(uwb_dev_for_each);
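+
+/*
+ * Illustrative sketch (not part of this patch): counting the devices
+ * seen by a radio controller. 'count_helper' is a made-up name; the
+ * callback prototype matches the device_for_each_child() iterator
+ * used above, and returning 0 keeps the iteration going.
+ *
+ *	static int count_helper(struct device *dev, void *priv)
+ *	{
+ *		int *count = priv;
+ *
+ *		(*count)++;
+ *		return 0;
+ *	}
+ *
+ *	int count = 0;
+ *	uwb_dev_for_each(rc, count_helper, &count);
+ */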
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
new file mode 100644
index 00000000000..ee5772f00d4
--- /dev/null
+++ b/drivers/uwb/lc-rc.c
@@ -0,0 +1,495 @@
+/*
+ * Ultra Wide Band
+ * Life cycle of radio controllers
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * A UWB radio controller is also a UWB device, so it embeds one...
+ *
+ * List of RCs comes from the 'struct class uwb_rc_class'.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/random.h>
+#include <linux/kdev_t.h>
+#include <linux/etherdevice.h>
+#include <linux/usb.h>
+
+#define D_LOCAL 1
+#include <linux/uwb/debug.h>
+#include "uwb-internal.h"
+
+static int uwb_rc_index_match(struct device *dev, void *data)
+{
+ int *index = data;
+ struct uwb_rc *rc = dev_get_drvdata(dev);
+
+ if (rc->index == *index)
+ return 1;
+ return 0;
+}
+
+static struct uwb_rc *uwb_rc_find_by_index(int index)
+{
+ struct device *dev;
+ struct uwb_rc *rc = NULL;
+
+ dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
+ if (dev)
+ rc = dev_get_drvdata(dev);
+ return rc;
+}
+
+static int uwb_rc_new_index(void)
+{
+ int index = 0;
+
+ for (;;) {
+ if (!uwb_rc_find_by_index(index))
+ return index;
+ if (++index < 0)
+ index = 0;
+ }
+}
+
+/**
+ * Release the backing device of a uwb_rc that has been dynamically allocated.
+ */
+static void uwb_rc_sys_release(struct device *dev)
+{
+ struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev);
+ struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev);
+
+ uwb_rc_neh_destroy(rc);
+ uwb_rc_ie_release(rc);
+ d_printf(1, dev, "freed uwb_rc %p\n", rc);
+ kfree(rc);
+}
+
+
+void uwb_rc_init(struct uwb_rc *rc)
+{
+ struct uwb_dev *uwb_dev = &rc->uwb_dev;
+
+ uwb_dev_init(uwb_dev);
+ rc->uwb_dev.dev.class = &uwb_rc_class;
+ rc->uwb_dev.dev.release = uwb_rc_sys_release;
+ uwb_rc_neh_create(rc);
+ rc->beaconing = -1;
+ rc->scan_type = UWB_SCAN_DISABLED;
+ INIT_LIST_HEAD(&rc->notifs_chain.list);
+ mutex_init(&rc->notifs_chain.mutex);
+ uwb_drp_avail_init(rc);
+ uwb_rc_ie_init(rc);
+ uwb_rsv_init(rc);
+ uwb_rc_pal_init(rc);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_init);
+
+
+struct uwb_rc *uwb_rc_alloc(void)
+{
+ struct uwb_rc *rc;
+ rc = kzalloc(sizeof(*rc), GFP_KERNEL);
+ if (rc == NULL)
+ return NULL;
+ uwb_rc_init(rc);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_alloc);
+
+static struct attribute *rc_attrs[] = {
+ &dev_attr_mac_address.attr,
+ &dev_attr_scan.attr,
+ &dev_attr_beacon.attr,
+ NULL,
+};
+
+static struct attribute_group rc_attr_group = {
+ .attrs = rc_attrs,
+};
+
+/*
+ * Registration of sysfs specific stuff
+ */
+static int uwb_rc_sys_add(struct uwb_rc *rc)
+{
+ return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
+}
+
+
+static void __uwb_rc_sys_rm(struct uwb_rc *rc)
+{
+ sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
+}
+
+/**
+ * uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it
+ * @rc: the radio controller.
+ *
+ * If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF
+ * then a random locally administered EUI-48 is generated and set on
+ * the device. The probability of address collisions is sufficiently
+ * unlikely (1/2^40 = 9.1e-13) that they're not checked for.
+ */
+static
+int uwb_rc_mac_addr_setup(struct uwb_rc *rc)
+{
+ int result;
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_dev *uwb_dev = &rc->uwb_dev;
+ char devname[UWB_ADDR_STRSIZE];
+ struct uwb_mac_addr addr;
+
+ result = uwb_rc_mac_addr_get(rc, &addr);
+ if (result < 0) {
+ dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result);
+ return result;
+ }
+
+ if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) {
+ addr.data[0] = 0x02; /* locally administered and unicast */
+ get_random_bytes(&addr.data[1], sizeof(addr.data)-1);
+
+ result = uwb_rc_mac_addr_set(rc, &addr);
+ if (result < 0) {
+ uwb_mac_addr_print(devname, sizeof(devname), &addr);
+ dev_err(dev, "cannot set EUI-48 address %s: %d\n",
+ devname, result);
+ return result;
+ }
+ }
+ uwb_dev->mac_addr = addr;
+ return 0;
+}
+
+
+
+static int uwb_rc_setup(struct uwb_rc *rc)
+{
+ int result;
+ struct device *dev = &rc->uwb_dev.dev;
+
+ result = uwb_rc_reset(rc);
+ if (result < 0) {
+ dev_err(dev, "cannot reset UWB radio: %d\n", result);
+ goto error;
+ }
+ result = uwb_rc_mac_addr_setup(rc);
+ if (result < 0) {
+ dev_err(dev, "cannot setup UWB MAC address: %d\n", result);
+ goto error;
+ }
+ result = uwb_rc_dev_addr_assign(rc);
+ if (result < 0) {
+ dev_err(dev, "cannot assign UWB DevAddr: %d\n", result);
+ goto error;
+ }
+ result = uwb_rc_ie_setup(rc);
+ if (result < 0) {
+ dev_err(dev, "cannot setup IE subsystem: %d\n", result);
+ goto error_ie_setup;
+ }
+ result = uwb_rsv_setup(rc);
+ if (result < 0) {
+ dev_err(dev, "cannot setup reservation subsystem: %d\n", result);
+ goto error_rsv_setup;
+ }
+ uwb_dbg_add_rc(rc);
+ return 0;
+
+error_rsv_setup:
+ uwb_rc_ie_release(rc);
+error_ie_setup:
+error:
+ return result;
+}
+
+
+/**
+ * Register a new UWB radio controller
+ *
+ * Did you call uwb_rc_init() on your rc?
+ *
+ * We assume that this is being called with a > 0 refcount on
+ * it [through ops->{get|put}_device()]. We'll take our own, though.
+ *
+ * @parent_dev is our real device, the one that provides the actual UWB device
+ */
+int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv)
+{
+ int result;
+ struct device *dev;
+ char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
+
+ rc->index = uwb_rc_new_index();
+
+ dev = &rc->uwb_dev.dev;
+ dev_set_name(dev, "uwb%d", rc->index);
+
+ rc->priv = priv;
+
+ result = rc->start(rc);
+ if (result < 0)
+ goto error_rc_start;
+
+ result = uwb_rc_setup(rc);
+ if (result < 0) {
+ dev_err(dev, "cannot setup UWB radio controller: %d\n", result);
+ goto error_rc_setup;
+ }
+
+ result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc);
+ if (result < 0 && result != -EADDRNOTAVAIL)
+ goto error_dev_add;
+
+ result = uwb_rc_sys_add(rc);
+ if (result < 0) {
+ dev_err(parent_dev, "cannot register UWB radio controller "
+ "dev attributes: %d\n", result);
+ goto error_sys_add;
+ }
+
+ uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr);
+ uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr);
+ dev_info(dev,
+ "new uwb radio controller (mac %s dev %s) on %s %s\n",
+ macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev));
+ rc->ready = 1;
+ return 0;
+
+error_sys_add:
+ uwb_dev_rm(&rc->uwb_dev);
+error_dev_add:
+error_rc_setup:
+ rc->stop(rc);
+ uwbd_flush(rc);
+error_rc_start:
+ return result;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_add);
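+
+/*
+ * Illustrative sketch (not part of this patch): the shape of a
+ * hypothetical hardware driver's probe path. The 'my_*' names are
+ * made up; only the uwb_rc_* calls and the ops fields used elsewhere
+ * in this patch (owner, start, stop, cmd, reset) are taken from it,
+ * and the exact error-path cleanup depends on the driver.
+ *
+ *	rc = uwb_rc_alloc();
+ *	if (rc == NULL)
+ *		return -ENOMEM;
+ *	rc->owner = THIS_MODULE;
+ *	rc->start = my_rc_start;
+ *	rc->stop  = my_rc_stop;
+ *	rc->cmd   = my_rc_cmd;
+ *	rc->reset = my_rc_reset;
+ *
+ *	result = uwb_rc_add(rc, &my_hw_dev->dev, my_private_data);
+ *	if (result < 0)
+ *		uwb_rc_put(rc);	(drops the reference from uwb_rc_alloc())
+ */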
+
+
+static int uwb_dev_offair_helper(struct device *dev, void *priv)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+
+ return __uwb_dev_offair(uwb_dev, uwb_dev->rc);
+}
+
+/*
+ * Remove a Radio Controller; stop beaconing/scanning, disconnect all children
+ */
+void uwb_rc_rm(struct uwb_rc *rc)
+{
+ rc->ready = 0;
+
+ uwb_dbg_del_rc(rc);
+ uwb_rsv_cleanup(rc);
+ uwb_rc_ie_rm(rc, UWB_IDENTIFICATION_IE);
+ if (rc->beaconing >= 0)
+ uwb_rc_beacon(rc, -1, 0);
+ if (rc->scan_type != UWB_SCAN_DISABLED)
+ uwb_rc_scan(rc, rc->scanning, UWB_SCAN_DISABLED, 0);
+ uwb_rc_reset(rc);
+
+ rc->stop(rc);
+ uwbd_flush(rc);
+
+ uwb_dev_lock(&rc->uwb_dev);
+ rc->priv = NULL;
+ rc->cmd = NULL;
+ uwb_dev_unlock(&rc->uwb_dev);
+ mutex_lock(&uwb_beca.mutex);
+ uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL);
+ __uwb_rc_sys_rm(rc);
+ mutex_unlock(&uwb_beca.mutex);
+ uwb_dev_rm(&rc->uwb_dev);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_rm);
+
+static int find_rc_try_get(struct device *dev, void *data)
+{
+ struct uwb_rc *target_rc = data;
+ struct uwb_rc *rc = dev_get_drvdata(dev);
+
+ if (rc == NULL) {
+ WARN_ON(1);
+ return 0;
+ }
+ if (rc == target_rc) {
+ if (rc->ready == 0)
+ return 0;
+ else
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Given a radio controller descriptor, validate and refcount it
+ *
+ * @returns NULL if the rc does not exist or is quiescing; the ptr to
+ * it otherwise.
+ */
+struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
+{
+ struct device *dev;
+ struct uwb_rc *rc = NULL;
+
+ dev = class_find_device(&uwb_rc_class, NULL, target_rc,
+ find_rc_try_get);
+ if (dev) {
+ rc = dev_get_drvdata(dev);
+ __uwb_rc_get(rc);
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
+
+/*
+ * RC get for external refcount acquirers...
+ *
+ * Increments the refcount of the device and it's backend modules
+ */
+static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc)
+{
+ if (rc->ready == 0)
+ return NULL;
+ uwb_dev_get(&rc->uwb_dev);
+ return rc;
+}
+
+static int find_rc_grandpa(struct device *dev, void *data)
+{
+ struct device *grandpa_dev = data;
+ struct uwb_rc *rc = dev_get_drvdata(dev);
+
+ if (rc->uwb_dev.dev.parent->parent == grandpa_dev) {
+ rc = uwb_rc_get(rc);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Locate and refcount a radio controller given a common grand-parent
+ *
+ * @grandpa_dev: Pointer to the 'grandparent' device structure.
+ * @returns NULL if the rc does not exist or is quiescing; the ptr to
+ * it otherwise, properly referenced.
+ *
+ * The Radio Control interface (or the UWB Radio Controller) is always
+ * an interface of a device. The parent is the interface, the
+ * grandparent is the device that encapsulates the interface.
+ *
+ * There is no need to lock around this, as the "grandpa" would be
+ * refcounted by the target, and to remove the references, the
+ * uwb_rc_class->sem would have to be taken--we hold it, ergo we
+ * should be safe.
+ */
+struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
+{
+ struct device *dev;
+ struct uwb_rc *rc = NULL;
+
+ dev = class_find_device(&uwb_rc_class, NULL, (void *)grandpa_dev,
+ find_rc_grandpa);
+ if (dev)
+ rc = dev_get_drvdata(dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
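+
+/*
+ * Illustrative sketch (not part of this patch): a client sitting on a
+ * sibling interface of the same device as the radio controller could
+ * locate its RC through the common grandparent. 'iface' is a made-up
+ * usb_interface pointer for the example; the returned rc is already
+ * referenced, so drop it with uwb_rc_put() when done.
+ *
+ *	rc = uwb_rc_get_by_grandpa(iface->dev.parent);
+ *	if (rc == NULL)
+ *		return -ENODEV;
+ *	... use rc ...
+ *	uwb_rc_put(rc);
+ */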
+
+/**
+ * Find a radio controller by device address
+ *
+ * @returns the pointer to the radio controller, properly referenced
+ */
+static int find_rc_dev(struct device *dev, void *data)
+{
+ struct uwb_dev_addr *addr = data;
+ struct uwb_rc *rc = dev_get_drvdata(dev);
+
+ if (rc == NULL) {
+ WARN_ON(1);
+ return 0;
+ }
+ if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) {
+ rc = uwb_rc_get(rc);
+ return 1;
+ }
+ return 0;
+}
+
+struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
+{
+ struct device *dev;
+ struct uwb_rc *rc = NULL;
+
+ dev = class_find_device(&uwb_rc_class, NULL, (void *)addr,
+ find_rc_dev);
+ if (dev)
+ rc = dev_get_drvdata(dev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev);
+
+/**
+ * Drop a reference on a radio controller
+ *
+ * This is the version that should be done by entities external to the
+ * UWB Radio Control stack (ie: clients of the API).
+ */
+void uwb_rc_put(struct uwb_rc *rc)
+{
+ __uwb_rc_put(rc);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_put);
+
+/*
+ *
+ *
+ */
+ssize_t uwb_rc_print_IEs(struct uwb_rc *uwb_rc, char *buf, size_t size)
+{
+ ssize_t result;
+ struct uwb_rc_evt_get_ie *ie_info;
+ struct uwb_buf_ctx ctx;
+
+ result = uwb_rc_get_ie(uwb_rc, &ie_info);
+ if (result < 0)
+ goto error_get_ie;
+ ctx.buf = buf;
+ ctx.size = size;
+ ctx.bytes = 0;
+ uwb_ie_for_each(&uwb_rc->uwb_dev, uwb_ie_dump_hex, &ctx,
+ ie_info->IEData, result - sizeof(*ie_info));
+ result = ctx.bytes;
+ kfree(ie_info);
+error_get_ie:
+ return result;
+}
+
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
new file mode 100644
index 00000000000..9b4eb64327a
--- /dev/null
+++ b/drivers/uwb/neh.c
@@ -0,0 +1,616 @@
+/*
+ * WUSB Wire Adapter: Radio Control Interface (WUSB[8])
+ * Notification and Event Handling
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * The RC interface of the Host Wire Adapter (USB dongle) or WHCI PCI
+ * card delivers a stream of notifications and events to the
+ * notification and event endpoint or area. This code takes care of
+ * getting a buffer with that data, breaking it up in separate
+ * notifications and events and then deliver those.
+ *
+ * Events are answers to commands and they carry a context ID that
+ * associates them to the command. Notifications are just that:
+ * notifications; they come out of the blue and have a context ID of
+ * zero. Think of the context ID kind of like a handle. The
+ * uwb_rc_neh_* code deals with managing context IDs.
+ *
+ * This is why you require a handle to operate on a UWB host. When you
+ * open a handle a context ID is assigned to you.
+ *
+ * So, the way it is done is (a usage sketch follows this comment):
+ *
+ * 1. Add an event handler [uwb_rc_neh_add()] (assigns a ctx id)
+ * 2. Issue command [rc->cmd(rc, ...)]
+ * 3. Arm the timeout timer [uwb_rc_neh_arm()]
+ * 4. Release the reference to the neh [uwb_rc_neh_put()]
+ * 5. Wait for the callback
+ * 6. Command result (RCEB) is passed to the callback
+ *
+ * If (2) fails, you should remove the handle [uwb_rc_neh_rm()]
+ * instead of arming the timer.
+ *
+ * Handles are for using in *serialized* code, single thread.
+ *
+ * When the notification/event comes, the IRQ handler/endpoint
+ * callback passes the data read to uwb_rc_neh_grok() which will break
+ * it up in a discrete series of events, look up who is listening for
+ * them and execute the pertinent callbacks.
+ *
+ * If the reader detects an error while reading the data stream, call
+ * uwb_rc_neh_error().
+ *
+ * CONSTRAINTS/ASSUMPTIONS:
+ *
+ * - Most notifications/events are small (less than 0.5k), so copying
+ * them around is ok.
+ *
+ * - Notifications/events are ALWAYS smaller than PAGE_SIZE
+ *
+ * - Notifications/events always come in a single piece (ie: a buffer
+ * will always contain entire notifications/events).
+ *
+ * - we cannot know in advance how long each event is (because they
+ * lack a length field in their header--smart move by the standards
+ * body, btw). So we need a facility to get the event size given the
+ * header. This is what the EST code does (notif/Event Size
+ * Tables), check nest.c--as well, you can associate the size to
+ * the handle [w/ neh->extra_size()].
+ *
+ * - Most notifications/events are fixed size; only a few are variable
+ * size (NEST takes care of that).
+ *
+ * - Listeners of events expect them, so they usually provide a
+ * buffer, as they know the size. Listeners to notifications don't,
+ * so we allocate their buffers dynamically.
+ */
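+
+/*
+ * Illustrative sketch of the sequence above (not part of this patch;
+ * see uwb_rc_cmd_async() in reset.c for the real in-tree user). The
+ * command structure and the 'my_*' names are made up.
+ *
+ *	neh = uwb_rc_neh_add(rc, &my_cmd->rccb, expected_type,
+ *			     expected_event, my_cb, my_arg);
+ *	if (IS_ERR(neh))
+ *		return PTR_ERR(neh);
+ *	result = rc->cmd(rc, &my_cmd->rccb, sizeof(*my_cmd));
+ *	if (result < 0)
+ *		uwb_rc_neh_rm(rc, neh);		(step 2 failed)
+ *	else
+ *		uwb_rc_neh_arm(rc, neh);
+ *	uwb_rc_neh_put(neh);
+ *	(my_cb() is eventually called with the RCEB, or with
+ *	 -ETIMEDOUT if the device never answers.)
+ */
+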
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+
+#include "uwb-internal.h"
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/*
+ * UWB Radio Controller Notification/Event Handle
+ *
+ * Represents an entity waiting for an event coming from the UWB Radio
+ * Controller with a given context id (context) and type (evt_type and
+ * evt). On reception of the notification/event, the callback (cb) is
+ * called with the event.
+ *
+ * If the timer expires before the event is received, the callback is
+ * called with -ETIMEDOUT as the event size.
+ */
+struct uwb_rc_neh {
+ struct kref kref;
+
+ struct uwb_rc *rc;
+ u8 evt_type;
+ __le16 evt;
+ u8 context;
+ uwb_rc_cmd_cb_f cb;
+ void *arg;
+
+ struct timer_list timer;
+ struct list_head list_node;
+};
+
+static void uwb_rc_neh_timer(unsigned long arg);
+
+static void uwb_rc_neh_release(struct kref *kref)
+{
+ struct uwb_rc_neh *neh = container_of(kref, struct uwb_rc_neh, kref);
+
+ kfree(neh);
+}
+
+static void uwb_rc_neh_get(struct uwb_rc_neh *neh)
+{
+ kref_get(&neh->kref);
+}
+
+/**
+ * uwb_rc_neh_put - release reference to a neh
+ * @neh: the neh
+ */
+void uwb_rc_neh_put(struct uwb_rc_neh *neh)
+{
+ kref_put(&neh->kref, uwb_rc_neh_release);
+}
+
+
+/**
+ * Assigns @neh a context id from @rc's pool
+ *
+ * @rc: UWB Radio Controller descriptor; @rc->neh_lock taken
+ * @neh: Notification/Event Handle
+ * @returns 0 if context id was assigned ok; < 0 errno on error (if
+ * all the context IDs are taken).
+ *
+ * (assumes @rc->neh_lock is held).
+ *
+ * NOTE: WUSB spec reserves context ids 0x00 for notifications and
+ * 0xff is invalid, so they must not be used. Initialization
+ * fills up those two in the bitmap so they are not allocated.
+ *
+ * We spread the allocation around to reduce the possibility of two
+ * consecutively opened @neh's getting the same context ID assigned (to
+ * avoid surprises with late events that timed out a long time ago). So
+ * first we search from where @rc->ctx_roll is, if not found, we
+ * search from zero.
+ */
+static
+int __uwb_rc_ctx_get(struct uwb_rc *rc, struct uwb_rc_neh *neh)
+{
+ int result;
+ result = find_next_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX,
+ rc->ctx_roll++);
+ if (result < UWB_RC_CTX_MAX)
+ goto found;
+ result = find_first_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX);
+ if (result < UWB_RC_CTX_MAX)
+ goto found;
+ return -ENFILE;
+found:
+ set_bit(result, rc->ctx_bm);
+ neh->context = result;
+ return 0;
+}
+
+
+/** Releases @neh's context ID back to @rc (@rc->neh_lock is locked). */
+static
+void __uwb_rc_ctx_put(struct uwb_rc *rc, struct uwb_rc_neh *neh)
+{
+ struct device *dev = &rc->uwb_dev.dev;
+ if (neh->context == 0)
+ return;
+ if (test_bit(neh->context, rc->ctx_bm) == 0) {
+ dev_err(dev, "context %u not set in bitmap\n",
+ neh->context);
+ WARN_ON(1);
+ }
+ clear_bit(neh->context, rc->ctx_bm);
+ neh->context = 0;
+}
+
+/**
+ * uwb_rc_neh_add - add a neh for a radio controller command
+ * @rc: the radio controller
+ * @cmd: the radio controller command
+ * @expected_type: the type of the expected response event
+ * @expected_event: the expected event ID
+ * @cb: callback for when the event is received
+ * @arg: argument for the callback
+ *
+ * Creates a neh and adds it to the list of those waiting for an
+ * event. A context ID will be assigned to the command.
+ */
+struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
+ u8 expected_type, u16 expected_event,
+ uwb_rc_cmd_cb_f cb, void *arg)
+{
+ int result;
+ unsigned long flags;
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_rc_neh *neh;
+
+ neh = kzalloc(sizeof(*neh), GFP_KERNEL);
+ if (neh == NULL) {
+ result = -ENOMEM;
+ goto error_kzalloc;
+ }
+
+ kref_init(&neh->kref);
+ INIT_LIST_HEAD(&neh->list_node);
+ init_timer(&neh->timer);
+ neh->timer.function = uwb_rc_neh_timer;
+ neh->timer.data = (unsigned long)neh;
+
+ neh->rc = rc;
+ neh->evt_type = expected_type;
+ neh->evt = cpu_to_le16(expected_event);
+ neh->cb = cb;
+ neh->arg = arg;
+
+ spin_lock_irqsave(&rc->neh_lock, flags);
+ result = __uwb_rc_ctx_get(rc, neh);
+ if (result >= 0) {
+ cmd->bCommandContext = neh->context;
+ list_add_tail(&neh->list_node, &rc->neh_list);
+ uwb_rc_neh_get(neh);
+ }
+ spin_unlock_irqrestore(&rc->neh_lock, flags);
+ if (result < 0)
+ goto error_ctx_get;
+
+ return neh;
+
+error_ctx_get:
+ kfree(neh);
+error_kzalloc:
+ dev_err(dev, "cannot open handle to radio controller: %d\n", result);
+ return ERR_PTR(result);
+}
+
+static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
+{
+ del_timer(&neh->timer);
+ __uwb_rc_ctx_put(rc, neh);
+ list_del(&neh->list_node);
+}
+
+/**
+ * uwb_rc_neh_rm - remove a neh.
+ * @rc: the radio controller
+ * @neh: the neh to remove
+ *
+ * Remove an active neh immediately instead of waiting for the event
+ * (or a time out).
+ */
+void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rc->neh_lock, flags);
+ __uwb_rc_neh_rm(rc, neh);
+ spin_unlock_irqrestore(&rc->neh_lock, flags);
+
+ uwb_rc_neh_put(neh);
+}
+
+/**
+ * uwb_rc_neh_arm - arm an event handler timeout timer
+ *
+ * @rc: UWB Radio Controller
+ * @neh: Notification/event handler for @rc
+ *
+ * The timer is only armed if the neh is active.
+ */
+void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rc->neh_lock, flags);
+ if (neh->context)
+ mod_timer(&neh->timer,
+ jiffies + msecs_to_jiffies(UWB_RC_CMD_TIMEOUT_MS));
+ spin_unlock_irqrestore(&rc->neh_lock, flags);
+}
+
+static void uwb_rc_neh_cb(struct uwb_rc_neh *neh, struct uwb_rceb *rceb, size_t size)
+{
+ (*neh->cb)(neh->rc, neh->arg, rceb, size);
+ uwb_rc_neh_put(neh);
+}
+
+static bool uwb_rc_neh_match(struct uwb_rc_neh *neh, const struct uwb_rceb *rceb)
+{
+ return neh->evt_type == rceb->bEventType
+ && neh->evt == rceb->wEvent
+ && neh->context == rceb->bEventContext;
+}
+
+/**
+ * Find the handle waiting for a RC Radio Control Event
+ *
+ * @rc: UWB Radio Controller
+ * @rceb: Pointer to the RCEB buffer
+ *
+ * Looks up the neh whose type, event code and context ID match
+ * @rceb. If one is found it is removed from @rc's list (so it will
+ * not time out) and returned; otherwise NULL is returned.
+ */
+static
+struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc,
+ const struct uwb_rceb *rceb)
+{
+ struct uwb_rc_neh *neh = NULL, *h;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rc->neh_lock, flags);
+
+ list_for_each_entry(h, &rc->neh_list, list_node) {
+ if (uwb_rc_neh_match(h, rceb)) {
+ neh = h;
+ break;
+ }
+ }
+
+ if (neh)
+ __uwb_rc_neh_rm(rc, neh);
+
+ spin_unlock_irqrestore(&rc->neh_lock, flags);
+
+ return neh;
+}
+
+
+/**
+ * Process notifications coming from the radio control interface
+ *
+ * @rc: UWB Radio Control Interface descriptor
+ * @rceb: Buffer with the notification data (RCEB plus payload); it is
+ * handed over to the queued event.
+ * @size: Size of @rceb in bytes, or a negative error code.
+ *
+ * This function is called by the event/notif handling subsystem when
+ * notifications arrive (hwarc_probe() arms a notification/event handle
+ * that calls back this function for every received notification; this
+ * function then will rearm itself).
+ *
+ * Notification data buffers are dynamically allocated by the NEH
+ * handling code in neh.c [uwb_rc_neh_lookup()]. What is actually
+ * allocated is space to contain the notification data.
+ *
+ * Buffers are prefixed with a Radio Control Event Block (RCEB) as
+ * defined by the WUSB Wired-Adapter Radio Control interface. We
+ * just use it for the notification code.
+ *
+ * On each case statement we just transcode endianness of the different
+ * fields. We declare a pointer to a RCI definition of an event, and
+ * then to a UWB definition of the same event (which are the same,
+ * remember); even if we use different pointers, they refer to the same
+ * data.
+ */
+static
+void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size)
+{
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_event *uwb_evt;
+
+ if (size == -ESHUTDOWN)
+ return;
+ if (size < 0) {
+ dev_err(dev, "ignoring event with error code %zu\n",
+ size);
+ return;
+ }
+
+ uwb_evt = kzalloc(sizeof(*uwb_evt), GFP_ATOMIC);
+ if (unlikely(uwb_evt == NULL)) {
+ dev_err(dev, "no memory to queue event 0x%02x/%04x/%02x\n",
+ rceb->bEventType, le16_to_cpu(rceb->wEvent),
+ rceb->bEventContext);
+ return;
+ }
+ uwb_evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */
+ uwb_evt->ts_jiffies = jiffies;
+ uwb_evt->type = UWB_EVT_TYPE_NOTIF;
+ uwb_evt->notif.size = size;
+ uwb_evt->notif.rceb = rceb;
+
+ switch (le16_to_cpu(rceb->wEvent)) {
+ /* Trap some vendor specific events
+ *
+ * FIXME: move this to handling in ptc-est, where we
+ * register a NULL event handler for these two guys
+ * using the Intel IDs.
+ */
+ case 0x0103:
+ dev_info(dev, "FIXME: DEVICE ADD\n");
+ return;
+ case 0x0104:
+ dev_info(dev, "FIXME: DEVICE RM\n");
+ return;
+ default:
+ break;
+ }
+
+ uwbd_event_queue(uwb_evt);
+}
+
+static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size_t size)
+{
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_rc_neh *neh;
+ struct uwb_rceb *notif;
+
+ if (rceb->bEventContext == 0) {
+ notif = kmalloc(size, GFP_ATOMIC);
+ if (notif) {
+ memcpy(notif, rceb, size);
+ uwb_rc_notif(rc, notif, size);
+ } else
+ dev_err(dev, "event 0x%02x/%04x/%02x (%zu bytes): no memory\n",
+ rceb->bEventType, le16_to_cpu(rceb->wEvent),
+ rceb->bEventContext, size);
+ } else {
+ neh = uwb_rc_neh_lookup(rc, rceb);
+ if (neh)
+ uwb_rc_neh_cb(neh, rceb, size);
+ else
+ dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
+ rceb->bEventType, le16_to_cpu(rceb->wEvent),
+ rceb->bEventContext, size);
+ }
+}
+
+/**
+ * Given a buffer with one or more UWB RC events/notifications, break
+ * them up and dispatch them.
+ *
+ * @rc: UWB Radio Controller
+ * @buf: Buffer with the stream of notifications/events
+ * @buf_size: Amount of data in the buffer
+ *
+ * Note each notification/event starts always with a 'struct
+ * uwb_rceb', so the minimum size is 4 bytes.
+ *
+ * The device may pass us events formatted differently than expected.
+ * These are first filtered, potentially creating a new event in a new
+ * memory location. If a new event is created by the filter it is also
+ * freed here.
+ *
+ * For each notif/event, tries to guess the size looking at the EST
+ * tables, then looks for a neh that is waiting for that event and if
+ * found, copies the payload to the neh's buffer and calls it back. If
+ * not, the data is ignored.
+ *
+ * Note that if we can't find a size description in the EST tables, we
+ * still might find a size in the 'neh' handle in uwb_rc_neh_lookup().
+ *
+ * Assumptions:
+ *
+ * @rc->neh_lock is NOT taken
+ *
+ * We keep track of various sizes here:
+ * size: contains the size of the buffer that is processed for the
+ * incoming event. this buffer may contain events that are not
+ * formatted as WHCI.
+ * real_size: the actual space taken by this event in the buffer.
+ * We need to keep track of the real size of an event to be able to
+ * advance the buffer correctly.
+ * event_size: the size of the event as expected by the core layer
+ * [OR] the size of the event after filtering. If the filtering
+ * created a new event in a new memory location then this is
+ * effectively the size of a new event buffer
+ */
+void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size)
+{
+ struct device *dev = &rc->uwb_dev.dev;
+ void *itr;
+ struct uwb_rceb *rceb;
+ size_t size, real_size, event_size;
+ int needtofree;
+
+ d_fnstart(3, dev, "(rc %p buf %p %zu buf_size)\n", rc, buf, buf_size);
+ d_printf(2, dev, "groking event block: %zu bytes\n", buf_size);
+ itr = buf;
+ size = buf_size;
+ while (size > 0) {
+ if (size < sizeof(*rceb)) {
+ dev_err(dev, "not enough data in event buffer to "
+ "process incoming events (%zu left, minimum is "
+ "%zu)\n", size, sizeof(*rceb));
+ break;
+ }
+
+ rceb = itr;
+ if (rc->filter_event) {
+ needtofree = rc->filter_event(rc, &rceb, size,
+ &real_size, &event_size);
+ if (needtofree < 0 && needtofree != -ENOANO) {
+ dev_err(dev, "BUG: Unable to filter event "
+ "(0x%02x/%04x/%02x) from "
+ "device. \n", rceb->bEventType,
+ le16_to_cpu(rceb->wEvent),
+ rceb->bEventContext);
+ break;
+ }
+ } else
+ needtofree = -ENOANO;
+ /* do real processing if there was no filtering or the
+ * filtering didn't act */
+ if (needtofree == -ENOANO) {
+ ssize_t ret = uwb_est_find_size(rc, rceb, size);
+ if (ret < 0)
+ break;
+ if (ret > size) {
+ dev_err(dev, "BUG: hw sent incomplete event "
+ "0x%02x/%04x/%02x (%zd bytes), only got "
+ "%zu bytes. We don't handle that.\n",
+ rceb->bEventType, le16_to_cpu(rceb->wEvent),
+ rceb->bEventContext, ret, size);
+ break;
+ }
+ real_size = event_size = ret;
+ }
+ uwb_rc_neh_grok_event(rc, rceb, event_size);
+
+ if (needtofree == 1)
+ kfree(rceb);
+
+ itr += real_size;
+ size -= real_size;
+ d_printf(2, dev, "consumed %zd bytes, %zu left\n",
+ event_size, size);
+ }
+ d_fnend(3, dev, "(rc %p buf %p %zu buf_size) = void\n", rc, buf, buf_size);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_neh_grok);
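+
+/*
+ * Illustrative sketch (not part of this patch): a transport driver's
+ * read completion would typically hand the raw buffer over like this
+ * and report stream errors via uwb_rc_neh_error(). The 'my_*' and
+ * 'read_status'/'bytes_read' names are made up.
+ *
+ *	if (read_status < 0)
+ *		uwb_rc_neh_error(my_rc, read_status);
+ *	else
+ *		uwb_rc_neh_grok(my_rc, my_buffer, bytes_read);
+ */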
+
+
+/**
+ * The entity that reads from the device notification/event channel has
+ * detected an error.
+ *
+ * @rc: UWB Radio Controller
+ * @error: Errno error code
+ *
+ */
+void uwb_rc_neh_error(struct uwb_rc *rc, int error)
+{
+ struct uwb_rc_neh *neh, *next;
+ unsigned long flags;
+
+ BUG_ON(error >= 0);
+ spin_lock_irqsave(&rc->neh_lock, flags);
+ list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) {
+ __uwb_rc_neh_rm(rc, neh);
+ uwb_rc_neh_cb(neh, NULL, error);
+ }
+ spin_unlock_irqrestore(&rc->neh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
+
+
+static void uwb_rc_neh_timer(unsigned long arg)
+{
+ struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg;
+ struct uwb_rc *rc = neh->rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rc->neh_lock, flags);
+ __uwb_rc_neh_rm(rc, neh);
+ spin_unlock_irqrestore(&rc->neh_lock, flags);
+
+ uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT);
+}
+
+/** Initializes the @rc's neh subsystem
+ */
+void uwb_rc_neh_create(struct uwb_rc *rc)
+{
+ spin_lock_init(&rc->neh_lock);
+ INIT_LIST_HEAD(&rc->neh_list);
+ set_bit(0, rc->ctx_bm); /* 0 is reserved (see [WUSB] table 8-65) */
+ set_bit(0xff, rc->ctx_bm); /* and 0xff is invalid */
+ rc->ctx_roll = 1;
+}
+
+
+/** Release's the @rc's neh subsystem */
+void uwb_rc_neh_destroy(struct uwb_rc *rc)
+{
+ unsigned long flags;
+ struct uwb_rc_neh *neh, *next;
+
+ spin_lock_irqsave(&rc->neh_lock, flags);
+ list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) {
+ __uwb_rc_neh_rm(rc, neh);
+ uwb_rc_neh_put(neh);
+ }
+ spin_unlock_irqrestore(&rc->neh_lock, flags);
+}
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
new file mode 100644
index 00000000000..1afb38eacb9
--- /dev/null
+++ b/drivers/uwb/pal.c
@@ -0,0 +1,91 @@
+/*
+ * UWB PAL support.
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/uwb.h>
+
+#include "uwb-internal.h"
+
+/**
+ * uwb_pal_init - initialize a UWB PAL
+ * @pal: the PAL to initialize
+ */
+void uwb_pal_init(struct uwb_pal *pal)
+{
+ INIT_LIST_HEAD(&pal->node);
+}
+EXPORT_SYMBOL_GPL(uwb_pal_init);
+
+/**
+ * uwb_pal_register - register a UWB PAL
+ * @rc: the radio controller the PAL will be using
+ * @pal: the PAL
+ *
+ * The PAL must be initialized with uwb_pal_init().
+ */
+int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal)
+{
+ int ret;
+
+ if (pal->device) {
+ ret = sysfs_create_link(&pal->device->kobj,
+ &rc->uwb_dev.dev.kobj, "uwb_rc");
+ if (ret < 0)
+ return ret;
+ ret = sysfs_create_link(&rc->uwb_dev.dev.kobj,
+ &pal->device->kobj, pal->name);
+ if (ret < 0) {
+ sysfs_remove_link(&pal->device->kobj, "uwb_rc");
+ return ret;
+ }
+ }
+
+ spin_lock(&rc->pal_lock);
+ list_add(&pal->node, &rc->pals);
+ spin_unlock(&rc->pal_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(uwb_pal_register);
+
+/**
+ * uwb_pal_unregister - unregister a UWB PAL
+ * @rc: the radio controller the PAL was using
+ * @pal: the PAL
+ */
+void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal)
+{
+ spin_lock(&rc->pal_lock);
+ list_del(&pal->node);
+ spin_unlock(&rc->pal_lock);
+
+ if (pal->device) {
+ sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name);
+ sysfs_remove_link(&pal->device->kobj, "uwb_rc");
+ }
+}
+EXPORT_SYMBOL_GPL(uwb_pal_unregister);
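+
+/*
+ * Illustrative sketch (not part of this patch): a PAL driver binding
+ * itself to a radio controller. Only the fields touched in this file
+ * (node, name, device) are taken from the patch; 'my_pal' and
+ * 'my_platform_dev' are made up, and name is assumed to be a plain
+ * string used for the sysfs link.
+ *
+ *	uwb_pal_init(&my_pal);
+ *	my_pal.name   = "my-pal";
+ *	my_pal.device = &my_platform_dev->dev;
+ *	result = uwb_pal_register(rc, &my_pal);
+ *	...
+ *	uwb_pal_unregister(rc, &my_pal);
+ */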
+
+/**
+ * uwb_rc_pal_init - initialize the PAL related parts of a radio controller
+ * @rc: the radio controller
+ */
+void uwb_rc_pal_init(struct uwb_rc *rc)
+{
+ spin_lock_init(&rc->pal_lock);
+ INIT_LIST_HEAD(&rc->pals);
+}
diff --git a/drivers/uwb/reset.c b/drivers/uwb/reset.c
new file mode 100644
index 00000000000..8de856fa795
--- /dev/null
+++ b/drivers/uwb/reset.c
@@ -0,0 +1,362 @@
+/*
+ * Ultra Wide Band
+ * UWB basic command support and radio reset
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME:
+ *
+ * - docs
+ *
+ * - Now we are serializing (using the uwb_dev->mutex) the command
+ * execution; it should be parallelized as much as possible some
+ * day.
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "uwb-internal.h"
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/**
+ * Command result codes (WUSB1.0[T8-69])
+ */
+static
+const char *__strerror[] = {
+ "success",
+ "failure",
+ "hardware failure",
+ "no more slots",
+ "beacon is too large",
+ "invalid parameter",
+ "unsupported power level",
+ "time out (wa) or invalid ie data (whci)",
+ "beacon size exceeded",
+ "cancelled",
+ "invalid state",
+ "invalid size",
+ "ack not recieved",
+ "no more asie notification",
+};
+
+
+/** Return a string matching the given error code */
+const char *uwb_rc_strerror(unsigned code)
+{
+ if (code == 255)
+ return "time out";
+ if (code >= ARRAY_SIZE(__strerror))
+ return "unknown error";
+ return __strerror[code];
+}
+
+int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ u8 expected_type, u16 expected_event,
+ uwb_rc_cmd_cb_f cb, void *arg)
+{
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_rc_neh *neh;
+ int needtofree = 0;
+ int result;
+
+ uwb_dev_lock(&rc->uwb_dev); /* Protect against rc->priv being removed */
+ if (rc->priv == NULL) {
+ uwb_dev_unlock(&rc->uwb_dev);
+ return -ESHUTDOWN;
+ }
+
+ if (rc->filter_cmd) {
+ needtofree = rc->filter_cmd(rc, &cmd, &cmd_size);
+ if (needtofree < 0 && needtofree != -ENOANO) {
+ dev_err(dev, "%s: filter error: %d\n",
+ cmd_name, needtofree);
+ uwb_dev_unlock(&rc->uwb_dev);
+ return needtofree;
+ }
+ }
+
+ neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event, cb, arg);
+ if (IS_ERR(neh)) {
+ result = PTR_ERR(neh);
+ goto out;
+ }
+
+ result = rc->cmd(rc, cmd, cmd_size);
+ uwb_dev_unlock(&rc->uwb_dev);
+ if (result < 0)
+ uwb_rc_neh_rm(rc, neh);
+ else
+ uwb_rc_neh_arm(rc, neh);
+ uwb_rc_neh_put(neh);
+out:
+ if (needtofree == 1)
+ kfree(cmd);
+ return result < 0 ? result : 0;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_cmd_async);
+
+struct uwb_rc_cmd_done_params {
+ struct completion completion;
+ struct uwb_rceb *reply;
+ ssize_t reply_size;
+};
+
+static void uwb_rc_cmd_done(struct uwb_rc *rc, void *arg,
+ struct uwb_rceb *reply, ssize_t reply_size)
+{
+ struct uwb_rc_cmd_done_params *p = (struct uwb_rc_cmd_done_params *)arg;
+
+ if (reply_size > 0) {
+ if (p->reply)
+ reply_size = min(p->reply_size, reply_size);
+ else
+ p->reply = kmalloc(reply_size, GFP_ATOMIC);
+
+ if (p->reply)
+ memcpy(p->reply, reply, reply_size);
+ else
+ reply_size = -ENOMEM;
+ }
+ p->reply_size = reply_size;
+ complete(&p->completion);
+}
+
+
+/**
+ * Generic function for issuing commands to the Radio Control Interface
+ *
+ * @rc: UWB Radio Control descriptor
+ * @cmd_name: Name of the command being issued (for error messages)
+ * @cmd: Pointer to rccb structure containing the command;
+ * normally you embed this structure as the first member of
+ * the full command structure.
+ * @cmd_size: Size of the whole command buffer pointed to by @cmd.
+ * @reply: Pointer to where to store the reply
+ * @reply_size: @reply's size
+ * @expected_type: Expected type in the return event
+ * @expected_event: Expected event code in the return event
+ * @preply: Here a pointer to where the event data is received will
+ * be stored. Once done with the data, free with kfree().
+ *
+ * This function is generic; it works for commands that return a fixed
+ * and known size or for commands that return a variable amount of data.
+ *
+ * If a buffer is provided, that is used, although it could be chopped
+ * to the maximum size of the buffer. If the buffer is NULL, then one
+ * will be allocated in *preply with the whole contents of the reply.
+ *
+ * @rc needs to be referenced
+ */
+static
+ssize_t __uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ struct uwb_rceb *reply, size_t reply_size,
+ u8 expected_type, u16 expected_event,
+ struct uwb_rceb **preply)
+{
+ ssize_t result = 0;
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_rc_cmd_done_params params;
+
+ init_completion(&params.completion);
+ params.reply = reply;
+ params.reply_size = reply_size;
+
+ result = uwb_rc_cmd_async(rc, cmd_name, cmd, cmd_size,
+ expected_type, expected_event,
+ uwb_rc_cmd_done, &params);
+ if (result)
+ return result;
+
+ wait_for_completion(&params.completion);
+
+ if (preply)
+ *preply = params.reply;
+
+ if (params.reply_size < 0)
+ dev_err(dev, "%s: confirmation event 0x%02x/%04x/%02x "
+ "reception failed: %d\n", cmd_name,
+ expected_type, expected_event, cmd->bCommandContext,
+ (int)params.reply_size);
+ return params.reply_size;
+}
+
+
+/**
+ * Generic function for issuing commands to the Radio Control Interface
+ *
+ * @rc: UWB Radio Control descriptor
+ * @cmd_name: Name of the command being issued (for error messages)
+ * @cmd: Pointer to rccb structure containing the command;
+ * normally you embed this structure as the first member of
+ * the full command structure.
+ * @cmd_size: Size of the whole command buffer pointed to by @cmd.
+ * @reply: Pointer to the beginning of the confirmation event
+ * buffer. Normally bigger than a 'struct hwarc_rceb'.
+ * You need to fill out reply->bEventType and reply->wEvent (in
+ * cpu order) as the function will use them to verify the
+ * confirmation event.
+ * @reply_size: Size of the reply buffer
+ *
+ * The function checks that the length returned in the reply is at
+ * least as big as @reply_size; if not, it will be deemed an error and
+ * -EIO returned.
+ *
+ * @rc needs to be referenced
+ */
+ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ struct uwb_rceb *reply, size_t reply_size)
+{
+ struct device *dev = &rc->uwb_dev.dev;
+ ssize_t result;
+
+ result = __uwb_rc_cmd(rc, cmd_name,
+ cmd, cmd_size, reply, reply_size,
+ reply->bEventType, reply->wEvent, NULL);
+
+ if (result > 0 && result < reply_size) {
+ dev_err(dev, "%s: not enough data returned for decoding reply "
+ "(%zu bytes received vs at least %zu needed)\n",
+ cmd_name, result, reply_size);
+ result = -EIO;
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(uwb_rc_cmd);
+
+
+/**
+ * Generic function for issuing commands to the Radio Control
+ * Interface that return an unknown amount of data
+ *
+ * @rc: UWB Radio Control descriptor
+ * @cmd_name: Name of the command being issued (for error messages)
+ * @cmd: Pointer to rccb structure containing the command;
+ * normally you embed this structure as the first member of
+ * the full command structure.
+ * @cmd_size: Size of the whole command buffer pointed to by @cmd.
+ * @expected_type: Expected type in the return event
+ * @expected_event: Expected event code in the return event
+ * @preply: Here a pointer to where the event data is received will
+ * be stored. Once done with the data, free with kfree().
+ *
+ * The function checks that the length returned in the reply is at
+ * least as big as a 'struct uwb_rceb *'; if not, it will be deemed an
+ * error and -EIO returned.
+ *
+ * @rc needs to be referenced
+ */
+ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ u8 expected_type, u16 expected_event,
+ struct uwb_rceb **preply)
+{
+ return __uwb_rc_cmd(rc, cmd_name, cmd, cmd_size, NULL, 0,
+ expected_type, expected_event, preply);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_vcmd);
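+
+/*
+ * Illustrative sketch (not part of this patch): using uwb_rc_vcmd()
+ * for a command whose reply size is unknown in advance. The command
+ * code, the 'my_*' names and the assumption that the rccb is embedded
+ * as the command's first member (as recommended above) are made up;
+ * UWB_RC_CET_GENERAL and kfree()ing the returned reply come from this
+ * patch.
+ *
+ *	struct uwb_rceb *reply;
+ *	ssize_t len;
+ *
+ *	len = uwb_rc_vcmd(rc, "MY-CMD", &my_cmd->rccb, sizeof(*my_cmd),
+ *			  UWB_RC_CET_GENERAL, my_expected_event, &reply);
+ *	if (len < 0)
+ *		return len;
+ *	... parse len bytes starting at reply ...
+ *	kfree(reply);
+ */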
+
+
+/**
+ * Reset a UWB Host Controller (and all radio settings)
+ *
+ * @rc: Host Controller descriptor
+ * @returns: 0 if ok, < 0 errno code on error
+ *
+ * We put the command on kmalloc'ed memory as some arches cannot do
+ * USB from the stack. The reply event is copied from a staging buffer,
+ * so it can live on the stack. See WUSB1.0[8.6.2.4] for more details.
+ */
+int uwb_rc_reset(struct uwb_rc *rc)
+{
+ int result = -ENOMEM;
+ struct uwb_rc_evt_confirm reply;
+ struct uwb_rccb *cmd;
+ size_t cmd_size = sizeof(*cmd);
+
+ mutex_lock(&rc->uwb_dev.mutex);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_kzalloc;
+ cmd->bCommandType = UWB_RC_CET_GENERAL;
+ cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
+ reply.rceb.bEventType = UWB_RC_CET_GENERAL;
+ reply.rceb.wEvent = UWB_RC_CMD_RESET;
+ result = uwb_rc_cmd(rc, "RESET", cmd, cmd_size,
+ &reply.rceb, sizeof(reply));
+ if (result < 0)
+ goto error_cmd;
+ if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+ dev_err(&rc->uwb_dev.dev,
+ "RESET: command execution failed: %s (%d)\n",
+ uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
+ result = -EIO;
+ }
+error_cmd:
+ kfree(cmd);
+error_kzalloc:
+ mutex_unlock(&rc->uwb_dev.mutex);
+ return result;
+}
+
+int uwbd_msg_handle_reset(struct uwb_event *evt)
+{
+ struct uwb_rc *rc = evt->rc;
+ int ret;
+
+ /* Need to prevent the RC hardware module going away while in
+ the rc->reset() call. */
+ if (!try_module_get(rc->owner))
+ return 0;
+
+ dev_info(&rc->uwb_dev.dev, "resetting radio controller\n");
+ ret = rc->reset(rc);
+ if (ret)
+ dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret);
+
+ module_put(rc->owner);
+ return ret;
+}
+
+/**
+ * uwb_rc_reset_all - request a reset of the radio controller and PALs
+ * @rc: the radio controller of the hardware device to be reset.
+ *
+ * The full hardware reset of the radio controller and all the PALs
+ * will be scheduled.
+ */
+void uwb_rc_reset_all(struct uwb_rc *rc)
+{
+ struct uwb_event *evt;
+
+ evt = kzalloc(sizeof(struct uwb_event), GFP_ATOMIC);
+ if (unlikely(evt == NULL))
+ return;
+
+ evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */
+ evt->ts_jiffies = jiffies;
+ evt->type = UWB_EVT_TYPE_MSG;
+ evt->message = UWB_EVT_MSG_RESET;
+
+ uwbd_event_queue(evt);
+}
+EXPORT_SYMBOL_GPL(uwb_rc_reset_all);
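
As a usage note (illustrative sketch only, not part of the patch): a host controller driver that detects an unrecoverable hardware error can simply call uwb_rc_reset_all(); the reset itself then runs later, in uwbd context, through uwbd_msg_handle_reset() above.

static void my_handle_fatal_error(struct uwb_rc *rc)
{
	dev_err(&rc->uwb_dev.dev, "fatal error, scheduling full reset\n");
	uwb_rc_reset_all(rc);	/* safe from atomic context (uses GFP_ATOMIC) */
}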
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
new file mode 100644
index 00000000000..bae16204576
--- /dev/null
+++ b/drivers/uwb/rsv.c
@@ -0,0 +1,680 @@
+/*
+ * UWB reservation management.
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/uwb.h>
+
+#include "uwb-internal.h"
+
+static void uwb_rsv_timer(unsigned long arg);
+
+static const char *rsv_states[] = {
+ [UWB_RSV_STATE_NONE] = "none",
+ [UWB_RSV_STATE_O_INITIATED] = "initiated",
+ [UWB_RSV_STATE_O_PENDING] = "pending",
+ [UWB_RSV_STATE_O_MODIFIED] = "modified",
+ [UWB_RSV_STATE_O_ESTABLISHED] = "established",
+ [UWB_RSV_STATE_T_ACCEPTED] = "accepted",
+ [UWB_RSV_STATE_T_DENIED] = "denied",
+ [UWB_RSV_STATE_T_PENDING] = "pending",
+};
+
+static const char *rsv_types[] = {
+ [UWB_DRP_TYPE_ALIEN_BP] = "alien-bp",
+ [UWB_DRP_TYPE_HARD] = "hard",
+ [UWB_DRP_TYPE_SOFT] = "soft",
+ [UWB_DRP_TYPE_PRIVATE] = "private",
+ [UWB_DRP_TYPE_PCA] = "pca",
+};
+
+/**
+ * uwb_rsv_state_str - return a string for a reservation state
+ * @state: the reservation state.
+ */
+const char *uwb_rsv_state_str(enum uwb_rsv_state state)
+{
+ if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST)
+ return "unknown";
+ return rsv_states[state];
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_state_str);
+
+/**
+ * uwb_rsv_type_str - return a string for a reservation type
+ * @type: the reservation type
+ */
+const char *uwb_rsv_type_str(enum uwb_drp_type type)
+{
+ if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA)
+ return "invalid";
+ return rsv_types[type];
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_type_str);
+
+static void uwb_rsv_dump(struct uwb_rsv *rsv)
+{
+ struct device *dev = &rsv->rc->uwb_dev.dev;
+ struct uwb_dev_addr devaddr;
+ char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
+
+ uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
+ if (rsv->target.type == UWB_RSV_TARGET_DEV)
+ devaddr = rsv->target.dev->dev_addr;
+ else
+ devaddr = rsv->target.devaddr;
+ uwb_dev_addr_print(target, sizeof(target), &devaddr);
+
+ dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state));
+}
+
+/*
+ * Get a free stream index for a reservation.
+ *
+ * If the target is a DevAddr (e.g., a WUSB cluster reservation) then
+ * the stream is allocated from a pool of per-RC stream indexes,
+ * otherwise a unique stream index for the target is selected.
+ */
+static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
+{
+ struct uwb_rc *rc = rsv->rc;
+ unsigned long *streams_bm;
+ int stream;
+
+ switch (rsv->target.type) {
+ case UWB_RSV_TARGET_DEV:
+ streams_bm = rsv->target.dev->streams;
+ break;
+ case UWB_RSV_TARGET_DEVADDR:
+ streams_bm = rc->uwb_dev.streams;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS);
+ if (stream >= UWB_NUM_STREAMS)
+ return -EBUSY;
+
+ rsv->stream = stream;
+ set_bit(stream, streams_bm);
+
+ return 0;
+}
+
+static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
+{
+ struct uwb_rc *rc = rsv->rc;
+ unsigned long *streams_bm;
+
+ switch (rsv->target.type) {
+ case UWB_RSV_TARGET_DEV:
+ streams_bm = rsv->target.dev->streams;
+ break;
+ case UWB_RSV_TARGET_DEVADDR:
+ streams_bm = rc->uwb_dev.streams;
+ break;
+ default:
+ return;
+ }
+
+ clear_bit(rsv->stream, streams_bm);
+}
+
+/*
+ * Generate a MAS allocation with a single row component.
+ */
+static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas,
+ int first_mas, int mas_per_zone,
+ int zs, int ze)
+{
+ struct uwb_mas_bm col;
+ int z;
+
+ bitmap_zero(mas->bm, UWB_NUM_MAS);
+ bitmap_zero(col.bm, UWB_NUM_MAS);
+ bitmap_fill(col.bm, mas_per_zone);
+ bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS);
+
+ for (z = zs; z <= ze; z++) {
+ bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS);
+ bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
+ }
+}
+
+/*
+ * Allocate some MAS for this reservation based on current local
+ * availability, the reservation parameters (max_mas, min_mas,
+ * sparsity), and the WiMedia rules for MAS allocations.
+ *
+ * Returns -EBUSY if insufficient free MAS are available.
+ *
+ * FIXME: to simplify this, only safe reservations with a single row
+ * component in zones 1 to 15 are tried (zone 0 is skipped to avoid
+ * problems with the MAS reserved for the BP).
+ *
+ * [ECMA-368] section B.2.
+ */
+static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv)
+{
+ static const int safe_mas_in_row[UWB_NUM_ZONES] = {
+ 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1,
+ };
+ int n, r;
+ struct uwb_mas_bm mas;
+ bool found = false;
+
+ /*
+ * Search all valid safe allocations until either: too few MAS
+ * are available; or the smallest allocation with sufficient
+ * MAS is found.
+ *
+ * The top of the zones is preferred, so space for larger
+ * allocations is available in the bottom of the zone (e.g., a
+ * 15 MAS allocation should start in row 14 leaving space for
+ * a 120 MAS allocation at row 0).
+ */
+ for (n = safe_mas_in_row[0]; n >= 1; n--) {
+ int num_mas;
+
+ num_mas = n * (UWB_NUM_ZONES - 1);
+ if (num_mas < rsv->min_mas)
+ break;
+ if (found && num_mas < rsv->max_mas)
+ break;
+
+ for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) {
+ if (safe_mas_in_row[r] < n)
+ continue;
+ uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES);
+ if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (!found)
+ return -EBUSY;
+
+ bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
+ return 0;
+}
+
+static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
+{
+ int sframes = UWB_MAX_LOST_BEACONS;
+
+ /*
+ * Multicast reservations can become established within 1
+ * superframe and should not be terminated if no response is
+ * received.
+ */
+ if (rsv->is_multicast) {
+ if (rsv->state == UWB_RSV_STATE_O_INITIATED)
+ sframes = 1;
+ if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
+ sframes = 0;
+ }
+
+ rsv->expired = false;
+ if (sframes > 0) {
+ /*
+ * Add an additional 2 superframes to account for the
+ * time to send the SET DRP IE command.
+ */
+ unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US;
+ mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us));
+ } else
+ del_timer(&rsv->timer);
+}
+
+/*
+ * Update a reservation's state and schedule an update of the
+ * transmitted DRP IEs.
+ */
+static void uwb_rsv_state_update(struct uwb_rsv *rsv,
+ enum uwb_rsv_state new_state)
+{
+ rsv->state = new_state;
+ rsv->ie_valid = false;
+
+ uwb_rsv_dump(rsv);
+
+ uwb_rsv_stroke_timer(rsv);
+ uwb_rsv_sched_update(rsv->rc);
+}
+
+static void uwb_rsv_callback(struct uwb_rsv *rsv)
+{
+ if (rsv->callback)
+ rsv->callback(rsv);
+}
+
+void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
+{
+ if (rsv->state == new_state) {
+ switch (rsv->state) {
+ case UWB_RSV_STATE_O_ESTABLISHED:
+ case UWB_RSV_STATE_T_ACCEPTED:
+ case UWB_RSV_STATE_NONE:
+ uwb_rsv_stroke_timer(rsv);
+ break;
+ default:
+ /* Expecting a state transition so leave timer
+ as-is. */
+ break;
+ }
+ return;
+ }
+
+ switch (new_state) {
+ case UWB_RSV_STATE_NONE:
+ uwb_drp_avail_release(rsv->rc, &rsv->mas);
+ uwb_rsv_put_stream(rsv);
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
+ uwb_rsv_callback(rsv);
+ break;
+ case UWB_RSV_STATE_O_INITIATED:
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED);
+ break;
+ case UWB_RSV_STATE_O_PENDING:
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
+ break;
+ case UWB_RSV_STATE_O_ESTABLISHED:
+ uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
+ uwb_rsv_callback(rsv);
+ break;
+ case UWB_RSV_STATE_T_ACCEPTED:
+ uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
+ uwb_rsv_callback(rsv);
+ break;
+ case UWB_RSV_STATE_T_DENIED:
+ uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
+ break;
+ default:
+ dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
+ uwb_rsv_state_str(new_state), new_state);
+ }
+}
+
+static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
+{
+ struct uwb_rsv *rsv;
+
+ rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL);
+ if (!rsv)
+ return NULL;
+
+ INIT_LIST_HEAD(&rsv->rc_node);
+ INIT_LIST_HEAD(&rsv->pal_node);
+ init_timer(&rsv->timer);
+ rsv->timer.function = uwb_rsv_timer;
+ rsv->timer.data = (unsigned long)rsv;
+
+ rsv->rc = rc;
+
+ return rsv;
+}
+
+static void uwb_rsv_free(struct uwb_rsv *rsv)
+{
+ uwb_dev_put(rsv->owner);
+ if (rsv->target.type == UWB_RSV_TARGET_DEV)
+ uwb_dev_put(rsv->target.dev);
+ kfree(rsv);
+}
+
+/**
+ * uwb_rsv_create - allocate and initialize a UWB reservation structure
+ * @rc: the radio controller
+ * @cb: callback to use when the reservation completes or terminates
+ * @pal_priv: data private to the PAL to be passed in the callback
+ *
+ * The callback is called when the state of the reservation changes from:
+ *
+ * - pending to accepted
+ * - pending to denied
+ * - accepted to terminated
+ * - pending to terminated
+ */
+struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv)
+{
+ struct uwb_rsv *rsv;
+
+ rsv = uwb_rsv_alloc(rc);
+ if (!rsv)
+ return NULL;
+
+ rsv->callback = cb;
+ rsv->pal_priv = pal_priv;
+
+ return rsv;
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_create);
+
+void uwb_rsv_remove(struct uwb_rsv *rsv)
+{
+ if (rsv->state != UWB_RSV_STATE_NONE)
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+ del_timer_sync(&rsv->timer);
+ list_del(&rsv->rc_node);
+ uwb_rsv_free(rsv);
+}
+
+/**
+ * uwb_rsv_destroy - free a UWB reservation structure
+ * @rsv: the reservation to free
+ *
+ * The reservation will be terminated if it is pending or established.
+ */
+void uwb_rsv_destroy(struct uwb_rsv *rsv)
+{
+ struct uwb_rc *rc = rsv->rc;
+
+ mutex_lock(&rc->rsvs_mutex);
+ uwb_rsv_remove(rsv);
+ mutex_unlock(&rc->rsvs_mutex);
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_destroy);
+
+/**
+ * uwb_rsv_establish - start a reservation establishment
+ * @rsv: the reservation
+ *
+ * The PAL should fill in @rsv's owner, target, type, max_mas,
+ * min_mas, sparsity and is_multicast fields. If the target is a
+ * uwb_dev it must be referenced.
+ *
+ * The reservation's callback will be called when the reservation is
+ * accepted, denied or times out.
+ */
+int uwb_rsv_establish(struct uwb_rsv *rsv)
+{
+ struct uwb_rc *rc = rsv->rc;
+ int ret;
+
+ mutex_lock(&rc->rsvs_mutex);
+
+ ret = uwb_rsv_get_stream(rsv);
+ if (ret)
+ goto out;
+
+ ret = uwb_rsv_alloc_mas(rsv);
+ if (ret) {
+ uwb_rsv_put_stream(rsv);
+ goto out;
+ }
+
+ list_add_tail(&rsv->rc_node, &rc->reservations);
+ rsv->owner = &rc->uwb_dev;
+ uwb_dev_get(rsv->owner);
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED);
+out:
+ mutex_unlock(&rc->rsvs_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_establish);
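
To illustrate the calling convention described above, here is a minimal PAL-side sketch (not part of the patch; my_rsv_cb(), the peer reference handling and the MAS numbers are made up for the example, mirroring what cmd_rsv_establish() in uwb-debug.c does later in this patch):

static void my_rsv_cb(struct uwb_rsv *rsv)
{
	switch (rsv->state) {
	case UWB_RSV_STATE_O_ESTABLISHED:
		/* reservation is usable; rsv->mas holds the allocated MAS */
		break;
	case UWB_RSV_STATE_NONE:
		/* denied, timed out or terminated */
		break;
	default:
		break;
	}
}

static int my_reserve(struct uwb_rc *rc, struct uwb_dev *peer)
{
	struct uwb_rsv *rsv;
	int ret;

	rsv = uwb_rsv_create(rc, my_rsv_cb, NULL);
	if (rsv == NULL)
		return -ENOMEM;

	rsv->target.type  = UWB_RSV_TARGET_DEV;
	rsv->target.dev   = peer;	/* caller's reference is handed over */
	rsv->type         = UWB_DRP_TYPE_HARD;
	rsv->max_mas      = 64;		/* example numbers only */
	rsv->min_mas      = 16;
	rsv->sparsity     = 1;
	rsv->is_multicast = false;

	ret = uwb_rsv_establish(rsv);
	if (ret)
		uwb_rsv_destroy(rsv);
	return ret;
}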
+
+/**
+ * uwb_rsv_modify - modify an already established reservation
+ * @rsv: the reservation to modify
+ * @max_mas: new maximum MAS to reserve
+ * @min_mas: new minimum MAS to reserve
+ * @sparsity: new sparsity to use
+ *
+ * FIXME: implement this once there are PALs that use it.
+ */
+int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity)
+{
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_modify);
+
+/**
+ * uwb_rsv_terminate - terminate an established reservation
+ * @rsv: the reservation to terminate
+ *
+ * A reservation is terminated by removing the DRP IE from the beacon;
+ * the other end will consider the reservation terminated when it does
+ * not see the DRP IE for at least mMaxLostBeacons.
+ *
+ * If applicable, the reference to the target uwb_dev will be released.
+ */
+void uwb_rsv_terminate(struct uwb_rsv *rsv)
+{
+ struct uwb_rc *rc = rsv->rc;
+
+ mutex_lock(&rc->rsvs_mutex);
+
+ uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
+
+ mutex_unlock(&rc->rsvs_mutex);
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_terminate);
+
+/**
+ * uwb_rsv_accept - accept a new reservation from a peer
+ * @rsv: the reservation
+ * @cb: call back for reservation changes
+ * @pal_priv: data to be passed in the above call back
+ *
+ * Reservation requests from peers are denied unless a PAL accepts
+ * them by calling this function.
+ */
+void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv)
+{
+ rsv->callback = cb;
+ rsv->pal_priv = pal_priv;
+ rsv->state = UWB_RSV_STATE_T_ACCEPTED;
+}
+EXPORT_SYMBOL_GPL(uwb_rsv_accept);
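
On the target side, a PAL is offered incoming requests through the new_rsv hook of its struct uwb_pal (set up with uwb_pal_init()/uwb_pal_register(), as the debug PAL does later in this patch). A minimal sketch, reusing my_rsv_cb() from the sketch above:

static void my_pal_new_rsv(struct uwb_rsv *rsv)
{
	/* accept only hard reservations directed at us */
	if (rsv->type == UWB_DRP_TYPE_HARD)
		uwb_rsv_accept(rsv, my_rsv_cb, NULL);
}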
+
+/*
+ * Does a received DRP IE match this reservation?
+ */
+static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src,
+ struct uwb_ie_drp *drp_ie)
+{
+ struct uwb_dev_addr *rsv_src;
+ int stream;
+
+ stream = uwb_ie_drp_stream_index(drp_ie);
+
+ if (rsv->stream != stream)
+ return false;
+
+ switch (rsv->target.type) {
+ case UWB_RSV_TARGET_DEVADDR:
+ return rsv->stream == stream;
+ case UWB_RSV_TARGET_DEV:
+ if (uwb_ie_drp_owner(drp_ie))
+ rsv_src = &rsv->owner->dev_addr;
+ else
+ rsv_src = &rsv->target.dev->dev_addr;
+ return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0;
+ }
+ return false;
+}
+
+static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
+ struct uwb_dev *src,
+ struct uwb_ie_drp *drp_ie)
+{
+ struct uwb_rsv *rsv;
+ struct uwb_pal *pal;
+ enum uwb_rsv_state state;
+
+ rsv = uwb_rsv_alloc(rc);
+ if (!rsv)
+ return NULL;
+
+ rsv->rc = rc;
+ rsv->owner = src;
+ uwb_dev_get(rsv->owner);
+ rsv->target.type = UWB_RSV_TARGET_DEV;
+ rsv->target.dev = &rc->uwb_dev;
+ rsv->type = uwb_ie_drp_type(drp_ie);
+ rsv->stream = uwb_ie_drp_stream_index(drp_ie);
+ set_bit(rsv->stream, rsv->owner->streams);
+ uwb_drp_ie_to_bm(&rsv->mas, drp_ie);
+
+ /*
+ * See if any PALs are interested in this reservation. If not,
+ * deny the request.
+ */
+ rsv->state = UWB_RSV_STATE_T_DENIED;
+ spin_lock(&rc->pal_lock);
+ list_for_each_entry(pal, &rc->pals, node) {
+ if (pal->new_rsv)
+ pal->new_rsv(rsv);
+ if (rsv->state == UWB_RSV_STATE_T_ACCEPTED)
+ break;
+ }
+ spin_unlock(&rc->pal_lock);
+
+ list_add_tail(&rsv->rc_node, &rc->reservations);
+ state = rsv->state;
+ rsv->state = UWB_RSV_STATE_NONE;
+ uwb_rsv_set_state(rsv, state);
+
+ return rsv;
+}
+
+/**
+ * uwb_rsv_find - find a reservation for a received DRP IE.
+ * @rc: the radio controller
+ * @src: source of the DRP IE
+ * @drp_ie: the DRP IE
+ *
+ * If the reservation cannot be found and the DRP IE is from a peer
+ * attempting to establish a new reservation, create a new reservation
+ * and add it to the list.
+ */
+struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
+ struct uwb_ie_drp *drp_ie)
+{
+ struct uwb_rsv *rsv;
+
+ list_for_each_entry(rsv, &rc->reservations, rc_node) {
+ if (uwb_rsv_match(rsv, src, drp_ie))
+ return rsv;
+ }
+
+ if (uwb_ie_drp_owner(drp_ie))
+ return uwb_rsv_new_target(rc, src, drp_ie);
+
+ return NULL;
+}
+
+/*
+ * Go through all the reservations and check for timeouts and (if
+ * necessary) update their DRP IEs.
+ *
+ * FIXME: look at building the SET_DRP_IE command here rather than
+ * having to rescan the list in uwb_rc_send_all_drp_ie().
+ */
+static bool uwb_rsv_update_all(struct uwb_rc *rc)
+{
+ struct uwb_rsv *rsv, *t;
+ bool ie_updated = false;
+
+ list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
+ if (rsv->expired)
+ uwb_drp_handle_timeout(rsv);
+ if (!rsv->ie_valid) {
+ uwb_drp_ie_update(rsv);
+ ie_updated = true;
+ }
+ }
+
+ return ie_updated;
+}
+
+void uwb_rsv_sched_update(struct uwb_rc *rc)
+{
+ queue_work(rc->rsv_workq, &rc->rsv_update_work);
+}
+
+/*
+ * Update DRP IEs and, if necessary, the DRP Availability IE and send
+ * the updated IEs to the radio controller.
+ */
+static void uwb_rsv_update_work(struct work_struct *work)
+{
+ struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work);
+ bool ie_updated;
+
+ mutex_lock(&rc->rsvs_mutex);
+
+ ie_updated = uwb_rsv_update_all(rc);
+
+ if (!rc->drp_avail.ie_valid) {
+ uwb_drp_avail_ie_update(rc);
+ ie_updated = true;
+ }
+
+ if (ie_updated)
+ uwb_rc_send_all_drp_ie(rc);
+
+ mutex_unlock(&rc->rsvs_mutex);
+}
+
+static void uwb_rsv_timer(unsigned long arg)
+{
+ struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
+
+ rsv->expired = true;
+ uwb_rsv_sched_update(rsv->rc);
+}
+
+void uwb_rsv_init(struct uwb_rc *rc)
+{
+ INIT_LIST_HEAD(&rc->reservations);
+ mutex_init(&rc->rsvs_mutex);
+ INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work);
+
+ bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
+}
+
+int uwb_rsv_setup(struct uwb_rc *rc)
+{
+ char name[16];
+
+ snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev));
+ rc->rsv_workq = create_singlethread_workqueue(name);
+ if (rc->rsv_workq == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void uwb_rsv_cleanup(struct uwb_rc *rc)
+{
+ struct uwb_rsv *rsv, *t;
+
+ mutex_lock(&rc->rsvs_mutex);
+ list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
+ uwb_rsv_remove(rsv);
+ }
+ mutex_unlock(&rc->rsvs_mutex);
+
+ cancel_work_sync(&rc->rsv_update_work);
+ destroy_workqueue(rc->rsv_workq);
+}
diff --git a/drivers/uwb/scan.c b/drivers/uwb/scan.c
new file mode 100644
index 00000000000..2d270748f32
--- /dev/null
+++ b/drivers/uwb/scan.c
@@ -0,0 +1,133 @@
+/*
+ * Ultra Wide Band
+ * Scanning management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ *
+ * FIXME: docs
+ * FIXME: there are issues here on how BEACON and SCAN on USB RCI deal
+ * with each other. Currently seems that START_BEACON while
+ * SCAN_ONLY will cancel the scan, so we need to update the
+ * state here. Clarification request sent by email on
+ * 10/05/2005.
+ * 10/28/2005 No clear answer heard--maybe we'll hack the API
+ * so that when we start beaconing, if the HC is
+ * scanning in a mode not compatible with beaconing
+ * we just fail.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include "uwb-internal.h"
+
+
+/**
+ * Start/stop scanning in a radio controller
+ *
+ * @rc: UWB Radio Controller
+ * @channel: Channel to scan; encodings in WUSB1.0[Table 5.12]
+ * @type: Type of scanning to do.
+ * @bpst_offset: value at which to start scanning (if type ==
+ * UWB_SCAN_ONLY_STARTTIME)
+ * @returns: 0 if ok, < 0 errno code on error
+ *
+ * We put the command on kmalloc'ed memory as some arches cannot do
+ * USB from the stack. The reply event is copied from a staging
+ * buffer, so it can live on the stack. See WUSB1.0[8.6.2.4] for more
+ * details.
+ */
+int uwb_rc_scan(struct uwb_rc *rc,
+ unsigned channel, enum uwb_scan_type type,
+ unsigned bpst_offset)
+{
+ int result;
+ struct uwb_rc_cmd_scan *cmd;
+ struct uwb_rc_evt_confirm reply;
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_kzalloc;
+ mutex_lock(&rc->uwb_dev.mutex);
+ cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
+ cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SCAN);
+ cmd->bChannelNumber = channel;
+ cmd->bScanState = type;
+ cmd->wStartTime = cpu_to_le16(bpst_offset);
+ reply.rceb.bEventType = UWB_RC_CET_GENERAL;
+ reply.rceb.wEvent = UWB_RC_CMD_SCAN;
+ result = uwb_rc_cmd(rc, "SCAN", &cmd->rccb, sizeof(*cmd),
+ &reply.rceb, sizeof(reply));
+ if (result < 0)
+ goto error_cmd;
+ if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
+ dev_err(&rc->uwb_dev.dev,
+ "SCAN: command execution failed: %s (%d)\n",
+ uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
+ result = -EIO;
+ goto error_cmd;
+ }
+ rc->scanning = channel;
+ rc->scan_type = type;
+error_cmd:
+ mutex_unlock(&rc->uwb_dev.mutex);
+ kfree(cmd);
+error_kzalloc:
+ return result;
+}
+
+/*
+ * Print scanning state
+ */
+static ssize_t uwb_rc_scan_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_rc *rc = uwb_dev->rc;
+ ssize_t result;
+
+ mutex_lock(&rc->uwb_dev.mutex);
+ result = sprintf(buf, "%d %d\n", rc->scanning, rc->scan_type);
+ mutex_unlock(&rc->uwb_dev.mutex);
+ return result;
+}
+
+/*
+ * Change the scanning state: parse "<channel> <type> [<bpst_offset>]"
+ * written to sysfs and start scanning accordingly.
+ */
+static ssize_t uwb_rc_scan_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+ struct uwb_rc *rc = uwb_dev->rc;
+ unsigned channel;
+ unsigned type;
+ unsigned bpst_offset = 0;
+ ssize_t result = -EINVAL;
+
+ result = sscanf(buf, "%u %u %u\n", &channel, &type, &bpst_offset);
+ if (result >= 2 && type < UWB_SCAN_TOP)
+ result = uwb_rc_scan(rc, channel, type, bpst_offset);
+
+ return result < 0 ? result : size;
+}
+
+/** Radio Control sysfs interface (declaration) */
+DEVICE_ATTR(scan, S_IRUGO | S_IWUSR, uwb_rc_scan_show, uwb_rc_scan_store);
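
A kernel-side caller would look like the sketch below (illustrative only, not part of the patch; UWB_SCAN_ONLY is assumed to be the plain scan mode in enum uwb_scan_type, and channel encodings follow WUSB1.0[Table 5.12]). From userspace the same request is made by writing "<channel> <type> [<bpst_offset>]" to the per-controller 'scan' sysfs attribute handled by uwb_rc_scan_store() above.

static int my_start_scan(struct uwb_rc *rc)
{
	/* scan channel 9, no BPST offset */
	return uwb_rc_scan(rc, 9, UWB_SCAN_ONLY, 0);
}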
diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c
new file mode 100644
index 00000000000..2d8d62d9f53
--- /dev/null
+++ b/drivers/uwb/umc-bus.c
@@ -0,0 +1,218 @@
+/*
+ * Bus for UWB Multi-interface Controller capabilities.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This file is released under the GNU GPL v2.
+ */
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+#include <linux/uwb/umc.h>
+#include <linux/pci.h>
+
+static int umc_bus_unbind_helper(struct device *dev, void *data)
+{
+ struct device *parent = data;
+
+ if (dev->parent == parent && dev->driver)
+ device_release_driver(dev);
+ return 0;
+}
+
+/**
+ * umc_controller_reset - reset the whole UMC controller
+ * @umc: the UMC device for the radio controller.
+ *
+ * Drivers will be unbound from all UMC devices belonging to the
+ * controller and then the radio controller will be rebound. The
+ * radio controller is expected to do a full hardware reset when it is
+ * probed.
+ *
+ * If this is called while a probe() or remove() is in progress it
+ * will return -EAGAIN and not perform the reset.
+ */
+int umc_controller_reset(struct umc_dev *umc)
+{
+ struct device *parent = umc->dev.parent;
+ int ret;
+
+ if (down_trylock(&parent->sem))
+ return -EAGAIN;
+ bus_for_each_dev(&umc_bus_type, NULL, parent, umc_bus_unbind_helper);
+ ret = device_attach(&umc->dev);
+ if (ret == 1)
+ ret = 0;
+ up(&parent->sem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(umc_controller_reset);
+
+/**
+ * umc_match_pci_id - match a UMC driver to a UMC device's parent PCI device.
+ * @umc_drv: umc driver with match_data pointing to a zero-terminated
+ * table of pci_device_id's.
+ * @umc: umc device whose parent is to be matched.
+ */
+int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc)
+{
+ const struct pci_device_id *id_table = umc_drv->match_data;
+ struct pci_dev *pci;
+
+ if (umc->dev.parent->bus != &pci_bus_type)
+ return 0;
+
+ pci = to_pci_dev(umc->dev.parent);
+ return pci_match_id(id_table, pci) != NULL;
+}
+EXPORT_SYMBOL_GPL(umc_match_pci_id);
+
+static int umc_bus_rescan_helper(struct device *dev, void *data)
+{
+ int ret = 0;
+
+ if (!dev->driver)
+ ret = device_attach(dev);
+
+ return ret < 0 ? ret : 0;
+}
+
+static void umc_bus_rescan(void)
+{
+ int err;
+
+ /*
+ * We can't use bus_rescan_devices() here as it deadlocks when
+ * it tries to retake the dev->parent semaphore.
+ */
+ err = bus_for_each_dev(&umc_bus_type, NULL, NULL, umc_bus_rescan_helper);
+ if (err < 0)
+ printk(KERN_WARNING "%s: rescan of bus failed: %d\n",
+ KBUILD_MODNAME, err);
+}
+
+static int umc_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct umc_dev *umc = to_umc_dev(dev);
+ struct umc_driver *umc_driver = to_umc_driver(drv);
+
+ if (umc->cap_id == umc_driver->cap_id) {
+ if (umc_driver->match)
+ return umc_driver->match(umc_driver, umc);
+ else
+ return 1;
+ }
+ return 0;
+}
+
+static int umc_device_probe(struct device *dev)
+{
+ struct umc_dev *umc;
+ struct umc_driver *umc_driver;
+ int err;
+
+ umc_driver = to_umc_driver(dev->driver);
+ umc = to_umc_dev(dev);
+
+ get_device(dev);
+ err = umc_driver->probe(umc);
+ if (err)
+ put_device(dev);
+ else
+ umc_bus_rescan();
+
+ return err;
+}
+
+static int umc_device_remove(struct device *dev)
+{
+ struct umc_dev *umc;
+ struct umc_driver *umc_driver;
+
+ umc_driver = to_umc_driver(dev->driver);
+ umc = to_umc_dev(dev);
+
+ umc_driver->remove(umc);
+ put_device(dev);
+ return 0;
+}
+
+static int umc_device_suspend(struct device *dev, pm_message_t state)
+{
+ struct umc_dev *umc;
+ struct umc_driver *umc_driver;
+ int err = 0;
+
+ umc = to_umc_dev(dev);
+
+ if (dev->driver) {
+ umc_driver = to_umc_driver(dev->driver);
+ if (umc_driver->suspend)
+ err = umc_driver->suspend(umc, state);
+ }
+ return err;
+}
+
+static int umc_device_resume(struct device *dev)
+{
+ struct umc_dev *umc;
+ struct umc_driver *umc_driver;
+ int err = 0;
+
+ umc = to_umc_dev(dev);
+
+ if (dev->driver) {
+ umc_driver = to_umc_driver(dev->driver);
+ if (umc_driver->resume)
+ err = umc_driver->resume(umc);
+ }
+ return err;
+}
+
+static ssize_t capability_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct umc_dev *umc = to_umc_dev(dev);
+
+ return sprintf(buf, "0x%02x\n", umc->cap_id);
+}
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct umc_dev *umc = to_umc_dev(dev);
+
+ return sprintf(buf, "0x%04x\n", umc->version);
+}
+
+static struct device_attribute umc_dev_attrs[] = {
+ __ATTR_RO(capability_id),
+ __ATTR_RO(version),
+ __ATTR_NULL,
+};
+
+struct bus_type umc_bus_type = {
+ .name = "umc",
+ .match = umc_bus_match,
+ .probe = umc_device_probe,
+ .remove = umc_device_remove,
+ .suspend = umc_device_suspend,
+ .resume = umc_device_resume,
+ .dev_attrs = umc_dev_attrs,
+};
+EXPORT_SYMBOL_GPL(umc_bus_type);
+
+static int __init umc_bus_init(void)
+{
+ return bus_register(&umc_bus_type);
+}
+module_init(umc_bus_init);
+
+static void __exit umc_bus_exit(void)
+{
+ bus_unregister(&umc_bus_type);
+}
+module_exit(umc_bus_exit);
+
+MODULE_DESCRIPTION("UWB Multi-interface Controller capability bus");
+MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/umc-dev.c b/drivers/uwb/umc-dev.c
new file mode 100644
index 00000000000..aa44e1c1a10
--- /dev/null
+++ b/drivers/uwb/umc-dev.c
@@ -0,0 +1,104 @@
+/*
+ * UWB Multi-interface Controller device management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This file is released under the GNU GPL v2.
+ */
+#include <linux/kernel.h>
+#include <linux/uwb/umc.h>
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+static void umc_device_release(struct device *dev)
+{
+ struct umc_dev *umc = to_umc_dev(dev);
+
+ kfree(umc);
+}
+
+/**
+ * umc_device_create - allocate a child UMC device
+ * @parent: parent of the new UMC device.
+ * @n: index of the new device.
+ *
+ * The new UMC device will have the parent's bus ID with '-@n'
+ * appended.
+ */
+struct umc_dev *umc_device_create(struct device *parent, int n)
+{
+ struct umc_dev *umc;
+
+ umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL);
+ if (umc) {
+ snprintf(umc->dev.bus_id, sizeof(umc->dev.bus_id), "%s-%d",
+ parent->bus_id, n);
+ umc->dev.parent = parent;
+ umc->dev.bus = &umc_bus_type;
+ umc->dev.release = umc_device_release;
+
+ umc->dev.dma_mask = parent->dma_mask;
+ }
+ return umc;
+}
+EXPORT_SYMBOL_GPL(umc_device_create);
+
+/**
+ * umc_device_register - register a UMC device
+ * @umc: pointer to the UMC device
+ *
+ * The memory resource for the UMC device is acquired and the device
+ * registered with the system.
+ */
+int umc_device_register(struct umc_dev *umc)
+{
+ int err;
+
+ d_fnstart(3, &umc->dev, "(umc_dev %p)\n", umc);
+
+ err = request_resource(umc->resource.parent, &umc->resource);
+ if (err < 0) {
+ dev_err(&umc->dev, "can't allocate resource range "
+ "%016Lx to %016Lx: %d\n",
+ (unsigned long long)umc->resource.start,
+ (unsigned long long)umc->resource.end,
+ err);
+ goto error_request_resource;
+ }
+
+ err = device_register(&umc->dev);
+ if (err < 0)
+ goto error_device_register;
+ d_fnend(3, &umc->dev, "(umc_dev %p) = 0\n", umc);
+ return 0;
+
+error_device_register:
+ release_resource(&umc->resource);
+error_request_resource:
+ d_fnend(3, &umc->dev, "(umc_dev %p) = %d\n", umc, err);
+ return err;
+}
+EXPORT_SYMBOL_GPL(umc_device_register);
+
+/**
+ * umc_device_unregister - unregister a UMC device
+ * @umc: pointer to the UMC device
+ *
+ * First we unregister the device so the driver can do its resource
+ * release work, then we release any leftover resources. We take a
+ * reference to the device to make sure it doesn't disappear under
+ * our feet.
+ */
+void umc_device_unregister(struct umc_dev *umc)
+{
+ struct device *dev;
+ if (!umc)
+ return;
+ dev = get_device(&umc->dev);
+ d_fnstart(3, dev, "(umc_dev %p)\n", umc);
+ device_unregister(&umc->dev);
+ release_resource(&umc->resource);
+ d_fnend(3, dev, "(umc_dev %p) = void\n", umc);
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(umc_device_unregister);
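
For context, a sketch of how a parent multi-interface driver (e.g. a WHCI PCI driver) might publish one capability as a UMC child (illustrative only, not part of the patch; the capability ID, BAR offset arithmetic and error handling are placeholders):

static struct umc_dev *my_add_cap(struct pci_dev *pci, struct resource *bar,
				  int n, u8 cap_id,
				  resource_size_t offset, resource_size_t size)
{
	struct umc_dev *umc;
	int err;

	umc = umc_device_create(&pci->dev, n);
	if (umc == NULL)
		return NULL;

	umc->cap_id          = cap_id;
	umc->resource.start  = bar->start + offset;
	umc->resource.end    = bar->start + offset + size - 1;
	umc->resource.flags  = bar->flags;
	umc->resource.parent = bar;

	err = umc_device_register(umc);
	if (err < 0) {
		kfree(umc);	/* never registered, so release() won't run */
		return NULL;
	}
	return umc;		/* later torn down with umc_device_unregister() */
}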
diff --git a/drivers/uwb/umc-drv.c b/drivers/uwb/umc-drv.c
new file mode 100644
index 00000000000..367b5eb85d6
--- /dev/null
+++ b/drivers/uwb/umc-drv.c
@@ -0,0 +1,31 @@
+/*
+ * UWB Multi-interface Controller driver management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This file is released under the GNU GPL v2.
+ */
+#include <linux/kernel.h>
+#include <linux/uwb/umc.h>
+
+int __umc_driver_register(struct umc_driver *umc_drv, struct module *module,
+ const char *mod_name)
+{
+ umc_drv->driver.name = umc_drv->name;
+ umc_drv->driver.owner = module;
+ umc_drv->driver.mod_name = mod_name;
+ umc_drv->driver.bus = &umc_bus_type;
+
+ return driver_register(&umc_drv->driver);
+}
+EXPORT_SYMBOL_GPL(__umc_driver_register);
+
+/**
+ * umc_driver_unregister - unregister a UMC capability driver.
+ * @umc_drv: pointer to the driver.
+ */
+void umc_driver_unregister(struct umc_driver *umc_drv)
+{
+ driver_unregister(&umc_drv->driver);
+}
+EXPORT_SYMBOL_GPL(umc_driver_unregister);
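
And the matching consumer side: a skeleton capability driver bound purely by cap_id (illustrative only, not part of the patch; MY_CAP_ID is a hypothetical capability ID, the probe()/remove() prototypes follow how umc_device_probe()/umc_device_remove() above invoke them, and <linux/uwb/umc.h> presumably provides a convenience wrapper around __umc_driver_register()):

static int my_cap_probe(struct umc_dev *umc)
{
	/* ioremap umc->resource, set drvdata, etc. */
	return 0;
}

static void my_cap_remove(struct umc_dev *umc)
{
	/* undo whatever probe did */
}

static struct umc_driver my_cap_driver = {
	.name   = "my-cap",
	.cap_id = MY_CAP_ID,		/* hypothetical capability ID */
	.probe  = my_cap_probe,
	.remove = my_cap_remove,
};

static int __init my_cap_init(void)
{
	return __umc_driver_register(&my_cap_driver, THIS_MODULE,
				     KBUILD_MODNAME);
}
module_init(my_cap_init);

static void __exit my_cap_exit(void)
{
	umc_driver_unregister(&my_cap_driver);
}
module_exit(my_cap_exit);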
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c
new file mode 100644
index 00000000000..6d232c35d07
--- /dev/null
+++ b/drivers/uwb/uwb-debug.c
@@ -0,0 +1,367 @@
+/*
+ * Ultra Wide Band
+ * Debug support
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: doc
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+
+#include <linux/uwb/debug-cmd.h>
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+#include "uwb-internal.h"
+
+void dump_bytes(struct device *dev, const void *_buf, size_t rsize)
+{
+ const char *buf = _buf;
+ char line[32];
+ size_t offset = 0;
+ int cnt, cnt2;
+ for (cnt = 0; cnt < rsize; cnt += 8) {
+ size_t rtop = rsize - cnt < 8 ? rsize - cnt : 8;
+ for (offset = cnt2 = 0; cnt2 < rtop; cnt2++) {
+ offset += scnprintf(line + offset, sizeof(line) - offset,
+ "%02x ", buf[cnt + cnt2] & 0xff);
+ }
+ if (dev)
+ dev_info(dev, "%s\n", line);
+ else
+ printk(KERN_INFO "%s\n", line);
+ }
+}
+EXPORT_SYMBOL_GPL(dump_bytes);
+
+/*
+ * Debug interface
+ *
+ * Per radio controller debugfs files (in uwb/uwbN/):
+ *
+ * command: Flexible command interface (see <linux/uwb/debug-cmd.h>).
+ *
+ * reservations: information on reservations.
+ *
+ * accept: Set to true (Y or 1) to accept reservation requests from
+ * peers.
+ *
+ * drp_avail: DRP availability information.
+ */
+
+struct uwb_dbg {
+ struct uwb_pal pal;
+
+ u32 accept;
+ struct list_head rsvs;
+
+ struct dentry *root_d;
+ struct dentry *command_f;
+ struct dentry *reservations_f;
+ struct dentry *accept_f;
+ struct dentry *drp_avail_f;
+};
+
+static struct dentry *root_dir;
+
+static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv)
+{
+ struct uwb_rc *rc = rsv->rc;
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_dev_addr devaddr;
+ char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
+
+ uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
+ if (rsv->target.type == UWB_RSV_TARGET_DEV)
+ devaddr = rsv->target.dev->dev_addr;
+ else
+ devaddr = rsv->target.devaddr;
+ uwb_dev_addr_print(target, sizeof(target), &devaddr);
+
+ dev_dbg(dev, "debug: rsv %s -> %s: %s\n",
+ owner, target, uwb_rsv_state_str(rsv->state));
+}
+
+static int cmd_rsv_establish(struct uwb_rc *rc,
+ struct uwb_dbg_cmd_rsv_establish *cmd)
+{
+ struct uwb_mac_addr macaddr;
+ struct uwb_rsv *rsv;
+ struct uwb_dev *target;
+ int ret;
+
+ memcpy(&macaddr, cmd->target, sizeof(macaddr));
+ target = uwb_dev_get_by_macaddr(rc, &macaddr);
+ if (target == NULL)
+ return -ENODEV;
+
+ rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, NULL);
+ if (rsv == NULL) {
+ uwb_dev_put(target);
+ return -ENOMEM;
+ }
+
+ rsv->owner = &rc->uwb_dev;
+ rsv->target.type = UWB_RSV_TARGET_DEV;
+ rsv->target.dev = target;
+ rsv->type = cmd->type;
+ rsv->max_mas = cmd->max_mas;
+ rsv->min_mas = cmd->min_mas;
+ rsv->sparsity = cmd->sparsity;
+
+ ret = uwb_rsv_establish(rsv);
+ if (ret)
+ uwb_rsv_destroy(rsv);
+ else
+ list_add_tail(&rsv->pal_node, &rc->dbg->rsvs);
+
+ return ret;
+}
+
+static int cmd_rsv_terminate(struct uwb_rc *rc,
+ struct uwb_dbg_cmd_rsv_terminate *cmd)
+{
+ struct uwb_rsv *rsv, *found = NULL;
+ int i = 0;
+
+ list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) {
+ if (i == cmd->index) {
+ found = rsv;
+ break;
+ }
+ i++;
+ }
+ if (!found)
+ return -EINVAL;
+
+ list_del(&found->pal_node);
+ uwb_rsv_terminate(found);
+
+ return 0;
+}
+
+static int command_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t command_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *off)
+{
+ struct uwb_rc *rc = file->private_data;
+ struct uwb_dbg_cmd cmd;
+ int ret;
+
+ if (len != sizeof(struct uwb_dbg_cmd))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd, buf, len) != 0)
+ return -EFAULT;
+
+ switch (cmd.type) {
+ case UWB_DBG_CMD_RSV_ESTABLISH:
+ ret = cmd_rsv_establish(rc, &cmd.rsv_establish);
+ break;
+ case UWB_DBG_CMD_RSV_TERMINATE:
+ ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret < 0 ? ret : len;
+}
+
+static struct file_operations command_fops = {
+ .open = command_open,
+ .write = command_write,
+ .read = NULL,
+ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+};
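
For completeness, a hedged userspace sketch of driving this command interface (not part of the patch): it assumes debugfs is mounted at /sys/kernel/debug, that the controller shows up as 'uwb0', and that <linux/uwb/debug-cmd.h> is installed for userspace builds.

#include <fcntl.h>
#include <unistd.h>
#include <linux/uwb/debug-cmd.h>

int main(void)
{
	struct uwb_dbg_cmd cmd = {
		.type = UWB_DBG_CMD_RSV_TERMINATE,
		.rsv_terminate = { .index = 0 },	/* first debug reservation */
	};
	int fd = open("/sys/kernel/debug/uwb/uwb0/command", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, &cmd, sizeof(cmd)) != sizeof(cmd)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}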
+
+static int reservations_print(struct seq_file *s, void *p)
+{
+ struct uwb_rc *rc = s->private;
+ struct uwb_rsv *rsv;
+
+ mutex_lock(&rc->rsvs_mutex);
+
+ list_for_each_entry(rsv, &rc->reservations, rc_node) {
+ struct uwb_dev_addr devaddr;
+ char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
+ bool is_owner;
+ char buf[72];
+
+ uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
+ if (rsv->target.type == UWB_RSV_TARGET_DEV) {
+ devaddr = rsv->target.dev->dev_addr;
+ is_owner = &rc->uwb_dev == rsv->owner;
+ } else {
+ devaddr = rsv->target.devaddr;
+ is_owner = true;
+ }
+ uwb_dev_addr_print(target, sizeof(target), &devaddr);
+
+ seq_printf(s, "%c %s -> %s: %s\n",
+ is_owner ? 'O' : 'T',
+ owner, target, uwb_rsv_state_str(rsv->state));
+ seq_printf(s, " stream: %d type: %s\n",
+ rsv->stream, uwb_rsv_type_str(rsv->type));
+ bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS);
+ seq_printf(s, " %s\n", buf);
+ }
+
+ mutex_unlock(&rc->rsvs_mutex);
+
+ return 0;
+}
+
+static int reservations_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, reservations_print, inode->i_private);
+}
+
+static struct file_operations reservations_fops = {
+ .open = reservations_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int drp_avail_print(struct seq_file *s, void *p)
+{
+ struct uwb_rc *rc = s->private;
+ char buf[72];
+
+ bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.global, UWB_NUM_MAS);
+ seq_printf(s, "global: %s\n", buf);
+ bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.local, UWB_NUM_MAS);
+ seq_printf(s, "local: %s\n", buf);
+ bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.pending, UWB_NUM_MAS);
+ seq_printf(s, "pending: %s\n", buf);
+
+ return 0;
+}
+
+static int drp_avail_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, drp_avail_print, inode->i_private);
+}
+
+static struct file_operations drp_avail_fops = {
+ .open = drp_avail_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static void uwb_dbg_new_rsv(struct uwb_rsv *rsv)
+{
+ struct uwb_rc *rc = rsv->rc;
+
+ if (rc->dbg->accept)
+ uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, NULL);
+}
+
+/**
+ * uwb_dbg_add_rc - add a debug interface for a radio controller
+ * @rc: the radio controller
+ */
+void uwb_dbg_add_rc(struct uwb_rc *rc)
+{
+ rc->dbg = kzalloc(sizeof(struct uwb_dbg), GFP_KERNEL);
+ if (rc->dbg == NULL)
+ return;
+
+ INIT_LIST_HEAD(&rc->dbg->rsvs);
+
+ uwb_pal_init(&rc->dbg->pal);
+ rc->dbg->pal.new_rsv = uwb_dbg_new_rsv;
+ uwb_pal_register(rc, &rc->dbg->pal);
+ if (root_dir) {
+ rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev),
+ root_dir);
+ rc->dbg->command_f = debugfs_create_file("command", 0200,
+ rc->dbg->root_d, rc,
+ &command_fops);
+ rc->dbg->reservations_f = debugfs_create_file("reservations", 0444,
+ rc->dbg->root_d, rc,
+ &reservations_fops);
+ rc->dbg->accept_f = debugfs_create_bool("accept", 0644,
+ rc->dbg->root_d,
+ &rc->dbg->accept);
+ rc->dbg->drp_avail_f = debugfs_create_file("drp_avail", 0444,
+ rc->dbg->root_d, rc,
+ &drp_avail_fops);
+ }
+}
+
+/**
+ * uwb_dbg_del_rc - remove a radio controller's debug interface
+ * @rc: the radio controller
+ */
+void uwb_dbg_del_rc(struct uwb_rc *rc)
+{
+ struct uwb_rsv *rsv, *t;
+
+ if (rc->dbg == NULL)
+ return;
+
+ list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) {
+ uwb_rsv_destroy(rsv);
+ }
+
+ uwb_pal_unregister(rc, &rc->dbg->pal);
+
+ if (root_dir) {
+ debugfs_remove(rc->dbg->drp_avail_f);
+ debugfs_remove(rc->dbg->accept_f);
+ debugfs_remove(rc->dbg->reservations_f);
+ debugfs_remove(rc->dbg->command_f);
+ debugfs_remove(rc->dbg->root_d);
+ }
+}
+
+/**
+ * uwb_dbg_init - initialize the debug interface sub-module
+ */
+void uwb_dbg_init(void)
+{
+ root_dir = debugfs_create_dir("uwb", NULL);
+}
+
+/**
+ * uwb_dbg_exit - clean-up the debug interface sub-module
+ */
+void uwb_dbg_exit(void)
+{
+ debugfs_remove(root_dir);
+}
diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h
new file mode 100644
index 00000000000..2ad307d1296
--- /dev/null
+++ b/drivers/uwb/uwb-internal.h
@@ -0,0 +1,305 @@
+/*
+ * Ultra Wide Band
+ * UWB internal API
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ * This contains most of the internal API for UWB: code used across
+ * the stack that, of course, is of no interest to the rest of the
+ * kernel.
+ *
+ * Some parts might end up going public (like uwb_rc_*())...
+ */
+
+#ifndef __UWB_INTERNAL_H__
+#define __UWB_INTERNAL_H__
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/uwb.h>
+#include <linux/mutex.h>
+
+struct uwb_beca_e;
+
+/* General device API */
+extern void uwb_dev_init(struct uwb_dev *uwb_dev);
+extern int __uwb_dev_offair(struct uwb_dev *, struct uwb_rc *);
+extern int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
+ struct uwb_rc *parent_rc);
+extern void uwb_dev_rm(struct uwb_dev *uwb_dev);
+extern void uwbd_dev_onair(struct uwb_rc *, struct uwb_beca_e *);
+extern void uwbd_dev_offair(struct uwb_beca_e *);
+void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event);
+
+/* General UWB Radio Controller Internal API */
+extern struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *);
+static inline struct uwb_rc *__uwb_rc_get(struct uwb_rc *rc)
+{
+ uwb_dev_get(&rc->uwb_dev);
+ return rc;
+}
+
+static inline void __uwb_rc_put(struct uwb_rc *rc)
+{
+ uwb_dev_put(&rc->uwb_dev);
+}
+
+extern int uwb_rc_reset(struct uwb_rc *rc);
+extern int uwb_rc_beacon(struct uwb_rc *rc,
+ int channel, unsigned bpst_offset);
+extern int uwb_rc_scan(struct uwb_rc *rc,
+ unsigned channel, enum uwb_scan_type type,
+ unsigned bpst_offset);
+extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc);
+extern ssize_t uwb_rc_print_IEs(struct uwb_rc *rc, char *, size_t);
+extern void uwb_rc_ie_init(struct uwb_rc *);
+extern ssize_t uwb_rc_ie_setup(struct uwb_rc *);
+extern void uwb_rc_ie_release(struct uwb_rc *);
+extern int uwb_rc_ie_add(struct uwb_rc *,
+ const struct uwb_ie_hdr *, size_t);
+extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie);
+
+extern const char *uwb_rc_strerror(unsigned code);
+
+/*
+ * Time to wait for a response to an RC command.
+ *
+ * Some commands can take a long time to respond; e.g., START_BEACON
+ * may scan for several superframes before joining an existing beacon
+ * group and this can take around 600 ms.
+ */
+#define UWB_RC_CMD_TIMEOUT_MS 1000 /* ms */
+
+/*
+ * Notification/Event Handlers
+ */
+
+struct uwb_rc_neh;
+
+void uwb_rc_neh_create(struct uwb_rc *rc);
+void uwb_rc_neh_destroy(struct uwb_rc *rc);
+
+struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
+ u8 expected_type, u16 expected_event,
+ uwb_rc_cmd_cb_f cb, void *arg);
+void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh);
+void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh);
+void uwb_rc_neh_put(struct uwb_rc_neh *neh);
+
+/* Event size tables */
+extern int uwb_est_create(void);
+extern void uwb_est_destroy(void);
+
+
+/*
+ * UWB Events & management daemon
+ */
+
+/**
+ * enum uwb_event_type - types of UWB management daemon events
+ *
+ * The UWB management daemon (uwbd) can receive two types of events:
+ * UWB_EVT_TYPE_NOTIF - notification from the radio controller.
+ * UWB_EVT_TYPE_MSG - a simple message.
+ */
+enum uwb_event_type {
+ UWB_EVT_TYPE_NOTIF,
+ UWB_EVT_TYPE_MSG,
+};
+
+/**
+ * struct uwb_event_notif - an event for a radio controller notification
+ * @size: Size of the buffer (i.e., guaranteed to contain at least
+ * a full 'struct uwb_rceb')
+ * @rceb: Pointer to a kmalloc()ed event payload
+ */
+struct uwb_event_notif {
+ size_t size;
+ struct uwb_rceb *rceb;
+};
+
+/**
+ * enum uwb_event_message - an event for a message for asynchronous processing
+ *
+ * UWB_EVT_MSG_RESET - reset the radio controller and all PAL hardware.
+ */
+enum uwb_event_message {
+ UWB_EVT_MSG_RESET,
+};
+
+/**
+ * UWB Event
+ * @rc: Radio controller that emitted the event (referenced)
+ * @ts_jiffies: Timestamp, when was it received
+ * @type: This event's type.
+ */
+struct uwb_event {
+ struct list_head list_node;
+ struct uwb_rc *rc;
+ unsigned long ts_jiffies;
+ enum uwb_event_type type;
+ union {
+ struct uwb_event_notif notif;
+ enum uwb_event_message message;
+ };
+};
+
+extern void uwbd_start(void);
+extern void uwbd_stop(void);
+extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask);
+extern void uwbd_event_queue(struct uwb_event *);
+void uwbd_flush(struct uwb_rc *rc);
+
+/* UWB event handlers */
+extern int uwbd_evt_handle_rc_beacon(struct uwb_event *);
+extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *);
+extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *);
+extern int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *);
+extern int uwbd_evt_handle_rc_drp(struct uwb_event *);
+extern int uwbd_evt_handle_rc_drp_avail(struct uwb_event *);
+
+int uwbd_msg_handle_reset(struct uwb_event *evt);
+
+
+/*
+ * Address management
+ */
+int uwb_rc_dev_addr_assign(struct uwb_rc *rc);
+int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt);
+
+/*
+ * UWB Beacon Cache
+ *
+ * Each beacon we receive is kept in a cache; when a beacon is
+ * received consistently, it means there is a new device that we have
+ * to add to the system.
+ */
+
+extern unsigned long beacon_timeout_ms;
+
+/** Beacon cache list */
+struct uwb_beca {
+ struct list_head list;
+ size_t entries;
+ struct mutex mutex;
+};
+
+extern struct uwb_beca uwb_beca;
+
+/**
+ * Beacon cache entry
+ *
+ * @ts_jiffies: last time a beacon was received that refreshed
+ * this cache entry.
+ * @uwb_dev: device connected to this beacon. This pointer is not
+ * safe, you need to get it with uwb_dev_try_get()
+ *
+ * @hits: how many times we have seen this beacon since the last time
+ * we cleared it
+ */
+struct uwb_beca_e {
+ struct mutex mutex;
+ struct kref refcnt;
+ struct list_head node;
+ struct uwb_mac_addr *mac_addr;
+ struct uwb_dev_addr dev_addr;
+ u8 hits;
+ unsigned long ts_jiffies;
+ struct uwb_dev *uwb_dev;
+ struct uwb_rc_evt_beacon *be;
+ struct stats lqe_stats, rssi_stats; /* radio statistics */
+};
+struct uwb_beacon_frame;
+extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *,
+ char *, size_t);
+extern struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *,
+ struct uwb_beacon_frame *,
+ unsigned long);
+
+extern void uwb_bce_kfree(struct kref *_bce);
+static inline void uwb_bce_get(struct uwb_beca_e *bce)
+{
+ kref_get(&bce->refcnt);
+}
+static inline void uwb_bce_put(struct uwb_beca_e *bce)
+{
+ kref_put(&bce->refcnt, uwb_bce_kfree);
+}
+extern void uwb_beca_purge(void);
+extern void uwb_beca_release(void);
+
+struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
+ const struct uwb_dev_addr *devaddr);
+struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
+ const struct uwb_mac_addr *macaddr);
+
+/* -- UWB Sysfs representation */
+extern struct class uwb_rc_class;
+extern struct device_attribute dev_attr_mac_address;
+extern struct device_attribute dev_attr_beacon;
+extern struct device_attribute dev_attr_scan;
+
+/* -- DRP Bandwidth allocator: bandwidth allocations, reservations, DRP */
+void uwb_rsv_init(struct uwb_rc *rc);
+int uwb_rsv_setup(struct uwb_rc *rc);
+void uwb_rsv_cleanup(struct uwb_rc *rc);
+
+void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state);
+void uwb_rsv_remove(struct uwb_rsv *rsv);
+struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
+ struct uwb_ie_drp *drp_ie);
+void uwb_rsv_sched_update(struct uwb_rc *rc);
+
+void uwb_drp_handle_timeout(struct uwb_rsv *rsv);
+int uwb_drp_ie_update(struct uwb_rsv *rsv);
+void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie);
+
+void uwb_drp_avail_init(struct uwb_rc *rc);
+int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas);
+void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas);
+void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas);
+void uwb_drp_avail_ie_update(struct uwb_rc *rc);
+
+/* -- PAL support */
+void uwb_rc_pal_init(struct uwb_rc *rc);
+
+/* -- Misc */
+
+extern ssize_t uwb_mac_frame_hdr_print(char *, size_t,
+ const struct uwb_mac_frame_hdr *);
+
+/* -- Debug interface */
+void uwb_dbg_init(void);
+void uwb_dbg_exit(void);
+void uwb_dbg_add_rc(struct uwb_rc *rc);
+void uwb_dbg_del_rc(struct uwb_rc *rc);
+
+/* Workarounds for version specific stuff */
+
+static inline void uwb_dev_lock(struct uwb_dev *uwb_dev)
+{
+ down(&uwb_dev->dev.sem);
+}
+
+static inline void uwb_dev_unlock(struct uwb_dev *uwb_dev)
+{
+ up(&uwb_dev->dev.sem);
+}
+
+#endif /* #ifndef __UWB_INTERNAL_H__ */
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
new file mode 100644
index 00000000000..78908416e42
--- /dev/null
+++ b/drivers/uwb/uwbd.c
@@ -0,0 +1,410 @@
+/*
+ * Ultra Wide Band
+ * Neighborhood Management Daemon
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * This daemon takes care of maintaining information that describes
+ * the UWB neighborhood that the radios in this machine can see. It
+ * also keeps track of which devices are visible, makes sure each HC
+ * sits on a different channel to avoid interfering, etc.
+ *
+ * Different drivers (radio controller, device, any API in general)
+ * communicate with this daemon through an event queue. The daemon
+ * wakes up, takes the list of events and handles them one by one; the
+ * handling function is looked up in a table based on the event's type
+ * and subtype. Events are freed only if the handling function says so.
+ *
+ * . The lock protecting the event list has to be a spinlock, taken
+ * with IRQSAVE, because it might be acquired from interrupt
+ * context (i.e., when events arrive and the notification drops
+ * down from the ISR).
+ *
+ * . UWB radio controller drivers queue events to the daemon using
+ * uwbd_event_queue(). They take the event, massage it into the
+ * form UWBD expects and pass it in a buffer allocated with
+ * uwb_event_alloc().
+ *
+ * EVENTS
+ *
+ * Events have a type, a subtype, a length, some other metadata and a
+ * data blob whose contents depend on the event. The header is 'struct
+ * uwb_event'; for payloads, see 'struct uwbd_evt_*'.
+ *
+ * EVENT HANDLER TABLES
+ *
+ * To find a handling function for an event, the type is used to index
+ * a subtype-table in the type-table. The subtype-table is indexed
+ * with the subtype to get the function that handles the event. Start
+ * with the main type-table 'uwbd_evt_type_handlers'.
+ *
+ * DEVICES
+ *
+ * Devices are created when a bunch of beacons have been received and
+ * it is established that the device has stable radio presence. CREATED
+ * only, not configured. Devices are ONLY configured when an
+ * Application-Specific IE Probe is received, in which the device
+ * declares which Protocol ID it groks. Then the device is CONFIGURED
+ * (and the driver->probe() stuff of the device model is invoked).
+ *
+ * Devices are considered disconnected when a certain number of
+ * beacons are not received in an amount of time.
+ *
+ * Handler functions are normally named uwbd_evt_handle_*().
+ */
+
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/freezer.h>
+#include "uwb-internal.h"
+
+#define D_LOCAL 1
+#include <linux/uwb/debug.h>
+
+
+/**
+ * UWBD Event handler function signature
+ *
+ * Return !0 if the event need not be freed (i.e., the handler took
+ * ownership of it); 0 means the daemon code will free the event.
+ *
+ * @evt->rc is already referenced and guaranteed to exist. See
+ * uwb_evt_handle().
+ */
+typedef int (*uwbd_evt_handler_f)(struct uwb_event *);
+
+/**
+ * Properties of a UWBD event
+ *
+ * @handler: the function that will handle this event
+ * @name: text name of event
+ */
+struct uwbd_event {
+ uwbd_evt_handler_f handler;
+ const char *name;
+};
+
+/** Table of handlers for and properties of the UWBD Radio Control Events */
+static
+struct uwbd_event uwbd_events[] = {
+ [UWB_RC_EVT_BEACON] = {
+ .handler = uwbd_evt_handle_rc_beacon,
+ .name = "BEACON_RECEIVED"
+ },
+ [UWB_RC_EVT_BEACON_SIZE] = {
+ .handler = uwbd_evt_handle_rc_beacon_size,
+ .name = "BEACON_SIZE_CHANGE"
+ },
+ [UWB_RC_EVT_BPOIE_CHANGE] = {
+ .handler = uwbd_evt_handle_rc_bpoie_change,
+ .name = "BPOIE_CHANGE"
+ },
+ [UWB_RC_EVT_BP_SLOT_CHANGE] = {
+ .handler = uwbd_evt_handle_rc_bp_slot_change,
+ .name = "BP_SLOT_CHANGE"
+ },
+ [UWB_RC_EVT_DRP_AVAIL] = {
+ .handler = uwbd_evt_handle_rc_drp_avail,
+ .name = "DRP_AVAILABILITY_CHANGE"
+ },
+ [UWB_RC_EVT_DRP] = {
+ .handler = uwbd_evt_handle_rc_drp,
+ .name = "DRP"
+ },
+ [UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
+ .handler = uwbd_evt_handle_rc_dev_addr_conflict,
+ .name = "DEV_ADDR_CONFLICT",
+ },
+};
+
+
+
+struct uwbd_evt_type_handler {
+ const char *name;
+ struct uwbd_event *uwbd_events;
+ size_t size;
+};
+
+#define UWBD_EVT_TYPE_HANDLER(n,a) { \
+ .name = (n), \
+ .uwbd_events = (a), \
+ .size = sizeof(a)/sizeof((a)[0]) \
+}
+
+
+/** Table of handlers for each UWBD Event type. */
+static
+struct uwbd_evt_type_handler uwbd_evt_type_handlers[] = {
+ [UWB_RC_CET_GENERAL] = UWBD_EVT_TYPE_HANDLER("RC", uwbd_events)
+};
+
+static const
+size_t uwbd_evt_type_handlers_len =
+ sizeof(uwbd_evt_type_handlers) / sizeof(uwbd_evt_type_handlers[0]);
+
+static const struct uwbd_event uwbd_message_handlers[] = {
+ [UWB_EVT_MSG_RESET] = {
+ .handler = uwbd_msg_handle_reset,
+ .name = "reset",
+ },
+};
+
+static DEFINE_MUTEX(uwbd_event_mutex);
+
+/**
+ * Handle a URC event passed to the UWB Daemon
+ *
+ * @evt: the event to handle
+ * @returns: 0 if the event can be kfreed, !0 otherwise (somebody
+ * else took ownership) [coincidentally, returning a <0 errno
+ * code will free it :)].
+ *
+ * Looks up the two indirection tables (one for the type, one for the
+ * subtype) to decide which function handles it and then calls the
+ * handler.
+ *
+ * The event structure passed to the event handler has the radio
+ * controller in @evt->rc referenced. The reference will be dropped
+ * once the handler returns, so if it needs it for longer (async),
+ * it'll need to take another one.
+ */
+static
+int uwbd_event_handle_urc(struct uwb_event *evt)
+{
+ struct uwbd_evt_type_handler *type_table;
+ uwbd_evt_handler_f handler;
+ u8 type, context;
+ u16 event;
+
+ type = evt->notif.rceb->bEventType;
+ event = le16_to_cpu(evt->notif.rceb->wEvent);
+ context = evt->notif.rceb->bEventContext;
+
+ if (type >= uwbd_evt_type_handlers_len) {
+ printk(KERN_ERR "UWBD: event type %u: unknown (too high)\n", type);
+ return -EINVAL;
+ }
+ type_table = &uwbd_evt_type_handlers[type];
+ if (type_table->uwbd_events == NULL) {
+ printk(KERN_ERR "UWBD: event type %u: unknown\n", type);
+ return -EINVAL;
+ }
+ if (event >= type_table->size) {
+ printk(KERN_ERR "UWBD: event %s[%u]: unknown (too high)\n",
+ type_table->name, event);
+ return -EINVAL;
+ }
+ handler = type_table->uwbd_events[event].handler;
+ if (handler == NULL) {
+ printk(KERN_ERR "UWBD: event %s[%u]: unknown\n", type_table->name, event);
+ return -EINVAL;
+ }
+ return (*handler)(evt);
+}
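+
+/*
+ * Example of the two-level lookup above: a notification with
+ * bEventType == UWB_RC_CET_GENERAL and wEvent == UWB_RC_EVT_BEACON
+ * selects uwbd_evt_type_handlers[UWB_RC_CET_GENERAL] ("RC") and then
+ * its uwbd_events[UWB_RC_EVT_BEACON] slot, so the event ends up in
+ * uwbd_evt_handle_rc_beacon().
+ */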
+
+static void uwbd_event_handle_message(struct uwb_event *evt)
+{
+ struct uwb_rc *rc;
+ int result;
+
+ rc = evt->rc;
+
+ if (evt->message < 0 || evt->message >= ARRAY_SIZE(uwbd_message_handlers)) {
+ dev_err(&rc->uwb_dev.dev, "UWBD: invalid message type %d\n", evt->message);
+ return;
+ }
+
+ /* If this is a reset event we need to drop the
+ * uwbd_event_mutex or it deadlocks when the reset handler
+ * attempts to flush the uwbd events. */
+ if (evt->message == UWB_EVT_MSG_RESET)
+ mutex_unlock(&uwbd_event_mutex);
+
+ result = uwbd_message_handlers[evt->message].handler(evt);
+ if (result < 0)
+ dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n",
+ uwbd_message_handlers[evt->message].name, result);
+
+ if (evt->message == UWB_EVT_MSG_RESET)
+ mutex_lock(&uwbd_event_mutex);
+}
+
+static void uwbd_event_handle(struct uwb_event *evt)
+{
+ struct uwb_rc *rc;
+ int should_keep;
+
+ rc = evt->rc;
+
+ if (rc->ready) {
+ switch (evt->type) {
+ case UWB_EVT_TYPE_NOTIF:
+ should_keep = uwbd_event_handle_urc(evt);
+ if (should_keep <= 0)
+ kfree(evt->notif.rceb);
+ break;
+ case UWB_EVT_TYPE_MSG:
+ uwbd_event_handle_message(evt);
+ break;
+ default:
+ dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n", evt->type);
+ break;
+ }
+ }
+
+ __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */
+}
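+
+/*
+ * Ownership summary: for notifications the rceb buffer is freed here
+ * unless the handler returned > 0 (i.e. it kept the buffer); the
+ * struct uwb_event itself is always freed by the caller (uwbd()), and
+ * the rc reference taken by the producer is dropped via __uwb_rc_put().
+ */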
+/* The UWB Daemon */
+
+
+/** Daemon's PID: used to decide if we can queue or not */
+static int uwbd_pid;
+/** Daemon's task struct for managing the kthread */
+static struct task_struct *uwbd_task;
+/** Daemon's waitqueue for waiting for new events */
+static DECLARE_WAIT_QUEUE_HEAD(uwbd_wq);
+/** Daemon's list of events; we queue/dequeue here */
+static struct list_head uwbd_event_list = LIST_HEAD_INIT(uwbd_event_list);
+/** Daemon's list lock to protect concurrent access */
+static DEFINE_SPINLOCK(uwbd_event_list_lock);
+
+
+/**
+ * UWB Daemon
+ *
+ * Listens to all UWB notifications and tracks the state of the UWB
+ * neighborhood for the kernel. On each run we take the spinlock, move
+ * the event list to a private copy and release the lock, holding it
+ * as briefly as possible. This is safe: we are guaranteed to own the
+ * events in the private list.
+ *
+ * FIXME: should change so we don't have a 1HZ timer all the time, but
+ * only if there are devices.
+ */
+static int uwbd(void *unused)
+{
+ unsigned long flags;
+ struct list_head list = LIST_HEAD_INIT(list);
+ struct uwb_event *evt, *nxt;
+ int should_stop = 0;
+ while (1) {
+ wait_event_interruptible_timeout(
+ uwbd_wq,
+ !list_empty(&uwbd_event_list)
+ || (should_stop = kthread_should_stop()),
+ HZ);
+ if (should_stop)
+ break;
+ try_to_freeze();
+
+ mutex_lock(&uwbd_event_mutex);
+ spin_lock_irqsave(&uwbd_event_list_lock, flags);
+ list_splice_init(&uwbd_event_list, &list);
+ spin_unlock_irqrestore(&uwbd_event_list_lock, flags);
+ list_for_each_entry_safe(evt, nxt, &list, list_node) {
+ list_del(&evt->list_node);
+ uwbd_event_handle(evt);
+ kfree(evt);
+ }
+ mutex_unlock(&uwbd_event_mutex);
+
+ uwb_beca_purge(); /* Purge devices that left */
+ }
+ return 0;
+}
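+
+/*
+ * Note that uwbd_event_list_lock is held only while splicing the
+ * pending events onto the private list; the handlers then run without
+ * the spinlock (they may sleep), serialized by uwbd_event_mutex.
+ */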
+
+
+/** Start the UWB daemon */
+void uwbd_start(void)
+{
+ uwbd_task = kthread_run(uwbd, NULL, "uwbd");
+	if (IS_ERR(uwbd_task))
+ printk(KERN_ERR "UWB: Cannot start management daemon; "
+ "UWB won't work\n");
+ else
+ uwbd_pid = uwbd_task->pid;
+}
+
+/* Stop the UWB daemon and free any unprocessed events */
+void uwbd_stop(void)
+{
+ unsigned long flags;
+ struct uwb_event *evt, *nxt;
+ kthread_stop(uwbd_task);
+ spin_lock_irqsave(&uwbd_event_list_lock, flags);
+ uwbd_pid = 0;
+ list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) {
+ if (evt->type == UWB_EVT_TYPE_NOTIF)
+ kfree(evt->notif.rceb);
+ kfree(evt);
+ }
+ spin_unlock_irqrestore(&uwbd_event_list_lock, flags);
+ uwb_beca_release();
+}
+
+/*
+ * Queue an event for the management daemon
+ *
+ * When some lower layer receives an event, it uses this function to
+ * push it forward to the UWB daemon.
+ *
+ * Once you pass the event, you don't own it any more, but the daemon
+ * does. It will uwb_event_free() it when done, so make sure you
+ * uwb_event_alloc()ed it or bad things will happen.
+ *
+ * If the daemon is not running, we just free the event.
+ */
+void uwbd_event_queue(struct uwb_event *evt)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&uwbd_event_list_lock, flags);
+ if (uwbd_pid != 0) {
+ list_add(&evt->list_node, &uwbd_event_list);
+ wake_up_all(&uwbd_wq);
+ } else {
+ __uwb_rc_put(evt->rc);
+ if (evt->type == UWB_EVT_TYPE_NOTIF)
+ kfree(evt->notif.rceb);
+ kfree(evt);
+ }
+ spin_unlock_irqrestore(&uwbd_event_list_lock, flags);
+ return;
+}
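+
+/*
+ * Sketch of how a producer (e.g. uwb_rc_notif_cb(), which takes the rc
+ * reference via __uwb_rc_get()) hands an event to uwbd_event_queue()
+ * above; allocation and error handling omitted:
+ *
+ *	evt->rc = rc;
+ *	evt->type = UWB_EVT_TYPE_NOTIF;
+ *	evt->notif.rceb = rceb;		(freed by the daemon)
+ *	uwbd_event_queue(evt);
+ */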
+
+void uwbd_flush(struct uwb_rc *rc)
+{
+ struct uwb_event *evt, *nxt;
+
+ mutex_lock(&uwbd_event_mutex);
+
+ spin_lock_irq(&uwbd_event_list_lock);
+ list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) {
+ if (evt->rc == rc) {
+ __uwb_rc_put(rc);
+ list_del(&evt->list_node);
+ if (evt->type == UWB_EVT_TYPE_NOTIF)
+ kfree(evt->notif.rceb);
+ kfree(evt);
+ }
+ }
+ spin_unlock_irq(&uwbd_event_list_lock);
+
+ mutex_unlock(&uwbd_event_mutex);
+}
diff --git a/drivers/uwb/whc-rc.c b/drivers/uwb/whc-rc.c
new file mode 100644
index 00000000000..1711deadb11
--- /dev/null
+++ b/drivers/uwb/whc-rc.c
@@ -0,0 +1,520 @@
+/*
+ * Wireless Host Controller: Radio Control Interface (WHCI v0.95[2.3])
+ * Radio Control command/event transport to the UWB stack
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Initialize and hook up the Radio Control interface.
+ *
+ * For each device probed, creates an 'struct whcrc' which contains
+ * just the representation of the UWB Radio Controller, and the logic
+ * for reading notifications and passing them to the UWB Core.
+ *
+ * So we initialize all of those, register the UWB Radio Controller
+ * and setup the notification/event handle to pipe the notifications
+ * to the UWB management Daemon.
+ *
+ * Once uwb_rc_add() is called, the UWB stack takes control, resets
+ * the radio and readies the device to take commands the UWB
+ * API/user-space.
+ *
+ * Note this driver is just a transport driver; the commands are
+ * formed at the UWB stack and given to this driver who will deliver
+ * them to the hw and transfer the replies/notifications back to the
+ * UWB stack through the UWB daemon (UWBD).
+ */
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/uwb.h>
+#include <linux/uwb/whci.h>
+#include <linux/uwb/umc.h>
+#include "uwb-internal.h"
+
+#define D_LOCAL 0
+#include <linux/uwb/debug.h>
+
+/**
+ * Descriptor for an instance of the UWB Radio Control Driver that
+ * attaches to the URC interface of the WHCI PCI card.
+ *
+ * Unless there is a lock specific to the 'data members', all access
+ * is protected by uwb_rc->mutex.
+ */
+struct whcrc {
+ struct umc_dev *umc_dev;
+ struct uwb_rc *uwb_rc; /* UWB host controller */
+
+ unsigned long area;
+ void __iomem *rc_base;
+ size_t rc_len;
+ spinlock_t irq_lock;
+
+ void *evt_buf, *cmd_buf;
+ dma_addr_t evt_dma_buf, cmd_dma_buf;
+ wait_queue_head_t cmd_wq;
+ struct work_struct event_work;
+};
+
+/**
+ * Execute a UWB RC command on WHCI/RC
+ *
+ * @rc: Instance of a Radio Controller that is a whcrc
+ * @cmd: Buffer containing the RCCB and payload to execute
+ * @cmd_size: Size of the command buffer.
+ *
+ * We copy the command into whcrc->cmd_buf (as it is properly aligned
+ * and physically contiguous) and then press the right keys in the
+ * controller's URCCMD register to get it to read it. We may have to
+ * wait on cmd_wq until the controller is ready to accept a command.
+ *
+ * NOTE: rc's mutex has to be locked
+ */
+static int whcrc_cmd(struct uwb_rc *uwb_rc,
+ const struct uwb_rccb *cmd, size_t cmd_size)
+{
+ int result = 0;
+ struct whcrc *whcrc = uwb_rc->priv;
+ struct device *dev = &whcrc->umc_dev->dev;
+ u32 urccmd;
+
+ d_fnstart(3, dev, "(%p, %p, %zu)\n", uwb_rc, cmd, cmd_size);
+ might_sleep();
+
+ if (cmd_size >= 4096) {
+ result = -E2BIG;
+ goto error;
+ }
+
+ /*
+ * If the URC is halted, then the hardware has reset itself.
+ * Attempt to recover by restarting the device and then return
+ * an error as it's likely that the current command isn't
+ * valid for a newly started RC.
+ */
+ if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) {
+ dev_err(dev, "requesting reset of halted radio controller\n");
+ uwb_rc_reset_all(uwb_rc);
+ result = -EIO;
+ goto error;
+ }
+
+ result = wait_event_timeout(whcrc->cmd_wq,
+ !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2);
+ if (result == 0) {
+ dev_err(dev, "device is not ready to execute commands\n");
+ result = -ETIMEDOUT;
+ goto error;
+ }
+
+ memmove(whcrc->cmd_buf, cmd, cmd_size);
+ le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR);
+
+ spin_lock(&whcrc->irq_lock);
+ urccmd = le_readl(whcrc->rc_base + URCCMD);
+ urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK);
+ le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size,
+ whcrc->rc_base + URCCMD);
+ spin_unlock(&whcrc->irq_lock);
+
+error:
+ d_fnend(3, dev, "(%p, %p, %zu) = %d\n",
+ uwb_rc, cmd, cmd_size, result);
+ return result;
+}
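+
+/*
+ * Submission sequence used above: wait until URCCMD_ACTIVE clears
+ * (the RCI interrupt wakes cmd_wq), copy the RCCB into the coherent
+ * command buffer, point URCCMDADDR at it and finally set
+ * URCCMD_ACTIVE together with URCCMD_IWR and the command size.
+ */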
+
+static int whcrc_reset(struct uwb_rc *rc)
+{
+ struct whcrc *whcrc = rc->priv;
+
+ return umc_controller_reset(whcrc->umc_dev);
+}
+
+/**
+ * Reset event reception mechanism and tell hw we are ready to get more
+ *
+ * We have read all the events in the event buffer, so we are ready to
+ * reset it to the beginning.
+ *
+ * This is only called during initialization or after an event buffer
+ * has been retired. This means we can be sure that event processing
+ * is disabled and it's safe to update the URCEVTADDR register.
+ *
+ * There's no need to wait for the event processing to start as the
+ * URC will not clear URCCMD_ACTIVE until (internal) event buffer
+ * space is available.
+ */
+static
+void whcrc_enable_events(struct whcrc *whcrc)
+{
+ struct device *dev = &whcrc->umc_dev->dev;
+ u32 urccmd;
+
+ d_fnstart(4, dev, "(whcrc %p)\n", whcrc);
+
+ le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR);
+
+ spin_lock(&whcrc->irq_lock);
+ urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE;
+ le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD);
+ spin_unlock(&whcrc->irq_lock);
+
+ d_fnend(4, dev, "(whcrc %p) = void\n", whcrc);
+}
+
+static void whcrc_event_work(struct work_struct *work)
+{
+ struct whcrc *whcrc = container_of(work, struct whcrc, event_work);
+ struct device *dev = &whcrc->umc_dev->dev;
+ size_t size;
+ u64 urcevtaddr;
+
+ urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR);
+ size = urcevtaddr & URCEVTADDR_OFFSET_MASK;
+
+ d_printf(3, dev, "received %zu octet event\n", size);
+ d_dump(4, dev, whcrc->evt_buf, size > 32 ? 32 : size);
+
+ uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size);
+ whcrc_enable_events(whcrc);
+}
+
+/**
+ * Interrupt handler: ack and dispatch the pending interrupt causes
+ *
+ * We ack immediately (and expect the hw to do the right thing and
+ * raise another IRQ if things have changed :)
+ */
+static
+irqreturn_t whcrc_irq_cb(int irq, void *_whcrc)
+{
+ struct whcrc *whcrc = _whcrc;
+ struct device *dev = &whcrc->umc_dev->dev;
+ u32 urcsts;
+
+ urcsts = le_readl(whcrc->rc_base + URCSTS);
+ if (!(urcsts & URCSTS_INT_MASK))
+ return IRQ_NONE;
+ le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS);
+
+ d_printf(4, dev, "acked 0x%08x, urcsts 0x%08x\n",
+ le_readl(whcrc->rc_base + URCSTS), urcsts);
+
+ if (urcsts & URCSTS_HSE) {
+ dev_err(dev, "host system error -- hardware halted\n");
+ /* FIXME: do something sensible here */
+ goto out;
+ }
+ if (urcsts & URCSTS_ER) {
+ d_printf(3, dev, "ER: event ready\n");
+ schedule_work(&whcrc->event_work);
+ }
+ if (urcsts & URCSTS_RCI) {
+ d_printf(3, dev, "RCI: ready to execute another command\n");
+ wake_up_all(&whcrc->cmd_wq);
+ }
+out:
+ return IRQ_HANDLED;
+}
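+
+/*
+ * The handler above only acks and demultiplexes: URCSTS_ER (an event
+ * buffer is ready) defers the parsing to whcrc_event_work() via
+ * schedule_work(), while URCSTS_RCI just wakes anyone sleeping in
+ * whcrc_cmd() waiting to submit the next command.
+ */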
+
+
+/**
+ * Initialize a UMC RC interface: map regions, get (shared) IRQ
+ */
+static
+int whcrc_setup_rc_umc(struct whcrc *whcrc)
+{
+ int result = 0;
+ struct device *dev = &whcrc->umc_dev->dev;
+ struct umc_dev *umc_dev = whcrc->umc_dev;
+
+ whcrc->area = umc_dev->resource.start;
+ whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1;
+ result = -EBUSY;
+ if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME)
+ == NULL) {
+ dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n",
+ whcrc->rc_len, whcrc->area, result);
+ goto error_request_region;
+ }
+
+ whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len);
+ if (whcrc->rc_base == NULL) {
+ dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n",
+ whcrc->rc_len, whcrc->area, result);
+ goto error_ioremap_nocache;
+ }
+
+ result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
+ KBUILD_MODNAME, whcrc);
+ if (result < 0) {
+ dev_err(dev, "can't allocate IRQ %d: %d\n",
+ umc_dev->irq, result);
+ goto error_request_irq;
+ }
+
+ result = -ENOMEM;
+ whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
+ &whcrc->cmd_dma_buf, GFP_KERNEL);
+ if (whcrc->cmd_buf == NULL) {
+ dev_err(dev, "Can't allocate cmd transfer buffer\n");
+ goto error_cmd_buffer;
+ }
+
+ whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
+ &whcrc->evt_dma_buf, GFP_KERNEL);
+ if (whcrc->evt_buf == NULL) {
+ dev_err(dev, "Can't allocate evt transfer buffer\n");
+ goto error_evt_buffer;
+ }
+ d_printf(3, dev, "UWB RC Interface: %zu bytes at 0x%p, irq %u\n",
+ whcrc->rc_len, whcrc->rc_base, umc_dev->irq);
+ return 0;
+
+error_evt_buffer:
+ dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
+ whcrc->cmd_dma_buf);
+error_cmd_buffer:
+ free_irq(umc_dev->irq, whcrc);
+error_request_irq:
+ iounmap(whcrc->rc_base);
+error_ioremap_nocache:
+ release_mem_region(whcrc->area, whcrc->rc_len);
+error_request_region:
+ return result;
+}
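+
+/*
+ * whcrc_release_rc_umc() below undoes the above in reverse order
+ * (event buffer, command buffer, IRQ, iounmap, memory region),
+ * mirroring the error unwind path of whcrc_setup_rc_umc().
+ */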
+
+
+/**
+ * Release RC's UMC resources
+ */
+static
+void whcrc_release_rc_umc(struct whcrc *whcrc)
+{
+ struct umc_dev *umc_dev = whcrc->umc_dev;
+
+ dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf,
+ whcrc->evt_dma_buf);
+ dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
+ whcrc->cmd_dma_buf);
+ free_irq(umc_dev->irq, whcrc);
+ iounmap(whcrc->rc_base);
+ release_mem_region(whcrc->area, whcrc->rc_len);
+}
+
+
+/**
+ * whcrc_start_rc - start a WHCI radio controller
+ * @rc: the radio controller to start
+ *
+ * Reset the UMC device, start the radio controller, enable events and
+ * finally enable interrupts.
+ */
+static int whcrc_start_rc(struct uwb_rc *rc)
+{
+ struct whcrc *whcrc = rc->priv;
+ int result = 0;
+ struct device *dev = &whcrc->umc_dev->dev;
+ unsigned long start, duration;
+
+ /* Reset the thing */
+ le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD);
+ if (d_test(3))
+ start = jiffies;
+ if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0,
+ 5000, "device to reset at init") < 0) {
+ result = -EBUSY;
+ goto error;
+ } else if (d_test(3)) {
+ duration = jiffies - start;
+ if (duration > msecs_to_jiffies(40))
+ dev_err(dev, "Device took %ums to "
+ "reset. MAX expected: 40ms\n",
+ jiffies_to_msecs(duration));
+ }
+
+ /* Set the event buffer, start the controller (enable IRQs later) */
+ le_writel(0, whcrc->rc_base + URCINTR);
+ le_writel(URCCMD_RS, whcrc->rc_base + URCCMD);
+ result = -ETIMEDOUT;
+ if (d_test(3))
+ start = jiffies;
+ if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0,
+ 5000, "device to start") < 0)
+ goto error;
+ if (d_test(3)) {
+ duration = jiffies - start;
+ if (duration > msecs_to_jiffies(40))
+ dev_err(dev, "Device took %ums to start. "
+ "MAX expected: 40ms\n",
+ jiffies_to_msecs(duration));
+ }
+ whcrc_enable_events(whcrc);
+ result = 0;
+ le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR);
+error:
+ return result;
+}
+
+
+/**
+ * whcrc_stop_rc - stop a WHCI radio controller
+ * @rc: the radio controller to stop
+ *
+ * Disable interrupts and cancel any pending event processing work
+ * before clearing the Run/Stop bit.
+ */
+static
+void whcrc_stop_rc(struct uwb_rc *rc)
+{
+ struct whcrc *whcrc = rc->priv;
+ struct umc_dev *umc_dev = whcrc->umc_dev;
+
+ le_writel(0, whcrc->rc_base + URCINTR);
+ cancel_work_sync(&whcrc->event_work);
+
+ le_writel(0, whcrc->rc_base + URCCMD);
+ whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
+ URCSTS_HALTED, 0, 40, "URCSTS.HALTED");
+}
+
+static void whcrc_init(struct whcrc *whcrc)
+{
+ spin_lock_init(&whcrc->irq_lock);
+ init_waitqueue_head(&whcrc->cmd_wq);
+ INIT_WORK(&whcrc->event_work, whcrc_event_work);
+}
+
+/**
+ * Initialize the radio controller.
+ *
+ * NOTE: we setup whcrc->uwb_rc before calling uwb_rc_add(); in the
+ * IRQ handler we use that to determine if the hw is ready to
+ * handle events. Looks like a race condition, but it really is
+ * not.
+ */
+static
+int whcrc_probe(struct umc_dev *umc_dev)
+{
+ int result;
+ struct uwb_rc *uwb_rc;
+ struct whcrc *whcrc;
+ struct device *dev = &umc_dev->dev;
+
+ d_fnstart(3, dev, "(umc_dev %p)\n", umc_dev);
+ result = -ENOMEM;
+ uwb_rc = uwb_rc_alloc();
+ if (uwb_rc == NULL) {
+ dev_err(dev, "unable to allocate RC instance\n");
+ goto error_rc_alloc;
+ }
+ whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL);
+ if (whcrc == NULL) {
+ dev_err(dev, "unable to allocate WHC-RC instance\n");
+ goto error_alloc;
+ }
+ whcrc_init(whcrc);
+ whcrc->umc_dev = umc_dev;
+
+ result = whcrc_setup_rc_umc(whcrc);
+ if (result < 0) {
+ dev_err(dev, "Can't setup RC UMC interface: %d\n", result);
+ goto error_setup_rc_umc;
+ }
+ whcrc->uwb_rc = uwb_rc;
+
+ uwb_rc->owner = THIS_MODULE;
+ uwb_rc->cmd = whcrc_cmd;
+ uwb_rc->reset = whcrc_reset;
+ uwb_rc->start = whcrc_start_rc;
+ uwb_rc->stop = whcrc_stop_rc;
+
+ result = uwb_rc_add(uwb_rc, dev, whcrc);
+ if (result < 0)
+ goto error_rc_add;
+ umc_set_drvdata(umc_dev, whcrc);
+ d_fnend(3, dev, "(umc_dev %p) = 0\n", umc_dev);
+ return 0;
+
+error_rc_add:
+ whcrc_release_rc_umc(whcrc);
+error_setup_rc_umc:
+ kfree(whcrc);
+error_alloc:
+ uwb_rc_put(uwb_rc);
+error_rc_alloc:
+ d_fnend(3, dev, "(umc_dev %p) = %d\n", umc_dev, result);
+ return result;
+}
+
+/**
+ * Clean up the radio control resources
+ *
+ * When we up the command semaphore, everybody possibly held trying to
+ * execute a command is granted entry; they will then see that the
+ * host is quiescing and up it again (so it chains to the next
+ * waiter). This should not happen in any case, as we can only remove
+ * the device when there are no handles open...
+ */
+static void whcrc_remove(struct umc_dev *umc_dev)
+{
+ struct whcrc *whcrc = umc_get_drvdata(umc_dev);
+ struct uwb_rc *uwb_rc = whcrc->uwb_rc;
+
+ umc_set_drvdata(umc_dev, NULL);
+ uwb_rc_rm(uwb_rc);
+ whcrc_release_rc_umc(whcrc);
+ kfree(whcrc);
+ uwb_rc_put(uwb_rc);
+ d_printf(1, &umc_dev->dev, "freed whcrc %p\n", whcrc);
+}
+
+/* PCI device ID's that we handle [so it gets loaded] */
+static struct pci_device_id whcrc_id_table[] = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
+ { /* empty last entry */ }
+};
+MODULE_DEVICE_TABLE(pci, whcrc_id_table);
+
+static struct umc_driver whcrc_driver = {
+ .name = "whc-rc",
+ .cap_id = UMC_CAP_ID_WHCI_RC,
+ .probe = whcrc_probe,
+ .remove = whcrc_remove,
+};
+
+static int __init whcrc_driver_init(void)
+{
+ return umc_driver_register(&whcrc_driver);
+}
+module_init(whcrc_driver_init);
+
+static void __exit whcrc_driver_exit(void)
+{
+ umc_driver_unregister(&whcrc_driver);
+}
+module_exit(whcrc_driver_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/whci.c b/drivers/uwb/whci.c
new file mode 100644
index 00000000000..3df2388f908
--- /dev/null
+++ b/drivers/uwb/whci.c
@@ -0,0 +1,269 @@
+/*
+ * WHCI UWB Multi-interface Controller enumerator.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This file is released under the GNU GPL v2.
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/uwb/whci.h>
+#include <linux/uwb/umc.h>
+
+struct whci_card {
+ struct pci_dev *pci;
+ void __iomem *uwbbase;
+ u8 n_caps;
+ struct umc_dev *devs[0];
+};
+
+
+/* Fix faulty HW :( */
+static
+u64 whci_capdata_quirks(struct whci_card *card, u64 capdata)
+{
+ u64 capdata_orig = capdata;
+ struct pci_dev *pci_dev = card->pci;
+ if (pci_dev->vendor == PCI_VENDOR_ID_INTEL
+ && (pci_dev->device == 0x0c3b || pci_dev->device == 0004)
+ && pci_dev->class == 0x0d1010) {
+ switch (UWBCAPDATA_TO_CAP_ID(capdata)) {
+ /* WLP capability has 0x100 bytes of aperture */
+ case 0x80:
+ capdata |= 0x40 << 8; break;
+ /* WUSB capability has 0x80 bytes of aperture
+ * and ID is 1 */
+ case 0x02:
+ capdata &= ~0xffff;
+ capdata |= 0x2001;
+ break;
+ }
+ }
+ if (capdata_orig != capdata)
+ dev_warn(&pci_dev->dev,
+ "PCI v%04x d%04x c%06x#%02x: "
+ "corrected capdata from %016Lx to %016Lx\n",
+ pci_dev->vendor, pci_dev->device, pci_dev->class,
+ (unsigned)UWBCAPDATA_TO_CAP_ID(capdata),
+ (unsigned long long)capdata_orig,
+ (unsigned long long)capdata);
+ return capdata;
+}
+
+
+/**
+ * whci_wait_for - wait for a WHCI register to be set
+ *
+ * Polls (for at most @max_ms ms) until '*@reg & @mask == @result'.
+ */
+int whci_wait_for(struct device *dev, u32 __iomem *reg, u32 mask, u32 result,
+ unsigned long max_ms, const char *tag)
+{
+ unsigned t = 0;
+ u32 val;
+ for (;;) {
+ val = le_readl(reg);
+ if ((val & mask) == result)
+ break;
+ msleep(10);
+ if (t >= max_ms) {
+			dev_err(dev, "timed out waiting for %s\n", tag);
+ return -ETIMEDOUT;
+ }
+ t += 10;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(whci_wait_for);
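+
+/*
+ * Typical use, as in whc-rc.c: poll until a reset bit self-clears,
+ * for example
+ *
+ *	whci_wait_for(dev, base + URCCMD, URCCMD_RESET, 0,
+ *		      5000, "device to reset");
+ *
+ * returns 0 once (URCCMD & URCCMD_RESET) == 0, or -ETIMEDOUT after
+ * roughly 5000 ms of polling every 10 ms.
+ */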
+
+
+/*
+ * NOTE: the capinfo and capdata registers are slightly different
+ *       (size and cap-id fields), so for cap #0 we need to fill
+ *       those in ourselves. The size comes from the size of the
+ *       register block (statically known); the cap_id comes from
+ *       nowhere, so we use zero (a reserved value) for the radio
+ *       controller, because none was defined at the spec level.
+ */
+static int whci_add_cap(struct whci_card *card, int n)
+{
+ struct umc_dev *umc;
+ u64 capdata;
+ int bar, err;
+
+ umc = umc_device_create(&card->pci->dev, n);
+ if (umc == NULL)
+ return -ENOMEM;
+
+ capdata = le_readq(card->uwbbase + UWBCAPDATA(n));
+
+ bar = UWBCAPDATA_TO_BAR(capdata) << 1;
+
+ capdata = whci_capdata_quirks(card, capdata);
+	/* Capability 0 is the radio controller. Its size is 32
+	 * bytes (WHCI0.95[2.3, T2-9]). */
+ umc->version = UWBCAPDATA_TO_VERSION(capdata);
+ umc->cap_id = n == 0 ? 0 : UWBCAPDATA_TO_CAP_ID(capdata);
+ umc->bar = bar;
+ umc->resource.start = pci_resource_start(card->pci, bar)
+ + UWBCAPDATA_TO_OFFSET(capdata);
+ umc->resource.end = umc->resource.start
+ + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1;
+ umc->resource.name = umc->dev.bus_id;
+ umc->resource.flags = card->pci->resource[bar].flags;
+ umc->resource.parent = &card->pci->resource[bar];
+ umc->irq = card->pci->irq;
+
+ err = umc_device_register(umc);
+ if (err < 0)
+ goto error;
+ card->devs[n] = umc;
+ return 0;
+
+error:
+ kfree(umc);
+ return err;
+}
+
+static void whci_del_cap(struct whci_card *card, int n)
+{
+ struct umc_dev *umc = card->devs[n];
+
+ if (umc != NULL)
+ umc_device_unregister(umc);
+}
+
+static int whci_n_caps(struct pci_dev *pci)
+{
+ void __iomem *uwbbase;
+ u64 capinfo;
+
+ uwbbase = pci_iomap(pci, 0, 8);
+ if (!uwbbase)
+ return -ENOMEM;
+ capinfo = le_readq(uwbbase + UWBCAPINFO);
+ pci_iounmap(pci, uwbbase);
+
+ return UWBCAPINFO_TO_N_CAPS(capinfo);
+}
+
+static int whci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ struct whci_card *card;
+ int err, n_caps, n;
+
+ err = pci_enable_device(pci);
+ if (err < 0)
+ goto error;
+ pci_enable_msi(pci);
+ pci_set_master(pci);
+ err = -ENXIO;
+ if (!pci_set_dma_mask(pci, DMA_64BIT_MASK))
+ pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK);
+ else if (!pci_set_dma_mask(pci, DMA_32BIT_MASK))
+ pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK);
+ else
+ goto error_dma;
+
+ err = n_caps = whci_n_caps(pci);
+ if (n_caps < 0)
+ goto error_ncaps;
+
+ err = -ENOMEM;
+ card = kzalloc(sizeof(struct whci_card)
+		       + sizeof(struct umc_dev *) * (n_caps + 1),
+ GFP_KERNEL);
+ if (card == NULL)
+ goto error_kzalloc;
+ card->pci = pci;
+ card->n_caps = n_caps;
+
+ err = -EBUSY;
+ if (!request_mem_region(pci_resource_start(pci, 0),
+ UWBCAPDATA_SIZE(card->n_caps),
+ "whci (capability data)"))
+ goto error_request_memregion;
+ err = -ENOMEM;
+ card->uwbbase = pci_iomap(pci, 0, UWBCAPDATA_SIZE(card->n_caps));
+ if (!card->uwbbase)
+ goto error_iomap;
+
+ /* Add each capability. */
+ for (n = 0; n <= card->n_caps; n++) {
+ err = whci_add_cap(card, n);
+ if (err < 0 && n == 0) {
+ dev_err(&pci->dev, "cannot bind UWB radio controller:"
+ " %d\n", err);
+ goto error_bind;
+ }
+ if (err < 0)
+ dev_warn(&pci->dev, "warning: cannot bind capability "
+ "#%u: %d\n", n, err);
+ }
+ pci_set_drvdata(pci, card);
+ return 0;
+
+error_bind:
+ pci_iounmap(pci, card->uwbbase);
+error_iomap:
+ release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps));
+error_request_memregion:
+ kfree(card);
+error_kzalloc:
+error_ncaps:
+error_dma:
+ pci_disable_msi(pci);
+ pci_disable_device(pci);
+error:
+ return err;
+}
+
+static void whci_remove(struct pci_dev *pci)
+{
+ struct whci_card *card = pci_get_drvdata(pci);
+ int n;
+
+ pci_set_drvdata(pci, NULL);
+ /* Unregister each capability in reverse (so the master device
+ * is unregistered last). */
+ for (n = card->n_caps; n >= 0 ; n--)
+ whci_del_cap(card, n);
+ pci_iounmap(pci, card->uwbbase);
+ release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps));
+ kfree(card);
+ pci_disable_msi(pci);
+ pci_disable_device(pci);
+}
+
+static struct pci_device_id whci_id_table[] = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, whci_id_table);
+
+
+static struct pci_driver whci_driver = {
+ .name = "whci",
+ .id_table = whci_id_table,
+ .probe = whci_probe,
+ .remove = whci_remove,
+};
+
+static int __init whci_init(void)
+{
+ return pci_register_driver(&whci_driver);
+}
+
+static void __exit whci_exit(void)
+{
+ pci_unregister_driver(&whci_driver);
+}
+
+module_init(whci_init);
+module_exit(whci_exit);
+
+MODULE_DESCRIPTION("WHCI UWB Multi-interface Controller enumerator");
+MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/wlp/Makefile b/drivers/uwb/wlp/Makefile
new file mode 100644
index 00000000000..c72c11db5b1
--- /dev/null
+++ b/drivers/uwb/wlp/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_UWB_WLP) := wlp.o
+
+wlp-objs := \
+ driver.o \
+ eda.o \
+ messages.o \
+ sysfs.o \
+ txrx.o \
+ wlp-lc.o \
+ wss-lc.o
diff --git a/drivers/uwb/wlp/driver.c b/drivers/uwb/wlp/driver.c
new file mode 100644
index 00000000000..cb8d699b6a6
--- /dev/null
+++ b/drivers/uwb/wlp/driver.c
@@ -0,0 +1,43 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Life cycle of WLP substack
+ *
+ * FIXME: Docs
+ */
+
+#include <linux/module.h>
+
+static int __init wlp_subsys_init(void)
+{
+ return 0;
+}
+module_init(wlp_subsys_init);
+
+static void __exit wlp_subsys_exit(void)
+{
+ return;
+}
+module_exit(wlp_subsys_exit);
+
+MODULE_AUTHOR("Reinette Chatre <reinette.chatre@intel.com>");
+MODULE_DESCRIPTION("WiMedia Logical Link Control Protocol (WLP)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/wlp/eda.c b/drivers/uwb/wlp/eda.c
new file mode 100644
index 00000000000..cdfe8dfc434
--- /dev/null
+++ b/drivers/uwb/wlp/eda.c
@@ -0,0 +1,449 @@
+/*
+ * WUSB Wire Adapter: WLP interface
+ * Ethernet to device address cache
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * We need to be able to map ethernet addresses to device addresses
+ * and back because there is no explicit relationship between the eth
+ * addresses used in the ETH frames and the device addresses (no, it
+ * would not have been simpler to force as ETH address the MBOA MAC
+ * address...no, not at all :).
+ *
+ * A device has one MBOA MAC address and one device address. It is possible
+ * for a device to have more than one virtual MAC address (although a
+ * virtual address can be the same as the MBOA MAC address). The device
+ * address is guaranteed to be unique among the devices in the extended
+ * beacon group (see ECMA 17.1.1). We thus use the device address as index
+ * to this cache. We do allow searching based on virtual address as this
+ * is how Ethernet frames will be addressed.
+ *
+ * We need to support virtual EUI-48. Although, right now the virtual
+ * EUI-48 will always be the same as the MAC SAP address. The EDA cache
+ * entry thus contains a MAC SAP address as well as the virtual address
+ * (used to map the network stack address to a neighbor). When we move
+ * to support more than one virtual MAC on a host then this organization
+ * will have to change. Perhaps a neighbor has a list of WSSs, each with a
+ * tag and virtual EUI-48.
+ *
+ * On data transmission
+ * it is used to determine if the neighbor is connected and what WSS it
+ * belongs to. With this we know what tag to add to the WLP frame. Storing
+ * the WSS in the EDA cache may be overkill because we only support one
+ * WSS. Hopefully we will support more than one WSS at some point.
+ * On data reception it is used to determine the WSS based on
+ * the tag and address of the transmitting neighbor.
+ */
+
+#define D_LOCAL 5
+#include <linux/netdevice.h>
+#include <linux/uwb/debug.h>
+#include <linux/etherdevice.h>
+#include <linux/wlp.h>
+#include "wlp-internal.h"
+
+
+/* FIXME: cache is not purged, only on device close */
+
+/* FIXME: does not scale, change to dynamic array */
+
+/*
+ * Initialize the EDA cache
+ *
+ * Call when the interface is being brought up
+ *
+ * NOTE: Keep it as a separate function as the implementation will
+ * change and be more complex.
+ */
+void wlp_eda_init(struct wlp_eda *eda)
+{
+ INIT_LIST_HEAD(&eda->cache);
+ spin_lock_init(&eda->lock);
+}
+
+/*
+ * Release the EDA cache
+ *
+ * Called when the interface is brought down
+ */
+void wlp_eda_release(struct wlp_eda *eda)
+{
+ unsigned long flags;
+ struct wlp_eda_node *itr, *next;
+
+ spin_lock_irqsave(&eda->lock, flags);
+ list_for_each_entry_safe(itr, next, &eda->cache, list_node) {
+ list_del(&itr->list_node);
+ kfree(itr);
+ }
+ spin_unlock_irqrestore(&eda->lock, flags);
+}
+
+/*
+ * Add an address mapping
+ *
+ * @returns 0 if ok, < 0 errno code on error
+ *
+ * An address mapping is initially created when the neighbor device is seen
+ * for the first time (it is "onair"). At this time the neighbor is not
+ * connected or associated with a WSS so we only populate the Ethernet and
+ * Device address fields.
+ *
+ */
+int wlp_eda_create_node(struct wlp_eda *eda,
+ const unsigned char eth_addr[ETH_ALEN],
+ const struct uwb_dev_addr *dev_addr)
+{
+ int result = 0;
+ struct wlp_eda_node *itr;
+ unsigned long flags;
+
+ BUG_ON(dev_addr == NULL || eth_addr == NULL);
+ spin_lock_irqsave(&eda->lock, flags);
+ list_for_each_entry(itr, &eda->cache, list_node) {
+ if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
+ printk(KERN_ERR "EDA cache already contains entry "
+ "for neighbor %02x:%02x\n",
+ dev_addr->data[1], dev_addr->data[0]);
+ result = -EEXIST;
+ goto out_unlock;
+ }
+ }
+ itr = kzalloc(sizeof(*itr), GFP_ATOMIC);
+ if (itr != NULL) {
+ memcpy(itr->eth_addr, eth_addr, sizeof(itr->eth_addr));
+ itr->dev_addr = *dev_addr;
+ list_add(&itr->list_node, &eda->cache);
+ } else
+ result = -ENOMEM;
+out_unlock:
+ spin_unlock_irqrestore(&eda->lock, flags);
+ return result;
+}
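+
+/*
+ * Lifecycle of an EDA entry: created here with only the Ethernet and
+ * device address when the neighbor comes on air, filled in by
+ * wlp_eda_update_node() (WSS, virtual address, tag, state) once a
+ * connection is established, and removed by wlp_eda_rm_node() when
+ * the neighbor goes off air.
+ */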
+
+/*
+ * Remove entry from EDA cache
+ *
+ * This is done when the device goes off air.
+ */
+void wlp_eda_rm_node(struct wlp_eda *eda, const struct uwb_dev_addr *dev_addr)
+{
+ struct wlp_eda_node *itr, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&eda->lock, flags);
+ list_for_each_entry_safe(itr, next, &eda->cache, list_node) {
+ if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
+ list_del(&itr->list_node);
+ kfree(itr);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&eda->lock, flags);
+}
+
+/*
+ * Update an address mapping
+ *
+ * @returns 0 if ok, < 0 errno code on error
+ */
+int wlp_eda_update_node(struct wlp_eda *eda,
+ const struct uwb_dev_addr *dev_addr,
+ struct wlp_wss *wss,
+ const unsigned char virt_addr[ETH_ALEN],
+ const u8 tag, const enum wlp_wss_connect state)
+{
+ int result = -ENOENT;
+ struct wlp_eda_node *itr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&eda->lock, flags);
+ list_for_each_entry(itr, &eda->cache, list_node) {
+ if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
+ /* Found it, update it */
+ itr->wss = wss;
+ memcpy(itr->virt_addr, virt_addr,
+ sizeof(itr->virt_addr));
+ itr->tag = tag;
+ itr->state = state;
+ result = 0;
+ goto out_unlock;
+ }
+ }
+ /* Not found */
+out_unlock:
+ spin_unlock_irqrestore(&eda->lock, flags);
+ return result;
+}
+
+/*
+ * Update only state field of an address mapping
+ *
+ * @returns 0 if ok, < 0 errno code on error
+ */
+int wlp_eda_update_node_state(struct wlp_eda *eda,
+ const struct uwb_dev_addr *dev_addr,
+ const enum wlp_wss_connect state)
+{
+ int result = -ENOENT;
+ struct wlp_eda_node *itr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&eda->lock, flags);
+ list_for_each_entry(itr, &eda->cache, list_node) {
+ if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
+ /* Found it, update it */
+ itr->state = state;
+ result = 0;
+ goto out_unlock;
+ }
+ }
+ /* Not found */
+out_unlock:
+ spin_unlock_irqrestore(&eda->lock, flags);
+ return result;
+}
+
+/*
+ * Return contents of EDA cache entry
+ *
+ * @dev_addr: index to EDA cache
+ * @eda_entry: pointer to where contents of EDA cache will be copied
+ */
+int wlp_copy_eda_node(struct wlp_eda *eda, struct uwb_dev_addr *dev_addr,
+ struct wlp_eda_node *eda_entry)
+{
+ int result = -ENOENT;
+ struct wlp_eda_node *itr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&eda->lock, flags);
+ list_for_each_entry(itr, &eda->cache, list_node) {
+ if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
+ *eda_entry = *itr;
+ result = 0;
+ goto out_unlock;
+ }
+ }
+ /* Not found */
+out_unlock:
+ spin_unlock_irqrestore(&eda->lock, flags);
+ return result;
+}
+
+/*
+ * Execute function for every element in the cache
+ *
+ * @function: function to execute on element of cache (must be atomic)
+ * @priv: private data of function
+ * @returns: result of first function that failed, or last function
+ * executed if no function failed.
+ *
+ * Stop executing when function returns error for any element in cache.
+ *
+ * IMPORTANT: We are using a spinlock here: the function executed on each
+ * element has to be atomic.
+ */
+int wlp_eda_for_each(struct wlp_eda *eda, wlp_eda_for_each_f function,
+ void *priv)
+{
+ int result = 0;
+ struct wlp *wlp = container_of(eda, struct wlp, eda);
+ struct wlp_eda_node *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&eda->lock, flags);
+ list_for_each_entry(entry, &eda->cache, list_node) {
+ result = (*function)(wlp, entry, priv);
+ if (result < 0)
+ break;
+ }
+ spin_unlock_irqrestore(&eda->lock, flags);
+ return result;
+}
+
+/*
+ * Execute function for single element in the cache (return dev addr)
+ *
+ * @virt_addr: index into EDA cache used to determine which element to
+ * execute the function on
+ * @dev_addr: device address of element in cache will be returned using
+ * @dev_addr
+ * @function: function to execute on element of cache (must be atomic)
+ * @priv: private data of function
+ * @returns: result of function
+ *
+ * IMPORTANT: We are using a spinlock here: the function executed on the
+ * element has to be atomic.
+ */
+int wlp_eda_for_virtual(struct wlp_eda *eda,
+ const unsigned char virt_addr[ETH_ALEN],
+ struct uwb_dev_addr *dev_addr,
+ wlp_eda_for_each_f function,
+ void *priv)
+{
+ int result = 0;
+ struct wlp *wlp = container_of(eda, struct wlp, eda);
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_eda_node *itr;
+ unsigned long flags;
+ int found = 0;
+
+ spin_lock_irqsave(&eda->lock, flags);
+ list_for_each_entry(itr, &eda->cache, list_node) {
+ if (!memcmp(itr->virt_addr, virt_addr,
+ sizeof(itr->virt_addr))) {
+ d_printf(6, dev, "EDA: looking for "
+ "%02x:%02x:%02x:%02x:%02x:%02x hit %02x:%02x "
+ "wss %p tag 0x%02x state %u\n",
+ virt_addr[0], virt_addr[1],
+ virt_addr[2], virt_addr[3],
+ virt_addr[4], virt_addr[5],
+ itr->dev_addr.data[1],
+ itr->dev_addr.data[0], itr->wss,
+ itr->tag, itr->state);
+ result = (*function)(wlp, itr, priv);
+ *dev_addr = itr->dev_addr;
+ found = 1;
+ break;
+ } else
+ d_printf(6, dev, "EDA: looking for "
+ "%02x:%02x:%02x:%02x:%02x:%02x "
+ "against "
+ "%02x:%02x:%02x:%02x:%02x:%02x miss\n",
+ virt_addr[0], virt_addr[1],
+ virt_addr[2], virt_addr[3],
+ virt_addr[4], virt_addr[5],
+ itr->virt_addr[0], itr->virt_addr[1],
+ itr->virt_addr[2], itr->virt_addr[3],
+ itr->virt_addr[4], itr->virt_addr[5]);
+ }
+ if (!found) {
+ if (printk_ratelimit())
+ dev_err(dev, "EDA: Eth addr %02x:%02x:%02x"
+ ":%02x:%02x:%02x not found.\n",
+ virt_addr[0], virt_addr[1],
+ virt_addr[2], virt_addr[3],
+ virt_addr[4], virt_addr[5]);
+ result = -ENODEV;
+ }
+ spin_unlock_irqrestore(&eda->lock, flags);
+ return result;
+}
+
+static const char *__wlp_wss_connect_state[] = { "WLP_WSS_UNCONNECTED",
+ "WLP_WSS_CONNECTED",
+ "WLP_WSS_CONNECT_FAILED",
+};
+
+static const char *wlp_wss_connect_state_str(unsigned id)
+{
+ if (id >= ARRAY_SIZE(__wlp_wss_connect_state))
+ return "unknown WSS connection state";
+ return __wlp_wss_connect_state[id];
+}
+
+/*
+ * View EDA cache from user space
+ *
+ * A debugging feature to give user visibility into the EDA cache. Also
+ * used to display members of WSS to user (called from wlp_wss_members_show())
+ */
+ssize_t wlp_eda_show(struct wlp *wlp, char *buf)
+{
+ ssize_t result = 0;
+ struct wlp_eda_node *entry;
+ unsigned long flags;
+ struct wlp_eda *eda = &wlp->eda;
+ spin_lock_irqsave(&eda->lock, flags);
+ result = scnprintf(buf, PAGE_SIZE, "#eth_addr dev_addr wss_ptr "
+ "tag state virt_addr\n");
+ list_for_each_entry(entry, &eda->cache, list_node) {
+ result += scnprintf(buf + result, PAGE_SIZE - result,
+ "%02x:%02x:%02x:%02x:%02x:%02x %02x:%02x "
+ "%p 0x%02x %s "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ entry->eth_addr[0], entry->eth_addr[1],
+ entry->eth_addr[2], entry->eth_addr[3],
+ entry->eth_addr[4], entry->eth_addr[5],
+ entry->dev_addr.data[1],
+ entry->dev_addr.data[0], entry->wss,
+ entry->tag,
+ wlp_wss_connect_state_str(entry->state),
+ entry->virt_addr[0], entry->virt_addr[1],
+ entry->virt_addr[2], entry->virt_addr[3],
+ entry->virt_addr[4], entry->virt_addr[5]);
+ if (result >= PAGE_SIZE)
+ break;
+ }
+ spin_unlock_irqrestore(&eda->lock, flags);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wlp_eda_show);
+
+/*
+ * Add new EDA cache entry based on user input in sysfs
+ *
+ * Should only be used for debugging.
+ *
+ * The WSS is assumed to be the only WSS supported. This needs to be
+ * redesigned when we support more than one WSS.
+ */
+ssize_t wlp_eda_store(struct wlp *wlp, const char *buf, size_t size)
+{
+ ssize_t result;
+ struct wlp_eda *eda = &wlp->eda;
+ u8 eth_addr[6];
+ struct uwb_dev_addr dev_addr;
+ u8 tag;
+ unsigned state;
+
+ result = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx "
+ "%02hhx:%02hhx %02hhx %u\n",
+ &eth_addr[0], &eth_addr[1],
+ &eth_addr[2], &eth_addr[3],
+ &eth_addr[4], &eth_addr[5],
+ &dev_addr.data[1], &dev_addr.data[0], &tag, &state);
+ switch (result) {
+ case 6: /* no dev addr specified -- remove entry NOT IMPLEMENTED */
+ /*result = wlp_eda_rm(eda, eth_addr, &dev_addr);*/
+ result = -ENOSYS;
+ break;
+ case 10:
+ state = state >= 1 ? 1 : 0;
+ result = wlp_eda_create_node(eda, eth_addr, &dev_addr);
+ if (result < 0 && result != -EEXIST)
+ goto error;
+ /* Set virtual addr to be same as MAC */
+ result = wlp_eda_update_node(eda, &dev_addr, &wlp->wss,
+ eth_addr, tag, state);
+ if (result < 0)
+ goto error;
+ break;
+ default: /* bad format */
+ result = -EINVAL;
+ }
+error:
+ return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_eda_store);
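+
+/*
+ * Example input accepted by wlp_eda_store() above (written to
+ * whichever sysfs attribute is wired to it):
+ *
+ *	00:11:22:33:44:55 00:01 01 1
+ *
+ * i.e. Ethernet address, device address, tag and connection state.
+ * A line with only the Ethernet address (6 fields) would request
+ * removal of the entry, which is not implemented.
+ */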
diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
new file mode 100644
index 00000000000..a64cb824171
--- /dev/null
+++ b/drivers/uwb/wlp/messages.c
@@ -0,0 +1,1946 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ * Message construction and parsing
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/wlp.h>
+#define D_LOCAL 6
+#include <linux/uwb/debug.h>
+#include "wlp-internal.h"
+
+static
+const char *__wlp_assoc_frame[] = {
+ [WLP_ASSOC_D1] = "WLP_ASSOC_D1",
+ [WLP_ASSOC_D2] = "WLP_ASSOC_D2",
+ [WLP_ASSOC_M1] = "WLP_ASSOC_M1",
+ [WLP_ASSOC_M2] = "WLP_ASSOC_M2",
+ [WLP_ASSOC_M3] = "WLP_ASSOC_M3",
+ [WLP_ASSOC_M4] = "WLP_ASSOC_M4",
+ [WLP_ASSOC_M5] = "WLP_ASSOC_M5",
+ [WLP_ASSOC_M6] = "WLP_ASSOC_M6",
+ [WLP_ASSOC_M7] = "WLP_ASSOC_M7",
+ [WLP_ASSOC_M8] = "WLP_ASSOC_M8",
+ [WLP_ASSOC_F0] = "WLP_ASSOC_F0",
+ [WLP_ASSOC_E1] = "WLP_ASSOC_E1",
+ [WLP_ASSOC_E2] = "WLP_ASSOC_E2",
+ [WLP_ASSOC_C1] = "WLP_ASSOC_C1",
+ [WLP_ASSOC_C2] = "WLP_ASSOC_C2",
+ [WLP_ASSOC_C3] = "WLP_ASSOC_C3",
+ [WLP_ASSOC_C4] = "WLP_ASSOC_C4",
+};
+
+static const char *wlp_assoc_frame_str(unsigned id)
+{
+ if (id >= ARRAY_SIZE(__wlp_assoc_frame))
+ return "unknown association frame";
+ return __wlp_assoc_frame[id];
+}
+
+static const char *__wlp_assc_error[] = {
+ "none",
+ "Authenticator Failure",
+ "Rogue activity suspected",
+ "Device busy",
+ "Setup Locked",
+ "Registrar not ready",
+ "Invalid WSS selection",
+ "Message timeout",
+ "Enrollment session timeout",
+ "Device password invalid",
+ "Unsupported version",
+ "Internal error",
+ "Undefined error",
+ "Numeric comparison failure",
+ "Waiting for user input",
+};
+
+static const char *wlp_assc_error_str(unsigned id)
+{
+ if (id >= ARRAY_SIZE(__wlp_assc_error))
+ return "unknown WLP association error";
+ return __wlp_assc_error[id];
+}
+
+static inline void wlp_set_attr_hdr(struct wlp_attr_hdr *hdr, unsigned type,
+ size_t len)
+{
+ hdr->type = cpu_to_le16(type);
+ hdr->length = cpu_to_le16(len);
+}
+
+/*
+ * Populate fields of a constant sized attribute
+ *
+ * @returns: total size of attribute including size of new value
+ *
+ * We have two instances of this function (wlp_pset and wlp_set): one takes
+ * the value as a parameter, the other takes a pointer to the value as
+ * parameter. They thus only differ in how the value is assigned to the
+ * attribute.
+ *
+ * We use sizeof(*attr) - sizeof(struct wlp_attr_hdr) instead of
+ * sizeof(type) to be able to use this same code for the structures that
+ * contain 8bit enum values and be able to deal with pointer types.
+ */
+#define wlp_set(type, type_code, name) \
+static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \
+{ \
+ d_fnstart(6, NULL, "(attribute %p)\n", attr); \
+ wlp_set_attr_hdr(&attr->hdr, type_code, \
+ sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \
+ attr->name = value; \
+ d_dump(6, NULL, attr, sizeof(*attr)); \
+ d_fnend(6, NULL, "(attribute %p)\n", attr); \
+ return sizeof(*attr); \
+}
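+
+/*
+ * For instance, the "wlp_set(u8, WLP_ATTR_WLP_VER, version)"
+ * instantiation further down generates
+ *
+ *	static size_t wlp_set_version(struct wlp_attr_version *attr,
+ *				      u8 value);
+ *
+ * which fills in the header plus the value and returns the total size
+ * of the attribute.
+ */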
+
+#define wlp_pset(type, type_code, name) \
+static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \
+{ \
+ d_fnstart(6, NULL, "(attribute %p)\n", attr); \
+ wlp_set_attr_hdr(&attr->hdr, type_code, \
+ sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \
+ attr->name = *value; \
+ d_dump(6, NULL, attr, sizeof(*attr)); \
+ d_fnend(6, NULL, "(attribute %p)\n", attr); \
+ return sizeof(*attr); \
+}
+
+/**
+ * Populate fields of a variable attribute
+ *
+ * @returns: total size of attribute including size of new value
+ *
+ * Provided with a pointer to the memory area reserved for the
+ * attribute structure, the field is populated with the value. The
+ * reserved memory has to contain enough space for the value.
+ */
+#define wlp_vset(type, type_code, name) \
+static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value, \
+ size_t len) \
+{ \
+ d_fnstart(6, NULL, "(attribute %p)\n", attr); \
+ wlp_set_attr_hdr(&attr->hdr, type_code, len); \
+ memcpy(attr->name, value, len); \
+ d_dump(6, NULL, attr, sizeof(*attr) + len); \
+ d_fnend(6, NULL, "(attribute %p)\n", attr); \
+ return sizeof(*attr) + len; \
+}
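+
+/*
+ * Similarly, "wlp_vset(char *, WLP_ATTR_DEV_NAME, dev_name)" below
+ * generates wlp_set_dev_name(attr, value, len), where @len is the
+ * number of bytes of @value copied in after the header.
+ */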
+
+wlp_vset(char *, WLP_ATTR_DEV_NAME, dev_name)
+wlp_vset(char *, WLP_ATTR_MANUF, manufacturer)
+wlp_set(enum wlp_assoc_type, WLP_ATTR_MSG_TYPE, msg_type)
+wlp_vset(char *, WLP_ATTR_MODEL_NAME, model_name)
+wlp_vset(char *, WLP_ATTR_MODEL_NR, model_nr)
+wlp_vset(char *, WLP_ATTR_SERIAL, serial)
+wlp_vset(char *, WLP_ATTR_WSS_NAME, wss_name)
+wlp_pset(struct wlp_uuid *, WLP_ATTR_UUID_E, uuid_e)
+wlp_pset(struct wlp_uuid *, WLP_ATTR_UUID_R, uuid_r)
+wlp_pset(struct wlp_uuid *, WLP_ATTR_WSSID, wssid)
+wlp_pset(struct wlp_dev_type *, WLP_ATTR_PRI_DEV_TYPE, prim_dev_type)
+/*wlp_pset(struct wlp_dev_type *, WLP_ATTR_SEC_DEV_TYPE, sec_dev_type)*/
+wlp_set(u8, WLP_ATTR_WLP_VER, version)
+wlp_set(enum wlp_assc_error, WLP_ATTR_WLP_ASSC_ERR, wlp_assc_err)
+wlp_set(enum wlp_wss_sel_mthd, WLP_ATTR_WSS_SEL_MTHD, wss_sel_mthd)
+wlp_set(u8, WLP_ATTR_ACC_ENRL, accept_enrl)
+wlp_set(u8, WLP_ATTR_WSS_SEC_STAT, wss_sec_status)
+wlp_pset(struct uwb_mac_addr *, WLP_ATTR_WSS_BCAST, wss_bcast)
+wlp_pset(struct wlp_nonce *, WLP_ATTR_ENRL_NONCE, enonce)
+wlp_pset(struct wlp_nonce *, WLP_ATTR_REG_NONCE, rnonce)
+wlp_set(u8, WLP_ATTR_WSS_TAG, wss_tag)
+wlp_pset(struct uwb_mac_addr *, WLP_ATTR_WSS_VIRT, wss_virt)
+
+/**
+ * Fill in the WSS information attributes
+ *
+ * We currently only support one WSS, and this is assumed in this function
+ * that can populate only one WSS information attribute.
+ */
+static size_t wlp_set_wss_info(struct wlp_attr_wss_info *attr,
+ struct wlp_wss *wss)
+{
+ size_t datalen;
+ void *ptr = attr->wss_info;
+ size_t used = sizeof(*attr);
+ d_fnstart(6, NULL, "(attribute %p)\n", attr);
+ datalen = sizeof(struct wlp_wss_info) + strlen(wss->name);
+ wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen);
+ used = wlp_set_wssid(ptr, &wss->wssid);
+ used += wlp_set_wss_name(ptr + used, wss->name, strlen(wss->name));
+ used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll);
+ used += wlp_set_wss_sec_status(ptr + used, wss->secure_status);
+ used += wlp_set_wss_bcast(ptr + used, &wss->bcast);
+ d_dump(6, NULL, attr, sizeof(*attr) + datalen);
+ d_fnend(6, NULL, "(attribute %p, used %d)\n",
+ attr, (int)(sizeof(*attr) + used));
+ return sizeof(*attr) + used;
+}
+
+/**
+ * Verify attribute header
+ *
+ * @hdr: Pointer to attribute header that will be verified.
+ * @type: Expected attribute type.
+ * @len: Expected length of attribute value (excluding header).
+ *
+ * Most attribute values have a known length even when they do have a
+ * length field. This knowledge can be used via this function to verify
+ * that the length field matches the expected value.
+ */
+static int wlp_check_attr_hdr(struct wlp *wlp, struct wlp_attr_hdr *hdr,
+ enum wlp_attr_type type, unsigned len)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+
+ if (le16_to_cpu(hdr->type) != type) {
+ dev_err(dev, "WLP: unexpected header type. Expected "
+ "%u, got %u.\n", type, le16_to_cpu(hdr->type));
+ return -EINVAL;
+ }
+ if (le16_to_cpu(hdr->length) != len) {
+ dev_err(dev, "WLP: unexpected length in header. Expected "
+ "%u, got %u.\n", len, le16_to_cpu(hdr->length));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * Check if the header of a WSS information attribute is valid
+ *
+ * @returns: length of WSS attributes (value of length attribute field) if
+ *           valid WSS information attribute found
+ *           -ENODATA if no WSS information attribute found
+ *           -EIO other error occurred
+ *
+ * The WSS information attribute is optional. The function will be provided
+ * with a pointer to data that could _potentially_ be a WSS information
+ * attribute. If a valid WSS information attribute is found its length is
+ * returned; if no WSS information attribute is found -ENODATA is returned,
+ * and another error is returned if it is a WSS information attribute but
+ * some parsing failure occurred.
+ */
+static int wlp_check_wss_info_attr_hdr(struct wlp *wlp,
+ struct wlp_attr_hdr *hdr, size_t buflen)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ size_t len;
+ int result = 0;
+
+ if (buflen < sizeof(*hdr)) {
+ dev_err(dev, "WLP: Not enough space in buffer to parse"
+ " WSS information attribute header.\n");
+ result = -EIO;
+ goto out;
+ }
+ if (le16_to_cpu(hdr->type) != WLP_ATTR_WSS_INFO) {
+ /* WSS information is optional */
+ result = -ENODATA;
+ goto out;
+ }
+ len = le16_to_cpu(hdr->length);
+ if (buflen < sizeof(*hdr) + len) {
+ dev_err(dev, "WLP: Not enough space in buffer to parse "
+ "variable data. Got %d, expected %d.\n",
+ (int)buflen, (int)(sizeof(*hdr) + len));
+ result = -EIO;
+ goto out;
+ }
+ result = len;
+out:
+ return result;
+}
+
+
+/**
+ * Get value of attribute from fixed size attribute field.
+ *
+ * @attr: Pointer to attribute field.
+ * @value: Pointer to variable in which attribute value will be placed.
+ * @buflen: Size of buffer in which attribute field (including header)
+ * can be found.
+ * @returns: Amount of given buffer consumed by parsing for this attribute.
+ *
+ * The size and type of the value is known by the type of the attribute.
+ */
+#define wlp_get(type, type_code, name) \
+ssize_t wlp_get_##name(struct wlp *wlp, struct wlp_attr_##name *attr, \
+ type *value, ssize_t buflen) \
+{ \
+ struct device *dev = &wlp->rc->uwb_dev.dev; \
+ if (buflen < 0) \
+ return -EINVAL; \
+ if (buflen < sizeof(*attr)) { \
+ dev_err(dev, "WLP: Not enough space in buffer to parse" \
+ " attribute field. Need %d, received %zu\n", \
+ (int)sizeof(*attr), buflen); \
+ return -EIO; \
+ } \
+ if (wlp_check_attr_hdr(wlp, &attr->hdr, type_code, \
+ sizeof(attr->name)) < 0) { \
+ dev_err(dev, "WLP: Header verification failed. \n"); \
+ return -EINVAL; \
+ } \
+ *value = attr->name; \
+ return sizeof(*attr); \
+}
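+
+/*
+ * As with the setters, "wlp_get(u8, WLP_ATTR_WLP_VER, version)" below
+ * generates
+ *
+ *	ssize_t wlp_get_version(struct wlp *wlp,
+ *				struct wlp_attr_version *attr,
+ *				u8 *value, ssize_t buflen);
+ *
+ * which verifies the header against the expected type and length and
+ * then copies out the fixed-size value.
+ */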
+
+#define wlp_get_sparse(type, type_code, name) \
+ static wlp_get(type, type_code, name)
+
+/**
+ * Get value of attribute from variable sized attribute field.
+ *
+ * @max: The maximum size of this attribute. This value is dictated by
+ * the maximum value from the WLP specification.
+ *
+ * @attr: Pointer to attribute field.
+ * @value: Pointer to variable that will contain the value. The memory
+ * must already have been allocated for this value.
+ * @buflen: Size of buffer in which attribute field (including header)
+ * can be found.
+ * @returns: Amount of given buffer consumed by parsing for this attribute.
+ */
+#define wlp_vget(type_val, type_code, name, max) \
+static ssize_t wlp_get_##name(struct wlp *wlp, \
+ struct wlp_attr_##name *attr, \
+ type_val *value, ssize_t buflen) \
+{ \
+ struct device *dev = &wlp->rc->uwb_dev.dev; \
+ size_t len; \
+ if (buflen < 0) \
+ return -EINVAL; \
+ if (buflen < sizeof(*attr)) { \
+ dev_err(dev, "WLP: Not enough space in buffer to parse" \
+ " header.\n"); \
+ return -EIO; \
+ } \
+ if (le16_to_cpu(attr->hdr.type) != type_code) { \
+ dev_err(dev, "WLP: Unexpected attribute type. Got %u, " \
+ "expected %u.\n", le16_to_cpu(attr->hdr.type), \
+ type_code); \
+ return -EINVAL; \
+ } \
+ len = le16_to_cpu(attr->hdr.length); \
+ if (len > max) { \
+ dev_err(dev, "WLP: Attribute larger than maximum " \
+ "allowed. Received %zu, max is %d.\n", len, \
+ (int)max); \
+ return -EFBIG; \
+ } \
+ if (buflen < sizeof(*attr) + len) { \
+ dev_err(dev, "WLP: Not enough space in buffer to parse "\
+ "variable data.\n"); \
+ return -EIO; \
+ } \
+ memcpy(value, (void *) attr + sizeof(*attr), len); \
+ return sizeof(*attr) + len; \
+}
+
+wlp_get(u8, WLP_ATTR_WLP_VER, version)
+wlp_get_sparse(enum wlp_wss_sel_mthd, WLP_ATTR_WSS_SEL_MTHD, wss_sel_mthd)
+wlp_get_sparse(struct wlp_dev_type, WLP_ATTR_PRI_DEV_TYPE, prim_dev_type)
+wlp_get_sparse(enum wlp_assc_error, WLP_ATTR_WLP_ASSC_ERR, wlp_assc_err)
+wlp_get_sparse(struct wlp_uuid, WLP_ATTR_UUID_E, uuid_e)
+wlp_get_sparse(struct wlp_uuid, WLP_ATTR_UUID_R, uuid_r)
+wlp_get(struct wlp_uuid, WLP_ATTR_WSSID, wssid)
+wlp_get_sparse(u8, WLP_ATTR_ACC_ENRL, accept_enrl)
+wlp_get_sparse(u8, WLP_ATTR_WSS_SEC_STAT, wss_sec_status)
+wlp_get_sparse(struct uwb_mac_addr, WLP_ATTR_WSS_BCAST, wss_bcast)
+wlp_get_sparse(u8, WLP_ATTR_WSS_TAG, wss_tag)
+wlp_get_sparse(struct uwb_mac_addr, WLP_ATTR_WSS_VIRT, wss_virt)
+wlp_get_sparse(struct wlp_nonce, WLP_ATTR_ENRL_NONCE, enonce)
+wlp_get_sparse(struct wlp_nonce, WLP_ATTR_REG_NONCE, rnonce)
+
+/* The buffers for the device info attributes can be found in the
+ * wlp_device_info struct. These buffers contain one byte more than the
+ * max allowed by the spec - this is done to be able to add the
+ * terminating \0 for user display. This terminating byte is not required
+ * in the actual attribute field (because it has a length field) so the
+ * maximum allowed for this value is one less than its size in the
+ * structure.
+ */
+wlp_vget(char, WLP_ATTR_WSS_NAME, wss_name,
+ FIELD_SIZEOF(struct wlp_wss, name) - 1)
+wlp_vget(char, WLP_ATTR_DEV_NAME, dev_name,
+ FIELD_SIZEOF(struct wlp_device_info, name) - 1)
+wlp_vget(char, WLP_ATTR_MANUF, manufacturer,
+ FIELD_SIZEOF(struct wlp_device_info, manufacturer) - 1)
+wlp_vget(char, WLP_ATTR_MODEL_NAME, model_name,
+ FIELD_SIZEOF(struct wlp_device_info, model_name) - 1)
+wlp_vget(char, WLP_ATTR_MODEL_NR, model_nr,
+ FIELD_SIZEOF(struct wlp_device_info, model_nr) - 1)
+wlp_vget(char, WLP_ATTR_SERIAL, serial,
+ FIELD_SIZEOF(struct wlp_device_info, serial) - 1)
+
+/**
+ * Retrieve WSS Name, Accept enroll, Secure status, Broadcast from WSS info
+ *
+ * @attr: pointer to WSS name attribute in WSS information attribute field
+ * @info: structure that will be populated with data from WSS information
+ * field (WSS name, Accept enroll, secure status, broadcast address)
+ * @buflen: size of buffer
+ *
+ * Although the WSSID attribute forms part of the WSS info attribute, it is
+ * retrieved separately and stored in a different location.
+ */
+static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp,
+ struct wlp_attr_hdr *attr,
+ struct wlp_wss_tmp_info *info,
+ ssize_t buflen)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ void *ptr = attr;
+ size_t used = 0;
+ ssize_t result = -EINVAL;
+
+ d_printf(6, dev, "WLP: WSS info: Retrieving WSS name\n");
+ result = wlp_get_wss_name(wlp, ptr, info->name, buflen);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WSS name from "
+ "WSS info in D2 message.\n");
+ goto error_parse;
+ }
+ used += result;
+ d_printf(6, dev, "WLP: WSS info: Retrieving accept enroll\n");
+ result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll,
+ buflen - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain accepting "
+ "enrollment from WSS info in D2 message.\n");
+ goto error_parse;
+ }
+ if (info->accept_enroll != 0 && info->accept_enroll != 1) {
+ dev_err(dev, "WLP: invalid value for accepting "
+ "enrollment in D2 message.\n");
+ result = -EINVAL;
+ goto error_parse;
+ }
+ used += result;
+ d_printf(6, dev, "WLP: WSS info: Retrieving secure status\n");
+ result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status,
+ buflen - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain secure "
+ "status from WSS info in D2 message.\n");
+ goto error_parse;
+ }
+ if (info->sec_status != 0 && info->sec_status != 1) {
+ dev_err(dev, "WLP: invalid value for secure "
+ "status in D2 message.\n");
+ result = -EINVAL;
+ goto error_parse;
+ }
+ used += result;
+ d_printf(6, dev, "WLP: WSS info: Retrieving broadcast\n");
+ result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast,
+ buflen - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain broadcast "
+ "address from WSS info in D2 message.\n");
+ goto error_parse;
+ }
+ used += result;
+ result = used;
+error_parse:
+ return result;
+}
+
+/**
+ * Create a new WSSID entry for the neighbor, allocate temporary storage
+ *
+ * Each neighbor can have many WSS active. We maintain a list of WSSIDs
+ * advertised by neighbor. During discovery we also cache information about
+ * these WSS in temporary storage.
+ *
+ * The temporary storage will be removed after it has been used (e.g.
+ * displayed to the user); the wssid element will be removed from the list
+ * when the neighbor is rediscovered or when it disappears.
+ */
+static struct wlp_wssid_e *wlp_create_wssid_e(struct wlp *wlp,
+ struct wlp_neighbor_e *neighbor)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_wssid_e *wssid_e;
+
+ wssid_e = kzalloc(sizeof(*wssid_e), GFP_KERNEL);
+ if (wssid_e == NULL) {
+ dev_err(dev, "WLP: unable to allocate memory "
+ "for WSS information.\n");
+ goto error_alloc;
+ }
+ wssid_e->info = kzalloc(sizeof(struct wlp_wss_tmp_info), GFP_KERNEL);
+ if (wssid_e->info == NULL) {
+ dev_err(dev, "WLP: unable to allocate memory "
+ "for temporary WSS information.\n");
+ kfree(wssid_e);
+ wssid_e = NULL;
+ goto error_alloc;
+ }
+ list_add(&wssid_e->node, &neighbor->wssid);
+error_alloc:
+ return wssid_e;
+}
+
+/**
+ * Parse WSS information attribute
+ *
+ * @attr: pointer to WSS information attribute header
+ * @buflen: size of buffer in which WSS information attribute appears
+ * @wssid: will place wssid from WSS info attribute in this location
+ * @wss_info: will place other information from WSS information attribute
+ * in this location
+ *
+ * memory for @wssid and @wss_info must be allocated when calling this
+ */
+static ssize_t wlp_get_wss_info(struct wlp *wlp, struct wlp_attr_wss_info *attr,
+ size_t buflen, struct wlp_uuid *wssid,
+ struct wlp_wss_tmp_info *wss_info)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ ssize_t result;
+ size_t len;
+ size_t used = 0;
+ void *ptr;
+
+ result = wlp_check_wss_info_attr_hdr(wlp, (struct wlp_attr_hdr *)attr,
+ buflen);
+ if (result < 0)
+ goto out;
+ len = result;
+ used = sizeof(*attr);
+ ptr = attr;
+ d_printf(6, dev, "WLP: WSS info: Retrieving WSSID\n");
+ result = wlp_get_wssid(wlp, ptr + used, wssid, buflen - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n");
+ goto out;
+ }
+ used += result;
+ result = wlp_get_wss_info_attrs(wlp, ptr + used, wss_info,
+ buflen - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WSS information "
+ "from WSS information attributes. \n");
+ goto out;
+ }
+ used += result;
+ if (len + sizeof(*attr) != used) {
+ dev_err(dev, "WLP: Amount of data parsed does not "
+ "match length field. Parsed %zu, length "
+ "field %zu. \n", used, len);
+ result = -EINVAL;
+ goto out;
+ }
+ result = used;
+ d_printf(6, dev, "WLP: Successfully parsed WLP information "
+ "attribute. used %zu bytes\n", used);
+out:
+ return result;
+}
+
+/**
+ * Retrieve WSS info from association frame
+ *
+ * @attr: pointer to WSS information attribute
+ * @neighbor: ptr to neighbor being discovered, NULL if enrollment in
+ * progress
+ * @wss: ptr to WSS being enrolled in, NULL if discovery in progress
+ * @buflen: size of buffer in which WSS information appears
+ *
+ * The WSS information attribute appears in the D2 association message.
+ * This message is used in two ways: to discover all neighbors or to enroll
+ * into a WSS activated by a neighbor. During discovery we only want to
+ * store the WSS info in a cache, to be deleted right after it has been
+ * used (e.g. displayed to the user). During enrollment we store the WSS
+ * information for the lifetime of enrollment.
+ *
+ * During discovery we are interested in all WSS information, during
+ * enrollment we are only interested in the WSS being enrolled in. Even so,
+ * when in enrollment we keep parsing the message after finding the WSS of
+ * interest, this simplifies the calling routine in that it can be sure
+ * that all WSS information attributes have been parsed out of the message.
+ *
+ * The association frame is processed with nbmutex held, so the list
+ * access is safe.
+ */
+static ssize_t wlp_get_all_wss_info(struct wlp *wlp,
+ struct wlp_attr_wss_info *attr,
+ struct wlp_neighbor_e *neighbor,
+ struct wlp_wss *wss, ssize_t buflen)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ size_t used = 0;
+ ssize_t result = -EINVAL;
+ struct wlp_attr_wss_info *cur;
+ struct wlp_uuid wssid;
+ struct wlp_wss_tmp_info wss_info;
+ unsigned enroll; /* 0 - discovery to cache, 1 - enrollment */
+ struct wlp_wssid_e *wssid_e;
+ char buf[WLP_WSS_UUID_STRSIZE];
+
+ d_fnstart(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d \n",
+ wlp, attr, neighbor, wss, (int)buflen);
+ if (buflen < 0)
+ goto out;
+
+ if (neighbor != NULL && wss == NULL)
+ enroll = 0; /* discovery */
+ else if (wss != NULL && neighbor == NULL)
+ enroll = 1; /* enrollment */
+ else
+ goto out;
+
+ cur = attr;
+ while (buflen - used > 0) {
+ memset(&wss_info, 0, sizeof(wss_info));
+ cur = (void *)cur + used;
+ result = wlp_get_wss_info(wlp, cur, buflen - used, &wssid,
+ &wss_info);
+ if (result == -ENODATA) {
+ result = used;
+ goto out;
+ } else if (result < 0) {
+ dev_err(dev, "WLP: Unable to parse WSS information "
+ "from WSS information attribute. \n");
+ result = -EINVAL;
+ goto error_parse;
+ }
+ if (enroll && !memcmp(&wssid, &wss->wssid, sizeof(wssid))) {
+ if (wss_info.accept_enroll != 1) {
+ dev_err(dev, "WLP: Requested WSS does "
+ "not accept enrollment.\n");
+ result = -EINVAL;
+ goto out;
+ }
+ memcpy(wss->name, wss_info.name, sizeof(wss->name));
+ wss->bcast = wss_info.bcast;
+ wss->secure_status = wss_info.sec_status;
+ wss->accept_enroll = wss_info.accept_enroll;
+ wss->state = WLP_WSS_STATE_PART_ENROLLED;
+ wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
+ d_printf(2, dev, "WLP: Found WSS %s. Enrolling.\n",
+ buf);
+ } else {
+ wssid_e = wlp_create_wssid_e(wlp, neighbor);
+ if (wssid_e == NULL) {
+ dev_err(dev, "WLP: Cannot create new WSSID "
+ "entry for neighbor %02x:%02x.\n",
+ neighbor->uwb_dev->dev_addr.data[1],
+ neighbor->uwb_dev->dev_addr.data[0]);
+ result = -ENOMEM;
+ goto out;
+ }
+ wssid_e->wssid = wssid;
+ *wssid_e->info = wss_info;
+ }
+ used += result;
+ }
+ result = used;
+error_parse:
+ if (result < 0 && !enroll) /* this was a discovery */
+ wlp_remove_neighbor_tmp_info(neighbor);
+out:
+ d_fnend(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d, "
+ "result %d \n", wlp, attr, neighbor, wss, (int)buflen,
+ (int)result);
+ return result;
+
+}
+
+/**
+ * Parse WSS information attributes into cache for discovery
+ *
+ * @attr: the first WSS information attribute in message
+ * @neighbor: the neighbor whose cache will be populated
+ * @buflen: size of the input buffer
+ */
+static ssize_t wlp_get_wss_info_to_cache(struct wlp *wlp,
+ struct wlp_attr_wss_info *attr,
+ struct wlp_neighbor_e *neighbor,
+ ssize_t buflen)
+{
+ return wlp_get_all_wss_info(wlp, attr, neighbor, NULL, buflen);
+}
+
+/**
+ * Parse WSS information attributes into WSS struct for enrollment
+ *
+ * @attr: the first WSS information attribute in message
+ * @wss: the WSS that will be enrolled
+ * @buflen: size of the input buffer
+ */
+static ssize_t wlp_get_wss_info_to_enroll(struct wlp *wlp,
+ struct wlp_attr_wss_info *attr,
+ struct wlp_wss *wss, ssize_t buflen)
+{
+ return wlp_get_all_wss_info(wlp, attr, NULL, wss, buflen);
+}
+
+/**
+ * Construct a D1 association frame
+ *
+ * We use the radio control functions to determine the values of the device
+ * properties. These are of variable length and the total space needed is
+ * tallied first before we start constructing the message. The radio
+ * control functions return strings that are terminated with \0. This
+ * character should not be included in the message (there is a length field
+ * accompanying it in the attribute).
+ */
+static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss,
+ struct sk_buff **skb)
+{
+
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result = 0;
+ struct wlp_device_info *info;
+ size_t used = 0;
+ struct wlp_frame_assoc *_d1;
+ struct sk_buff *_skb;
+ void *d1_itr;
+
+ d_fnstart(6, dev, "wlp %p\n", wlp);
+ if (wlp->dev_info == NULL) {
+ result = __wlp_setup_device_info(wlp);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to setup device "
+ "information for D1 message.\n");
+ goto error;
+ }
+ }
+ info = wlp->dev_info;
+ d_printf(6, dev, "Local properties:\n"
+ "Device name (%d bytes): %s\n"
+ "Model name (%d bytes): %s\n"
+ "Manufacturer (%d bytes): %s\n"
+ "Model number (%d bytes): %s\n"
+ "Serial number (%d bytes): %s\n"
+ "Primary device type: \n"
+ " Category: %d \n"
+ " OUI: %02x:%02x:%02x \n"
+ " OUI Subdivision: %u \n",
+ (int)strlen(info->name), info->name,
+ (int)strlen(info->model_name), info->model_name,
+ (int)strlen(info->manufacturer), info->manufacturer,
+ (int)strlen(info->model_nr), info->model_nr,
+ (int)strlen(info->serial), info->serial,
+ info->prim_dev_type.category,
+ info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1],
+ info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv);
+ _skb = dev_alloc_skb(sizeof(*_d1)
+ + sizeof(struct wlp_attr_uuid_e)
+ + sizeof(struct wlp_attr_wss_sel_mthd)
+ + sizeof(struct wlp_attr_dev_name)
+ + strlen(info->name)
+ + sizeof(struct wlp_attr_manufacturer)
+ + strlen(info->manufacturer)
+ + sizeof(struct wlp_attr_model_name)
+ + strlen(info->model_name)
+ + sizeof(struct wlp_attr_model_nr)
+ + strlen(info->model_nr)
+ + sizeof(struct wlp_attr_serial)
+ + strlen(info->serial)
+ + sizeof(struct wlp_attr_prim_dev_type)
+ + sizeof(struct wlp_attr_wlp_assc_err));
+ if (_skb == NULL) {
+ dev_err(dev, "WLP: Cannot allocate memory for association "
+ "message.\n");
+ result = -ENOMEM;
+ goto error;
+ }
+ _d1 = (void *) _skb->data;
+ d_printf(6, dev, "D1 starts at %p \n", _d1);
+ _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
+ _d1->hdr.type = WLP_FRAME_ASSOCIATION;
+ _d1->type = WLP_ASSOC_D1;
+
+ wlp_set_version(&_d1->version, WLP_VERSION);
+ wlp_set_msg_type(&_d1->msg_type, WLP_ASSOC_D1);
+ d1_itr = _d1->attr;
+ used = wlp_set_uuid_e(d1_itr, &wlp->uuid);
+ used += wlp_set_wss_sel_mthd(d1_itr + used, WLP_WSS_REG_SELECT);
+ used += wlp_set_dev_name(d1_itr + used, info->name,
+ strlen(info->name));
+ used += wlp_set_manufacturer(d1_itr + used, info->manufacturer,
+ strlen(info->manufacturer));
+ used += wlp_set_model_name(d1_itr + used, info->model_name,
+ strlen(info->model_name));
+ used += wlp_set_model_nr(d1_itr + used, info->model_nr,
+ strlen(info->model_nr));
+ used += wlp_set_serial(d1_itr + used, info->serial,
+ strlen(info->serial));
+ used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type);
+ used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE);
+ skb_put(_skb, sizeof(*_d1) + used);
+ d_printf(6, dev, "D1 message:\n");
+ d_dump(6, dev, _d1, sizeof(*_d1)
+ + sizeof(struct wlp_attr_uuid_e)
+ + sizeof(struct wlp_attr_wss_sel_mthd)
+ + sizeof(struct wlp_attr_dev_name)
+ + strlen(info->name)
+ + sizeof(struct wlp_attr_manufacturer)
+ + strlen(info->manufacturer)
+ + sizeof(struct wlp_attr_model_name)
+ + strlen(info->model_name)
+ + sizeof(struct wlp_attr_model_nr)
+ + strlen(info->model_nr)
+ + sizeof(struct wlp_attr_serial)
+ + strlen(info->serial)
+ + sizeof(struct wlp_attr_prim_dev_type)
+ + sizeof(struct wlp_attr_wlp_assc_err));
+ *skb = _skb;
+error:
+ d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
+ return result;
+}
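
The wlp_set_*() attribute writers used above are defined elsewhere in this patch. As a hedged illustration of why strlen() rather than strlen() + 1 is used both when sizing the skb and when filling it, the hypothetical helper below (reusing the attr_hdr stand-in from the earlier sketch) emits one string attribute: the header's length field carries the byte count and no NUL terminator is copied into the frame.

/* Hypothetical string-attribute writer, for illustration only; it is not
 * the driver's wlp_set_dev_name()/wlp_set_manufacturer() implementation. */
static size_t emit_string_attr(void *dst, uint16_t type, const char *str)
{
	size_t len = strlen(str);	/* value length, excluding '\0' */
	struct attr_hdr hdr = {
		.type = type,		/* host order in this sketch */
		.length = (uint16_t)len,
	};

	memcpy(dst, &hdr, sizeof(hdr));
	memcpy((char *)dst + sizeof(hdr), str, len);
	return sizeof(hdr) + len;	/* bytes appended to the frame */
}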
+
+/**
+ * Construct a D2 association frame
+ *
+ * We use the radio control functions to determine the values of the device
+ * properties. These are of variable length and the total space needed is
+ * tallied first before we start constructing the message. The radio
+ * control functions return strings that are terminated with \0. This
+ * character should not be included in the message (there is a length field
+ * accompanying it in the attribute).
+ */
+static
+int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss,
+ struct sk_buff **skb, struct wlp_uuid *uuid_e)
+{
+
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result = 0;
+ struct wlp_device_info *info;
+ size_t used = 0;
+ struct wlp_frame_assoc *_d2;
+ struct sk_buff *_skb;
+ void *d2_itr;
+ size_t mem_needed;
+
+ d_fnstart(6, dev, "wlp %p\n", wlp);
+ if (wlp->dev_info == NULL) {
+ result = __wlp_setup_device_info(wlp);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to setup device "
+ "information for D2 message.\n");
+ goto error;
+ }
+ }
+ info = wlp->dev_info;
+ d_printf(6, dev, "Local properties:\n"
+ "Device name (%d bytes): %s\n"
+ "Model name (%d bytes): %s\n"
+ "Manufacturer (%d bytes): %s\n"
+ "Model number (%d bytes): %s\n"
+ "Serial number (%d bytes): %s\n"
+ "Primary device type: \n"
+ " Category: %d \n"
+ " OUI: %02x:%02x:%02x \n"
+ " OUI Subdivision: %u \n",
+ (int)strlen(info->name), info->name,
+ (int)strlen(info->model_name), info->model_name,
+ (int)strlen(info->manufacturer), info->manufacturer,
+ (int)strlen(info->model_nr), info->model_nr,
+ (int)strlen(info->serial), info->serial,
+ info->prim_dev_type.category,
+ info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1],
+ info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv);
+ mem_needed = sizeof(*_d2)
+ + sizeof(struct wlp_attr_uuid_e)
+ + sizeof(struct wlp_attr_uuid_r)
+ + sizeof(struct wlp_attr_dev_name)
+ + strlen(info->name)
+ + sizeof(struct wlp_attr_manufacturer)
+ + strlen(info->manufacturer)
+ + sizeof(struct wlp_attr_model_name)
+ + strlen(info->model_name)
+ + sizeof(struct wlp_attr_model_nr)
+ + strlen(info->model_nr)
+ + sizeof(struct wlp_attr_serial)
+ + strlen(info->serial)
+ + sizeof(struct wlp_attr_prim_dev_type)
+ + sizeof(struct wlp_attr_wlp_assc_err);
+ if (wlp->wss.state >= WLP_WSS_STATE_ACTIVE)
+ mem_needed += sizeof(struct wlp_attr_wss_info)
+ + sizeof(struct wlp_wss_info)
+ + strlen(wlp->wss.name);
+ _skb = dev_alloc_skb(mem_needed);
+ if (_skb == NULL) {
+ dev_err(dev, "WLP: Cannot allocate memory for association "
+ "message.\n");
+ result = -ENOMEM;
+ goto error;
+ }
+ _d2 = (void *) _skb->data;
+ d_printf(6, dev, "D2 starts at %p \n", _d2);
+ _d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
+ _d2->hdr.type = WLP_FRAME_ASSOCIATION;
+ _d2->type = WLP_ASSOC_D2;
+
+ wlp_set_version(&_d2->version, WLP_VERSION);
+ wlp_set_msg_type(&_d2->msg_type, WLP_ASSOC_D2);
+ d2_itr = _d2->attr;
+ used = wlp_set_uuid_e(d2_itr, uuid_e);
+ used += wlp_set_uuid_r(d2_itr + used, &wlp->uuid);
+ if (wlp->wss.state >= WLP_WSS_STATE_ACTIVE)
+ used += wlp_set_wss_info(d2_itr + used, &wlp->wss);
+ used += wlp_set_dev_name(d2_itr + used, info->name,
+ strlen(info->name));
+ used += wlp_set_manufacturer(d2_itr + used, info->manufacturer,
+ strlen(info->manufacturer));
+ used += wlp_set_model_name(d2_itr + used, info->model_name,
+ strlen(info->model_name));
+ used += wlp_set_model_nr(d2_itr + used, info->model_nr,
+ strlen(info->model_nr));
+ used += wlp_set_serial(d2_itr + used, info->serial,
+ strlen(info->serial));
+ used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type);
+ used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE);
+ skb_put(_skb, sizeof(*_d2) + used);
+ d_printf(6, dev, "D2 message:\n");
+ d_dump(6, dev, _d2, mem_needed);
+ *skb = _skb;
+error:
+ d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
+ return result;
+}
+
+/**
+ * Allocate memory for and populate fields of F0 association frame
+ *
+ * Currently (while focusing on unsecure enrollment) we ignore the
+ * nonces that could be placed in the message. Only the error field is
+ * populated by the value provided by the caller.
+ */
+static
+int wlp_build_assoc_f0(struct wlp *wlp, struct sk_buff **skb,
+ enum wlp_assc_error error)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result = -ENOMEM;
+ struct {
+ struct wlp_frame_assoc f0_hdr;
+ struct wlp_attr_enonce enonce;
+ struct wlp_attr_rnonce rnonce;
+ struct wlp_attr_wlp_assc_err assc_err;
+ } *f0;
+ struct sk_buff *_skb;
+ struct wlp_nonce tmp;
+
+ d_fnstart(6, dev, "wlp %p\n", wlp);
+ _skb = dev_alloc_skb(sizeof(*f0));
+ if (_skb == NULL) {
+ dev_err(dev, "WLP: Unable to allocate memory for F0 "
+ "association frame. \n");
+ goto error_alloc;
+ }
+ f0 = (void *) _skb->data;
+ d_printf(6, dev, "F0 starts at %p \n", f0);
+ f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
+ f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION;
+ f0->f0_hdr.type = WLP_ASSOC_F0;
+ wlp_set_version(&f0->f0_hdr.version, WLP_VERSION);
+ wlp_set_msg_type(&f0->f0_hdr.msg_type, WLP_ASSOC_F0);
+ memset(&tmp, 0, sizeof(tmp));
+ wlp_set_enonce(&f0->enonce, &tmp);
+ wlp_set_rnonce(&f0->rnonce, &tmp);
+ wlp_set_wlp_assc_err(&f0->assc_err, error);
+ skb_put(_skb, sizeof(*f0));
+ *skb = _skb;
+ result = 0;
+error_alloc:
+ d_fnend(6, dev, "wlp %p, result %d \n", wlp, result);
+ return result;
+}
+
+/**
+ * Parse F0 frame
+ *
+ * We just retrieve the values and print them as an error to the user.
+ * The calling function already knows an error occurred (F0 indicates error),
+ * so we just parse the content as debug for higher layers.
+ */
+int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_frame_assoc *f0 = (void *) skb->data;
+ void *ptr = skb->data;
+ size_t len = skb->len;
+ size_t used;
+ ssize_t result;
+ struct wlp_nonce enonce, rnonce;
+ enum wlp_assc_error assc_err;
+ char enonce_buf[WLP_WSS_NONCE_STRSIZE];
+ char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
+
+ used = sizeof(*f0);
+ result = wlp_get_enonce(wlp, ptr + used, &enonce, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain Enrollee nonce "
+ "attribute from F0 message.\n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_rnonce(wlp, ptr + used, &rnonce, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain Registrar nonce "
+ "attribute from F0 message.\n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WLP Association error "
+ "attribute from F0 message.\n");
+ goto error_parse;
+ }
+ wlp_wss_nonce_print(enonce_buf, sizeof(enonce_buf), &enonce);
+ wlp_wss_nonce_print(rnonce_buf, sizeof(rnonce_buf), &rnonce);
+ dev_err(dev, "WLP: Received F0 error frame from neighbor. Enrollee "
+ "nonce: %s, Registrar nonce: %s, WLP Association error: %s.\n",
+ enonce_buf, rnonce_buf, wlp_assc_error_str(assc_err));
+ result = 0;
+error_parse:
+ return result;
+}
+
+/**
+ * Retrieve variable device information from association message
+ *
+ * The device information parsed is not required in any message. This
+ * routine will thus not fail if an attribute is not present.
+ * The attributes are expected in a certain order, even if not all are
+ * present. The "attribute type" value is used to ensure the attributes
+ * are parsed in the correct order.
+ *
+ * If an error is encountered during parsing, the function will return an
+ * error code; when this happens the given device_info structure may be
+ * partially filled.
+ */
+static
+int wlp_get_variable_info(struct wlp *wlp, void *data,
+ struct wlp_device_info *dev_info, ssize_t len)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ size_t used = 0;
+ struct wlp_attr_hdr *hdr;
+ ssize_t result = 0;
+ unsigned last = 0;
+
+ while (len - used > 0) {
+ if (len - used < sizeof(*hdr)) {
+ dev_err(dev, "WLP: Partial data in frame, cannot "
+ "parse. \n");
+ goto error_parse;
+ }
+ hdr = data + used;
+ switch (le16_to_cpu(hdr->type)) {
+ case WLP_ATTR_MANUF:
+ if (last >= WLP_ATTR_MANUF) {
+ dev_err(dev, "WLP: Incorrect order of "
+ "attribute values in D1 msg.\n");
+ goto error_parse;
+ }
+ result = wlp_get_manufacturer(wlp, data + used,
+ dev_info->manufacturer,
+ len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to obtain "
+ "Manufacturer attribute from D1 "
+ "message.\n");
+ goto error_parse;
+ }
+ last = WLP_ATTR_MANUF;
+ used += result;
+ break;
+ case WLP_ATTR_MODEL_NAME:
+ if (last >= WLP_ATTR_MODEL_NAME) {
+ dev_err(dev, "WLP: Incorrect order of "
+ "attribute values in D1 msg.\n");
+ goto error_parse;
+ }
+ result = wlp_get_model_name(wlp, data + used,
+ dev_info->model_name,
+ len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to obtain Model "
+ "name attribute from D1 message.\n");
+ goto error_parse;
+ }
+ last = WLP_ATTR_MODEL_NAME;
+ used += result;
+ break;
+ case WLP_ATTR_MODEL_NR:
+ if (last >= WLP_ATTR_MODEL_NR) {
+ dev_err(dev, "WLP: Incorrect order of "
+ "attribute values in D1 msg.\n");
+ goto error_parse;
+ }
+ result = wlp_get_model_nr(wlp, data + used,
+ dev_info->model_nr,
+ len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to obtain Model "
+ "number attribute from D1 message.\n");
+ goto error_parse;
+ }
+ last = WLP_ATTR_MODEL_NR;
+ used += result;
+ break;
+ case WLP_ATTR_SERIAL:
+ if (last >= WLP_ATTR_SERIAL) {
+ dev_err(dev, "WLP: Incorrect order of "
+ "attribute values in D1 msg.\n");
+ goto error_parse;
+ }
+ result = wlp_get_serial(wlp, data + used,
+ dev_info->serial, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to obtain Serial "
+ "number attribute from D1 message.\n");
+ goto error_parse;
+ }
+ last = WLP_ATTR_SERIAL;
+ used += result;
+ break;
+ case WLP_ATTR_PRI_DEV_TYPE:
+ if (last >= WLP_ATTR_PRI_DEV_TYPE) {
+ dev_err(dev, "WLP: Incorrect order of "
+ "attribute values in D1 msg.\n");
+ goto error_parse;
+ }
+ result = wlp_get_prim_dev_type(wlp, data + used,
+ &dev_info->prim_dev_type,
+ len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to obtain Primary "
+ "device type attribute from D1 "
+ "message.\n");
+ goto error_parse;
+ }
+ dev_info->prim_dev_type.category =
+ le16_to_cpu(dev_info->prim_dev_type.category);
+ dev_info->prim_dev_type.subID =
+ le16_to_cpu(dev_info->prim_dev_type.subID);
+ last = WLP_ATTR_PRI_DEV_TYPE;
+ used += result;
+ break;
+ default:
+ /* This is not variable device information. */
+ goto out;
+ break;
+ }
+ }
+out:
+ return used;
+error_parse:
+ return -EINVAL;
+}
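
The ordering enforcement in the switch above can be condensed to the check below; like the code above, it assumes the WLP_ATTR_* type codes of these optional attributes ascend in the order they must appear. A sketch of the idea, not a drop-in replacement.

/* Reject an optional attribute whose type code is not strictly greater
 * than the last accepted one, i.e. a duplicate or out-of-order attribute. */
static int check_attr_order(uint16_t type, uint16_t *last)
{
	if (type <= *last)
		return -EINVAL;
	*last = type;
	return 0;
}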
+
+/**
+ * Parse incoming D1 frame, populate attribute values
+ *
+ * Caller provides pointers to memory already allocated for attributes
+ * expected in the D1 frame. These variables will be populated.
+ */
+static
+int wlp_parse_d1_frame(struct wlp *wlp, struct sk_buff *skb,
+ struct wlp_uuid *uuid_e,
+ enum wlp_wss_sel_mthd *sel_mthd,
+ struct wlp_device_info *dev_info,
+ enum wlp_assc_error *assc_err)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_frame_assoc *d1 = (void *) skb->data;
+ void *ptr = skb->data;
+ size_t len = skb->len;
+ size_t used;
+ ssize_t result;
+
+ used = sizeof(*d1);
+ result = wlp_get_uuid_e(wlp, ptr + used, uuid_e, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain UUID-E attribute from D1 "
+ "message.\n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_wss_sel_mthd(wlp, ptr + used, sel_mthd, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WSS selection method "
+ "from D1 message.\n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_dev_name(wlp, ptr + used, dev_info->name,
+ len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain Device Name from D1 "
+ "message.\n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_variable_info(wlp, ptr + used, dev_info, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain Device Information from "
+ "D1 message.\n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_wlp_assc_err(wlp, ptr + used, assc_err, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WLP Association Error "
+ "Information from D1 message.\n");
+ goto error_parse;
+ }
+ result = 0;
+error_parse:
+ return result;
+}
+/**
+ * Handle incoming D1 frame
+ *
+ * The frame has already been verified to contain an Association header with
+ * the correct version number. Parse the incoming frame, construct and send
+ * a D2 frame in response.
+ *
+ * It is not clear what to do with most fields in the incoming D1 frame. We
+ * retrieve and discard the information here for now.
+ */
+void wlp_handle_d1_frame(struct work_struct *ws)
+{
+ struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
+ struct wlp_assoc_frame_ctx,
+ ws);
+ struct wlp *wlp = frame_ctx->wlp;
+ struct wlp_wss *wss = &wlp->wss;
+ struct sk_buff *skb = frame_ctx->skb;
+ struct uwb_dev_addr *src = &frame_ctx->src;
+ int result;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_uuid uuid_e;
+ enum wlp_wss_sel_mthd sel_mthd = 0;
+ struct wlp_device_info dev_info;
+ enum wlp_assc_error assc_err;
+ char uuid[WLP_WSS_UUID_STRSIZE];
+ struct sk_buff *resp = NULL;
+
+ /* Parse D1 frame */
+ d_fnstart(6, dev, "WLP: handle D1 frame. wlp = %p, skb = %p\n",
+ wlp, skb);
+ mutex_lock(&wss->mutex);
+ mutex_lock(&wlp->mutex); /* to access wlp->uuid */
+ memset(&dev_info, 0, sizeof(dev_info));
+ result = wlp_parse_d1_frame(wlp, skb, &uuid_e, &sel_mthd, &dev_info,
+ &assc_err);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to parse incoming D1 frame.\n");
+ kfree_skb(skb);
+ goto out;
+ }
+ wlp_wss_uuid_print(uuid, sizeof(uuid), &uuid_e);
+ d_printf(6, dev, "From D1 frame:\n"
+ "UUID-E: %s\n"
+ "Selection method: %d\n"
+ "Device name (%d bytes): %s\n"
+ "Model name (%d bytes): %s\n"
+ "Manufacturer (%d bytes): %s\n"
+ "Model number (%d bytes): %s\n"
+ "Serial number (%d bytes): %s\n"
+ "Primary device type: \n"
+ " Category: %d \n"
+ " OUI: %02x:%02x:%02x \n"
+ " OUI Subdivision: %u \n",
+ uuid, sel_mthd,
+ (int)strlen(dev_info.name), dev_info.name,
+ (int)strlen(dev_info.model_name), dev_info.model_name,
+ (int)strlen(dev_info.manufacturer), dev_info.manufacturer,
+ (int)strlen(dev_info.model_nr), dev_info.model_nr,
+ (int)strlen(dev_info.serial), dev_info.serial,
+ dev_info.prim_dev_type.category,
+ dev_info.prim_dev_type.OUI[0],
+ dev_info.prim_dev_type.OUI[1],
+ dev_info.prim_dev_type.OUI[2],
+ dev_info.prim_dev_type.OUIsubdiv);
+
+ kfree_skb(skb);
+ if (!wlp_uuid_is_set(&wlp->uuid)) {
+ dev_err(dev, "WLP: UUID is not set. Set via sysfs to "
+			"proceed. Responding to D1 message with error F0.\n");
+ result = wlp_build_assoc_f0(wlp, &resp,
+ WLP_ASSOC_ERROR_NOT_READY);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to construct F0 message.\n");
+ goto out;
+ }
+ } else {
+ /* Construct D2 frame */
+ result = wlp_build_assoc_d2(wlp, wss, &resp, &uuid_e);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to construct D2 message.\n");
+ goto out;
+ }
+ }
+ /* Send D2 frame */
+ BUG_ON(wlp->xmit_frame == NULL);
+ result = wlp->xmit_frame(wlp, resp, src);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to transmit D2 association "
+ "message: %d\n", result);
+ if (result == -ENXIO)
+ dev_err(dev, "WLP: Is network interface up? \n");
+ /* We could try again ... */
+ dev_kfree_skb_any(resp); /* we need to free if tx fails */
+ }
+out:
+ kfree(frame_ctx);
+ mutex_unlock(&wlp->mutex);
+ mutex_unlock(&wss->mutex);
+ d_fnend(6, dev, "WLP: handle D1 frame. wlp = %p\n", wlp);
+}
+
+/**
+ * Parse incoming D2 frame, create and populate temporary cache
+ *
+ * @skb: socket buffer in which D2 frame can be found
+ * @neighbor: the neighbor that sent the D2 frame
+ *
+ * Will allocate memory for temporary storage of information learned during
+ * discovery.
+ */
+int wlp_parse_d2_frame_to_cache(struct wlp *wlp, struct sk_buff *skb,
+ struct wlp_neighbor_e *neighbor)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_frame_assoc *d2 = (void *) skb->data;
+ void *ptr = skb->data;
+ size_t len = skb->len;
+ size_t used;
+ ssize_t result;
+ struct wlp_uuid uuid_e;
+ struct wlp_device_info *nb_info;
+ enum wlp_assc_error assc_err;
+
+ used = sizeof(*d2);
+ result = wlp_get_uuid_e(wlp, ptr + used, &uuid_e, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain UUID-E attribute from D2 "
+ "message.\n");
+ goto error_parse;
+ }
+ if (memcmp(&uuid_e, &wlp->uuid, sizeof(uuid_e))) {
+ dev_err(dev, "WLP: UUID-E in incoming D2 does not match "
+ "local UUID sent in D1. \n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_uuid_r(wlp, ptr + used, &neighbor->uuid, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain UUID-R attribute from D2 "
+ "message.\n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_wss_info_to_cache(wlp, ptr + used, neighbor,
+ len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WSS information "
+ "from D2 message.\n");
+ goto error_parse;
+ }
+ used += result;
+ neighbor->info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL);
+ if (neighbor->info == NULL) {
+ dev_err(dev, "WLP: cannot allocate memory to store device "
+ "info.\n");
+ result = -ENOMEM;
+ goto error_parse;
+ }
+ nb_info = neighbor->info;
+ result = wlp_get_dev_name(wlp, ptr + used, nb_info->name,
+ len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain Device Name from D2 "
+ "message.\n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_variable_info(wlp, ptr + used, nb_info, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain Device Information from "
+ "D2 message.\n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WLP Association Error "
+ "Information from D2 message.\n");
+ goto error_parse;
+ }
+ if (assc_err != WLP_ASSOC_ERROR_NONE) {
+ dev_err(dev, "WLP: neighbor device returned association "
+ "error %d\n", assc_err);
+ result = -EINVAL;
+ goto error_parse;
+ }
+ result = 0;
+error_parse:
+ if (result < 0)
+ wlp_remove_neighbor_tmp_info(neighbor);
+ return result;
+}
+
+/**
+ * Parse incoming D2 frame, populate attribute values of WSS being enrolled in
+ *
+ * @wss: our WSS that will be enrolled
+ * @skb: socket buffer in which D2 frame can be found
+ * @neighbor: the neighbor that sent the D2 frame
+ * @wssid: the wssid of the WSS in which we want to enroll
+ *
+ * This forms part of the enrollment sequence. We are trying to enroll in
+ * the WSS with @wssid, using @neighbor as registrar. A D1 message was sent
+ * to @neighbor and now we need to parse the D2 response. The neighbor's
+ * response is searched for the requested WSS and, if it is found and it
+ * accepts enrollment, we store the information.
+ */
+int wlp_parse_d2_frame_to_enroll(struct wlp_wss *wss, struct sk_buff *skb,
+ struct wlp_neighbor_e *neighbor,
+ struct wlp_uuid *wssid)
+{
+ struct wlp *wlp = container_of(wss, struct wlp, wss);
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ void *ptr = skb->data;
+ size_t len = skb->len;
+ size_t used;
+ ssize_t result;
+ struct wlp_uuid uuid_e;
+ struct wlp_uuid uuid_r;
+ struct wlp_device_info nb_info;
+ enum wlp_assc_error assc_err;
+ char uuid_bufA[WLP_WSS_UUID_STRSIZE];
+ char uuid_bufB[WLP_WSS_UUID_STRSIZE];
+
+ used = sizeof(struct wlp_frame_assoc);
+ result = wlp_get_uuid_e(wlp, ptr + used, &uuid_e, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain UUID-E attribute from D2 "
+ "message.\n");
+ goto error_parse;
+ }
+ if (memcmp(&uuid_e, &wlp->uuid, sizeof(uuid_e))) {
+ dev_err(dev, "WLP: UUID-E in incoming D2 does not match "
+ "local UUID sent in D1. \n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_uuid_r(wlp, ptr + used, &uuid_r, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain UUID-R attribute from D2 "
+ "message.\n");
+ goto error_parse;
+ }
+ if (memcmp(&uuid_r, &neighbor->uuid, sizeof(uuid_r))) {
+ wlp_wss_uuid_print(uuid_bufA, sizeof(uuid_bufA),
+ &neighbor->uuid);
+ wlp_wss_uuid_print(uuid_bufB, sizeof(uuid_bufB), &uuid_r);
+ dev_err(dev, "WLP: UUID of neighbor does not match UUID "
+ "learned during discovery. Originally discovered: %s, "
+ "now from D2 message: %s\n", uuid_bufA, uuid_bufB);
+ result = -EINVAL;
+ goto error_parse;
+ }
+ used += result;
+ wss->wssid = *wssid;
+ result = wlp_get_wss_info_to_enroll(wlp, ptr + used, wss, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WSS information "
+ "from D2 message.\n");
+ goto error_parse;
+ }
+ if (wss->state != WLP_WSS_STATE_PART_ENROLLED) {
+ dev_err(dev, "WLP: D2 message did not contain information "
+ "for successful enrollment. \n");
+ result = -EINVAL;
+ goto error_parse;
+ }
+ used += result;
+ /* Place device information on stack to continue parsing of message */
+ result = wlp_get_dev_name(wlp, ptr + used, nb_info.name,
+ len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain Device Name from D2 "
+ "message.\n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_variable_info(wlp, ptr + used, &nb_info, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain Device Information from "
+ "D2 message.\n");
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WLP Association Error "
+ "Information from D2 message.\n");
+ goto error_parse;
+ }
+ if (assc_err != WLP_ASSOC_ERROR_NONE) {
+ dev_err(dev, "WLP: neighbor device returned association "
+ "error %d\n", assc_err);
+ if (wss->state == WLP_WSS_STATE_PART_ENROLLED) {
+ dev_err(dev, "WLP: Enrolled in WSS (should not "
+ "happen according to spec). Undoing. \n");
+ wlp_wss_reset(wss);
+ }
+ result = -EINVAL;
+ goto error_parse;
+ }
+ result = 0;
+error_parse:
+ return result;
+}
+
+/**
+ * Parse C3/C4 frame into provided variables
+ *
+ * @wssid: will point to copy of wssid retrieved from C3/C4 frame
+ * @tag: will point to copy of tag retrieved from C3/C4 frame
+ * @virt_addr: will point to copy of virtual address retrieved from C3/C4
+ * frame.
+ *
+ * Calling function has to allocate memory for these values.
+ *
+ * The skb contains a valid C3/C4 frame; return the individual fields of
+ * this frame in the provided variables.
+ */
+int wlp_parse_c3c4_frame(struct wlp *wlp, struct sk_buff *skb,
+ struct wlp_uuid *wssid, u8 *tag,
+ struct uwb_mac_addr *virt_addr)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result;
+ void *ptr = skb->data;
+ size_t len = skb->len;
+ size_t used;
+ char buf[WLP_WSS_UUID_STRSIZE];
+ struct wlp_frame_assoc *assoc = ptr;
+
+ d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb);
+ used = sizeof(*assoc);
+ result = wlp_get_wssid(wlp, ptr + used, wssid, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WSSID attribute from "
+ "%s message.\n", wlp_assoc_frame_str(assoc->type));
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_wss_tag(wlp, ptr + used, tag, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WSS tag attribute from "
+ "%s message.\n", wlp_assoc_frame_str(assoc->type));
+ goto error_parse;
+ }
+ used += result;
+ result = wlp_get_wss_virt(wlp, ptr + used, virt_addr, len - used);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WSS virtual address "
+ "attribute from %s message.\n",
+ wlp_assoc_frame_str(assoc->type));
+ goto error_parse;
+ }
+ wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+ d_printf(6, dev, "WLP: parsed: WSSID %s, tag 0x%02x, virt "
+ "%02x:%02x:%02x:%02x:%02x:%02x \n", buf, *tag,
+ virt_addr->data[0], virt_addr->data[1], virt_addr->data[2],
+ virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]);
+
+error_parse:
+ d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result);
+ return result;
+}
+
+/**
+ * Allocate memory for and populate fields of C1 or C2 association frame
+ *
+ * The C1 and C2 association frames appear identical - except for the type.
+ */
+static
+int wlp_build_assoc_c1c2(struct wlp *wlp, struct wlp_wss *wss,
+ struct sk_buff **skb, enum wlp_assoc_type type)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result = -ENOMEM;
+ struct {
+ struct wlp_frame_assoc c_hdr;
+ struct wlp_attr_wssid wssid;
+ } *c;
+ struct sk_buff *_skb;
+
+ d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss);
+ _skb = dev_alloc_skb(sizeof(*c));
+ if (_skb == NULL) {
+ dev_err(dev, "WLP: Unable to allocate memory for C1/C2 "
+ "association frame. \n");
+ goto error_alloc;
+ }
+ c = (void *) _skb->data;
+ d_printf(6, dev, "C1/C2 starts at %p \n", c);
+ c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
+ c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION;
+ c->c_hdr.type = type;
+ wlp_set_version(&c->c_hdr.version, WLP_VERSION);
+ wlp_set_msg_type(&c->c_hdr.msg_type, type);
+ wlp_set_wssid(&c->wssid, &wss->wssid);
+ skb_put(_skb, sizeof(*c));
+ d_printf(6, dev, "C1/C2 message:\n");
+ d_dump(6, dev, c, sizeof(*c));
+ *skb = _skb;
+ result = 0;
+error_alloc:
+ d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result);
+ return result;
+}
+
+
+static
+int wlp_build_assoc_c1(struct wlp *wlp, struct wlp_wss *wss,
+ struct sk_buff **skb)
+{
+ return wlp_build_assoc_c1c2(wlp, wss, skb, WLP_ASSOC_C1);
+}
+
+static
+int wlp_build_assoc_c2(struct wlp *wlp, struct wlp_wss *wss,
+ struct sk_buff **skb)
+{
+ return wlp_build_assoc_c1c2(wlp, wss, skb, WLP_ASSOC_C2);
+}
+
+
+/**
+ * Allocate memory for and populate fields of C3 or C4 association frame
+ *
+ * The C3 and C4 association frames appear identical - except for the type.
+ */
+static
+int wlp_build_assoc_c3c4(struct wlp *wlp, struct wlp_wss *wss,
+ struct sk_buff **skb, enum wlp_assoc_type type)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result = -ENOMEM;
+ struct {
+ struct wlp_frame_assoc c_hdr;
+ struct wlp_attr_wssid wssid;
+ struct wlp_attr_wss_tag wss_tag;
+ struct wlp_attr_wss_virt wss_virt;
+ } *c;
+ struct sk_buff *_skb;
+
+ d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss);
+ _skb = dev_alloc_skb(sizeof(*c));
+ if (_skb == NULL) {
+ dev_err(dev, "WLP: Unable to allocate memory for C3/C4 "
+ "association frame. \n");
+ goto error_alloc;
+ }
+ c = (void *) _skb->data;
+ d_printf(6, dev, "C3/C4 starts at %p \n", c);
+ c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
+ c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION;
+ c->c_hdr.type = type;
+ wlp_set_version(&c->c_hdr.version, WLP_VERSION);
+ wlp_set_msg_type(&c->c_hdr.msg_type, type);
+ wlp_set_wssid(&c->wssid, &wss->wssid);
+ wlp_set_wss_tag(&c->wss_tag, wss->tag);
+ wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr);
+ skb_put(_skb, sizeof(*c));
+ d_printf(6, dev, "C3/C4 message:\n");
+ d_dump(6, dev, c, sizeof(*c));
+ *skb = _skb;
+ result = 0;
+error_alloc:
+ d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result);
+ return result;
+}
+
+static
+int wlp_build_assoc_c3(struct wlp *wlp, struct wlp_wss *wss,
+ struct sk_buff **skb)
+{
+ return wlp_build_assoc_c3c4(wlp, wss, skb, WLP_ASSOC_C3);
+}
+
+static
+int wlp_build_assoc_c4(struct wlp *wlp, struct wlp_wss *wss,
+ struct sk_buff **skb)
+{
+ return wlp_build_assoc_c3c4(wlp, wss, skb, WLP_ASSOC_C4);
+}
+
+
+#define wlp_send_assoc(type, id) \
+static int wlp_send_assoc_##type(struct wlp *wlp, struct wlp_wss *wss, \
+ struct uwb_dev_addr *dev_addr) \
+{ \
+ struct device *dev = &wlp->rc->uwb_dev.dev; \
+ int result; \
+ struct sk_buff *skb = NULL; \
+ d_fnstart(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \
+ wlp, wss, dev_addr->data[1], dev_addr->data[0]); \
+ d_printf(6, dev, "WLP: Constructing %s frame. \n", \
+ wlp_assoc_frame_str(id)); \
+ /* Build the frame */ \
+ result = wlp_build_assoc_##type(wlp, wss, &skb); \
+ if (result < 0) { \
+ dev_err(dev, "WLP: Unable to construct %s association " \
+ "frame: %d\n", wlp_assoc_frame_str(id), result);\
+ goto error_build_assoc; \
+ } \
+ /* Send the frame */ \
+ d_printf(6, dev, "Transmitting %s frame to %02x:%02x \n", \
+ wlp_assoc_frame_str(id), \
+ dev_addr->data[1], dev_addr->data[0]); \
+ BUG_ON(wlp->xmit_frame == NULL); \
+ result = wlp->xmit_frame(wlp, skb, dev_addr); \
+ if (result < 0) { \
+ dev_err(dev, "WLP: Unable to transmit %s association " \
+ "message: %d\n", wlp_assoc_frame_str(id), \
+ result); \
+ if (result == -ENXIO) \
+ dev_err(dev, "WLP: Is network interface " \
+ "up? \n"); \
+ goto error_xmit; \
+ } \
+ return 0; \
+error_xmit: \
+ /* We could try again ... */ \
+ dev_kfree_skb_any(skb);/*we need to free if tx fails*/ \
+error_build_assoc: \
+ d_fnend(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \
+ wlp, wss, dev_addr->data[1], dev_addr->data[0]); \
+ return result; \
+}
+
+wlp_send_assoc(d1, WLP_ASSOC_D1)
+wlp_send_assoc(c1, WLP_ASSOC_C1)
+wlp_send_assoc(c3, WLP_ASSOC_C3)
+
+int wlp_send_assoc_frame(struct wlp *wlp, struct wlp_wss *wss,
+ struct uwb_dev_addr *dev_addr,
+ enum wlp_assoc_type type)
+{
+ int result = 0;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ switch (type) {
+ case WLP_ASSOC_D1:
+ result = wlp_send_assoc_d1(wlp, wss, dev_addr);
+ break;
+ case WLP_ASSOC_C1:
+ result = wlp_send_assoc_c1(wlp, wss, dev_addr);
+ break;
+ case WLP_ASSOC_C3:
+ result = wlp_send_assoc_c3(wlp, wss, dev_addr);
+ break;
+ default:
+ dev_err(dev, "WLP: Received request to send unknown "
+ "association message.\n");
+ result = -EINVAL;
+ break;
+ }
+ return result;
+}
+
+/**
+ * Handle incoming C1 frame
+ *
+ * The frame has already been verified to contain an Association header with
+ * the correct version number. Parse the incoming frame, construct and send
+ * a C2 frame in response.
+ */
+void wlp_handle_c1_frame(struct work_struct *ws)
+{
+ struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
+ struct wlp_assoc_frame_ctx,
+ ws);
+ struct wlp *wlp = frame_ctx->wlp;
+ struct wlp_wss *wss = &wlp->wss;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_frame_assoc *c1 = (void *) frame_ctx->skb->data;
+ unsigned int len = frame_ctx->skb->len;
+ struct uwb_dev_addr *src = &frame_ctx->src;
+ int result;
+ struct wlp_uuid wssid;
+ char buf[WLP_WSS_UUID_STRSIZE];
+ struct sk_buff *resp = NULL;
+
+ /* Parse C1 frame */
+ d_fnstart(6, dev, "WLP: handle C1 frame. wlp = %p, c1 = %p\n",
+ wlp, c1);
+ mutex_lock(&wss->mutex);
+ result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid,
+ len - sizeof(*c1));
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n");
+ goto out;
+ }
+ wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
+ d_printf(6, dev, "Received C1 frame with WSSID %s \n", buf);
+ if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))
+ && wss->state == WLP_WSS_STATE_ACTIVE) {
+ d_printf(6, dev, "WSSID from C1 frame is known locally "
+ "and is active\n");
+ /* Construct C2 frame */
+ result = wlp_build_assoc_c2(wlp, wss, &resp);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to construct C2 message.\n");
+ goto out;
+ }
+ } else {
+ d_printf(6, dev, "WSSID from C1 frame is not known locally "
+ "or is not active\n");
+ /* Construct F0 frame */
+ result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to construct F0 message.\n");
+ goto out;
+ }
+ }
+ /* Send C2 frame */
+ d_printf(6, dev, "Transmitting response (C2/F0) frame to %02x:%02x \n",
+ src->data[1], src->data[0]);
+ BUG_ON(wlp->xmit_frame == NULL);
+ result = wlp->xmit_frame(wlp, resp, src);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to transmit response association "
+ "message: %d\n", result);
+ if (result == -ENXIO)
+ dev_err(dev, "WLP: Is network interface up? \n");
+ /* We could try again ... */
+ dev_kfree_skb_any(resp); /* we need to free if tx fails */
+ }
+out:
+ kfree_skb(frame_ctx->skb);
+ kfree(frame_ctx);
+ mutex_unlock(&wss->mutex);
+ d_fnend(6, dev, "WLP: handle C1 frame. wlp = %p\n", wlp);
+}
+
+/**
+ * Handle incoming C3 frame
+ *
+ * The frame has already been verified to contain an Association header with
+ * the correct version number. Parse the incoming frame, construct and send
+ * a C4 frame in response. If the C3 frame identifies a WSS that is locally
+ * active then we connect to this neighbor (add it to our EDA cache).
+ */
+void wlp_handle_c3_frame(struct work_struct *ws)
+{
+ struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
+ struct wlp_assoc_frame_ctx,
+ ws);
+ struct wlp *wlp = frame_ctx->wlp;
+ struct wlp_wss *wss = &wlp->wss;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct sk_buff *skb = frame_ctx->skb;
+ struct uwb_dev_addr *src = &frame_ctx->src;
+ int result;
+ char buf[WLP_WSS_UUID_STRSIZE];
+ struct sk_buff *resp = NULL;
+ struct wlp_uuid wssid;
+ u8 tag;
+ struct uwb_mac_addr virt_addr;
+
+ /* Parse C3 frame */
+ d_fnstart(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n",
+ wlp, skb);
+ mutex_lock(&wss->mutex);
+ result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr);
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain values from C3 frame.\n");
+ goto out;
+ }
+ wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
+ d_printf(6, dev, "Received C3 frame with WSSID %s \n", buf);
+ if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))
+ && wss->state >= WLP_WSS_STATE_ACTIVE) {
+ d_printf(6, dev, "WSSID from C3 frame is known locally "
+ "and is active\n");
+ result = wlp_eda_update_node(&wlp->eda, src, wss,
+ (void *) virt_addr.data, tag,
+ WLP_WSS_CONNECTED);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to update EDA cache "
+ "with new connected neighbor information.\n");
+ result = wlp_build_assoc_f0(wlp, &resp,
+ WLP_ASSOC_ERROR_INT);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to construct F0 "
+ "message.\n");
+ goto out;
+ }
+ } else {
+ wss->state = WLP_WSS_STATE_CONNECTED;
+ /* Construct C4 frame */
+ result = wlp_build_assoc_c4(wlp, wss, &resp);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to construct C4 "
+ "message.\n");
+ goto out;
+ }
+ }
+ } else {
+ d_printf(6, dev, "WSSID from C3 frame is not known locally "
+ "or is not active\n");
+ /* Construct F0 frame */
+ result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to construct F0 message.\n");
+ goto out;
+ }
+ }
+ /* Send C4 frame */
+ d_printf(6, dev, "Transmitting response (C4/F0) frame to %02x:%02x \n",
+ src->data[1], src->data[0]);
+ BUG_ON(wlp->xmit_frame == NULL);
+ result = wlp->xmit_frame(wlp, resp, src);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to transmit response association "
+ "message: %d\n", result);
+ if (result == -ENXIO)
+ dev_err(dev, "WLP: Is network interface up? \n");
+ /* We could try again ... */
+ dev_kfree_skb_any(resp); /* we need to free if tx fails */
+ }
+out:
+ kfree_skb(frame_ctx->skb);
+ kfree(frame_ctx);
+ mutex_unlock(&wss->mutex);
+ d_fnend(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n",
+ wlp, skb);
+}
+
+
diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
new file mode 100644
index 00000000000..1bb9b1f97d4
--- /dev/null
+++ b/drivers/uwb/wlp/sysfs.c
@@ -0,0 +1,709 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ * sysfs functions
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: Docs
+ *
+ */
+
+#include <linux/wlp.h>
+#include "wlp-internal.h"
+
+static
+size_t wlp_wss_wssid_e_print(char *buf, size_t bufsize,
+ struct wlp_wssid_e *wssid_e)
+{
+ size_t used = 0;
+ used += scnprintf(buf, bufsize, " WSS: ");
+ used += wlp_wss_uuid_print(buf + used, bufsize - used,
+ &wssid_e->wssid);
+
+ if (wssid_e->info != NULL) {
+ used += scnprintf(buf + used, bufsize - used, " ");
+ used += uwb_mac_addr_print(buf + used, bufsize - used,
+ &wssid_e->info->bcast);
+ used += scnprintf(buf + used, bufsize - used, " %u %u %s\n",
+ wssid_e->info->accept_enroll,
+ wssid_e->info->sec_status,
+ wssid_e->info->name);
+ }
+ return used;
+}
+
+/**
+ * Print out information learned from neighbor discovery
+ *
+ * Some fields being printed may not be included in the device discovery
+ * information (they are not mandatory). We are thus careful how the
+ * information is printed to ensure it is clear to the user which field is
+ * being referenced.
+ * The information being printed is for one-time use - temporary storage is
+ * cleaned after it is printed.
+ *
+ * Ideally sysfs output should be on one line. The information printed here
+ * contains a few strings, so it would be hard to parse if it were all
+ * printed on the same line - without agreeing on a standard field
+ * separator.
+ */
+static
+ssize_t wlp_wss_neighborhood_print_remove(struct wlp *wlp, char *buf,
+ size_t bufsize)
+{
+ size_t used = 0;
+ struct wlp_neighbor_e *neighb;
+ struct wlp_wssid_e *wssid_e;
+
+ mutex_lock(&wlp->nbmutex);
+ used = scnprintf(buf, bufsize, "#Neighbor information\n"
+ "#uuid dev_addr\n"
+ "# Device Name:\n# Model Name:\n# Manufacturer:\n"
+ "# Model Nr:\n# Serial:\n"
+ "# Pri Dev type: CategoryID OUI OUISubdiv "
+ "SubcategoryID\n"
+ "# WSS: WSSID WSS_name accept_enroll sec_status "
+ "bcast\n"
+ "# WSS: WSSID WSS_name accept_enroll sec_status "
+ "bcast\n\n");
+ list_for_each_entry(neighb, &wlp->neighbors, node) {
+ if (bufsize - used <= 0)
+ goto out;
+ used += wlp_wss_uuid_print(buf + used, bufsize - used,
+ &neighb->uuid);
+ buf[used++] = ' ';
+ used += uwb_dev_addr_print(buf + used, bufsize - used,
+ &neighb->uwb_dev->dev_addr);
+ if (neighb->info != NULL)
+ used += scnprintf(buf + used, bufsize - used,
+ "\n Device Name: %s\n"
+ " Model Name: %s\n"
+ " Manufacturer:%s \n"
+ " Model Nr: %s\n"
+ " Serial: %s\n"
+ " Pri Dev type: "
+ "%u %02x:%02x:%02x %u %u\n",
+ neighb->info->name,
+ neighb->info->model_name,
+ neighb->info->manufacturer,
+ neighb->info->model_nr,
+ neighb->info->serial,
+ neighb->info->prim_dev_type.category,
+ neighb->info->prim_dev_type.OUI[0],
+ neighb->info->prim_dev_type.OUI[1],
+ neighb->info->prim_dev_type.OUI[2],
+ neighb->info->prim_dev_type.OUIsubdiv,
+ neighb->info->prim_dev_type.subID);
+ list_for_each_entry(wssid_e, &neighb->wssid, node) {
+ used += wlp_wss_wssid_e_print(buf + used,
+ bufsize - used,
+ wssid_e);
+ }
+ buf[used++] = '\n';
+ wlp_remove_neighbor_tmp_info(neighb);
+ }
+
+
+out:
+ mutex_unlock(&wlp->nbmutex);
+ return used;
+}
+
+
+/**
+ * Show properties of all WSS in neighborhood.
+ *
+ * Will trigger a complete discovery of WSS activated by this device and
+ * its neighbors.
+ */
+ssize_t wlp_neighborhood_show(struct wlp *wlp, char *buf)
+{
+ wlp_discover(wlp);
+ return wlp_wss_neighborhood_print_remove(wlp, buf, PAGE_SIZE);
+}
+EXPORT_SYMBOL_GPL(wlp_neighborhood_show);
+
+static
+ssize_t __wlp_wss_properties_show(struct wlp_wss *wss, char *buf,
+ size_t bufsize)
+{
+ ssize_t result;
+
+ result = wlp_wss_uuid_print(buf, bufsize, &wss->wssid);
+ result += scnprintf(buf + result, bufsize - result, " ");
+ result += uwb_mac_addr_print(buf + result, bufsize - result,
+ &wss->bcast);
+ result += scnprintf(buf + result, bufsize - result,
+ " 0x%02x %u ", wss->hash, wss->secure_status);
+ result += wlp_wss_key_print(buf + result, bufsize - result,
+ wss->master_key);
+ result += scnprintf(buf + result, bufsize - result, " 0x%02x ",
+ wss->tag);
+ result += uwb_mac_addr_print(buf + result, bufsize - result,
+ &wss->virtual_addr);
+ result += scnprintf(buf + result, bufsize - result, " %s", wss->name);
+ result += scnprintf(buf + result, bufsize - result,
+ "\n\n#WSSID\n#WSS broadcast address\n"
+ "#WSS hash\n#WSS secure status\n"
+ "#WSS master key\n#WSS local tag\n"
+ "#WSS local virtual EUI-48\n#WSS name\n");
+ return result;
+}
+
+/**
+ * Show which WSS is activated.
+ */
+ssize_t wlp_wss_activate_show(struct wlp_wss *wss, char *buf)
+{
+ int result = 0;
+
+ if (mutex_lock_interruptible(&wss->mutex))
+ goto out;
+ if (wss->state >= WLP_WSS_STATE_ACTIVE)
+ result = __wlp_wss_properties_show(wss, buf, PAGE_SIZE);
+ else
+ result = scnprintf(buf, PAGE_SIZE, "No local WSS active.\n");
+ result += scnprintf(buf + result, PAGE_SIZE - result,
+ "\n\n"
+ "# echo WSSID SECURE_STATUS ACCEPT_ENROLLMENT "
+ "NAME #create new WSS\n"
+ "# echo WSSID [DEV ADDR] #enroll in and activate "
+ "existing WSS, can request registrar\n"
+ "#\n"
+ "# WSSID is a 16 byte hex array. Eg. 12 A3 3B ... \n"
+ "# SECURE_STATUS 0 - unsecure, 1 - secure (default)\n"
+ "# ACCEPT_ENROLLMENT 0 - no, 1 - yes (default)\n"
+ "# NAME is the text string identifying the WSS\n"
+ "# DEV ADDR is the device address of neighbor "
+ "that should be registrar. Eg. 32:AB\n");
+
+ mutex_unlock(&wss->mutex);
+out:
+ return result;
+
+}
+EXPORT_SYMBOL_GPL(wlp_wss_activate_show);
+
+/**
+ * Create/activate a new WSS or enroll/activate in neighboring WSS
+ *
+ * The user can provide the WSSID of a WSS in which it wants to enroll.
+ * Only the WSSID is necessary if the WSS has been discovered before. If
+ * the WSS has not been discovered before, or the user wants to use a
+ * particular neighbor as its registrar, then the user can also provide the
+ * device address of the neighbor that will be used as registrar.
+ *
+ * A new WSS is created when the user provides a WSSID, secure status, and
+ * WSS name.
+ */
+ssize_t wlp_wss_activate_store(struct wlp_wss *wss,
+ const char *buf, size_t size)
+{
+ ssize_t result = -EINVAL;
+ struct wlp_uuid wssid;
+ struct uwb_dev_addr dev;
+ struct uwb_dev_addr bcast = {.data = {0xff, 0xff} };
+ char name[65];
+ unsigned sec_status, accept;
+ memset(name, 0, sizeof(name));
+ result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx:%02hhx",
+ &wssid.data[0] , &wssid.data[1],
+ &wssid.data[2] , &wssid.data[3],
+ &wssid.data[4] , &wssid.data[5],
+ &wssid.data[6] , &wssid.data[7],
+ &wssid.data[8] , &wssid.data[9],
+ &wssid.data[10], &wssid.data[11],
+ &wssid.data[12], &wssid.data[13],
+ &wssid.data[14], &wssid.data[15],
+ &dev.data[1], &dev.data[0]);
+ if (result == 16 || result == 17) {
+ result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%u %u %64c",
+ &wssid.data[0] , &wssid.data[1],
+ &wssid.data[2] , &wssid.data[3],
+ &wssid.data[4] , &wssid.data[5],
+ &wssid.data[6] , &wssid.data[7],
+ &wssid.data[8] , &wssid.data[9],
+ &wssid.data[10], &wssid.data[11],
+ &wssid.data[12], &wssid.data[13],
+ &wssid.data[14], &wssid.data[15],
+ &sec_status, &accept, name);
+ if (result == 16)
+ result = wlp_wss_enroll_activate(wss, &wssid, &bcast);
+ else if (result == 19) {
+ sec_status = sec_status == 0 ? 0 : 1;
+ accept = accept == 0 ? 0 : 1;
+ /* We read name using %c, so the newline needs to be
+ * removed */
+ if (strlen(name) != sizeof(name) - 1)
+ name[strlen(name) - 1] = '\0';
+ result = wlp_wss_create_activate(wss, &wssid, name,
+ sec_status, accept);
+ } else
+ result = -EINVAL;
+ } else if (result == 18)
+ result = wlp_wss_enroll_activate(wss, &wssid, &dev);
+ else
+ result = -EINVAL;
+ return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_wss_activate_store);
+
+/**
+ * Show the UUID of this host
+ */
+ssize_t wlp_uuid_show(struct wlp *wlp, char *buf)
+{
+ ssize_t result = 0;
+
+ mutex_lock(&wlp->mutex);
+ result = wlp_wss_uuid_print(buf, PAGE_SIZE, &wlp->uuid);
+ buf[result++] = '\n';
+ mutex_unlock(&wlp->mutex);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wlp_uuid_show);
+
+/**
+ * Store a new UUID for this host
+ *
+ * According to the spec this should be encoded as an octet string in the
+ * order the octets are shown in string representation in RFC 4122 (WLP
+ * 0.99 [Table 6])
+ *
+ * We do not validate the value provided by the user.
+ */
+ssize_t wlp_uuid_store(struct wlp *wlp, const char *buf, size_t size)
+{
+ ssize_t result;
+ struct wlp_uuid uuid;
+
+ mutex_lock(&wlp->mutex);
+ result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx ",
+ &uuid.data[0] , &uuid.data[1],
+ &uuid.data[2] , &uuid.data[3],
+ &uuid.data[4] , &uuid.data[5],
+ &uuid.data[6] , &uuid.data[7],
+ &uuid.data[8] , &uuid.data[9],
+ &uuid.data[10], &uuid.data[11],
+ &uuid.data[12], &uuid.data[13],
+ &uuid.data[14], &uuid.data[15]);
+ if (result != 16) {
+ result = -EINVAL;
+ goto error;
+ }
+ wlp->uuid = uuid;
+error:
+ mutex_unlock(&wlp->mutex);
+ return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_uuid_store);
+
+/**
+ * Show contents of members of device information structure
+ */
+#define wlp_dev_info_show(type) \
+ssize_t wlp_dev_##type##_show(struct wlp *wlp, char *buf) \
+{ \
+ ssize_t result = 0; \
+ mutex_lock(&wlp->mutex); \
+ if (wlp->dev_info == NULL) { \
+ result = __wlp_setup_device_info(wlp); \
+ if (result < 0) \
+ goto out; \
+ } \
+ result = scnprintf(buf, PAGE_SIZE, "%s\n", wlp->dev_info->type);\
+out: \
+ mutex_unlock(&wlp->mutex); \
+ return result; \
+} \
+EXPORT_SYMBOL_GPL(wlp_dev_##type##_show);
+
+wlp_dev_info_show(name)
+wlp_dev_info_show(model_name)
+wlp_dev_info_show(model_nr)
+wlp_dev_info_show(manufacturer)
+wlp_dev_info_show(serial)
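Editor's note: the macro above token-pastes one exported sysfs show routine per device-information member. As an illustration only, the hand expansion for the name member is sketched here; the real functions come from the macro instantiations above, not from this sketch.

ssize_t wlp_dev_name_show(struct wlp *wlp, char *buf)
{
	ssize_t result = 0;

	mutex_lock(&wlp->mutex);
	if (wlp->dev_info == NULL) {
		result = __wlp_setup_device_info(wlp);
		if (result < 0)
			goto out;
	}
	/* print the fixed-size name member of struct wlp_device_info */
	result = scnprintf(buf, PAGE_SIZE, "%s\n", wlp->dev_info->name);
out:
	mutex_unlock(&wlp->mutex);
	return result;
}
EXPORT_SYMBOL_GPL(wlp_dev_name_show);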
+
+/**
+ * Store contents of members of device information structure
+ */
+#define wlp_dev_info_store(type, len) \
+ssize_t wlp_dev_##type##_store(struct wlp *wlp, const char *buf, size_t size)\
+{ \
+ ssize_t result; \
+ char format[10]; \
+ mutex_lock(&wlp->mutex); \
+ if (wlp->dev_info == NULL) { \
+ result = __wlp_alloc_device_info(wlp); \
+ if (result < 0) \
+ goto out; \
+ } \
+ memset(wlp->dev_info->type, 0, sizeof(wlp->dev_info->type)); \
+ sprintf(format, "%%%uc", len); \
+ result = sscanf(buf, format, wlp->dev_info->type); \
+out: \
+ mutex_unlock(&wlp->mutex); \
+ return result < 0 ? result : size; \
+} \
+EXPORT_SYMBOL_GPL(wlp_dev_##type##_store);
+
+wlp_dev_info_store(name, 32)
+wlp_dev_info_store(manufacturer, 64)
+wlp_dev_info_store(model_name, 32)
+wlp_dev_info_store(model_nr, 32)
+wlp_dev_info_store(serial, 32)
+
+static
+const char *__wlp_dev_category[] = {
+ [WLP_DEV_CAT_COMPUTER] = "Computer",
+ [WLP_DEV_CAT_INPUT] = "Input device",
+ [WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER] = "Printer, scanner, FAX, or "
+ "Copier",
+ [WLP_DEV_CAT_CAMERA] = "Camera",
+ [WLP_DEV_CAT_STORAGE] = "Storage Network",
+ [WLP_DEV_CAT_INFRASTRUCTURE] = "Infrastructure",
+ [WLP_DEV_CAT_DISPLAY] = "Display",
+ [WLP_DEV_CAT_MULTIM] = "Multimedia device",
+ [WLP_DEV_CAT_GAMING] = "Gaming device",
+ [WLP_DEV_CAT_TELEPHONE] = "Telephone",
+ [WLP_DEV_CAT_OTHER] = "Other",
+};
+
+static
+const char *wlp_dev_category_str(unsigned cat)
+{
+ if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE)
+ || cat == WLP_DEV_CAT_OTHER)
+ return __wlp_dev_category[cat];
+ return "unknown category";
+}
+
+ssize_t wlp_dev_prim_category_show(struct wlp *wlp, char *buf)
+{
+ ssize_t result = 0;
+ mutex_lock(&wlp->mutex);
+ if (wlp->dev_info == NULL) {
+ result = __wlp_setup_device_info(wlp);
+ if (result < 0)
+ goto out;
+ }
+ result = scnprintf(buf, PAGE_SIZE, "%s\n",
+ wlp_dev_category_str(wlp->dev_info->prim_dev_type.category));
+out:
+ mutex_unlock(&wlp->mutex);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_category_show);
+
+ssize_t wlp_dev_prim_category_store(struct wlp *wlp, const char *buf,
+ size_t size)
+{
+ ssize_t result;
+ u16 cat;
+ mutex_lock(&wlp->mutex);
+ if (wlp->dev_info == NULL) {
+ result = __wlp_alloc_device_info(wlp);
+ if (result < 0)
+ goto out;
+ }
+ result = sscanf(buf, "%hu", &cat);
+ if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE)
+ || cat == WLP_DEV_CAT_OTHER)
+ wlp->dev_info->prim_dev_type.category = cat;
+ else
+ result = -EINVAL;
+out:
+ mutex_unlock(&wlp->mutex);
+ return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_category_store);
+
+ssize_t wlp_dev_prim_OUI_show(struct wlp *wlp, char *buf)
+{
+ ssize_t result = 0;
+ mutex_lock(&wlp->mutex);
+ if (wlp->dev_info == NULL) {
+ result = __wlp_setup_device_info(wlp);
+ if (result < 0)
+ goto out;
+ }
+ result = scnprintf(buf, PAGE_SIZE, "%02x:%02x:%02x\n",
+ wlp->dev_info->prim_dev_type.OUI[0],
+ wlp->dev_info->prim_dev_type.OUI[1],
+ wlp->dev_info->prim_dev_type.OUI[2]);
+out:
+ mutex_unlock(&wlp->mutex);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_show);
+
+ssize_t wlp_dev_prim_OUI_store(struct wlp *wlp, const char *buf, size_t size)
+{
+ ssize_t result;
+ u8 OUI[3];
+ mutex_lock(&wlp->mutex);
+ if (wlp->dev_info == NULL) {
+ result = __wlp_alloc_device_info(wlp);
+ if (result < 0)
+ goto out;
+ }
+ result = sscanf(buf, "%hhx:%hhx:%hhx",
+ &OUI[0], &OUI[1], &OUI[2]);
+ if (result != 3) {
+ result = -EINVAL;
+ goto out;
+ } else
+ memcpy(wlp->dev_info->prim_dev_type.OUI, OUI, sizeof(OUI));
+out:
+ mutex_unlock(&wlp->mutex);
+ return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_store);
+
+
+ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *wlp, char *buf)
+{
+ ssize_t result = 0;
+ mutex_lock(&wlp->mutex);
+ if (wlp->dev_info == NULL) {
+ result = __wlp_setup_device_info(wlp);
+ if (result < 0)
+ goto out;
+ }
+ result = scnprintf(buf, PAGE_SIZE, "%u\n",
+ wlp->dev_info->prim_dev_type.OUIsubdiv);
+out:
+ mutex_unlock(&wlp->mutex);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_sub_show);
+
+ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *wlp, const char *buf,
+ size_t size)
+{
+ ssize_t result;
+ unsigned sub;
+ u8 max_sub = ~0;
+ mutex_lock(&wlp->mutex);
+ if (wlp->dev_info == NULL) {
+ result = __wlp_alloc_device_info(wlp);
+ if (result < 0)
+ goto out;
+ }
+ result = sscanf(buf, "%u", &sub);
+ if (sub <= max_sub)
+ wlp->dev_info->prim_dev_type.OUIsubdiv = sub;
+ else
+ result = -EINVAL;
+out:
+ mutex_unlock(&wlp->mutex);
+ return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_sub_store);
+
+ssize_t wlp_dev_prim_subcat_show(struct wlp *wlp, char *buf)
+{
+ ssize_t result = 0;
+ mutex_lock(&wlp->mutex);
+ if (wlp->dev_info == NULL) {
+ result = __wlp_setup_device_info(wlp);
+ if (result < 0)
+ goto out;
+ }
+ result = scnprintf(buf, PAGE_SIZE, "%u\n",
+ wlp->dev_info->prim_dev_type.subID);
+out:
+ mutex_unlock(&wlp->mutex);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_subcat_show);
+
+ssize_t wlp_dev_prim_subcat_store(struct wlp *wlp, const char *buf,
+ size_t size)
+{
+ ssize_t result;
+ unsigned sub;
+ __le16 max_sub = ~0;
+ mutex_lock(&wlp->mutex);
+ if (wlp->dev_info == NULL) {
+ result = __wlp_alloc_device_info(wlp);
+ if (result < 0)
+ goto out;
+ }
+ result = sscanf(buf, "%u", &sub);
+ if (sub <= max_sub)
+ wlp->dev_info->prim_dev_type.subID = sub;
+ else
+ result = -EINVAL;
+out:
+ mutex_unlock(&wlp->mutex);
+ return result < 0 ? result : size;
+}
+EXPORT_SYMBOL_GPL(wlp_dev_prim_subcat_store);
+
+/**
+ * Subsystem implementation for interaction with individual WSS via sysfs
+ *
+ * Follows the instructions for subsystems in Documentation/filesystems/sysfs.txt
+ */
+
+#define kobj_to_wlp_wss(obj) container_of(obj, struct wlp_wss, kobj)
+#define attr_to_wlp_wss_attr(_attr) \
+ container_of(_attr, struct wlp_wss_attribute, attr)
+
+/**
+ * Sysfs subsystem: forward read calls
+ *
+ * Sysfs operation for forwarding read call to the show method of the
+ * attribute owner
+ */
+static
+ssize_t wlp_wss_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr);
+ struct wlp_wss *wss = kobj_to_wlp_wss(kobj);
+ ssize_t ret = -EIO;
+
+ if (wss_attr->show)
+ ret = wss_attr->show(wss, buf);
+ return ret;
+}
+/**
+ * Sysfs subsystem: forward write calls
+ *
+ * Sysfs operation for forwarding write call to the store method of the
+ * attribute owner
+ */
+static
+ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr);
+ struct wlp_wss *wss = kobj_to_wlp_wss(kobj);
+ ssize_t ret = -EIO;
+
+ if (wss_attr->store)
+ ret = wss_attr->store(wss, buf, count);
+ return ret;
+}
+
+static
+struct sysfs_ops wss_sysfs_ops = {
+ .show = wlp_wss_attr_show,
+ .store = wlp_wss_attr_store,
+};
+
+struct kobj_type wss_ktype = {
+ .release = wlp_wss_release,
+ .sysfs_ops = &wss_sysfs_ops,
+};
+
+
+/**
+ * Sysfs files for individual WSS
+ */
+
+/**
+ * Print static properties of this WSS
+ *
+ * The name of a WSS may not be null terminated. Its maximum size is 64
+ * bytes, so we copy it to a larger array just to make sure we print sane
+ * data.
+ */
+static ssize_t wlp_wss_properties_show(struct wlp_wss *wss, char *buf)
+{
+ int result = 0;
+
+ if (mutex_lock_interruptible(&wss->mutex))
+ goto out;
+ result = __wlp_wss_properties_show(wss, buf, PAGE_SIZE);
+ mutex_unlock(&wss->mutex);
+out:
+ return result;
+}
+WSS_ATTR(properties, S_IRUGO, wlp_wss_properties_show, NULL);
+
+/**
+ * Print all connected members of this WSS
+ * The EDA cache contains all members of WSS neighborhood.
+ */
+static ssize_t wlp_wss_members_show(struct wlp_wss *wss, char *buf)
+{
+ struct wlp *wlp = container_of(wss, struct wlp, wss);
+ return wlp_eda_show(wlp, buf);
+}
+WSS_ATTR(members, S_IRUGO, wlp_wss_members_show, NULL);
+
+static
+const char *__wlp_strstate[] = {
+ "none",
+ "partially enrolled",
+ "enrolled",
+ "active",
+ "connected",
+};
+
+static const char *wlp_wss_strstate(unsigned state)
+{
+ if (state >= ARRAY_SIZE(__wlp_strstate))
+ return "unknown state";
+ return __wlp_strstate[state];
+}
+
+/*
+ * Print current state of this WSS
+ */
+static ssize_t wlp_wss_state_show(struct wlp_wss *wss, char *buf)
+{
+ int result = 0;
+
+ if (mutex_lock_interruptible(&wss->mutex))
+ goto out;
+ result = scnprintf(buf, PAGE_SIZE, "%s\n",
+ wlp_wss_strstate(wss->state));
+ mutex_unlock(&wss->mutex);
+out:
+ return result;
+}
+WSS_ATTR(state, S_IRUGO, wlp_wss_state_show, NULL);
+
+
+static
+struct attribute *wss_attrs[] = {
+ &wss_attr_properties.attr,
+ &wss_attr_members.attr,
+ &wss_attr_state.attr,
+ NULL,
+};
+
+struct attribute_group wss_attr_group = {
+ .name = NULL, /* we want them in the same directory */
+ .attrs = wss_attrs,
+};
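Editor's note: the WSS_ATTR() declarations used by this file are defined elsewhere in the WLP headers. The sketch below shows what such a declaration presumably looks like; the field layout of struct wlp_wss_attribute is inferred from how wlp_wss_attr_show() and wlp_wss_attr_store() dereference it and is an assumption, not a copy of the actual header.

struct wlp_wss_attribute {
	struct attribute attr;
	ssize_t (*show)(struct wlp_wss *wss, char *buf);
	ssize_t (*store)(struct wlp_wss *wss, const char *buf, size_t count);
};

#define WSS_ATTR(_name, _mode, _show, _store)			\
	static struct wlp_wss_attribute wss_attr_##_name = {	\
		.attr  = { .name = #_name, .mode = _mode },	\
		.show  = _show,					\
		.store = _store,				\
	}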
diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c
new file mode 100644
index 00000000000..c701bd1a288
--- /dev/null
+++ b/drivers/uwb/wlp/txrx.c
@@ -0,0 +1,374 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ * Message exchange infrastructure
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: Docs
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/wlp.h>
+#define D_LOCAL 5
+#include <linux/uwb/debug.h>
+#include "wlp-internal.h"
+
+
+/**
+ * Direct incoming association msg to correct parsing routine
+ *
+ * We only expect D1, E1, C1, C3 messages as new. All other incoming
+ * association messages should form part of an established session that is
+ * handled elsewhere.
+ * Handling these messages often requires calling sleeping functions, which
+ * cannot be done in interrupt context, so we defer them to the kernel's
+ * workqueue.
+ */
+static
+void wlp_direct_assoc_frame(struct wlp *wlp, struct sk_buff *skb,
+ struct uwb_dev_addr *src)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_frame_assoc *assoc = (void *) skb->data;
+ struct wlp_assoc_frame_ctx *frame_ctx;
+ d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb);
+ frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC);
+ if (frame_ctx == NULL) {
+ dev_err(dev, "WLP: Unable to allocate memory for association "
+ "frame handling.\n");
+ kfree_skb(skb);
+ goto out;
+ }
+ frame_ctx->wlp = wlp;
+ frame_ctx->skb = skb;
+ frame_ctx->src = *src;
+ switch (assoc->type) {
+ case WLP_ASSOC_D1:
+ d_printf(5, dev, "Received a D1 frame.\n");
+ INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame);
+ schedule_work(&frame_ctx->ws);
+ break;
+ case WLP_ASSOC_E1:
+ d_printf(5, dev, "Received a E1 frame. FIXME?\n");
+ kfree_skb(skb); /* Temporary until we handle it */
+ kfree(frame_ctx); /* Temporary until we handle it */
+ break;
+ case WLP_ASSOC_C1:
+ d_printf(5, dev, "Received a C1 frame.\n");
+ INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame);
+ schedule_work(&frame_ctx->ws);
+ break;
+ case WLP_ASSOC_C3:
+ d_printf(5, dev, "Received a C3 frame.\n");
+ INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame);
+ schedule_work(&frame_ctx->ws);
+ break;
+ default:
+ dev_err(dev, "Received unexpected association frame. "
+ "Type = %d \n", assoc->type);
+ kfree_skb(skb);
+ kfree(frame_ctx);
+ break;
+ }
+out:
+ d_fnend(5, dev, "wlp %p\n", wlp);
+}
+
+/**
+ * Process incoming association frame
+ *
+ * Although it could be possible to deal with some incoming association
+ * messages without creating a new session we are keeping things simple. We
+ * do not accept new association messages if there is a session in progress
+ * and the messages do not belong to that session.
+ *
+ * If an association message arrives that causes the creation of a session
+ * (WLP_ASSOC_E1) while we are in the process of creating a session then we
+ * rely on the neighbor mutex to protect the data. That is, the new session
+ * will not be started until the previous is completed.
+ */
+static
+void wlp_receive_assoc_frame(struct wlp *wlp, struct sk_buff *skb,
+ struct uwb_dev_addr *src)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_frame_assoc *assoc = (void *) skb->data;
+ struct wlp_session *session = wlp->session;
+ u8 version;
+ d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb);
+
+ if (wlp_get_version(wlp, &assoc->version, &version,
+ sizeof(assoc->version)) < 0)
+ goto error;
+ if (version != WLP_VERSION) {
+ dev_err(dev, "Unsupported WLP version in association "
+ "message.\n");
+ goto error;
+ }
+ if (session != NULL) {
+ /* Function that created this session is still holding the
+ * &wlp->mutex to protect this session. */
+ if (assoc->type == session->exp_message ||
+ assoc->type == WLP_ASSOC_F0) {
+ if (!memcmp(&session->neighbor_addr, src,
+ sizeof(*src))) {
+ session->data = skb;
+ (session->cb)(wlp);
+ } else {
+ dev_err(dev, "Received expected message from "
+ "unexpected source. Expected message "
+ "%d or F0 from %02x:%02x, but received "
+ "it from %02x:%02x. Dropping.\n",
+ session->exp_message,
+ session->neighbor_addr.data[1],
+ session->neighbor_addr.data[0],
+ src->data[1], src->data[0]);
+ goto error;
+ }
+ } else {
+ dev_err(dev, "Association already in progress. "
+ "Dropping.\n");
+ goto error;
+ }
+ } else {
+ wlp_direct_assoc_frame(wlp, skb, src);
+ }
+ d_fnend(5, dev, "wlp %p\n", wlp);
+ return;
+error:
+ kfree_skb(skb);
+ d_fnend(5, dev, "wlp %p\n", wlp);
+}
+
+/**
+ * Verify incoming frame is from connected neighbor, prep to pass to WLP client
+ *
+ * Verification proceeds according to WLP 0.99 [7.3.1]. The source address
+ * is used to determine which neighbor is sending the frame and the WSS tag
+ * is used to know to which WSS the frame belongs (we only support one WSS
+ * so this test is straightforward).
+ * With the WSS found we need to ensure that we are connected before
+ * allowing the exchange of data frames.
+ */
+static
+int wlp_verify_prep_rx_frame(struct wlp *wlp, struct sk_buff *skb,
+ struct uwb_dev_addr *src)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result = -EINVAL;
+ struct wlp_eda_node eda_entry;
+ struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data;
+
+ d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb);
+ /*verify*/
+ result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry);
+ if (result < 0) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Incoming frame is from unknown "
+ "neighbor %02x:%02x.\n", src->data[1],
+ src->data[0]);
+ goto out;
+ }
+ if (hdr->tag != eda_entry.tag) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Tag of incoming frame from "
+ "%02x:%02x does not match expected tag. "
+ "Received 0x%02x, expected 0x%02x. \n",
+ src->data[1], src->data[0], hdr->tag,
+ eda_entry.tag);
+ result = -EINVAL;
+ goto out;
+ }
+ if (eda_entry.state != WLP_WSS_CONNECTED) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Incoming frame from "
+ "%02x:%02x does is not from connected WSS.\n",
+ src->data[1], src->data[0]);
+ result = -EINVAL;
+ goto out;
+ }
+ /*prep*/
+ skb_pull(skb, sizeof(*hdr));
+out:
+ d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result);
+ return result;
+}
+
+/**
+ * Receive a WLP frame from device
+ *
+ * @returns: 1 if the calling function should free the skb
+ * 0 if it successfully handled the skb and freed it
+ * 0 if an error occurred; the skb is freed in this case as well
+ */
+int wlp_receive_frame(struct device *dev, struct wlp *wlp, struct sk_buff *skb,
+ struct uwb_dev_addr *src)
+{
+ unsigned len = skb->len;
+ void *ptr = skb->data;
+ struct wlp_frame_hdr *hdr;
+ int result = 0;
+
+ d_fnstart(6, dev, "skb (%p), len (%u)\n", skb, len);
+ if (len < sizeof(*hdr)) {
+ dev_err(dev, "Not enough data to parse WLP header.\n");
+ result = -EINVAL;
+ goto out;
+ }
+ hdr = ptr;
+ d_dump(6, dev, hdr, sizeof(*hdr));
+ if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) {
+ dev_err(dev, "Not a WLP frame type.\n");
+ result = -EINVAL;
+ goto out;
+ }
+ switch (hdr->type) {
+ case WLP_FRAME_STANDARD:
+ if (len < sizeof(struct wlp_frame_std_abbrv_hdr)) {
+ dev_err(dev, "Not enough data to parse Standard "
+ "WLP header.\n");
+ result = -EINVAL; /* ensures the skb is freed below */
+ goto out;
+ }
+ result = wlp_verify_prep_rx_frame(wlp, skb, src);
+ if (result < 0) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Verification of frame "
+ "from neighbor %02x:%02x failed.\n",
+ src->data[1], src->data[0]);
+ goto out;
+ }
+ result = 1;
+ break;
+ case WLP_FRAME_ABBREVIATED:
+ dev_err(dev, "Abbreviated frame received. FIXME?\n");
+ kfree_skb(skb);
+ break;
+ case WLP_FRAME_CONTROL:
+ dev_err(dev, "Control frame received. FIXME?\n");
+ kfree_skb(skb);
+ break;
+ case WLP_FRAME_ASSOCIATION:
+ if (len < sizeof(struct wlp_frame_assoc)) {
+ dev_err(dev, "Not enough data to parse Association "
+ "WLP header.\n");
+ result = -EINVAL; /* ensures the skb is freed below */
+ goto out;
+ }
+ d_printf(5, dev, "Association frame received.\n");
+ wlp_receive_assoc_frame(wlp, skb, src);
+ break;
+ default:
+ dev_err(dev, "Invalid frame received.\n");
+ result = -EINVAL;
+ break;
+ }
+out:
+ if (result < 0) {
+ kfree_skb(skb);
+ result = 0;
+ }
+ d_fnend(6, dev, "skb (%p)\n", skb);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wlp_receive_frame);
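Editor's note: the return convention above matters to the network driver that feeds frames into WLP. A minimal sketch of how a caller might honour it follows; the netif_rx()/eth_type_trans() hand-off is an illustrative assumption about the driver side, not code from this patch.

/* Hypothetical RX glue: give a received buffer to WLP and only touch
 * the skb again if WLP says the caller still owns it. */
static void example_wlp_rx(struct device *dev, struct wlp *wlp,
			   struct sk_buff *skb, struct uwb_dev_addr *src,
			   struct net_device *net_dev)
{
	if (wlp_receive_frame(dev, wlp, skb, src)) {
		/* returned 1: WLP stripped its header; the caller owns
		 * the skb and delivers (or frees) it */
		skb->protocol = eth_type_trans(skb, net_dev);
		netif_rx(skb);
	}
	/* returned 0: WLP consumed and freed the skb */
}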
+
+
+/**
+ * Verify frame from network stack, prepare for further transmission
+ *
+ * @skb: the socket buffer that needs to be prepared for transmission (it
+ * is in need of a WLP header). If this is a broadcast frame we take
+ * over the entire transmission.
+ * If it is a unicast the WSS connection should already be established
+ * and transmission will be done by the calling function.
+ * @dst: On return this will contain the device address to which the
+ * frame is destined.
+ * @returns: 0 on success no tx : WLP header successfully applied to skb buffer,
+ * calling function can proceed with tx
+ * 1 on success with tx : WLP will take over transmission of this
+ * frame
+ * <0 on error
+ *
+ * The network stack (WLP client) is attempting to transmit a frame. We can
+ * only transmit data if a local WSS is at least active (connection will be
+ * done here if this is a broadcast frame and neighbor also has the WSS
+ * active).
+ *
+ * The frame can be either broadcast or unicast. Broadcast in a WSS is
+ * supported via multicast, but we don't support multicast yet (until
+ * devices start to support MAB IEs). If a broadcast frame needs to be
+ * transmitted it is treated as a unicast frame to each neighbor. In this
+ * case the WLP takes over transmission of the skb and returns 1
+ * to the caller to indicate so. Also, in this case, if a neighbor has the
+ * same WSS activated but is not connected then the WSS connection will be
+ * done at this time. The neighbor's virtual address will be learned at
+ * this time.
+ *
+ * The destination address in a unicast frame is the virtual address of the
+ * neighbor. This address only becomes known when a WSS connection is
+ * established. We thus rely on a broadcast frame to trigger the setup of
+ * WSS connections to all neighbors before we are able to send unicast
+ * frames to them. This seems reasonable as IP would usually use ARP first
+ * before any unicast frames are sent.
+ *
+ * If we are already connected to the neighbor (neighbor's virtual address
+ * is known) we just prepare the WLP header and the caller will continue to
+ * send the frame.
+ *
+ * A failure in this function usually indicates something that cannot be
+ * fixed automatically. So, if this function fails (@return < 0) the calling
+ * function should not retry sending the frame, as it will very likely keep
+ * failing.
+ *
+ */
+int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp,
+ struct sk_buff *skb, struct uwb_dev_addr *dst)
+{
+ int result = -EINVAL;
+ struct ethhdr *eth_hdr = (void *) skb->data;
+
+ d_fnstart(6, dev, "wlp (%p), skb (%p) \n", wlp, skb);
+ if (is_broadcast_ether_addr(eth_hdr->h_dest)) {
+ d_printf(6, dev, "WLP: handling broadcast frame. \n");
+ result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb);
+ if (result < 0) {
+ if (printk_ratelimit())
+ dev_err(dev, "Unable to handle broadcast "
+ "frame from WLP client.\n");
+ goto out;
+ }
+ dev_kfree_skb_irq(skb);
+ result = 1;
+ /* Frame will be transmitted by WLP. */
+ } else {
+ d_printf(6, dev, "WLP: handling unicast frame. \n");
+ result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst,
+ wlp_wss_prep_hdr, skb);
+ if (unlikely(result < 0)) {
+ if (printk_ratelimit())
+ dev_err(dev, "Unable to prepare "
+ "skb for transmission. \n");
+ goto out;
+ }
+ }
+out:
+ d_fnend(6, dev, "wlp (%p), skb (%p). result = %d \n", wlp, skb, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame);
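Editor's note: a matching sketch for the transmit side, showing one way a caller could act on the 0/1/<0 convention documented above. The wlp->xmit_frame prototype is an assumption inferred from the BUG_ON check in wlp_setup(), so treat this as illustrative only.

/* Hypothetical TX glue honouring wlp_prepare_tx_frame()'s return codes. */
static int example_wlp_tx(struct device *dev, struct wlp *wlp,
			  struct sk_buff *skb)
{
	struct uwb_dev_addr dst;
	int result;

	result = wlp_prepare_tx_frame(dev, wlp, skb, &dst);
	if (result < 0) {
		dev_kfree_skb(skb);	/* do not retry; see comment above */
		return result;
	}
	if (result == 1)
		return 0;		/* WLP took over transmission */
	/* result == 0: WLP header applied, caller transmits to dst */
	return wlp->xmit_frame(wlp, skb, &dst);
}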
diff --git a/drivers/uwb/wlp/wlp-internal.h b/drivers/uwb/wlp/wlp-internal.h
new file mode 100644
index 00000000000..1c94fabfb1a
--- /dev/null
+++ b/drivers/uwb/wlp/wlp-internal.h
@@ -0,0 +1,228 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ * Internal API
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __WLP_INTERNAL_H__
+#define __WLP_INTERNAL_H__
+
+/**
+ * State of WSS connection
+ *
+ * A device needs to connect to a neighbor in an activated WSS before data
+ * can be transmitted. The spec also distinguishes between a new connection
+ * attempt and a connection attempt after previous connection attempts. The
+ * state WLP_WSS_CONNECT_FAILED is used for this scenario. See WLP 0.99
+ * [7.2.6]
+ */
+enum wlp_wss_connect {
+ WLP_WSS_UNCONNECTED = 0,
+ WLP_WSS_CONNECTED,
+ WLP_WSS_CONNECT_FAILED,
+};
+
+extern struct kobj_type wss_ktype;
+extern struct attribute_group wss_attr_group;
+
+extern int uwb_rc_ie_add(struct uwb_rc *, const struct uwb_ie_hdr *, size_t);
+extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie);
+
+
+/* This should be changed to a dynamic array in which entries are sorted
+ * by eth_addr and looked up with a binary search.
+ *
+ * Then again, this technology's maximum reach is 10 meters... unless you
+ * pack a lot of devices in around your radio controller/WLP device, the
+ * list will probably not grow very big.
+ *
+ * In any case, there is probably an existing data structure in the kernel
+ * that we could reuse for this.
+ *
+ * The structure below is really only adequate while we support one WSS
+ * per host.
+ */
+struct wlp_eda_node {
+ struct list_head list_node;
+ unsigned char eth_addr[ETH_ALEN];
+ struct uwb_dev_addr dev_addr;
+ struct wlp_wss *wss;
+ unsigned char virt_addr[ETH_ALEN];
+ u8 tag;
+ enum wlp_wss_connect state;
+};
+
+typedef int (*wlp_eda_for_each_f)(struct wlp *, struct wlp_eda_node *, void *);
+
+extern void wlp_eda_init(struct wlp_eda *);
+extern void wlp_eda_release(struct wlp_eda *);
+extern int wlp_eda_create_node(struct wlp_eda *,
+ const unsigned char eth_addr[ETH_ALEN],
+ const struct uwb_dev_addr *);
+extern void wlp_eda_rm_node(struct wlp_eda *, const struct uwb_dev_addr *);
+extern int wlp_eda_update_node(struct wlp_eda *,
+ const struct uwb_dev_addr *,
+ struct wlp_wss *,
+ const unsigned char virt_addr[ETH_ALEN],
+ const u8, const enum wlp_wss_connect);
+extern int wlp_eda_update_node_state(struct wlp_eda *,
+ const struct uwb_dev_addr *,
+ const enum wlp_wss_connect);
+
+extern int wlp_copy_eda_node(struct wlp_eda *, struct uwb_dev_addr *,
+ struct wlp_eda_node *);
+extern int wlp_eda_for_each(struct wlp_eda *, wlp_eda_for_each_f , void *);
+extern int wlp_eda_for_virtual(struct wlp_eda *,
+ const unsigned char eth_addr[ETH_ALEN],
+ struct uwb_dev_addr *,
+ wlp_eda_for_each_f , void *);
+
+
+extern void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *);
+
+extern size_t wlp_wss_key_print(char *, size_t, u8 *);
+
+/* Function called when no more references to WSS exists */
+extern void wlp_wss_release(struct kobject *);
+
+extern void wlp_wss_reset(struct wlp_wss *);
+extern int wlp_wss_create_activate(struct wlp_wss *, struct wlp_uuid *,
+ char *, unsigned, unsigned);
+extern int wlp_wss_enroll_activate(struct wlp_wss *, struct wlp_uuid *,
+ struct uwb_dev_addr *);
+extern ssize_t wlp_discover(struct wlp *);
+
+extern int wlp_enroll_neighbor(struct wlp *, struct wlp_neighbor_e *,
+ struct wlp_wss *, struct wlp_uuid *);
+extern int wlp_wss_is_active(struct wlp *, struct wlp_wss *,
+ struct uwb_dev_addr *);
+
+struct wlp_assoc_conn_ctx {
+ struct work_struct ws;
+ struct wlp *wlp;
+ struct sk_buff *skb;
+ struct wlp_eda_node eda_entry;
+};
+
+
+extern int wlp_wss_connect_prep(struct wlp *, struct wlp_eda_node *, void *);
+extern int wlp_wss_send_copy(struct wlp *, struct wlp_eda_node *, void *);
+
+
+/* Message handling */
+struct wlp_assoc_frame_ctx {
+ struct work_struct ws;
+ struct wlp *wlp;
+ struct sk_buff *skb;
+ struct uwb_dev_addr src;
+};
+
+extern int wlp_wss_prep_hdr(struct wlp *, struct wlp_eda_node *, void *);
+extern void wlp_handle_d1_frame(struct work_struct *);
+extern int wlp_parse_d2_frame_to_cache(struct wlp *, struct sk_buff *,
+ struct wlp_neighbor_e *);
+extern int wlp_parse_d2_frame_to_enroll(struct wlp_wss *, struct sk_buff *,
+ struct wlp_neighbor_e *,
+ struct wlp_uuid *);
+extern void wlp_handle_c1_frame(struct work_struct *);
+extern void wlp_handle_c3_frame(struct work_struct *);
+extern int wlp_parse_c3c4_frame(struct wlp *, struct sk_buff *,
+ struct wlp_uuid *, u8 *,
+ struct uwb_mac_addr *);
+extern int wlp_parse_f0(struct wlp *, struct sk_buff *);
+extern int wlp_send_assoc_frame(struct wlp *, struct wlp_wss *,
+ struct uwb_dev_addr *, enum wlp_assoc_type);
+extern ssize_t wlp_get_version(struct wlp *, struct wlp_attr_version *,
+ u8 *, ssize_t);
+extern ssize_t wlp_get_wssid(struct wlp *, struct wlp_attr_wssid *,
+ struct wlp_uuid *, ssize_t);
+extern int __wlp_alloc_device_info(struct wlp *);
+extern int __wlp_setup_device_info(struct wlp *);
+
+extern struct wlp_wss_attribute wss_attribute_properties;
+extern struct wlp_wss_attribute wss_attribute_members;
+extern struct wlp_wss_attribute wss_attribute_state;
+
+static inline
+size_t wlp_wss_uuid_print(char *buf, size_t bufsize, struct wlp_uuid *uuid)
+{
+ size_t result;
+
+ result = scnprintf(buf, bufsize,
+ "%02x:%02x:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x",
+ uuid->data[0], uuid->data[1],
+ uuid->data[2], uuid->data[3],
+ uuid->data[4], uuid->data[5],
+ uuid->data[6], uuid->data[7],
+ uuid->data[8], uuid->data[9],
+ uuid->data[10], uuid->data[11],
+ uuid->data[12], uuid->data[13],
+ uuid->data[14], uuid->data[15]);
+ return result;
+}
+
+/**
+ * FIXME: How should a nonce be displayed?
+ */
+static inline
+size_t wlp_wss_nonce_print(char *buf, size_t bufsize, struct wlp_nonce *nonce)
+{
+ size_t result;
+
+ result = scnprintf(buf, bufsize,
+ "%02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x",
+ nonce->data[0], nonce->data[1],
+ nonce->data[2], nonce->data[3],
+ nonce->data[4], nonce->data[5],
+ nonce->data[6], nonce->data[7],
+ nonce->data[8], nonce->data[9],
+ nonce->data[10], nonce->data[11],
+ nonce->data[12], nonce->data[13],
+ nonce->data[14], nonce->data[15]);
+ return result;
+}
+
+
+static inline
+void wlp_session_cb(struct wlp *wlp)
+{
+ struct completion *completion = wlp->session->cb_priv;
+ complete(completion);
+}
+
+static inline
+int wlp_uuid_is_set(struct wlp_uuid *uuid)
+{
+ struct wlp_uuid zero_uuid = { .data = { 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00} };
+
+ if (!memcmp(uuid, &zero_uuid, sizeof(*uuid)))
+ return 0;
+ return 1;
+}
+
+#endif /* __WLP_INTERNAL_H__ */
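Editor's note: the EDA iterators declared above take callbacks of type wlp_eda_for_each_f. A small hypothetical callback is sketched below to show the expected shape; the real users are wlp_wss_send_copy() and wlp_wss_prep_hdr() in the .c files, and the iterator's exact stop-on-error semantics live in eda.c.

/* Hypothetical callback counting EDA entries in the connected state. */
static int example_count_connected(struct wlp *wlp,
				   struct wlp_eda_node *node, void *priv)
{
	unsigned *count = priv;

	if (node->state == WLP_WSS_CONNECTED)
		(*count)++;
	return 0;	/* no error for this entry */
}

/* usage: unsigned n = 0;
 *        wlp_eda_for_each(&wlp->eda, example_count_connected, &n); */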
diff --git a/drivers/uwb/wlp/wlp-lc.c b/drivers/uwb/wlp/wlp-lc.c
new file mode 100644
index 00000000000..0799402e73f
--- /dev/null
+++ b/drivers/uwb/wlp/wlp-lc.c
@@ -0,0 +1,585 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ */
+
+#include <linux/wlp.h>
+#define D_LOCAL 6
+#include <linux/uwb/debug.h>
+#include "wlp-internal.h"
+
+
+static
+void wlp_neighbor_init(struct wlp_neighbor_e *neighbor)
+{
+ INIT_LIST_HEAD(&neighbor->wssid);
+}
+
+/**
+ * Create area for device information storage
+ *
+ * wlp->mutex must be held
+ */
+int __wlp_alloc_device_info(struct wlp *wlp)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ BUG_ON(wlp->dev_info != NULL);
+ wlp->dev_info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL);
+ if (wlp->dev_info == NULL) {
+ dev_err(dev, "WLP: Unable to allocate memory for "
+ "device information.\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+
+/**
+ * Fill in device information using function provided by driver
+ *
+ * wlp->mutex must be held
+ */
+static
+void __wlp_fill_device_info(struct wlp *wlp)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+
+ BUG_ON(wlp->fill_device_info == NULL);
+ d_printf(6, dev, "Retrieving device information "
+ "from device driver.\n");
+ wlp->fill_device_info(wlp, wlp->dev_info);
+}
+
+/**
+ * Setup device information
+ *
+ * Allocate area for device information and populate it.
+ *
+ * wlp->mutex must be held
+ */
+int __wlp_setup_device_info(struct wlp *wlp)
+{
+ int result;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+
+ result = __wlp_alloc_device_info(wlp);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to allocate area for "
+ "device information.\n");
+ return result;
+ }
+ __wlp_fill_device_info(wlp);
+ return 0;
+}
+
+/**
+ * Remove information about neighbor stored temporarily
+ *
+ * Information learned during discovery should only be stored when the
+ * device enrolls in the neighbor's WSS. We do need to store this
+ * information temporarily in order to present it to the user.
+ *
+ * We are only interested in keeping neighbor WSS information if that
+ * neighbor is accepting enrollment.
+ *
+ * should be called with wlp->nbmutex held
+ */
+void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *neighbor)
+{
+ struct wlp_wssid_e *wssid_e, *next;
+ u8 keep;
+ if (!list_empty(&neighbor->wssid)) {
+ list_for_each_entry_safe(wssid_e, next, &neighbor->wssid,
+ node) {
+ if (wssid_e->info != NULL) {
+ keep = wssid_e->info->accept_enroll;
+ kfree(wssid_e->info);
+ wssid_e->info = NULL;
+ if (!keep) {
+ list_del(&wssid_e->node);
+ kfree(wssid_e);
+ }
+ }
+ }
+ }
+ if (neighbor->info != NULL) {
+ kfree(neighbor->info);
+ neighbor->info = NULL;
+ }
+}
+
+/**
+ * Populate WLP neighborhood cache with neighbor information
+ *
+ * A new neighbor is found. If it is discoverable then we add it to the
+ * neighborhood cache.
+ *
+ */
+static
+int wlp_add_neighbor(struct wlp *wlp, struct uwb_dev *dev)
+{
+ int result = 0;
+ int discoverable;
+ struct wlp_neighbor_e *neighbor;
+
+ d_fnstart(6, &dev->dev, "uwb %p \n", dev);
+ d_printf(6, &dev->dev, "Found neighbor device %02x:%02x \n",
+ dev->dev_addr.data[1], dev->dev_addr.data[0]);
+ /**
+ * FIXME:
+ * Use contents of WLP IE found in beacon cache to determine if
+ * neighbor is discoverable.
+ * The device does not support WLP IE yet so this still needs to be
+ * done. Until then we assume all devices are discoverable.
+ */
+ discoverable = 1; /* will be changed when FIXME disappears */
+ if (discoverable) {
+ /* Add neighbor to cache for discovery */
+ neighbor = kzalloc(sizeof(*neighbor), GFP_KERNEL);
+ if (neighbor == NULL) {
+ dev_err(&dev->dev, "Unable to create memory for "
+ "new neighbor. \n");
+ result = -ENOMEM;
+ goto error_no_mem;
+ }
+ wlp_neighbor_init(neighbor);
+ uwb_dev_get(dev);
+ neighbor->uwb_dev = dev;
+ list_add(&neighbor->node, &wlp->neighbors);
+ }
+error_no_mem:
+ d_fnend(6, &dev->dev, "uwb %p, result = %d \n", dev, result);
+ return result;
+}
+
+/**
+ * Remove one neighbor from cache
+ */
+static
+void __wlp_neighbor_release(struct wlp_neighbor_e *neighbor)
+{
+ struct wlp_wssid_e *wssid_e, *next_wssid_e;
+
+ list_for_each_entry_safe(wssid_e, next_wssid_e,
+ &neighbor->wssid, node) {
+ list_del(&wssid_e->node);
+ kfree(wssid_e);
+ }
+ uwb_dev_put(neighbor->uwb_dev);
+ list_del(&neighbor->node);
+ kfree(neighbor);
+}
+
+/**
+ * Clear entire neighborhood cache.
+ */
+static
+void __wlp_neighbors_release(struct wlp *wlp)
+{
+ struct wlp_neighbor_e *neighbor, *next;
+ if (list_empty(&wlp->neighbors))
+ return;
+ list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) {
+ __wlp_neighbor_release(neighbor);
+ }
+}
+
+static
+void wlp_neighbors_release(struct wlp *wlp)
+{
+ mutex_lock(&wlp->nbmutex);
+ __wlp_neighbors_release(wlp);
+ mutex_unlock(&wlp->nbmutex);
+}
+
+
+
+/**
+ * Send D1 message to neighbor, receive D2 message
+ *
+ * @neighbor: neighbor to which D1 message will be sent
+ * @wss: if not NULL, it is an enrollment request for this WSS
+ * @wssid: if wss not NULL, this is the wssid of the WSS in which we
+ * want to enroll
+ *
+ * A D1/D2 exchange is done for one of two reasons: discovery or
+ * enrollment. If done for discovery the D1 message is sent to the neighbor
+ * and the contents of the D2 response is stored in a temporary cache.
+ * If done for enrollment the @wss and @wssid are provided also. In this
+ * case the D1 message is sent to the neighbor, the D2 response is parsed
+ * for enrollment of the WSS with wssid.
+ *
+ * &wss->mutex is held
+ */
+static
+int wlp_d1d2_exchange(struct wlp *wlp, struct wlp_neighbor_e *neighbor,
+ struct wlp_wss *wss, struct wlp_uuid *wssid)
+{
+ int result;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct wlp_session session;
+ struct sk_buff *skb;
+ struct wlp_frame_assoc *resp;
+ struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr;
+
+ mutex_lock(&wlp->mutex);
+ if (!wlp_uuid_is_set(&wlp->uuid)) {
+ dev_err(dev, "WLP: UUID is not set. Set via sysfs to "
+ "proceed.\n");
+ result = -ENXIO;
+ goto out;
+ }
+ /* Send D1 association frame */
+ result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_D1);
+ if (result < 0) {
+ dev_err(dev, "Unable to send D1 frame to neighbor "
+ "%02x:%02x (%d)\n", dev_addr->data[1],
+ dev_addr->data[0], result);
+ d_printf(6, dev, "Add placeholders into buffer next to "
+ "neighbor information we have (dev address).\n");
+ goto out;
+ }
+ /* Create session, wait for response */
+ session.exp_message = WLP_ASSOC_D2;
+ session.cb = wlp_session_cb;
+ session.cb_priv = &completion;
+ session.neighbor_addr = *dev_addr;
+ BUG_ON(wlp->session != NULL);
+ wlp->session = &session;
+ /* Wait for D2/F0 frame */
+ result = wait_for_completion_interruptible_timeout(&completion,
+ WLP_PER_MSG_TIMEOUT * HZ);
+ if (result == 0) {
+ result = -ETIMEDOUT;
+ dev_err(dev, "Timeout while sending D1 to neighbor "
+ "%02x:%02x.\n", dev_addr->data[1],
+ dev_addr->data[0]);
+ goto error_session;
+ }
+ if (result < 0) {
+ dev_err(dev, "Unable to discover/enroll neighbor %02x:%02x.\n",
+ dev_addr->data[1], dev_addr->data[0]);
+ goto error_session;
+ }
+ /* Parse message in session->data: it will be either D2 or F0 */
+ skb = session.data;
+ resp = (void *) skb->data;
+ d_printf(6, dev, "Received response to D1 frame. \n");
+ d_dump(6, dev, skb->data, skb->len > 72 ? 72 : skb->len);
+
+ if (resp->type == WLP_ASSOC_F0) {
+ result = wlp_parse_f0(wlp, skb);
+ if (result < 0)
+ dev_err(dev, "WLP: Unable to parse F0 from neighbor "
+ "%02x:%02x.\n", dev_addr->data[1],
+ dev_addr->data[0]);
+ result = -EINVAL;
+ goto error_resp_parse;
+ }
+ if (wss == NULL) {
+ /* Discovery */
+ result = wlp_parse_d2_frame_to_cache(wlp, skb, neighbor);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to parse D2 message from "
+ "neighbor %02x:%02x for discovery.\n",
+ dev_addr->data[1], dev_addr->data[0]);
+ goto error_resp_parse;
+ }
+ } else {
+ /* Enrollment */
+ result = wlp_parse_d2_frame_to_enroll(wss, skb, neighbor,
+ wssid);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to parse D2 message from "
+ "neighbor %02x:%02x for enrollment.\n",
+ dev_addr->data[1], dev_addr->data[0]);
+ goto error_resp_parse;
+ }
+ }
+error_resp_parse:
+ kfree_skb(skb);
+error_session:
+ wlp->session = NULL;
+out:
+ mutex_unlock(&wlp->mutex);
+ return result;
+}
+
+/**
+ * Enroll into WSS of provided WSSID by using neighbor as registrar
+ *
+ * &wss->mutex is held
+ */
+int wlp_enroll_neighbor(struct wlp *wlp, struct wlp_neighbor_e *neighbor,
+ struct wlp_wss *wss, struct wlp_uuid *wssid)
+{
+ int result = 0;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ char buf[WLP_WSS_UUID_STRSIZE];
+ struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr;
+ wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+ d_fnstart(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n",
+ wlp, neighbor, wss, wssid, buf);
+ d_printf(6, dev, "Complete me.\n");
+ result = wlp_d1d2_exchange(wlp, neighbor, wss, wssid);
+ if (result < 0) {
+ dev_err(dev, "WLP: D1/D2 message exchange for enrollment "
+ "failed. result = %d \n", result);
+ goto out;
+ }
+ if (wss->state != WLP_WSS_STATE_PART_ENROLLED) {
+ dev_err(dev, "WLP: Unable to enroll into WSS %s using "
+ "neighbor %02x:%02x. \n", buf,
+ dev_addr->data[1], dev_addr->data[0]);
+ result = -EINVAL;
+ goto out;
+ }
+ if (wss->secure_status == WLP_WSS_SECURE) {
+ dev_err(dev, "FIXME: need to complete secure enrollment.\n");
+ result = -EINVAL;
+ goto error;
+ } else {
+ wss->state = WLP_WSS_STATE_ENROLLED;
+ d_printf(2, dev, "WLP: Success Enrollment into unsecure WSS "
+ "%s using neighbor %02x:%02x. \n", buf,
+ dev_addr->data[1], dev_addr->data[0]);
+ }
+
+ d_fnend(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n",
+ wlp, neighbor, wss, wssid, buf);
+out:
+ return result;
+error:
+ wlp_wss_reset(wss);
+ return result;
+}
+
+/**
+ * Discover WSS information of neighbor's active WSS
+ */
+static
+int wlp_discover_neighbor(struct wlp *wlp,
+ struct wlp_neighbor_e *neighbor)
+{
+ return wlp_d1d2_exchange(wlp, neighbor, NULL, NULL);
+}
+
+
+/**
+ * Each neighbor in the neighborhood cache is discoverable. Discover it.
+ *
+ * Discovery is done by sending a D1 association frame and parsing the D2
+ * association frame response. Only the WSSIDs from the D2 are kept in the
+ * neighbor cache; the rest is just displayed to the user and forgotten.
+ *
+ * The discovery is not done in parallel. This is simple and enables us to
+ * maintain only one association context.
+ *
+ * The discovery of one neighbor does not affect the other, but if the
+ * discovery of a neighbor fails it is removed from the neighborhood cache.
+ */
+static
+int wlp_discover_all_neighbors(struct wlp *wlp)
+{
+ int result = 0;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_neighbor_e *neighbor, *next;
+
+ list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) {
+ result = wlp_discover_neighbor(wlp, neighbor);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to discover neighbor "
+ "%02x:%02x, removing from neighborhood. \n",
+ neighbor->uwb_dev->dev_addr.data[1],
+ neighbor->uwb_dev->dev_addr.data[0]);
+ __wlp_neighbor_release(neighbor);
+ }
+ }
+ return result;
+}
+
+static int wlp_add_neighbor_helper(struct device *dev, void *priv)
+{
+ struct wlp *wlp = priv;
+ struct uwb_dev *uwb_dev = to_uwb_dev(dev);
+
+ return wlp_add_neighbor(wlp, uwb_dev);
+}
+
+/**
+ * Discover WLP neighborhood
+ *
+ * Will send D1 association frame to all devices in beacon group that have
+ * discoverable bit set in WLP IE. D2 frames will be received, information
+ * displayed to user in @buf. Partial information (from D2 association
+ * frame) will be cached to assist with future association
+ * requests.
+ *
+ * The discovery of the WLP neighborhood is triggered by the user. This
+ * should occur infrequently and we thus free current cache and re-allocate
+ * memory if needed.
+ *
+ * If one neighbor fails during initial discovery (determining if it is a
+ * neighbor or not), we fail all - note that no interaction with the
+ * neighbor has occurred at this point, so if a failure occurs we know
+ * something went wrong locally. We thus undo everything.
+ */
+ssize_t wlp_discover(struct wlp *wlp)
+{
+ int result = 0;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+
+ d_fnstart(6, dev, "wlp %p \n", wlp);
+ mutex_lock(&wlp->nbmutex);
+ /* Clear current neighborhood cache. */
+ __wlp_neighbors_release(wlp);
+ /* Determine which devices in neighborhood. Repopulate cache. */
+ result = uwb_dev_for_each(wlp->rc, wlp_add_neighbor_helper, wlp);
+ if (result < 0) {
+ /* May have partial neighbor information, release all. */
+ __wlp_neighbors_release(wlp);
+ goto error_dev_for_each;
+ }
+ /* Discover the properties of devices in neighborhood. */
+ result = wlp_discover_all_neighbors(wlp);
+ /* In case of failure we still print our partial results. */
+ if (result < 0) {
+ dev_err(dev, "Unable to fully discover neighborhood. \n");
+ result = 0;
+ }
+error_dev_for_each:
+ mutex_unlock(&wlp->nbmutex);
+ d_fnend(6, dev, "wlp %p \n", wlp);
+ return result;
+}
+
+/**
+ * Handle events from UWB stack
+ *
+ * We handle events conservatively. If a neighbor goes off the air we
+ * remove it from the neighborhood. If an association process is in
+ * progress this function will block waiting for the nbmutex to become
+ * free. The association process will thus be allowed to complete before it
+ * is removed.
+ */
+static
+void wlp_uwb_notifs_cb(void *_wlp, struct uwb_dev *uwb_dev,
+ enum uwb_notifs event)
+{
+ struct wlp *wlp = _wlp;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_neighbor_e *neighbor, *next;
+ int result;
+ switch (event) {
+ case UWB_NOTIF_ONAIR:
+ d_printf(6, dev, "UWB device %02x:%02x is onair\n",
+ uwb_dev->dev_addr.data[1],
+ uwb_dev->dev_addr.data[0]);
+ result = wlp_eda_create_node(&wlp->eda,
+ uwb_dev->mac_addr.data,
+ &uwb_dev->dev_addr);
+ if (result < 0)
+ dev_err(dev, "WLP: Unable to add new neighbor "
+ "%02x:%02x to EDA cache.\n",
+ uwb_dev->dev_addr.data[1],
+ uwb_dev->dev_addr.data[0]);
+ break;
+ case UWB_NOTIF_OFFAIR:
+ d_printf(6, dev, "UWB device %02x:%02x is offair\n",
+ uwb_dev->dev_addr.data[1],
+ uwb_dev->dev_addr.data[0]);
+ wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr);
+ mutex_lock(&wlp->nbmutex);
+ list_for_each_entry_safe(neighbor, next, &wlp->neighbors,
+ node) {
+ if (neighbor->uwb_dev == uwb_dev) {
+ d_printf(6, dev, "Removing device from "
+ "neighborhood.\n");
+ __wlp_neighbor_release(neighbor);
+ }
+ }
+ mutex_unlock(&wlp->nbmutex);
+ break;
+ default:
+ dev_err(dev, "don't know how to handle event %d from uwb\n",
+ event);
+ }
+}
+
+int wlp_setup(struct wlp *wlp, struct uwb_rc *rc)
+{
+ struct device *dev = &rc->uwb_dev.dev;
+ int result;
+
+ d_fnstart(6, dev, "wlp %p\n", wlp);
+ BUG_ON(wlp->fill_device_info == NULL);
+ BUG_ON(wlp->xmit_frame == NULL);
+ BUG_ON(wlp->stop_queue == NULL);
+ BUG_ON(wlp->start_queue == NULL);
+ wlp->rc = rc;
+ wlp_eda_init(&wlp->eda);/* Set up address cache */
+ wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb;
+ wlp->uwb_notifs_handler.data = wlp;
+ uwb_notifs_register(rc, &wlp->uwb_notifs_handler);
+
+ uwb_pal_init(&wlp->pal);
+ result = uwb_pal_register(rc, &wlp->pal);
+ if (result < 0)
+ uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler);
+
+ d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wlp_setup);
+
+void wlp_remove(struct wlp *wlp)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ d_fnstart(6, dev, "wlp %p\n", wlp);
+ wlp_neighbors_release(wlp);
+ uwb_pal_unregister(wlp->rc, &wlp->pal);
+ uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler);
+ wlp_eda_release(&wlp->eda);
+ mutex_lock(&wlp->mutex);
+ if (wlp->dev_info != NULL)
+ kfree(wlp->dev_info);
+ mutex_unlock(&wlp->mutex);
+ wlp->rc = NULL;
+ /* We have to use NULL here because this function can be called
+ * when the device disappeared. */
+ d_fnend(6, NULL, "wlp %p\n", wlp);
+}
+EXPORT_SYMBOL_GPL(wlp_remove);
+
+/**
+ * wlp_reset_all - reset the WLP hardware
+ * @wlp: the WLP device to reset.
+ *
+ * This schedules a full hardware reset of the WLP device. The radio
+ * controller and any other PALs will also be reset.
+ */
+void wlp_reset_all(struct wlp *wlp)
+{
+ uwb_rc_reset_all(wlp->rc);
+}
+EXPORT_SYMBOL_GPL(wlp_reset_all);
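Editor's note: wlp_setup() insists (via BUG_ON) that the driver has filled in fill_device_info, xmit_frame, stop_queue and start_queue before calling it. The sketch below shows hypothetical driver-side wiring; the callback prototypes are assumptions inferred from how this file and txrx.c invoke them, so check linux/wlp.h for the authoritative signatures.

/* Hypothetical driver glue for wlp_setup(); not part of this patch. */
static void example_fill_device_info(struct wlp *wlp,
				     struct wlp_device_info *info)
{
	/* populate the fixed-size members of struct wlp_device_info */
	strlcpy(info->name, "Example WLP device", sizeof(info->name));
}

static void example_stop_queue(struct wlp *wlp)  { /* pause the net queue */ }
static void example_start_queue(struct wlp *wlp) { /* wake the net queue */ }

static int example_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
			      struct uwb_dev_addr *dst)
{
	dev_kfree_skb(skb);	/* stub: a real driver queues the frame */
	return 0;
}

static int example_wlp_attach(struct wlp *wlp, struct uwb_rc *rc)
{
	wlp->fill_device_info = example_fill_device_info;
	wlp->xmit_frame = example_xmit_frame;
	wlp->stop_queue = example_stop_queue;
	wlp->start_queue = example_start_queue;
	return wlp_setup(wlp, rc); /* also registers the UWB notification handler */
}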
diff --git a/drivers/uwb/wlp/wss-lc.c b/drivers/uwb/wlp/wss-lc.c
new file mode 100644
index 00000000000..96b18c9bd6e
--- /dev/null
+++ b/drivers/uwb/wlp/wss-lc.c
@@ -0,0 +1,1055 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Implementation of the WLP association protocol.
+ *
+ * FIXME: Docs
+ *
+ * A UWB network interface will configure a WSS through wlp_wss_setup() after
+ * the interface has been assigned a MAC address, typically after
+ * "ifconfig" has been called. When the interface goes down it should call
+ * wlp_wss_remove().
+ *
+ * When the WSS is ready for use the user interacts via sysfs to create,
+ * discover, and activate WSS.
+ *
+ * wlp_wss_enroll_activate()
+ *
+ * wlp_wss_create_activate()
+ * wlp_wss_set_wssid_hash()
+ * wlp_wss_comp_wssid_hash()
+ * wlp_wss_sel_bcast_addr()
+ * wlp_wss_sysfs_add()
+ *
+ * Called when no more references to WSS exist:
+ * wlp_wss_release()
+ * wlp_wss_reset()
+ */
+
+#include <linux/etherdevice.h> /* for is_valid_ether_addr */
+#include <linux/skbuff.h>
+#include <linux/wlp.h>
+#define D_LOCAL 5
+#include <linux/uwb/debug.h>
+#include "wlp-internal.h"
+
+
+size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key)
+{
+ size_t result;
+
+ result = scnprintf(buf, bufsize,
+ "%02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x",
+ key[0], key[1], key[2], key[3],
+ key[4], key[5], key[6], key[7],
+ key[8], key[9], key[10], key[11],
+ key[12], key[13], key[14], key[15]);
+ return result;
+}
+
+/**
+ * Compute WSSID hash
+ * WLP Draft 0.99 [7.2.1]
+ *
+ * The WSSID hash for a WSSID is the result of an octet-wise exclusive-OR
+ * of all octets in the WSSID.
+ */
+static
+u8 wlp_wss_comp_wssid_hash(struct wlp_uuid *wssid)
+{
+ return wssid->data[0] ^ wssid->data[1] ^ wssid->data[2]
+ ^ wssid->data[3] ^ wssid->data[4] ^ wssid->data[5]
+ ^ wssid->data[6] ^ wssid->data[7] ^ wssid->data[8]
+ ^ wssid->data[9] ^ wssid->data[10] ^ wssid->data[11]
+ ^ wssid->data[12] ^ wssid->data[13] ^ wssid->data[14]
+ ^ wssid->data[15];
+}
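Editor's note: because the hash is a plain octet-wise XOR it is easy to sanity-check outside the kernel. The stand-alone userspace snippet below uses a made-up WSSID purely as a worked example.

#include <stdio.h>
#include <stdint.h>

/* Userspace re-implementation of the octet-wise XOR above. */
int main(void)
{
	uint8_t wssid[16] = { 0x12, 0xa3, 0x3b, 0x01 };	/* remaining octets zero */
	uint8_t hash = 0;
	int i;

	for (i = 0; i < 16; i++)
		hash ^= wssid[i];
	printf("WSSID hash: 0x%02x\n", hash);	/* 0x12^0xa3^0x3b^0x01 = 0x8b */
	return 0;
}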
+
+/**
+ * Select a multicast EUI-48 for the WSS broadcast address.
+ * WLP Draft 0.99 [7.2.1]
+ *
+ * Selected based on the WiMedia Alliance OUI, 00-13-88, within the WLP
+ * range, [01-13-88-00-01-00, 01-13-88-00-01-FF] inclusive.
+ *
+ * This address is currently hardcoded.
+ * FIXME?
+ */
+static
+struct uwb_mac_addr wlp_wss_sel_bcast_addr(struct wlp_wss *wss)
+{
+ struct uwb_mac_addr bcast = {
+ .data = { 0x01, 0x13, 0x88, 0x00, 0x01, 0x00 }
+ };
+ return bcast;
+}
+
+/**
+ * Clear the contents of the WSS structure - all except kobj, mutex, virtual
+ *
+ * We do not want to reinitialize - the internal kobj should not change as
+ * it still points to the parent received during setup. The mutex should
+ * remain also. We thus just reset values individually.
+ * The virtual address assigned to the WSS will remain the same for the
+ * lifetime of the WSS. We only reset the fields that can change during its
+ * lifetime.
+ */
+void wlp_wss_reset(struct wlp_wss *wss)
+{
+ struct wlp *wlp = container_of(wss, struct wlp, wss);
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ d_fnstart(5, dev, "wss (%p) \n", wss);
+ memset(&wss->wssid, 0, sizeof(wss->wssid));
+ wss->hash = 0;
+ memset(&wss->name[0], 0, sizeof(wss->name));
+ memset(&wss->bcast, 0, sizeof(wss->bcast));
+ wss->secure_status = WLP_WSS_UNSECURE;
+ memset(&wss->master_key[0], 0, sizeof(wss->master_key));
+ wss->tag = 0;
+ wss->state = WLP_WSS_STATE_NONE;
+ d_fnend(5, dev, "wss (%p) \n", wss);
+}
+
+/**
+ * Create sysfs infrastructure for WSS
+ *
+ * The WSS is configured to have the interface as parent (see
+ * wlp_wss_setup()); a new sysfs directory whose name includes the WSSID is
+ * created in the interface's sysfs directory. The group of files used to
+ * interact with the WSS is created as well.
+ */
+static
+int wlp_wss_sysfs_add(struct wlp_wss *wss, char *wssid_str)
+{
+ struct wlp *wlp = container_of(wss, struct wlp, wss);
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result;
+
+ d_fnstart(5, dev, "wss (%p), wssid: %s\n", wss, wssid_str);
+ result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str);
+ if (result < 0)
+ return result;
+ wss->kobj.ktype = &wss_ktype;
+ result = kobject_init_and_add(&wss->kobj,
+ &wss_ktype, wss->kobj.parent, "wlp");
+ if (result < 0) {
+ dev_err(dev, "WLP: Cannot register WSS kobject.\n");
+ goto error_kobject_register;
+ }
+ result = sysfs_create_group(&wss->kobj, &wss_attr_group);
+ if (result < 0) {
+ dev_err(dev, "WLP: Cannot register WSS attributes: %d\n",
+ result);
+ goto error_sysfs_create_group;
+ }
+ d_fnend(5, dev, "Completed. result = %d \n", result);
+ return 0;
+error_sysfs_create_group:
+
+ kobject_put(&wss->kobj); /* will free name if needed */
+ return result;
+error_kobject_register:
+ kfree(wss->kobj.name);
+ wss->kobj.name = NULL;
+ wss->kobj.ktype = NULL;
+ return result;
+}
+
+
+/**
+ * Release WSS
+ *
+ * No more references exist to this WSS. We should undo everything that was
+ * done in wlp_wss_create_activate() except removing the group. The group
+ * is not removed because an object can be unregistered before the group is
+ * created. We also undo any additional operations on the WSS after this
+ * (addition of members).
+ *
+ * If memory was allocated for the kobject's name then it will
+ * be freed by the kobject system during this time.
+ *
+ * The EDA cache is removed and reinitialized when the WSS is removed. We
+ * thus lose knowledge of the members of this WSS at that time and need not do
+ * it here.
+ */
+void wlp_wss_release(struct kobject *kobj)
+{
+ struct wlp_wss *wss = container_of(kobj, struct wlp_wss, kobj);
+
+ wlp_wss_reset(wss);
+}
+
+/**
+ * Enroll into a WSS using provided neighbor as registrar
+ *
+ * First search the neighborhood information to learn which neighbor is
+ * referred to, next proceed with enrollment.
+ *
+ * &wss->mutex is held
+ */
+static
+int wlp_wss_enroll_target(struct wlp_wss *wss, struct wlp_uuid *wssid,
+ struct uwb_dev_addr *dest)
+{
+ struct wlp *wlp = container_of(wss, struct wlp, wss);
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_neighbor_e *neighbor;
+ char buf[WLP_WSS_UUID_STRSIZE];
+ int result = -ENXIO;
+ struct uwb_dev_addr *dev_addr;
+
+ wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+ d_fnstart(5, dev, "wss %p, wssid %s, registrar %02x:%02x \n",
+ wss, buf, dest->data[1], dest->data[0]);
+ mutex_lock(&wlp->nbmutex);
+ list_for_each_entry(neighbor, &wlp->neighbors, node) {
+ dev_addr = &neighbor->uwb_dev->dev_addr;
+ if (!memcmp(dest, dev_addr, sizeof(*dest))) {
+ d_printf(5, dev, "Neighbor %02x:%02x is valid, "
+ "enrolling. \n",
+ dev_addr->data[1], dev_addr->data[0]);
+ result = wlp_enroll_neighbor(wlp, neighbor, wss,
+ wssid);
+ break;
+ }
+ }
+ if (result == -ENXIO)
+ dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n",
+ dest->data[1], dest->data[0]);
+ mutex_unlock(&wlp->nbmutex);
+ d_fnend(5, dev, "wss %p, wssid %s, registrar %02x:%02x, result %d \n",
+ wss, buf, dest->data[1], dest->data[0], result);
+ return result;
+}
+
+/**
+ * Enroll into a WSS previously discovered
+ *
+ * User provides WSSID of WSS, search for neighbor that has this WSS
+ * activated and attempt to enroll.
+ *
+ * &wss->mutex is held
+ */
+static
+int wlp_wss_enroll_discovered(struct wlp_wss *wss, struct wlp_uuid *wssid)
+{
+ struct wlp *wlp = container_of(wss, struct wlp, wss);
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct wlp_neighbor_e *neighbor;
+ struct wlp_wssid_e *wssid_e;
+ char buf[WLP_WSS_UUID_STRSIZE];
+ int result = -ENXIO;
+
+ wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+ d_fnstart(5, dev, "wss %p, wssid %s \n", wss, buf);
+ mutex_lock(&wlp->nbmutex);
+ list_for_each_entry(neighbor, &wlp->neighbors, node) {
+ list_for_each_entry(wssid_e, &neighbor->wssid, node) {
+ if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) {
+ d_printf(5, dev, "Found WSSID %s in neighbor "
+ "%02x:%02x cache. \n", buf,
+ neighbor->uwb_dev->dev_addr.data[1],
+ neighbor->uwb_dev->dev_addr.data[0]);
+ result = wlp_enroll_neighbor(wlp, neighbor,
+ wss, wssid);
+ if (result == 0) /* enrollment success */
+ goto out;
+ break;
+ }
+ }
+ }
+out:
+ if (result == -ENXIO)
+ dev_err(dev, "WLP: Cannot find WSSID %s in cache. \n", buf);
+ mutex_unlock(&wlp->nbmutex);
+ d_fnend(5, dev, "wss %p, wssid %s, result %d \n", wss, buf, result);
+ return result;
+}
+
+/**
+ * Enroll into WSS with provided WSSID, registrar may be provided
+ *
+ * @wss: our WSS that will be enrolled
+ * @wssid: wssid of the neighboring WSS that we want to enroll in
+ * @devaddr: the registrar may be specified; use the broadcast address
+ * (ff:ff) when any neighbor may act as registrar.
+ *
+ * &wss->mutex is held
+ */
+static
+int wlp_wss_enroll(struct wlp_wss *wss, struct wlp_uuid *wssid,
+ struct uwb_dev_addr *devaddr)
+{
+ int result;
+ struct wlp *wlp = container_of(wss, struct wlp, wss);
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ char buf[WLP_WSS_UUID_STRSIZE];
+ struct uwb_dev_addr bcast = {.data = {0xff, 0xff} };
+
+ wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+ if (wss->state != WLP_WSS_STATE_NONE) {
+ dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf);
+ result = -EEXIST;
+ goto error;
+ }
+ if (!memcmp(&bcast, devaddr, sizeof(bcast))) {
+ d_printf(5, dev, "Request to enroll in discovered WSS "
+ "with WSSID %s \n", buf);
+ result = wlp_wss_enroll_discovered(wss, wssid);
+ } else {
+ d_printf(5, dev, "Request to enroll in WSSID %s with "
+ "registrar %02x:%02x\n", buf, devaddr->data[1],
+ devaddr->data[0]);
+ result = wlp_wss_enroll_target(wss, wssid, devaddr);
+ }
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n",
+ buf, result);
+ goto error;
+ }
+ d_printf(2, dev, "Successfully enrolled into WSS %s \n", buf);
+ result = wlp_wss_sysfs_add(wss, buf);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n");
+ wlp_wss_reset(wss);
+ }
+error:
+ return result;
+}
+
+/**
+ * Activate given WSS
+ *
+ * Prior to activation a WSS must be enrolled. To activate a WSS a device
+ * includes the WSS hash in the WLP IE in its beacon in each superframe.
+ * WLP 0.99 [7.2.5].
+ *
+ * The WSS tag is also computed at this time. We currently support only one
+ * activated WSS, so the hash can be used as the tag without conflict and
+ * only one WSS hash needs to be included in the WLP IE.
+ */
+static
+int wlp_wss_activate(struct wlp_wss *wss)
+{
+ struct wlp *wlp = container_of(wss, struct wlp, wss);
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct uwb_rc *uwb_rc = wlp->rc;
+ int result;
+ struct {
+ struct wlp_ie wlp_ie;
+ u8 hash; /* only include one hash */
+ } ie_data;
+
+ d_fnstart(5, dev, "Activating WSS %p. \n", wss);
+ BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED);
+ wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid);
+ wss->tag = wss->hash;
+ memset(&ie_data, 0, sizeof(ie_data));
+ ie_data.wlp_ie.hdr.element_id = UWB_IE_WLP;
+ ie_data.wlp_ie.hdr.length = sizeof(ie_data) - sizeof(struct uwb_ie_hdr);
+ wlp_ie_set_hash_length(&ie_data.wlp_ie, sizeof(ie_data.hash));
+ ie_data.hash = wss->hash;
+ result = uwb_rc_ie_add(uwb_rc, &ie_data.wlp_ie.hdr,
+ sizeof(ie_data));
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to add WLP IE to beacon. "
+ "result = %d.\n", result);
+ goto error_wlp_ie;
+ }
+ wss->state = WLP_WSS_STATE_ACTIVE;
+ result = 0;
+error_wlp_ie:
+ d_fnend(5, dev, "Activating WSS %p, result = %d \n", wss, result);
+ return result;
+}
+
+/**
+ * Enroll in and activate WSS identified by provided WSSID
+ *
+ * The neighborhood cache should contain a list of all neighbors and the
+ * WSS they have activated. Based on that cache we determine which
+ * neighbor we can perform the association process with. The user also has
+ * the option to specify which neighbor it prefers as registrar.
+ * Successful enrollment (which also creates the sysfs directory containing
+ * information specific to this WSS) is followed by activation.
+ */
+int wlp_wss_enroll_activate(struct wlp_wss *wss, struct wlp_uuid *wssid,
+ struct uwb_dev_addr *devaddr)
+{
+ struct wlp *wlp = container_of(wss, struct wlp, wss);
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result = 0;
+ char buf[WLP_WSS_UUID_STRSIZE];
+
+ d_fnstart(5, dev, "Enrollment and activation requested. \n");
+ mutex_lock(&wss->mutex);
+ result = wlp_wss_enroll(wss, wssid, devaddr);
+ if (result < 0) {
+ wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
+ dev_err(dev, "WLP: Enrollment into WSS %s failed.\n", buf);
+ goto error_enroll;
+ }
+ result = wlp_wss_activate(wss);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to activate WSS. Undoing enrollment "
+ "result = %d \n", result);
+ /* Undo enrollment */
+ wlp_wss_reset(wss);
+ goto error_activate;
+ }
+error_activate:
+error_enroll:
+ mutex_unlock(&wss->mutex);
+ d_fnend(5, dev, "Completed. result = %d \n", result);
+ return result;
+}
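+
+/*
+ * Usage sketch: a caller that has parsed a WSSID from user input and does
+ * not care which neighbor acts as registrar would pass the broadcast
+ * device address, mirroring the check in wlp_wss_enroll() above. The
+ * surrounding caller is assumed, not shown here.
+ *
+ * struct uwb_dev_addr any_registrar = { .data = { 0xff, 0xff } };
+ * int err = wlp_wss_enroll_activate(&wlp->wss, &wssid, &any_registrar);
+ * if (err < 0)
+ * return err;
+ */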
+
+/**
+ * Create, enroll, and activate a new WSS
+ *
+ * @wssid: new wssid provided by user
+ * @name: WSS name requested by user
+ * @sec_status: security status requested by user
+ * @accept: whether the new WSS accepts enrollment of other devices
+ *
+ * A user requested the creation of a new WSS. All operations are done
+ * locally. The new WSS will be stored locally, the hash will be included
+ * in the WLP IE, and the sysfs infrastructure for this WSS will be
+ * created.
+ */
+int wlp_wss_create_activate(struct wlp_wss *wss, struct wlp_uuid *wssid,
+ char *name, unsigned sec_status, unsigned accept)
+{
+ struct wlp *wlp = container_of(wss, struct wlp, wss);
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result = 0;
+ char buf[WLP_WSS_UUID_STRSIZE];
+ d_fnstart(5, dev, "Request to create new WSS.\n");
+ result = wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+ d_printf(5, dev, "Request to create WSS: WSSID=%s, name=%s, "
+ "sec_status=%u, accepting enrollment=%u \n",
+ buf, name, sec_status, accept);
+ if (!mutex_trylock(&wss->mutex)) {
+ dev_err(dev, "WLP: WLP association session in progress.\n");
+ return -EBUSY;
+ }
+ if (wss->state != WLP_WSS_STATE_NONE) {
+ dev_err(dev, "WLP: WSS already exists. Not creating new.\n");
+ result = -EEXIST;
+ goto out;
+ }
+ if (wss->kobj.parent == NULL) {
+ dev_err(dev, "WLP: WSS parent not ready. Is network interface "
+ "up?\n");
+ result = -ENXIO;
+ goto out;
+ }
+ if (sec_status == WLP_WSS_SECURE) {
+ dev_err(dev, "WLP: FIXME Creation of secure WSS not "
+ "supported yet.\n");
+ result = -EINVAL;
+ goto out;
+ }
+ wss->wssid = *wssid;
+ memcpy(wss->name, name, sizeof(wss->name));
+ wss->bcast = wlp_wss_sel_bcast_addr(wss);
+ wss->secure_status = sec_status;
+ wss->accept_enroll = accept;
+ /* wss->virtual_addr is initialized in the call to wlp_wss_setup() */
+ /* sysfs infrastructure */
+ result = wlp_wss_sysfs_add(wss, buf);
+ if (result < 0) {
+ dev_err(dev, "WLP: Cannot set up sysfs for WSS kobject.\n");
+ wlp_wss_reset(wss);
+ goto out;
+ }
+ wss->state = WLP_WSS_STATE_ENROLLED;
+ result = wlp_wss_activate(wss);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to activate WSS. Undoing "
+ "enrollment\n");
+ wlp_wss_reset(wss);
+ goto out;
+ }
+ result = 0;
+out:
+ mutex_unlock(&wss->mutex);
+ d_fnend(5, dev, "Completed. result = %d \n", result);
+ return result;
+}
+
+/**
+ * Determine if neighbor has WSS activated
+ *
+ * @returns: 1 if neighbor has WSS activated, zero otherwise
+ *
+ * This can be done in two ways:
+ * - send a C1 frame, parse C2/F0 response
+ * - examine the WLP IE sent by the neighbor
+ *
+ * The WLP IE is not fully supported in hardware so we use the C1/C2 frame
+ * exchange to determine if a WSS is activated. Using the WLP IE should be
+ * faster and should be used when it becomes possible.
+ */
+int wlp_wss_is_active(struct wlp *wlp, struct wlp_wss *wss,
+ struct uwb_dev_addr *dev_addr)
+{
+ int result = 0;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ char buf[WLP_WSS_UUID_STRSIZE];
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct wlp_session session;
+ struct sk_buff *skb;
+ struct wlp_frame_assoc *resp;
+ struct wlp_uuid wssid;
+
+ wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
+ d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
+ wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
+ mutex_lock(&wlp->mutex);
+ /* Send C1 association frame */
+ result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1);
+ if (result < 0) {
+ dev_err(dev, "Unable to send C1 frame to neighbor "
+ "%02x:%02x (%d)\n", dev_addr->data[1],
+ dev_addr->data[0], result);
+ result = 0;
+ goto out;
+ }
+ /* Create session, wait for response */
+ session.exp_message = WLP_ASSOC_C2;
+ session.cb = wlp_session_cb;
+ session.cb_priv = &completion;
+ session.neighbor_addr = *dev_addr;
+ BUG_ON(wlp->session != NULL);
+ wlp->session = &session;
+ /* Wait for C2/F0 frame */
+ result = wait_for_completion_interruptible_timeout(&completion,
+ WLP_PER_MSG_TIMEOUT * HZ);
+ if (result == 0) {
+ dev_err(dev, "Timeout while sending C1 to neighbor "
+ "%02x:%02x.\n", dev_addr->data[1],
+ dev_addr->data[0]);
+ goto out;
+ }
+ if (result < 0) {
+ dev_err(dev, "Unable to send C1 to neighbor %02x:%02x.\n",
+ dev_addr->data[1], dev_addr->data[0]);
+ result = 0;
+ goto out;
+ }
+ /* Parse message in session->data: it will be either C2 or F0 */
+ skb = session.data;
+ resp = (void *) skb->data;
+ d_printf(5, dev, "Received response to C1 frame. \n");
+ d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len);
+ if (resp->type == WLP_ASSOC_F0) {
+ result = wlp_parse_f0(wlp, skb);
+ if (result < 0)
+ dev_err(dev, "WLP: unable to parse incoming F0 "
+ "frame from neighbor %02x:%02x.\n",
+ dev_addr->data[1], dev_addr->data[0]);
+ result = 0;
+ goto error_resp_parse;
+ }
+ /* WLP version and message type fields have already been parsed */
+ result = wlp_get_wssid(wlp, (void *)resp + sizeof(*resp), &wssid,
+ skb->len - sizeof(*resp));
+ if (result < 0) {
+ dev_err(dev, "WLP: unable to obtain WSSID from C2 frame.\n");
+ result = 0;
+ goto error_resp_parse;
+ }
+ if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) {
+ d_printf(5, dev, "WSSID in C2 frame matches local "
+ "active WSS.\n");
+ result = 1;
+ } else {
+ dev_err(dev, "WLP: Received a C2 frame without matching "
+ "WSSID.\n");
+ result = 0;
+ }
+error_resp_parse:
+ kfree_skb(skb);
+out:
+ wlp->session = NULL;
+ mutex_unlock(&wlp->mutex);
+ d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
+ wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
+ return result;
+}
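+
+/*
+ * Summary of the exchange coded in wlp_wss_is_active():
+ *
+ * local device neighbor
+ * C1 (query) -------------------------------->
+ * <----------------- C2 (carries WSSID) or F0 (error)
+ *
+ * A C2 carrying our WSSID means the neighbor has this WSS active (return
+ * 1); an F0, a mismatched WSSID, a timeout, an interruption or a send
+ * failure all count as "not active" (return 0).
+ */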
+
+/**
+ * Activate connection with neighbor by updating EDA cache
+ *
+ * @wss: local WSS to which neighbor wants to connect
+ * @dev_addr: neighbor's address
+ * @wssid: neighbor's WSSID - must be same as our WSS's WSSID
+ * @tag: neighbor's WSS tag used to identify frames transmitted by it
+ * @virt_addr: neighbor's virtual EUI-48
+ */
+static
+int wlp_wss_activate_connection(struct wlp *wlp, struct wlp_wss *wss,
+ struct uwb_dev_addr *dev_addr,
+ struct wlp_uuid *wssid, u8 *tag,
+ struct uwb_mac_addr *virt_addr)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result = 0;
+ char buf[WLP_WSS_UUID_STRSIZE];
+ wlp_wss_uuid_print(buf, sizeof(buf), wssid);
+ d_fnstart(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual "
+ "%02x:%02x:%02x:%02x:%02x:%02x \n", wlp, wss, buf, *tag,
+ virt_addr->data[0], virt_addr->data[1], virt_addr->data[2],
+ virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]);
+
+ if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) {
+ d_printf(5, dev, "WSSID from neighbor frame matches local "
+ "active WSS.\n");
+ /* Update EDA cache */
+ result = wlp_eda_update_node(&wlp->eda, dev_addr, wss,
+ (void *) virt_addr->data, *tag,
+ WLP_WSS_CONNECTED);
+ if (result < 0)
+ dev_err(dev, "WLP: Unable to update EDA cache "
+ "with new connected neighbor information.\n");
+ } else {
+ dev_err(dev, "WLP: Neighbor does not have matching "
+ "WSSID.\n");
+ result = -EINVAL;
+ }
+
+ d_fnend(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual "
+ "%02x:%02x:%02x:%02x:%02x:%02x, result = %d \n",
+ wlp, wss, buf, *tag,
+ virt_addr->data[0], virt_addr->data[1], virt_addr->data[2],
+ virt_addr->data[3], virt_addr->data[4], virt_addr->data[5],
+ result);
+
+ return result;
+}
+
+/**
+ * Connect to WSS neighbor
+ *
+ * Use C3/C4 exchange to determine if neighbor has WSS activated and
+ * retrieve the WSS tag and virtual EUI-48 of the neighbor.
+ */
+static
+int wlp_wss_connect_neighbor(struct wlp *wlp, struct wlp_wss *wss,
+ struct uwb_dev_addr *dev_addr)
+{
+ int result;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ char buf[WLP_WSS_UUID_STRSIZE];
+ struct wlp_uuid wssid;
+ u8 tag;
+ struct uwb_mac_addr virt_addr;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct wlp_session session;
+ struct wlp_frame_assoc *resp;
+ struct sk_buff *skb;
+
+ wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
+ d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
+ wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
+ mutex_lock(&wlp->mutex);
+ /* Send C3 association frame */
+ result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3);
+ if (result < 0) {
+ dev_err(dev, "Unable to send C3 frame to neighbor "
+ "%02x:%02x (%d)\n", dev_addr->data[1],
+ dev_addr->data[0], result);
+ goto out;
+ }
+ /* Create session, wait for response */
+ session.exp_message = WLP_ASSOC_C4;
+ session.cb = wlp_session_cb;
+ session.cb_priv = &completion;
+ session.neighbor_addr = *dev_addr;
+ BUG_ON(wlp->session != NULL);
+ wlp->session = &session;
+ /* Wait for C4/F0 frame */
+ result = wait_for_completion_interruptible_timeout(&completion,
+ WLP_PER_MSG_TIMEOUT * HZ);
+ if (result == 0) {
+ dev_err(dev, "Timeout while sending C3 to neighbor "
+ "%02x:%02x.\n", dev_addr->data[1],
+ dev_addr->data[0]);
+ result = -ETIMEDOUT;
+ goto out;
+ }
+ if (result < 0) {
+ dev_err(dev, "Unable to send C3 to neighbor %02x:%02x.\n",
+ dev_addr->data[1], dev_addr->data[0]);
+ goto out;
+ }
+ /* Parse message in session->data: it will be either C4 or F0 */
+ skb = session.data;
+ resp = (void *) skb->data;
+ d_printf(5, dev, "Received response to C3 frame. \n");
+ d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len);
+ if (resp->type == WLP_ASSOC_F0) {
+ result = wlp_parse_f0(wlp, skb);
+ if (result < 0)
+ dev_err(dev, "WLP: unable to parse incoming F0 "
+ "frame from neighbor %02x:%02x.\n",
+ dev_addr->data[1], dev_addr->data[0]);
+ result = -EINVAL;
+ goto error_resp_parse;
+ }
+ result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to parse C4 frame from neighbor.\n");
+ goto error_resp_parse;
+ }
+ result = wlp_wss_activate_connection(wlp, wss, dev_addr, &wssid, &tag,
+ &virt_addr);
+ if (result < 0) {
+ dev_err(dev, "WLP: Unable to activate connection to "
+ "neighbor %02x:%02x.\n", dev_addr->data[1],
+ dev_addr->data[0]);
+ goto error_resp_parse;
+ }
+error_resp_parse:
+ kfree_skb(skb);
+out:
+ /* Record that we unsuccessfully tried to connect to this neighbor */
+ if (result < 0)
+ wlp_eda_update_node_state(&wlp->eda, dev_addr,
+ WLP_WSS_CONNECT_FAILED);
+ wlp->session = NULL;
+ mutex_unlock(&wlp->mutex);
+ d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
+ wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
+ return result;
+}
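+
+/*
+ * Summary of the exchange coded in wlp_wss_connect_neighbor():
+ *
+ * local device neighbor
+ * C3 (connect request) ----------------------->
+ * <-------- C4 (WSSID, tag, virtual EUI-48) or F0 (error)
+ *
+ * On a C4 with a matching WSSID the neighbor's tag and virtual EUI-48 are
+ * stored in the EDA cache (WLP_WSS_CONNECTED); on any failure the entry is
+ * marked WLP_WSS_CONNECT_FAILED so that we do not retry (WLP 0.99 [7.6.2]).
+ */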
+
+/**
+ * Connect to neighbor with common WSS, send pending frame
+ *
+ * This function is scheduled when a frame is destined to a neighbor with
+ * which we do not have a connection. A copy of the EDA cache entry is
+ * provided - not the actual cache entry (because it is protected by a
+ * spinlock).
+ *
+ * First determine if the neighbor has the same WSS activated and connect
+ * if it does. The C3/C4 exchange serves a dual purpose: it determines
+ * whether the neighbor has the WSS activated and, if so, establishes the
+ * connection.
+ *
+ * The frame that triggered the connection setup is sent once the
+ * connection has been established.
+ *
+ * The network queue was stopped before this work was scheduled; we must
+ * restart it when done.
+ */
+static
+void wlp_wss_connect_send(struct work_struct *ws)
+{
+ struct wlp_assoc_conn_ctx *conn_ctx = container_of(ws,
+ struct wlp_assoc_conn_ctx,
+ ws);
+ struct wlp *wlp = conn_ctx->wlp;
+ struct sk_buff *skb = conn_ctx->skb;
+ struct wlp_eda_node *eda_entry = &conn_ctx->eda_entry;
+ struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
+ struct wlp_wss *wss = &wlp->wss;
+ int result;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ char buf[WLP_WSS_UUID_STRSIZE];
+
+ mutex_lock(&wss->mutex);
+ wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
+ d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
+ wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
+ if (wss->state < WLP_WSS_STATE_ACTIVE) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Attempting to connect with "
+ "WSS that is not active or connected.\n");
+ dev_kfree_skb(skb);
+ goto out;
+ }
+ /* Establish connection - send C3 rcv C4 */
+ result = wlp_wss_connect_neighbor(wlp, wss, dev_addr);
+ if (result < 0) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Unable to establish connection "
+ "with neighbor %02x:%02x.\n",
+ dev_addr->data[1], dev_addr->data[0]);
+ dev_kfree_skb(skb);
+ goto out;
+ }
+ /* EDA entry changed, update the local copy being used */
+ result = wlp_copy_eda_node(&wlp->eda, dev_addr, eda_entry);
+ if (result < 0) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Cannot find EDA entry for "
+ "neighbor %02x:%02x \n",
+ dev_addr->data[1], dev_addr->data[0]);
+ }
+ result = wlp_wss_prep_hdr(wlp, eda_entry, skb);
+ if (result < 0) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Unable to prepare frame header for "
+ "transmission (neighbor %02x:%02x). \n",
+ dev_addr->data[1], dev_addr->data[0]);
+ dev_kfree_skb(skb);
+ goto out;
+ }
+ BUG_ON(wlp->xmit_frame == NULL);
+ result = wlp->xmit_frame(wlp, skb, dev_addr);
+ if (result < 0) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Unable to transmit frame: %d\n",
+ result);
+ if (result == -ENXIO)
+ dev_err(dev, "WLP: Is network interface up? \n");
+ /* We could try again ... */
+ dev_kfree_skb(skb); /* we need to free the skb if tx fails */
+ }
+out:
+ kfree(conn_ctx);
+ BUG_ON(wlp->start_queue == NULL);
+ wlp->start_queue(wlp);
+ mutex_unlock(&wss->mutex);
+ d_fnend(5, dev, "wlp %p, wss %p (wssid %s)\n", wlp, wss, buf);
+}
+
+/**
+ * Add WLP header to outgoing skb
+ *
+ * @eda_entry: pointer to neighbor's entry in the EDA cache
+ * @_skb: skb containing data destined to the neighbor
+ */
+int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry,
+ void *_skb)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result = 0;
+ unsigned char *eth_addr = eda_entry->eth_addr;
+ struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
+ struct sk_buff *skb = _skb;
+ struct wlp_frame_std_abbrv_hdr *std_hdr;
+
+ d_fnstart(6, dev, "wlp %p \n", wlp);
+ if (eda_entry->state == WLP_WSS_CONNECTED) {
+ /* Add WLP header */
+ BUG_ON(skb_headroom(skb) < sizeof(*std_hdr));
+ std_hdr = (void *) __skb_push(skb, sizeof(*std_hdr));
+ std_hdr->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
+ std_hdr->hdr.type = WLP_FRAME_STANDARD;
+ std_hdr->tag = eda_entry->wss->tag;
+ } else {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Destination neighbor (Ethernet: "
+ "%02x:%02x:%02x:%02x:%02x:%02x, Dev: "
+ "%02x:%02x) is not connected. \n", eth_addr[0],
+ eth_addr[1], eth_addr[2], eth_addr[3],
+ eth_addr[4], eth_addr[5], dev_addr->data[1],
+ dev_addr->data[0]);
+ result = -EINVAL;
+ }
+ d_fnend(6, dev, "wlp %p \n", wlp);
+ return result;
+}
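+
+/*
+ * For reference, wlp_wss_prep_hdr() above turns the outgoing skb into a
+ * standard abbreviated WLP frame laid out roughly as follows (field order
+ * as in struct wlp_frame_std_abbrv_hdr, exact packing defined in the WLP
+ * headers):
+ *
+ * | mux_hdr = WLP_PROTOCOL_ID (le16) | type = WLP_FRAME_STANDARD | tag | original skb data |
+ *
+ * The tag is the destination's WSS tag from the EDA entry; the receiver
+ * uses it to identify the WSS the frame belongs to.
+ */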
+
+
+/**
+ * Prepare skb for neighbor: connect if not already and prep WLP header
+ *
+ * This function is called in atomic context (the EDA spinlock is held),
+ * but establishing the WLP connection needs to sleep. We therefore
+ * temporarily stop the net queue and defer the connection setup and the
+ * restart of the queue to the default work queue.
+ *
+ * run with eda->lock held (spinlock)
+ */
+int wlp_wss_connect_prep(struct wlp *wlp, struct wlp_eda_node *eda_entry,
+ void *_skb)
+{
+ int result = 0;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
+ unsigned char *eth_addr = eda_entry->eth_addr;
+ struct sk_buff *skb = _skb;
+ struct wlp_assoc_conn_ctx *conn_ctx;
+
+ d_fnstart(5, dev, "wlp %p\n", wlp);
+ d_printf(5, dev, "To neighbor %02x:%02x with eth "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n", dev_addr->data[1],
+ dev_addr->data[0], eth_addr[0], eth_addr[1], eth_addr[2],
+ eth_addr[3], eth_addr[4], eth_addr[5]);
+ if (eda_entry->state == WLP_WSS_UNCONNECTED) {
+ /* We don't want any more packets while we set up connection */
+ BUG_ON(wlp->stop_queue == NULL);
+ wlp->stop_queue(wlp);
+ conn_ctx = kmalloc(sizeof(*conn_ctx), GFP_ATOMIC);
+ if (conn_ctx == NULL) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Unable to allocate memory "
+ "for connection handling.\n");
+ result = -ENOMEM;
+ goto out;
+ }
+ conn_ctx->wlp = wlp;
+ conn_ctx->skb = skb;
+ conn_ctx->eda_entry = *eda_entry;
+ INIT_WORK(&conn_ctx->ws, wlp_wss_connect_send);
+ schedule_work(&conn_ctx->ws);
+ result = 1;
+ } else if (eda_entry->state == WLP_WSS_CONNECT_FAILED) {
+ /* Previous connection attempts failed, don't retry - see
+ * conditions for connection in WLP 0.99 [7.6.2] */
+ if (printk_ratelimit())
+ dev_err(dev, "Could not connect to neighbor "
+ "previously. Not retrying. \n");
+ result = -ENONET;
+ goto out;
+ } else { /* eda_entry->state == WLP_WSS_CONNECTED */
+ d_printf(5, dev, "Neighbor is connected, preparing frame.\n");
+ result = wlp_wss_prep_hdr(wlp, eda_entry, skb);
+ }
+out:
+ d_fnend(5, dev, "wlp %p, result = %d \n", wlp, result);
+ return result;
+}
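+
+/*
+ * Return convention of wlp_wss_connect_prep(), as used by the tx paths in
+ * this driver (see wlp_wss_send_copy() below for the broadcast-emulation
+ * case):
+ *
+ * < 0 error, the caller frees the skb
+ * 0 header prepended, the caller transmits via wlp->xmit_frame()
+ * 1 connection setup scheduled, the skb is consumed by the work item
+ */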
+
+/**
+ * Emulate broadcast: copy skb, send copy to neighbor (connect if not already)
+ *
+ * We need to copy skbs when we emulate broadcast through unicast. We copy
+ * instead of clone because we modify the frame data after copying; clones
+ * share data, so they cannot be used to emulate broadcast.
+ *
+ * run with eda->lock held (spinlock)
+ */
+int wlp_wss_send_copy(struct wlp *wlp, struct wlp_eda_node *eda_entry,
+ void *_skb)
+{
+ int result = -ENOMEM;
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ struct sk_buff *skb = _skb;
+ struct sk_buff *copy;
+ struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
+
+ d_fnstart(5, dev, "to neighbor %02x:%02x, skb (%p) \n",
+ dev_addr->data[1], dev_addr->data[0], skb);
+ copy = skb_copy(skb, GFP_ATOMIC);
+ if (copy == NULL) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Unable to copy skb for "
+ "transmission.\n");
+ goto out;
+ }
+ result = wlp_wss_connect_prep(wlp, eda_entry, copy);
+ if (result < 0) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Unable to connect/send skb "
+ "to neighbor.\n");
+ dev_kfree_skb_irq(copy);
+ goto out;
+ } else if (result == 1)
+ /* Frame will be transmitted separately */
+ goto out;
+ BUG_ON(wlp->xmit_frame == NULL);
+ result = wlp->xmit_frame(wlp, copy, dev_addr);
+ if (result < 0) {
+ if (printk_ratelimit())
+ dev_err(dev, "WLP: Unable to transmit frame: %d\n",
+ result);
+ if ((result == -ENXIO) && printk_ratelimit())
+ dev_err(dev, "WLP: Is network interface up? \n");
+ /* We could try again ... */
+ dev_kfree_skb_irq(copy); /* we need to free the copy if tx fails */
+ }
+out:
+ d_fnend(5, dev, "to neighbor %02x:%02x \n", dev_addr->data[1],
+ dev_addr->data[0]);
+ return result;
+}
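+
+/*
+ * Broadcast emulation calls wlp_wss_send_copy() once per EDA entry, with
+ * the EDA spinlock held by the iterator. A hypothetical sketch of such a
+ * dispatch (the iterator name is assumed; the real dispatch lives in the
+ * WLP tx path elsewhere in this driver):
+ *
+ * wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb);
+ */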
+
+
+/**
+ * Setup WSS
+ *
+ * Should be called by network driver after the interface has been given a
+ * MAC address.
+ */
+int wlp_wss_setup(struct net_device *net_dev, struct wlp_wss *wss)
+{
+ struct wlp *wlp = container_of(wss, struct wlp, wss);
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ int result = 0;
+ d_fnstart(5, dev, "wss (%p) \n", wss);
+ mutex_lock(&wss->mutex);
+ wss->kobj.parent = &net_dev->dev.kobj;
+ if (!is_valid_ether_addr(net_dev->dev_addr)) {
+ dev_err(dev, "WLP: Invalid MAC address. Cannot use for"
+ "virtual.\n");
+ result = -EINVAL;
+ goto out;
+ }
+ memcpy(wss->virtual_addr.data, net_dev->dev_addr,
+ sizeof(wss->virtual_addr.data));
+out:
+ mutex_unlock(&wss->mutex);
+ d_fnend(5, dev, "wss (%p) \n", wss);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wlp_wss_setup);
+
+/**
+ * Remove WSS
+ *
+ * Called by the client that configured the WSS through wlp_wss_setup(),
+ * when the client no longer needs the WSS, e.g. when it shuts down.
+ *
+ * We remove the WLP IE from the beacon before initiating local cleanup.
+ */
+void wlp_wss_remove(struct wlp_wss *wss)
+{
+ struct wlp *wlp = container_of(wss, struct wlp, wss);
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ d_fnstart(5, dev, "wss (%p) \n", wss);
+ mutex_lock(&wss->mutex);
+ if (wss->state == WLP_WSS_STATE_ACTIVE)
+ uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP);
+ if (wss->state != WLP_WSS_STATE_NONE) {
+ sysfs_remove_group(&wss->kobj, &wss_attr_group);
+ kobject_put(&wss->kobj);
+ }
+ wss->kobj.parent = NULL;
+ memset(&wss->virtual_addr, 0, sizeof(wss->virtual_addr));
+ /* Cleanup EDA cache */
+ wlp_eda_release(&wlp->eda);
+ wlp_eda_init(&wlp->eda);
+ mutex_unlock(&wss->mutex);
+ d_fnend(5, dev, "wss (%p) \n", wss);
+}
+EXPORT_SYMBOL_GPL(wlp_wss_remove);
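+
+/*
+ * Usage sketch: a WLP-aware network driver is expected to pair these two
+ * exported calls around the lifetime of its interface. The callback and
+ * private-structure names below are hypothetical.
+ *
+ * static int my_wlp_netdev_open(struct net_device *net_dev)
+ * {
+ * struct my_wlp_priv *priv = netdev_priv(net_dev);
+ * return wlp_wss_setup(net_dev, &priv->wlp.wss); // after MAC is set
+ * }
+ *
+ * static int my_wlp_netdev_stop(struct net_device *net_dev)
+ * {
+ * struct my_wlp_priv *priv = netdev_priv(net_dev);
+ * wlp_wss_remove(&priv->wlp.wss);
+ * return 0;
+ * }
+ */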
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index c73b5e2919c..ada8ad82d99 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -102,7 +102,7 @@ static void w83697ug_select_wd_register(void)
} else {
printk(KERN_ERR PFX "No W83697UG/UF could be found\n");
- return -EIO;
+ return;
}
outb_p(0x07, WDT_EFER); /* point to logical device number reg */
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 565280ec1c6..974f56d1ebe 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -2,7 +2,7 @@
#include <xen/xenbus.h>
-#include <asm-x86/xen/hypervisor.h>
+#include <asm/xen/hypervisor.h>
#include <asm/cpu.h>
static void enable_hotplug_cpu(int cpu)