-rw-r--r--CREDITS5
-rw-r--r--Documentation/DocBook/kernel-api.tmpl13
-rw-r--r--Documentation/block/barrier.txt6
-rw-r--r--Documentation/block/biodoc.txt10
-rw-r--r--Documentation/block/request.txt2
-rw-r--r--Documentation/feature-removal-schedule.txt16
-rw-r--r--Documentation/gpio.txt4
-rw-r--r--Documentation/iostats.txt2
-rw-r--r--Documentation/kernel-parameters.txt7
-rw-r--r--Documentation/lguest/Makefile3
-rw-r--r--Documentation/lguest/extract58
-rw-r--r--Documentation/lguest/lguest.c702
-rw-r--r--Documentation/sched-stats.txt195
-rw-r--r--MAINTAINERS8
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/kernel/head.S1
-rw-r--r--arch/alpha/kernel/pci.c10
-rw-r--r--arch/alpha/kernel/pci_iommu.c4
-rw-r--r--arch/alpha/kernel/smp.c7
-rw-r--r--arch/alpha/kernel/vmlinux.lds.S1
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/kernel/setup.c1
-rw-r--r--arch/arm/kernel/smp.c3
-rw-r--r--arch/arm/kernel/traps.c2
-rw-r--r--arch/arm/mach-sa1100/jornada720.c27
-rw-r--r--arch/arm/mach-sa1100/jornada720_ssp.c2
-rw-r--r--arch/arm/plat-omap/mailbox.c8
-rw-r--r--arch/blackfin/Makefile4
-rw-r--r--arch/blackfin/kernel/dma-mapping.c3
-rw-r--r--arch/blackfin/kernel/setup.c5
-rw-r--r--arch/blackfin/kernel/traps.c2
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S16
-rw-r--r--arch/blackfin/mach-bf561/head.S10
-rw-r--r--arch/blackfin/mach-common/cacheinit.S3
-rw-r--r--arch/blackfin/mach-common/ints-priority-dc.c39
-rw-r--r--arch/blackfin/mach-common/ints-priority-sc.c48
-rw-r--r--arch/frv/kernel/sys_frv.c1
-rw-r--r--arch/i386/Kconfig.debug4
-rw-r--r--arch/i386/boot/apm.c7
-rw-r--r--arch/i386/boot/main.c16
-rw-r--r--arch/i386/kernel/alternative.c14
-rw-r--r--arch/i386/kernel/cpu/cpufreq/Kconfig2
-rw-r--r--arch/i386/kernel/e820.c2
-rw-r--r--arch/i386/kernel/microcode.c1
-rw-r--r--arch/i386/kernel/sys_i386.c1
-rw-r--r--arch/i386/kernel/sysenter.c1
-rw-r--r--arch/i386/mm/init.c16
-rw-r--r--arch/i386/power/Makefile2
-rw-r--r--arch/ia64/Kconfig3
-rw-r--r--arch/ia64/hp/common/sba_iommu.c7
-rw-r--r--arch/ia64/hp/sim/simscsi.c2
-rw-r--r--arch/ia64/ia32/ia32_support.c8
-rw-r--r--arch/ia64/ia32/ia32priv.h12
-rw-r--r--arch/ia64/ia32/sys_ia32.c81
-rw-r--r--arch/ia64/kernel/acpi.c28
-rw-r--r--arch/ia64/kernel/cyclone.c14
-rw-r--r--arch/ia64/kernel/head.S4
-rw-r--r--arch/ia64/kernel/init_task.c1
-rw-r--r--arch/ia64/kernel/irq_ia64.c31
-rw-r--r--arch/ia64/kernel/machvec.c27
-rw-r--r--arch/ia64/kernel/process.c7
-rw-r--r--arch/ia64/kernel/setup.c11
-rw-r--r--arch/ia64/kernel/smp.c2
-rw-r--r--arch/ia64/kernel/smpboot.c6
-rw-r--r--arch/ia64/kernel/time.c4
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S2
-rw-r--r--arch/ia64/pci/pci.c2
-rw-r--r--arch/m32r/kernel/setup_mappi.c5
-rw-r--r--arch/m68k/kernel/process.c1
-rw-r--r--arch/m68k/kernel/sys_m68k.c1
-rw-r--r--arch/m68knommu/Kconfig24
-rw-r--r--arch/m68knommu/Makefile2
-rw-r--r--arch/m68knommu/kernel/dma.c1
-rw-r--r--arch/m68knommu/kernel/setup.c7
-rw-r--r--arch/m68knommu/platform/5206/config.c10
-rw-r--r--arch/m68knommu/platform/5206e/config.c9
-rw-r--r--arch/m68knommu/platform/520x/config.c9
-rw-r--r--arch/m68knommu/platform/523x/config.c10
-rw-r--r--arch/m68knommu/platform/5249/config.c10
-rw-r--r--arch/m68knommu/platform/5272/config.c9
-rw-r--r--arch/m68knommu/platform/527x/config.c10
-rw-r--r--arch/m68knommu/platform/528x/config.c10
-rw-r--r--arch/m68knommu/platform/5307/config.c9
-rw-r--r--arch/m68knommu/platform/5307/entry.S11
-rw-r--r--arch/m68knommu/platform/5307/pit.c14
-rw-r--r--arch/m68knommu/platform/5307/timers.c13
-rw-r--r--arch/m68knommu/platform/532x/config.c7
-rw-r--r--arch/m68knommu/platform/5407/config.c9
-rw-r--r--arch/m68knommu/platform/68328/timers.c11
-rw-r--r--arch/m68knommu/platform/68360/config.c14
-rw-r--r--arch/m68knommu/platform/68VZ328/config.c7
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/arc/console.c31
-rw-r--r--arch/mips/jazz/io.c135
-rw-r--r--arch/mips/jazz/reset.c13
-rw-r--r--arch/mips/jazz/setup.c4
-rw-r--r--arch/mips/jmr3927/rbhma3100/setup.c2
-rw-r--r--arch/mips/kernel/gdb-stub.c4
-rw-r--r--arch/mips/kernel/head.S2
-rw-r--r--arch/mips/kernel/linux32.c2
-rw-r--r--arch/mips/kernel/rtlx.c2
-rw-r--r--arch/mips/kernel/smp.c1
-rw-r--r--arch/mips/kernel/syscall.c5
-rw-r--r--arch/mips/kernel/vpe.c3
-rw-r--r--arch/mips/mm/c-sb1.c2
-rw-r--r--arch/mips/mm/init.c2
-rw-r--r--arch/mips/sni/sniprom.c5
-rw-r--r--arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c2
-rw-r--r--arch/parisc/hpux/fs.c1
-rw-r--r--arch/parisc/kernel/init_task.c1
-rw-r--r--arch/parisc/kernel/process.c1
-rw-r--r--arch/parisc/kernel/smp.c1
-rw-r--r--arch/powerpc/Kconfig9
-rw-r--r--arch/powerpc/Kconfig.debug4
-rw-r--r--arch/powerpc/boot/dts/kuroboxHD.dts18
-rw-r--r--arch/powerpc/boot/dts/kuroboxHG.dts19
-rw-r--r--arch/powerpc/boot/dts/mpc7448hpc2.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8313erdb.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc832x_mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc832x_rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8349emitx.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8349emitxgp.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc834x_mds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc836x_mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8540ads.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8541cds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8544ds.dts219
-rw-r--r--arch/powerpc/boot/dts/mpc8548cds.dts250
-rw-r--r--arch/powerpc/boot/dts/mpc8555cds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8560ads.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8568mds.dts60
-rw-r--r--arch/powerpc/boot/dts/mpc8641_hpcn.dts8
-rw-r--r--arch/powerpc/configs/lite5200_defconfig2
-rw-r--r--arch/powerpc/configs/mpc8544_ds_defconfig333
-rw-r--r--arch/powerpc/configs/mpc8568mds_defconfig292
-rw-r--r--arch/powerpc/configs/pmac32_defconfig2
-rw-r--r--arch/powerpc/kernel/Makefile6
-rw-r--r--arch/powerpc/kernel/head_64.S7
-rw-r--r--arch/powerpc/kernel/iomap.c8
-rw-r--r--arch/powerpc/kernel/irq.c8
-rw-r--r--arch/powerpc/kernel/pci-common.c25
-rw-r--r--arch/powerpc/kernel/pci_32.c45
-rw-r--r--arch/powerpc/kernel/process.c4
-rw-r--r--arch/powerpc/kernel/prom_parse.c2
-rw-r--r--arch/powerpc/kernel/setup-common.c4
-rw-r--r--arch/powerpc/kernel/syscalls.c1
-rw-r--r--arch/powerpc/kernel/udbg.c2
-rw-r--r--arch/powerpc/lib/rheap.c1
-rw-r--r--arch/powerpc/mm/lmb.c4
-rw-r--r--arch/powerpc/oprofile/cell/spu_task_sync.c1
-rw-r--r--arch/powerpc/platforms/82xx/mpc82xx_ads.c3
-rw-r--r--arch/powerpc/platforms/83xx/pci.c4
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig6
-rw-r--r--arch/powerpc/platforms/85xx/Makefile2
-rw-r--r--arch/powerpc/platforms/85xx/mpc8544_ds.c241
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx.h1
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ads.c3
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c125
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c7
-rw-r--r--arch/powerpc/platforms/85xx/pci.c91
-rw-r--r--arch/powerpc/platforms/86xx/Kconfig3
-rw-r--r--arch/powerpc/platforms/86xx/Makefile1
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx.h5
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c13
-rw-r--r--arch/powerpc/platforms/86xx/pci.c238
-rw-r--r--arch/powerpc/platforms/cell/spufs/context.c3
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h5
-rw-r--r--arch/powerpc/platforms/chrp/pci.c7
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c2
-rw-r--r--arch/powerpc/platforms/iseries/lpevents.c2
-rw-r--r--arch/powerpc/platforms/maple/pci.c3
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c257
-rw-r--r--arch/powerpc/sysdev/fsl_pci.h88
-rw-r--r--arch/powerpc/sysdev/fsl_pcie.h94
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c61
-rw-r--r--arch/powerpc/sysdev/grackle.c2
-rw-r--r--arch/powerpc/sysdev/indirect_pci.c60
-rw-r--r--arch/powerpc/sysdev/mv64x60_pci.c2
-rw-r--r--arch/ppc/configs/TQM8540_defconfig2
-rw-r--r--arch/ppc/configs/TQM8541_defconfig2
-rw-r--r--arch/ppc/configs/TQM8555_defconfig2
-rw-r--r--arch/ppc/configs/TQM8560_defconfig2
-rw-r--r--arch/ppc/configs/ev64360_defconfig2
-rw-r--r--arch/ppc/configs/ml300_defconfig2
-rw-r--r--arch/ppc/configs/ml403_defconfig2
-rw-r--r--arch/ppc/configs/mpc834x_sys_defconfig2
-rw-r--r--arch/ppc/configs/prep_defconfig2
-rw-r--r--arch/s390/appldata/appldata_base.c12
-rw-r--r--arch/s390/kernel/compat_wrapper.S10
-rw-r--r--arch/s390/kernel/entry.S6
-rw-r--r--arch/s390/kernel/entry64.S6
-rw-r--r--arch/s390/kernel/head.S1
-rw-r--r--arch/s390/kernel/init_task.c1
-rw-r--r--arch/s390/kernel/process.c1
-rw-r--r--arch/s390/kernel/smp.c31
-rw-r--r--arch/s390/kernel/sys_s390.c21
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/s390/kernel/vmlinux.lds.S1
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/s390/mm/vmem.c6
-rw-r--r--arch/sparc/defconfig2
-rw-r--r--arch/sparc/kernel/init_task.c1
-rw-r--r--arch/sparc/kernel/sparc_ksyms.c3
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S2
-rw-r--r--arch/sparc/lib/memset.S2
-rw-r--r--arch/sparc/prom/printf.c2
-rw-r--r--arch/sparc64/Kconfig.debug2
-rw-r--r--arch/sparc64/defconfig32
-rw-r--r--arch/sparc64/kernel/head.S11
-rw-r--r--arch/sparc64/kernel/init_task.c1
-rw-r--r--arch/sparc64/kernel/process.c1
-rw-r--r--arch/sparc64/kernel/viohs.c2
-rw-r--r--arch/sparc64/kernel/vmlinux.lds.S2
-rw-r--r--arch/um/drivers/mconsole_kern.c4
-rw-r--r--arch/um/drivers/mmapper_kern.c1
-rw-r--r--arch/um/drivers/net_kern.c2
-rw-r--r--arch/um/drivers/ubd_kern.c4
-rw-r--r--arch/um/kernel/exec.c1
-rw-r--r--arch/um/kernel/init_task.c1
-rw-r--r--arch/um/kernel/mem.c2
-rw-r--r--arch/um/kernel/physmem.c15
-rw-r--r--arch/um/kernel/skas/process.c4
-rw-r--r--arch/um/kernel/syscall.c1
-rw-r--r--arch/um/os-Linux/aio.c47
-rw-r--r--arch/um/os-Linux/process.c2
-rw-r--r--arch/um/os-Linux/user_syms.c5
-rw-r--r--arch/um/sys-i386/Makefile2
-rw-r--r--arch/x86_64/Kconfig3
-rw-r--r--arch/x86_64/defconfig2
-rw-r--r--arch/x86_64/ia32/ia32_binfmt.c5
-rw-r--r--arch/x86_64/ia32/ptrace32.c1
-rw-r--r--arch/x86_64/ia32/syscall32.c8
-rw-r--r--arch/x86_64/kernel/Makefile2
-rw-r--r--arch/x86_64/kernel/acpi/sleep.c4
-rw-r--r--arch/x86_64/kernel/process.c1
-rw-r--r--arch/x86_64/kernel/suspend.c4
-rw-r--r--arch/x86_64/kernel/sys_x86_64.c1
-rw-r--r--arch/x86_64/kernel/tce.c4
-rw-r--r--arch/x86_64/kernel/tsc.c2
-rw-r--r--arch/x86_64/mm/init.c10
-rw-r--r--arch/x86_64/vdso/vma.c1
-rw-r--r--block/as-iosched.c26
-rw-r--r--block/blktrace.c30
-rw-r--r--block/bsg.c12
-rw-r--r--block/cfq-iosched.c39
-rw-r--r--block/deadline-iosched.c18
-rw-r--r--block/elevator.c75
-rw-r--r--block/ll_rw_blk.c215
-rw-r--r--block/noop-iosched.c14
-rw-r--r--block/scsi_ioctl.c24
-rw-r--r--drivers/acorn/block/fd1772.c4
-rw-r--r--drivers/acorn/block/mfmhd.c2
-rw-r--r--drivers/acpi/Kconfig54
-rw-r--r--drivers/acpi/ac.c9
-rw-r--r--drivers/acpi/acpi_memhotplug.c8
-rw-r--r--drivers/acpi/asus_acpi.c11
-rw-r--r--drivers/acpi/battery.c9
-rw-r--r--drivers/acpi/button.c12
-rw-r--r--drivers/acpi/container.c10
-rw-r--r--drivers/acpi/ec.c8
-rw-r--r--drivers/acpi/events/evrgnini.c2
-rw-r--r--drivers/acpi/fan.c8
-rw-r--r--drivers/acpi/namespace/nsxfeval.c2
-rw-r--r--drivers/acpi/pci_link.c9
-rw-r--r--drivers/acpi/pci_root.c9
-rw-r--r--drivers/acpi/power.c8
-rw-r--r--drivers/acpi/processor_core.c8
-rw-r--r--drivers/acpi/processor_throttling.c59
-rw-r--r--drivers/acpi/sbs.c10
-rw-r--r--drivers/acpi/scan.c156
-rw-r--r--drivers/acpi/sleep/Makefile2
-rw-r--r--drivers/acpi/sleep/main.c244
-rw-r--r--drivers/acpi/sleep/proc.c24
-rw-r--r--drivers/acpi/sleep/sleep.h2
-rw-r--r--drivers/acpi/sleep/wakeup.c2
-rw-r--r--drivers/acpi/thermal.c8
-rw-r--r--drivers/acpi/utilities/uteval.c4
-rw-r--r--drivers/acpi/video.c8
-rw-r--r--drivers/ata/ata_piix.c113
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/ata/pata_ali.c2
-rw-r--r--drivers/ata/pata_hpt37x.c14
-rw-r--r--drivers/base/power/Makefile2
-rw-r--r--drivers/base/power/power.h4
-rw-r--r--drivers/base/power/shutdown.c2
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/aoe/aoe.h2
-rw-r--r--drivers/block/aoe/aoeblk.c2
-rw-r--r--drivers/block/ataflop.c2
-rw-r--r--drivers/block/cciss.c10
-rw-r--r--drivers/block/cpqarray.c6
-rw-r--r--drivers/block/floppy.c4
-rw-r--r--drivers/block/lguest_blk.c171
-rw-r--r--drivers/block/loop.c4
-rw-r--r--drivers/block/nbd.c4
-rw-r--r--drivers/block/paride/pcd.c4
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/paride/pf.c4
-rw-r--r--drivers/block/pktcdvd.c12
-rw-r--r--drivers/block/ps2esdi.c4
-rw-r--r--drivers/block/ps3disk.c8
-rw-r--r--drivers/block/rd.c2
-rw-r--r--drivers/block/sunvdc.c2
-rw-r--r--drivers/block/swim3.c4
-rw-r--r--drivers/block/sx8.c20
-rw-r--r--drivers/block/ub.c6
-rw-r--r--drivers/block/umem.c6
-rw-r--r--drivers/block/viodasd.c2
-rw-r--r--drivers/block/xd.c2
-rw-r--r--drivers/block/xd.h2
-rw-r--r--drivers/block/xen-blkfront.c4
-rw-r--r--drivers/block/xsysace.c4
-rw-r--r--drivers/block/z2ram.c2
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/cdrom/viocd.c2
-rw-r--r--drivers/char/Kconfig25
-rw-r--r--drivers/char/Makefile3
-rw-r--r--drivers/char/agp/Kconfig2
-rw-r--r--drivers/char/agp/ati-agp.c9
-rw-r--r--drivers/char/agp/compat_ioctl.c1
-rw-r--r--drivers/char/agp/frontend.c1
-rw-r--r--drivers/char/agp/generic.c2
-rw-r--r--drivers/char/agp/intel-agp.c19
-rw-r--r--drivers/char/agp/sgi-agp.c1
-rw-r--r--drivers/char/hpet.c10
-rw-r--r--drivers/char/hvc_lguest.c80
-rw-r--r--drivers/char/mmtimer.c1
-rw-r--r--drivers/char/mspec.c1
-rw-r--r--drivers/edac/Kconfig4
-rw-r--r--drivers/edac/edac_mc.c64
-rw-r--r--drivers/edac/edac_mc_sysfs.c19
-rw-r--r--drivers/edac/edac_module.h8
-rw-r--r--drivers/edac/edac_pci.c162
-rw-r--r--drivers/edac/edac_pci_sysfs.c297
-rw-r--r--drivers/edac/i3000_edac.c2
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/chips/ds1682.c3
-rw-r--r--drivers/i2c/chips/tps65010.c2
-rw-r--r--drivers/ide/ide-cd.c4
-rw-r--r--drivers/ide/ide-disk.c4
-rw-r--r--drivers/ide/ide-io.c2
-rw-r--r--drivers/ide/ide-probe.c2
-rw-r--r--drivers/ide/legacy/hd.c2
-rw-r--r--drivers/ide/pci/scc_pata.c4
-rw-r--r--drivers/ieee1394/raw1394.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_diag.c1
-rw-r--r--drivers/input/misc/atlas_btns.c9
-rw-r--r--drivers/input/serio/Kconfig2
-rw-r--r--drivers/kvm/kvm_main.c44
-rw-r--r--drivers/kvm/x86_emulate.c2
-rw-r--r--drivers/lguest/Makefile12
-rw-r--r--drivers/lguest/README47
-rw-r--r--drivers/lguest/core.c357
-rw-r--r--drivers/lguest/hypercalls.c144
-rw-r--r--drivers/lguest/interrupts_and_traps.c212
-rw-r--r--drivers/lguest/io.c265
-rw-r--r--drivers/lguest/lg.h47
-rw-r--r--drivers/lguest/lguest.c535
-rw-r--r--drivers/lguest/lguest_asm.S71
-rw-r--r--drivers/lguest/lguest_bus.c75
-rw-r--r--drivers/lguest/lguest_user.c166
-rw-r--r--drivers/lguest/page_tables.c329
-rw-r--r--drivers/lguest/segments.c126
-rw-r--r--drivers/lguest/switcher.S284
-rw-r--r--drivers/md/dm-table.c8
-rw-r--r--drivers/md/dm.c10
-rw-r--r--drivers/md/faulty.c2
-rw-r--r--drivers/md/linear.c14
-rw-r--r--drivers/md/md.c2
-rw-r--r--drivers/md/multipath.c12
-rw-r--r--drivers/md/raid0.c14
-rw-r--r--drivers/md/raid1.c12
-rw-r--r--drivers/md/raid10.c14
-rw-r--r--drivers/md/raid5.c18
-rw-r--r--drivers/media/video/Kconfig4
-rw-r--r--drivers/message/i2o/i2o_block.c4
-rw-r--r--drivers/misc/asus-laptop.c41
-rw-r--r--drivers/misc/sony-laptop.c21
-rw-r--r--drivers/misc/thinkpad_acpi.c20
-rw-r--r--drivers/misc/thinkpad_acpi.h2
-rw-r--r--drivers/mmc/card/queue.c10
-rw-r--r--drivers/mmc/core/bus.c23
-rw-r--r--drivers/mmc/core/core.c144
-rw-r--r--drivers/mmc/core/core.h22
-rw-r--r--drivers/mmc/core/host.c7
-rw-r--r--drivers/mmc/core/mmc.c26
-rw-r--r--drivers/mmc/core/mmc_ops.c2
-rw-r--r--drivers/mmc/core/mmc_ops.h2
-rw-r--r--drivers/mmc/core/sd.c36
-rw-r--r--drivers/mmc/core/sd_ops.c62
-rw-r--r--drivers/mmc/core/sd_ops.h3
-rw-r--r--drivers/mmc/host/at91_mci.c2
-rw-r--r--drivers/mmc/host/au1xmmc.c2
-rw-r--r--drivers/mmc/host/imxmmc.c2
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/mmci.h2
-rw-r--r--drivers/mmc/host/omap.c2
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/sdhci.c63
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/wbsd.c15
-rw-r--r--drivers/mmc/host/wbsd.h2
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/net/82596.c1
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/acenic.c6
-rw-r--r--drivers/net/atl1/atl1_hw.h9
-rw-r--r--drivers/net/atl1/atl1_main.c28
-rw-r--r--drivers/net/ax88796.c2
-rw-r--r--drivers/net/bfin_mac.c6
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/defxx.c17
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c22
-rw-r--r--drivers/net/forcedeth.c16
-rw-r--r--drivers/net/lguest_net.c237
-rw-r--r--drivers/net/lib8390.c9
-rw-r--r--drivers/net/netxen/netxen_nic.h3
-rw-r--r--drivers/net/netxen/netxen_nic_main.c48
-rw-r--r--drivers/net/phy/vitesse.c2
-rw-r--r--drivers/net/pppol2tp.c4
-rw-r--r--drivers/net/ps3_gelic_net.c215
-rw-r--r--drivers/net/ps3_gelic_net.h24
-rw-r--r--drivers/net/ucc_geth.c334
-rw-r--r--drivers/net/ucc_geth.h6
-rw-r--r--drivers/net/ucc_geth_ethtool.c388
-rw-r--r--drivers/net/ucc_geth_mii.c6
-rw-r--r--drivers/pci/pci-acpi.c32
-rw-r--r--drivers/pci/pci.c9
-rw-r--r--drivers/pci/pci.h3
-rw-r--r--drivers/pnp/card.c166
-rw-r--r--drivers/pnp/core.c50
-rw-r--r--drivers/pnp/driver.c75
-rw-r--r--drivers/pnp/interface.c217
-rw-r--r--drivers/pnp/isapnp/compat.c39
-rw-r--r--drivers/pnp/isapnp/core.c332
-rw-r--r--drivers/pnp/isapnp/proc.c21
-rw-r--r--drivers/pnp/manager.c144
-rw-r--r--drivers/pnp/pnpacpi/core.c117
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c441
-rw-r--r--drivers/pnp/pnpbios/bioscalls.c339
-rw-r--r--drivers/pnp/pnpbios/core.c257
-rw-r--r--drivers/pnp/pnpbios/proc.c107
-rw-r--r--drivers/pnp/pnpbios/rsparser.c349
-rw-r--r--drivers/pnp/quirks.c80
-rw-r--r--drivers/pnp/resource.c102
-rw-r--r--drivers/pnp/support.c17
-rw-r--r--drivers/pnp/system.c40
-rw-r--r--drivers/rtc/Makefile42
-rw-r--r--drivers/rtc/class.c5
-rw-r--r--drivers/rtc/rtc-bfin.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c2
-rw-r--r--drivers/rtc/rtc-stk17ta8.c6
-rw-r--r--drivers/s390/block/dasd.c4
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/Kconfig12
-rw-r--r--drivers/s390/char/raw3270.c6
-rw-r--r--drivers/s390/char/sclp_vt220.c62
-rw-r--r--drivers/s390/char/tape.h2
-rw-r--r--drivers/s390/char/tape_block.c4
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/cio/blacklist.c19
-rw-r--r--drivers/s390/cio/ccwgroup.c3
-rw-r--r--drivers/s390/cio/chp.c19
-rw-r--r--drivers/s390/cio/chsc.c26
-rw-r--r--drivers/s390/cio/chsc.h2
-rw-r--r--drivers/s390/cio/cio.c13
-rw-r--r--drivers/s390/cio/cio_debug.h2
-rw-r--r--drivers/s390/cio/cmf.c16
-rw-r--r--drivers/s390/cio/css.c32
-rw-r--r--drivers/s390/cio/css.h1
-rw-r--r--drivers/s390/cio/device.c60
-rw-r--r--drivers/s390/cio/device_fsm.c20
-rw-r--r--drivers/s390/cio/device_ops.c257
-rw-r--r--drivers/s390/net/ctcmain.c6
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/sbus/char/Kconfig1
-rw-r--r--drivers/sbus/char/jsflash.c2
-rw-r--r--drivers/scsi/scsi_lib.c12
-rw-r--r--drivers/scsi/sd.c4
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/serial/68328serial.c71
-rw-r--r--drivers/serial/8250.c5
-rw-r--r--drivers/serial/8250_early.c10
-rw-r--r--drivers/serial/serial_core.c9
-rw-r--r--drivers/spi/spi_s3c24xx.c2
-rw-r--r--drivers/video/Kconfig9
-rw-r--r--drivers/video/chipsfb.c3
-rw-r--r--drivers/video/tgafb.c2
-rw-r--r--drivers/w1/masters/ds1wm.c2
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c2
-rw-r--r--fs/bio.c30
-rw-r--r--fs/dcookies.c1
-rw-r--r--fs/ext2/super.c2
-rw-r--r--fs/ext3/super.c2
-rw-r--r--fs/ext4/super.c2
-rw-r--r--fs/lockd/svclock.c6
-rw-r--r--fs/nfsd/nfs4xdr.c2
-rw-r--r--fs/ocfs2/file.c2
-rw-r--r--fs/open.c2
-rw-r--r--fs/pipe.c2
-rw-r--r--fs/proc/inode.c24
-rw-r--r--fs/quota.c2
-rw-r--r--fs/reiserfs/stree.c5
-rw-r--r--fs/signalfd.c2
-rw-r--r--fs/splice.c4
-rw-r--r--fs/timerfd.c6
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl32.c4
-rw-r--r--include/acpi/acpi_bus.h16
-rw-r--r--include/acpi/acpi_drivers.h23
-rw-r--r--include/acpi/actypes.h6
-rw-r--r--include/acpi/acutils.h4
-rw-r--r--include/asm-alpha/bitops.h2
-rw-r--r--include/asm-arm/arch-mxc/uncompress.h3
-rw-r--r--include/asm-arm/arch-omap/mailbox.h2
-rw-r--r--include/asm-arm/unaligned.h22
-rw-r--r--include/asm-blackfin/bfin-global.h1
-rw-r--r--include/asm-blackfin/mach-bf548/cdefBF54x_base.h33
-rw-r--r--include/asm-blackfin/mach-bf548/irq.h1
-rw-r--r--include/asm-blackfin/mach-bf561/cdefBF561.h6
-rw-r--r--include/asm-blackfin/mach-bf561/defBF561.h4
-rw-r--r--include/asm-blackfin/thread_info.h12
-rw-r--r--include/asm-i386/acpi.h23
-rw-r--r--include/asm-i386/bootparam.h9
-rw-r--r--include/asm-i386/e820.h2
-rw-r--r--include/asm-i386/ist.h10
-rw-r--r--include/asm-i386/suspend.h2
-rw-r--r--include/asm-ia64/acpi.h5
-rw-r--r--include/asm-ia64/ia32.h9
-rw-r--r--include/asm-ia64/machvec.h1
-rw-r--r--include/asm-ia64/processor.h4
-rw-r--r--include/asm-ia64/smp.h1
-rw-r--r--include/asm-m68k/raw_io.h8
-rw-r--r--include/asm-m68k/system.h33
-rw-r--r--include/asm-m68knommu/hardirq.h2
-rw-r--r--include/asm-m68knommu/hw_irq.h4
-rw-r--r--include/asm-m68knommu/machdep.h38
-rw-r--r--include/asm-m68knommu/mcfdma.h2
-rw-r--r--include/asm-m68knommu/system.h4
-rw-r--r--include/asm-m68knommu/timex.h24
-rw-r--r--include/asm-mips/edac.h35
-rw-r--r--include/asm-mips/war.h1
-rw-r--r--include/asm-powerpc/bug.h2
-rw-r--r--include/asm-powerpc/page.h1
-rw-r--r--include/asm-powerpc/pci-bridge.h36
-rw-r--r--include/asm-powerpc/ppc_asm.h12
-rw-r--r--include/asm-powerpc/vio.h5
-rw-r--r--include/asm-s390/ccwdev.h5
-rw-r--r--include/asm-s390/s390_ext.h2
-rw-r--r--include/asm-s390/smp.h11
-rw-r--r--include/asm-s390/unistd.h2
-rw-r--r--include/asm-x86_64/acpi.h22
-rw-r--r--include/asm-x86_64/ist.h1
-rw-r--r--include/asm-x86_64/suspend.h2
-rw-r--r--include/asm-x86_64/uaccess.h4
-rw-r--r--include/asm-xtensa/io.h1
-rw-r--r--include/linux/acpi.h1
-rw-r--r--include/linux/apm_bios.h20
-rw-r--r--include/linux/blkdev.h145
-rw-r--r--include/linux/blktrace_api.h2
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/device.h3
-rw-r--r--include/linux/elevator.h76
-rw-r--r--include/linux/freezer.h6
-rw-r--r--include/linux/hugetlb.h2
-rw-r--r--include/linux/ide.h4
-rw-r--r--include/linux/interrupt.h4
-rw-r--r--include/linux/lguest.h51
-rw-r--r--include/linux/lguest_bus.h5
-rw-r--r--include/linux/lguest_launcher.h60
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/loop.h2
-rw-r--r--include/linux/mm.h38
-rw-r--r--include/linux/mmc/core.h2
-rw-r--r--include/linux/mod_devicetable.h6
-rw-r--r--include/linux/netfilter/xt_connlimit.h4
-rw-r--r--include/linux/pci.h1
-rw-r--r--include/linux/pci_ids.h23
-rw-r--r--include/linux/pm.h15
-rw-r--r--include/linux/pnp.h191
-rw-r--r--include/linux/pnpbios.h60
-rw-r--r--include/linux/preempt.h44
-rw-r--r--include/linux/raid/md_k.h4
-rw-r--r--include/linux/reiserfs_fs.h8
-rw-r--r--include/linux/sched.h23
-rw-r--r--include/linux/serial_8250.h2
-rw-r--r--include/linux/serial_core.h2
-rw-r--r--include/linux/suspend.h19
-rw-r--r--include/linux/time.h8
-rw-r--r--include/net/netfilter/nf_conntrack_tuple.h4
-rw-r--r--include/scsi/sd.h2
-rw-r--r--include/xen/page.h1
-rw-r--r--init/initramfs.c2
-rw-r--r--kernel/Kconfig.preempt3
-rw-r--r--kernel/acct.c2
-rw-r--r--kernel/auditsc.c6
-rw-r--r--kernel/hrtimer.c6
-rw-r--r--kernel/irq/devres.c1
-rw-r--r--kernel/kmod.c8
-rw-r--r--kernel/power/Kconfig45
-rw-r--r--kernel/power/Makefile5
-rw-r--r--kernel/power/disk.c1
-rw-r--r--kernel/power/main.c28
-rw-r--r--kernel/power/power.h12
-rw-r--r--kernel/sched.c204
-rw-r--r--kernel/sched_debug.c2
-rw-r--r--kernel/sys.c5
-rw-r--r--kernel/sysctl.c2
-rw-r--r--kernel/time.c16
-rw-r--r--kernel/time/timekeeping.c38
-rw-r--r--kernel/tsacct.c2
-rw-r--r--lib/fault-inject.c4
-rw-r--r--mm/Kconfig4
-rw-r--r--mm/bounce.c4
-rw-r--r--mm/hugetlb.c1
-rw-r--r--mm/migrate.c24
-rw-r--r--mm/mmap.c34
-rw-r--r--mm/oom_kill.c1
-rw-r--r--mm/page_alloc.c10
-rw-r--r--mm/slab.c2
-rw-r--r--mm/swapfile.c6
-rw-r--r--mm/vmstat.c1
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/bridge/br_input.c6
-rw-r--r--net/bridge/netfilter/ebt_log.c7
-rw-r--r--net/bridge/netfilter/ebt_ulog.c9
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c3
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c2
-rw-r--r--net/ipv6/ip6_tunnel.c17
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c6
-rw-r--r--net/ipv6/tcp_ipv6.c1
-rw-r--r--net/iucv/iucv.c15
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c3
-rw-r--r--net/netfilter/nf_conntrack_expect.c8
-rw-r--r--net/netfilter/nf_conntrack_helper.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c1
-rw-r--r--net/netfilter/xt_connlimit.c6
-rw-r--r--net/netfilter/xt_physdev.c1
-rw-r--r--net/netfilter/xt_u32.c11
-rw-r--r--net/netlink/genetlink.c28
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/rxrpc/ar-connection.c6
-rw-r--r--net/rxrpc/ar-transport.c4
-rw-r--r--net/rxrpc/rxkad.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c9
-rw-r--r--net/sunrpc/svcsock.c4
-rw-r--r--net/tipc/msg.h6
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/Makefile.modpost21
-rw-r--r--scripts/kconfig/conf.c31
-rw-r--r--scripts/kconfig/expr.h4
-rw-r--r--scripts/kconfig/gconf.c10
-rw-r--r--scripts/kconfig/kxgettext.c4
-rw-r--r--scripts/kconfig/lkc_proto.h2
-rw-r--r--scripts/kconfig/mconf.c4
-rw-r--r--scripts/kconfig/menu.c12
-rw-r--r--scripts/kconfig/qconf.cc2
-rw-r--r--scripts/kconfig/zconf.tab.c_shipped12
-rw-r--r--scripts/kconfig/zconf.y12
-rw-r--r--scripts/mod/file2alias.c12
-rw-r--r--scripts/mod/modpost.c159
-rwxr-xr-xscripts/ver_linux4
-rw-r--r--security/selinux/ss/services.c4
-rw-r--r--security/selinux/xfrm.c3
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c4
674 files changed, 11990 insertions, 6154 deletions
diff --git a/CREDITS b/CREDITS
index 10c214dc95e..832436e1dd9 100644
--- a/CREDITS
+++ b/CREDITS
@@ -966,6 +966,7 @@ N: Pekka Enberg
E: penberg@cs.helsinki.fi
W: http://www.cs.helsinki.fi/u/penberg/
D: Various kernel hacks, fixes, and cleanups.
+D: Slab allocators
S: Finland
N: David Engebretsen
@@ -1939,8 +1940,8 @@ D: for Menuconfig's lxdialog.
N: Christoph Lameter
E: christoph@lameter.com
D: Digiboard PC/Xe and PC/Xi, Digiboard EPCA
-D: Early protocol filter for bridging code
-D: Bug fixes
+D: NUMA support, Slab allocators, Page migration
+D: Scalability, Time subsystem
N: Paul Laufer
E: paul@laufernet.com
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index eb42bf9847c..ec7c498b69f 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -704,14 +704,23 @@ X!Idrivers/video/console/fonts.c
<chapter id="splice">
<title>splice API</title>
- <para>)
+ <para>
splice is a method for moving blocks of data around inside the
- kernel, without continually transferring it between the kernel
+ kernel, without continually transferring them between the kernel
and user space.
</para>
!Iinclude/linux/splice.h
!Ffs/splice.c
</chapter>
+ <chapter id="pipes">
+ <title>pipes API</title>
+ <para>
+ Pipe interfaces are all for in-kernel (builtin image) use.
+ They are not exported for use by modules.
+ </para>
+!Iinclude/linux/pipe_fs_i.h
+!Ffs/pipe.c
+ </chapter>
</book>
diff --git a/Documentation/block/barrier.txt b/Documentation/block/barrier.txt
index 7d279f2f5bb..2c2f24f634e 100644
--- a/Documentation/block/barrier.txt
+++ b/Documentation/block/barrier.txt
@@ -79,9 +79,9 @@ and how to prepare flush requests. Note that the term 'ordered' is
used to indicate the whole sequence of performing barrier requests
including draining and flushing.
-typedef void (prepare_flush_fn)(request_queue_t *q, struct request *rq);
+typedef void (prepare_flush_fn)(struct request_queue *q, struct request *rq);
-int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
prepare_flush_fn *prepare_flush_fn);
@q : the queue in question
@@ -92,7 +92,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
For example, SCSI disk driver's prepare_flush_fn looks like the
following.
-static void sd_prepare_flush(request_queue_t *q, struct request *rq)
+static void sd_prepare_flush(struct request_queue *q, struct request *rq)
{
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd_type = REQ_TYPE_BLOCK_PC;
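
For orientation, here is a minimal sketch of how a driver might wire this up at queue-initialization time. It is not taken from the patch: the my_* names, MY_FLUSH_COMMAND and MY_FLUSH_TIMEOUT are hypothetical placeholders, and only the prepare_flush_fn typedef and blk_queue_ordered() call described above are assumed.

#include <linux/blkdev.h>

/* Hypothetical flush preparation, modelled on sd_prepare_flush() above.
 * MY_FLUSH_COMMAND and MY_FLUSH_TIMEOUT stand in for whatever command the
 * real hardware needs to flush its write cache. */
#define MY_FLUSH_COMMAND	0x35		/* e.g. SCSI SYNCHRONIZE CACHE */
#define MY_FLUSH_TIMEOUT	(60 * HZ)

static void my_prepare_flush(struct request_queue *q, struct request *rq)
{
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = MY_FLUSH_TIMEOUT;
	rq->cmd[0] = MY_FLUSH_COMMAND;
	rq->cmd_len = 10;
}

static int my_setup_barriers(struct request_queue *q)
{
	/* Drain the queue and flush the write cache around each barrier
	 * request; the block layer calls my_prepare_flush() to build the
	 * flush requests it inserts. */
	return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				 my_prepare_flush);
}
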
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 3adaace328a..8af392fc6ef 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -740,12 +740,12 @@ Block now offers some simple generic functionality to help support command
queueing (typically known as tagged command queueing), ie manage more than
one outstanding command on a queue at any given time.
- blk_queue_init_tags(request_queue_t *q, int depth)
+ blk_queue_init_tags(struct request_queue *q, int depth)
Initialize internal command tagging structures for a maximum
depth of 'depth'.
- blk_queue_free_tags((request_queue_t *q)
+ blk_queue_free_tags((struct request_queue *q)
Teardown tag info associated with the queue. This will be done
automatically by block if blk_queue_cleanup() is called on a queue
@@ -754,7 +754,7 @@ one outstanding command on a queue at any given time.
The above are initialization and exit management, the main helpers during
normal operations are:
- blk_queue_start_tag(request_queue_t *q, struct request *rq)
+ blk_queue_start_tag(struct request_queue *q, struct request *rq)
Start tagged operation for this request. A free tag number between
0 and 'depth' is assigned to the request (rq->tag holds this number),
@@ -762,7 +762,7 @@ normal operations are:
for this queue is already achieved (or if the tag wasn't started for
some other reason), 1 is returned. Otherwise 0 is returned.
- blk_queue_end_tag(request_queue_t *q, struct request *rq)
+ blk_queue_end_tag(struct request_queue *q, struct request *rq)
End tagged operation on this request. 'rq' is removed from the internal
book keeping structures.
@@ -781,7 +781,7 @@ queue. For instance, on IDE any tagged request error needs to clear both
the hardware and software block queue and enable the driver to sanely restart
all the outstanding requests. There's a third helper to do that:
- blk_queue_invalidate_tags(request_queue_t *q)
+ blk_queue_invalidate_tags(struct request_queue *q)
Clear the internal block tag queue and re-add all the pending requests
to the request queue. The driver will receive them again on the
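
A minimal sketch of the per-request pattern these helpers imply, assuming a conventional request_fn-style driver of this kernel era: my_request_fn, my_issue_to_hardware and my_complete are hypothetical names, and only blk_queue_start_tag()/blk_queue_end_tag() plus the standard elv_next_request() and end_that_request_last() helpers are assumed.

#include <linux/blkdev.h>

/* Provided elsewhere by the (hypothetical) driver: hands rq->tag and the
 * command off to the controller. */
static void my_issue_to_hardware(struct request *rq);

/* Issue path: pull requests off the queue and start tagged operation on
 * each one.  blk_queue_start_tag() assigns a free tag to rq->tag; a
 * non-zero return means the tag depth is exhausted, so we stop and retry
 * once a tag has been freed. */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;
		my_issue_to_hardware(rq);
	}
}

/* Completion path (e.g. from the interrupt handler): give the tag back
 * before finishing the request.  Data completion via end_that_request_first()
 * is omitted here for brevity. */
static void my_complete(struct request_queue *q, struct request *rq, int uptodate)
{
	blk_queue_end_tag(q, rq);
	end_that_request_last(rq, uptodate);
}
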
diff --git a/Documentation/block/request.txt b/Documentation/block/request.txt
index 75924e2a697..fff58acb40a 100644
--- a/Documentation/block/request.txt
+++ b/Documentation/block/request.txt
@@ -83,6 +83,6 @@ struct bio *bio DBI First bio in request
struct bio *biotail DBI Last bio in request
-request_queue_t *q DB Request queue this request belongs to
+struct request_queue *q DB Request queue this request belongs to
struct request_list *rl B Request list this request came from
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index c175eedadb5..a43d2878a4e 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -211,22 +211,6 @@ Who: Richard Purdie <rpurdie@rpsys.net>
---------------------------
-What: read_dev_chars(), read_conf_data{,_lpm}() (s390 common I/O layer)
-When: December 2007
-Why: These functions are a leftover from 2.4 times. They have several
- problems:
- - Duplication of checks that are done in the device driver's
- interrupt handler
- - common I/O layer can't do device specific error recovery
- - device driver can't be notified for conditions happening during
- execution of the function
- Device drivers should issue the read device characteristics and read
- configuration data ccws and do the appropriate error handling
- themselves.
-Who: Cornelia Huck <cornelia.huck@de.ibm.com>
-
----------------------------
-
What: i2c-ixp2000, i2c-ixp4xx and scx200_i2c drivers
When: September 2007
Why: Obsolete. The new i2c-gpio driver replaces all hardware-specific
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index 218a8650f48..6bc2ba215df 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -148,7 +148,7 @@ pin ... that won't always match the specified output value, because of
issues including wire-OR and output latencies.
The get/set calls have no error returns because "invalid GPIO" should have
-been reported earlier in gpio_set_direction(). However, note that not all
+been reported earlier from gpio_direction_*(). However, note that not all
platforms can read the value of output pins; those that can't should always
return zero. Also, using these calls for GPIOs that can't safely be accessed
without sleeping (see below) is an error.
@@ -239,7 +239,7 @@ map between them using calls like:
Those return either the corresponding number in the other namespace, or
else a negative errno code if the mapping can't be done. (For example,
some GPIOs can't be used as IRQs.) It is an unchecked error to use a GPIO
-number that hasn't been marked as an input using gpio_set_direction(), or
+number that wasn't set up as an input using gpio_direction_input(), or
to use an IRQ number that didn't originally come from gpio_to_irq().
These two mapping calls are expected to cost on the order of a single
diff --git a/Documentation/iostats.txt b/Documentation/iostats.txt
index 09a1bafe252..b963c3b4afa 100644
--- a/Documentation/iostats.txt
+++ b/Documentation/iostats.txt
@@ -79,7 +79,7 @@ Field 8 -- # of milliseconds spent writing
measured from __make_request() to end_that_request_last()).
Field 9 -- # of I/Os currently in progress
The only field that should go to zero. Incremented as requests are
- given to appropriate request_queue_t and decremented as they finish.
+ given to appropriate struct request_queue and decremented as they finish.
Field 10 -- # of milliseconds spent doing I/Os
This field increases so long as field 9 is nonzero.
Field 11 -- weighted # of milliseconds spent doing I/Os
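As a quick worked example of field 10: if it grows by 500 over a 1000 millisecond sampling interval, the device was busy with I/O for roughly 50% of that interval, which is essentially how utilisation figures are derived from these counters.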
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fb80e9ffea6..1156653338f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -30,6 +30,7 @@ the beginning of each description states the restrictions within which a
parameter is applicable:
ACPI ACPI support is enabled.
+ AGP AGP (Accelerated Graphics Port) is enabled.
ALSA ALSA sound support is enabled.
APIC APIC support is enabled.
APM Advanced Power Management support is enabled.
@@ -227,6 +228,12 @@ and is between 256 and 4096 characters. It is defined in the file
to assume that this machine's pmtimer latches its value
and always returns good values.
+ agp= [AGP]
+ { off | try_unsupported }
+ off: disable AGP support
+ try_unsupported: try to drive unsupported chipsets
+ (may crash computer or cause data corruption)
+
enable_timer_pin_1 [i386,x86-64]
Enable PIN 1 of APIC timer
Can be useful to work around chipset bugs
diff --git a/Documentation/lguest/Makefile b/Documentation/lguest/Makefile
index b9b9427376e..31e794ef5f9 100644
--- a/Documentation/lguest/Makefile
+++ b/Documentation/lguest/Makefile
@@ -11,8 +11,7 @@ endif
include $(KBUILD_OUTPUT)/.config
LGUEST_GUEST_TOP := ($(CONFIG_PAGE_OFFSET) - 0x08000000)
-CFLAGS:=-Wall -Wmissing-declarations -Wmissing-prototypes -O3 \
- -static -DLGUEST_GUEST_TOP="$(LGUEST_GUEST_TOP)" -Wl,-T,lguest.lds
+CFLAGS:=-Wall -Wmissing-declarations -Wmissing-prototypes -O3 -Wl,-T,lguest.lds
LDLIBS:=-lz
all: lguest.lds lguest
diff --git a/Documentation/lguest/extract b/Documentation/lguest/extract
new file mode 100644
index 00000000000..7730bb6e4b9
--- /dev/null
+++ b/Documentation/lguest/extract
@@ -0,0 +1,58 @@
+#! /bin/sh
+
+set -e
+
+PREFIX=$1
+shift
+
+trap 'rm -r $TMPDIR' 0
+TMPDIR=`mktemp -d`
+
+exec 3>/dev/null
+for f; do
+ while IFS="
+" read -r LINE; do
+ case "$LINE" in
+ *$PREFIX:[0-9]*:\**)
+ NUM=`echo "$LINE" | sed "s/.*$PREFIX:\([0-9]*\).*/\1/"`
+ if [ -f $TMPDIR/$NUM ]; then
+ echo "$TMPDIR/$NUM already exits prior to $f"
+ exit 1
+ fi
+ exec 3>>$TMPDIR/$NUM
+ echo $f | sed 's,\.\./,,g' > $TMPDIR/.$NUM
+ /bin/echo "$LINE" | sed -e "s/$PREFIX:[0-9]*//" -e "s/:\*/*/" >&3
+ ;;
+ *$PREFIX:[0-9]*)
+ NUM=`echo "$LINE" | sed "s/.*$PREFIX:\([0-9]*\).*/\1/"`
+ if [ -f $TMPDIR/$NUM ]; then
+ echo "$TMPDIR/$NUM already exits prior to $f"
+ exit 1
+ fi
+ exec 3>>$TMPDIR/$NUM
+ echo $f | sed 's,\.\./,,g' > $TMPDIR/.$NUM
+ /bin/echo "$LINE" | sed "s/$PREFIX:[0-9]*//" >&3
+ ;;
+ *:\**)
+ /bin/echo "$LINE" | sed -e "s/:\*/*/" -e "s,/\*\*/,," >&3
+ echo >&3
+ exec 3>/dev/null
+ ;;
+ *)
+ /bin/echo "$LINE" >&3
+ ;;
+ esac
+ done < $f
+ echo >&3
+ exec 3>/dev/null
+done
+
+LASTFILE=""
+for f in $TMPDIR/*; do
+ if [ "$LASTFILE" != $(cat $TMPDIR/.$(basename $f) ) ]; then
+ LASTFILE=$(cat $TMPDIR/.$(basename $f) )
+ echo "[ $LASTFILE ]"
+ fi
+ cat $f
+done
+
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 1432b502a2d..f7918401a00 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -1,5 +1,10 @@
-/* Simple program to layout "physical" memory for new lguest guest.
- * Linked high to avoid likely physical memory. */
+/*P:100 This is the Launcher code, a simple program which lays out the
+ * "physical" memory for the new Guest by mapping the kernel image and the
+ * virtual devices, then reads repeatedly from /dev/lguest to run the Guest.
+ *
+ * The only trick: the Makefile links it at a high address so it will be clear
+ * of the guest memory region. It means that each Guest cannot have more than
+ * about 2.5G of memory on a normally configured Host. :*/
#define _LARGEFILE64_SOURCE
#define _GNU_SOURCE
#include <stdio.h>
@@ -29,12 +34,20 @@
#include <termios.h>
#include <getopt.h>
#include <zlib.h>
+/*L:110 We can ignore the 28 include files we need for this program, but I do
+ * want to draw attention to the use of kernel-style types.
+ *
+ * As Linus said, "C is a Spartan language, and so should your naming be." I
+ * like these abbreviations and the header we need uses them, so we define them
+ * here.
+ */
typedef unsigned long long u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;
#include "../../include/linux/lguest_launcher.h"
#include "../../include/asm-i386/e820.h"
+/*:*/
#define PAGE_PRESENT 0x7 /* Present, RW, Execute */
#define NET_PEERNUM 1
@@ -43,31 +56,52 @@ typedef uint8_t u8;
#define SIOCBRADDIF 0x89a2 /* add interface to bridge */
#endif
+/*L:120 verbose is both a global flag and a macro. The C preprocessor allows
+ * this, and although I wouldn't recommend it, it works quite nicely here. */
static bool verbose;
#define verbose(args...) \
do { if (verbose) printf(args); } while(0)
+/*:*/
+
+/* The pipe to send commands to the waker process */
static int waker_fd;
+/* The top of guest physical memory. */
+static u32 top;
+/* This is our list of devices. */
struct device_list
{
+ /* Summary information about the devices in our list: ready to pass to
+ * select() to ask which need servicing.*/
fd_set infds;
int max_infd;
+ /* The descriptor page for the devices. */
+ struct lguest_device_desc *descs;
+
+ /* A single linked list of devices. */
struct device *dev;
+ /* ... And an end pointer so we can easily append new devices */
struct device **lastdev;
};
+/* The device structure describes a single device. */
struct device
{
+ /* The linked-list pointer. */
struct device *next;
+ /* The descriptor for this device, as mapped into the Guest. */
struct lguest_device_desc *desc;
+ /* The memory page(s) of this device, if any. Also mapped in Guest. */
void *mem;
- /* Watch this fd if handle_input non-NULL. */
+ /* If handle_input is set, it wants to be called when this file
+ * descriptor is ready. */
int fd;
bool (*handle_input)(int fd, struct device *me);
- /* Watch DMA to this key if handle_input non-NULL. */
+ /* If handle_output is set, it wants to be called when the Guest sends
+ * DMA to this key. */
unsigned long watch_key;
u32 (*handle_output)(int fd, const struct iovec *iov,
unsigned int num, struct device *me);
@@ -76,6 +110,11 @@ struct device
void *priv;
};
+/*L:130
+ * Loading the Kernel.
+ *
+ * We start with a couple of simple helper routines. open_or_die() avoids
+ * error-checking code cluttering the callers: */
static int open_or_die(const char *name, int flags)
{
int fd = open(name, flags);
@@ -84,26 +123,38 @@ static int open_or_die(const char *name, int flags)
return fd;
}
+/* map_zeroed_pages() takes a (page-aligned) address and a number of pages. */
static void *map_zeroed_pages(unsigned long addr, unsigned int num)
{
+ /* We cache the /dev/zero file-descriptor so we only open it once. */
static int fd = -1;
if (fd == -1)
fd = open_or_die("/dev/zero", O_RDONLY);
+ /* We use a private mapping (ie. if we write to the page, it will be
+ * copied), and obviously we insist that it be mapped where we ask. */
if (mmap((void *)addr, getpagesize() * num,
PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED|MAP_PRIVATE, fd, 0)
!= (void *)addr)
err(1, "Mmaping %u pages of /dev/zero @%p", num, (void *)addr);
+
+ /* Returning the address is just a courtesy: can simplify callers. */
return (void *)addr;
}
-/* Find magic string marking entry point, return entry point. */
+/* To find out where to start we look for the magic Guest string, which marks
+ * the code we see in lguest_asm.S. This is a hack which we are currently
+ * plotting to replace with the normal Linux entry point. */
static unsigned long entry_point(void *start, void *end,
unsigned long page_offset)
{
void *p;
+ /* The scan gives us the physical starting address. We want the
+ * virtual address in this case, and fortunately, we already figured
+ * out the physical-virtual difference and passed it here in
+ * "page_offset". */
for (p = start; p < end; p++)
if (memcmp(p, "GenuineLguest", strlen("GenuineLguest")) == 0)
return (long)p + strlen("GenuineLguest") + page_offset;
@@ -111,7 +162,17 @@ static unsigned long entry_point(void *start, void *end,
err(1, "Is this image a genuine lguest?");
}
-/* Returns the entry point */
+/* This routine takes an open vmlinux image, which is in ELF, and maps it into
+ * the Guest memory. ELF = Executable and Linkable Format, the format used
+ * by all modern binaries on Linux including the kernel.
+ *
+ * The ELF headers give *two* addresses: a physical address, and a virtual
+ * address. The Guest kernel expects to be placed in memory at the physical
+ * address, and the page tables set up so it will correspond to that virtual
+ * address. We return the difference between the virtual and physical
+ * addresses in the "page_offset" pointer.
+ *
+ * We return the starting address. */
static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr,
unsigned long *page_offset)
{
@@ -120,40 +181,61 @@ static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr,
unsigned int i;
unsigned long start = -1UL, end = 0;
- /* Sanity checks. */
+ /* Sanity checks on the main ELF header: an x86 executable with a
+ * reasonable number of correctly-sized program headers. */
if (ehdr->e_type != ET_EXEC
|| ehdr->e_machine != EM_386
|| ehdr->e_phentsize != sizeof(Elf32_Phdr)
|| ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr))
errx(1, "Malformed elf header");
+ /* An ELF executable contains an ELF header and a number of "program"
+ * headers which indicate which parts ("segments") of the program to
+ * load where. */
+
+ /* We read in all the program headers at once: */
if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0)
err(1, "Seeking to program headers");
if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr))
err(1, "Reading program headers");
+ /* We don't know page_offset yet. */
*page_offset = 0;
- /* We map the loadable segments at virtual addresses corresponding
- * to their physical addresses (our virtual == guest physical). */
+
+ /* Try all the headers: there are usually only three. A read-only one,
+ * a read-write one, and a "note" section which isn't loadable. */
for (i = 0; i < ehdr->e_phnum; i++) {
+ /* If this isn't a loadable segment, we ignore it */
if (phdr[i].p_type != PT_LOAD)
continue;
verbose("Section %i: size %i addr %p\n",
i, phdr[i].p_memsz, (void *)phdr[i].p_paddr);
- /* We expect linear address space. */
+ /* We expect a simple linear address space: every segment must
+ * have the same difference between virtual (p_vaddr) and
+ * physical (p_paddr) address. */
if (!*page_offset)
*page_offset = phdr[i].p_vaddr - phdr[i].p_paddr;
else if (*page_offset != phdr[i].p_vaddr - phdr[i].p_paddr)
errx(1, "Page offset of section %i different", i);
+ /* We track the first and last address we mapped, so we can
+ * tell entry_point() where to scan. */
if (phdr[i].p_paddr < start)
start = phdr[i].p_paddr;
if (phdr[i].p_paddr + phdr[i].p_filesz > end)
end = phdr[i].p_paddr + phdr[i].p_filesz;
- /* We map everything private, writable. */
+ /* We map this section of the file at its physical address. We
+ * map it read & write even if the header says this segment is
+ * read-only. The kernel really wants to be writable: it
+ * patches its own instructions which would normally be
+ * read-only.
+ *
+ * MAP_PRIVATE means that the page won't be copied until a
+ * write is done to it. This allows us to share much of the
+ * kernel memory between Guests. */
addr = mmap((void *)phdr[i].p_paddr,
phdr[i].p_filesz,
PROT_READ|PROT_WRITE|PROT_EXEC,
@@ -167,7 +249,31 @@ static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr,
return entry_point((void *)start, (void *)end, *page_offset);
}
-/* This is amazingly reliable. */
+/*L:170 Prepare to be SHOCKED and AMAZED. And possibly a trifle nauseated.
+ *
+ * We know that CONFIG_PAGE_OFFSET sets what virtual address the kernel expects
+ * to be at. We don't know what that option was, but we can figure it out
+ * approximately by looking at the addresses in the code. I chose the common
+ * case of reading a memory location into the %eax register:
+ *
+ * movl <some-address>, %eax
+ *
+ * This gets encoded as five bytes: "0xA1 <4-byte-address>". For example,
+ * "0xA1 0x18 0x60 0x47 0xC0" reads the address 0xC0476018 into %eax.
+ *
+ * In this example we can guess that the kernel was compiled with
+ * CONFIG_PAGE_OFFSET set to 0xC0000000 (it's always a round number). If the
+ * kernel were larger than 16MB, we might see 0xC1 addresses show up, but our
+ * kernel isn't that bloated yet.
+ *
+ * Unfortunately, x86 has variable-length instructions, so finding this
+ * particular instruction properly involves writing a disassembler. Instead,
+ * we rely on statistics. We look for "0xA1" and tally the different bytes
+ * which occur 4 bytes later (the "0xC0" in our example above). When one of
+ * those bytes appears three times, we can be reasonably confident that it
+ * forms the start of CONFIG_PAGE_OFFSET.
+ *
+ * This is amazingly reliable. */
static unsigned long intuit_page_offset(unsigned char *img, unsigned long len)
{
unsigned int i, possibilities[256] = { 0 };
@@ -180,30 +286,52 @@ static unsigned long intuit_page_offset(unsigned char *img, unsigned long len)
errx(1, "could not determine page offset");
}
+/*L:160 Unfortunately the entire ELF image isn't compressed: the segments
+ * which need loading are extracted and compressed raw. This denies us the
+ * information we need to make a fully-general loader. */
static unsigned long unpack_bzimage(int fd, unsigned long *page_offset)
{
gzFile f;
int ret, len = 0;
+ /* A bzImage always gets loaded at physical address 1M. This is
+ * actually configurable as CONFIG_PHYSICAL_START, but as the comment
+ * there says, "Don't change this unless you know what you are doing".
+ * Indeed. */
void *img = (void *)0x100000;
+ /* gzdopen takes our file descriptor (carefully placed at the start of
+ * the GZIP header we found) and returns a gzFile. */
f = gzdopen(fd, "rb");
+ /* We read it into memory in 64k chunks until we hit the end. */
while ((ret = gzread(f, img + len, 65536)) > 0)
len += ret;
if (ret < 0)
err(1, "reading image from bzImage");
verbose("Unpacked size %i addr %p\n", len, img);
+
+ /* Without the ELF header, we can't tell the virtual-physical gap. This is
+ * CONFIG_PAGE_OFFSET, and people do actually change it. Fortunately,
+ * I have a clever way of figuring it out from the code itself. */
*page_offset = intuit_page_offset(img, len);
return entry_point(img, img + len, *page_offset);
}
+/*L:150 A bzImage, unlike an ELF file, is not meant to be loaded. You're
+ * supposed to jump into it and it will unpack itself. We can't do that
+ * because the Guest can't run the unpacking code, and adding features to
+ * lguest kills puppies, so we don't want to.
+ *
+ * The bzImage is formed by putting the decompressing code in front of the
+ * compressed kernel code. So we can simply scan through it looking for the
+ * first "gzip" header, and start decompressing from there. */
static unsigned long load_bzimage(int fd, unsigned long *page_offset)
{
unsigned char c;
int state = 0;
- /* Ugly brute force search for gzip header. */
+ /* GZIP header is 0x1F 0x8B <method> <flags>... <compressed-by>. */
while (read(fd, &c, 1) == 1) {
switch (state) {
case 0:
@@ -220,8 +348,10 @@ static unsigned long load_bzimage(int fd, unsigned long *page_offset)
state++;
break;
case 9:
+ /* Seek back to the start of the gzip header. */
lseek(fd, -10, SEEK_CUR);
- if (c != 0x03) /* Compressed under UNIX. */
+ /* One final check: "compressed under UNIX". */
+ if (c != 0x03)
state = -1;
else
return unpack_bzimage(fd, page_offset);
@@ -230,25 +360,43 @@ static unsigned long load_bzimage(int fd, unsigned long *page_offset)
errx(1, "Could not find kernel in bzImage");
}
+/*L:140 Loading the kernel is easy when it's a "vmlinux", but most kernels
+ * come wrapped up in the self-decompressing "bzImage" format. With some funky
+ * coding, we can load those, too. */
static unsigned long load_kernel(int fd, unsigned long *page_offset)
{
Elf32_Ehdr hdr;
+ /* Read in the first few bytes. */
if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
err(1, "Reading kernel");
+ /* If it's an ELF file, it starts with "\177ELF" */
if (memcmp(hdr.e_ident, ELFMAG, SELFMAG) == 0)
return map_elf(fd, &hdr, page_offset);
+ /* Otherwise we assume it's a bzImage, and try to unpack it */
return load_bzimage(fd, page_offset);
}
+/* This is a trivial little helper to align pages. Andi Kleen hated it because
+ * it calls getpagesize() twice: "it's dumb code."
+ *
+ * Kernel guys get really het up about optimization, even when it's not
+ * necessary. I leave this code as a reaction against that. */
static inline unsigned long page_align(unsigned long addr)
{
+ /* Add upwards and truncate downwards. */
return ((addr + getpagesize()-1) & ~(getpagesize()-1));
}
-/* initrd gets loaded at top of memory: return length. */
+/*L:180 An "initial ram disk" is a disk image loaded into memory along with
+ * the kernel which the kernel can use to boot from without needing any
+ * drivers. Most distributions now use this as standard: the initrd contains
+ * the code to load the appropriate driver modules for the current machine.
+ *
+ * Importantly, James Morris works for RedHat, and Fedora uses initrds for its
+ * kernels. He sent me this (and tells me when I break it). */
static unsigned long load_initrd(const char *name, unsigned long mem)
{
int ifd;
@@ -257,21 +405,35 @@ static unsigned long load_initrd(const char *name, unsigned long mem)
void *iaddr;
ifd = open_or_die(name, O_RDONLY);
+ /* fstat() is needed to get the file size. */
if (fstat(ifd, &st) < 0)
err(1, "fstat() on initrd '%s'", name);
+ /* The length needs to be rounded up to a page size: mmap needs the
+ * address to be page aligned. */
len = page_align(st.st_size);
+ /* We map the initrd at the top of memory. */
iaddr = mmap((void *)mem - len, st.st_size,
PROT_READ|PROT_EXEC|PROT_WRITE,
MAP_FIXED|MAP_PRIVATE, ifd, 0);
if (iaddr != (void *)mem - len)
err(1, "Mmaping initrd '%s' returned %p not %p",
name, iaddr, (void *)mem - len);
+ /* Once a file is mapped, you can close the file descriptor. It's a
+ * little odd, but quite useful. */
close(ifd);
verbose("mapped initrd %s size=%lu @ %p\n", name, st.st_size, iaddr);
+
+ /* We return the initrd size. */
return len;
}
+/* Once we know how much memory we have, and the address the Guest kernel
+ * expects, we can construct simple linear page tables which will get the Guest
+ * far enough into the boot to create its own.
+ *
+ * We lay them out of the way, just below the initrd (which is why we need to
+ * know its size). */
static unsigned long setup_pagetables(unsigned long mem,
unsigned long initrd_size,
unsigned long page_offset)
@@ -280,23 +442,32 @@ static unsigned long setup_pagetables(unsigned long mem,
unsigned int mapped_pages, i, linear_pages;
unsigned int ptes_per_page = getpagesize()/sizeof(u32);
- /* If we can map all of memory above page_offset, we do so. */
+ /* Ideally we map all physical memory starting at page_offset.
+ * However, if page_offset is 0xC0000000 we can only map 1G of physical
+ * (0xC0000000 + 1G overflows). */
if (mem <= -page_offset)
mapped_pages = mem/getpagesize();
else
mapped_pages = -page_offset/getpagesize();
- /* Each linear PTE page can map ptes_per_page pages. */
+ /* Each PTE page can map ptes_per_page pages: how many do we need? */
linear_pages = (mapped_pages + ptes_per_page-1)/ptes_per_page;
- /* We lay out top-level then linear mapping immediately below initrd */
+ /* We put the toplevel page directory page at the top of memory. */
pgdir = (void *)mem - initrd_size - getpagesize();
+
+ /* Now we use the next linear_pages pages as pte pages */
linear = (void *)pgdir - linear_pages*getpagesize();
+ /* Linear mapping is easy: put every page's address into the mapping in
+ * order. PAGE_PRESENT contains the flags Present, Writable and
+ * Executable. */
for (i = 0; i < mapped_pages; i++)
linear[i] = ((i * getpagesize()) | PAGE_PRESENT);
- /* Now set up pgd so that this memory is at page_offset */
+ /* The top level points to the linear page table pages above. The
+ * entry representing page_offset points to the first one, and they
+ * continue from there. */
for (i = 0; i < mapped_pages; i += ptes_per_page) {
pgdir[(i + page_offset/getpagesize())/ptes_per_page]
= (((u32)linear + i*sizeof(u32)) | PAGE_PRESENT);
@@ -305,9 +476,13 @@ static unsigned long setup_pagetables(unsigned long mem,
verbose("Linear mapping of %u pages in %u pte pages at %p\n",
mapped_pages, linear_pages, linear);
+ /* We return the top level (guest-physical) address: the kernel needs
+ * to know where it is. */
return (unsigned long)pgdir;
}
+/* Simple routine to roll all the commandline arguments together with spaces
+ * between them. */
static void concat(char *dst, char *args[])
{
unsigned int i, len = 0;
@@ -321,18 +496,24 @@ static void concat(char *dst, char *args[])
dst[len] = '\0';
}
+/* This is where we actually tell the kernel to initialize the Guest. We saw
+ * the arguments it expects when we looked at initialize() in lguest_user.c:
+ * the top physical page to allow, the top level pagetable, the entry point and
+ * the page_offset constant for the Guest. */
static int tell_kernel(u32 pgdir, u32 start, u32 page_offset)
{
u32 args[] = { LHREQ_INITIALIZE,
- LGUEST_GUEST_TOP/getpagesize(), /* Just below us */
- pgdir, start, page_offset };
+ top/getpagesize(), pgdir, start, page_offset };
int fd;
fd = open_or_die("/dev/lguest", O_RDWR);
if (write(fd, args, sizeof(args)) < 0)
err(1, "Writing to /dev/lguest");
+
+ /* We return the /dev/lguest file descriptor to control this Guest */
return fd;
}
+/*:*/
static void set_fd(int fd, struct device_list *devices)
{
@@ -341,61 +522,108 @@ static void set_fd(int fd, struct device_list *devices)
devices->max_infd = fd;
}
-/* When input arrives, we tell the kernel to kick lguest out with -EAGAIN. */
+/*L:200
+ * The Waker.
+ *
+ * With a console and network devices, we can have lots of input which we need
+ * to process. We could try to tell the kernel what file descriptors to watch,
+ * but handing a file descriptor mask through to the kernel is fairly icky.
+ *
+ * Instead, we fork off a process which watches the file descriptors and writes
+ * the LHREQ_BREAK command to the /dev/lguest file descriptor to tell the Host
+ * loop to stop running the Guest. This causes it to return from the
+ * /dev/lguest read with -EAGAIN, where it will write to /dev/lguest to reset
+ * the LHREQ_BREAK and wake us up again.
+ *
+ * This, of course, is merely a different *kind* of icky.
+ */
static void wake_parent(int pipefd, int lguest_fd, struct device_list *devices)
{
+ /* Add the pipe from the Launcher to the fdset in the device_list, so
+ * we watch it, too. */
set_fd(pipefd, devices);
for (;;) {
fd_set rfds = devices->infds;
u32 args[] = { LHREQ_BREAK, 1 };
+ /* Wait until input is ready from one of the devices. */
select(devices->max_infd+1, &rfds, NULL, NULL, NULL);
+ /* Is it a message from the Launcher? */
if (FD_ISSET(pipefd, &rfds)) {
int ignorefd;
+ /* If read() returns 0, it means the Launcher has
+ * exited. We silently follow. */
if (read(pipefd, &ignorefd, sizeof(ignorefd)) == 0)
exit(0);
+ /* Otherwise it's telling us there's a problem with one
+ * of the devices, and we should ignore that file
+ * descriptor from now on. */
FD_CLR(ignorefd, &devices->infds);
- } else
+ } else /* Send LHREQ_BREAK command. */
write(lguest_fd, args, sizeof(args));
}
}
+/* This routine just sets up a pipe to the Waker process. */
static int setup_waker(int lguest_fd, struct device_list *device_list)
{
int pipefd[2], child;
+ /* We create a pipe to talk to the waker, and also so it knows when the
+ * Launcher dies (and closes pipe). */
pipe(pipefd);
child = fork();
if (child == -1)
err(1, "forking");
if (child == 0) {
+ /* Close the "writing" end of our copy of the pipe */
close(pipefd[1]);
wake_parent(pipefd[0], lguest_fd, device_list);
}
+ /* Close the reading end of our copy of the pipe. */
close(pipefd[0]);
+ /* Here is the fd used to talk to the waker. */
return pipefd[1];
}
+/*L:210
+ * Device Handling.
+ *
+ * When the Guest sends DMA to us, it sends us an array of addresses and sizes.
+ * We need to make sure it's not trying to reach into the Launcher itself, so
+ * we have a convenient routine which checks it and exits with an error message
+ * if something funny is going on:
+ */
static void *_check_pointer(unsigned long addr, unsigned int size,
unsigned int line)
{
- if (addr >= LGUEST_GUEST_TOP || addr + size >= LGUEST_GUEST_TOP)
+ /* We have to separately check addr and addr+size, because size could
+ * be huge and addr + size might wrap around. */
+ if (addr >= top || addr + size >= top)
errx(1, "%s:%i: Invalid address %li", __FILE__, line, addr);
+ /* We return a pointer for the caller's convenience, now we know it's
+ * safe to use. */
return (void *)addr;
}
+/* A macro which transparently hands the line number to the real function. */
#define check_pointer(addr,size) _check_pointer(addr, size, __LINE__)
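
As a small aside, the same __LINE__-forwarding trick works for any checking helper; a tiny self-contained sketch (the report() macro here is made up purely for illustration):

#include <stdio.h>

static void _report(const char *msg, unsigned int line)
{
        printf("%s (called from line %u)\n", msg, line);
}
/* The macro expands at the call site, so __LINE__ is the caller's line. */
#define report(msg) _report(msg, __LINE__)

int main(void)
{
        report("checking a pointer");
        return 0;
}
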
-/* Returns pointer to dma->used_len */
+/* The Guest has given us the address of a "struct lguest_dma". We check it's
+ * OK and convert it to an iovec (which is a simple array of ptr/size
+ * pairs). */
static u32 *dma2iov(unsigned long dma, struct iovec iov[], unsigned *num)
{
unsigned int i;
struct lguest_dma *udma;
+ /* First we make sure that the array memory itself is valid. */
udma = check_pointer(dma, sizeof(*udma));
+ /* Now we check each element */
for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
+ /* A zero length ends the array. */
if (!udma->len[i])
break;
@@ -403,9 +631,15 @@ static u32 *dma2iov(unsigned long dma, struct iovec iov[], unsigned *num)
iov[i].iov_len = udma->len[i];
}
*num = i;
+
+ /* We return the pointer to where the caller should write the amount of
+ * the buffer used. */
return &udma->used_len;
}
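
The reason for converting to iovecs is scatter-gather I/O: one readv() or writev() call can fill or drain several separate buffers in a single system call. A minimal sketch, assuming there is something to read on standard input:

#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        char first[4], second[8];
        struct iovec iov[2] = {
                { .iov_base = first,  .iov_len = sizeof(first)  },
                { .iov_base = second, .iov_len = sizeof(second) },
        };

        /* One system call fills "first", then spills into "second". */
        ssize_t len = readv(STDIN_FILENO, iov, 2);
        printf("read %zd bytes into two buffers\n", len);
        return 0;
}
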
+/* This routine gets a DMA buffer from the Guest for a given key, and converts
+ * it to an iovec array. It returns the interrupt the Guest wants when we're
+ * finished, and a pointer to the "used_len" field to fill in. */
static u32 *get_dma_buffer(int fd, void *key,
struct iovec iov[], unsigned int *num, u32 *irq)
{
@@ -413,16 +647,21 @@ static u32 *get_dma_buffer(int fd, void *key,
unsigned long udma;
u32 *res;
+ /* Ask the kernel for a DMA buffer corresponding to this key. */
udma = write(fd, buf, sizeof(buf));
+ /* They haven't registered any, or they're all used? */
if (udma == (unsigned long)-1)
return NULL;
- /* Kernel stashes irq in ->used_len. */
+ /* Convert it into our iovec array */
res = dma2iov(udma, iov, num);
+ /* The kernel stashes irq in ->used_len to get it out to us. */
*irq = *res;
+ /* Return a pointer to ((struct lguest_dma *)udma)->used_len. */
return res;
}
+/* This is a convenient routine to send the Guest an interrupt. */
static void trigger_irq(int fd, u32 irq)
{
u32 buf[] = { LHREQ_IRQ, irq };
@@ -430,6 +669,10 @@ static void trigger_irq(int fd, u32 irq)
err(1, "Triggering irq %i", irq);
}
+/* This simply sets up an iovec array where we can put data to be discarded.
+ * This happens when the Guest doesn't want or can't handle the input: we have
+ * to get rid of it somewhere, and if we bury it in the ceiling space it will
+ * start to smell after a week. */
static void discard_iovec(struct iovec *iov, unsigned int *num)
{
static char discard_buf[1024];
@@ -438,19 +681,24 @@ static void discard_iovec(struct iovec *iov, unsigned int *num)
iov->iov_len = sizeof(discard_buf);
}
+/* Here are the input terminal settings we save, and the routine to restore them
+ * on exit so the user can see what they type next. */
static struct termios orig_term;
static void restore_term(void)
{
tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
}
+/* We associate some data with the console for our exit hack. */
struct console_abort
{
+ /* How many times have they hit ^C? */
int count;
+ /* When did they start? */
struct timeval start;
};
-/* We DMA input to buffer bound at start of console page. */
+/* This is the routine which handles console input (ie. stdin). */
static bool handle_console_input(int fd, struct device *dev)
{
u32 irq = 0, *lenp;
@@ -459,24 +707,38 @@ static bool handle_console_input(int fd, struct device *dev)
struct iovec iov[LGUEST_MAX_DMA_SECTIONS];
struct console_abort *abort = dev->priv;
+ /* First we get the console buffer from the Guest. The key is dev->mem
+ * which was set to 0 in setup_console(). */
lenp = get_dma_buffer(fd, dev->mem, iov, &num, &irq);
if (!lenp) {
+ /* If it's not ready for input, warn and set up to discard. */
warn("console: no dma buffer!");
discard_iovec(iov, &num);
}
+ /* This is why we convert to iovecs: the readv() call uses them, and so
+ * it reads straight into the Guest's buffer. */
len = readv(dev->fd, iov, num);
if (len <= 0) {
+ /* This implies that the console is closed, is /dev/null, or
+ * something went terribly wrong. We still go through the rest
+ * of the logic, though, especially the exit handling below. */
warnx("Failed to get console input, ignoring console.");
len = 0;
}
+ /* If we read the data into the Guest, fill in the length and send the
+ * interrupt. */
if (lenp) {
*lenp = len;
trigger_irq(fd, irq);
}
- /* Three ^C within one second? Exit. */
+ /* Three ^C within one second? Exit.
+ *
+ * This is such a hack, but works surprisingly well. Each ^C has to be
+ * in a buffer by itself, so they can't be too fast. But we check that
+ * we get three within about a second, so they can't be too slow. */
if (len == 1 && ((char *)iov[0].iov_base)[0] == 3) {
if (!abort->count++)
gettimeofday(&abort->start, NULL);
@@ -484,43 +746,60 @@ static bool handle_console_input(int fd, struct device *dev)
struct timeval now;
gettimeofday(&now, NULL);
if (now.tv_sec <= abort->start.tv_sec+1) {
- /* Make sure waker is not blocked in BREAK */
u32 args[] = { LHREQ_BREAK, 0 };
+ /* Close the fd so Waker will know it has to
+ * exit. */
close(waker_fd);
+ /* Just in case waker is blocked in BREAK, send
+ * unbreak now. */
write(fd, args, sizeof(args));
exit(2);
}
abort->count = 0;
}
} else
+ /* Any other key resets the abort counter. */
abort->count = 0;
+ /* Now, if we didn't read anything, put the input terminal back and
+ * return failure (meaning, don't call us again). */
if (!len) {
restore_term();
return false;
}
+ /* Everything went OK! */
return true;
}
+/* Handling console output is much simpler than input. */
static u32 handle_console_output(int fd, const struct iovec *iov,
unsigned num, struct device*dev)
{
+ /* Whatever the Guest sends, write it to standard output. Return the
+ * number of bytes written. */
return writev(STDOUT_FILENO, iov, num);
}
+/* Guest->Host network output is also pretty easy. */
static u32 handle_tun_output(int fd, const struct iovec *iov,
unsigned num, struct device *dev)
{
- /* Now we've seen output, we should warn if we can't get buffers. */
+ /* We put a flag in the "priv" pointer of the network device, and set
+ * it as soon as we see output. We'll see why in handle_tun_input() */
*(bool *)dev->priv = true;
+ /* Whatever packet the Guest sent us, write it out to the tun
+ * device. */
return writev(dev->fd, iov, num);
}
+/* This matches the peer_key() in lguest_net.c. The key for any given slot
+ * is the address of the network device's page plus 4 * the slot number. */
static unsigned long peer_offset(unsigned int peernum)
{
return 4 * peernum;
}
+/* This is where we handle a packet coming in from the tun device */
static bool handle_tun_input(int fd, struct device *dev)
{
u32 irq = 0, *lenp;
@@ -528,17 +807,28 @@ static bool handle_tun_input(int fd, struct device *dev)
unsigned num;
struct iovec iov[LGUEST_MAX_DMA_SECTIONS];
+ /* First we get a buffer the Guest has bound to its key. */
lenp = get_dma_buffer(fd, dev->mem+peer_offset(NET_PEERNUM), iov, &num,
&irq);
if (!lenp) {
+ /* Now, it's expected that if we try to send a packet too
+ * early, the Guest won't be ready yet. This is why we set a
+ * flag when the Guest sends its first packet. If it's sent a
+ * packet we assume it should be ready to receive them.
+ *
+ * Actually, this is what the status bits in the descriptor are
+ * for: we should *use* them. FIXME! */
if (*(bool *)dev->priv)
warn("network: no dma buffer!");
discard_iovec(iov, &num);
}
+ /* Read the packet from the device directly into the Guest's buffer. */
len = readv(dev->fd, iov, num);
if (len <= 0)
err(1, "reading network");
+
+ /* Write the used_len, and trigger the interrupt for the Guest */
if (lenp) {
*lenp = len;
trigger_irq(fd, irq);
@@ -546,9 +836,13 @@ static bool handle_tun_input(int fd, struct device *dev)
verbose("tun input packet len %i [%02x %02x] (%s)\n", len,
((u8 *)iov[0].iov_base)[0], ((u8 *)iov[0].iov_base)[1],
lenp ? "sent" : "discarded");
+ /* All good. */
return true;
}
+/* The last device handling routine is block output: the Guest has sent a DMA
+ * to the block device. It will have placed the command it wants in the
+ * "struct lguest_block_page". */
static u32 handle_block_output(int fd, const struct iovec *iov,
unsigned num, struct device *dev)
{
@@ -558,36 +852,64 @@ static u32 handle_block_output(int fd, const struct iovec *iov,
struct iovec reply[LGUEST_MAX_DMA_SECTIONS];
off64_t device_len, off = (off64_t)p->sector * 512;
+ /* First we extract the device length from the dev->priv pointer. */
device_len = *(off64_t *)dev->priv;
+ /* We first check that the read or write is within the length of the
+ * block file. */
if (off >= device_len)
err(1, "Bad offset %llu vs %llu", off, device_len);
+ /* Move to the right location in the block file. This shouldn't fail,
+ * but best to check. */
if (lseek64(dev->fd, off, SEEK_SET) != off)
err(1, "Bad seek to sector %i", p->sector);
verbose("Block: %s at offset %llu\n", p->type ? "WRITE" : "READ", off);
+ /* They were supposed to bind a reply buffer at key equal to the start
+ * of the block device memory. We need this to tell them when the
+ * request is finished. */
lenp = get_dma_buffer(fd, dev->mem, reply, &reply_num, &irq);
if (!lenp)
err(1, "Block request didn't give us a dma buffer");
if (p->type) {
+ /* A write request. The DMA they sent contained the data, so
+ * write it out. */
len = writev(dev->fd, iov, num);
+ /* Grr... Now we know how long the "struct lguest_dma" they
+ * sent was, we make sure they didn't try to write over the end
+ * of the block file (possibly extending it). */
if (off + len > device_len) {
+ /* Trim it back to the correct length */
ftruncate(dev->fd, device_len);
+ /* Die, bad Guest, die. */
errx(1, "Write past end %llu+%u", off, len);
}
+ /* The reply length is 0: we just send back an empty DMA to
+ * interrupt them and tell them the write is finished. */
*lenp = 0;
} else {
+ /* A read request. They sent an empty DMA to start the
+ * request, and we put the read contents into the reply
+ * buffer. */
len = readv(dev->fd, reply, reply_num);
*lenp = len;
}
+ /* The result is 1 (done), 2 if there was an error (short read or
+ * write). */
p->result = 1 + (p->bytes != len);
+ /* Now tell them we've used their reply buffer. */
trigger_irq(fd, irq);
+
+ /* We're supposed to return the number of bytes of the output buffer we
+ * used. But the block device uses the "result" field instead, so we
+ * don't bother. */
return 0;
}
+/* This is the generic routine we call when the Guest sends some DMA out. */
static void handle_output(int fd, unsigned long dma, unsigned long key,
struct device_list *devices)
{
@@ -596,30 +918,53 @@ static void handle_output(int fd, unsigned long dma, unsigned long key,
struct iovec iov[LGUEST_MAX_DMA_SECTIONS];
unsigned num = 0;
+ /* Convert the "struct lguest_dma" they're sending to a "struct
+ * iovec". */
lenp = dma2iov(dma, iov, &num);
+
+ /* Check each device: if they expect output to this key, tell them to
+ * handle it. */
for (i = devices->dev; i; i = i->next) {
if (i->handle_output && key == i->watch_key) {
+ /* We write the result straight into the used_len field
+ * for them. */
*lenp = i->handle_output(fd, iov, num, i);
return;
}
}
+
+ /* This can happen: the kernel sends any SEND_DMA which doesn't match
+ * another Guest to us. It could be that another Guest just left a
+ * network, for example. But it's unusual. */
warnx("Pending dma %p, key %p", (void *)dma, (void *)key);
}
+/* This is called when the waker wakes us up: check for incoming file
+ * descriptors. */
static void handle_input(int fd, struct device_list *devices)
{
+ /* select() wants a zeroed timeval to mean "don't wait". */
struct timeval poll = { .tv_sec = 0, .tv_usec = 0 };
for (;;) {
struct device *i;
fd_set fds = devices->infds;
+ /* If nothing is ready, we're done. */
if (select(devices->max_infd+1, &fds, NULL, NULL, &poll) == 0)
break;
+ /* Otherwise, call the device(s) which have readable
+ * file descriptors and a method of handling them. */
for (i = devices->dev; i; i = i->next) {
if (i->handle_input && FD_ISSET(i->fd, &fds)) {
+ /* If handle_input() returns false, it means we
+ * should no longer service it.
+ * handle_console_input() does this. */
if (!i->handle_input(fd, i)) {
+ /* Clear it from the set of input file
+ * descriptors kept at the head of the
+ * device list. */
FD_CLR(i->fd, &devices->infds);
/* Tell waker to ignore it too... */
write(waker_fd, &i->fd, sizeof(i->fd));
@@ -629,26 +974,42 @@ static void handle_input(int fd, struct device_list *devices)
}
}
-static struct lguest_device_desc *new_dev_desc(u16 type, u16 features,
- u16 num_pages)
+/*L:190
+ * Device Setup
+ *
+ * All devices need a descriptor so the Guest knows it exists, and a "struct
+ * device" so the Launcher can keep track of it. We have common helper
+ * routines to allocate them.
+ *
+ * This routine allocates a new "struct lguest_device_desc" from the descriptor
+ * table in the devices array just above the Guest's normal memory. */
+static struct lguest_device_desc *
+new_dev_desc(struct lguest_device_desc *descs,
+ u16 type, u16 features, u16 num_pages)
{
- static unsigned long top = LGUEST_GUEST_TOP;
- struct lguest_device_desc *desc;
+ unsigned int i;
- desc = malloc(sizeof(*desc));
- desc->type = type;
- desc->num_pages = num_pages;
- desc->features = features;
- desc->status = 0;
- if (num_pages) {
- top -= num_pages*getpagesize();
- map_zeroed_pages(top, num_pages);
- desc->pfn = top / getpagesize();
- } else
- desc->pfn = 0;
- return desc;
+ for (i = 0; i < LGUEST_MAX_DEVICES; i++) {
+ if (!descs[i].type) {
+ descs[i].type = type;
+ descs[i].features = features;
+ descs[i].num_pages = num_pages;
+ /* If they said the device needs memory, we allocate
+ * that now, bumping up the top of Guest memory. */
+ if (num_pages) {
+ map_zeroed_pages(top, num_pages);
+ descs[i].pfn = top/getpagesize();
+ top += num_pages*getpagesize();
+ }
+ return &descs[i];
+ }
+ }
+ errx(1, "too many devices");
}
+/* This monster routine does all the creation and setup of a new device,
+ * including calling new_dev_desc() to allocate the descriptor and device
+ * memory. */
static struct device *new_device(struct device_list *devices,
u16 type, u16 num_pages, u16 features,
int fd,
@@ -661,15 +1022,21 @@ static struct device *new_device(struct device_list *devices,
{
struct device *dev = malloc(sizeof(*dev));
- /* Append to device list. */
+ /* Append to device list. Prepending to a single-linked list is
+ * easier, but the user expects the devices to be arranged on the bus
+ * in command-line order. The first network device on the command line
+ * is eth0, the first block device /dev/lgba, etc. */
*devices->lastdev = dev;
dev->next = NULL;
devices->lastdev = &dev->next;
+ /* Now we populate the fields one at a time. */
dev->fd = fd;
+ /* If we have an input handler for this file descriptor, then we add it
+ * to the device_list's fdset and maxfd. */
if (handle_input)
set_fd(dev->fd, devices);
- dev->desc = new_dev_desc(type, features, num_pages);
+ dev->desc = new_dev_desc(devices->descs, type, features, num_pages);
dev->mem = (void *)(dev->desc->pfn * getpagesize());
dev->handle_input = handle_input;
dev->watch_key = (unsigned long)dev->mem + watch_off;
@@ -677,27 +1044,37 @@ static struct device *new_device(struct device_list *devices,
return dev;
}
+/* Our first setup routine is the console. It's a fairly simple device, but
+ * UNIX tty handling makes it uglier than it could be. */
static void setup_console(struct device_list *devices)
{
struct device *dev;
+ /* If we can save the initial standard input settings... */
if (tcgetattr(STDIN_FILENO, &orig_term) == 0) {
struct termios term = orig_term;
+ /* Then we turn off echo, line buffering and ^C etc. We want a
+ * raw input stream to the Guest. */
term.c_lflag &= ~(ISIG|ICANON|ECHO);
tcsetattr(STDIN_FILENO, TCSANOW, &term);
+ /* If we exit gracefully, the original settings will be
+ * restored so the user can see what they're typing. */
atexit(restore_term);
}
- /* We don't currently require a page for the console. */
+ /* We don't currently require any memory for the console, so we ask for
+ * 0 pages. */
dev = new_device(devices, LGUEST_DEVICE_T_CONSOLE, 0, 0,
STDIN_FILENO, handle_console_input,
LGUEST_CONSOLE_DMA_KEY, handle_console_output);
+ /* We store the console state in dev->priv, and initialize it. */
dev->priv = malloc(sizeof(struct console_abort));
((struct console_abort *)dev->priv)->count = 0;
verbose("device %p: console\n",
(void *)(dev->desc->pfn * getpagesize()));
}
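
For reference, a minimal standalone sketch of the same tty trick: save the settings, switch to raw input, and register an atexit() handler to put them back:

#include <stdio.h>
#include <stdlib.h>
#include <termios.h>
#include <unistd.h>

static struct termios saved;

static void restore(void)
{
        tcsetattr(STDIN_FILENO, TCSANOW, &saved);
}

int main(void)
{
        struct termios raw;

        if (tcgetattr(STDIN_FILENO, &saved) != 0)
                return 1;
        raw = saved;
        raw.c_lflag &= ~(ISIG | ICANON | ECHO); /* no ^C, no line buffering, no echo */
        tcsetattr(STDIN_FILENO, TCSANOW, &raw);
        atexit(restore);                        /* put the terminal back on exit */

        printf("terminal is now raw; press any key\n");
        getchar();
        return 0;
}
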
+/* Setting up a block file is also fairly straightforward. */
static void setup_block_file(const char *filename, struct device_list *devices)
{
int fd;
@@ -705,20 +1082,47 @@ static void setup_block_file(const char *filename, struct device_list *devices)
off64_t *device_len;
struct lguest_block_page *p;
+ /* We open with O_LARGEFILE because otherwise we get stuck at 2G. We
+ * open with O_DIRECT because otherwise our benchmarks go much too
+ * fast. */
fd = open_or_die(filename, O_RDWR|O_LARGEFILE|O_DIRECT);
+
+ /* We want one page, and have no input handler (the block file never
+ * has anything interesting to say to us). Our timing will be quite
+ * random, so it should be a reasonable randomness source. */
dev = new_device(devices, LGUEST_DEVICE_T_BLOCK, 1,
LGUEST_DEVICE_F_RANDOMNESS,
fd, NULL, 0, handle_block_output);
+
+ /* We store the device size in the private area */
device_len = dev->priv = malloc(sizeof(*device_len));
+ /* This is the safe way of establishing the size of our device: it
+ * might be a normal file or an actual block device like /dev/hdb. */
*device_len = lseek64(fd, 0, SEEK_END);
- p = dev->mem;
+ /* The device memory is a "struct lguest_block_page". It's zeroed
+ * already, we just need to put in the device size. Block devices
+ * think in sectors (ie. 512 byte chunks), so we translate here. */
+ p = dev->mem;
p->num_sectors = *device_len/512;
verbose("device %p: block %i sectors\n",
(void *)(dev->desc->pfn * getpagesize()), p->num_sectors);
}
-/* We use fnctl locks to reserve network slots (autocleanup!) */
+/*
+ * Network Devices.
+ *
+ * Setting up network devices is quite a pain, because we have three types.
+ * First, we have the inter-Guest network. This is a file which is mapped into
+ * the address space of the Guests who are on the network. Because it is a
+ * shared mapping, the same page underlies all the devices, and they can send
+ * DMA to each other.
+ *
+ * Remember from our network driver, the Guest is told what slot in the page it
+ * is to use. We use exclusive fcntl locks to reserve a slot. If another
+ * Guest is using a slot, the lock will fail and we try another. Because fcntl
+ * locks are cleaned up automatically when we die, this cleverly means that our
+ * reservation on the slot will vanish if we crash. */
static unsigned int find_slot(int netfd, const char *filename)
{
struct flock fl;
@@ -726,26 +1130,33 @@ static unsigned int find_slot(int netfd, const char *filename)
fl.l_type = F_WRLCK;
fl.l_whence = SEEK_SET;
fl.l_len = 1;
+ /* Try a 1 byte lock in each possible position number */
for (fl.l_start = 0;
fl.l_start < getpagesize()/sizeof(struct lguest_net);
fl.l_start++) {
+ /* If we succeed, return the slot number. */
if (fcntl(netfd, F_SETLK, &fl) == 0)
return fl.l_start;
}
errx(1, "No free slots in network file %s", filename);
}
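
For illustration, a small sketch of the complementary F_GETLK query, which reports whether a given one-byte slot is currently locked and, if so, by which process (the file name is just an example):

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>

static void query_slot(int fd, off_t slot)
{
        struct flock fl = {
                .l_type   = F_WRLCK,
                .l_whence = SEEK_SET,
                .l_start  = slot,
                .l_len    = 1,
        };

        /* F_GETLK doesn't take the lock; it tells us who would block us. */
        if (fcntl(fd, F_GETLK, &fl) != 0) {
                perror("fcntl");
                return;
        }
        if (fl.l_type == F_UNLCK)
                printf("slot %ld is free\n", (long)slot);
        else
                printf("slot %ld is held by pid %ld\n",
                       (long)slot, (long)fl.l_pid);
}

int main(void)
{
        int fd = open("/tmp/lguest-net-example", O_RDWR | O_CREAT, 0600);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        query_slot(fd, 0);
        return 0;
}
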
+/* This function sets up the network file */
static void setup_net_file(const char *filename,
struct device_list *devices)
{
int netfd;
struct device *dev;
+ /* We don't use open_or_die() here: for friendliness we create the file
+ * if it doesn't already exist. */
netfd = open(filename, O_RDWR, 0);
if (netfd < 0) {
if (errno == ENOENT) {
netfd = open(filename, O_RDWR|O_CREAT, 0600);
if (netfd >= 0) {
+ /* If we succeeded, initialize the file with a
+ * blank page. */
char page[getpagesize()];
memset(page, 0, sizeof(page));
write(netfd, page, sizeof(page));
@@ -755,11 +1166,15 @@ static void setup_net_file(const char *filename,
err(1, "cannot open net file '%s'", filename);
}
+ /* We need 1 page, and the features indicate the slot to use and that
+ * no checksum is needed. We never touch this device again; it's
+ * between the Guests on the network, so we don't register input or
+ * output handlers. */
dev = new_device(devices, LGUEST_DEVICE_T_NET, 1,
find_slot(netfd, filename)|LGUEST_NET_F_NOCSUM,
-1, NULL, 0, NULL);
- /* We overwrite the /dev/zero mapping with the actual file. */
+ /* Map the shared file. */
if (mmap(dev->mem, getpagesize(), PROT_READ|PROT_WRITE,
MAP_FIXED|MAP_SHARED, netfd, 0) != dev->mem)
err(1, "could not mmap '%s'", filename);
@@ -767,6 +1182,7 @@ static void setup_net_file(const char *filename,
(void *)(dev->desc->pfn * getpagesize()), filename,
dev->desc->features & ~LGUEST_NET_F_NOCSUM);
}
+/*:*/
static u32 str2ip(const char *ipaddr)
{
@@ -776,7 +1192,11 @@ static u32 str2ip(const char *ipaddr)
return (byte[0] << 24) | (byte[1] << 16) | (byte[2] << 8) | byte[3];
}
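
For example, the shift-and-or above turns "192.168.19.1" into 0xC0A81301; a tiny sketch of the same computation:

#include <stdio.h>

int main(void)
{
        unsigned int byte[4] = { 192, 168, 19, 1 };
        unsigned int ip = (byte[0] << 24) | (byte[1] << 16)
                        | (byte[2] << 8) | byte[3];

        printf("0x%08X\n", ip); /* prints 0xC0A81301 */
        return 0;
}
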
-/* adapted from libbridge */
+/* This code is "adapted" from libbridge: it attaches the Host end of the
+ * network device to the bridge device specified by the command line.
+ *
+ * This is yet another James Morris contribution (I'm an IP-level guy, so I
+ * dislike bridging), and I just try not to break it. */
static void add_to_bridge(int fd, const char *if_name, const char *br_name)
{
int ifidx;
@@ -795,12 +1215,16 @@ static void add_to_bridge(int fd, const char *if_name, const char *br_name)
err(1, "can't add %s to bridge %s", if_name, br_name);
}
+/* This sets up the Host end of the network device with an IP address, brings
+ * it up so packets will flow, then copies the MAC address into the hwaddr
+ * pointer (in practice, the Host's slot in the network device's memory). */
static void configure_device(int fd, const char *devname, u32 ipaddr,
unsigned char hwaddr[6])
{
struct ifreq ifr;
struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
+ /* Don't read these incantations. Just cut & paste them like I did! */
memset(&ifr, 0, sizeof(ifr));
strcpy(ifr.ifr_name, devname);
sin->sin_family = AF_INET;
@@ -811,12 +1235,19 @@ static void configure_device(int fd, const char *devname, u32 ipaddr,
if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0)
err(1, "Bringing interface %s up", devname);
+ /* SIOC stands for Socket I/O Control. G means Get (vs S for Set
+ * above). IF means Interface, and HWADDR is hardware address.
+ * Simple! */
if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0)
err(1, "getting hw address for %s", devname);
-
memcpy(hwaddr, ifr.ifr_hwaddr.sa_data, 6);
}
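
A minimal standalone sketch of just the MAC-fetching incantation, querying a hard-coded interface name ("eth0" is only an example):

#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct ifreq ifr;
        unsigned char *hw;
        int fd = socket(PF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        if (ioctl(fd, SIOCGIFHWADDR, &ifr) != 0) {
                perror("SIOCGIFHWADDR");
                return 1;
        }
        hw = (unsigned char *)ifr.ifr_hwaddr.sa_data;
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               hw[0], hw[1], hw[2], hw[3], hw[4], hw[5]);
        close(fd);
        return 0;
}
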
+/*L:195 The other kind of network is a Host<->Guest network. This can either
+ * use bridging or routing, but the principle is the same: it uses the "tun"
+ * device to inject packets into the Host as if they came in from a normal
+ * network card. We just shunt packets between the Guest and the tun
+ * device. */
static void setup_tun_net(const char *arg, struct device_list *devices)
{
struct device *dev;
@@ -825,36 +1256,56 @@ static void setup_tun_net(const char *arg, struct device_list *devices)
u32 ip;
const char *br_name = NULL;
+ /* We open the /dev/net/tun device and tell it we want a tap device. A
+ * tap device is like a tun device, only somehow different. To tell
+ * the truth, I completely blundered my way through this code, but it
+ * works now! */
netfd = open_or_die("/dev/net/tun", O_RDWR);
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
strcpy(ifr.ifr_name, "tap%d");
if (ioctl(netfd, TUNSETIFF, &ifr) != 0)
err(1, "configuring /dev/net/tun");
+ /* We don't need checksums calculated for packets coming in this
+ * device: trust us! */
ioctl(netfd, TUNSETNOCSUM, 1);
- /* You will be peer 1: we should create enough jitter to randomize */
+ /* We create the net device with 1 page, using the features field of
+ * the descriptor to tell the Guest it is in slot 1 (NET_PEERNUM), and
+ * that the device has fairly random timing. We do *not* specify
+ * LGUEST_NET_F_NOCSUM: these packets can reach the real world.
+ *
+	 * We will put our MAC address in slot 0 for the Guest to see, so
+ * it will send packets to us using the key "peer_offset(0)": */
dev = new_device(devices, LGUEST_DEVICE_T_NET, 1,
NET_PEERNUM|LGUEST_DEVICE_F_RANDOMNESS, netfd,
handle_tun_input, peer_offset(0), handle_tun_output);
+
+ /* We keep a flag which says whether we've seen packets come out from
+ * this network device. */
dev->priv = malloc(sizeof(bool));
*(bool *)dev->priv = false;
+ /* We need a socket to perform the magic network ioctls to bring up the
+ * tap interface, connect to the bridge etc. Any socket will do! */
ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
if (ipfd < 0)
err(1, "opening IP socket");
+ /* If the command line was --tunnet=bridge:<name> do bridging. */
if (!strncmp(BRIDGE_PFX, arg, strlen(BRIDGE_PFX))) {
ip = INADDR_ANY;
br_name = arg + strlen(BRIDGE_PFX);
add_to_bridge(ipfd, ifr.ifr_name, br_name);
- } else
+ } else /* It is an IP address to set up the device with */
ip = str2ip(arg);
- /* We are peer 0, ie. first slot. */
+ /* We are peer 0, ie. first slot, so we hand dev->mem to this routine
+ * to write the MAC address at the start of the device memory. */
configure_device(ipfd, ifr.ifr_name, ip, dev->mem);
- /* Set "promisc" bit: we want every single packet. */
+ /* Set "promisc" bit: we want every single packet if we're going to
+ * bridge to other machines (and otherwise it doesn't matter). */
*((u8 *)dev->mem) |= 0x1;
close(ipfd);
@@ -865,31 +1316,10 @@ static void setup_tun_net(const char *arg, struct device_list *devices)
if (br_name)
verbose("attached to bridge: %s\n", br_name);
}
+/* That's the end of device setup. */
-/* Now we know how much memory we have, we copy in device descriptors */
-static void map_device_descriptors(struct device_list *devs, unsigned long mem)
-{
- struct device *i;
- unsigned int num;
- struct lguest_device_desc *descs;
-
- /* Device descriptor array sits just above top of normal memory */
- descs = map_zeroed_pages(mem, 1);
-
- for (i = devs->dev, num = 0; i; i = i->next, num++) {
- if (num == LGUEST_MAX_DEVICES)
- errx(1, "too many devices");
- verbose("Device %i: %s\n", num,
- i->desc->type == LGUEST_DEVICE_T_NET ? "net"
- : i->desc->type == LGUEST_DEVICE_T_CONSOLE ? "console"
- : i->desc->type == LGUEST_DEVICE_T_BLOCK ? "block"
- : "unknown");
- descs[num] = *i->desc;
- free(i->desc);
- i->desc = &descs[num];
- }
-}
-
+/*L:220 Finally we reach the core of the Launcher, which runs the Guest, serves
+ * its input and output, and eventually lays it to rest. */
static void __attribute__((noreturn))
run_guest(int lguest_fd, struct device_list *device_list)
{
@@ -901,20 +1331,37 @@ run_guest(int lguest_fd, struct device_list *device_list)
/* We read from the /dev/lguest device to run the Guest. */
readval = read(lguest_fd, arr, sizeof(arr));
+ /* The read can only really return sizeof(arr) (the Guest did a
+ * SEND_DMA to us), or an error. */
+
+ /* For a successful read, arr[0] is the address of the "struct
+ * lguest_dma", and arr[1] is the key the Guest sent to. */
if (readval == sizeof(arr)) {
handle_output(lguest_fd, arr[0], arr[1], device_list);
continue;
+ /* ENOENT means the Guest died. Reading tells us why. */
} else if (errno == ENOENT) {
char reason[1024] = { 0 };
read(lguest_fd, reason, sizeof(reason)-1);
errx(1, "%s", reason);
+ /* EAGAIN means the waker wanted us to look at some input.
+ * Anything else means a bug or incompatible change. */
} else if (errno != EAGAIN)
err(1, "Running guest failed");
+
+ /* Service input, then unset the BREAK which releases
+ * the Waker. */
handle_input(lguest_fd, device_list);
if (write(lguest_fd, args, sizeof(args)) < 0)
err(1, "Resetting break");
}
}
+/*
+ * This is the end of the Launcher.
+ *
+ * But wait! We've seen I/O from the Launcher, and we've seen I/O from the
+ * Drivers. If we were to see the Host kernel I/O code, our understanding
+ * would be complete... :*/
static struct option opts[] = {
{ "verbose", 0, NULL, 'v' },
@@ -932,19 +1379,59 @@ static void usage(void)
"<mem-in-mb> vmlinux [args...]");
}
+/*L:100 The Launcher code itself takes us out into userspace, that scary place
+ * where pointers run wild and free! Unfortunately, like most userspace
+ * programs, it's quite boring (which is why everyone likes to hack on the
+ * kernel!). Perhaps if you make up an Lguest Drinking Game at this point, it
+ * will get you through this section. Or, maybe not.
+ *
+ * The Launcher binary sits up high, usually starting at address 0xB8000000.
+ * Everything below this is the "physical" memory for the Guest. For example,
+ * if the Guest were to write a "1" at physical address 0, we would see a "1"
+ * in the Launcher at "(int *)0". Guest physical == Launcher virtual.
+ *
+ * This can be tough to get your head around, but usually it just means that we
+ * don't need to do any conversion when the Guest gives us its "physical"
+ * addresses.
+ */
int main(int argc, char *argv[])
{
- unsigned long mem, pgdir, start, page_offset, initrd_size = 0;
- int c, lguest_fd;
+ /* Memory, top-level pagetable, code startpoint, PAGE_OFFSET and size
+ * of the (optional) initrd. */
+ unsigned long mem = 0, pgdir, start, page_offset, initrd_size = 0;
+ /* A temporary and the /dev/lguest file descriptor. */
+ int i, c, lguest_fd;
+ /* The list of Guest devices, based on command line arguments. */
struct device_list device_list;
+ /* The boot information for the Guest: at guest-physical address 0. */
void *boot = (void *)0;
+ /* If they specify an initrd file to load. */
const char *initrd_name = NULL;
+ /* First we initialize the device list. Since console and network
+	 * devices receive input from a file descriptor, we keep an fdset
+ * (infds) and the maximum fd number (max_infd) with the head of the
+ * list. We also keep a pointer to the last device, for easy appending
+ * to the list. */
device_list.max_infd = -1;
device_list.dev = NULL;
device_list.lastdev = &device_list.dev;
FD_ZERO(&device_list.infds);
+ /* We need to know how much memory so we can set up the device
+ * descriptor and memory pages for the devices as we parse the command
+ * line. So we quickly look through the arguments to find the amount
+ * of memory now. */
+ for (i = 1; i < argc; i++) {
+ if (argv[i][0] != '-') {
+ mem = top = atoi(argv[i]) * 1024 * 1024;
+ device_list.descs = map_zeroed_pages(top, 1);
+ top += getpagesize();
+ break;
+ }
+ }
+
+ /* The options are fairly straight-forward */
while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) {
switch (c) {
case 'v':
@@ -967,46 +1454,71 @@ int main(int argc, char *argv[])
usage();
}
}
+ /* After the other arguments we expect memory and kernel image name,
+ * followed by command line arguments for the kernel. */
if (optind + 2 > argc)
usage();
- /* We need a console device */
+ /* We always have a console device */
setup_console(&device_list);
- /* First we map /dev/zero over all of guest-physical memory. */
- mem = atoi(argv[optind]) * 1024 * 1024;
+ /* We start by mapping anonymous pages over all of guest-physical
+ * memory range. This fills it with 0, and ensures that the Guest
+ * won't be killed when it tries to access it. */
map_zeroed_pages(0, mem / getpagesize());
/* Now we load the kernel */
start = load_kernel(open_or_die(argv[optind+1], O_RDONLY),
&page_offset);
- /* Write the device descriptors into memory. */
- map_device_descriptors(&device_list, mem);
-
- /* Map the initrd image if requested */
+ /* Map the initrd image if requested (at top of physical memory) */
if (initrd_name) {
initrd_size = load_initrd(initrd_name, mem);
+	/* These are the locations in the Linux boot header where the
+ * start and size of the initrd are expected to be found. */
*(unsigned long *)(boot+0x218) = mem - initrd_size;
*(unsigned long *)(boot+0x21c) = initrd_size;
+ /* The bootloader type 0xFF means "unknown"; that's OK. */
*(unsigned char *)(boot+0x210) = 0xFF;
}
- /* Set up the initial linar pagetables. */
+ /* Set up the initial linear pagetables, starting below the initrd. */
pgdir = setup_pagetables(mem, initrd_size, page_offset);
- /* E820 memory map: ours is a simple, single region. */
+ /* The Linux boot header contains an "E820" memory map: ours is a
+ * simple, single region. */
*(char*)(boot+E820NR) = 1;
*((struct e820entry *)(boot+E820MAP))
= ((struct e820entry) { 0, mem, E820_RAM });
- /* Command line pointer and command line (at 4096) */
+ /* The boot header contains a command line pointer: we put the command
+ * line after the boot header (at address 4096) */
*(void **)(boot + 0x228) = boot + 4096;
concat(boot + 4096, argv+optind+2);
- /* Paravirt type: 1 == lguest */
+
+ /* The guest type value of "1" tells the Guest it's under lguest. */
*(int *)(boot + 0x23c) = 1;
+ /* We tell the kernel to initialize the Guest: this returns the open
+ * /dev/lguest file descriptor. */
lguest_fd = tell_kernel(pgdir, start, page_offset);
+
+ /* We fork off a child process, which wakes the Launcher whenever one
+ * of the input file descriptors needs attention. Otherwise we would
+ * run the Guest until it tries to output something. */
waker_fd = setup_waker(lguest_fd, &device_list);
+ /* Finally, run the Guest. This doesn't return. */
run_guest(lguest_fd, &device_list);
}
+/*:*/
+
+/*M:999
+ * Mastery is done: you now know everything I do.
+ *
+ * But surely you have seen code, features and bugs in your wanderings which
+ * you now yearn to attack? That is the real game, and I look forward to you
+ * patching and forking lguest into the Your-Name-Here-visor.
+ *
+ * Farewell, and good coding!
+ * Rusty Russell.
+ */
diff --git a/Documentation/sched-stats.txt b/Documentation/sched-stats.txt
index 6f72021aae5..442e14d35de 100644
--- a/Documentation/sched-stats.txt
+++ b/Documentation/sched-stats.txt
@@ -1,10 +1,11 @@
-Version 10 of schedstats includes support for sched_domains, which
-hit the mainline kernel in 2.6.7. Some counters make more sense to be
-per-runqueue; other to be per-domain. Note that domains (and their associated
-information) will only be pertinent and available on machines utilizing
-CONFIG_SMP.
-
-In version 10 of schedstat, there is at least one level of domain
+Version 14 of schedstats includes support for sched_domains, which hit the
+mainline kernel in 2.6.20, although it is identical to the stats from version
+12, which was in the kernel from 2.6.13-2.6.19 (version 13 never saw a kernel
+release). Some counters make more sense to be per-runqueue; others to be
+per-domain. Note that domains (and their associated information) will only
+be pertinent and available on machines utilizing CONFIG_SMP.
+
+In version 14 of schedstat, there is at least one level of domain
statistics for each cpu listed, and there may well be more than one
domain. Domains have no particular names in this implementation, but
the highest numbered one typically arbitrates balancing across all the
@@ -27,7 +28,7 @@ to write their own scripts, the fields are described here.
CPU statistics
--------------
-cpu<N> 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28
+cpu<N> 1 2 3 4 5 6 7 8 9 10 11 12
NOTE: In the sched_yield() statistics, the active queue is considered empty
if it has only one process in it, since obviously the process calling
@@ -39,48 +40,20 @@ First four fields are sched_yield() statistics:
3) # of times just the expired queue was empty
4) # of times sched_yield() was called
-Next four are schedule() statistics:
- 5) # of times the active queue had at least one other process on it
- 6) # of times we switched to the expired queue and reused it
- 7) # of times schedule() was called
- 8) # of times schedule() left the processor idle
-
-Next four are active_load_balance() statistics:
- 9) # of times active_load_balance() was called
- 10) # of times active_load_balance() caused this cpu to gain a task
- 11) # of times active_load_balance() caused this cpu to lose a task
- 12) # of times active_load_balance() tried to move a task and failed
-
-Next three are try_to_wake_up() statistics:
- 13) # of times try_to_wake_up() was called
- 14) # of times try_to_wake_up() successfully moved the awakening task
- 15) # of times try_to_wake_up() attempted to move the awakening task
-
-Next two are wake_up_new_task() statistics:
- 16) # of times wake_up_new_task() was called
- 17) # of times wake_up_new_task() successfully moved the new task
-
-Next one is a sched_migrate_task() statistic:
- 18) # of times sched_migrate_task() was called
+Next three are schedule() statistics:
+ 5) # of times we switched to the expired queue and reused it
+ 6) # of times schedule() was called
+ 7) # of times schedule() left the processor idle
-Next one is a sched_balance_exec() statistic:
- 19) # of times sched_balance_exec() was called
+Next two are try_to_wake_up() statistics:
+ 8) # of times try_to_wake_up() was called
+ 9) # of times try_to_wake_up() was called to wake up the local cpu
Next three are statistics describing scheduling latency:
- 20) sum of all time spent running by tasks on this processor (in ms)
- 21) sum of all time spent waiting to run by tasks on this processor (in ms)
- 22) # of tasks (not necessarily unique) given to the processor
-
-The last six are statistics dealing with pull_task():
- 23) # of times pull_task() moved a task to this cpu when newly idle
- 24) # of times pull_task() stole a task from this cpu when another cpu
- was newly idle
- 25) # of times pull_task() moved a task to this cpu when idle
- 26) # of times pull_task() stole a task from this cpu when another cpu
- was idle
- 27) # of times pull_task() moved a task to this cpu when busy
- 28) # of times pull_task() stole a task from this cpu when another cpu
- was busy
+ 10) sum of all time spent running by tasks on this processor (in jiffies)
+ 11) sum of all time spent waiting to run by tasks on this processor (in
+ jiffies)
+ 12) # of timeslices run on this cpu
Domain statistics
@@ -89,65 +62,95 @@ One of these is produced per domain for each cpu described. (Note that if
CONFIG_SMP is not defined, *no* domains are utilized and these lines
will not appear in the output.)
-domain<N> 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
+domain<N> <cpumask> 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36
The first field is a bit mask indicating what cpus this domain operates over.
-The next fifteen are a variety of load_balance() statistics:
-
- 1) # of times in this domain load_balance() was called when the cpu
- was idle
- 2) # of times in this domain load_balance() was called when the cpu
- was busy
- 3) # of times in this domain load_balance() was called when the cpu
- was just becoming idle
- 4) # of times in this domain load_balance() tried to move one or more
- tasks and failed, when the cpu was idle
- 5) # of times in this domain load_balance() tried to move one or more
- tasks and failed, when the cpu was busy
- 6) # of times in this domain load_balance() tried to move one or more
- tasks and failed, when the cpu was just becoming idle
- 7) sum of imbalances discovered (if any) with each call to
- load_balance() in this domain when the cpu was idle
- 8) sum of imbalances discovered (if any) with each call to
- load_balance() in this domain when the cpu was busy
- 9) sum of imbalances discovered (if any) with each call to
- load_balance() in this domain when the cpu was just becoming idle
- 10) # of times in this domain load_balance() was called but did not find
- a busier queue while the cpu was idle
- 11) # of times in this domain load_balance() was called but did not find
- a busier queue while the cpu was busy
- 12) # of times in this domain load_balance() was called but did not find
- a busier queue while the cpu was just becoming idle
- 13) # of times in this domain a busier queue was found while the cpu was
- idle but no busier group was found
- 14) # of times in this domain a busier queue was found while the cpu was
- busy but no busier group was found
- 15) # of times in this domain a busier queue was found while the cpu was
- just becoming idle but no busier group was found
-
-Next two are sched_balance_exec() statistics:
- 17) # of times in this domain sched_balance_exec() successfully pushed
- a task to a new cpu
- 18) # of times in this domain sched_balance_exec() tried but failed to
- push a task to a new cpu
-
-Next two are try_to_wake_up() statistics:
- 19) # of times in this domain try_to_wake_up() tried to move a task based
- on affinity and cache warmth
- 20) # of times in this domain try_to_wake_up() tried to move a task based
- on load balancing
-
+The next 24 are a variety of load_balance() statistics, grouped into types
+of idleness (idle, busy, and newly idle):
+
+ 1) # of times in this domain load_balance() was called when the
+ cpu was idle
+ 2) # of times in this domain load_balance() checked but found
+ the load did not require balancing when the cpu was idle
+ 3) # of times in this domain load_balance() tried to move one or
+ more tasks and failed, when the cpu was idle
+ 4) sum of imbalances discovered (if any) with each call to
+ load_balance() in this domain when the cpu was idle
+ 5) # of times in this domain pull_task() was called when the cpu
+ was idle
+ 6) # of times in this domain pull_task() was called even though
+ the target task was cache-hot when idle
+ 7) # of times in this domain load_balance() was called but did
+ not find a busier queue while the cpu was idle
+ 8) # of times in this domain a busier queue was found while the
+ cpu was idle but no busier group was found
+
+ 9) # of times in this domain load_balance() was called when the
+ cpu was busy
+ 10) # of times in this domain load_balance() checked but found the
+ load did not require balancing when busy
+ 11) # of times in this domain load_balance() tried to move one or
+ more tasks and failed, when the cpu was busy
+ 12) sum of imbalances discovered (if any) with each call to
+ load_balance() in this domain when the cpu was busy
+ 13) # of times in this domain pull_task() was called when busy
+ 14) # of times in this domain pull_task() was called even though the
+ target task was cache-hot when busy
+ 15) # of times in this domain load_balance() was called but did not
+ find a busier queue while the cpu was busy
+ 16) # of times in this domain a busier queue was found while the cpu
+ was busy but no busier group was found
+
+ 17) # of times in this domain load_balance() was called when the
+ cpu was just becoming idle
+ 18) # of times in this domain load_balance() checked but found the
+ load did not require balancing when the cpu was just becoming idle
+ 19) # of times in this domain load_balance() tried to move one or more
+ tasks and failed, when the cpu was just becoming idle
+ 20) sum of imbalances discovered (if any) with each call to
+ load_balance() in this domain when the cpu was just becoming idle
+ 21) # of times in this domain pull_task() was called when newly idle
+ 22) # of times in this domain pull_task() was called even though the
+ target task was cache-hot when just becoming idle
+ 23) # of times in this domain load_balance() was called but did not
+ find a busier queue while the cpu was just becoming idle
+ 24) # of times in this domain a busier queue was found while the cpu
+ was just becoming idle but no busier group was found
+
+ Next three are active_load_balance() statistics:
+ 25) # of times active_load_balance() was called
+ 26) # of times active_load_balance() tried to move a task and failed
+ 27) # of times active_load_balance() successfully moved a task
+
+ Next three are sched_balance_exec() statistics:
+ 28) sbe_cnt is not used
+ 29) sbe_balanced is not used
+ 30) sbe_pushed is not used
+
+ Next three are sched_balance_fork() statistics:
+ 31) sbf_cnt is not used
+ 32) sbf_balanced is not used
+ 33) sbf_pushed is not used
+
+ Next three are try_to_wake_up() statistics:
+ 34) # of times in this domain try_to_wake_up() awoke a task that
+ last ran on a different cpu in this domain
+ 35) # of times in this domain try_to_wake_up() moved a task to the
+ waking cpu because it was cache-cold on its own cpu anyway
+ 36) # of times in this domain try_to_wake_up() started passive balancing
/proc/<pid>/schedstat
----------------
schedstats also adds a new /proc/<pid>/schedstat file to include some of
the same information on a per-process level. There are three fields in
-this file correlating to fields 20, 21, and 22 in the CPU fields, but
-they only apply for that process.
+this file which report, for that process:
+ 1) time spent on the cpu
+ 2) time spent waiting on a runqueue
+ 3) # of timeslices run on this cpu
A program could be easily written to make use of these extra fields to
report on how well a particular process or set of processes is faring
under the scheduler's policies. A simple version of such a program is
available at
- http://eaglet.rain.com/rick/linux/schedstat/v10/latency.c
+ http://eaglet.rain.com/rick/linux/schedstat/v12/latency.c
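
A minimal sketch of such a reader (not the latency.c referenced above), assuming schedstats is enabled; it prints the three fields for a pid given on the command line, or for the reader itself:

#include <stdio.h>

int main(int argc, char **argv)
{
        char path[64];
        unsigned long long run, wait, slices;
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%s/schedstat",
                 argc > 1 ? argv[1] : "self");
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return 1;
        }
        if (fscanf(f, "%llu %llu %llu", &run, &wait, &slices) != 3) {
                fprintf(stderr, "unexpected format in %s\n", path);
                fclose(f);
                return 1;
        }
        printf("run time %llu, wait time %llu, timeslices %llu\n",
               run, wait, slices);
        fclose(f);
        return 0;
}
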
diff --git a/MAINTAINERS b/MAINTAINERS
index 01f222e5187..babd00b0c65 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3339,6 +3339,14 @@ M: thomas@winischhofer.net
W: http://www.winischhofer.at/linuxsisusbvga.shtml
S: Maintained
+SLAB ALLOCATOR
+P: Christoph Lameter
+M: clameter@sgi.com
+P: Pekka Enberg
+M: penberg@cs.helsinki.fi
+L: linux-mm@kvack.org
+S: Maintained
+
SMC91x ETHERNET DRIVER
P: Nicolas Pitre
M: nico@cam.org
diff --git a/Makefile b/Makefile
index 23f81c9f698..dfe3d1610a7 100644
--- a/Makefile
+++ b/Makefile
@@ -299,7 +299,7 @@ CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ -Wbitwise $(C
MODFLAGS = -DMODULE
CFLAGS_MODULE = $(MODFLAGS)
AFLAGS_MODULE = $(MODFLAGS)
-LDFLAGS_MODULE = -r
+LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
diff --git a/arch/alpha/kernel/head.S b/arch/alpha/kernel/head.S
index e27d23c74ba..7ac1f1372c3 100644
--- a/arch/alpha/kernel/head.S
+++ b/arch/alpha/kernel/head.S
@@ -10,6 +10,7 @@
#include <asm/system.h>
#include <asm/asm-offsets.h>
+.section .text.head, "ax"
.globl swapper_pg_dir
.globl _stext
swapper_pg_dir=SWAPPER_PGD
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index ab642a4f08d..9dc1cee4326 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -195,7 +195,7 @@ pcibios_init(void)
subsys_initcall(pcibios_init);
-char * __init
+char * __devinit
pcibios_setup(char *str)
{
return str;
@@ -204,7 +204,7 @@ pcibios_setup(char *str)
#ifdef ALPHA_RESTORE_SRM_SETUP
static struct pdev_srm_saved_conf *srm_saved_configs;
-void __init
+void __devinit
pdev_save_srm_config(struct pci_dev *dev)
{
struct pdev_srm_saved_conf *tmp;
@@ -247,14 +247,14 @@ pci_restore_srm_config(void)
}
#endif
-void __init
+void __devinit
pcibios_fixup_resource(struct resource *res, struct resource *root)
{
res->start += root->start;
res->end += root->start;
}
-void __init
+void __devinit
pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus)
{
/* Update device resources. */
@@ -273,7 +273,7 @@ pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus)
}
}
-void __init
+void __devinit
pcibios_fixup_bus(struct pci_bus *bus)
{
/* Propagate hose info into the subordinate devices. */
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 6b07f89a72c..e1c470752eb 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -58,7 +58,7 @@ size_for_memory(unsigned long max)
return max;
}
-struct pci_iommu_arena *
+struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
unsigned long window_size, unsigned long align)
{
@@ -117,7 +117,7 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
return arena;
}
-struct pci_iommu_arena *
+struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
unsigned long window_size, unsigned long align)
{
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index b28731437c3..ad176441be5 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
@@ -358,7 +359,7 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
/*
* Bring one cpu online.
*/
-static int __devinit
+static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
struct task_struct *idle;
@@ -487,7 +488,7 @@ smp_prepare_boot_cpu(void)
{
}
-int __devinit
+int __cpuinit
__cpu_up(unsigned int cpu)
{
smp_boot_one_cpu(cpu);
@@ -541,7 +542,7 @@ smp_percpu_timer_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs);
}
-int __init
+int
setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index fe13daa5cb2..7af07d3ad5f 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -15,6 +15,7 @@ SECTIONS
_text = .; /* Text and read-only data */
.text : {
+ *(.text.head)
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 85016313bd1..c8569e862c6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -341,6 +341,7 @@ config ARCH_PXA
select ARCH_MTD_XIP
select GENERIC_GPIO
select GENERIC_TIME
+ select GENERIC_CLOCKEVENTS
help
Support for Intel's PXA2XX processor line.
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 5be2e987b84..4de432ec903 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -23,6 +23,7 @@
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
+#include <linux/fs.h>
#include <asm/cpu.h>
#include <asm/elf.h>
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1b76d87fa33..eafbb2b05eb 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -17,6 +17,7 @@
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
@@ -630,7 +631,7 @@ void smp_send_stop(void)
/*
* not supported here
*/
-int __init setup_profiling_timer(unsigned int multiplier)
+int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f2114bcf09d..8ad47619c07 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -352,10 +352,8 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
asmlinkage void do_unexp_fiq (struct pt_regs *regs)
{
-#ifndef CONFIG_IGNORE_FIQ
printk("Hmm. Unexpected FIQ received, but trying to continue\n");
printk("You may have a hardware problem...\n");
-#endif
}
/*
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c
index 64067cd58d3..52ac37d1e23 100644
--- a/arch/arm/mach-sa1100/jornada720.c
+++ b/arch/arm/mach-sa1100/jornada720.c
@@ -3,6 +3,7 @@
*
* HP Jornada720 init code
*
+ * Copyright (C) 2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
* Copyright (C) 2006 Filip Zyzniewski <filip.zyzniewski@tefnet.pl>
* Copyright (C) 2005 Michael Gernoth <michael@gernoth.net>
*
@@ -220,14 +221,16 @@ static struct platform_device sa1111_device = {
.resource = sa1111_resources,
};
-static struct platform_device jornada720_mcu_device = {
- .name = "jornada720_mcu",
- .id = -1,
+static struct platform_device jornada_ssp_device = {
+ .name = "jornada_ssp",
+ .id = -1,
};
static struct platform_device *devices[] __initdata = {
&sa1111_device,
- &jornada720_mcu_device,
+#ifdef CONFIG_SA1100_JORNADA720_SSP
+ &jornada_ssp_device,
+#endif
&s1d13xxxfb_device,
};
@@ -236,19 +239,19 @@ static int __init jornada720_init(void)
int ret = -ENODEV;
if (machine_is_jornada720()) {
- GPDR |= GPIO_GPIO20;
- /* oscillator setup (line 116 of HP's doc) */
+ /* we want to use gpio20 as input to drive the clock of our uart 3 */
+ GPDR |= GPIO_GPIO20; /* Clear gpio20 pin as input */
TUCR = TUCR_VAL;
- /* resetting SA1111 (line 118 of HP's doc) */
- GPSR = GPIO_GPIO20;
+ GPSR = GPIO_GPIO20; /* start gpio20 pin */
udelay(1);
- GPCR = GPIO_GPIO20;
+ GPCR = GPIO_GPIO20; /* stop gpio20 */
udelay(1);
- GPSR = GPIO_GPIO20;
- udelay(20);
+ GPSR = GPIO_GPIO20; /* restart gpio20 */
+ udelay(20); /* give it some time to restart */
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
}
+
return ret;
}
@@ -345,7 +348,7 @@ static void __init jornada720_mach_init(void)
}
MACHINE_START(JORNADA720, "HP Jornada 720")
- /* Maintainer: Michael Gernoth <michael@gernoth.net> */
+ /* Maintainer: Kristoffer Ericson <Kristoffer.Ericson@gmail.com> */
.phys_io = 0x80000000,
.io_pg_offst = ((0xf8000000) >> 18) & 0xfffc,
.boot_params = 0xc0000100,
diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c
index 0a45e1ac8ad..395c39bed7d 100644
--- a/arch/arm/mach-sa1100/jornada720_ssp.c
+++ b/arch/arm/mach-sa1100/jornada720_ssp.c
@@ -161,7 +161,7 @@ static int __init jornada_ssp_probe(struct platform_device *dev)
ret = jornada_ssp_inout(GETBRIGHTNESS);
/* seems like it worked, just feed it with TxDummy to get rid of data */
- if (ret == TxDummy)
+ if (ret == TXDUMMY)
jornada_ssp_inout(TXDUMMY);
jornada_ssp_end();
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index de7e6ef48bd..0360b1f14d1 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -161,11 +161,11 @@ static void mbox_rx_work(struct work_struct *work)
/*
* Mailbox interrupt handler
*/
-static void mbox_txq_fn(request_queue_t * q)
+static void mbox_txq_fn(struct request_queue * q)
{
}
-static void mbox_rxq_fn(request_queue_t * q)
+static void mbox_rxq_fn(struct request_queue * q)
{
}
@@ -180,7 +180,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
struct request *rq;
mbox_msg_t msg;
- request_queue_t *q = mbox->rxq->queue;
+ struct request_queue *q = mbox->rxq->queue;
disable_mbox_irq(mbox, IRQ_RX);
@@ -297,7 +297,7 @@ static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
request_fn_proc * proc,
void (*work) (struct work_struct *))
{
- request_queue_t *q;
+ struct request_queue *q;
struct omap_mbox_queue *mq;
mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index 1b75672dfc8..20841663270 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -24,6 +24,8 @@ machine-$(CONFIG_BF533) := bf533
machine-$(CONFIG_BF534) := bf537
machine-$(CONFIG_BF536) := bf537
machine-$(CONFIG_BF537) := bf537
+machine-$(CONFIG_BF542) := bf548
+machine-$(CONFIG_BF544) := bf548
machine-$(CONFIG_BF548) := bf548
machine-$(CONFIG_BF549) := bf548
machine-$(CONFIG_BF561) := bf561
@@ -36,6 +38,8 @@ cpu-$(CONFIG_BF533) := bf533
cpu-$(CONFIG_BF534) := bf534
cpu-$(CONFIG_BF536) := bf536
cpu-$(CONFIG_BF537) := bf537
+cpu-$(CONFIG_BF542) := bf542
+cpu-$(CONFIG_BF544) := bf544
cpu-$(CONFIG_BF548) := bf548
cpu-$(CONFIG_BF549) := bf549
cpu-$(CONFIG_BF561) := bf561
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index ea48d5b13f1..94d7b119b71 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -160,7 +160,8 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
BUG_ON(direction == DMA_NONE);
for (i = 0; i < nents; i++, sg++) {
- sg->dma_address = page_address(sg->page) + sg->offset;
+ sg->dma_address = (dma_addr_t)(page_address(sg->page) +
+ sg->offset);
invalidate_dcache_range(sg_dma_address(sg),
sg_dma_address(sg) +
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index f59dcee7bae..88f221b89b3 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -402,11 +402,7 @@ void __init setup_arch(char **cmdline_p)
if (l1_length > L1_DATA_A_LENGTH)
panic("L1 data memory overflow\n");
-#ifdef BF561_FAMILY
- _bfin_swrst = bfin_read_SICA_SWRST();
-#else
_bfin_swrst = bfin_read_SWRST();
-#endif
/* Copy atomic sequences to their fixed location, and sanity check that
these locations are the ones that we advertise to userspace. */
@@ -429,6 +425,7 @@ void __init setup_arch(char **cmdline_p)
BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
!= ATOMIC_XOR32 - FIXED_CODE_START);
+ init_exception_vectors();
bf53x_cache_init();
}
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 3909f5b3553..8766bd612b4 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -140,7 +140,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
#ifdef CONFIG_KGDB
# define CHK_DEBUGGER_TRAP() \
do { \
- CHK_DEBUGGER(trapnr, sig, info.si_code, fp); \
+ CHK_DEBUGGER(trapnr, sig, info.si_code, fp, ); \
} while (0)
# define CHK_DEBUGGER_TRAP_MAYBE() \
do { \
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index d06f860f479..fb53780247b 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -32,6 +32,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
+#include <asm/thread_info.h>
OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
@@ -64,8 +65,12 @@ SECTIONS
.data :
{
- . = ALIGN(PAGE_SIZE);
+ /* make sure the init_task is aligned to the
+ * kernel thread size so we can locate the kernel
+ * stack properly and quickly.
+ */
__sdata = .;
+ . = ALIGN(THREAD_SIZE);
*(.data.init_task)
DATA_DATA
CONSTRUCTORS
@@ -73,14 +78,14 @@ SECTIONS
. = ALIGN(32);
*(.data.cacheline_aligned)
- . = ALIGN(PAGE_SIZE);
+ . = ALIGN(THREAD_SIZE);
__edata = .;
}
- . = ALIGN(PAGE_SIZE);
___init_begin = .;
.init :
{
+ . = ALIGN(PAGE_SIZE);
__sinittext = .;
*(.init.text)
__einittext = .;
@@ -153,10 +158,9 @@ SECTIONS
__ebss_b_l1 = .;
}
- . = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
- ___init_end = ALIGN(PAGE_SIZE);
+ ___init_end = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
- .bss ___init_end :
+ .bss LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1) :
{
. = ALIGN(4);
___bss_start = .;
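The comment in the .data hunk above alludes to the standard trick of locating the running task's thread_info by masking the stack pointer, which only works if every kernel stack, including init's, starts on a THREAD_SIZE boundary; hence the ALIGN(THREAD_SIZE) around .data.init_task. A simplified, architecture-neutral sketch of that lookup, not part of the patch (the real helper is per-architecture assembly or inline asm):

#include <linux/thread_info.h>	/* struct thread_info, THREAD_SIZE */

static inline struct thread_info *example_current_thread_info(void)
{
	unsigned long sp;

	/* any address inside the current kernel stack will do */
	sp = (unsigned long)&sp;

	/* rounding down to a THREAD_SIZE boundary lands on thread_info */
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}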
diff --git a/arch/blackfin/mach-bf561/head.S b/arch/blackfin/mach-bf561/head.S
index 2f08bcb2dde..38650a62898 100644
--- a/arch/blackfin/mach-bf561/head.S
+++ b/arch/blackfin/mach-bf561/head.S
@@ -440,15 +440,15 @@ ENTRY(_bfin_reset)
SSYNC;
/* make sure SYSCR is set to use BMODE */
- P0.h = hi(SICA_SYSCR);
- P0.l = lo(SICA_SYSCR);
- R0.l = 0x20;
+ P0.h = hi(SYSCR);
+ P0.l = lo(SYSCR);
+ R0.l = 0x20; /* on BF561, disable core b */
W[P0] = R0.l;
SSYNC;
/* issue a system soft reset */
- P1.h = hi(SICA_SWRST);
- P1.l = lo(SICA_SWRST);
+ P1.h = hi(SWRST);
+ P1.l = lo(SWRST);
R1.l = 0x0007;
W[P1] = R1;
SSYNC;
diff --git a/arch/blackfin/mach-common/cacheinit.S b/arch/blackfin/mach-common/cacheinit.S
index 9d475623b72..5be6b975ae4 100644
--- a/arch/blackfin/mach-common/cacheinit.S
+++ b/arch/blackfin/mach-common/cacheinit.S
@@ -60,6 +60,9 @@ ENDPROC(_bfin_write_IMEM_CONTROL)
#if defined(CONFIG_BLKFIN_DCACHE)
ENTRY(_bfin_write_DMEM_CONTROL)
+ P0.l = (DMEM_CONTROL & 0xFFFF);
+ P0.h = (DMEM_CONTROL >> 16);
+
CLI R1;
SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
.align 8;
diff --git a/arch/blackfin/mach-common/ints-priority-dc.c b/arch/blackfin/mach-common/ints-priority-dc.c
index 6b9fd03ce83..660f881b620 100644
--- a/arch/blackfin/mach-common/ints-priority-dc.c
+++ b/arch/blackfin/mach-common/ints-priority-dc.c
@@ -358,26 +358,10 @@ static void bf561_demux_gpio_irq(unsigned int inta_irq,
#endif /* CONFIG_IRQCHIP_DEMUX_GPIO */
-/*
- * This function should be called during kernel startup to initialize
- * the BFin IRQ handling routines.
- */
-int __init init_arch_irq(void)
+void __init init_exception_vectors(void)
{
- int irq;
- unsigned long ilat = 0;
- /* Disable all the peripheral intrs - page 4-29 HW Ref manual */
- bfin_write_SICA_IMASK0(SIC_UNMASK_ALL);
- bfin_write_SICA_IMASK1(SIC_UNMASK_ALL);
SSYNC();
- bfin_write_SICA_IWR0(IWR_ENABLE_ALL);
- bfin_write_SICA_IWR1(IWR_ENABLE_ALL);
-
- local_irq_disable();
-
- init_exception_buff();
-
#ifndef CONFIG_KGDB
bfin_write_EVT0(evt_emulation);
#endif
@@ -395,6 +379,27 @@ int __init init_arch_irq(void)
bfin_write_EVT14(evt14_softirq);
bfin_write_EVT15(evt_system_call);
CSYNC();
+}
+
+/*
+ * This function should be called during kernel startup to initialize
+ * the BFin IRQ handling routines.
+ */
+int __init init_arch_irq(void)
+{
+ int irq;
+ unsigned long ilat = 0;
+ /* Disable all the peripheral intrs - page 4-29 HW Ref manual */
+ bfin_write_SICA_IMASK0(SIC_UNMASK_ALL);
+ bfin_write_SICA_IMASK1(SIC_UNMASK_ALL);
+ SSYNC();
+
+ bfin_write_SICA_IWR0(IWR_ENABLE_ALL);
+ bfin_write_SICA_IWR1(IWR_ENABLE_ALL);
+
+ local_irq_disable();
+
+ init_exception_buff();
for (irq = 0; irq <= SYS_IRQS; irq++) {
if (irq <= IRQ_CORETMR)
diff --git a/arch/blackfin/mach-common/ints-priority-sc.c b/arch/blackfin/mach-common/ints-priority-sc.c
index 28a878c3577..4708023fe71 100644
--- a/arch/blackfin/mach-common/ints-priority-sc.c
+++ b/arch/blackfin/mach-common/ints-priority-sc.c
@@ -579,8 +579,12 @@ static unsigned int bfin_gpio_irq_startup(unsigned int irq)
u16 gpionr = irq - IRQ_PA0;
u8 pint_val = irq2pint_lut[irq - SYS_IRQS];
- if (pint_val == IRQ_NOT_AVAIL)
+ if (pint_val == IRQ_NOT_AVAIL) {
+ printk(KERN_ERR
+ "GPIO IRQ %d :Not in PINT Assign table "
+ "Reconfigure Interrupt to Port Assignemt\n", irq);
return -ENODEV;
+ }
if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) {
ret = gpio_request(gpionr, NULL);
@@ -713,6 +717,29 @@ static void bfin_demux_gpio_irq(unsigned int intb_irq,
}
#endif /* CONFIG_IRQCHIP_DEMUX_GPIO */
+void __init init_exception_vectors(void)
+{
+ SSYNC();
+
+#ifndef CONFIG_KGDB
+ bfin_write_EVT0(evt_emulation);
+#endif
+ bfin_write_EVT2(evt_evt2);
+ bfin_write_EVT3(trap);
+ bfin_write_EVT5(evt_ivhw);
+ bfin_write_EVT6(evt_timer);
+ bfin_write_EVT7(evt_evt7);
+ bfin_write_EVT8(evt_evt8);
+ bfin_write_EVT9(evt_evt9);
+ bfin_write_EVT10(evt_evt10);
+ bfin_write_EVT11(evt_evt11);
+ bfin_write_EVT12(evt_evt12);
+ bfin_write_EVT13(evt_evt13);
+ bfin_write_EVT14(evt14_softirq);
+ bfin_write_EVT15(evt_system_call);
+ CSYNC();
+}
+
/*
* This function should be called during kernel startup to initialize
* the BFin IRQ handling routines.
@@ -733,29 +760,10 @@ int __init init_arch_irq(void)
bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
bfin_write_SIC_IWR(IWR_ENABLE_ALL);
#endif
-
SSYNC();
local_irq_disable();
-#ifndef CONFIG_KGDB
- bfin_write_EVT0(evt_emulation);
-#endif
- bfin_write_EVT2(evt_evt2);
- bfin_write_EVT3(trap);
- bfin_write_EVT5(evt_ivhw);
- bfin_write_EVT6(evt_timer);
- bfin_write_EVT7(evt_evt7);
- bfin_write_EVT8(evt_evt8);
- bfin_write_EVT9(evt_evt9);
- bfin_write_EVT10(evt_evt10);
- bfin_write_EVT11(evt_evt11);
- bfin_write_EVT12(evt_evt12);
- bfin_write_EVT13(evt_evt13);
- bfin_write_EVT14(evt14_softirq);
- bfin_write_EVT15(evt_system_call);
- CSYNC();
-
#if defined(CONFIG_IRQCHIP_DEMUX_GPIO) && defined(CONFIG_BF54x)
#ifdef CONFIG_PINTx_REASSIGN
pint[0]->assign = CONFIG_PINT0_ASSIGN;
diff --git a/arch/frv/kernel/sys_frv.c b/arch/frv/kernel/sys_frv.c
index 26b3df32b9a..6fbe2665c57 100644
--- a/arch/frv/kernel/sys_frv.c
+++ b/arch/frv/kernel/sys_frv.c
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index b31c0802e1c..f03531eacdf 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -36,11 +36,11 @@ config DEBUG_STACK_USAGE
This option will slow down process creation somewhat.
comment "Page alloc debug is incompatible with Software Suspend on i386"
- depends on DEBUG_KERNEL && SOFTWARE_SUSPEND
+ depends on DEBUG_KERNEL && HIBERNATION
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
- depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND && !HUGETLBFS
+ depends on DEBUG_KERNEL && !HIBERNATION && !HUGETLBFS
help
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
diff --git a/arch/i386/boot/apm.c b/arch/i386/boot/apm.c
index a34087c370c..eab50c55a3a 100644
--- a/arch/i386/boot/apm.c
+++ b/arch/i386/boot/apm.c
@@ -40,14 +40,15 @@ int query_apm_bios(void)
if (bx != 0x504d) /* "PM" signature */
return -1;
- if (cx & 0x02) /* 32 bits supported? */
+ if (!(cx & 0x02)) /* 32 bits supported? */
return -1;
/* Disconnect first, just in case */
ax = 0x5304;
+ bx = 0;
asm volatile("pushl %%ebp ; int $0x15 ; popl %%ebp"
- : "+a" (ax)
- : : "ebx", "ecx", "edx", "esi", "edi");
+ : "+a" (ax), "+b" (bx)
+ : : "ecx", "edx", "esi", "edi");
/* Paranoia */
ebx = esi = 0;
diff --git a/arch/i386/boot/main.c b/arch/i386/boot/main.c
index 7f01f96c4fb..0eeef3989a1 100644
--- a/arch/i386/boot/main.c
+++ b/arch/i386/boot/main.c
@@ -73,15 +73,15 @@ static void keyboard_set_repeat(void)
}
/*
- * Get Intel SpeedStep IST information.
+ * Get Intel SpeedStep (IST) information.
*/
-static void query_speedstep_ist(void)
+static void query_ist(void)
{
asm("int $0x15"
- : "=a" (boot_params.speedstep_info[0]),
- "=b" (boot_params.speedstep_info[1]),
- "=c" (boot_params.speedstep_info[2]),
- "=d" (boot_params.speedstep_info[3])
+ : "=a" (boot_params.ist_info.signature),
+ "=b" (boot_params.ist_info.command),
+ "=c" (boot_params.ist_info.event),
+ "=d" (boot_params.ist_info.perf_level)
: "a" (0x0000e980), /* IST Support */
"d" (0x47534943)); /* Request value */
}
@@ -144,8 +144,8 @@ void main(void)
query_voyager();
#endif
- /* Query SpeedStep IST information */
- query_speedstep_ist();
+ /* Query Intel SpeedStep (IST) information */
+ query_ist();
/* Query APM information */
#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index c3750c2c411..c85598acb8f 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -430,22 +430,12 @@ void __init alternative_instructions(void)
* And on the local CPU you need to be protected again NMI or MCE handlers
* seeing an inconsistent instruction while you patch.
*/
-void __kprobes text_poke(void *oaddr, unsigned char *opcode, int len)
+void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
{
- u8 *addr = oaddr;
- if (!pte_write(*lookup_address((unsigned long)addr))) {
- struct page *p[2] = { virt_to_page(addr), virt_to_page(addr+PAGE_SIZE) };
- addr = vmap(p, 2, VM_MAP, PAGE_KERNEL);
- if (!addr)
- return;
- addr += ((unsigned long)oaddr) % PAGE_SIZE;
- }
memcpy(addr, opcode, len);
sync_core();
/* Not strictly needed, but can speed CPU recovery up. Ignore cross cacheline
case. */
if (cpu_has_clflush)
- asm("clflush (%0) " :: "r" (oaddr) : "memory");
- if (addr != oaddr)
- vunmap(addr);
+ asm("clflush (%0) " :: "r" (addr) : "memory");
}
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index 094118ba00d..d8c6f132dc7 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -92,7 +92,7 @@ config X86_POWERNOW_K8
config X86_POWERNOW_K8_ACPI
bool "ACPI Support"
select ACPI_PROCESSOR
- depends on X86_POWERNOW_K8
+ depends on ACPI && X86_POWERNOW_K8
default y
help
This provides access to the K8s Processor Performance States via ACPI.
diff --git a/arch/i386/kernel/e820.c b/arch/i386/kernel/e820.c
index e60cddbc4cf..3c86b979a40 100644
--- a/arch/i386/kernel/e820.c
+++ b/arch/i386/kernel/e820.c
@@ -321,7 +321,7 @@ static int __init request_standard_resources(void)
subsys_initcall(request_standard_resources);
-#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
+#if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
/**
* e820_mark_nosave_regions - Find the ranges of physical addresses that do not
* correspond to e820 RAM areas and mark the corresponding pages as nosave for
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index d865d041bea..09cf7811035 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -82,6 +82,7 @@
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/firmware.h>
diff --git a/arch/i386/kernel/sys_i386.c b/arch/i386/kernel/sys_i386.c
index e5dcb937901..42147304de8 100644
--- a/arch/i386/kernel/sys_i386.c
+++ b/arch/i386/kernel/sys_i386.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 6deb159d08e..4eb2e408764 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <linux/elf.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/module.h>
#include <asm/cpufeature.h>
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index c3b9905af2d..730a5b177b1 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -432,7 +432,7 @@ static void __init pagetable_init (void)
paravirt_pagetable_setup_done(pgd_base);
}
-#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
+#if defined(CONFIG_HIBERNATION) || defined(CONFIG_ACPI)
/*
* Swap suspend & friends need this for resume because things like the intel-agp
* driver might have split up a kernel 4MB mapping.
@@ -800,9 +800,17 @@ void mark_rodata_ro(void)
unsigned long start = PFN_ALIGN(_text);
unsigned long size = PFN_ALIGN(_etext) - start;
- change_page_attr(virt_to_page(start),
- size >> PAGE_SHIFT, PAGE_KERNEL_RX);
- printk("Write protecting the kernel text: %luk\n", size >> 10);
+#ifndef CONFIG_KPROBES
+#ifdef CONFIG_HOTPLUG_CPU
+ /* It must still be possible to apply SMP alternatives. */
+ if (num_possible_cpus() <= 1)
+#endif
+ {
+ change_page_attr(virt_to_page(start),
+ size >> PAGE_SHIFT, PAGE_KERNEL_RX);
+ printk("Write protecting the kernel text: %luk\n", size >> 10);
+ }
+#endif
start += size;
size = (unsigned long)__end_rodata - start;
change_page_attr(virt_to_page(start),
diff --git a/arch/i386/power/Makefile b/arch/i386/power/Makefile
index 2de7bbf03cd..d764ec95006 100644
--- a/arch/i386/power/Makefile
+++ b/arch/i386/power/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_PM) += cpu.o
-obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o suspend.o
+obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 36c7b9682aa..21aa4fc5f8e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -425,6 +425,9 @@ config COMPAT
depends on IA32_SUPPORT
default y
+config COMPAT_FOR_U64_ALIGNMENT
+ def_bool COMPAT
+
config IA64_MCA_RECOVERY
tristate "MCA recovery from errors other than TLB."
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index cd4adf52f17..e980e7aa230 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2015,9 +2015,14 @@ acpi_sba_ioc_add(struct acpi_device *device)
return 0;
}
+static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
+ {"HWP0001", 0},
+ {"HWP0004", 0},
+ {"", 0},
+};
static struct acpi_driver acpi_sba_ioc_driver = {
.name = "IOC IOMMU Driver",
- .ids = "HWP0001,HWP0004",
+ .ids = hp_ioc_iommu_device_ids,
.ops = {
.add = acpi_sba_ioc_add,
},
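The sba_iommu.c hunk above reflects the change of struct acpi_driver's .ids field from a comma-separated HID string to a NULL-terminated table of struct acpi_device_id. A hedged sketch of the new form, not part of the patch; the driver name and callback are placeholders, while the HIDs are the ones from the hunk:

#include <linux/acpi.h>

static const struct acpi_device_id example_device_ids[] = {
	{"HWP0001", 0},
	{"HWP0004", 0},
	{"", 0},			/* table terminator */
};

static int example_acpi_add(struct acpi_device *device)
{
	return 0;			/* probe/bind work would go here */
}

static struct acpi_driver example_acpi_driver = {
	.name	= "example IOC driver",
	.ids	= example_device_ids,
	.ops	= {
		.add	= example_acpi_add,
	},
};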
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index decdf6e1e5d..e62694f8ef7 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -101,7 +101,7 @@ simscsi_interrupt (unsigned long val)
{
struct scsi_cmnd *sc;
- while ((sc = queue[rd].sc) != 0) {
+ while ((sc = queue[rd].sc) != NULL) {
atomic_dec(&num_reqs);
queue[rd].sc = NULL;
if (DBG)
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index e13a1a1db4b..d1d50cd1c38 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,11 +249,11 @@ ia32_init (void)
#if PAGE_SHIFT > IA32_PAGE_SHIFT
{
- extern struct kmem_cache *partial_page_cachep;
+ extern struct kmem_cache *ia64_partial_page_cachep;
- partial_page_cachep = kmem_cache_create("partial_page_cache",
- sizeof(struct partial_page),
- 0, SLAB_PANIC, NULL);
+ ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
+ sizeof(struct ia64_partial_page),
+ 0, SLAB_PANIC, NULL);
}
#endif
return 0;
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index cfa0bc0026b..466bbcb138b 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -25,8 +25,8 @@
* partially mapped pages provide precise accounting of which 4k sub pages
* are mapped and which ones are not, thereby improving IA-32 compatibility.
*/
-struct partial_page {
- struct partial_page *next; /* linked list, sorted by address */
+struct ia64_partial_page {
+ struct ia64_partial_page *next; /* linked list, sorted by address */
struct rb_node pp_rb;
/* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64
* should suffice.*/
@@ -34,17 +34,17 @@ struct partial_page {
unsigned int base;
};
-struct partial_page_list {
- struct partial_page *pp_head; /* list head, points to the lowest
+struct ia64_partial_page_list {
+ struct ia64_partial_page *pp_head; /* list head, points to the lowest
* addressed partial page */
struct rb_root ppl_rb;
- struct partial_page *pp_hint; /* pp_hint->next is the last
+ struct ia64_partial_page *pp_hint; /* pp_hint->next is the last
* accessed partial page */
atomic_t pp_count; /* reference count */
};
#if PAGE_SHIFT > IA32_PAGE_SHIFT
-struct partial_page_list* ia32_init_pp_list (void);
+struct ia64_partial_page_list* ia32_init_pp_list (void);
#else
# define ia32_init_pp_list() 0
#endif
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 0afb4fe7c35..af10462d44d 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -253,17 +253,17 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
return ret;
}
-/* SLAB cache for partial_page structures */
-struct kmem_cache *partial_page_cachep;
+/* SLAB cache for ia64_partial_page structures */
+struct kmem_cache *ia64_partial_page_cachep;
/*
- * init partial_page_list.
+ * init ia64_partial_page_list.
* return 0 means kmalloc fail.
*/
-struct partial_page_list*
+struct ia64_partial_page_list*
ia32_init_pp_list(void)
{
- struct partial_page_list *p;
+ struct ia64_partial_page_list *p;
if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
return p;
@@ -280,12 +280,12 @@ ia32_init_pp_list(void)
* Else, return 0 and provide @pprev, @rb_link, @rb_parent to
* be used by later __ia32_insert_pp().
*/
-static struct partial_page *
-__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
- struct partial_page **pprev, struct rb_node ***rb_link,
+static struct ia64_partial_page *
+__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
+ struct ia64_partial_page **pprev, struct rb_node ***rb_link,
struct rb_node **rb_parent)
{
- struct partial_page *pp;
+ struct ia64_partial_page *pp;
struct rb_node **__rb_link, *__rb_parent, *rb_prev;
pp = ppl->pp_hint;
@@ -297,7 +297,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
while (*__rb_link) {
__rb_parent = *__rb_link;
- pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
+ pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);
if (pp->base == start) {
ppl->pp_hint = pp;
@@ -314,7 +314,7 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
*rb_parent = __rb_parent;
*pprev = NULL;
if (rb_prev)
- *pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
+ *pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
return NULL;
}
@@ -322,9 +322,9 @@ __ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
* insert @pp into @ppl.
*/
static void
-__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
- struct partial_page *prev, struct rb_node **rb_link,
- struct rb_node *rb_parent)
+__ia32_insert_pp(struct ia64_partial_page_list *ppl,
+ struct ia64_partial_page *pp, struct ia64_partial_page *prev,
+ struct rb_node **rb_link, struct rb_node *rb_parent)
{
/* link list */
if (prev) {
@@ -334,7 +334,7 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
ppl->pp_head = pp;
if (rb_parent)
pp->next = rb_entry(rb_parent,
- struct partial_page, pp_rb);
+ struct ia64_partial_page, pp_rb);
else
pp->next = NULL;
}
@@ -350,8 +350,8 @@ __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
* delete @pp from partial page list @ppl.
*/
static void
-__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
- struct partial_page *prev)
+__ia32_delete_pp(struct ia64_partial_page_list *ppl,
+ struct ia64_partial_page *pp, struct ia64_partial_page *prev)
{
if (prev) {
prev->next = pp->next;
@@ -363,15 +363,15 @@ __ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
ppl->pp_hint = pp->next;
}
rb_erase(&pp->pp_rb, &ppl->ppl_rb);
- kmem_cache_free(partial_page_cachep, pp);
+ kmem_cache_free(ia64_partial_page_cachep, pp);
}
-static struct partial_page *
-__pp_prev(struct partial_page *pp)
+static struct ia64_partial_page *
+__pp_prev(struct ia64_partial_page *pp)
{
struct rb_node *prev = rb_prev(&pp->pp_rb);
if (prev)
- return rb_entry(prev, struct partial_page, pp_rb);
+ return rb_entry(prev, struct ia64_partial_page, pp_rb);
else
return NULL;
}
@@ -383,7 +383,7 @@ __pp_prev(struct partial_page *pp)
static void
__ia32_delete_pp_range(unsigned int start, unsigned int end)
{
- struct partial_page *pp, *prev;
+ struct ia64_partial_page *pp, *prev;
struct rb_node **rb_link, *rb_parent;
if (start >= end)
@@ -401,7 +401,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
}
while (pp && pp->base < end) {
- struct partial_page *tmp = pp->next;
+ struct ia64_partial_page *tmp = pp->next;
__ia32_delete_pp(current->thread.ppl, pp, prev);
pp = tmp;
}
@@ -414,7 +414,7 @@ __ia32_delete_pp_range(unsigned int start, unsigned int end)
static int
__ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
- struct partial_page *pp, *prev;
+ struct ia64_partial_page *pp, *prev;
struct rb_node ** rb_link, *rb_parent;
unsigned int pstart, start_bit, end_bit, i;
@@ -450,8 +450,8 @@ __ia32_set_pp(unsigned int start, unsigned int end, int flags)
return 0;
}
- /* new a partial_page */
- pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+ /* new a ia64_partial_page */
+ pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
if (!pp)
return -ENOMEM;
pp->base = pstart;
@@ -504,7 +504,7 @@ ia32_set_pp(unsigned int start, unsigned int end, int flags)
static int
__ia32_unset_pp(unsigned int start, unsigned int end)
{
- struct partial_page *pp, *prev;
+ struct ia64_partial_page *pp, *prev;
struct rb_node ** rb_link, *rb_parent;
unsigned int pstart, start_bit, end_bit, i;
struct vm_area_struct *vma;
@@ -532,8 +532,8 @@ __ia32_unset_pp(unsigned int start, unsigned int end)
return -ENOMEM;
}
- /* new a partial_page */
- pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+ /* new a ia64_partial_page */
+ pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
if (!pp)
return -ENOMEM;
pp->base = pstart;
@@ -605,7 +605,7 @@ ia32_unset_pp(unsigned int *startp, unsigned int *endp)
static int
__ia32_compare_pp(unsigned int start, unsigned int end)
{
- struct partial_page *pp, *prev;
+ struct ia64_partial_page *pp, *prev;
struct rb_node ** rb_link, *rb_parent;
unsigned int pstart, start_bit, end_bit, size;
unsigned int first_bit, next_zero_bit; /* the first range in bitmap */
@@ -682,13 +682,13 @@ ia32_compare_pp(unsigned int *startp, unsigned int *endp)
}
static void
-__ia32_drop_pp_list(struct partial_page_list *ppl)
+__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
{
- struct partial_page *pp = ppl->pp_head;
+ struct ia64_partial_page *pp = ppl->pp_head;
while (pp) {
- struct partial_page *next = pp->next;
- kmem_cache_free(partial_page_cachep, pp);
+ struct ia64_partial_page *next = pp->next;
+ kmem_cache_free(ia64_partial_page_cachep, pp);
pp = next;
}
@@ -696,9 +696,9 @@ __ia32_drop_pp_list(struct partial_page_list *ppl)
}
void
-ia32_drop_partial_page_list(struct task_struct *task)
+ia32_drop_ia64_partial_page_list(struct task_struct *task)
{
- struct partial_page_list* ppl = task->thread.ppl;
+ struct ia64_partial_page_list* ppl = task->thread.ppl;
if (ppl && atomic_dec_and_test(&ppl->pp_count))
__ia32_drop_pp_list(ppl);
@@ -708,9 +708,9 @@ ia32_drop_partial_page_list(struct task_struct *task)
* Copy current->thread.ppl to ppl (already initialized).
*/
static int
-__ia32_copy_pp_list(struct partial_page_list *ppl)
+__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
{
- struct partial_page *pp, *tmp, *prev;
+ struct ia64_partial_page *pp, *tmp, *prev;
struct rb_node **rb_link, *rb_parent;
ppl->pp_head = NULL;
@@ -721,7 +721,7 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
prev = NULL;
for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
- tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+ tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
*tmp = *pp;
@@ -734,7 +734,8 @@ __ia32_copy_pp_list(struct partial_page_list *ppl)
}
int
-ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
+ia32_copy_ia64_partial_page_list(struct task_struct *p,
+ unsigned long clone_flags)
{
int retval = 0;
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 103dd8edda7..3d45d24a9d6 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -67,6 +67,8 @@ EXPORT_SYMBOL(pm_power_off);
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;
+unsigned long acpi_wakeup_address = 0;
+
const char __init *
acpi_get_sysname(void)
{
@@ -739,16 +741,15 @@ int __init acpi_boot_init(void)
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
- int vector;
+ int tmp;
if (has_8259 && gsi < 16)
*irq = isa_irq_to_vector(gsi);
else {
- vector = gsi_to_vector(gsi);
- if (vector == -1)
+ tmp = gsi_to_irq(gsi);
+ if (tmp == -1)
return -1;
-
- *irq = vector;
+ *irq = tmp;
}
return 0;
}
@@ -986,4 +987,21 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
EXPORT_SYMBOL(acpi_unregister_ioapic);
+/*
+ * acpi_save_state_mem() - save kernel state
+ *
+ * TBD when IA64 starts to support suspend...
+ */
+int acpi_save_state_mem(void) { return 0; }
+
+/*
+ * acpi_restore_state()
+ */
+void acpi_restore_state_mem(void) {}
+
+/*
+ * do_suspend_lowlevel()
+ */
+void do_suspend_lowlevel(void) {}
+
#endif /* CONFIG_ACPI */
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 2fd96d9062a..790ef0d87e1 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -38,11 +38,11 @@ static struct clocksource clocksource_cyclone = {
int __init init_cyclone_clock(void)
{
- u64* reg;
+ u64 __iomem *reg;
u64 base; /* saved cyclone base address */
u64 offset; /* offset from pageaddr to cyclone_timer register */
int i;
- u32* volatile cyclone_timer; /* Cyclone MPMC0 register */
+ u32 __iomem *cyclone_timer; /* Cyclone MPMC0 register */
if (!use_cyclone)
return 0;
@@ -51,7 +51,7 @@ int __init init_cyclone_clock(void)
/* find base address */
offset = (CYCLONE_CBAR_ADDR);
- reg = (u64*)ioremap_nocache(offset, sizeof(u64));
+ reg = ioremap_nocache(offset, sizeof(u64));
if(!reg){
printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
" register.\n");
@@ -69,7 +69,7 @@ int __init init_cyclone_clock(void)
/* setup PMCC */
offset = (base + CYCLONE_PMCC_OFFSET);
- reg = (u64*)ioremap_nocache(offset, sizeof(u64));
+ reg = ioremap_nocache(offset, sizeof(u64));
if(!reg){
printk(KERN_ERR "Summit chipset: Could not find valid PMCC"
" register.\n");
@@ -81,7 +81,7 @@ int __init init_cyclone_clock(void)
/* setup MPCS */
offset = (base + CYCLONE_MPCS_OFFSET);
- reg = (u64*)ioremap_nocache(offset, sizeof(u64));
+ reg = ioremap_nocache(offset, sizeof(u64));
if(!reg){
printk(KERN_ERR "Summit chipset: Could not find valid MPCS"
" register.\n");
@@ -93,7 +93,7 @@ int __init init_cyclone_clock(void)
/* map in cyclone_timer */
offset = (base + CYCLONE_MPMC_OFFSET);
- cyclone_timer = (u32*)ioremap_nocache(offset, sizeof(u32));
+ cyclone_timer = ioremap_nocache(offset, sizeof(u32));
if(!cyclone_timer){
printk(KERN_ERR "Summit chipset: Could not find valid MPMC"
" register.\n");
@@ -110,7 +110,7 @@ int __init init_cyclone_clock(void)
printk(KERN_ERR "Summit chipset: Counter not counting!"
" DISABLED\n");
iounmap(cyclone_timer);
- cyclone_timer = 0;
+ cyclone_timer = NULL;
use_cyclone = 0;
return -ENODEV;
}
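The cyclone.c hunks keep the __iomem annotation on ioremap_nocache() return values so sparse can flag direct dereferences, and they treat such pointers as NULL rather than 0. A small sketch of the intended usage, not part of the patch; the physical address is a placeholder and readq() is assumed to exist as on 64-bit platforms such as ia64:

#include <linux/io.h>
#include <linux/types.h>

static u64 example_read_mmio(unsigned long phys)
{
	u64 __iomem *reg;
	u64 val;

	reg = ioremap_nocache(phys, sizeof(u64));	/* keeps __iomem type */
	if (!reg)
		return 0;

	val = readq(reg);	/* accessors take the __iomem pointer */
	iounmap(reg);
	return val;
}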
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 44d540efa6d..4e5e27540e2 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -178,7 +178,7 @@ swapper_pg_dir:
halt_msg:
stringz "Halting kernel\n"
- .text
+ .section .text.head,"ax"
.global start_ap
@@ -392,6 +392,8 @@ self: hint @pause
br.sptk.many self // endless loop
END(_start)
+ .text
+
GLOBAL_ENTRY(ia64_save_debug_regs)
alloc r16=ar.pfs,1,0,0,0
mov r20=ar.lc // preserve ar.lc
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index b69c397ed1b..bc8efcad28b 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 91797c11116..9386b955eed 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -85,8 +85,8 @@ DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
};
-static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
- [0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
+static cpumask_t vector_table[IA64_NUM_VECTORS] = {
+ [0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};
static int irq_status[NR_IRQS] = {
@@ -123,17 +123,18 @@ static inline int find_unassigned_irq(void)
static inline int find_unassigned_vector(cpumask_t domain)
{
cpumask_t mask;
- int pos;
+ int pos, vector;
cpus_and(mask, domain, cpu_online_map);
if (cpus_empty(mask))
return -EINVAL;
for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
- cpus_and(mask, domain, vector_table[pos]);
+ vector = IA64_FIRST_DEVICE_VECTOR + pos;
+ cpus_and(mask, domain, vector_table[vector]);
if (!cpus_empty(mask))
continue;
- return IA64_FIRST_DEVICE_VECTOR + pos;
+ return vector;
}
return -ENOSPC;
}
@@ -141,9 +142,12 @@ static inline int find_unassigned_vector(cpumask_t domain)
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
cpumask_t mask;
- int cpu, pos;
+ int cpu;
struct irq_cfg *cfg = &irq_cfg[irq];
+ BUG_ON((unsigned)irq >= NR_IRQS);
+ BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);
+
cpus_and(mask, domain, cpu_online_map);
if (cpus_empty(mask))
return -EINVAL;
@@ -156,8 +160,7 @@ static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
cfg->vector = vector;
cfg->domain = domain;
irq_status[irq] = IRQ_USED;
- pos = vector - IA64_FIRST_DEVICE_VECTOR;
- cpus_or(vector_table[pos], vector_table[pos], domain);
+ cpus_or(vector_table[vector], vector_table[vector], domain);
return 0;
}
@@ -174,7 +177,7 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain)
static void __clear_irq_vector(int irq)
{
- int vector, cpu, pos;
+ int vector, cpu;
cpumask_t mask;
cpumask_t domain;
struct irq_cfg *cfg = &irq_cfg[irq];
@@ -189,8 +192,7 @@ static void __clear_irq_vector(int irq)
cfg->vector = IRQ_VECTOR_UNASSIGNED;
cfg->domain = CPU_MASK_NONE;
irq_status[irq] = IRQ_UNUSED;
- pos = vector - IA64_FIRST_DEVICE_VECTOR;
- cpus_andnot(vector_table[pos], vector_table[pos], domain);
+ cpus_andnot(vector_table[vector], vector_table[vector], domain);
}
static void clear_irq_vector(int irq)
@@ -212,9 +214,6 @@ assign_irq_vector (int irq)
vector = -ENOSPC;
spin_lock_irqsave(&vector_lock, flags);
- if (irq < 0) {
- goto out;
- }
for_each_online_cpu(cpu) {
domain = vector_allocation_domain(cpu);
vector = find_unassigned_vector(domain);
@@ -223,6 +222,8 @@ assign_irq_vector (int irq)
}
if (vector < 0)
goto out;
+ if (irq == AUTO_ASSIGN)
+ irq = vector;
BUG_ON(__bind_irq_vector(irq, vector, domain));
out:
spin_unlock_irqrestore(&vector_lock, flags);
@@ -288,7 +289,7 @@ static int __init parse_vector_domain(char *arg)
vector_domain_type = VECTOR_DOMAIN_PERCPU;
no_int_routing = 1;
}
- return 1;
+ return 0;
}
early_param("vector", parse_vector_domain);
#else
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 13df337508e..7ccb228ceed 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -13,14 +13,6 @@
struct ia64_machine_vector ia64_mv;
EXPORT_SYMBOL(ia64_mv);
-static __initdata const char *mvec_name;
-static __init int setup_mvec(char *s)
-{
- mvec_name = s;
- return 0;
-}
-early_param("machvec", setup_mvec);
-
static struct ia64_machine_vector * __init
lookup_machvec (const char *name)
{
@@ -41,7 +33,7 @@ machvec_init (const char *name)
struct ia64_machine_vector *mv;
if (!name)
- name = mvec_name ? mvec_name : acpi_get_sysname();
+ name = acpi_get_sysname();
mv = lookup_machvec(name);
if (!mv)
panic("generic kernel failed to find machine vector for"
@@ -51,6 +43,23 @@ machvec_init (const char *name)
printk(KERN_INFO "booting generic kernel on platform %s\n", name);
}
+void __init
+machvec_init_from_cmdline(const char *cmdline)
+{
+ char str[64];
+ const char *start;
+ char *end;
+
+ if (! (start = strstr(cmdline, "machvec=")) )
+ return machvec_init(NULL);
+
+ strlcpy(str, start + strlen("machvec="), sizeof(str));
+ if ( (end = strchr(str, ' ')) )
+ *end = '\0';
+
+ return machvec_init(str);
+}
+
#endif /* CONFIG_IA64_GENERIC */
void
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index fa40cba4335..4158906c45a 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -499,7 +499,8 @@ copy_thread (int nr, unsigned long clone_flags,
/* Copy partially mapped page list */
if (!retval)
- retval = ia32_copy_partial_page_list(p, clone_flags);
+ retval = ia32_copy_ia64_partial_page_list(p,
+ clone_flags);
}
#endif
@@ -728,7 +729,7 @@ flush_thread (void)
ia64_drop_fpu(current);
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(task_pt_regs(current))) {
- ia32_drop_partial_page_list(current);
+ ia32_drop_ia64_partial_page_list(current);
current->thread.task_size = IA32_PAGE_OFFSET;
set_fs(USER_DS);
}
@@ -754,7 +755,7 @@ exit_thread (void)
pfm_release_debug_registers(current);
#endif
if (IS_IA32_PROCESS(task_pt_regs(current)))
- ia32_drop_partial_page_list(current);
+ ia32_drop_ia64_partial_page_list(current);
}
unsigned long
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index cf06fe79904..7cecd296420 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -491,12 +491,17 @@ setup_arch (char **cmdline_p)
efi_init();
io_port_init();
- parse_early_param();
-
#ifdef CONFIG_IA64_GENERIC
- machvec_init(NULL);
+ /* machvec needs to be parsed from the command line
+ * before parse_early_param() is called to ensure
+ * that ia64_mv is initialised before any command line
+ * settings may cause console setup to occur
+ */
+ machvec_init_from_cmdline(*cmdline_p);
#endif
+ parse_early_param();
+
if (early_console_setup(*cmdline_p) == 0)
mark_bsp_online();
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 9f72838db26..0982882bfb8 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -468,7 +468,7 @@ smp_send_stop (void)
send_IPI_allbutself(IPI_CPU_STOP);
}
-int __init
+int
setup_profiling_timer (unsigned int multiplier)
{
return -EINVAL;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 9f5c90b594b..62209dcf06d 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -487,7 +487,7 @@ struct create_idle {
int cpu;
};
-void
+void __cpuinit
do_fork_idle(struct work_struct *work)
{
struct create_idle *c_idle =
@@ -497,7 +497,7 @@ do_fork_idle(struct work_struct *work)
complete(&c_idle->done);
}
-static int __devinit
+static int __cpuinit
do_boot_cpu (int sapicid, int cpu)
{
int timeout;
@@ -808,7 +808,7 @@ set_cpu_sibling_map(int cpu)
}
}
-int __devinit
+int __cpuinit
__cpu_up (unsigned int cpu)
{
int ret;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 627785c48ea..6c0e9e2e1b8 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -52,7 +52,7 @@ static struct clocksource clocksource_itc = {
.name = "itc",
.rating = 350,
.read = itc_get_cycles,
- .mask = 0xffffffffffffffff,
+ .mask = CLOCKSOURCE_MASK(64),
.mult = 0, /* to be calculated */
.shift = 16,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -255,7 +255,7 @@ ia64_init_itm (void)
}
}
-static cycle_t itc_get_cycles()
+static cycle_t itc_get_cycles(void)
{
u64 lcycle, now, ret;
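The time.c hunk above switches the itc clocksource mask to the CLOCKSOURCE_MASK() helper and gives the read callback an explicit (void) prototype. A minimal sketch of a clocksource declaration in that style, not part of the patch; names and the rating are placeholders and mult/shift setup is omitted:

#include <linux/clocksource.h>

static cycle_t example_read_cycles(void)
{
	return 0;	/* would return the free-running hardware counter */
}

static struct clocksource example_clocksource = {
	.name	= "example",
	.rating	= 100,
	.read	= example_read_cycles,
	.mask	= CLOCKSOURCE_MASK(64),		/* 64-bit counter width */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};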
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 860f251d2fc..83e80677de7 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -50,6 +50,8 @@ SECTIONS
KPROBES_TEXT
*(.gnu.linkonce.t*)
}
+ .text.head : AT(ADDR(.text.head) - LOAD_OFFSET)
+ { *(.text.head) }
.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
{ *(.text2) }
#ifdef CONFIG_SMP
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 07d0e92742c..488e48a5dee 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -581,7 +581,7 @@ pcibios_align_resource (void *data, struct resource *res,
/*
* PCI BIOS setup, always defaults to SAL interface
*/
-char * __init
+char * __devinit
pcibios_setup (char *str)
{
return str;
diff --git a/arch/m32r/kernel/setup_mappi.c b/arch/m32r/kernel/setup_mappi.c
index 6b2d77da068..fe73c9ec611 100644
--- a/arch/m32r/kernel/setup_mappi.c
+++ b/arch/m32r/kernel/setup_mappi.c
@@ -45,7 +45,8 @@ static void mask_and_ack_mappi(unsigned int irq)
static void end_mappi_irq(unsigned int irq)
{
- enable_mappi_irq(irq);
+ if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+ enable_mappi_irq(irq);
}
static unsigned int startup_mappi_irq(unsigned int irq)
@@ -88,7 +89,7 @@ void __init init_IRQ(void)
irq_desc[M32R_IRQ_INT0].chip = &mappi_irq_type;
irq_desc[M32R_IRQ_INT0].action = NULL;
irq_desc[M32R_IRQ_INT0].depth = 1;
- icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
+ icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD11;
disable_mappi_irq(M32R_IRQ_INT0);
#endif /* CONFIG_M32R_NE2000 */
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 99fc1226f7f..3ee91869521 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 90238a8c9e1..36d78cf1a7b 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 1175ceff8b2..185906b54cb 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -216,6 +216,18 @@ config XCOPILOT_BUGS
help
Support the bugs of Xcopilot.
+config UC5272
+ bool 'Arcturus Networks uC5272 dimm board support'
+ depends on M5272
+ help
+ Support for the Arcturus Networks uC5272 dimm board.
+
+config UC5282
+ bool "Arcturus Networks uC5282 board support"
+ depends on M528x
+ help
+ Support for the Arcturus Networks uC5282 dimm board.
+
config UCSIMM
bool "uCsimm module support"
depends on M68EZ328
@@ -342,6 +354,18 @@ config SOM5282EM
depends on M528x
help
Support for the EMAC.Inc SOM5282EM module.
+
+config WILDFIRE
+ bool "Intec Automation Inc. WildFire board support"
+ depends on M528x
+ help
+ Support for the Intec Automation Inc. WildFire.
+
+config WILDFIREMOD
+ bool "Intec Automation Inc. WildFire module support"
+ depends on M528x
+ help
+ Support for the Intec Automation Inc. WildFire module.
config ARN5307
bool "Arnewsh 5307 board support"
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index 8951793fd8d..1305cc98002 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -26,6 +26,8 @@ platform-$(CONFIG_M5407) := 5407
PLATFORM := $(platform-y)
board-$(CONFIG_PILOT) := pilot
+board-$(CONFIG_UC5272) := UC5272
+board-$(CONFIG_UC5282) := UC5282
board-$(CONFIG_UCSIMM) := ucsimm
board-$(CONFIG_UCDIMM) := ucdimm
board-$(CONFIG_UCQUICC) := uCquicc
diff --git a/arch/m68knommu/kernel/dma.c b/arch/m68knommu/kernel/dma.c
index 0a25874a2aa..e10eafc5278 100644
--- a/arch/m68knommu/kernel/dma.c
+++ b/arch/m68knommu/kernel/dma.c
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
+#include <linux/device.h>
#include <asm/io.h>
void *dma_alloc_coherent(struct device *dev, size_t size,
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index 2203f694f26..3f86ade3a22 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -42,8 +42,6 @@ EXPORT_SYMBOL(memory_end);
char __initdata command_line[COMMAND_LINE_SIZE];
-void (*mach_trap_init)(void);
-
/* machine dependent timer functions */
void (*mach_sched_init)(irq_handler_t handler);
void (*mach_tick)(void);
@@ -132,6 +130,11 @@ void setup_arch(char **cmdline_p)
config_BSP(&command_line[0], sizeof(command_line));
+#if defined(CONFIG_BOOTPARAM)
+ strncpy(&command_line[0], CONFIG_BOOTPARAM_STRING, sizeof(command_line));
+ command_line[sizeof(command_line) - 1] = 0;
+#endif
+
printk(KERN_INFO "\x0F\r\n\nuClinux/" CPU "\n");
#ifdef CONFIG_UCDIMM
diff --git a/arch/m68knommu/platform/5206/config.c b/arch/m68knommu/platform/5206/config.c
index 3343830aad1..d0f2dc5cb5a 100644
--- a/arch/m68knommu/platform/5206/config.c
+++ b/arch/m68knommu/platform/5206/config.c
@@ -28,7 +28,6 @@
void coldfire_tick(void);
void coldfire_timer_init(irq_handler_t handler);
unsigned long coldfire_timer_offset(void);
-void coldfire_trap_init(void);
void coldfire_reset(void);
/***************************************************************************/
@@ -98,18 +97,9 @@ int mcf_timerirqpending(int timer)
void config_BSP(char *commandp, int size)
{
mcf_setimr(MCFSIM_IMR_MASKALL);
-
-#if defined(CONFIG_BOOTPARAM)
- strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
- commandp[size-1] = 0;
-#else
- memset(commandp, 0, size);
-#endif
-
mach_sched_init = coldfire_timer_init;
mach_tick = coldfire_tick;
mach_gettimeoffset = coldfire_timer_offset;
- mach_trap_init = coldfire_trap_init;
mach_reset = coldfire_reset;
}
diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c
index 0f67320b403..4ab614f1ecd 100644
--- a/arch/m68knommu/platform/5206e/config.c
+++ b/arch/m68knommu/platform/5206e/config.c
@@ -27,7 +27,6 @@
void coldfire_tick(void);
void coldfire_timer_init(irq_handler_t handler);
unsigned long coldfire_timer_offset(void);
-void coldfire_trap_init(void);
void coldfire_reset(void);
/***************************************************************************/
@@ -98,21 +97,15 @@ void config_BSP(char *commandp, int size)
{
mcf_setimr(MCFSIM_IMR_MASKALL);
-#if defined(CONFIG_BOOTPARAM)
- strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
- commandp[size-1] = 0;
-#elif defined(CONFIG_NETtel)
+#if defined(CONFIG_NETtel)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0xf0004000, size);
commandp[size-1] = 0;
-#else
- memset(commandp, 0, size);
#endif /* CONFIG_NETtel */
mach_sched_init = coldfire_timer_init;
mach_tick = coldfire_tick;
mach_gettimeoffset = coldfire_timer_offset;
- mach_trap_init = coldfire_trap_init;
mach_reset = coldfire_reset;
}
diff --git a/arch/m68knommu/platform/520x/config.c b/arch/m68knommu/platform/520x/config.c
index 58b2878deb6..a2c95bebd00 100644
--- a/arch/m68knommu/platform/520x/config.c
+++ b/arch/m68knommu/platform/520x/config.c
@@ -30,7 +30,6 @@ unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
void coldfire_pit_tick(void);
void coldfire_pit_init(irq_handler_t handler);
unsigned long coldfire_pit_offset(void);
-void coldfire_trap_init(void);
void coldfire_reset(void);
/***************************************************************************/
@@ -48,17 +47,9 @@ void mcf_autovector(unsigned int vec)
void config_BSP(char *commandp, int size)
{
-#ifdef CONFIG_BOOTPARAM
- strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
- commandp[size-1] = 0;
-#else
- memset(commandp, 0, size);
-#endif
-
mach_sched_init = coldfire_pit_init;
mach_tick = coldfire_pit_tick;
mach_gettimeoffset = coldfire_pit_offset;
- mach_trap_init = coldfire_trap_init;
mach_reset = coldfire_reset;
}
diff --git a/arch/m68knommu/platform/523x/config.c b/arch/m68knommu/platform/523x/config.c
index 9b054e6caee..0a3af05a434 100644
--- a/arch/m68knommu/platform/523x/config.c
+++ b/arch/m68knommu/platform/523x/config.c
@@ -29,7 +29,6 @@
void coldfire_pit_tick(void);
void coldfire_pit_init(irq_handler_t handler);
unsigned long coldfire_pit_offset(void);
-void coldfire_trap_init(void);
void coldfire_reset(void);
/***************************************************************************/
@@ -63,18 +62,9 @@ void mcf_autovector(unsigned int vec)
void config_BSP(char *commandp, int size)
{
mcf_disableall();
-
-#ifdef CONFIG_BOOTPARAM
- strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
- commandp[size-1] = 0;
-#else
- memset(commandp, 0, size);
-#endif
-
mach_sched_init = coldfire_pit_init;
mach_tick = coldfire_pit_tick;
mach_gettimeoffset = coldfire_pit_offset;
- mach_trap_init = coldfire_trap_init;
mach_reset = coldfire_reset;
}
diff --git a/arch/m68knommu/platform/5249/config.c b/arch/m68knommu/platform/5249/config.c
index d6706079d64..dc2c362590c 100644
--- a/arch/m68knommu/platform/5249/config.c
+++ b/arch/m68knommu/platform/5249/config.c
@@ -27,7 +27,6 @@
void coldfire_tick(void);
void coldfire_timer_init(irq_handler_t handler);
unsigned long coldfire_timer_offset(void);
-void coldfire_trap_init(void);
void coldfire_reset(void);
/***************************************************************************/
@@ -96,18 +95,9 @@ int mcf_timerirqpending(int timer)
void config_BSP(char *commandp, int size)
{
mcf_setimr(MCFSIM_IMR_MASKALL);
-
-#if defined(CONFIG_BOOTPARAM)
- strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
- commandp[size-1] = 0;
-#else
- memset(commandp, 0, size);
-#endif
-
mach_sched_init = coldfire_timer_init;
mach_tick = coldfire_tick;
mach_gettimeoffset = coldfire_timer_offset;
- mach_trap_init = coldfire_trap_init;
mach_reset = coldfire_reset;
}
diff --git a/arch/m68knommu/platform/5272/config.c b/arch/m68knommu/platform/5272/config.c
index 6b437cc9777..1365a8300d5 100644
--- a/arch/m68knommu/platform/5272/config.c
+++ b/arch/m68knommu/platform/5272/config.c
@@ -28,7 +28,6 @@
void coldfire_tick(void);
void coldfire_timer_init(irq_handler_t handler);
unsigned long coldfire_timer_offset(void);
-void coldfire_trap_init(void);
void coldfire_reset(void);
extern unsigned int mcf_timervector;
@@ -113,10 +112,7 @@ void config_BSP(char *commandp, int size)
mcf_disableall();
-#if defined(CONFIG_BOOTPARAM)
- strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
- commandp[size-1] = 0;
-#elif defined(CONFIG_NETtel) || defined(CONFIG_SCALES)
+#if defined(CONFIG_NETtel) || defined(CONFIG_SCALES)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0xf0004000, size);
commandp[size-1] = 0;
@@ -128,8 +124,6 @@ void config_BSP(char *commandp, int size)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0xf0010000, size);
commandp[size-1] = 0;
-#else
- memset(commandp, 0, size);
#endif
mcf_timervector = 69;
@@ -137,7 +131,6 @@ void config_BSP(char *commandp, int size)
mach_sched_init = coldfire_timer_init;
mach_tick = coldfire_tick;
mach_gettimeoffset = coldfire_timer_offset;
- mach_trap_init = coldfire_trap_init;
mach_reset = coldfire_reset;
}
diff --git a/arch/m68knommu/platform/527x/config.c b/arch/m68knommu/platform/527x/config.c
index 28e7d964eef..1b820441419 100644
--- a/arch/m68knommu/platform/527x/config.c
+++ b/arch/m68knommu/platform/527x/config.c
@@ -29,7 +29,6 @@
void coldfire_pit_tick(void);
void coldfire_pit_init(irq_handler_t handler);
unsigned long coldfire_pit_offset(void);
-void coldfire_trap_init(void);
void coldfire_reset(void);
/***************************************************************************/
@@ -63,18 +62,9 @@ void mcf_autovector(unsigned int vec)
void config_BSP(char *commandp, int size)
{
mcf_disableall();
-
-#ifdef CONFIG_BOOTPARAM
- strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
- commandp[size-1] = 0;
-#else
- memset(commandp, 0, size);
-#endif
-
mach_sched_init = coldfire_pit_init;
mach_tick = coldfire_pit_tick;
mach_gettimeoffset = coldfire_pit_offset;
- mach_trap_init = coldfire_trap_init;
mach_reset = coldfire_reset;
}
diff --git a/arch/m68knommu/platform/528x/config.c b/arch/m68knommu/platform/528x/config.c
index 805b4f74ff1..a089e951369 100644
--- a/arch/m68knommu/platform/528x/config.c
+++ b/arch/m68knommu/platform/528x/config.c
@@ -29,7 +29,6 @@
void coldfire_pit_tick(void);
void coldfire_pit_init(irq_handler_t handler);
unsigned long coldfire_pit_offset(void);
-void coldfire_trap_init(void);
void coldfire_reset(void);
/***************************************************************************/
@@ -63,18 +62,9 @@ void mcf_autovector(unsigned int vec)
void config_BSP(char *commandp, int size)
{
mcf_disableall();
-
-#ifdef CONFIG_BOOTPARAM
- strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
- commandp[size-1] = 0;
-#else
- memset(commandp, 0, size);
-#endif
-
mach_sched_init = coldfire_pit_init;
mach_tick = coldfire_pit_tick;
mach_gettimeoffset = coldfire_pit_offset;
- mach_trap_init = coldfire_trap_init;
mach_reset = coldfire_reset;
}
diff --git a/arch/m68knommu/platform/5307/config.c b/arch/m68knommu/platform/5307/config.c
index e04b84deb57..e3461619fd6 100644
--- a/arch/m68knommu/platform/5307/config.c
+++ b/arch/m68knommu/platform/5307/config.c
@@ -29,7 +29,6 @@
void coldfire_tick(void);
void coldfire_timer_init(irq_handler_t handler);
unsigned long coldfire_timer_offset(void);
-void coldfire_trap_init(void);
void coldfire_reset(void);
extern unsigned int mcf_timervector;
@@ -111,10 +110,7 @@ void config_BSP(char *commandp, int size)
{
mcf_setimr(MCFSIM_IMR_MASKALL);
-#if defined(CONFIG_BOOTPARAM)
- strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
- commandp[size-1] = 0;
-#elif defined(CONFIG_NETtel) || defined(CONFIG_eLIA) || \
+#if defined(CONFIG_NETtel) || defined(CONFIG_eLIA) || \
defined(CONFIG_DISKtel) || defined(CONFIG_SECUREEDGEMP3) || \
defined(CONFIG_CLEOPATRA)
/* Copy command line from FLASH to local buffer... */
@@ -124,14 +120,11 @@ void config_BSP(char *commandp, int size)
mcf_timervector = 30;
mcf_profilevector = 31;
mcf_timerlevel = 6;
-#else
- memset(commandp, 0, size);
#endif
mach_sched_init = coldfire_timer_init;
mach_tick = coldfire_tick;
mach_gettimeoffset = coldfire_timer_offset;
- mach_trap_init = coldfire_trap_init;
mach_reset = coldfire_reset;
#ifdef MCF_BDM_DISABLE
diff --git a/arch/m68knommu/platform/5307/entry.S b/arch/m68knommu/platform/5307/entry.S
index c358aebe0af..a8cd867805c 100644
--- a/arch/m68knommu/platform/5307/entry.S
+++ b/arch/m68knommu/platform/5307/entry.S
@@ -213,16 +213,12 @@ ENTRY(ret_from_interrupt)
* Beware - when entering resume, prev (the current task) is
* in a0, next (the new task) is in a1, so don't change these
* registers until their contents are no longer needed.
+ * This is always called in supervisor mode, so don't bother to save
+ * and restore sr; user's process sr is actually in the stack.
*/
ENTRY(resume)
movel %a0, %d1 /* get prev thread in d1 */
- movew %sr,%d0 /* save thread status reg */
- movew %d0,%a0@(TASK_THREAD+THREAD_SR)
-
- oril #0x700,%d0 /* disable interrupts */
- move %d0,%sr
-
movel sw_usp,%d0 /* save usp */
movel %d0,%a0@(TASK_THREAD+THREAD_USP)
@@ -233,7 +229,4 @@ ENTRY(resume)
movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
movel %a0, sw_usp
-
- movew %a1@(TASK_THREAD+THREAD_SR),%d0 /* restore thread status reg */
- movew %d0, %sr
rts
diff --git a/arch/m68knommu/platform/5307/pit.c b/arch/m68knommu/platform/5307/pit.c
index aa15beeb36c..e53c446d10e 100644
--- a/arch/m68knommu/platform/5307/pit.c
+++ b/arch/m68knommu/platform/5307/pit.c
@@ -5,9 +5,8 @@
* hardware timer only exists in the Freescale ColdFire
* 5270/5271, 5282 and other CPUs.
*
- * Copyright (C) 1999-2006, Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 1999-2007, Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 2001-2004, SnapGear Inc. (www.snapgear.com)
- *
*/
/***************************************************************************/
@@ -17,8 +16,8 @@
#include <linux/param.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/coldfire.h>
#include <asm/mcfpit.h>
#include <asm/mcfsim.h>
@@ -43,13 +42,18 @@ void coldfire_pit_tick(void)
/***************************************************************************/
+static struct irqaction coldfire_pit_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+};
+
void coldfire_pit_init(irq_handler_t handler)
{
volatile unsigned char *icrp;
volatile unsigned long *imrp;
- request_irq(MCFINT_VECBASE + MCFINT_PIT1, handler, IRQF_DISABLED,
- "ColdFire Timer", NULL);
+ coldfire_pit_irq.handler = handler;
+ setup_irq(MCFINT_VECBASE + MCFINT_PIT1, &coldfire_pit_irq);
icrp = (volatile unsigned char *) (MCF_IPSBAR + MCFICM_INTC0 +
MCFINTC_ICR0 + MCFINT_PIT1);
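The ColdFire PIT hunk above (and the timers.c hunk that follows) replace request_irq() with a static struct irqaction installed through setup_irq(), so the timer handler can be wired up without a runtime allocation. A sketch of the pattern, not part of the patch; the vector number and names are placeholders:

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#define EXAMPLE_TIMER_IRQ	64	/* placeholder vector */

static struct irqaction example_timer_irqaction = {
	.name	= "timer",
	.flags	= IRQF_DISABLED | IRQF_TIMER,
};

static void __init example_timer_init(irq_handler_t handler)
{
	/* fill in the handler, then install the action on the vector */
	example_timer_irqaction.handler = handler;
	setup_irq(EXAMPLE_TIMER_IRQ, &example_timer_irqaction);
}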
diff --git a/arch/m68knommu/platform/5307/timers.c b/arch/m68knommu/platform/5307/timers.c
index fb66eadd589..64bd0ff9029 100644
--- a/arch/m68knommu/platform/5307/timers.c
+++ b/arch/m68knommu/platform/5307/timers.c
@@ -3,7 +3,7 @@
/*
* timers.c -- generic ColdFire hardware timer support.
*
- * Copyright (C) 1999-2006, Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 1999-2007, Greg Ungerer (gerg@snapgear.com)
*/
/***************************************************************************/
@@ -13,8 +13,8 @@
#include <linux/param.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
@@ -62,17 +62,24 @@ void coldfire_tick(void)
/***************************************************************************/
+static struct irqaction coldfire_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+};
+
static int ticks_per_intr;
void coldfire_timer_init(irq_handler_t handler)
{
+ coldfire_timer_irq.handler = handler;
+ setup_irq(mcf_timervector, &coldfire_timer_irq);
+
__raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
ticks_per_intr = (MCF_BUSCLK / 16) / HZ;
__raw_writetrr(ticks_per_intr - 1, TA(MCFTIMER_TRR));
__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));
- request_irq(mcf_timervector, handler, IRQF_DISABLED, "timer", NULL);
mcf_settimericr(1, mcf_timerlevel);
#ifdef CONFIG_HIGHPROFILE
diff --git a/arch/m68knommu/platform/532x/config.c b/arch/m68knommu/platform/532x/config.c
index 664c3a12b0c..b32c6425f82 100644
--- a/arch/m68knommu/platform/532x/config.c
+++ b/arch/m68knommu/platform/532x/config.c
@@ -37,7 +37,6 @@
void coldfire_tick(void);
void coldfire_timer_init(irq_handler_t handler);
unsigned long coldfire_timer_offset(void);
-void coldfire_trap_init(void);
void coldfire_reset(void);
extern unsigned int mcf_timervector;
@@ -92,10 +91,7 @@ void config_BSP(char *commandp, int size)
{
mcf_setimr(MCFSIM_IMR_MASKALL);
-#if defined(CONFIG_BOOTPARAM)
- strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
- commandp[size-1] = 0;
-#else
+#if !defined(CONFIG_BOOTPARAM)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0x4000, 4);
if(strncmp(commandp, "kcl ", 4) == 0){
@@ -111,7 +107,6 @@ void config_BSP(char *commandp, int size)
mach_sched_init = coldfire_timer_init;
mach_tick = coldfire_tick;
mach_gettimeoffset = coldfire_timer_offset;
- mach_trap_init = coldfire_trap_init;
mach_reset = coldfire_reset;
#ifdef MCF_BDM_DISABLE
diff --git a/arch/m68knommu/platform/5407/config.c b/arch/m68knommu/platform/5407/config.c
index 036f6287624..e692536817d 100644
--- a/arch/m68knommu/platform/5407/config.c
+++ b/arch/m68knommu/platform/5407/config.c
@@ -28,7 +28,6 @@
void coldfire_tick(void);
void coldfire_timer_init(irq_handler_t handler);
unsigned long coldfire_timer_offset(void);
-void coldfire_trap_init(void);
void coldfire_reset(void);
extern unsigned int mcf_timervector;
@@ -102,13 +101,6 @@ void config_BSP(char *commandp, int size)
{
mcf_setimr(MCFSIM_IMR_MASKALL);
-#if defined(CONFIG_BOOTPARAM)
- strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
- commandp[size-1] = 0;
-#else
- memset(commandp, 0, size);
-#endif
-
#if defined(CONFIG_CLEOPATRA)
/* Different timer setup - to prevent device clash */
mcf_timervector = 30;
@@ -119,7 +111,6 @@ void config_BSP(char *commandp, int size)
mach_sched_init = coldfire_timer_init;
mach_tick = coldfire_tick;
mach_gettimeoffset = coldfire_timer_offset;
- mach_trap_init = coldfire_trap_init;
mach_reset = coldfire_reset;
}
diff --git a/arch/m68knommu/platform/68328/timers.c b/arch/m68knommu/platform/68328/timers.c
index ef067f4c3cd..0396476f955 100644
--- a/arch/m68knommu/platform/68328/timers.c
+++ b/arch/m68knommu/platform/68328/timers.c
@@ -18,10 +18,10 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/pgtable.h>
-#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/MC68VZ328.h>
@@ -53,14 +53,19 @@
/***************************************************************************/
+static struct irqaction m68328_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+};
+
void m68328_timer_init(irq_handler_t timer_routine)
{
/* disable timer 1 */
TCTL = 0;
/* set ISR */
- if (request_irq(TMR_IRQ_NUM, timer_routine, IRQ_FLG_LOCK, "timer", NULL))
- panic("Unable to attach timer interrupt\n");
+ m68328_timer_irq.handler = timer_routine;
+ setup_irq(TMR_IRQ_NUM, &m68328_timer_irq);
/* Restart mode, Enable int, Set clock source */
TCTL = TCTL_OM | TCTL_IRQEN | CLOCK_SOURCE;
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c
index 4ff13bd51ff..155b72fe260 100644
--- a/arch/m68knommu/platform/68360/config.c
+++ b/arch/m68knommu/platform/68360/config.c
@@ -17,11 +17,11 @@
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/pgtable.h>
-#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/m68360.h>
@@ -51,11 +51,15 @@ extern unsigned long int system_clock; //In kernel setup.c
extern void config_M68360_irq(void);
+static struct irqaction m68360_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+};
+
void BSP_sched_init(irq_handler_t timer_routine)
{
unsigned char prescaler;
unsigned short tgcr_save;
- int return_value;
#if 0
/* Restart mode, Enable int, 32KHz, Enable timer */
@@ -86,10 +90,8 @@ void BSP_sched_init(irq_handler_t timer_routine)
pquicc->timer_ter1 = 0x0003; /* clear timer events */
/* enable timer 1 interrupt in CIMR */
-// request_irq(IRQ_MACHSPEC | CPMVEC_TIMER1, timer_routine, IRQ_FLG_LOCK, "timer", NULL);
- //return_value = request_irq( CPMVEC_TIMER1, timer_routine, IRQ_FLG_LOCK, "timer", NULL);
- return_value = request_irq(CPMVEC_TIMER1 , timer_routine, IRQ_FLG_LOCK,
- "Timer", NULL);
+ m68360_timer_irq.handler = timer_routine;
+ setup_irq(CPMVEC_TIMER1, &m68360_timer_irq);
/* Start timer 1: */
tgcr_save = (pquicc->timer_tgcr & 0xfff0) | 0x0001;
diff --git a/arch/m68knommu/platform/68VZ328/config.c b/arch/m68knommu/platform/68VZ328/config.c
index 8abe0f6e723..79dced929c9 100644
--- a/arch/m68knommu/platform/68VZ328/config.c
+++ b/arch/m68knommu/platform/68VZ328/config.c
@@ -191,13 +191,6 @@ void config_BSP(char *command, int size)
{
printk(KERN_INFO "68VZ328 DragonBallVZ support (c) 2001 Lineo, Inc.\n");
-#if defined(CONFIG_BOOTPARAM)
- strncpy(command, CONFIG_BOOTPARAM_STRING, size);
- command[size-1] = 0;
-#else
- memset(command, 0, size);
-#endif
-
init_hardware(command, size);
mach_sched_init = (void *) m68328_timer_init;
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 20d19c9b776..a9a987a06da 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -328,7 +328,7 @@ load-$(CONFIG_MIPS_SEAD) += 0xffffffff80100000
# MIPS SIM
#
core-$(CONFIG_MIPS_SIM) += arch/mips/mipssim/
-cflags-$(CONFIG_MIPS_SIM) += -Iinclude/asm-mips/mach-sim
+cflags-$(CONFIG_MIPS_SIM) += -Iinclude/asm-mips/mach-mipssim
load-$(CONFIG_MIPS_SIM) += 0x80100000
#
diff --git a/arch/mips/arc/console.c b/arch/mips/arc/console.c
deleted file mode 100644
index 0fe6032999c..00000000000
--- a/arch/mips/arc/console.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1996 David S. Miller (dm@sgi.com)
- * Compability with board caches, Ulf Carlsson
- */
-#include <linux/kernel.h>
-#include <asm/sgialib.h>
-#include <asm/bcache.h>
-
-/*
- * IP22 boardcache is not compatible with board caches. Thus we disable it
- * during romvec action. Since r4xx0.c is always compiled and linked with your
- * kernel, this shouldn't cause any harm regardless what MIPS processor you
- * have.
- *
- * The ARC write and read functions seem to interfere with the serial lines
- * in some way. You should be careful with them.
- */
-
-void prom_putchar(char c)
-{
- ULONG cnt;
- CHAR it = c;
-
- bc_disable();
- ArcWrite(1, &it, 1, &cnt);
- bc_enable();
-}
diff --git a/arch/mips/jazz/io.c b/arch/mips/jazz/io.c
deleted file mode 100644
index e86904454c8..00000000000
--- a/arch/mips/jazz/io.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Low level I/O functions for Jazz family machines.
- *
- * Copyright (C) 1997 by Ralf Baechle.
- */
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <asm/addrspace.h>
-#include <asm/system.h>
-#include <asm/jazz.h>
-
-/*
- * Map an 16mb segment of the EISA address space to 0xe3000000;
- */
-static inline void map_eisa_address(unsigned long address)
-{
- /* XXX */
- /* We've got an wired entry in the TLB. We just need to modify it.
- fast and clean. But since we want to get rid of wired entries
- things are a little bit more complicated ... */
-}
-
-static unsigned char jazz_readb(unsigned long addr)
-{
- unsigned char res;
-
- map_eisa_address(addr);
- addr &= 0xffffff;
- res = *(volatile unsigned char *) (JAZZ_EISA_BASE + addr);
-
- return res;
-}
-
-static unsigned short jazz_readw(unsigned long addr)
-{
- unsigned short res;
-
- map_eisa_address(addr);
- addr &= 0xffffff;
- res = *(volatile unsigned char *) (JAZZ_EISA_BASE + addr);
-
- return res;
-}
-
-static unsigned int jazz_readl(unsigned long addr)
-{
- unsigned int res;
-
- map_eisa_address(addr);
- addr &= 0xffffff;
- res = *(volatile unsigned char *) (JAZZ_EISA_BASE + addr);
-
- return res;
-}
-
-static void jazz_writeb(unsigned char val, unsigned long addr)
-{
- map_eisa_address(addr);
- addr &= 0xffffff;
- *(volatile unsigned char *) (JAZZ_EISA_BASE + addr) = val;
-}
-
-static void jazz_writew(unsigned short val, unsigned long addr)
-{
- map_eisa_address(addr);
- addr &= 0xffffff;
- *(volatile unsigned char *) (JAZZ_EISA_BASE + addr) = val;
-}
-
-static void jazz_writel(unsigned int val, unsigned long addr)
-{
- map_eisa_address(addr);
- addr &= 0xffffff;
- *(volatile unsigned char *) (JAZZ_EISA_BASE + addr) = val;
-}
-
-static void jazz_memset_io(unsigned long addr, int val, unsigned long len)
-{
- unsigned long waddr;
-
- waddr = JAZZ_EISA_BASE | (addr & 0xffffff);
- while(len) {
- unsigned long fraglen;
-
- fraglen = (~addr + 1) & 0xffffff;
- fraglen = (fraglen < len) ? fraglen : len;
- map_eisa_address(addr);
- memset((char *)waddr, val, fraglen);
- addr += fraglen;
- waddr = waddr + fraglen - 0x1000000;
- len -= fraglen;
- }
-}
-
-static void jazz_memcpy_fromio(unsigned long to, unsigned long from, unsigned long len)
-{
- unsigned long waddr;
-
- waddr = JAZZ_EISA_BASE | (from & 0xffffff);
- while(len) {
- unsigned long fraglen;
-
- fraglen = (~from + 1) & 0xffffff;
- fraglen = (fraglen < len) ? fraglen : len;
- map_eisa_address(from);
- memcpy((void *)to, (void *)waddr, fraglen);
- to += fraglen;
- from += fraglen;
- waddr = waddr + fraglen - 0x1000000;
- len -= fraglen;
- }
-}
-
-static void jazz_memcpy_toio(unsigned long to, unsigned long from, unsigned long len)
-{
- unsigned long waddr;
-
- waddr = JAZZ_EISA_BASE | (to & 0xffffff);
- while(len) {
- unsigned long fraglen;
-
- fraglen = (~to + 1) & 0xffffff;
- fraglen = (fraglen < len) ? fraglen : len;
- map_eisa_address(to);
- memcpy((char *)to + JAZZ_EISA_BASE, (void *)from, fraglen);
- to += fraglen;
- from += fraglen;
- waddr = waddr + fraglen - 0x1000000;
- len -= fraglen;
- }
-}
diff --git a/arch/mips/jazz/reset.c b/arch/mips/jazz/reset.c
index 2a9754750bc..d8ade85060b 100644
--- a/arch/mips/jazz/reset.c
+++ b/arch/mips/jazz/reset.c
@@ -6,10 +6,6 @@
*/
#include <linux/jiffies.h>
#include <asm/jazz.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/reboot.h>
-#include <asm/delay.h>
#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */
@@ -58,12 +54,3 @@ void jazz_machine_restart(char *command)
jazz_write_output (0x00);
}
}
-
-void jazz_machine_halt(void)
-{
-}
-
-void jazz_machine_power_off(void)
-{
- /* Jazz machines don't have a software power switch */
-}
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index 81ec559a1c2..798279e0669 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -34,8 +34,6 @@
extern asmlinkage void jazz_handle_int(void);
extern void jazz_machine_restart(char *command);
-extern void jazz_machine_halt(void);
-extern void jazz_machine_power_off(void);
void __init plat_timer_setup(struct irqaction *irq)
{
@@ -95,8 +93,6 @@ void __init plat_mem_setup(void)
/* The RTC is outside the port address space */
_machine_restart = jazz_machine_restart;
- _machine_halt = jazz_machine_halt;
- pm_power_off = jazz_machine_power_off;
screen_info = (struct screen_info) {
0, 0, /* orig-x, orig-y */
diff --git a/arch/mips/jmr3927/rbhma3100/setup.c b/arch/mips/jmr3927/rbhma3100/setup.c
index d1ef2895d56..8303001516d 100644
--- a/arch/mips/jmr3927/rbhma3100/setup.c
+++ b/arch/mips/jmr3927/rbhma3100/setup.c
@@ -434,7 +434,7 @@ EXPORT_SYMBOL(__swizzle_addr_b);
static int __init jmr3927_rtc_init(void)
{
- struct resource res = {
+ static struct resource __initdata res = {
.start = JMR3927_IOC_NVRAMB_ADDR - IO_BASE,
.end = JMR3927_IOC_NVRAMB_ADDR - IO_BASE + 0x800 - 1,
.flags = IORESOURCE_MEM,
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index 7bc88204926..cb5623aad55 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -1099,12 +1099,12 @@ void adel(void)
* malloc is needed by gdb client in "call func()", even a private one
* will make gdb happy
*/
-static void * __attribute_used__ malloc(size_t size)
+static void __used *malloc(size_t size)
{
return kmalloc(size, GFP_ATOMIC);
}
-static void __attribute_used__ free (void *where)
+static void __used free(void *where)
{
kfree(where);
}
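The gdb-stub.c change above, together with the matching linux32.c, rtlx.c, syscall.c, vpe.c and c-sb1.c hunks below, swaps the old __attribute_used__ spelling for the __used macro from linux/compiler.h, which on current gcc maps to __attribute__((__used__)) and stops the compiler from discarding a static symbol that has no C callers (these helpers are reached only from the debugger or from assembly stubs). A small hedged sketch, with a made-up function name:

#include <linux/compiler.h>
#include <linux/slab.h>

/*
 * No C code calls this; only the remote gdb client does, so without
 * __used the compiler would be free to drop the uncalled static symbol.
 */
static void __used *debug_malloc(size_t size)
{
	return kmalloc(size, GFP_ATOMIC);
}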
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index f78538eceef..c15bbc436bb 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -141,7 +141,7 @@
EXPORT(stext) # used for profiling
EXPORT(_stext)
-#ifdef CONFIG_BOOT_RAW
+#ifndef CONFIG_BOOT_RAW
/*
* Give us a fighting chance of running if execution beings at the
* kernel load address. This is needed because this platform does
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 06e04da211d..c37568d6fb5 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -567,7 +567,7 @@ asmlinkage long sys32_fadvise64_64(int fd, int __pad,
}
save_static_function(sys32_clone);
-__attribute_used__ noinline static int
+static int noinline __used
_sys32_clone(nabi_no_regargs struct pt_regs regs)
{
unsigned long clone_flags;
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index bfc8ca168f8..8cf24d716d4 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -85,7 +85,7 @@ static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static __attribute_used__ void dump_rtlx(void)
+static void __used dump_rtlx(void)
{
int i;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index be7362bc2c9..04bbbd8d91a 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
+#include <linux/err.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index b947c61c0cc..541b5005957 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
@@ -167,14 +168,14 @@ sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
}
save_static_function(sys_fork);
-__attribute_used__ noinline static int
+static int __used noinline
_sys_fork(nabi_no_regargs struct pt_regs regs)
{
return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
}
save_static_function(sys_clone);
-__attribute_used__ noinline static int
+static int __used noinline
_sys_clone(nabi_no_regargs struct pt_regs regs)
{
unsigned long clone_flags;
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 9e66354dee8..a2bee10f04c 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -154,7 +154,6 @@ struct {
};
static void release_progmem(void *ptr);
-/* static __attribute_used__ void dump_vpe(struct vpe * v); */
extern void save_gp_address(unsigned int secbase, unsigned int rel);
/* get the vpe associated with this minor */
@@ -1024,7 +1023,7 @@ static int vpe_elfload(struct vpe * v)
return 0;
}
-__attribute_used__ void dump_vpe(struct vpe * v)
+void __used dump_vpe(struct vpe * v)
{
struct tc *t;
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index 6f9bd7fbd48..85ce2842d0d 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -272,7 +272,7 @@ void sb1_flush_cache_data_page(unsigned long)
/*
* Invalidate all caches on this CPU
*/
-static void __attribute_used__ local_sb1___flush_cache_all(void)
+static void __used local_sb1___flush_cache_all(void)
{
__sb1_writeback_inv_dcache_all();
__sb1_flush_icache_all();
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 4c80528dead..b8cb0dde3af 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -484,7 +484,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
-void free_initmem(void)
+void __init_refok free_initmem(void)
{
prom_free_prom_memory();
free_init_pages("unused kernel memory",
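free_initmem() above gains an __init_refok annotation because it runs exactly once at the end of boot and calls helpers that may themselves live in the init sections; the marker tells modpost that this reference from ordinary text into init text is deliberate rather than a section-mismatch bug. A rough sketch of the annotation with placeholder names (not the MIPS code itself):

#include <linux/init.h>

/* placeholder for code that is discarded once boot is complete */
static int __init one_shot_setup(void)
{
	return 0;
}

/*
 * Runs once, while the init sections are still present, so the call
 * into __init code below is safe; __init_refok keeps modpost quiet
 * about it in this kernel generation.
 */
void __init_refok release_boot_memory(void)
{
	one_shot_setup();
}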
diff --git a/arch/mips/sni/sniprom.c b/arch/mips/sni/sniprom.c
index 00a03a6e8f5..db544a6e23f 100644
--- a/arch/mips/sni/sniprom.c
+++ b/arch/mips/sni/sniprom.c
@@ -19,6 +19,7 @@
#include <asm/addrspace.h>
#include <asm/sni.h>
#include <asm/mipsprom.h>
+#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
/* special SNI prom calls */
@@ -71,7 +72,7 @@ const char *get_system_type(void)
#define SNI_IDPROM_SIZE 0x1000
#ifdef DEBUG
-static void sni_idprom_dump(void)
+static void __init sni_idprom_dump(void)
{
int i;
@@ -88,7 +89,7 @@ static void sni_idprom_dump(void)
}
#endif
-static void sni_mem_init(void )
+static void __init sni_mem_init(void )
{
int i, memsize;
struct membank {
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c
index 40c7c3eeafa..ab72292a172 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c
@@ -1020,7 +1020,7 @@ void __init toshiba_rbtx4927_timer_setup(struct irqaction *irq)
static int __init toshiba_rbtx4927_rtc_init(void)
{
- struct resource res = {
+ static struct resource __initdata res = {
.start = 0x1c010000,
.end = 0x1c010000 + 0x800 - 1,
.flags = IORESOURCE_MEM,
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index f2042e6466a..1263f00dc35 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/slab.h>
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
index 8384bf9cecd..446f98d3fd7 100644
--- a/arch/parisc/kernel/init_task.c
+++ b/arch/parisc/kernel/init_task.c
@@ -23,6 +23,7 @@
*/
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 355664812b8..b80e02a4d81 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -38,6 +38,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 04c7e1d36ce..d7bc7bb42c9 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -28,6 +28,7 @@
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 853c282da22..00099efe0e9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -411,11 +411,6 @@ config PPC_INDIRECT_PCI
default y if 40x || 44x
default n
-config PPC_INDIRECT_PCI_BE
- bool
- depends PPC_INDIRECT_PCI
- default n
-
config EISA
bool
@@ -425,6 +420,10 @@ config SBUS
config FSL_SOC
bool
+config FSL_PCI
+ bool
+ select PPC_INDIRECT_PCI
+
# Yes MCA RS/6000s exist but Linux-PPC does not currently support any
config MCA
bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 346cd3befe1..22acece95b1 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -20,7 +20,7 @@ config DEBUG_STACK_USAGE
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
- depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
+ depends on DEBUG_KERNEL && !HIBERNATION
help
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
@@ -134,7 +134,7 @@ config BDI_SWITCH
config BOOTX_TEXT
bool "Support for early boot text console (BootX or OpenFirmware only)"
- depends PPC_OF
+ depends PPC_OF && PPC_MULTIPLATFORM
help
Say Y here to see progress messages from the boot firmware in text
mode. Requires either BootX or Open Firmware.
diff --git a/arch/powerpc/boot/dts/kuroboxHD.dts b/arch/powerpc/boot/dts/kuroboxHD.dts
index a983680c326..122537419d9 100644
--- a/arch/powerpc/boot/dts/kuroboxHD.dts
+++ b/arch/powerpc/boot/dts/kuroboxHD.dts
@@ -33,12 +33,10 @@ build with: "dtc -f -I dts -O dtb -o kuroboxHD.dtb -V 16 kuroboxHD.dts"
PowerPC,603e { /* Really 8241 */
device_type = "cpu";
reg = <0>;
- clock-frequency = <bebc200>; /* Fixed by bootwrapper */
- timebase-frequency = <1743000>; /* Fixed by bootwrapper */
- bus-frequency = <0>; /* From bootloader */
+ clock-frequency = <bebc200>; /* Fixed by bootloader */
+ timebase-frequency = <1743000>; /* Fixed by bootloader */
+ bus-frequency = <0>; /* Fixed by bootloader */
/* Following required by dtc but not used */
- i-cache-line-size = <0>;
- d-cache-line-size = <0>;
i-cache-size = <4000>;
d-cache-size = <4000>;
};
@@ -64,11 +62,19 @@ build with: "dtc -f -I dts -O dtb -o kuroboxHD.dtb -V 16 kuroboxHD.dts"
fef00000 fef00000 00100000>; /* pci iack */
i2c@80003000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
device_type = "i2c";
compatible = "fsl-i2c";
reg = <80003000 1000>;
interrupts = <5 2>;
interrupt-parent = <&mpic>;
+
+ rtc@32 {
+ device_type = "rtc";
+ compatible = "ricoh,rs5c372b";
+ reg = <32>;
+ };
};
serial@80004500 {
@@ -91,7 +97,7 @@ build with: "dtc -f -I dts -O dtb -o kuroboxHD.dtb -V 16 kuroboxHD.dts"
interrupt-parent = <&mpic>;
};
- mpic: pic@80040000 {
+ mpic: interrupt-controller@80040000 {
#interrupt-cells = <2>;
#address-cells = <0>;
device_type = "open-pic";
diff --git a/arch/powerpc/boot/dts/kuroboxHG.dts b/arch/powerpc/boot/dts/kuroboxHG.dts
index 5cf42dc022d..579aa8b967d 100644
--- a/arch/powerpc/boot/dts/kuroboxHG.dts
+++ b/arch/powerpc/boot/dts/kuroboxHG.dts
@@ -33,12 +33,10 @@ build with: "dtc -f -I dts -O dtb -o kuroboxHG.dtb -V 16 kuroboxHG.dts"
PowerPC,603e { /* Really 8241 */
device_type = "cpu";
reg = <0>;
- clock-frequency = <fdad680>; /* Fixed by bootwrapper */
- timebase-frequency = <1F04000>; /* Fixed by bootwrapper */
- bus-frequency = <0>; /* From bootloader */
+ clock-frequency = <fdad680>; /* Fixed by bootloader */
+ timebase-frequency = <1F04000>; /* Fixed by bootloader */
+ bus-frequency = <0>; /* Fixed by bootloader */
/* Following required by dtc but not used */
- i-cache-line-size = <0>;
- d-cache-line-size = <0>;
i-cache-size = <4000>;
d-cache-size = <4000>;
};
@@ -64,11 +62,19 @@ build with: "dtc -f -I dts -O dtb -o kuroboxHG.dtb -V 16 kuroboxHG.dts"
fef00000 fef00000 00100000>; /* pci iack */
i2c@80003000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
device_type = "i2c";
compatible = "fsl-i2c";
reg = <80003000 1000>;
interrupts = <5 2>;
interrupt-parent = <&mpic>;
+
+ rtc@32 {
+ device_type = "rtc";
+ compatible = "ricoh,rs5c372b";
+ reg = <32>;
+ };
};
serial@80004500 {
@@ -91,8 +97,7 @@ build with: "dtc -f -I dts -O dtb -o kuroboxHG.dtb -V 16 kuroboxHG.dts"
interrupt-parent = <&mpic>;
};
- mpic: pic@80040000 {
- interrupt-parent = <&mpic>;
+ mpic: interrupt-controller@80040000 {
#interrupt-cells = <2>;
#address-cells = <0>;
device_type = "open-pic";
diff --git a/arch/powerpc/boot/dts/mpc7448hpc2.dts b/arch/powerpc/boot/dts/mpc7448hpc2.dts
index 0e3d314a715..b9158eb2797 100644
--- a/arch/powerpc/boot/dts/mpc7448hpc2.dts
+++ b/arch/powerpc/boot/dts/mpc7448hpc2.dts
@@ -45,7 +45,7 @@
#address-cells = <1>;
#size-cells = <1>;
#interrupt-cells = <2>;
- device_type = "tsi108-bridge";
+ device_type = "tsi-bridge";
ranges = <00000000 c0000000 00010000>;
reg = <c0000000 00010000>;
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts
index a1533cc07d0..c5adbe40364 100644
--- a/arch/powerpc/boot/dts/mpc8313erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8313erdb.dts
@@ -178,7 +178,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <8500 100>;
- compatible = "83xx";
+ compatible = "fsl,mpc8349-pci";
device_type = "pci";
};
diff --git a/arch/powerpc/boot/dts/mpc832x_mds.dts b/arch/powerpc/boot/dts/mpc832x_mds.dts
index 4fc0c4d34aa..f158ed781ba 100644
--- a/arch/powerpc/boot/dts/mpc832x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc832x_mds.dts
@@ -154,7 +154,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <8500 100>;
- compatible = "83xx";
+ compatible = "fsl,mpc8349-pci";
device_type = "pci";
};
diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts
index 447c03ffabb..7c4beff3e20 100644
--- a/arch/powerpc/boot/dts/mpc832x_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts
@@ -123,7 +123,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <8500 100>;
- compatible = "83xx";
+ compatible = "fsl,mpc8349-pci";
device_type = "pci";
};
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index ae9bca57545..502f47c0179 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -197,7 +197,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <8500 100>;
- compatible = "83xx";
+ compatible = "fsl,mpc8349-pci";
device_type = "pci";
};
@@ -222,7 +222,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <8600 100>;
- compatible = "83xx";
+ compatible = "fsl,mpc8349-pci";
device_type = "pci";
};
diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
index f636528a3c7..0b8387141d8 100644
--- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
@@ -154,7 +154,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <8600 100>;
- compatible = "83xx";
+ compatible = "fsl,mpc8349-pci";
device_type = "pci";
};
diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
index 310e877826b..481099756e4 100644
--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
@@ -241,7 +241,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <8500 100>;
- compatible = "83xx";
+ compatible = "fsl,mpc8349-pci";
device_type = "pci";
};
@@ -301,7 +301,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <8600 100>;
- compatible = "83xx";
+ compatible = "fsl,mpc8349-pci";
device_type = "pci";
};
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 1e914f31dd9..e3f7c128206 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -169,7 +169,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <8500 100>;
- compatible = "83xx";
+ compatible = "fsl,mpc8349-pci";
device_type = "pci";
};
diff --git a/arch/powerpc/boot/dts/mpc8540ads.dts b/arch/powerpc/boot/dts/mpc8540ads.dts
index 364a969f5c2..fc8dff9f620 100644
--- a/arch/powerpc/boot/dts/mpc8540ads.dts
+++ b/arch/powerpc/boot/dts/mpc8540ads.dts
@@ -258,7 +258,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <8000 1000>;
- compatible = "85xx";
+ compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
device_type = "pci";
};
diff --git a/arch/powerpc/boot/dts/mpc8541cds.dts b/arch/powerpc/boot/dts/mpc8541cds.dts
index 070206fffe8..fb0b647f8c2 100644
--- a/arch/powerpc/boot/dts/mpc8541cds.dts
+++ b/arch/powerpc/boot/dts/mpc8541cds.dts
@@ -193,7 +193,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <8000 1000>;
- compatible = "85xx";
+ compatible = "fsl,mpc8540-pci";
device_type = "pci";
i8259@19000 {
@@ -230,7 +230,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <9000 1000>;
- compatible = "85xx";
+ compatible = "fsl,mpc8540-pci";
device_type = "pci";
};
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dts b/arch/powerpc/boot/dts/mpc8544ds.dts
index 82859259246..4680e201088 100644
--- a/arch/powerpc/boot/dts/mpc8544ds.dts
+++ b/arch/powerpc/boot/dts/mpc8544ds.dts
@@ -104,6 +104,7 @@
interrupts = <1d 2 1e 2 22 2>;
interrupt-parent = <&mpic>;
phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
};
ethernet@26000 {
@@ -117,6 +118,7 @@
interrupts = <1f 2 20 2 21 2>;
interrupt-parent = <&mpic>;
phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
};
serial@4500 {
@@ -137,6 +139,223 @@
interrupt-parent = <&mpic>;
};
+ pci@8000 {
+ compatible = "fsl,mpc8540-pci";
+ device_type = "pci";
+ interrupt-map-mask = <f800 0 0 7>;
+ interrupt-map = <
+
+ /* IDSEL 0x11 J17 Slot 1 */
+ 8800 0 0 1 &mpic 2 1
+ 8800 0 0 2 &mpic 3 1
+ 8800 0 0 3 &mpic 4 1
+ 8800 0 0 4 &mpic 1 1
+
+ /* IDSEL 0x12 J16 Slot 2 */
+
+ 9000 0 0 1 &mpic 3 1
+ 9000 0 0 2 &mpic 4 1
+ 9000 0 0 3 &mpic 2 1
+ 9000 0 0 4 &mpic 1 1>;
+
+ interrupt-parent = <&mpic>;
+ interrupts = <18 2>;
+ bus-range = <0 ff>;
+ ranges = <02000000 0 80000000 80000000 0 10000000
+ 01000000 0 00000000 e2000000 0 00800000>;
+ clock-frequency = <3f940aa>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <8000 1000>;
+ };
+
+ pcie@9000 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <9000 1000>;
+ bus-range = <0 ff>;
+ ranges = <02000000 0 90000000 90000000 0 10000000
+ 01000000 0 00000000 e3000000 0 00800000>;
+ clock-frequency = <1fca055>;
+ interrupt-parent = <&mpic>;
+ interrupts = <1a 2>;
+ interrupt-map-mask = <f800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 4 1
+ 0000 0 0 2 &mpic 5 1
+ 0000 0 0 3 &mpic 6 1
+ 0000 0 0 4 &mpic 7 1
+ >;
+ };
+
+ pcie@a000 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <a000 1000>;
+ bus-range = <0 ff>;
+ ranges = <02000000 0 a0000000 a0000000 0 10000000
+ 01000000 0 00000000 e2800000 0 00800000>;
+ clock-frequency = <1fca055>;
+ interrupt-parent = <&mpic>;
+ interrupts = <19 2>;
+ interrupt-map-mask = <f800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 0 1
+ 0000 0 0 2 &mpic 1 1
+ 0000 0 0 3 &mpic 2 1
+ 0000 0 0 4 &mpic 3 1
+ >;
+ };
+
+ pcie@b000 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <b000 1000>;
+ bus-range = <0 ff>;
+ ranges = <02000000 0 b0000000 b0000000 0 10000000
+ 01000000 0 00000000 e3800000 0 00800000>;
+ clock-frequency = <1fca055>;
+ interrupt-parent = <&mpic>;
+ interrupts = <1b 2>;
+ interrupt-map-mask = <f800 0 0 7>;
+ interrupt-map = <
+
+ // IDSEL 0x1a
+ d000 0 0 1 &i8259 6 2
+ d000 0 0 2 &i8259 3 2
+ d000 0 0 3 &i8259 4 2
+ d000 0 0 4 &i8259 5 2
+
+ // IDSEL 0x1b
+ d800 0 0 1 &i8259 5 2
+ d800 0 0 2 &i8259 0 0
+ d800 0 0 3 &i8259 0 0
+ d800 0 0 4 &i8259 0 0
+
+ // IDSEL 0x1c USB
+ e000 0 0 1 &i8259 9 2
+ e000 0 0 2 &i8259 a 2
+ e000 0 0 3 &i8259 c 2
+ e000 0 0 4 &i8259 7 2
+
+ // IDSEL 0x1d Audio
+ e800 0 0 1 &i8259 9 2
+ e800 0 0 2 &i8259 a 2
+ e800 0 0 3 &i8259 b 2
+ e800 0 0 4 &i8259 0 0
+
+ // IDSEL 0x1e Legacy
+ f000 0 0 1 &i8259 c 2
+ f000 0 0 2 &i8259 0 0
+ f000 0 0 3 &i8259 0 0
+ f000 0 0 4 &i8259 0 0
+
+ // IDSEL 0x1f IDE/SATA
+ f800 0 0 1 &i8259 6 2
+ f800 0 0 2 &i8259 0 0
+ f800 0 0 3 &i8259 0 0
+ f800 0 0 4 &i8259 0 0
+ >;
+ uli1575@0 {
+ reg = <0 0 0 0 0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ ranges = <02000000 0 b0000000
+ 02000000 0 b0000000
+ 0 10000000
+ 01000000 0 00000000
+ 01000000 0 00000000
+ 0 00080000>;
+
+ pci_bridge@0 {
+ reg = <0 0 0 0 0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ ranges = <02000000 0 b0000000
+ 02000000 0 b0000000
+ 0 20000000
+ 01000000 0 00000000
+ 01000000 0 00000000
+ 0 00100000>;
+
+ isa@1e {
+ device_type = "isa";
+ #interrupt-cells = <2>;
+ #size-cells = <1>;
+ #address-cells = <2>;
+ reg = <f000 0 0 0 0>;
+ ranges = <1 0 01000000 0 0
+ 00001000>;
+ interrupt-parent = <&i8259>;
+
+ i8259: interrupt-controller@20 {
+ reg = <1 20 2
+ 1 a0 2
+ 1 4d0 2>;
+ clock-frequency = <0>;
+ interrupt-controller;
+ device_type = "interrupt-controller";
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ built-in;
+ compatible = "chrp,iic";
+ interrupts = <9 2>;
+ interrupt-parent =
+ <&mpic>;
+ };
+
+ i8042@60 {
+ #size-cells = <0>;
+ #address-cells = <1>;
+ reg = <1 60 1 1 64 1>;
+ interrupts = <1 3 c 3>;
+ interrupt-parent =
+ <&i8259>;
+
+ keyboard@0 {
+ reg = <0>;
+ compatible = "pnpPNP,303";
+ };
+
+ mouse@1 {
+ reg = <1>;
+ compatible = "pnpPNP,f03";
+ };
+ };
+
+ rtc@70 {
+ compatible =
+ "pnpPNP,b00";
+ reg = <1 70 2>;
+ };
+
+ gpio@400 {
+ reg = <1 400 80>;
+ };
+ };
+ };
+ };
+
+ };
+
+ global-utilities@e0000 { //global utilities block
+ compatible = "fsl,mpc8548-guts";
+ reg = <e0000 1000>;
+ fsl,has-rstcr;
+ };
+
mpic: pic@40000 {
clock-frequency = <0>;
interrupt-controller;
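The pci@8000 and pcie@9000/a000/b000 nodes added above carry explicit compatible strings ("fsl,mpc8540-pci" and "fsl,mpc8540-pcix" for the PCI/PCI-X bridge, "fsl,mpc8548-pcie" for the PCI Express root complexes), so platform code can locate the bridges by compatible value rather than by board-specific guesswork. The following is only a hedged sketch of how such nodes are typically walked with the OF helpers available in this tree; the function name is invented, and the real bridge setup lives in the Freescale PCI support code:

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/prom.h>

static void __init my_list_pcie_bridges(void)
{
	struct device_node *np = NULL;

	/* iterate over every node claiming the PCIe bridge compatible */
	while ((np = of_find_compatible_node(np, NULL, "fsl,mpc8548-pcie")))
		printk(KERN_INFO "PCIe host bridge at %s\n", np->full_name);
}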
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts
index 9d0b84b66cd..d215d21fff4 100644
--- a/arch/powerpc/boot/dts/mpc8548cds.dts
+++ b/arch/powerpc/boot/dts/mpc8548cds.dts
@@ -1,5 +1,5 @@
/*
- * MPC8555 CDS Device Tree Source
+ * MPC8548 CDS Device Tree Source
*
* Copyright 2006 Freescale Semiconductor Inc.
*
@@ -44,8 +44,14 @@
#size-cells = <1>;
#interrupt-cells = <2>;
device_type = "soc";
- ranges = <0 e0000000 00100000>;
- reg = <e0000000 00100000>; // CCSRBAR 1M
+ ranges = <00001000 e0001000 000ff000
+ 80000000 80000000 10000000
+ e2000000 e2000000 00800000
+ 90000000 90000000 10000000
+ e2800000 e2800000 00800000
+ a0000000 a0000000 20000000
+ e3000000 e3000000 01000000>;
+ reg = <e0000000 00001000>; // CCSRBAR
bus-frequency = <0>;
memory-controller@2000 {
@@ -162,8 +168,8 @@
serial@4500 {
device_type = "serial";
compatible = "ns16550";
- reg = <4500 100>; // reg base, size
- clock-frequency = <0>; // should we fill in in uboot?
+ reg = <4500 100>; // reg base, size
+ clock-frequency = <0>; // should we fill in in uboot?
interrupts = <2a 2>;
interrupt-parent = <&mpic>;
};
@@ -172,7 +178,7 @@
device_type = "serial";
compatible = "ns16550";
reg = <4600 100>; // reg base, size
- clock-frequency = <0>; // should we fill in in uboot?
+ clock-frequency = <0>; // should we fill in in uboot?
interrupts = <2a 2>;
interrupt-parent = <&mpic>;
};
@@ -183,77 +189,154 @@
fsl,has-rstcr;
};
- pci1: pci@8000 {
- interrupt-map-mask = <1f800 0 0 7>;
+ pci@8000 {
+ interrupt-map-mask = <f800 0 0 7>;
interrupt-map = <
+ /* IDSEL 0x4 (PCIX Slot 2) */
+ 02000 0 0 1 &mpic 0 1
+ 02000 0 0 2 &mpic 1 1
+ 02000 0 0 3 &mpic 2 1
+ 02000 0 0 4 &mpic 3 1
+
+ /* IDSEL 0x5 (PCIX Slot 3) */
+ 02800 0 0 1 &mpic 1 1
+ 02800 0 0 2 &mpic 2 1
+ 02800 0 0 3 &mpic 3 1
+ 02800 0 0 4 &mpic 0 1
+
+ /* IDSEL 0x6 (PCIX Slot 4) */
+ 03000 0 0 1 &mpic 2 1
+ 03000 0 0 2 &mpic 3 1
+ 03000 0 0 3 &mpic 0 1
+ 03000 0 0 4 &mpic 1 1
+
+ /* IDSEL 0x8 (PCIX Slot 5) */
+ 04000 0 0 1 &mpic 0 1
+ 04000 0 0 2 &mpic 1 1
+ 04000 0 0 3 &mpic 2 1
+ 04000 0 0 4 &mpic 3 1
+
+ /* IDSEL 0xC (Tsi310 bridge) */
+ 06000 0 0 1 &mpic 0 1
+ 06000 0 0 2 &mpic 1 1
+ 06000 0 0 3 &mpic 2 1
+ 06000 0 0 4 &mpic 3 1
+
+ /* IDSEL 0x14 (Slot 2) */
+ 0a000 0 0 1 &mpic 0 1
+ 0a000 0 0 2 &mpic 1 1
+ 0a000 0 0 3 &mpic 2 1
+ 0a000 0 0 4 &mpic 3 1
+
+ /* IDSEL 0x15 (Slot 3) */
+ 0a800 0 0 1 &mpic 1 1
+ 0a800 0 0 2 &mpic 2 1
+ 0a800 0 0 3 &mpic 3 1
+ 0a800 0 0 4 &mpic 0 1
+
+ /* IDSEL 0x16 (Slot 4) */
+ 0b000 0 0 1 &mpic 2 1
+ 0b000 0 0 2 &mpic 3 1
+ 0b000 0 0 3 &mpic 0 1
+ 0b000 0 0 4 &mpic 1 1
+
+ /* IDSEL 0x18 (Slot 5) */
+ 0c000 0 0 1 &mpic 0 1
+ 0c000 0 0 2 &mpic 1 1
+ 0c000 0 0 3 &mpic 2 1
+ 0c000 0 0 4 &mpic 3 1
+
+ /* IDSEL 0x1C (Tsi310 bridge PCI primary) */
+ 0E000 0 0 1 &mpic 0 1
+ 0E000 0 0 2 &mpic 1 1
+ 0E000 0 0 3 &mpic 2 1
+ 0E000 0 0 4 &mpic 3 1>;
- /* IDSEL 0x10 */
- 08000 0 0 1 &mpic 0 1
- 08000 0 0 2 &mpic 1 1
- 08000 0 0 3 &mpic 2 1
- 08000 0 0 4 &mpic 3 1
-
- /* IDSEL 0x11 */
- 08800 0 0 1 &mpic 0 1
- 08800 0 0 2 &mpic 1 1
- 08800 0 0 3 &mpic 2 1
- 08800 0 0 4 &mpic 3 1
-
- /* IDSEL 0x12 (Slot 1) */
- 09000 0 0 1 &mpic 0 1
- 09000 0 0 2 &mpic 1 1
- 09000 0 0 3 &mpic 2 1
- 09000 0 0 4 &mpic 3 1
-
- /* IDSEL 0x13 (Slot 2) */
- 09800 0 0 1 &mpic 1 1
- 09800 0 0 2 &mpic 2 1
- 09800 0 0 3 &mpic 3 1
- 09800 0 0 4 &mpic 0 1
-
- /* IDSEL 0x14 (Slot 3) */
- 0a000 0 0 1 &mpic 2 1
- 0a000 0 0 2 &mpic 3 1
- 0a000 0 0 3 &mpic 0 1
- 0a000 0 0 4 &mpic 1 1
-
- /* IDSEL 0x15 (Slot 4) */
- 0a800 0 0 1 &mpic 3 1
- 0a800 0 0 2 &mpic 0 1
- 0a800 0 0 3 &mpic 1 1
- 0a800 0 0 4 &mpic 2 1
-
- /* Bus 1 (Tundra Bridge) */
- /* IDSEL 0x12 (ISA bridge) */
- 19000 0 0 1 &mpic 0 1
- 19000 0 0 2 &mpic 1 1
- 19000 0 0 3 &mpic 2 1
- 19000 0 0 4 &mpic 3 1>;
interrupt-parent = <&mpic>;
interrupts = <18 2>;
bus-range = <0 0>;
- ranges = <02000000 0 80000000 80000000 0 20000000
- 01000000 0 00000000 e2000000 0 00100000>;
+ ranges = <02000000 0 80000000 80000000 0 10000000
+ 01000000 0 00000000 e2000000 0 00800000>;
clock-frequency = <3f940aa>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <8000 1000>;
- compatible = "85xx";
+ compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
device_type = "pci";
- i8259@19000 {
- clock-frequency = <0>;
- interrupt-controller;
- device_type = "interrupt-controller";
- reg = <19000 0 0 0 1>;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- built-in;
- compatible = "chrp,iic";
- big-endian;
- interrupts = <1>;
- interrupt-parent = <&pci1>;
+ pci_bridge@1c {
+ interrupt-map-mask = <f800 0 0 7>;
+ interrupt-map = <
+
+ /* IDSEL 0x00 (PrPMC Site) */
+ 0000 0 0 1 &mpic 0 1
+ 0000 0 0 2 &mpic 1 1
+ 0000 0 0 3 &mpic 2 1
+ 0000 0 0 4 &mpic 3 1
+
+ /* IDSEL 0x04 (VIA chip) */
+ 2000 0 0 1 &mpic 0 1
+ 2000 0 0 2 &mpic 1 1
+ 2000 0 0 3 &mpic 2 1
+ 2000 0 0 4 &mpic 3 1
+
+ /* IDSEL 0x05 (8139) */
+ 2800 0 0 1 &mpic 1 1
+
+ /* IDSEL 0x06 (Slot 6) */
+ 3000 0 0 1 &mpic 2 1
+ 3000 0 0 2 &mpic 3 1
+ 3000 0 0 3 &mpic 0 1
+ 3000 0 0 4 &mpic 1 1
+
+ /* IDSEL 0x07 (Slot 7) */
+ 3800 0 0 1 &mpic 3 1
+ 3800 0 0 2 &mpic 0 1
+ 3800 0 0 3 &mpic 1 1
+ 3800 0 0 4 &mpic 2 1>;
+
+ reg = <e000 0 0 0 0>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ ranges = <02000000 0 80000000
+ 02000000 0 80000000
+ 0 20000000
+ 01000000 0 00000000
+ 01000000 0 00000000
+ 0 00080000>;
+ clock-frequency = <1fca055>;
+
+ isa@4 {
+ device_type = "isa";
+ #interrupt-cells = <2>;
+ #size-cells = <1>;
+ #address-cells = <2>;
+ reg = <2000 0 0 0 0>;
+ ranges = <1 0 01000000 0 0 00001000>;
+ interrupt-parent = <&i8259>;
+
+ i8259: interrupt-controller@20 {
+ clock-frequency = <0>;
+ interrupt-controller;
+ device_type = "interrupt-controller";
+ reg = <1 20 2
+ 1 a0 2
+ 1 4d0 2>;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ built-in;
+ compatible = "chrp,iic";
+ interrupts = <0 1>;
+ interrupt-parent = <&mpic>;
+ };
+
+ rtc@70 {
+ compatible = "pnpPNP,b00";
+ reg = <1 70 2>;
+ };
+ };
};
};
@@ -263,20 +346,45 @@
/* IDSEL 0x15 */
a800 0 0 1 &mpic b 1
- a800 0 0 2 &mpic b 1
- a800 0 0 3 &mpic b 1
- a800 0 0 4 &mpic b 1>;
+ a800 0 0 2 &mpic 1 1
+ a800 0 0 3 &mpic 2 1
+ a800 0 0 4 &mpic 3 1>;
+
interrupt-parent = <&mpic>;
interrupts = <19 2>;
bus-range = <0 0>;
- ranges = <02000000 0 a0000000 a0000000 0 20000000
- 01000000 0 00000000 e3000000 0 00100000>;
+ ranges = <02000000 0 90000000 90000000 0 10000000
+ 01000000 0 00000000 e2800000 0 00800000>;
clock-frequency = <3f940aa>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <9000 1000>;
- compatible = "85xx";
+ compatible = "fsl,mpc8540-pci";
+ device_type = "pci";
+ };
+ /* PCI Express */
+ pcie@a000 {
+ interrupt-map-mask = <f800 0 0 7>;
+ interrupt-map = <
+
+ /* IDSEL 0x0 (PEX) */
+ 00000 0 0 1 &mpic 0 1
+ 00000 0 0 2 &mpic 1 1
+ 00000 0 0 3 &mpic 2 1
+ 00000 0 0 4 &mpic 3 1>;
+
+ interrupt-parent = <&mpic>;
+ interrupts = <1a 2>;
+ bus-range = <0 ff>;
+ ranges = <02000000 0 a0000000 a0000000 0 20000000
+ 01000000 0 00000000 e3000000 0 08000000>;
+ clock-frequency = <1fca055>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <a000 1000>;
+ compatible = "fsl,mpc8548-pcie";
device_type = "pci";
};
diff --git a/arch/powerpc/boot/dts/mpc8555cds.dts b/arch/powerpc/boot/dts/mpc8555cds.dts
index 17e45d9a382..c3c88825212 100644
--- a/arch/powerpc/boot/dts/mpc8555cds.dts
+++ b/arch/powerpc/boot/dts/mpc8555cds.dts
@@ -193,7 +193,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <8000 1000>;
- compatible = "85xx";
+ compatible = "fsl,mpc8540-pci";
device_type = "pci";
i8259@19000 {
@@ -230,7 +230,7 @@
#size-cells = <2>;
#address-cells = <3>;
reg = <9000 1000>;
- compatible = "85xx";
+ compatible = "fsl,mpc8540-pci";
device_type = "pci";
};
diff --git a/arch/powerpc/boot/dts/mpc8560ads.dts b/arch/powerpc/boot/dts/mpc8560ads.dts
index 21ccaaa2799..16dbe848cec 100644
--- a/arch/powerpc/boot/dts/mpc8560ads.dts
+++ b/arch/powerpc/boot/dts/mpc8560ads.dts
@@ -136,7 +136,7 @@
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
- compatible = "85xx";
+ compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
device_type = "pci";
reg = <8000 1000>;
clock-frequency = <3f940aa>;
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index 6bb18f2807a..b1dcfbe8c1f 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -170,6 +170,66 @@
interrupt-parent = <&mpic>;
};
+ global-utilities@e0000 { //global utilities block
+ compatible = "fsl,mpc8548-guts";
+ reg = <e0000 1000>;
+ fsl,has-rstcr;
+ };
+
+ pci@8000 {
+ interrupt-map-mask = <f800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x12 AD18 */
+ 9000 0 0 1 &mpic 5 1
+ 9000 0 0 2 &mpic 6 1
+ 9000 0 0 3 &mpic 7 1
+ 9000 0 0 4 &mpic 4 1
+
+ /* IDSEL 0x13 AD19 */
+ 9800 0 0 1 &mpic 6 1
+ 9800 0 0 2 &mpic 7 1
+ 9800 0 0 3 &mpic 4 1
+ 9800 0 0 4 &mpic 5 1>;
+
+ interrupt-parent = <&mpic>;
+ interrupts = <18 2>;
+ bus-range = <0 ff>;
+ ranges = <02000000 0 80000000 80000000 0 20000000
+ 01000000 0 00000000 e2000000 0 00800000>;
+ clock-frequency = <3f940aa>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <8000 1000>;
+ compatible = "fsl,mpc8540-pci";
+ device_type = "pci";
+ };
+
+ /* PCI Express */
+ pcie@a000 {
+ interrupt-map-mask = <f800 0 0 7>;
+ interrupt-map = <
+
+ /* IDSEL 0x0 (PEX) */
+ 00000 0 0 1 &mpic 0 1
+ 00000 0 0 2 &mpic 1 1
+ 00000 0 0 3 &mpic 2 1
+ 00000 0 0 4 &mpic 3 1>;
+
+ interrupt-parent = <&mpic>;
+ interrupts = <1a 2>;
+ bus-range = <0 ff>;
+ ranges = <02000000 0 a0000000 a0000000 0 20000000
+ 01000000 0 00000000 e3000000 0 08000000>;
+ clock-frequency = <1fca055>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <a000 1000>;
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ };
+
serial@4600 {
device_type = "serial";
compatible = "ns16550";
diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
index 6a78a2b37c0..5d82709cfcb 100644
--- a/arch/powerpc/boot/dts/mpc8641_hpcn.dts
+++ b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
@@ -211,8 +211,8 @@
interrupt-parent = <&mpic>;
};
- pci@8000 {
- compatible = "86xx";
+ pcie@8000 {
+ compatible = "fsl,mpc8641-pcie";
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -399,8 +399,8 @@
};
- pci@9000 {
- compatible = "86xx";
+ pcie@9000 {
+ compatible = "fsl,mpc8641-pcie";
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
diff --git a/arch/powerpc/configs/lite5200_defconfig b/arch/powerpc/configs/lite5200_defconfig
index d12a981398b..9c30ca45161 100644
--- a/arch/powerpc/configs/lite5200_defconfig
+++ b/arch/powerpc/configs/lite5200_defconfig
@@ -196,7 +196,7 @@ CONFIG_PM=y
# CONFIG_PM_LEGACY is not set
# CONFIG_PM_DEBUG is not set
# CONFIG_PM_SYSFS_DEPRECATED is not set
-# CONFIG_SOFTWARE_SUSPEND is not set
+# CONFIG_HIBERNATION is not set
CONFIG_SECCOMP=y
# CONFIG_WANT_DEVICE_TREE is not set
CONFIG_ISA_DMA_API=y
diff --git a/arch/powerpc/configs/mpc8544_ds_defconfig b/arch/powerpc/configs/mpc8544_ds_defconfig
index c40a25a79cb..7995231def2 100644
--- a/arch/powerpc/configs/mpc8544_ds_defconfig
+++ b/arch/powerpc/configs/mpc8544_ds_defconfig
@@ -1,9 +1,26 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22-rc7
-# Sun Jul 1 23:56:58 2007
+# Linux kernel version: 2.6.22
+# Fri Jul 20 14:09:13 2007
#
# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+# CONFIG_6xx is not set
+CONFIG_PPC_85xx=y
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_E200 is not set
+CONFIG_85xx=y
+CONFIG_E500=y
+CONFIG_BOOKE=y
+CONFIG_FSL_BOOKE=y
+# CONFIG_PHYS_64BIT is not set
+# CONFIG_SPE is not set
+# CONFIG_PPC_MM_SLICES is not set
CONFIG_PPC32=y
CONFIG_PPC_MERGE=y
CONFIG_MMU=y
@@ -14,6 +31,7 @@ CONFIG_ARCH_HAS_ILOG2_U32=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y
CONFIG_GENERIC_NVRAM=y
@@ -25,28 +43,8 @@ CONFIG_PPC_UDBG_16550=y
CONFIG_AUDIT_ARCH=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFAULT_UIMAGE=y
-
-#
-# Processor support
-#
-# CONFIG_CLASSIC32 is not set
-# CONFIG_PPC_82xx is not set
-# CONFIG_PPC_83xx is not set
-CONFIG_PPC_85xx=y
-# CONFIG_PPC_86xx is not set
-# CONFIG_PPC_8xx is not set
-# CONFIG_40x is not set
-# CONFIG_44x is not set
-# CONFIG_E200 is not set
-CONFIG_85xx=y
-CONFIG_E500=y
# CONFIG_PPC_DCR_NATIVE is not set
# CONFIG_PPC_DCR_MMIO is not set
-CONFIG_BOOKE=y
-CONFIG_FSL_BOOKE=y
-# CONFIG_PHYS_64BIT is not set
-# CONFIG_SPE is not set
-# CONFIG_PPC_MM_SLICES is not set
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
@@ -63,13 +61,12 @@ CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-CONFIG_IPC_NS=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
# CONFIG_BSD_PROCESS_ACCT_V3 is not set
# CONFIG_TASKSTATS is not set
-# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_IKCONFIG=y
@@ -86,7 +83,7 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_HOTPLUG is not set
+CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
@@ -105,24 +102,17 @@ CONFIG_SLAB=y
CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
-
-#
-# Loadable module support
-#
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_KMOD=y
-
-#
-# Block layer
-#
CONFIG_BLOCK=y
CONFIG_LBD=y
# CONFIG_BLK_DEV_IO_TRACE is not set
# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
#
# IO Schedulers
@@ -153,7 +143,7 @@ CONFIG_MPC8544_DS=y
CONFIG_MPC85xx=y
CONFIG_MPIC=y
# CONFIG_MPIC_WEIRD is not set
-# CONFIG_PPC_I8259 is not set
+CONFIG_PPC_I8259=y
# CONFIG_PPC_RTAS is not set
# CONFIG_MMIO_NVRAM is not set
# CONFIG_PPC_MPC106 is not set
@@ -191,6 +181,8 @@ CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_RESOURCES_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
CONFIG_PROC_DEVICETREE=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="root=/dev/sda3 rw console=ttyS0,115200"
@@ -205,15 +197,21 @@ CONFIG_ISA_DMA_API=y
#
CONFIG_ZONE_DMA=y
CONFIG_PPC_INDIRECT_PCI=y
-CONFIG_PPC_INDIRECT_PCI_BE=y
CONFIG_FSL_SOC=y
-# CONFIG_PCI is not set
-# CONFIG_PCI_DOMAINS is not set
-# CONFIG_ARCH_SUPPORTS_MSI is not set
+CONFIG_FSL_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_SYSCALL=y
+# CONFIG_PCIEPORTBUS is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
+# CONFIG_PCI_MSI is not set
+# CONFIG_PCI_DEBUG is not set
#
# PCCARD (PCMCIA/CardBus) support
#
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
#
# Advanced setup
@@ -254,7 +252,6 @@ CONFIG_ASK_IP_FIB_HASH=y
CONFIG_IP_FIB_HASH=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
-# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -330,6 +327,7 @@ CONFIG_FIB_RULES=y
# CONFIG_MAC80211 is not set
# CONFIG_IEEE80211 is not set
# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
#
# Device Drivers
@@ -340,45 +338,35 @@ CONFIG_FIB_RULES=y
#
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
# CONFIG_CONNECTOR is not set
# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-# CONFIG_PNPACPI is not set
-
-#
-# Block devices
-#
+CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
CONFIG_BLK_DEV_NBD=y
+# CONFIG_BLK_DEV_SX8 is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
-
-#
-# Misc devices
-#
-# CONFIG_BLINK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_PHANTOM is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
# CONFIG_IDE is not set
#
@@ -386,6 +374,7 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
#
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
# CONFIG_SCSI_TGT is not set
# CONFIG_SCSI_NETLINK is not set
CONFIG_SCSI_PROC_FS=y
@@ -422,25 +411,120 @@ CONFIG_SCSI_WAIT_SCAN=m
# SCSI low-level drivers
#
# CONFIG_ISCSI_TCP is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_SRP is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
+# CONFIG_SATA_AHCI is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
# CONFIG_PATA_PLATFORM is not set
+# CONFIG_MD is not set
#
-# Multi-device support (RAID and LVM)
+# Fusion MPT device support
#
-# CONFIG_MD is not set
-# CONFIG_MACINTOSH_DRIVERS is not set
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_SPI is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
#
-# Network device support
+# IEEE 1394 (FireWire) support
#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
+# CONFIG_ARCNET is not set
CONFIG_PHYLIB=y
#
@@ -454,17 +538,44 @@ CONFIG_PHYLIB=y
CONFIG_VITESSE_PHY=y
# CONFIG_SMSC_PHY is not set
# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
# CONFIG_FIXED_PHY is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_PCI is not set
CONFIG_NETDEV_1000=y
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
CONFIG_GIANFAR=y
CONFIG_GFAR_NAPI=y
+# CONFIG_QLA3XXX is not set
+# CONFIG_ATL1 is not set
CONFIG_NETDEV_10000=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+# CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
+# CONFIG_MLX4_CORE is not set
+# CONFIG_TR is not set
#
# Wireless LAN
@@ -472,21 +583,16 @@ CONFIG_NETDEV_10000=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
-
-#
-# ISDN subsystem
-#
# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
# CONFIG_PHONE is not set
#
@@ -521,6 +627,7 @@ CONFIG_INPUT=y
CONFIG_SERIO=y
CONFIG_SERIO_I8042=y
CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_PCIPS2 is not set
CONFIG_SERIO_LIBPS2=y
# CONFIG_SERIO_RAW is not set
# CONFIG_GAMEPORT is not set
@@ -539,6 +646,7 @@ CONFIG_HW_CONSOLE=y
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
CONFIG_SERIAL_8250_NR_UARTS=4
CONFIG_SERIAL_8250_RUNTIME_UARTS=4
# CONFIG_SERIAL_8250_EXTENDED is not set
@@ -550,14 +658,11 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_UARTLITE is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
# CONFIG_SERIAL_OF_PLATFORM is not set
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
-
-#
-# IPMI
-#
# CONFIG_IPMI_HANDLER is not set
# CONFIG_WATCHDOG is not set
# CONFIG_HW_RANDOM is not set
@@ -565,12 +670,12 @@ CONFIG_NVRAM=y
CONFIG_GEN_RTC=y
CONFIG_GEN_RTC_X=y
# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
# CONFIG_RAW_DRIVER is not set
-
-#
-# TPM devices
-#
# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
# CONFIG_I2C is not set
#
@@ -578,11 +683,8 @@ CONFIG_GEN_RTC_X=y
#
# CONFIG_SPI is not set
# CONFIG_SPI_MASTER is not set
-
-#
-# Dallas's 1-wire bus
-#
# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
#
@@ -655,19 +757,14 @@ CONFIG_DUMMY_CONSOLE=y
# Sound
#
# CONFIG_SOUND is not set
-
-#
-# HID Devices
-#
+CONFIG_HID_SUPPORT=y
CONFIG_HID=y
# CONFIG_HID_DEBUG is not set
-
-#
-# USB support
-#
-# CONFIG_USB_ARCH_HAS_HCD is not set
-# CONFIG_USB_ARCH_HAS_OHCI is not set
-# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+# CONFIG_USB is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
@@ -691,14 +788,7 @@ CONFIG_HID=y
#
# LED Triggers
#
-
-#
-# InfiniBand support
-#
-
-#
-# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
-#
+# CONFIG_INFINIBAND is not set
#
# Real Time Clock
@@ -719,19 +809,13 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_TEST is not set
#
-# I2C RTC drivers
-#
-
-#
-# SPI RTC drivers
-#
-
-#
# Platform RTC drivers
#
+# CONFIG_RTC_DRV_CMOS is not set
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
@@ -752,6 +836,11 @@ CONFIG_RTC_INTF_DEV=y
#
#
+# Userspace I/O
+#
+# CONFIG_UIO is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
@@ -859,7 +948,6 @@ CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
#
# Partition Types
@@ -941,6 +1029,7 @@ CONFIG_BITREVERSE=y
# CONFIG_CRC16 is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_PLIST=y
@@ -965,6 +1054,7 @@ CONFIG_ENABLE_MUST_CHECK=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
+CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_SLAB is not set
@@ -996,10 +1086,6 @@ CONFIG_FORCED_INLINING=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
CONFIG_CRYPTO=y
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_BLKCIPHER=y
@@ -1038,7 +1124,4 @@ CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_CRC32C is not set
# CONFIG_CRYPTO_CAMELLIA is not set
# CONFIG_CRYPTO_TEST is not set
-
-#
-# Hardware crypto devices
-#
+CONFIG_CRYPTO_HW=y
diff --git a/arch/powerpc/configs/mpc8568mds_defconfig b/arch/powerpc/configs/mpc8568mds_defconfig
index 6451d4dd28a..417d3e6abcd 100644
--- a/arch/powerpc/configs/mpc8568mds_defconfig
+++ b/arch/powerpc/configs/mpc8568mds_defconfig
@@ -1,9 +1,26 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22-rc7
-# Sun Jul 1 23:56:59 2007
+# Linux kernel version: 2.6.22
+# Fri Jul 20 13:55:04 2007
#
# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+# CONFIG_6xx is not set
+CONFIG_PPC_85xx=y
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_E200 is not set
+CONFIG_85xx=y
+CONFIG_E500=y
+CONFIG_BOOKE=y
+CONFIG_FSL_BOOKE=y
+# CONFIG_PHYS_64BIT is not set
+CONFIG_SPE=y
+# CONFIG_PPC_MM_SLICES is not set
CONFIG_PPC32=y
CONFIG_PPC_MERGE=y
CONFIG_MMU=y
@@ -14,6 +31,7 @@ CONFIG_ARCH_HAS_ILOG2_U32=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y
CONFIG_GENERIC_NVRAM=y
@@ -25,28 +43,8 @@ CONFIG_PPC_UDBG_16550=y
CONFIG_AUDIT_ARCH=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFAULT_UIMAGE=y
-
-#
-# Processor support
-#
-# CONFIG_CLASSIC32 is not set
-# CONFIG_PPC_82xx is not set
-# CONFIG_PPC_83xx is not set
-CONFIG_PPC_85xx=y
-# CONFIG_PPC_86xx is not set
-# CONFIG_PPC_8xx is not set
-# CONFIG_40x is not set
-# CONFIG_44x is not set
-# CONFIG_E200 is not set
-CONFIG_85xx=y
-CONFIG_E500=y
# CONFIG_PPC_DCR_NATIVE is not set
# CONFIG_PPC_DCR_MMIO is not set
-CONFIG_BOOKE=y
-CONFIG_FSL_BOOKE=y
-# CONFIG_PHYS_64BIT is not set
-CONFIG_SPE=y
-# CONFIG_PPC_MM_SLICES is not set
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
@@ -63,12 +61,11 @@ CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_IPC_NS is not set
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
-# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
# CONFIG_AUDIT is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
@@ -100,24 +97,17 @@ CONFIG_SLAB=y
CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
-
-#
-# Loadable module support
-#
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
# CONFIG_KMOD is not set
-
-#
-# Block layer
-#
CONFIG_BLOCK=y
# CONFIG_LBD is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
#
# IO Schedulers
@@ -186,6 +176,8 @@ CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_RESOURCES_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
CONFIG_PROC_DEVICETREE=y
# CONFIG_CMDLINE_BOOL is not set
# CONFIG_PM is not set
@@ -201,14 +193,20 @@ CONFIG_ZONE_DMA=y
CONFIG_PPC_INDIRECT_PCI=y
CONFIG_PPC_INDIRECT_PCI_BE=y
CONFIG_FSL_SOC=y
-# CONFIG_PCI is not set
-# CONFIG_PCI_DOMAINS is not set
-# CONFIG_ARCH_SUPPORTS_MSI is not set
+CONFIG_FSL_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_SYSCALL=y
+# CONFIG_PCIEPORTBUS is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
+# CONFIG_PCI_MSI is not set
+# CONFIG_PCI_DEBUG is not set
#
# PCCARD (PCMCIA/CardBus) support
#
# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
#
# Advanced setup
@@ -309,6 +307,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_MAC80211 is not set
# CONFIG_IEEE80211 is not set
# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
#
# Device Drivers
@@ -323,42 +322,31 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
# CONFIG_CONNECTOR is not set
# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-# CONFIG_PNPACPI is not set
-
-#
-# Block devices
-#
+CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
-
-#
-# Misc devices
-#
-# CONFIG_BLINK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_PHANTOM is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
# CONFIG_IDE is not set
#
@@ -366,6 +354,7 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
#
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
# CONFIG_SCSI_TGT is not set
# CONFIG_SCSI_NETLINK is not set
CONFIG_SCSI_PROC_FS=y
@@ -402,23 +391,65 @@ CONFIG_SCSI_WAIT_SCAN=m
# SCSI low-level drivers
#
# CONFIG_ISCSI_TCP is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_SRP is not set
# CONFIG_ATA is not set
+# CONFIG_MD is not set
#
-# Multi-device support (RAID and LVM)
+# Fusion MPT device support
#
-# CONFIG_MD is not set
-# CONFIG_MACINTOSH_DRIVERS is not set
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_SPI is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
#
-# Network device support
+# IEEE 1394 (FireWire) support
#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
+# CONFIG_ARCNET is not set
CONFIG_PHYLIB=y
#
@@ -432,17 +463,44 @@ CONFIG_MARVELL_PHY=y
# CONFIG_VITESSE_PHY is not set
# CONFIG_SMSC_PHY is not set
# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
# CONFIG_FIXED_PHY is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_PCI is not set
CONFIG_NETDEV_1000=y
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
CONFIG_GIANFAR=y
CONFIG_GFAR_NAPI=y
+# CONFIG_QLA3XXX is not set
+# CONFIG_ATL1 is not set
CONFIG_NETDEV_10000=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+# CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
+# CONFIG_MLX4_CORE is not set
+# CONFIG_TR is not set
#
# Wireless LAN
@@ -450,21 +508,16 @@ CONFIG_NETDEV_10000=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
-
-#
-# ISDN subsystem
-#
# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
# CONFIG_PHONE is not set
#
@@ -510,6 +563,7 @@ CONFIG_INPUT=y
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
CONFIG_SERIAL_8250_NR_UARTS=4
CONFIG_SERIAL_8250_RUNTIME_UARTS=4
# CONFIG_SERIAL_8250_EXTENDED is not set
@@ -521,14 +575,11 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_UARTLITE is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
# CONFIG_SERIAL_OF_PLATFORM is not set
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
-
-#
-# IPMI
-#
# CONFIG_IPMI_HANDLER is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -538,17 +589,23 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
# CONFIG_BOOKE_WDT is not set
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
CONFIG_HW_RANDOM=y
# CONFIG_NVRAM is not set
CONFIG_GEN_RTC=y
# CONFIG_GEN_RTC_X is not set
# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
# CONFIG_RAW_DRIVER is not set
-
-#
-# TPM devices
-#
# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
CONFIG_I2C_CHARDEV=y
@@ -563,23 +620,43 @@ CONFIG_I2C_CHARDEV=y
#
# I2C Hardware Bus support
#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
CONFIG_I2C_MPC=y
+# CONFIG_I2C_NFORCE2 is not set
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_TAOS_EVM is not set
# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
#
# Miscellaneous I2C Chip support
#
# CONFIG_SENSORS_DS1337 is not set
# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_DS1682 is not set
# CONFIG_SENSORS_EEPROM is not set
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_M41T00 is not set
# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
@@ -590,11 +667,8 @@ CONFIG_I2C_MPC=y
#
# CONFIG_SPI is not set
# CONFIG_SPI_MASTER is not set
-
-#
-# Dallas's 1-wire bus
-#
# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
# CONFIG_SENSORS_ABITUGURU is not set
@@ -628,10 +702,13 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_MAX6650 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SIS5595 is not set
# CONFIG_SENSORS_SMSC47M1 is not set
# CONFIG_SENSORS_SMSC47M192 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
# CONFIG_SENSORS_W83781D is not set
# CONFIG_SENSORS_W83791D is not set
# CONFIG_SENSORS_W83792D is not set
@@ -670,19 +747,14 @@ CONFIG_DAB=y
# Sound
#
# CONFIG_SOUND is not set
-
-#
-# HID Devices
-#
+CONFIG_HID_SUPPORT=y
CONFIG_HID=y
# CONFIG_HID_DEBUG is not set
-
-#
-# USB support
-#
-# CONFIG_USB_ARCH_HAS_HCD is not set
-# CONFIG_USB_ARCH_HAS_OHCI is not set
-# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+# CONFIG_USB is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
@@ -706,14 +778,7 @@ CONFIG_HID=y
#
# LED Triggers
#
-
-#
-# InfiniBand support
-#
-
-#
-# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
-#
+# CONFIG_INFINIBAND is not set
#
# Real Time Clock
@@ -734,6 +799,11 @@ CONFIG_HID=y
#
#
+# Userspace I/O
+#
+# CONFIG_UIO is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
@@ -829,7 +899,6 @@ CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
#
# Partition Types
@@ -868,6 +937,7 @@ CONFIG_BITREVERSE=y
# CONFIG_CRC16 is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
@@ -892,6 +962,7 @@ CONFIG_ENABLE_MUST_CHECK=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
+CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_SLAB is not set
@@ -915,7 +986,7 @@ CONFIG_FORCED_INLINING=y
CONFIG_DEBUGGER=y
# CONFIG_XMON is not set
# CONFIG_BDI_SWITCH is not set
-CONFIG_BOOTX_TEXT=y
+# CONFIG_BOOTX_TEXT is not set
CONFIG_PPC_EARLY_DEBUG=y
# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
# CONFIG_PPC_EARLY_DEBUG_G5 is not set
@@ -932,10 +1003,6 @@ CONFIG_PPC_EARLY_DEBUG=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
CONFIG_CRYPTO=y
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_BLKCIPHER=y
@@ -973,7 +1040,4 @@ CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_CRC32C is not set
# CONFIG_CRYPTO_CAMELLIA is not set
# CONFIG_CRYPTO_TEST is not set
-
-#
-# Hardware crypto devices
-#
+CONFIG_CRYPTO_HW=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 0d8ba623e29..08525d6fb1f 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -218,7 +218,7 @@ CONFIG_PM=y
CONFIG_PM_DEBUG=y
# CONFIG_DISABLE_CONSOLE_SUSPEND is not set
CONFIG_PM_SYSFS_DEPRECATED=y
-CONFIG_SOFTWARE_SUSPEND=y
+CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION=""
CONFIG_APM_EMULATION=y
CONFIG_SECCOMP=y
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 42c42ecad00..f39a72f30aa 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -37,9 +37,9 @@ obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
-obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o suspend.o
-obj32-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_32.o
-obj64-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_64.o swsusp_asm64.o
+obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
+obj32-$(CONFIG_HIBERNATION) += swsusp_32.o
+obj64-$(CONFIG_HIBERNATION) += swsusp_64.o swsusp_asm64.o
obj32-$(CONFIG_MODULES) += module_32.o
ifeq ($(CONFIG_PPC_MERGE),y)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 8cdd48ea439..1448af92c6a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -809,8 +809,9 @@ system_reset_iSeries:
mtmsrd r24 /* RI on */
lhz r24,PACAPACAINDEX(r13) /* Get processor # */
cmpwi 0,r24,0 /* Are we processor 0? */
- beq .__start_initialization_iSeries /* Start up the first processor */
- mfspr r4,SPRN_CTRLF
+ bne 1f
+ b .__start_initialization_iSeries /* Start up the first processor */
+1: mfspr r4,SPRN_CTRLF
li r5,CTRL_RUNLATCH /* Turn off the run light */
andc r4,r4,r5
mtspr SPRN_CTRLT,r4
@@ -1611,7 +1612,7 @@ _GLOBAL(generic_secondary_smp_init)
#endif
#ifdef CONFIG_PPC_ISERIES
-_STATIC(__start_initialization_iSeries)
+_INIT_STATIC(__start_initialization_iSeries)
/* Clear out the BSS */
LOAD_REG_IMMEDIATE(r11,__bss_stop)
LOAD_REG_IMMEDIATE(r8,__bss_start)
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index 601ef79a591..2a5cf868037 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -7,6 +7,7 @@
#include <linux/pci.h>
#include <linux/mm.h>
#include <asm/io.h>
+#include <asm/pci-bridge.h>
/*
* Here comes the ppc64 implementation of the IOMAP
@@ -136,7 +137,12 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
- /* Nothing to do */
+ if (isa_vaddr_is_ioport(addr))
+ return;
+ if (pcibios_vaddr_is_ioport(addr))
+ return;
+ iounmap(addr);
}
+
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 2fc87862146..24bea97c736 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -418,10 +418,10 @@ irq_hw_number_t virq_to_hw(unsigned int virq)
}
EXPORT_SYMBOL_GPL(virq_to_hw);
-struct irq_host *irq_alloc_host(unsigned int revmap_type,
- unsigned int revmap_arg,
- struct irq_host_ops *ops,
- irq_hw_number_t inval_irq)
+__init_refok struct irq_host *irq_alloc_host(unsigned int revmap_type,
+ unsigned int revmap_arg,
+ struct irq_host_ops *ops,
+ irq_hw_number_t inval_irq)
{
struct irq_host *host;
unsigned int size = sizeof(struct irq_host);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index fe7d1255e11..083cfbdbe0b 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -65,7 +65,7 @@ static void __devinit pci_setup_pci_controller(struct pci_controller *hose)
spin_unlock(&hose_spinlock);
}
-struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
+__init_refok struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
{
struct pci_controller *phb;
@@ -101,6 +101,29 @@ void pcibios_free_controller(struct pci_controller *phb)
kfree(phb);
}
+int pcibios_vaddr_is_ioport(void __iomem *address)
+{
+ int ret = 0;
+ struct pci_controller *hose;
+ unsigned long size;
+
+ spin_lock(&hose_spinlock);
+ list_for_each_entry(hose, &hose_list, list_node) {
+#ifdef CONFIG_PPC64
+ size = hose->pci_io_size;
+#else
+ size = hose->io_resource.end - hose->io_resource.start + 1;
+#endif
+ if (address >= hose->io_base_virt &&
+ address < (hose->io_base_virt + size)) {
+ ret = 1;
+ break;
+ }
+ }
+ spin_unlock(&hose_spinlock);
+ return ret;
+}
+
/*
* Return the domain number for this bus.
*/
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 0adf077f3f3..cd35c969bb2 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -60,6 +60,24 @@ LIST_HEAD(hose_list);
static int pci_bus_count;
static void
+fixup_hide_host_resource_fsl(struct pci_dev* dev)
+{
+ int i, class = dev->class >> 8;
+
+ if ((class == PCI_CLASS_PROCESSOR_POWERPC) &&
+ (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
+ (dev->bus->parent == NULL)) {
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ dev->resource[i].start = 0;
+ dev->resource[i].end = 0;
+ dev->resource[i].flags = 0;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+
+static void
fixup_broken_pcnet32(struct pci_dev* dev)
{
if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
@@ -415,15 +433,13 @@ probe_resource(struct pci_bus *parent, struct resource *pr,
return 0;
}
-static void __init
-update_bridge_base(struct pci_bus *bus, int i)
+void __init
+update_bridge_resource(struct pci_dev *dev, struct resource *res)
{
- struct resource *res = bus->resource[i];
u8 io_base_lo, io_limit_lo;
u16 mem_base, mem_limit;
u16 cmd;
unsigned long start, end, off;
- struct pci_dev *dev = bus->self;
struct pci_controller *hose = dev->sysdata;
if (!hose) {
@@ -467,12 +483,20 @@ update_bridge_base(struct pci_bus *bus, int i)
pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);
} else {
- DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n",
- pci_name(dev), i, res->flags);
+ DBG(KERN_ERR "PCI: ugh, bridge %s res has flags=%lx\n",
+ pci_name(dev), res->flags);
}
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
+static void __init
+update_bridge_base(struct pci_bus *bus, int i)
+{
+ struct resource *res = bus->resource[i];
+ struct pci_dev *dev = bus->self;
+ update_bridge_resource(dev, res);
+}
+
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
struct resource *pr, *r = &dev->resource[idx];
@@ -1223,7 +1247,7 @@ pcibios_init(void)
subsys_initcall(pcibios_init);
-void __init pcibios_fixup_bus(struct pci_bus *bus)
+void pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
unsigned long io_offset;
@@ -1468,3 +1492,10 @@ EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
+
+extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int cap);
+int early_find_capability(struct pci_controller *hose, int bus, int devfn,
+ int cap)
+{
+ return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
+}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 84f000a45e3..a83727b308a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -423,7 +423,11 @@ void show_regs(struct pt_regs * regs)
printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
trap = TRAP(regs);
if (trap == 0x300 || trap == 0x600)
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+ printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
+#else
printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
+#endif
printk("TASK = %p[%d] '%s' THREAD: %p",
current, current->pid, current->comm, task_thread_info(current));
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 3786dcc8a7b..b5c96af955c 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -24,7 +24,7 @@
/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS 4
#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
- (ns) >= 0)
+ (ns) > 0)
static struct of_bus *of_match_bus(struct device_node *np);
static int __of_address_to_resource(struct device_node *dev,
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 4924c48cb1f..50ef38cffdb 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -496,6 +496,10 @@ int check_legacy_ioport(unsigned long base_port)
break;
}
np = of_find_node_by_type(NULL, "8042");
+ /* Pegasos has no device_type on its 8042 node, look for the
+ * name instead */
+ if (!np)
+ np = of_find_node_by_name(NULL, "8042");
break;
case FDC_BASE: /* FDC1 */
np = of_find_node_by_type(NULL, "fdc");
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index fc6647d332c..f85f402ceae 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index cbca1df8bc6..0f9b4eadfbc 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -155,7 +155,7 @@ static int early_console_initialized;
* Called by setup_system after ppc_md->probe and ppc_md->early_init.
* Call it again after setting udbg_putc in ppc_md->setup_arch.
*/
-void register_early_udbg_console(void)
+void __init register_early_udbg_console(void)
{
if (early_console_initialized)
return;
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
index 2f24ea0d723..ada5b42dd23 100644
--- a/arch/powerpc/lib/rheap.c
+++ b/arch/powerpc/lib/rheap.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/slab.h>
#include <asm/rheap.h>
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index e3a1e8dc536..8f4d2dc4caf 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -138,8 +138,8 @@ void __init lmb_analyze(void)
static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
unsigned long size)
{
- unsigned long i, coalesced = 0;
- long adjacent;
+ unsigned long coalesced = 0;
+ long adjacent, i;
/* First try and coalesce this LMB with another. */
for (i=0; i < rgn->cnt; i++) {
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index 133665754a7..4a890cb42b9 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -21,6 +21,7 @@
#include <linux/dcookies.h>
#include <linux/kref.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/numa.h>
diff --git a/arch/powerpc/platforms/82xx/mpc82xx_ads.c b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
index da20832b27f..2d1b05b9f8e 100644
--- a/arch/powerpc/platforms/82xx/mpc82xx_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
@@ -553,7 +553,8 @@ static void __init mpc82xx_add_bridge(struct device_node *np)
setup_indirect_pci(hose,
r.start + offsetof(pci_cpm2_t, pci_cfg_addr),
- r.start + offsetof(pci_cpm2_t, pci_cfg_data));
+ r.start + offsetof(pci_cpm2_t, pci_cfg_data),
+ 0);
pci_process_bridge_OF_ranges(hose, np, 1);
}
diff --git a/arch/powerpc/platforms/83xx/pci.c b/arch/powerpc/platforms/83xx/pci.c
index c0e2b89154e..92069469de2 100644
--- a/arch/powerpc/platforms/83xx/pci.c
+++ b/arch/powerpc/platforms/83xx/pci.c
@@ -74,11 +74,11 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
*/
/* PCI 1 */
if ((rsrc.start & 0xfffff) == 0x8500) {
- setup_indirect_pci(hose, immr + 0x8300, immr + 0x8304);
+ setup_indirect_pci(hose, immr + 0x8300, immr + 0x8304, 0);
}
/* PCI 2 */
if ((rsrc.start & 0xfffff) == 0x8600) {
- setup_indirect_pci(hose, immr + 0x8380, immr + 0x8384);
+ setup_indirect_pci(hose, immr + 0x8380, immr + 0x8384, 0);
primary = 0;
}
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 629926e01e9..f58184086c8 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -18,6 +18,7 @@ config MPC8560_ADS
config MPC85xx_CDS
bool "Freescale MPC85xx CDS"
select DEFAULT_UIMAGE
+ select PPC_I8259
help
This option enables support for the MPC85xx CDS board
@@ -30,6 +31,7 @@ config MPC85xx_MDS
config MPC8544_DS
bool "Freescale MPC8544 DS"
+ select PPC_I8259
select DEFAULT_UIMAGE
help
This option enables support for the MPC8544 DS board
@@ -50,9 +52,9 @@ config MPC8560
config MPC85xx
bool
select PPC_UDBG_16550
- select PPC_INDIRECT_PCI
- select PPC_INDIRECT_PCI_BE
+ select PPC_INDIRECT_PCI if PCI
select MPIC
+ select FSL_PCI if PCI
select SERIAL_8250_SHARE_IRQ if SERIAL_8250
default y if MPC8540_ADS || MPC85xx_CDS || MPC8560_ADS \
|| MPC85xx_MDS || MPC8544_DS
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 4e02cbb14cf..d70f2d0f9d3 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for the PowerPC 85xx linux kernel.
#
-obj-$(CONFIG_PPC_85xx) += misc.o pci.o
+obj-$(CONFIG_PPC_85xx) += misc.o
obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o
obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o
obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o
diff --git a/arch/powerpc/platforms/85xx/mpc8544_ds.c b/arch/powerpc/platforms/85xx/mpc8544_ds.c
index 6fb90aab879..4905f6f8903 100644
--- a/arch/powerpc/platforms/85xx/mpc8544_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8544_ds.c
@@ -2,6 +2,8 @@
* MPC8544 DS Board Setup
*
* Author Xianghua Xiao (x.xiao@freescale.com)
+ * Roy Zang <tie-fei.zang@freescale.com>
+ * - Add PCI/PCI Express support
* Copyright 2007 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -12,13 +14,16 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
+#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
+#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
#include <asm/mpc85xx.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
@@ -27,6 +32,7 @@
#include <asm/i8259.h>
#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
#include "mpc85xx.h"
#undef DEBUG
@@ -37,6 +43,17 @@
#define DBG(fmt, args...)
#endif
+#ifdef CONFIG_PPC_I8259
+static void mpc8544_8259_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned int cascade_irq = i8259_irq();
+
+ if (cascade_irq != NO_IRQ) {
+ generic_handle_irq(cascade_irq);
+ }
+ desc->chip->eoi(irq);
+}
+#endif /* CONFIG_PPC_I8259 */
void __init mpc8544_ds_pic_init(void)
{
@@ -96,19 +113,240 @@ void __init mpc8544_ds_pic_init(void)
#endif /* CONFIG_PPC_I8259 */
}
+#ifdef CONFIG_PCI
+enum pirq { PIRQA = 8, PIRQB, PIRQC, PIRQD, PIRQE, PIRQF, PIRQG, PIRQH };
+
+/*
+ * Value in table -- IRQ number
+ */
+const unsigned char uli1575_irq_route_table[16] = {
+ 0, /* 0: Reserved */
+ 0x8,
+ 0, /* 2: Reserved */
+ 0x2,
+ 0x4,
+ 0x5,
+ 0x7,
+ 0x6,
+ 0, /* 8: Reserved */
+ 0x1,
+ 0x3,
+ 0x9,
+ 0xb,
+ 0, /* 13: Reserved */
+ 0xd,
+ 0xf,
+};
+
+static int __devinit
+get_pci_irq_from_of(struct pci_controller *hose, int slot, int pin)
+{
+ struct of_irq oirq;
+ u32 laddr[3];
+ struct device_node *hosenode = hose ? hose->arch_data : NULL;
+
+ if (!hosenode)
+ return -EINVAL;
+
+ laddr[0] = (hose->first_busno << 16) | (PCI_DEVFN(slot, 0) << 8);
+ laddr[1] = laddr[2] = 0;
+ of_irq_map_raw(hosenode, &pin, 1, laddr, &oirq);
+ DBG("mpc8544_ds: pci irq addr %x, slot %d, pin %d, irq %d\n",
+ laddr[0], slot, pin, oirq.specifier[0]);
+ return oirq.specifier[0];
+}
+
+/*8259*/
+static void __devinit quirk_uli1575(struct pci_dev *dev)
+{
+ unsigned short temp;
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ unsigned char irq2pin[16];
+ unsigned long pirq_map_word = 0;
+ u32 irq;
+ int i;
+
+ /*
+ * ULI1575 interrupts route setup
+ */
+ memset(irq2pin, 0, 16); /* Initialize default value 0 */
+
+ irq2pin[6]=PIRQA+3; /* enable mapping of IRQ6 to PIRQD, used by SATA */
+
+ /*
+ * PIRQE -> PIRQH mapping set manually
+ *
+ * IRQ pin IRQ#
+ * PIRQE ---- 9
+ * PIRQF ---- 10
+ * PIRQG ---- 11
+ * PIRQH ---- 12
+ */
+ for (i = 0; i < 4; i++)
+ irq2pin[i + 9] = PIRQE + i;
+
+ /* Set IRQ-PIRQ Mapping to ULI1575 */
+ for (i = 0; i < 16; i++)
+ if (irq2pin[i])
+ pirq_map_word |= (uli1575_irq_route_table[i] & 0xf)
+ << ((irq2pin[i] - PIRQA) * 4);
+
+ pirq_map_word |= 1<<26; /* disable INTx in EP mode */
+
+ /* ULI1575 IRQ mapping conf register default value is 0xb9317542 */
+ DBG("Setup ULI1575 IRQ mapping configuration register value = 0x%x\n",
+ (int)pirq_map_word);
+ pci_write_config_dword(dev, 0x48, pirq_map_word);
+
+#define ULI1575_SET_DEV_IRQ(slot, pin, reg) \
+ do { \
+ int irq; \
+ irq = get_pci_irq_from_of(hose, slot, pin); \
+ if (irq > 0 && irq < 16) \
+ pci_write_config_byte(dev, reg, irq2pin[irq]); \
+ else \
+ printk(KERN_WARNING "ULI1575 device " \
+ "(slot %d, pin %d) irq %d is invalid.\n", \
+ slot, pin, irq); \
+ } while(0)
+
+ /* USB 1.1 OHCI controller 1, slot 28, pin 1 */
+ ULI1575_SET_DEV_IRQ(28, 1, 0x86);
+
+ /* USB 1.1 OHCI controller 2, slot 28, pin 2 */
+ ULI1575_SET_DEV_IRQ(28, 2, 0x87);
+
+ /* USB 1.1 OHCI controller 3, slot 28, pin 3 */
+ ULI1575_SET_DEV_IRQ(28, 3, 0x88);
+
+ /* USB 2.0 controller, slot 28, pin 4 */
+ irq = get_pci_irq_from_of(hose, 28, 4);
+ if (irq >= 0 && irq <= 15)
+ pci_write_config_dword(dev, 0x74, uli1575_irq_route_table[irq]);
+
+ /* Audio controller, slot 29, pin 1 */
+ ULI1575_SET_DEV_IRQ(29, 1, 0x8a);
+
+ /* Modem controller, slot 29, pin 2 */
+ ULI1575_SET_DEV_IRQ(29, 2, 0x8b);
+
+ /* HD audio controller, slot 29, pin 3 */
+ ULI1575_SET_DEV_IRQ(29, 3, 0x8c);
+
+ /* SMB interrupt: slot 30, pin 1 */
+ ULI1575_SET_DEV_IRQ(30, 1, 0x8e);
+
+ /* PMU ACPI SCI interrupt: slot 30, pin 2 */
+ ULI1575_SET_DEV_IRQ(30, 2, 0x8f);
+
+ /* Serial ATA interrupt: slot 31, pin 1 */
+ ULI1575_SET_DEV_IRQ(31, 1, 0x8d);
+
+ /* Primary PATA IDE IRQ: 14
+ * Secondary PATA IDE IRQ: 15
+ */
+ pci_write_config_byte(dev, 0x44, 0x30 | uli1575_irq_route_table[14]);
+ pci_write_config_byte(dev, 0x75, uli1575_irq_route_table[15]);
+
+ /* Set IRQ14 and IRQ15 to legacy IRQs */
+ pci_read_config_word(dev, 0x46, &temp);
+ temp |= 0xc000;
+ pci_write_config_word(dev, 0x46, temp);
+
+ /* Set i8259 interrupt trigger
+ * IRQ 3: Level
+ * IRQ 4: Level
+ * IRQ 5: Level
+ * IRQ 6: Level
+ * IRQ 7: Level
+ * IRQ 9: Level
+ * IRQ 10: Level
+ * IRQ 11: Level
+ * IRQ 12: Level
+ * IRQ 14: Edge
+ * IRQ 15: Edge
+ */
+ outb(0xfa, 0x4d0);
+ outb(0x1e, 0x4d1);
+
+#undef ULI1575_SET_DEV_IRQ
+}
+
+/* SATA */
+static void __devinit quirk_uli5288(struct pci_dev *dev)
+{
+ unsigned char c;
+
+ pci_read_config_byte(dev, 0x83, &c);
+ c |= 0x80; /* read/write lock */
+ pci_write_config_byte(dev, 0x83, c);
+
+ pci_write_config_byte(dev, 0x09, 0x01); /* Base class code: storage */
+ pci_write_config_byte(dev, 0x0a, 0x06); /* IDE disk */
+
+ pci_read_config_byte(dev, 0x83, &c);
+ c &= 0x7f;
+ pci_write_config_byte(dev, 0x83, c);
+
+ pci_read_config_byte(dev, 0x84, &c);
+ c |= 0x01; /* emulated PATA mode enabled */
+ pci_write_config_byte(dev, 0x84, c);
+}
+
+/* PATA */
+static void __devinit quirk_uli5229(struct pci_dev *dev)
+{
+ unsigned short temp;
+ pci_write_config_word(dev, 0x04, 0x0405); /* MEM IO MSI */
+ pci_read_config_word(dev, 0x4a, &temp);
+ temp |= 0x1000; /* Enable Native IRQ 14/15 */
+ pci_write_config_word(dev, 0x4a, temp);
+}
+
+/*Bridge*/
+static void __devinit early_uli5249(struct pci_dev *dev)
+{
+ unsigned char temp;
+ pci_write_config_word(dev, 0x04, 0x0007); /* mem access */
+ pci_read_config_byte(dev, 0x7c, &temp);
+ pci_write_config_byte(dev, 0x7c, 0x80); /* R/W lock control */
+ pci_write_config_byte(dev, 0x09, 0x01); /* set as pci-pci bridge */
+ pci_write_config_byte(dev, 0x7c, temp); /* restore pci bus debug control */
+ dev->class |= 0x1;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, quirk_uli1575);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5288, quirk_uli5288);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, 0x5249, early_uli5249);
+#endif /* CONFIG_PCI */
/*
* Setup the architecture
*/
static void __init mpc8544_ds_setup_arch(void)
{
+#ifdef CONFIG_PCI
+ struct device_node *np;
+#endif
+
if (ppc_md.progress)
ppc_md.progress("mpc8544_ds_setup_arch()", 0);
+#ifdef CONFIG_PCI
+ for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;) {
+ struct resource rsrc;
+ of_address_to_resource(np, 0, &rsrc);
+ if ((rsrc.start & 0xfffff) == 0xb000)
+ fsl_add_bridge(np, 1);
+ else
+ fsl_add_bridge(np, 0);
+ }
+#endif
+
printk("MPC8544 DS board from Freescale Semiconductor\n");
}
-
/*
* Called very early, device-tree isn't unflattened
*/
@@ -124,6 +362,7 @@ define_machine(mpc8544_ds) {
.probe = mpc8544_ds_probe,
.setup_arch = mpc8544_ds_setup_arch,
.init_IRQ = mpc8544_ds_pic_init,
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
.get_irq = mpic_get_irq,
.restart = mpc85xx_restart,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/85xx/mpc85xx.h b/arch/powerpc/platforms/85xx/mpc85xx.h
index 7286ffac2c1..5b34deef12b 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx.h
+++ b/arch/powerpc/platforms/85xx/mpc85xx.h
@@ -15,4 +15,3 @@
*/
extern void mpc85xx_restart(char *);
-extern int mpc85xx_add_bridge(struct device_node *dev);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 7235f702394..40a828675c7 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -29,6 +29,7 @@
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
#include "mpc85xx.h"
#ifdef CONFIG_CPM2
@@ -217,7 +218,7 @@ static void __init mpc85xx_ads_setup_arch(void)
#ifdef CONFIG_PCI
for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
- mpc85xx_add_bridge(np);
+ fsl_add_bridge(np, 1);
ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 50c8d645836..6a171e9abf7 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -24,6 +24,7 @@
#include <linux/seq_file.h>
#include <linux/initrd.h>
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
#include <asm/system.h>
@@ -45,6 +46,7 @@
#include <asm/i8259.h>
#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
#include "mpc85xx.h"
static int cds_pci_slot = 2;
@@ -58,8 +60,6 @@ static volatile u8 *cadmus;
static int mpc85xx_exclude_device(struct pci_controller *hose,
u_char bus, u_char devfn)
{
- if ((bus == hose->first_busno) && PCI_SLOT(devfn) == 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
/* We explicitly do not go past the Tundra 320 Bridge */
if ((bus == 1) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL))
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -69,6 +69,37 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
return PCIBIOS_SUCCESSFUL;
}
+static void mpc85xx_cds_restart(char *cmd)
+{
+ struct pci_dev *dev;
+ u_char tmp;
+
+ if ((dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686,
+ NULL))) {
+
+ /* Use the VIA Super Southbridge to force a PCI reset */
+ pci_read_config_byte(dev, 0x47, &tmp);
+ pci_write_config_byte(dev, 0x47, tmp | 1);
+
+ /* Flush the outbound PCI write queues */
+ pci_read_config_byte(dev, 0x47, &tmp);
+
+ /*
+ * At this point, the hardware reset should have triggered.
+ * However, if it doesn't work for some mysterious reason,
+ * just fall through to the default reset below.
+ */
+
+ pci_dev_put(dev);
+ }
+
+ /*
+ * If we can't find the VIA chip (maybe the P2P bridge is disabled)
+ * or the VIA chip reset didn't work, just use the default reset.
+ */
+ mpc85xx_restart(NULL);
+}
+
static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev)
{
u_char c;
@@ -98,7 +129,7 @@ static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev)
/* There are two USB controllers.
* Identify them by function number
*/
- if (PCI_FUNC(dev->devfn))
+ if (PCI_FUNC(dev->devfn) == 3)
dev->irq = 11;
else
dev->irq = 10;
@@ -109,17 +140,41 @@ static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev)
}
}
+static void __devinit skip_fake_bridge(struct pci_dev *dev)
+{
+ /* Make it an error to skip the fake bridge
+ * in pci_setup_device() in probe.c */
+ dev->hdr_type = 0x7f;
+}
+DECLARE_PCI_FIXUP_EARLY(0x1957, 0x3fff, skip_fake_bridge);
+DECLARE_PCI_FIXUP_EARLY(0x3fff, 0x1957, skip_fake_bridge);
+DECLARE_PCI_FIXUP_EARLY(0xff3f, 0x5719, skip_fake_bridge);
+
#ifdef CONFIG_PPC_I8259
-#warning The i8259 PIC support is currently broken
-static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void mpc85xx_8259_cascade_handler(unsigned int irq,
+ struct irq_desc *desc)
{
unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ)
+ /* handle an interrupt from the 8259 */
generic_handle_irq(cascade_irq);
- desc->chip->eoi(irq);
+ /* check for any interrupts from the shared IRQ line */
+ handle_fasteoi_irq(irq, desc);
+}
+
+static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
}
+
+static struct irqaction mpc85xxcds_8259_irqaction = {
+ .handler = mpc85xx_8259_cascade_action,
+ .flags = IRQF_SHARED,
+ .mask = CPU_MASK_NONE,
+ .name = "8259 cascade",
+};
#endif /* PPC_I8259 */
#endif /* CONFIG_PCI */
@@ -128,10 +183,6 @@ static void __init mpc85xx_cds_pic_init(void)
struct mpic *mpic;
struct resource r;
struct device_node *np = NULL;
-#ifdef CONFIG_PPC_I8259
- struct device_node *cascade_node = NULL;
- int cascade_irq;
-#endif
np = of_find_node_by_type(np, "open-pic");
@@ -155,8 +206,19 @@ static void __init mpc85xx_cds_pic_init(void)
of_node_put(np);
mpic_init(mpic);
+}
+
+#if defined(CONFIG_PPC_I8259) && defined(CONFIG_PCI)
+static int mpc85xx_cds_8259_attach(void)
+{
+ int ret;
+ struct device_node *np = NULL;
+ struct device_node *cascade_node = NULL;
+ int cascade_irq;
+
+ if (!machine_is(mpc85xx_cds))
+ return 0;
-#ifdef CONFIG_PPC_I8259
/* Initialize the i8259 controller */
for_each_node_by_type(np, "interrupt-controller")
if (of_device_is_compatible(np, "chrp,iic")) {
@@ -166,22 +228,39 @@ static void __init mpc85xx_cds_pic_init(void)
if (cascade_node == NULL) {
printk(KERN_DEBUG "Could not find i8259 PIC\n");
- return;
+ return -ENODEV;
}
cascade_irq = irq_of_parse_and_map(cascade_node, 0);
if (cascade_irq == NO_IRQ) {
printk(KERN_ERR "Failed to map cascade interrupt\n");
- return;
+ return -ENXIO;
}
i8259_init(cascade_node, 0);
of_node_put(cascade_node);
- set_irq_chained_handler(cascade_irq, mpc85xx_8259_cascade);
-#endif /* CONFIG_PPC_I8259 */
+ /*
+ * Hook the interrupt to make sure desc->action is never NULL.
+ * This is required to ensure that the interrupt does not get
+ * disabled when the last user of the shared IRQ line frees their
+ * interrupt.
+ */
+ if ((ret = setup_irq(cascade_irq, &mpc85xxcds_8259_irqaction))) {
+ printk(KERN_ERR "Failed to setup cascade interrupt\n");
+ return ret;
+ }
+
+ /* Success. Connect our low-level cascade handler. */
+ set_irq_handler(cascade_irq, mpc85xx_8259_cascade_handler);
+
+ return 0;
}
+device_initcall(mpc85xx_cds_8259_attach);
+
+#endif /* CONFIG_PPC_I8259 */
+
/*
* Setup the architecture
*/
@@ -218,9 +297,14 @@ static void __init mpc85xx_cds_setup_arch(void)
}
#ifdef CONFIG_PCI
- for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
- mpc85xx_add_bridge(np);
-
+ for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;) {
+ struct resource rsrc;
+ of_address_to_resource(np, 0, &rsrc);
+ if ((rsrc.start & 0xfffff) == 0x8000)
+ fsl_add_bridge(np, 1);
+ else
+ fsl_add_bridge(np, 0);
+ }
ppc_md.pci_irq_fixup = mpc85xx_cds_pci_irq_fixup;
ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif
@@ -265,7 +349,12 @@ define_machine(mpc85xx_cds) {
.init_IRQ = mpc85xx_cds_pic_init,
.show_cpuinfo = mpc85xx_cds_show_cpuinfo,
.get_irq = mpic_get_irq,
+#ifdef CONFIG_PCI
+ .restart = mpc85xx_cds_restart,
+#else
.restart = mpc85xx_restart,
+#endif
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 004b80bd0b8..e8003bf00c9 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -46,6 +46,7 @@
#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
#include <asm/qe.h>
#include <asm/qe_ic.h>
#include <asm/mpic.h>
@@ -94,9 +95,8 @@ static void __init mpc85xx_mds_setup_arch(void)
}
#ifdef CONFIG_PCI
- for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;) {
- mpc85xx_add_bridge(np);
- }
+ for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
+ fsl_add_bridge(np, 1);
of_node_put(np);
#endif
@@ -208,4 +208,5 @@ define_machine(mpc85xx_mds) {
.restart = mpc85xx_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
};
diff --git a/arch/powerpc/platforms/85xx/pci.c b/arch/powerpc/platforms/85xx/pci.c
deleted file mode 100644
index 8118417b736..00000000000
--- a/arch/powerpc/platforms/85xx/pci.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * FSL SoC setup code
- *
- * Maintained by Kumar Gala (see MAINTAINERS for contact information)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <asm/io.h>
-#include <asm/pci-bridge.h>
-#include <asm/prom.h>
-#include <sysdev/fsl_soc.h>
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
-#ifdef CONFIG_PCI
-int __init mpc85xx_add_bridge(struct device_node *dev)
-{
- int len;
- struct pci_controller *hose;
- struct resource rsrc;
- const int *bus_range;
- int primary = 1, has_address = 0;
- phys_addr_t immr = get_immrbase();
-
- DBG("Adding PCI host bridge %s\n", dev->full_name);
-
- /* Fetch host bridge registers address */
- has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
-
- /* Get bus range if any */
- bus_range = of_get_property(dev, "bus-range", &len);
- if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s, assume"
- " bus 0\n", dev->full_name);
- }
-
- pci_assign_all_buses = 1;
- hose = pcibios_alloc_controller(dev);
- if (!hose)
- return -ENOMEM;
-
- hose->first_busno = bus_range ? bus_range[0] : 0;
- hose->last_busno = bus_range ? bus_range[1] : 0xff;
-
- /* PCI 1 */
- if ((rsrc.start & 0xfffff) == 0x8000) {
- setup_indirect_pci(hose, immr + 0x8000, immr + 0x8004);
- }
- /* PCI 2 */
- if ((rsrc.start & 0xfffff) == 0x9000) {
- setup_indirect_pci(hose, immr + 0x9000, immr + 0x9004);
- primary = 0;
- }
-
- printk(KERN_INFO "Found MPC85xx PCI host bridge at 0x%016llx. "
- "Firmware bus number: %d->%d\n",
- (unsigned long long)rsrc.start, hose->first_busno,
- hose->last_busno);
-
- DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
- hose, hose->cfg_addr, hose->cfg_data);
-
- /* Interpret the "ranges" property */
- /* This also maps the I/O region and sets isa_io/mem_base */
- pci_process_bridge_OF_ranges(hose, dev, primary);
-
- return 0;
-}
-
-#endif
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 0faebfdc159..343b76d0d79 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -14,8 +14,7 @@ endchoice
config MPC8641
bool
- select PPC_INDIRECT_PCI
- select PPC_INDIRECT_PCI_BE
+ select FSL_PCI if PCI
select PPC_UDBG_16550
select MPIC
default y if MPC8641_HPCN
diff --git a/arch/powerpc/platforms/86xx/Makefile b/arch/powerpc/platforms/86xx/Makefile
index 418fd8f4d26..3376c7767f2 100644
--- a/arch/powerpc/platforms/86xx/Makefile
+++ b/arch/powerpc/platforms/86xx/Makefile
@@ -4,4 +4,3 @@
obj-$(CONFIG_SMP) += mpc86xx_smp.o
obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o
-obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/powerpc/platforms/86xx/mpc86xx.h b/arch/powerpc/platforms/86xx/mpc86xx.h
index 23f7ed2a7f8..525ffa1904f 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx.h
+++ b/arch/powerpc/platforms/86xx/mpc86xx.h
@@ -15,11 +15,6 @@
* mpc86xx_* files. Mostly for use by mpc86xx_setup().
*/
-extern int mpc86xx_add_bridge(struct device_node *dev);
-
-extern int mpc86xx_exclude_device(struct pci_controller *hose,
- u_char bus, u_char devfn);
-
extern void __init mpc86xx_smp_init(void);
#endif /* __MPC86XX_H__ */
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 5b01ec7c13d..e9eaa0749ae 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -31,6 +31,7 @@
#include <asm/mpic.h>
+#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
#include "mpc86xx.h"
@@ -344,8 +345,14 @@ mpc86xx_hpcn_setup_arch(void)
}
#ifdef CONFIG_PCI
- for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
- mpc86xx_add_bridge(np);
+ for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;) {
+ struct resource rsrc;
+ of_address_to_resource(np, 0, &rsrc);
+ if ((rsrc.start & 0xfffff) == 0x8000)
+ fsl_add_bridge(np, 1);
+ else
+ fsl_add_bridge(np, 0);
+ }
#endif
printk("MPC86xx HPCN board from Freescale Semiconductor\n");
@@ -424,7 +431,6 @@ mpc86xx_time_init(void)
return 0;
}
-
define_machine(mpc86xx_hpcn) {
.name = "MPC86xx HPCN",
.probe = mpc86xx_hpcn_probe,
@@ -436,4 +442,5 @@ define_machine(mpc86xx_hpcn) {
.time_init = mpc86xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
};
diff --git a/arch/powerpc/platforms/86xx/pci.c b/arch/powerpc/platforms/86xx/pci.c
deleted file mode 100644
index 73cd5b05a84..00000000000
--- a/arch/powerpc/platforms/86xx/pci.c
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * MPC86XX pci setup code
- *
- * Recode: ZHANG WEI <wei.zhang@freescale.com>
- * Initial author: Xianghua Xiao <x.xiao@freescale.com>
- *
- * Copyright 2006 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/serial.h>
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <sysdev/fsl_soc.h>
-#include <sysdev/fsl_pcie.h>
-
-#include "mpc86xx.h"
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
-#else
-#define DBG(fmt, args...)
-#endif
-
-struct pcie_outbound_window_regs {
- uint pexotar; /* 0x.0 - PCI Express outbound translation address register */
- uint pexotear; /* 0x.4 - PCI Express outbound translation extended address register */
- uint pexowbar; /* 0x.8 - PCI Express outbound window base address register */
- char res1[4];
- uint pexowar; /* 0x.10 - PCI Express outbound window attributes register */
- char res2[12];
-};
-
-struct pcie_inbound_window_regs {
- uint pexitar; /* 0x.0 - PCI Express inbound translation address register */
- char res1[4];
- uint pexiwbar; /* 0x.8 - PCI Express inbound window base address register */
- uint pexiwbear; /* 0x.c - PCI Express inbound window base extended address register */
- uint pexiwar; /* 0x.10 - PCI Express inbound window attributes register */
- char res2[12];
-};
-
-static void __init setup_pcie_atmu(struct pci_controller *hose, struct resource *rsrc)
-{
- volatile struct ccsr_pex *pcie;
- volatile struct pcie_outbound_window_regs *pcieow;
- volatile struct pcie_inbound_window_regs *pcieiw;
- int i = 0;
-
- DBG("PCIE memory map start 0x%x, size 0x%x\n", rsrc->start,
- rsrc->end - rsrc->start + 1);
- pcie = ioremap(rsrc->start, rsrc->end - rsrc->start + 1);
-
- /* Disable all windows (except pexowar0 since its ignored) */
- pcie->pexowar1 = 0;
- pcie->pexowar2 = 0;
- pcie->pexowar3 = 0;
- pcie->pexowar4 = 0;
- pcie->pexiwar1 = 0;
- pcie->pexiwar2 = 0;
- pcie->pexiwar3 = 0;
-
- pcieow = (struct pcie_outbound_window_regs *)&pcie->pexotar1;
- pcieiw = (struct pcie_inbound_window_regs *)&pcie->pexitar1;
-
- /* Setup outbound MEM window */
- for(i = 0; i < 3; i++)
- if (hose->mem_resources[i].flags & IORESOURCE_MEM){
- DBG("PCIE MEM resource start 0x%08x, size 0x%08x.\n",
- hose->mem_resources[i].start,
- hose->mem_resources[i].end
- - hose->mem_resources[i].start + 1);
- pcieow->pexotar = (hose->mem_resources[i].start) >> 12
- & 0x000fffff;
- pcieow->pexotear = 0;
- pcieow->pexowbar = (hose->mem_resources[i].start) >> 12
- & 0x000fffff;
- /* Enable, Mem R/W */
- pcieow->pexowar = 0x80044000 |
- (__ilog2(hose->mem_resources[i].end
- - hose->mem_resources[i].start + 1)
- - 1);
- pcieow++;
- }
-
- /* Setup outbound IO window */
- if (hose->io_resource.flags & IORESOURCE_IO){
- DBG("PCIE IO resource start 0x%08x, size 0x%08x, phy base 0x%08x.\n",
- hose->io_resource.start,
- hose->io_resource.end - hose->io_resource.start + 1,
- hose->io_base_phys);
- pcieow->pexotar = (hose->io_resource.start) >> 12 & 0x000fffff;
- pcieow->pexotear = 0;
- pcieow->pexowbar = (hose->io_base_phys) >> 12 & 0x000fffff;
- /* Enable, IO R/W */
- pcieow->pexowar = 0x80088000 | (__ilog2(hose->io_resource.end
- - hose->io_resource.start + 1) - 1);
- }
-
- /* Setup 2G inbound Memory Window @ 0 */
- pcieiw->pexitar = 0x00000000;
- pcieiw->pexiwbar = 0x00000000;
- /* Enable, Prefetch, Local Mem, Snoop R/W, 2G */
- pcieiw->pexiwar = 0xa0f5501e;
-}
-
-static void __init
-mpc86xx_setup_pcie(struct pci_controller *hose, u32 pcie_offset, u32 pcie_size)
-{
- u16 cmd;
-
- DBG("PCIE host controller register offset 0x%08x, size 0x%08x.\n",
- pcie_offset, pcie_size);
-
- early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
- cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
- | PCI_COMMAND_IO;
- early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
-
- early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
-}
-
-static void __devinit quirk_fsl_pcie_transparent(struct pci_dev *dev)
-{
- struct resource *res;
- int i, res_idx = PCI_BRIDGE_RESOURCES;
- struct pci_controller *hose;
-
- /*
- * Make the bridge be transparent.
- */
- dev->transparent = 1;
-
- hose = pci_bus_to_host(dev->bus);
- if (!hose) {
- printk(KERN_ERR "Can't find hose for bus %d\n",
- dev->bus->number);
- return;
- }
-
- if (hose->io_resource.flags) {
- res = &dev->resource[res_idx++];
- res->start = hose->io_resource.start;
- res->end = hose->io_resource.end;
- res->flags = hose->io_resource.flags;
- }
-
- for (i = 0; i < 3; i++) {
- res = &dev->resource[res_idx + i];
- res->start = hose->mem_resources[i].start;
- res->end = hose->mem_resources[i].end;
- res->flags = hose->mem_resources[i].flags;
- }
-}
-
-
-DECLARE_PCI_FIXUP_EARLY(0x1957, 0x7010, quirk_fsl_pcie_transparent);
-DECLARE_PCI_FIXUP_EARLY(0x1957, 0x7011, quirk_fsl_pcie_transparent);
-
-#define PCIE_LTSSM 0x404 /* PCIe Link Training and Status */
-#define PCIE_LTSSM_L0 0x16 /* L0 state */
-
-int __init mpc86xx_add_bridge(struct device_node *dev)
-{
- int len;
- struct pci_controller *hose;
- struct resource rsrc;
- const int *bus_range;
- int has_address = 0;
- int primary = 0;
- u16 val;
-
- DBG("Adding PCIE host bridge %s\n", dev->full_name);
-
- /* Fetch host bridge registers address */
- has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
-
- /* Get bus range if any */
- bus_range = of_get_property(dev, "bus-range", &len);
- if (bus_range == NULL || len < 2 * sizeof(int))
- printk(KERN_WARNING "Can't get bus-range for %s, assume"
- " bus 0\n", dev->full_name);
-
- pci_assign_all_buses = 1;
- hose = pcibios_alloc_controller(dev);
- if (!hose)
- return -ENOMEM;
-
- hose->indirect_type = PPC_INDIRECT_TYPE_EXT_REG |
- PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
-
- hose->first_busno = bus_range ? bus_range[0] : 0x0;
- hose->last_busno = bus_range ? bus_range[1] : 0xff;
-
- setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4);
-
- /* Probe the hose link training status */
- early_read_config_word(hose, 0, 0, PCIE_LTSSM, &val);
- if (val < PCIE_LTSSM_L0)
- return -ENXIO;
-
- /* Setup the PCIE host controller. */
- mpc86xx_setup_pcie(hose, rsrc.start, rsrc.end - rsrc.start + 1);
-
- if ((rsrc.start & 0xfffff) == 0x8000)
- primary = 1;
-
- printk(KERN_INFO "Found MPC86xx PCIE host bridge at 0x%08lx. "
- "Firmware bus number: %d->%d\n",
- (unsigned long) rsrc.start,
- hose->first_busno, hose->last_busno);
-
- DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
- hose, hose->cfg_addr, hose->cfg_data);
-
- /* Interpret the "ranges" property */
- /* This also maps the I/O region and sets isa_io/mem_base */
- pci_process_bridge_OF_ranges(hose, dev, primary);
-
- /* Setup PEX window registers */
- setup_pcie_atmu(hose, &rsrc);
-
- return 0;
-}
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 6694f86d700..9cb081c26e7 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -59,7 +59,8 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
INIT_LIST_HEAD(&ctx->aff_list);
if (gang)
spu_gang_add_ctx(gang, ctx);
- ctx->cpus_allowed = current->cpus_allowed;
+
+ __spu_update_sched_info(ctx);
spu_set_timeslice(ctx);
ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 0b50fa5cb39..6abdd8fe209 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -312,6 +312,7 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
spu_acquire(ctx);
if (ctx->state == SPU_STATE_SAVED) {
__spu_update_sched_info(ctx);
+ spu_set_timeslice(ctx);
ret = spu_activate(ctx, 0);
if (ret) {
@@ -322,6 +323,9 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
/*
* We have to update the scheduling priority under active_mutex
* to protect against find_victim().
+ *
+ * No need to update the timeslice ASAP, it will get updated
+ * once the current one has expired.
*/
spu_update_sched_info(ctx);
}
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 227968b4779..758a80ac080 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -927,10 +927,6 @@ int __init spu_sched_init(void)
INIT_LIST_HEAD(&spu_prio->runq[i]);
__clear_bit(i, spu_prio->bitmap);
}
- for (i = 0; i < MAX_NUMNODES; i++) {
- mutex_init(&cbe_spu_info[i].list_mutex);
- INIT_LIST_HEAD(&cbe_spu_info[i].spus);
- }
spin_lock_init(&spu_prio->runq_lock);
setup_timer(&spusched_timer, spusched_wake, 0);
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 8b20c0c1556..2bfdeb8ea8b 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -40,13 +40,10 @@ enum {
struct spu_context_ops;
struct spu_gang;
-enum {
- SPU_SCHED_WAS_ACTIVE, /* was active upon spu_acquire_saved() */
-};
-
/* ctx->sched_flags */
enum {
SPU_SCHED_NOTIFY_ACTIVE,
+ SPU_SCHED_WAS_ACTIVE, /* was active upon spu_acquire_saved() */
};
struct spu_context {
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 3690624e49d..28d1647b204 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -181,7 +181,7 @@ setup_python(struct pci_controller *hose, struct device_node *dev)
}
iounmap(reg);
- setup_indirect_pci(hose, r.start + 0xf8000, r.start + 0xf8010);
+ setup_indirect_pci(hose, r.start + 0xf8000, r.start + 0xf8010, 0);
}
/* Marvell Discovery II based Pegasos 2 */
@@ -277,13 +277,14 @@ chrp_find_bridges(void)
hose->cfg_data = p;
gg2_pci_config_base = p;
} else if (is_pegasos == 1) {
- setup_indirect_pci(hose, 0xfec00cf8, 0xfee00cfc);
+ setup_indirect_pci(hose, 0xfec00cf8, 0xfee00cfc, 0);
} else if (is_pegasos == 2) {
setup_peg2(hose, dev);
} else if (!strncmp(model, "IBM,CPC710", 10)) {
setup_indirect_pci(hose,
r.start + 0x000f8000,
- r.start + 0x000f8010);
+ r.start + 0x000f8010,
+ 0);
if (index == 0) {
dma = of_get_property(dev, "system-dma-base",
&len);
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index f4d0a7a603f..bd5ca58345a 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -73,7 +73,7 @@ static int __init linkstation_add_bridge(struct device_node *dev)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
- setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
+ setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c
index 91df52a1899..34bdbbe3ce5 100644
--- a/arch/powerpc/platforms/iseries/lpevents.c
+++ b/arch/powerpc/platforms/iseries/lpevents.c
@@ -182,7 +182,7 @@ static int set_spread_lpevents(char *str)
}
__setup("spread_lpevents=", set_spread_lpevents);
-void setup_hvlpevent_queue(void)
+void __init setup_hvlpevent_queue(void)
{
void *eventStack;
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index fceaae40fe7..2542403288f 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -490,6 +490,9 @@ static int __init maple_add_bridge(struct device_node *dev)
/* Fixup "bus-range" OF property */
fixup_bus_range(dev);
+ /* Check for legacy IOs */
+ isa_bridge_find_early(hose);
+
return 0;
}
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 484eb4e0e9d..08ce31e612c 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PPC_PMI) += pmi.o
obj-$(CONFIG_U3_DART) += dart_iommu.o
obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
obj-$(CONFIG_FSL_SOC) += fsl_soc.o
+obj-$(CONFIG_FSL_PCI) += fsl_pci.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
mv64x60-$(CONFIG_PCI) += mv64x60_pci.o
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
new file mode 100644
index 00000000000..9fb0ce5c717
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -0,0 +1,257 @@
+/*
+ * MPC85xx/86xx PCI/PCIE support routines.
+ *
+ * Copyright 2007 Freescale Semiconductor, Inc
+ *
+ * Initial author: Xianghua Xiao <x.xiao@freescale.com>
+ * Recode: ZHANG WEI <wei.zhang@freescale.com>
+ * Rewrite the routines for Freescale PCI and PCI Express
+ * Roy Zang <tie-fei.zang@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+
+/* atmu setup for fsl pci/pcie controller */
+void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc)
+{
+ struct ccsr_pci __iomem *pci;
+ int i;
+
+ pr_debug("PCI memory map start 0x%x, size 0x%x\n", rsrc->start,
+ rsrc->end - rsrc->start + 1);
+ pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1);
+
+ /* Disable all windows (except powar0 since its ignored) */
+ for(i = 1; i < 5; i++)
+ out_be32(&pci->pow[i].powar, 0);
+ for(i = 0; i < 3; i++)
+ out_be32(&pci->piw[i].piwar, 0);
+
+ /* Setup outbound MEM window */
+ for(i = 0; i < 3; i++)
+ if (hose->mem_resources[i].flags & IORESOURCE_MEM){
+ pr_debug("PCI MEM resource start 0x%08x, size 0x%08x.\n",
+ hose->mem_resources[i].start,
+ hose->mem_resources[i].end
+ - hose->mem_resources[i].start + 1);
+ out_be32(&pci->pow[i+1].potar,
+ (hose->mem_resources[i].start >> 12)
+ & 0x000fffff);
+ out_be32(&pci->pow[i+1].potear, 0);
+ out_be32(&pci->pow[i+1].powbar,
+ (hose->mem_resources[i].start >> 12)
+ & 0x000fffff);
+ /* Enable, Mem R/W */
+ out_be32(&pci->pow[i+1].powar, 0x80044000
+ | (__ilog2(hose->mem_resources[i].end
+ - hose->mem_resources[i].start + 1) - 1));
+ }
+
+ /* Setup outbound IO window */
+ if (hose->io_resource.flags & IORESOURCE_IO){
+ pr_debug("PCI IO resource start 0x%08x, size 0x%08x, phy base 0x%08x.\n",
+ hose->io_resource.start,
+ hose->io_resource.end - hose->io_resource.start + 1,
+ hose->io_base_phys);
+ out_be32(&pci->pow[i+1].potar, (hose->io_resource.start >> 12)
+ & 0x000fffff);
+ out_be32(&pci->pow[i+1].potear, 0);
+ out_be32(&pci->pow[i+1].powbar, (hose->io_base_phys >> 12)
+ & 0x000fffff);
+ /* Enable, IO R/W */
+ out_be32(&pci->pow[i+1].powar, 0x80088000
+ | (__ilog2(hose->io_resource.end
+ - hose->io_resource.start + 1) - 1));
+ }
+
+ /* Setup 2G inbound Memory Window @ 1 */
+ out_be32(&pci->piw[2].pitar, 0x00000000);
+ out_be32(&pci->piw[2].piwbar, 0x00000000);
+ out_be32(&pci->piw[2].piwar, PIWAR_2G);
+}
+
+void __init setup_pci_cmd(struct pci_controller *hose)
+{
+ u16 cmd;
+ int cap_x;
+
+ early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_IO;
+ early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
+
+ cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
+ if (cap_x) {
+ int pci_x_cmd = cap_x + PCI_X_CMD;
+ cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
+ | PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
+ early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
+ } else {
+ early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
+ }
+}
+
+static void __init quirk_fsl_pcie_transparent(struct pci_dev *dev)
+{
+ struct resource *res;
+ int i, res_idx = PCI_BRIDGE_RESOURCES;
+ struct pci_controller *hose;
+
+ /* if this isn't a PCIe controller, don't bother */
+ if (!pci_find_capability(dev, PCI_CAP_ID_EXP))
+ return;
+
+ /*
+ * Make the bridge be transparent.
+ */
+ dev->transparent = 1;
+
+ hose = pci_bus_to_host(dev->bus);
+ if (!hose) {
+ printk(KERN_ERR "Can't find hose for bus %d\n",
+ dev->bus->number);
+ return;
+ }
+
+ /* Clear out any of the virtual P2P bridge registers */
+ pci_write_config_word(dev, PCI_IO_BASE_UPPER16, 0);
+ pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16, 0);
+ pci_write_config_byte(dev, PCI_IO_BASE, 0x10);
+ pci_write_config_byte(dev, PCI_IO_LIMIT, 0);
+ pci_write_config_word(dev, PCI_MEMORY_BASE, 0x10);
+ pci_write_config_word(dev, PCI_MEMORY_LIMIT, 0);
+ pci_write_config_word(dev, PCI_PREF_BASE_UPPER32, 0x0);
+ pci_write_config_word(dev, PCI_PREF_LIMIT_UPPER32, 0x0);
+ pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, 0x10);
+ pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, 0);
+
+ if (hose->io_resource.flags) {
+ res = &dev->resource[res_idx++];
+ res->start = hose->io_resource.start;
+ res->end = hose->io_resource.end;
+ res->flags = hose->io_resource.flags;
+ update_bridge_resource(dev, res);
+ }
+
+ for (i = 0; i < 3; i++) {
+ res = &dev->resource[res_idx + i];
+ res->start = hose->mem_resources[i].start;
+ res->end = hose->mem_resources[i].end;
+ res->flags = hose->mem_resources[i].flags;
+ update_bridge_resource(dev, res);
+ }
+}
+
+int __init fsl_pcie_check_link(struct pci_controller *hose)
+{
+ u16 val;
+ early_read_config_word(hose, 0, 0, PCIE_LTSSM, &val);
+ if (val < PCIE_LTSSM_L0)
+ return 1;
+ return 0;
+}
+
+void fsl_pcibios_fixup_bus(struct pci_bus *bus)
+{
+ struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
+ int i;
+
+ /* deal with bogus pci_bus when we don't have anything connected on PCIe */
+ if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) {
+ if (bus->parent) {
+ for (i = 0; i < 4; ++i)
+ bus->resource[i] = bus->parent->resource[i];
+ }
+ }
+}
+
+int __init fsl_add_bridge(struct device_node *dev, int is_primary)
+{
+ int len;
+ struct pci_controller *hose;
+ struct resource rsrc;
+ const int *bus_range;
+
+ pr_debug("Adding PCI host bridge %s\n", dev->full_name);
+
+ /* Fetch host bridge registers address */
+ if (of_address_to_resource(dev, 0, &rsrc)) {
+ printk(KERN_WARNING "Can't get pci register base!");
+ return -ENOMEM;
+ }
+
+ /* Get bus range if any */
+ bus_range = of_get_property(dev, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int))
+ printk(KERN_WARNING "Can't get bus-range for %s, assume"
+ " bus 0\n", dev->full_name);
+
+ pci_assign_all_buses = 1;
+ hose = pcibios_alloc_controller(dev);
+ if (!hose)
+ return -ENOMEM;
+
+ hose->first_busno = bus_range ? bus_range[0] : 0x0;
+ hose->last_busno = bus_range ? bus_range[1] : 0xff;
+
+ setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
+ PPC_INDIRECT_TYPE_BIG_ENDIAN);
+ setup_pci_cmd(hose);
+
+ /* check PCI express link status */
+ if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
+ hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
+ PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
+ if (fsl_pcie_check_link(hose))
+ hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
+ }
+
+ printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx."
+ "Firmware bus number: %d->%d\n",
+ (unsigned long long)rsrc.start, hose->first_busno,
+ hose->last_busno);
+
+ pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
+ hose, hose->cfg_addr, hose->cfg_data);
+
+ /* Interpret the "ranges" property */
+ /* This also maps the I/O region and sets isa_io/mem_base */
+ pci_process_bridge_OF_ranges(hose, dev, is_primary);
+
+ /* Setup PEX window registers */
+ setup_pci_atmu(hose, &rsrc);
+
+ return 0;
+}
+
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8548E, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8548, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8543E, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8543, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8547E, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8545E, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8545, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8568E, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8568, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8567E, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8567, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8544E, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8544, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_transparent);
+DECLARE_PCI_FIXUP_EARLY(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_transparent);
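The outbound ATMU setup in setup_pci_atmu() above packs the window enable and transaction-type bits together with the window size into one POWAR value, where the low bits hold __ilog2(size) - 1. The following is a minimal user-space sketch of that size encoding, assuming power-of-two window sizes; the helper name fsl_powar_size and the standalone main() are illustrative only, not part of the kernel code.

    #include <stdint.h>
    #include <stdio.h>

    /* Encode a window size the way setup_pci_atmu() does: the low bits of
     * POWAR hold log2(size) - 1, so a 256 MiB window (1 << 28) encodes as 27.
     */
    static uint32_t fsl_powar_size(uint64_t size)
    {
        unsigned int bits = 0;

        while ((1ULL << (bits + 1)) < size)   /* integer log2 for powers of two */
            bits++;
        return bits;                          /* == __ilog2(size) - 1 */
    }

    int main(void)
    {
        uint64_t mem_size = 256 << 20;        /* hypothetical 256 MiB MEM window */

        /* 0x80044000: enable + memory read/write, as in the patch above */
        printf("POWAR = 0x%08x\n", 0x80044000u | fsl_powar_size(mem_size));
        return 0;
    }

For the hypothetical 256 MiB window this prints POWAR = 0x8004401b, i.e. the 0x80044000 base used in the patch plus the encoded size 27.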
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h
new file mode 100644
index 00000000000..37b04ad2657
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_pci.h
@@ -0,0 +1,88 @@
+/*
+ * MPC85xx/86xx PCI Express structure definitions
+ *
+ * Copyright 2007 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifdef __KERNEL__
+#ifndef __POWERPC_FSL_PCI_H
+#define __POWERPC_FSL_PCI_H
+
+#define PCIE_LTSSM 0x0404 /* PCIE Link Training and Status */
+#define PCIE_LTSSM_L0 0x16 /* L0 state */
+#define PIWAR_2G 0xa0f5501e /* Enable, Prefetch, Local Mem, Snoop R/W, 2G */
+
+/* PCI/PCI Express outbound window reg */
+struct pci_outbound_window_regs {
+ __be32 potar; /* 0x.0 - Outbound translation address register */
+ __be32 potear; /* 0x.4 - Outbound translation extended address register */
+ __be32 powbar; /* 0x.8 - Outbound window base address register */
+ u8 res1[4];
+ __be32 powar; /* 0x.10 - Outbound window attributes register */
+ u8 res2[12];
+};
+
+/* PCI/PCI Express inbound window reg */
+struct pci_inbound_window_regs {
+ __be32 pitar; /* 0x.0 - Inbound translation address register */
+ u8 res1[4];
+ __be32 piwbar; /* 0x.8 - Inbound window base address register */
+ __be32 piwbear; /* 0x.c - Inbound window base extended address register */
+ __be32 piwar; /* 0x.10 - Inbound window attributes register */
+ u8 res2[12];
+};
+
+/* PCI/PCI Express IO block registers for 85xx/86xx */
+struct ccsr_pci {
+ __be32 config_addr; /* 0x.000 - PCI/PCIE Configuration Address Register */
+ __be32 config_data; /* 0x.004 - PCI/PCIE Configuration Data Register */
+ __be32 int_ack; /* 0x.008 - PCI Interrupt Acknowledge Register */
+ __be32 pex_otb_cpl_tor; /* 0x.00c - PCIE Outbound completion timeout register */
+ __be32 pex_conf_tor; /* 0x.010 - PCIE configuration timeout register */
+ u8 res2[12];
+ __be32 pex_pme_mes_dr; /* 0x.020 - PCIE PME and message detect register */
+ __be32 pex_pme_mes_disr; /* 0x.024 - PCIE PME and message disable register */
+ __be32 pex_pme_mes_ier; /* 0x.028 - PCIE PME and message interrupt enable register */
+ __be32 pex_pmcr; /* 0x.02c - PCIE power management command register */
+ u8 res3[3024];
+
+/* PCI/PCI Express outbound window 0-4
+ * Window 0 is the default window and is the only window enabled upon reset.
+ * The default outbound register set is used when a transaction misses
+ * in all of the other outbound windows.
+ */
+ struct pci_outbound_window_regs pow[5];
+
+ u8 res14[256];
+
+/* PCI/PCI Express inbound window 3-1
+ * inbound window 1 supports only a 32-bit base address and does not
+ * define an inbound window base extended address register.
+ */
+ struct pci_inbound_window_regs piw[3];
+
+ __be32 pex_err_dr; /* 0x.e00 - PCI/PCIE error detect register */
+ u8 res21[4];
+ __be32 pex_err_en; /* 0x.e08 - PCI/PCIE error interrupt enable register */
+ u8 res22[4];
+ __be32 pex_err_disr; /* 0x.e10 - PCI/PCIE error disable register */
+ u8 res23[12];
+ __be32 pex_err_cap_stat; /* 0x.e20 - PCI/PCIE error capture status register */
+ u8 res24[4];
+ __be32 pex_err_cap_r0; /* 0x.e28 - PCIE error capture register 0 */
+ __be32 pex_err_cap_r1; /* 0x.e2c - PCIE error capture register 1 */
+ __be32 pex_err_cap_r2; /* 0x.e30 - PCIE error capture register 2 */
+ __be32 pex_err_cap_r3; /* 0x.e34 - PCIE error capture register 3 */
+};
+
+extern int fsl_add_bridge(struct device_node *dev, int is_primary);
+extern void fsl_pcibios_fixup_bus(struct pci_bus *bus);
+
+#endif /* __POWERPC_FSL_PCI_H */
+#endif /* __KERNEL__ */
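The reserved byte arrays in struct ccsr_pci above are sized so that the register blocks land on the documented CCSR offsets: pow[0] at 0xc00, piw[0] at 0xda0 and pex_err_dr at 0xe00. The following standalone sketch re-declares a stripped-down copy of that layout (uint32_t in place of __be32 so it builds in user space; it is a hedged restatement for illustration, not the kernel header) and checks those offsets.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct pci_outbound_window_regs { uint32_t potar, potear, powbar; uint8_t res1[4]; uint32_t powar; uint8_t res2[12]; };
    struct pci_inbound_window_regs  { uint32_t pitar; uint8_t res1[4]; uint32_t piwbar, piwbear, piwar; uint8_t res2[12]; };

    struct ccsr_pci {
        uint32_t config_addr, config_data, int_ack;
        uint32_t pex_otb_cpl_tor, pex_conf_tor;
        uint8_t  res2[12];
        uint32_t pex_pme_mes_dr, pex_pme_mes_disr, pex_pme_mes_ier, pex_pmcr;
        uint8_t  res3[3024];
        struct pci_outbound_window_regs pow[5]; /* should start at offset 0xc00 */
        uint8_t  res14[256];
        struct pci_inbound_window_regs piw[3];  /* should start at offset 0xda0 */
        uint32_t pex_err_dr;                    /* should start at offset 0xe00 */
    };

    int main(void)
    {
        assert(offsetof(struct ccsr_pci, pow)        == 0xc00);
        assert(offsetof(struct ccsr_pci, piw)        == 0xda0);
        assert(offsetof(struct ccsr_pci, pex_err_dr) == 0xe00);
        printf("register block offsets match the comments\n");
        return 0;
    }

With natural 4-byte alignment there is no padding, so the asserts confirm the offsets quoted in the register comments.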
diff --git a/arch/powerpc/sysdev/fsl_pcie.h b/arch/powerpc/sysdev/fsl_pcie.h
deleted file mode 100644
index 8d9779c84be..00000000000
--- a/arch/powerpc/sysdev/fsl_pcie.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * MPC85xx/86xx PCI Express structure define
- *
- * Copyright 2007 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifdef __KERNEL__
-#ifndef __POWERPC_FSL_PCIE_H
-#define __POWERPC_FSL_PCIE_H
-
-/* PCIE Express IO block registers in 85xx/86xx */
-
-struct ccsr_pex {
- __be32 __iomem pex_config_addr; /* 0x.000 - PCI Express Configuration Address Register */
- __be32 __iomem pex_config_data; /* 0x.004 - PCI Express Configuration Data Register */
- u8 __iomem res1[4];
- __be32 __iomem pex_otb_cpl_tor; /* 0x.00c - PCI Express Outbound completion timeout register */
- __be32 __iomem pex_conf_tor; /* 0x.010 - PCI Express configuration timeout register */
- u8 __iomem res2[12];
- __be32 __iomem pex_pme_mes_dr; /* 0x.020 - PCI Express PME and message detect register */
- __be32 __iomem pex_pme_mes_disr; /* 0x.024 - PCI Express PME and message disable register */
- __be32 __iomem pex_pme_mes_ier; /* 0x.028 - PCI Express PME and message interrupt enable register */
- __be32 __iomem pex_pmcr; /* 0x.02c - PCI Express power management command register */
- u8 __iomem res3[3024];
- __be32 __iomem pexotar0; /* 0x.c00 - PCI Express outbound translation address register 0 */
- __be32 __iomem pexotear0; /* 0x.c04 - PCI Express outbound translation extended address register 0*/
- u8 __iomem res4[8];
- __be32 __iomem pexowar0; /* 0x.c10 - PCI Express outbound window attributes register 0*/
- u8 __iomem res5[12];
- __be32 __iomem pexotar1; /* 0x.c20 - PCI Express outbound translation address register 1 */
- __be32 __iomem pexotear1; /* 0x.c24 - PCI Express outbound translation extended address register 1*/
- __be32 __iomem pexowbar1; /* 0x.c28 - PCI Express outbound window base address register 1*/
- u8 __iomem res6[4];
- __be32 __iomem pexowar1; /* 0x.c30 - PCI Express outbound window attributes register 1*/
- u8 __iomem res7[12];
- __be32 __iomem pexotar2; /* 0x.c40 - PCI Express outbound translation address register 2 */
- __be32 __iomem pexotear2; /* 0x.c44 - PCI Express outbound translation extended address register 2*/
- __be32 __iomem pexowbar2; /* 0x.c48 - PCI Express outbound window base address register 2*/
- u8 __iomem res8[4];
- __be32 __iomem pexowar2; /* 0x.c50 - PCI Express outbound window attributes register 2*/
- u8 __iomem res9[12];
- __be32 __iomem pexotar3; /* 0x.c60 - PCI Express outbound translation address register 3 */
- __be32 __iomem pexotear3; /* 0x.c64 - PCI Express outbound translation extended address register 3*/
- __be32 __iomem pexowbar3; /* 0x.c68 - PCI Express outbound window base address register 3*/
- u8 __iomem res10[4];
- __be32 __iomem pexowar3; /* 0x.c70 - PCI Express outbound window attributes register 3*/
- u8 __iomem res11[12];
- __be32 __iomem pexotar4; /* 0x.c80 - PCI Express outbound translation address register 4 */
- __be32 __iomem pexotear4; /* 0x.c84 - PCI Express outbound translation extended address register 4*/
- __be32 __iomem pexowbar4; /* 0x.c88 - PCI Express outbound window base address register 4*/
- u8 __iomem res12[4];
- __be32 __iomem pexowar4; /* 0x.c90 - PCI Express outbound window attributes register 4*/
- u8 __iomem res13[12];
- u8 __iomem res14[256];
- __be32 __iomem pexitar3; /* 0x.da0 - PCI Express inbound translation address register 3 */
- u8 __iomem res15[4];
- __be32 __iomem pexiwbar3; /* 0x.da8 - PCI Express inbound window base address register 3 */
- __be32 __iomem pexiwbear3; /* 0x.dac - PCI Express inbound window base extended address register 3 */
- __be32 __iomem pexiwar3; /* 0x.db0 - PCI Express inbound window attributes register 3 */
- u8 __iomem res16[12];
- __be32 __iomem pexitar2; /* 0x.dc0 - PCI Express inbound translation address register 2 */
- u8 __iomem res17[4];
- __be32 __iomem pexiwbar2; /* 0x.dc8 - PCI Express inbound window base address register 2 */
- __be32 __iomem pexiwbear2; /* 0x.dcc - PCI Express inbound window base extended address register 2 */
- __be32 __iomem pexiwar2; /* 0x.dd0 - PCI Express inbound window attributes register 2 */
- u8 __iomem res18[12];
- __be32 __iomem pexitar1; /* 0x.de0 - PCI Express inbound translation address register 2 */
- u8 __iomem res19[4];
- __be32 __iomem pexiwbar1; /* 0x.de8 - PCI Express inbound window base address register 2 */
- __be32 __iomem pexiwbear1; /* 0x.dec - PCI Express inbound window base extended address register 2 */
- __be32 __iomem pexiwar1; /* 0x.df0 - PCI Express inbound window attributes register 2 */
- u8 __iomem res20[12];
- __be32 __iomem pex_err_dr; /* 0x.e00 - PCI Express error detect register */
- u8 __iomem res21[4];
- __be32 __iomem pex_err_en; /* 0x.e08 - PCI Express error interrupt enable register */
- u8 __iomem res22[4];
- __be32 __iomem pex_err_disr; /* 0x.e10 - PCI Express error disable register */
- u8 __iomem res23[12];
- __be32 __iomem pex_err_cap_stat; /* 0x.e20 - PCI Express error capture status register */
- u8 __iomem res24[4];
- __be32 __iomem pex_err_cap_r0; /* 0x.e28 - PCI Express error capture register 0 */
- __be32 __iomem pex_err_cap_r1; /* 0x.e2c - PCI Express error capture register 0 */
- __be32 __iomem pex_err_cap_r2; /* 0x.e30 - PCI Express error capture register 0 */
- __be32 __iomem pex_err_cap_r3; /* 0x.e34 - PCI Express error capture register 0 */
-};
-
-#endif /* __POWERPC_FSL_PCIE_H */
-#endif /* __KERNEL__ */
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 3289fab01e9..727453d3e8b 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -305,6 +305,64 @@ err:
arch_initcall(gfar_of_init);
+#ifdef CONFIG_I2C_BOARDINFO
+#include <linux/i2c.h>
+struct i2c_driver_device {
+ char *of_device;
+ char *i2c_driver;
+ char *i2c_type;
+};
+
+static struct i2c_driver_device i2c_devices[] __initdata = {
+ {"ricoh,rs5c372a", "rtc-rs5c372", "rs5c372a",},
+ {"ricoh,rs5c372b", "rtc-rs5c372", "rs5c372b",},
+ {"ricoh,rv5c386", "rtc-rs5c372", "rv5c386",},
+ {"ricoh,rv5c387a", "rtc-rs5c372", "rv5c387a",},
+};
+
+static int __init of_find_i2c_driver(struct device_node *node, struct i2c_board_info *info)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(i2c_devices); i++) {
+ if (!of_device_is_compatible(node, i2c_devices[i].of_device))
+ continue;
+ strncpy(info->driver_name, i2c_devices[i].i2c_driver, KOBJ_NAME_LEN);
+ strncpy(info->type, i2c_devices[i].i2c_type, I2C_NAME_SIZE);
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static void __init of_register_i2c_devices(struct device_node *adap_node, int bus_num)
+{
+ struct device_node *node = NULL;
+
+ while ((node = of_get_next_child(adap_node, node))) {
+ struct i2c_board_info info;
+ const u32 *addr;
+ int len;
+
+ addr = of_get_property(node, "reg", &len);
+ if (!addr || len < sizeof(int) || *addr > (1 << 10) - 1) {
+ printk(KERN_WARNING "fsl_ioc.c: invalid i2c device entry\n");
+ continue;
+ }
+
+ info.irq = irq_of_parse_and_map(node, 0);
+ if (info.irq == NO_IRQ)
+ info.irq = -1;
+
+ if (of_find_i2c_driver(node, &info) < 0)
+ continue;
+
+ info.platform_data = NULL;
+ info.addr = *addr;
+
+ i2c_register_board_info(bus_num, &info, 1);
+ }
+}
+
static int __init fsl_i2c_of_init(void)
{
struct device_node *np;
@@ -349,6 +407,8 @@ static int __init fsl_i2c_of_init(void)
fsl_i2c_platform_data));
if (ret)
goto unreg;
+
+ of_register_i2c_devices(np, i);
}
return 0;
@@ -360,6 +420,7 @@ err:
}
arch_initcall(fsl_i2c_of_init);
+#endif
#ifdef CONFIG_PPC_83xx
static int __init mpc83xx_wdt_init(void)
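The new of_register_i2c_devices() path above maps a child node's compatible string onto a Linux I2C driver name and chip type through the small i2c_devices[] table before registering the board info. Below is a self-contained sketch of that table lookup with the table contents copied from the hunk; the kernel version matches via of_device_is_compatible() rather than an exact strcmp(), and the sample compatible string is a hypothetical device-tree node.

    #include <stdio.h>
    #include <string.h>

    struct i2c_driver_device {
        const char *of_device;   /* device-tree "compatible" value */
        const char *i2c_driver;  /* Linux driver name */
        const char *i2c_type;    /* chip type passed in i2c_board_info */
    };

    /* Same entries as the patch above */
    static const struct i2c_driver_device i2c_devices[] = {
        { "ricoh,rs5c372a", "rtc-rs5c372", "rs5c372a" },
        { "ricoh,rs5c372b", "rtc-rs5c372", "rs5c372b" },
        { "ricoh,rv5c386",  "rtc-rs5c372", "rv5c386"  },
        { "ricoh,rv5c387a", "rtc-rs5c372", "rv5c387a" },
    };

    int main(void)
    {
        const char *compatible = "ricoh,rv5c387a";   /* hypothetical DT node */
        size_t i;

        for (i = 0; i < sizeof(i2c_devices) / sizeof(i2c_devices[0]); i++) {
            if (strcmp(compatible, i2c_devices[i].of_device))
                continue;
            printf("driver %s, type %s\n",
                   i2c_devices[i].i2c_driver, i2c_devices[i].i2c_type);
            return 0;
        }
        printf("no driver known for %s\n", compatible);
        return 1;
    }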
diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
index 42053625f49..11ad5622eb7 100644
--- a/arch/powerpc/sysdev/grackle.c
+++ b/arch/powerpc/sysdev/grackle.c
@@ -55,7 +55,7 @@ static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
void __init setup_grackle(struct pci_controller *hose)
{
- setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
+ setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
if (machine_is_compatible("PowerMac1,1"))
pci_assign_all_buses = 1;
if (machine_is_compatible("AAPL,PowerBook1998"))
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index c7e6e859b39..5294560c7b0 100644
--- a/arch/powerpc/sysdev/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -20,12 +20,6 @@
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
-#ifdef CONFIG_PPC_INDIRECT_PCI_BE
-#define PCI_CFG_OUT out_be32
-#else
-#define PCI_CFG_OUT out_le32
-#endif
-
static int
indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 *val)
@@ -35,10 +29,17 @@ indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
u8 cfg_type = 0;
u32 bus_no, reg;
+ if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) {
+ if (bus->number != hose->first_busno)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (devfn != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
if (ppc_md.pci_exclude_device)
if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
-
+
if (hose->indirect_type & PPC_INDIRECT_TYPE_SET_CFG_TYPE)
if (bus->number != hose->first_busno)
cfg_type = 1;
@@ -51,9 +52,12 @@ indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
else
reg = offset & 0xfc;
- PCI_CFG_OUT(hose->cfg_addr,
- (0x80000000 | (bus_no << 16)
- | (devfn << 8) | reg | cfg_type));
+ if (hose->indirect_type & PPC_INDIRECT_TYPE_BIG_ENDIAN)
+ out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
+ (devfn << 8) | reg | cfg_type));
+ else
+ out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
+ (devfn << 8) | reg | cfg_type));
/*
* Note: the caller has already checked that offset is
@@ -83,6 +87,13 @@ indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
u8 cfg_type = 0;
u32 bus_no, reg;
+ if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) {
+ if (bus->number != hose->first_busno)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (devfn != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
if (ppc_md.pci_exclude_device)
if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -99,9 +110,12 @@ indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
else
reg = offset & 0xfc;
- PCI_CFG_OUT(hose->cfg_addr,
- (0x80000000 | (bus_no << 16)
- | (devfn << 8) | reg | cfg_type));
+ if (hose->indirect_type & PPC_INDIRECT_TYPE_BIG_ENDIAN)
+ out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
+ (devfn << 8) | reg | cfg_type));
+ else
+ out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
+ (devfn << 8) | reg | cfg_type));
/* surpress setting of PCI_PRIMARY_BUS */
if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
@@ -135,24 +149,16 @@ static struct pci_ops indirect_pci_ops =
};
void __init
-setup_indirect_pci_nomap(struct pci_controller* hose, void __iomem * cfg_addr,
- void __iomem * cfg_data)
-{
- hose->cfg_addr = cfg_addr;
- hose->cfg_data = cfg_data;
- hose->ops = &indirect_pci_ops;
-}
-
-void __init
-setup_indirect_pci(struct pci_controller* hose, u32 cfg_addr, u32 cfg_data)
+setup_indirect_pci(struct pci_controller* hose, u32 cfg_addr, u32 cfg_data, u32 flags)
{
unsigned long base = cfg_addr & PAGE_MASK;
- void __iomem *mbase, *addr, *data;
+ void __iomem *mbase;
mbase = ioremap(base, PAGE_SIZE);
- addr = mbase + (cfg_addr & ~PAGE_MASK);
+ hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);
if ((cfg_data & PAGE_MASK) != base)
mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
- data = mbase + (cfg_data & ~PAGE_MASK);
- setup_indirect_pci_nomap(hose, addr, data);
+ hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);
+ hose->ops = &indirect_pci_ops;
+ hose->indirect_type = flags;
}
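The indirect_pci.c changes above drop the compile-time PCI_CFG_OUT macro in favour of a per-hose PPC_INDIRECT_TYPE_BIG_ENDIAN flag: the config-address word written to cfg_addr is the same either way, only the accessor (out_be32 vs out_le32) is chosen at run time. A minimal sketch of how that word is composed for the normal, non-extended-register case follows; the helper name and the sample bus/devfn values are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* Same bit layout as the out_be32()/out_le32() writes in the hunk above:
     * enable bit, bus number, device/function, dword-aligned register offset,
     * plus the cfg_type bit used for type-1 cycles behind the host bus.
     */
    static uint32_t indirect_cfg_addr(uint32_t bus_no, uint32_t devfn,
                                      uint32_t offset, uint32_t cfg_type)
    {
        return 0x80000000 | (bus_no << 16) | (devfn << 8)
                | (offset & 0xfc) | cfg_type;
    }

    int main(void)
    {
        uint32_t devfn = (2u << 3) | 0;  /* device 2, function 0 */

        /* BAR0 (offset 0x10) of 02:00.0 behind a bridge -> type-1 cycle */
        printf("cfg_addr word: 0x%08x\n", indirect_cfg_addr(2, devfn, 0x10, 1));
        return 0;
    }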
diff --git a/arch/powerpc/sysdev/mv64x60_pci.c b/arch/powerpc/sysdev/mv64x60_pci.c
index 45db86c2363..9b3baa7317d 100644
--- a/arch/powerpc/sysdev/mv64x60_pci.c
+++ b/arch/powerpc/sysdev/mv64x60_pci.c
@@ -144,7 +144,7 @@ static int __init mv64x60_add_bridge(struct device_node *dev)
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
- setup_indirect_pci(hose, rsrc.start, rsrc.start + 4);
+ setup_indirect_pci(hose, rsrc.start, rsrc.start + 4, 0);
hose->self_busno = hose->first_busno;
printk(KERN_INFO "Found MV64x60 PCI host bridge at 0x%016llx. "
diff --git a/arch/ppc/configs/TQM8540_defconfig b/arch/ppc/configs/TQM8540_defconfig
index 99bf3b7a276..f33f0e772dc 100644
--- a/arch/ppc/configs/TQM8540_defconfig
+++ b/arch/ppc/configs/TQM8540_defconfig
@@ -136,7 +136,7 @@ CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
# CONFIG_CMDLINE_BOOL is not set
# CONFIG_PM is not set
-# CONFIG_SOFTWARE_SUSPEND is not set
+# CONFIG_HIBERNATION is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc/configs/TQM8541_defconfig b/arch/ppc/configs/TQM8541_defconfig
index 0ff56695d34..e00cd62daa3 100644
--- a/arch/ppc/configs/TQM8541_defconfig
+++ b/arch/ppc/configs/TQM8541_defconfig
@@ -138,7 +138,7 @@ CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
# CONFIG_CMDLINE_BOOL is not set
# CONFIG_PM is not set
-# CONFIG_SOFTWARE_SUSPEND is not set
+# CONFIG_HIBERNATION is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc/configs/TQM8555_defconfig b/arch/ppc/configs/TQM8555_defconfig
index 730b3db2e47..43a0d9df1e2 100644
--- a/arch/ppc/configs/TQM8555_defconfig
+++ b/arch/ppc/configs/TQM8555_defconfig
@@ -138,7 +138,7 @@ CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
# CONFIG_CMDLINE_BOOL is not set
# CONFIG_PM is not set
-# CONFIG_SOFTWARE_SUSPEND is not set
+# CONFIG_HIBERNATION is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc/configs/TQM8560_defconfig b/arch/ppc/configs/TQM8560_defconfig
index 1d902072825..a814d17a2be 100644
--- a/arch/ppc/configs/TQM8560_defconfig
+++ b/arch/ppc/configs/TQM8560_defconfig
@@ -137,7 +137,7 @@ CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
# CONFIG_CMDLINE_BOOL is not set
# CONFIG_PM is not set
-# CONFIG_SOFTWARE_SUSPEND is not set
+# CONFIG_HIBERNATION is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc/configs/ev64360_defconfig b/arch/ppc/configs/ev64360_defconfig
index d471e578dcb..f297c4bb632 100644
--- a/arch/ppc/configs/ev64360_defconfig
+++ b/arch/ppc/configs/ev64360_defconfig
@@ -142,7 +142,7 @@ CONFIG_BINFMT_MISC=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyMM0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"
# CONFIG_PM is not set
-# CONFIG_SOFTWARE_SUSPEND is not set
+# CONFIG_HIBERNATION is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc/configs/ml300_defconfig b/arch/ppc/configs/ml300_defconfig
index 4a33aca948c..69bad91a6b6 100644
--- a/arch/ppc/configs/ml300_defconfig
+++ b/arch/ppc/configs/ml300_defconfig
@@ -148,7 +148,7 @@ CONFIG_BINFMT_ELF=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,9600"
# CONFIG_PM is not set
-# CONFIG_SOFTWARE_SUSPEND is not set
+# CONFIG_HIBERNATION is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc/configs/ml403_defconfig b/arch/ppc/configs/ml403_defconfig
index fafd2516fa5..a78896ea456 100644
--- a/arch/ppc/configs/ml403_defconfig
+++ b/arch/ppc/configs/ml403_defconfig
@@ -149,7 +149,7 @@ CONFIG_BINFMT_ELF=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,9600"
# CONFIG_PM is not set
-# CONFIG_SOFTWARE_SUSPEND is not set
+# CONFIG_HIBERNATION is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc/configs/mpc834x_sys_defconfig b/arch/ppc/configs/mpc834x_sys_defconfig
index b96a6d6dad0..d90c8a7e060 100644
--- a/arch/ppc/configs/mpc834x_sys_defconfig
+++ b/arch/ppc/configs/mpc834x_sys_defconfig
@@ -130,7 +130,7 @@ CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
# CONFIG_CMDLINE_BOOL is not set
# CONFIG_PM is not set
-# CONFIG_SOFTWARE_SUSPEND is not set
+# CONFIG_HIBERNATION is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc/configs/prep_defconfig b/arch/ppc/configs/prep_defconfig
index 0aa333178b2..b7cee2d7140 100644
--- a/arch/ppc/configs/prep_defconfig
+++ b/arch/ppc/configs/prep_defconfig
@@ -166,7 +166,7 @@ CONFIG_PROC_PREPRESIDUAL=y
CONFIG_PM=y
# CONFIG_PM_LEGACY is not set
# CONFIG_PM_DEBUG is not set
-CONFIG_SOFTWARE_SUSPEND=y
+CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION=""
# CONFIG_SECCOMP is not set
CONFIG_ISA_DMA_API=y
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 6ffbab77ae4..62391fb1f61 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -173,7 +173,7 @@ int appldata_diag(char record_nr, u16 function, unsigned long buffer,
/*
* appldata_mod_vtimer_wrap()
*
- * wrapper function for mod_virt_timer(), because smp_call_function_on()
+ * wrapper function for mod_virt_timer(), because smp_call_function_single()
* accepts only one parameter.
*/
static void __appldata_mod_vtimer_wrap(void *p) {
@@ -208,9 +208,9 @@ __appldata_vtimer_setup(int cmd)
num_online_cpus()) * TOD_MICRO;
for_each_online_cpu(i) {
per_cpu(appldata_timer, i).expires = per_cpu_interval;
- smp_call_function_on(add_virt_timer_periodic,
- &per_cpu(appldata_timer, i),
- 0, 1, i);
+ smp_call_function_single(i, add_virt_timer_periodic,
+ &per_cpu(appldata_timer, i),
+ 0, 1);
}
appldata_timer_active = 1;
P_INFO("Monitoring timer started.\n");
@@ -236,8 +236,8 @@ __appldata_vtimer_setup(int cmd)
} args;
args.timer = &per_cpu(appldata_timer, i);
args.expires = per_cpu_interval;
- smp_call_function_on(__appldata_mod_vtimer_wrap,
- &args, 0, 1, i);
+ smp_call_function_single(i, __appldata_mod_vtimer_wrap,
+ &args, 0, 1);
}
}
}
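The s390 appldata and smp.c hunks above convert the arch-private smp_call_function_on() to the generic smp_call_function_single(), which moves the target CPU from the last argument to the first. The following user-space sketch only illustrates that argument reordering; the stub functions stand in for the kernel symbols and do not model real cross-CPU calls.

    #include <stdio.h>

    /* Stand-in with the argument order used by the hunks above */
    static int smp_call_function_single(int cpu, void (*func)(void *), void *info,
                                        int nonatomic, int wait)
    {
        (void)nonatomic; (void)wait;
        printf("running on cpu %d\n", cpu);
        func(info);
        return 0;
    }

    /* The old s390-private helper took the CPU last ... */
    static int smp_call_function_on(void (*func)(void *), void *info,
                                    int nonatomic, int wait, int cpu)
    {
        /* ... the generic replacement takes it first */
        return smp_call_function_single(cpu, func, info, nonatomic, wait);
    }

    static void hello(void *info) { printf("%s\n", (const char *)info); }

    int main(void)
    {
        smp_call_function_on(hello, "converted caller", 0, 1, 3);
        return 0;
    }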
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index acc415457b4..6ee1bedbd1b 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1710,3 +1710,13 @@ compat_sys_timerfd_wrapper:
sys_eventfd_wrapper:
llgfr %r2,%r2 # unsigned int
jg sys_eventfd
+
+ .globl sys_fallocate_wrapper
+sys_fallocate_wrapper:
+ lgfr %r2,%r2 # int
+ lgfr %r3,%r3 # int
+ sllg %r4,%r4,32 # get high word of 64bit loff_t
+ lr %r4,%r5 # get low word of 64bit loff_t
+ sllg %r5,%r6,32 # get high word of 64bit loff_t
+ l %r5,164(%r15) # get low word of 64bit loff_t
+ jg sys_fallocate
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index bc7ff3658c3..f3bceb16532 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -624,9 +624,11 @@ io_work_loop:
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
+ TRACE_IRQS_OFF
l %r1,BASED(.Ls390_handle_mcck)
- la %r14,BASED(io_work_loop)
- br %r1 # TIF bit will be cleared by handler
+ basr %r14,%r1 # TIF bit will be cleared by handler
+ TRACE_IRQS_ON
+ b BASED(io_work_loop)
#
# _TIF_NEED_RESCHED is set, call schedule
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2a7b1304418..9c0d5cc8269 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -611,8 +611,10 @@ io_work_loop:
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
- larl %r14,io_work_loop
- jg s390_handle_mcck # TIF bit will be cleared by handler
+ TRACE_IRQS_OFF
+ brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
+ TRACE_IRQS_ON
+ j io_work_loop
#
# _TIF_NEED_RESCHED is set, call schedule
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 8f8c802f1bc..83477c7dc74 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -35,6 +35,7 @@
#define ARCH_OFFSET 0
#endif
+.section ".text.head","ax"
#ifndef CONFIG_IPL
.org 0
.long 0x00080000,0x80000000+startup # Just a restart PSW
diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c
index d73a74013e7..d494161b05b 100644
--- a/arch/s390/kernel/init_task.c
+++ b/arch/s390/kernel/init_task.c
@@ -7,6 +7,7 @@
*/
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 441975b796f..abb447a3e47 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 182c085ae4d..35edbef1d22 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
@@ -120,7 +121,7 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
if (wait)
data.finished = CPU_MASK_NONE;
- spin_lock_bh(&call_lock);
+ spin_lock(&call_lock);
call_data = &data;
for_each_cpu_mask(cpu, map)
@@ -129,18 +130,16 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
/* Wait for response */
while (!cpus_equal(map, data.started))
cpu_relax();
-
if (wait)
while (!cpus_equal(map, data.finished))
cpu_relax();
-
- spin_unlock_bh(&call_lock);
-
+ spin_unlock(&call_lock);
out:
- local_irq_disable();
- if (local)
+ if (local) {
+ local_irq_disable();
func(info);
- local_irq_enable();
+ local_irq_enable();
+ }
}
/*
@@ -170,30 +169,28 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
EXPORT_SYMBOL(smp_call_function);
/*
- * smp_call_function_on:
+ * smp_call_function_single:
+ * @cpu: the CPU where func should run
* @func: the function to run; this must be fast and non-blocking
* @info: an arbitrary pointer to pass to the function
* @nonatomic: unused
* @wait: if true, wait (atomically) until function has completed on other CPUs
- * @cpu: the CPU where func should run
*
* Run a function on one processor.
*
* You must not call this function with disabled interrupts, from a
* hardware interrupt handler or from a bottom half.
*/
-int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
- int wait, int cpu)
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int nonatomic, int wait)
{
- cpumask_t map = CPU_MASK_NONE;
-
preempt_disable();
- cpu_set(cpu, map);
- __smp_call_function_map(func, info, nonatomic, wait, map);
+ __smp_call_function_map(func, info, nonatomic, wait,
+ cpumask_of_cpu(cpu));
preempt_enable();
return 0;
}
-EXPORT_SYMBOL(smp_call_function_on);
+EXPORT_SYMBOL(smp_call_function_single);
static void do_send_stop(void)
{
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 1c90c7e9997..1eaff84a1eb 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
@@ -265,3 +266,23 @@ s390_fadvise64_64(struct fadvise64_64_args __user *args)
return -EFAULT;
return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
}
+
+#ifndef CONFIG_64BIT
+/*
+ * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
+ * 64 bit argument "len" is split into the upper and lower 32 bits. The
+ * system call wrapper in the user space loads the value to %r6/%r7.
+ * The code in entry.S keeps the values in %r2 - %r6 where they are and
+ * stores %r7 to 96(%r15). But the standard C linkage requires that
+ * the whole 64 bit value for len is stored on the stack and doesn't
+ * use %r6 at all. So s390_fallocate has to convert the arguments from
+ * %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len
+ * to
+ * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
+ */
+asmlinkage long s390_fallocate(int fd, int mode, loff_t offset,
+ u32 len_high, u32 len_low)
+{
+ return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
+}
+#endif
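The comment block above explains why 31-bit s390 hands sys_fallocate() its 64-bit "len" as two 32-bit halves. The reassembly the wrapper performs is just a shift and an OR; here is a small sketch with a concrete, hypothetical 6 GiB length to make the split visible.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same reassembly as s390_fallocate() above */
    static uint64_t join_len(uint32_t len_high, uint32_t len_low)
    {
        return ((uint64_t)len_high << 32) | len_low;
    }

    int main(void)
    {
        uint64_t len  = 6ULL << 30;          /* 0x0000000180000000 */
        uint32_t high = len >> 32;           /* 0x00000001 */
        uint32_t low  = len & 0xffffffffu;   /* 0x80000000 */

        printf("len = 0x%016" PRIx64 "\n", join_len(high, low));
        return 0;
    }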
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 738feb4a0aa..9e26ed9fe4e 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -322,7 +322,7 @@ NI_SYSCALL /* 310 sys_move_pages */
SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper)
SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait_wrapper)
SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper)
-NI_SYSCALL /* 314 sys_fallocate */
+SYSCALL(s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat_wrapper) /* 315 */
SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper)
SYSCALL(sys_timerfd,sys_timerfd,compat_sys_timerfd_wrapper)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 6ab7d4ee13a..b4622a3889b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -21,6 +21,7 @@ SECTIONS
. = 0x00000000;
_text = .; /* Text and read-only data */
.text : {
+ *(.text.head)
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index b6ed143e859..84ff78de6ba 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -415,7 +415,7 @@ EXPORT_SYMBOL(add_virt_timer_periodic);
/*
* If we change a pending timer the function must be called on the CPU
- * where the timer is running on, e.g. by smp_call_function_on()
+ * where the timer is running, e.g. by smp_call_function_single()
*
* The original mod_timer adds the timer if it is not pending. For compatibility
* we do the same. The timer will be added on the current CPU as a oneshot timer.
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 92a56519002..fd594d5fe14 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -29,8 +29,8 @@ struct memory_segment {
static LIST_HEAD(mem_segs);
-void memmap_init(unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn)
+void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
+ unsigned long start_pfn)
{
struct page *start, *end;
struct page *map_start, *map_end;
@@ -66,7 +66,7 @@ void memmap_init(unsigned long size, int nid, unsigned long zone,
}
}
-static inline void *vmem_alloc_pages(unsigned int order)
+static void __init_refok *vmem_alloc_pages(unsigned int order)
{
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
diff --git a/arch/sparc/defconfig b/arch/sparc/defconfig
index 38bd79fe6e7..fdc67238408 100644
--- a/arch/sparc/defconfig
+++ b/arch/sparc/defconfig
@@ -600,7 +600,7 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_IPMI_HANDLER is not set
# CONFIG_WATCHDOG is not set
CONFIG_HW_RANDOM=m
-CONFIG_RTC=m
+CONFIG_JS_RTC=m
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_DRM is not set
diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c
index fc31de66b1c..d9d4f96360c 100644
--- a/arch/sparc/kernel/init_task.c
+++ b/arch/sparc/kernel/init_task.c
@@ -1,4 +1,5 @@
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 55bac516dfe..7b4abde4302 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -161,6 +161,8 @@ EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one));
+EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached));
+
#ifdef CONFIG_SBUS
EXPORT_SYMBOL(sbus_root);
EXPORT_SYMBOL(dma_chain);
@@ -260,6 +262,7 @@ EXPORT_SYMBOL(__memmove);
/* Moving data to/from userspace. */
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(__strnlen_user);
/* Networking helper routines. */
EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 47583887abc..15109c156e8 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -35,6 +35,8 @@ SECTIONS
__ex_table : { *(__ex_table) }
__stop___ex_table = .;
+ NOTES
+
. = ALIGN(4096);
__init_begin = .;
_sinittext = .;
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index a65eba41097..1c37ea892de 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -162,7 +162,7 @@ __bzero:
8:
add %o0, 1, %o0
subcc %o1, 1, %o1
- bne,a 8b
+ bne 8b
EX(stb %g3, [%o0 - 1], add %o1, 1)
0:
retl
diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c
index 27fdac99f79..a36ab9c5ee0 100644
--- a/arch/sparc/prom/printf.c
+++ b/arch/sparc/prom/printf.c
@@ -13,6 +13,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
@@ -44,3 +45,4 @@ prom_printf(char *fmt, ...)
prom_write(ppbuf, i);
}
+EXPORT_SYMBOL(prom_printf);
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index 1f130f3b6c2..a5faa3683bd 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -29,7 +29,7 @@ config DEBUG_BOOTMEM
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
- depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
+ depends on DEBUG_KERNEL && !HIBERNATION
help
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 10e301970a4..68338a601f7 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,11 +1,12 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22
-# Thu Jul 19 21:30:37 2007
+# Linux kernel version: 2.6.23-rc1
+# Sun Jul 22 19:24:37 2007
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CMOS_UPDATE=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_64BIT=y
CONFIG_MMU=y
@@ -17,6 +18,7 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_AUDIT_ARCH=y
CONFIG_ARCH_NO_VIRT_TO_BUS=y
+CONFIG_OF=y
CONFIG_SPARC64_PAGE_SIZE_8KB=y
# CONFIG_SPARC64_PAGE_SIZE_64KB is not set
# CONFIG_SPARC64_PAGE_SIZE_512KB is not set
@@ -314,6 +316,7 @@ CONFIG_FW_LOADER=y
# CONFIG_SYS_HYPERVISOR is not set
CONFIG_CONNECTOR=m
# CONFIG_MTD is not set
+CONFIG_OF_DEVICE=y
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_FD is not set
@@ -433,10 +436,7 @@ CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_ISCSI_ATTRS=m
# CONFIG_SCSI_SAS_ATTRS is not set
# CONFIG_SCSI_SAS_LIBSAS is not set
-
-#
-# SCSI low-level drivers
-#
+CONFIG_SCSI_LOWLEVEL=y
CONFIG_ISCSI_TCP=m
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
@@ -701,7 +701,6 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_IPMI_HANDLER is not set
# CONFIG_WATCHDOG is not set
# CONFIG_HW_RANDOM is not set
-CONFIG_RTC=y
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_DRM is not set
@@ -844,6 +843,7 @@ CONFIG_HWMON=y
#
# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
CONFIG_FB=y
# CONFIG_FIRMWARE_EDID is not set
CONFIG_FB_DDC=y
@@ -937,7 +937,6 @@ CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
CONFIG_SND_SEQUENCER_OSS=y
-# CONFIG_SND_RTCTIMER is not set
# CONFIG_SND_DYNAMIC_MINORS is not set
CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
@@ -1034,6 +1033,10 @@ CONFIG_SND_SUN_CS4231=m
# CONFIG_SND_SOC is not set
#
+# SoC Audio support for SuperH
+#
+
+#
# Open Sound System
#
# CONFIG_SOUND_PRIME is not set
@@ -1157,19 +1160,7 @@ CONFIG_USB_STORAGE=m
#
# CONFIG_USB_GADGET is not set
# CONFIG_MMC is not set
-
-#
-# LED devices
-#
# CONFIG_NEW_LEDS is not set
-
-#
-# LED drivers
-#
-
-#
-# LED Triggers
-#
# CONFIG_INFINIBAND is not set
#
@@ -1199,7 +1190,6 @@ CONFIG_USB_STORAGE=m
# Misc Linux/SPARC drivers
#
CONFIG_SUN_OPENPROMIO=m
-CONFIG_SUN_MOSTEK_RTC=y
# CONFIG_OBP_FLASH is not set
# CONFIG_SUN_BPP is not set
# CONFIG_BBC_I2C is not set
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 35feacb6b8e..9dbd833d79d 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -1,15 +1,15 @@
-/* $Id: head.S,v 1.87 2002/02/09 19:49:31 davem Exp $
- * head.S: Initial boot code for the Sparc64 port of Linux.
+/* head.S: Initial boot code for the Sparc64 port of Linux.
*
- * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1997, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
- * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
*/
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/threads.h>
+#include <linux/init.h>
#include <asm/thread_info.h>
#include <asm/asi.h>
#include <asm/pstate.h>
@@ -374,6 +374,7 @@ jump_to_sun4u_init:
jmpl %g2 + %g0, %g0
nop
+ .section .text.init.refok
sun4u_init:
BRANCH_IF_SUN4V(g1, sun4v_init)
@@ -529,6 +530,8 @@ tlb_fixup_done:
nop
/* Not reached... */
+ .previous
+
/* This is meant to allow the sharing of this code between
* boot processor invocation (via setup_tba() below) and
* secondary processor startup (via trampoline.S). The
diff --git a/arch/sparc64/kernel/init_task.c b/arch/sparc64/kernel/init_task.c
index 329b38fa5c8..90007cf88ba 100644
--- a/arch/sparc64/kernel/init_task.c
+++ b/arch/sparc64/kernel/init_task.c
@@ -1,4 +1,5 @@
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index fd7899ba1d7..ca7cdfd55f7 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
diff --git a/arch/sparc64/kernel/viohs.c b/arch/sparc64/kernel/viohs.c
index 09126fc338b..708fa1705fb 100644
--- a/arch/sparc64/kernel/viohs.c
+++ b/arch/sparc64/kernel/viohs.c
@@ -702,7 +702,7 @@ u32 vio_send_sid(struct vio_driver_state *vio)
}
EXPORT_SYMBOL(vio_send_sid);
-extern int vio_ldc_alloc(struct vio_driver_state *vio,
+int vio_ldc_alloc(struct vio_driver_state *vio,
struct ldc_channel_config *base_cfg,
void *event_arg)
{
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 481861764de..b982fa3dd74 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -45,6 +45,8 @@ SECTIONS
__ex_table : { *(__ex_table) }
__stop___ex_table = .;
+ NOTES
+
. = ALIGN(PAGE_SIZE);
__init_begin = .;
.init.text : {
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 542c9ef858f..d8709050740 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -499,7 +499,7 @@ static struct mc_device mem_mc = {
.remove = mem_remove,
};
-static int mem_mc_init(void)
+static int __init mem_mc_init(void)
{
if(can_drop_memory())
mconsole_register_dev(&mem_mc);
@@ -798,7 +798,7 @@ void mconsole_stack(struct mc_request *req)
*/
static char *notify_socket = NULL;
-static int mconsole_init(void)
+static int __init mconsole_init(void)
{
/* long to avoid size mismatch warnings from gcc */
long sock;
diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c
index e41a08f0469..867666a0233 100644
--- a/arch/um/drivers/mmapper_kern.c
+++ b/arch/um/drivers/mmapper_kern.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h>
#include "mem_user.h"
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 72773dd5442..d35d0c1ee7f 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -623,7 +623,7 @@ static int eth_setup_common(char *str, int index)
return found;
}
-static int eth_setup(char *str)
+static int __init eth_setup(char *str)
{
struct eth_init *new;
char *error;
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index fc27f6c72b4..aff661fe2ee 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -469,7 +469,7 @@ __uml_help(fakehd,
" Change the ubd device name to \"hd\".\n\n"
);
-static void do_ubd_request(request_queue_t * q);
+static void do_ubd_request(struct request_queue * q);
/* Only changed by ubd_init, which is an initcall. */
int thread_fd = -1;
@@ -1081,7 +1081,7 @@ static void prepare_request(struct request *req, struct io_thread_req *io_req,
}
/* Called with dev->lock held */
-static void do_ubd_request(request_queue_t *q)
+static void do_ubd_request(struct request_queue *q)
{
struct io_thread_req *io_req;
struct request *req;
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 356e50f5aae..ce6828fd396 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -6,6 +6,7 @@
#include "linux/slab.h"
#include "linux/smp_lock.h"
#include "linux/ptrace.h"
+#include "linux/fs.h"
#include "asm/ptrace.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
diff --git a/arch/um/kernel/init_task.c b/arch/um/kernel/init_task.c
index d4f1d1ab252..cba516e6c99 100644
--- a/arch/um/kernel/init_task.c
+++ b/arch/um/kernel/init_task.c
@@ -4,6 +4,7 @@
*/
#include "linux/mm.h"
+#include "linux/fs.h"
#include "linux/module.h"
#include "linux/sched.h"
#include "linux/init_task.h"
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 72ff85693a3..d2b11f24269 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -62,7 +62,7 @@ static void setup_highmem(unsigned long highmem_start,
}
#endif
-void mem_init(void)
+void __init mem_init(void)
{
/* clear the zero-page */
memset((void *) empty_zero_page, 0, PAGE_SIZE);
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index 3ba6e4c841d..5ee7e851bbc 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -28,7 +28,8 @@ unsigned long high_physmem;
extern unsigned long long physmem_size;
-int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
+int __init init_maps(unsigned long physmem, unsigned long iomem,
+ unsigned long highmem)
{
struct page *p, *map;
unsigned long phys_len, phys_pages, highmem_len, highmem_pages;
@@ -47,13 +48,7 @@ int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
total_pages = phys_pages + iomem_pages + highmem_pages;
total_len = phys_len + iomem_len + highmem_len;
- if(kmalloc_ok){
- map = kmalloc(total_len, GFP_KERNEL);
- if(map == NULL)
- map = vmalloc(total_len);
- }
- else map = alloc_bootmem_low_pages(total_len);
-
+ map = alloc_bootmem_low_pages(total_len);
if(map == NULL)
return -ENOMEM;
@@ -98,8 +93,8 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
extern int __syscall_stub_start;
-void setup_physmem(unsigned long start, unsigned long reserve_end,
- unsigned long len, unsigned long long highmem)
+void __init setup_physmem(unsigned long start, unsigned long reserve_end,
+ unsigned long len, unsigned long long highmem)
{
unsigned long reserve = reserve_end - start;
int pfn = PFN_UP(__pa(reserve_end));
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 2a69a7ce579..48051a98525 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -145,7 +145,7 @@ void init_idle_skas(void)
extern void start_kernel(void);
-static int start_kernel_proc(void *unused)
+static int __init start_kernel_proc(void *unused)
{
int pid;
@@ -165,7 +165,7 @@ extern int userspace_pid[];
extern char cpu0_irqstack[];
-int start_uml_skas(void)
+int __init start_uml_skas(void)
{
stack_protections((unsigned long) &cpu0_irqstack);
set_sigstack(cpu0_irqstack, THREAD_SIZE);
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index 237c4eab7cf..7b3b67333ff 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -7,6 +7,7 @@
#include "linux/file.h"
#include "linux/smp_lock.h"
#include "linux/mm.h"
+#include "linux/fs.h"
#include "linux/utsname.h"
#include "linux/msg.h"
#include "linux/shm.h"
diff --git a/arch/um/os-Linux/aio.c b/arch/um/os-Linux/aio.c
index b126df4ea16..59348359f9a 100644
--- a/arch/um/os-Linux/aio.c
+++ b/arch/um/os-Linux/aio.c
@@ -14,6 +14,7 @@
#include "init.h"
#include "user.h"
#include "mode.h"
+#include "kern_constants.h"
struct aio_thread_req {
enum aio_type type;
@@ -65,47 +66,33 @@ static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf,
int len, unsigned long long offset, struct aio_context *aio)
{
- struct iocb iocb, *iocbp = &iocb;
+ struct iocb *iocbp = & ((struct iocb) {
+ .aio_data = (unsigned long) aio,
+ .aio_fildes = fd,
+ .aio_buf = (unsigned long) buf,
+ .aio_nbytes = len,
+ .aio_offset = offset
+ });
char c;
- int err;
- iocb = ((struct iocb) { .aio_data = (unsigned long) aio,
- .aio_reqprio = 0,
- .aio_fildes = fd,
- .aio_buf = (unsigned long) buf,
- .aio_nbytes = len,
- .aio_offset = offset,
- .aio_reserved1 = 0,
- .aio_reserved2 = 0,
- .aio_reserved3 = 0 });
-
- switch(type){
+ switch (type) {
case AIO_READ:
- iocb.aio_lio_opcode = IOCB_CMD_PREAD;
- err = io_submit(ctx, 1, &iocbp);
+ iocbp->aio_lio_opcode = IOCB_CMD_PREAD;
break;
case AIO_WRITE:
- iocb.aio_lio_opcode = IOCB_CMD_PWRITE;
- err = io_submit(ctx, 1, &iocbp);
+ iocbp->aio_lio_opcode = IOCB_CMD_PWRITE;
break;
case AIO_MMAP:
- iocb.aio_lio_opcode = IOCB_CMD_PREAD;
- iocb.aio_buf = (unsigned long) &c;
- iocb.aio_nbytes = sizeof(c);
- err = io_submit(ctx, 1, &iocbp);
+ iocbp->aio_lio_opcode = IOCB_CMD_PREAD;
+ iocbp->aio_buf = (unsigned long) &c;
+ iocbp->aio_nbytes = sizeof(c);
break;
default:
- printk("Bogus op in do_aio - %d\n", type);
- err = -EINVAL;
- break;
+ printk(UM_KERN_ERR "Bogus op in do_aio - %d\n", type);
+ return -EINVAL;
}
- if(err > 0)
- err = 0;
- else
- err = -errno;
-
- return err;
+ return (io_submit(ctx, 1, &iocbp) > 0) ? 0 : -errno;
}
/* Initialized in an initcall and unchanged thereafter */
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index 2d9d2ca3929..e9c14329751 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -194,7 +194,7 @@ int os_unmap_memory(void *addr, int len)
#define MADV_REMOVE KERNEL_MADV_REMOVE
#endif
-int __init os_drop_memory(void *addr, int length)
+int os_drop_memory(void *addr, int length)
{
int err;
diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c
index 419b2d5ff6d..4c37b1b1d0b 100644
--- a/arch/um/os-Linux/user_syms.c
+++ b/arch/um/os-Linux/user_syms.c
@@ -19,10 +19,7 @@ extern void *memmove(void *, const void *, size_t);
extern void *memset(void *, int, size_t);
extern int printf(const char *, ...);
-/* If they're not defined, the export is included in lib/string.c.*/
-#ifdef __HAVE_ARCH_STRLEN
-EXPORT_SYMBOL(strlen);
-#endif
+/* If it's not defined, the export is included in lib/string.c.*/
#ifdef __HAVE_ARCH_STRSTR
EXPORT_SYMBOL(strstr);
#endif
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index 098720be019..d6b3ecd4b77 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -4,7 +4,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
obj-$(CONFIG_MODE_SKAS) += stub.o stub_segv.o
-subarch-obj-y = lib/bitops.o lib/semaphore.o
+subarch-obj-y = lib/bitops.o lib/semaphore.o lib/string.o
subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem.o
subarch-obj-$(CONFIG_MODULES) += kernel/module.o
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 45f82ae6d38..ffa03640628 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -765,6 +765,9 @@ config COMPAT
depends on IA32_EMULATION
default y
+config COMPAT_FOR_U64_ALIGNMENT
+ def_bool COMPAT
+
config SYSVIPC_COMPAT
bool
depends on COMPAT && SYSVIPC
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index b7c4cd04bfc..e64f65c9d90 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -199,7 +199,7 @@ CONFIG_GENERIC_PENDING_IRQ=y
CONFIG_PM=y
# CONFIG_PM_LEGACY is not set
# CONFIG_PM_DEBUG is not set
-CONFIG_SOFTWARE_SUSPEND=y
+CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION=""
CONFIG_SUSPEND_SMP=y
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index b70f3e7cf06..dffd2ac7274 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -41,8 +41,9 @@ int sysctl_vsyscall32 = 1;
#undef ARCH_DLINFO
#define ARCH_DLINFO do { \
if (sysctl_vsyscall32) { \
- NEW_AUX_ENT(AT_SYSINFO, (u32)(u64)VSYSCALL32_VSYSCALL); \
- NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL32_BASE); \
+ current->mm->context.vdso = (void *)VSYSCALL32_BASE; \
+ NEW_AUX_ENT(AT_SYSINFO, (u32)(u64)VSYSCALL32_VSYSCALL); \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL32_BASE); \
} \
} while(0)
diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c
index 4de3a54318f..4a233ad6269 100644
--- a/arch/x86_64/ia32/ptrace32.c
+++ b/arch/x86_64/ia32/ptrace32.c
@@ -15,6 +15,7 @@
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/ptrace.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
index fc4419ff035..15013bac181 100644
--- a/arch/x86_64/ia32/syscall32.c
+++ b/arch/x86_64/ia32/syscall32.c
@@ -49,14 +49,6 @@ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
return ret;
}
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
- if (vma->vm_start == VSYSCALL32_BASE &&
- vma->vm_mm && vma->vm_mm->task_size == IA32_PAGE_OFFSET)
- return "[vdso]";
- return NULL;
-}
-
static int __init init_syscall32(void)
{
char *syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index 47f1dc30bf5..d1d18c1ea0f 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -26,7 +26,7 @@ obj-y += io_apic.o mpparse.o genapic.o genapic_flat.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_PM) += suspend.o
-obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
+obj-$(CONFIG_HIBERNATION) += suspend_asm.o
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_IOMMU) += pci-gart.o aperture.o
diff --git a/arch/x86_64/kernel/acpi/sleep.c b/arch/x86_64/kernel/acpi/sleep.c
index 4277f2b27e6..79475d23707 100644
--- a/arch/x86_64/kernel/acpi/sleep.c
+++ b/arch/x86_64/kernel/acpi/sleep.c
@@ -51,8 +51,6 @@
Low-Level Sleep Support
-------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_SLEEP
-
/* address in low memory of the wakeup routine. */
unsigned long acpi_wakeup_address = 0;
unsigned long acpi_realmode_flags;
@@ -117,8 +115,6 @@ static int __init acpi_sleep_setup(char *str)
__setup("acpi_sleep=", acpi_sleep_setup);
-#endif /*CONFIG_ACPI_SLEEP */
-
void acpi_pci_link_exit(void)
{
}
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index e7ac629d4c4..2842f50cbe3 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index ea83a9f9196..573c0a6e0ac 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -146,7 +146,7 @@ void fix_processor_context(void)
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_HIBERNATION
/* Defined in arch/x86_64/kernel/suspend_asm.S */
extern int restore_image(void);
@@ -236,4 +236,4 @@ int pfn_is_nosave(unsigned long pfn)
unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
-#endif /* CONFIG_SOFTWARE_SUSPEND */
+#endif /* CONFIG_HIBERNATION */
diff --git a/arch/x86_64/kernel/sys_x86_64.c b/arch/x86_64/kernel/sys_x86_64.c
index d067d9a2ad2..4770b7a2052 100644
--- a/arch/x86_64/kernel/sys_x86_64.c
+++ b/arch/x86_64/kernel/sys_x86_64.c
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
diff --git a/arch/x86_64/kernel/tce.c b/arch/x86_64/kernel/tce.c
index 3aeae2fa2e2..821527e7faa 100644
--- a/arch/x86_64/kernel/tce.c
+++ b/arch/x86_64/kernel/tce.c
@@ -165,7 +165,7 @@ done:
return ret;
}
-void* alloc_tce_table(void)
+void * __init alloc_tce_table(void)
{
unsigned int size;
@@ -175,7 +175,7 @@ void* alloc_tce_table(void)
return __alloc_bootmem_low(size, size, 0);
}
-void free_tce_table(void *tbl)
+void __init free_tce_table(void *tbl)
{
unsigned int size;
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c
index 9b76b03d060..2a59bde663f 100644
--- a/arch/x86_64/kernel/tsc.c
+++ b/arch/x86_64/kernel/tsc.c
@@ -118,8 +118,6 @@ core_initcall(cpufreq_tsc);
#endif
-static int tsc_unstable = 0;
-
/*
* Make an educated guess if the TSC is trustworthy and synchronized
* over all CPUs.
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 38f5d636800..458893b376f 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -600,6 +600,16 @@ void mark_rodata_ro(void)
{
unsigned long start = (unsigned long)_stext, end;
+#ifdef CONFIG_HOTPLUG_CPU
+ /* It must still be possible to apply SMP alternatives. */
+ if (num_possible_cpus() > 1)
+ start = (unsigned long)_etext;
+#endif
+
+#ifdef CONFIG_KPROBES
+ start = (unsigned long)__start_rodata;
+#endif
+
end = (unsigned long)__end_rodata;
start = (start + PAGE_SIZE - 1) & PAGE_MASK;
end &= PAGE_MASK;
diff --git a/arch/x86_64/vdso/vma.c b/arch/x86_64/vdso/vma.c
index d4cb83a6c06..ff9333e5fb0 100644
--- a/arch/x86_64/vdso/vma.c
+++ b/arch/x86_64/vdso/vma.c
@@ -4,6 +4,7 @@
* Subject to the GPL, v.2
*/
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/random.h>
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 3e316dd7252..dc715a562e1 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -796,7 +796,7 @@ static void update_write_batch(struct as_data *ad)
* as_completed_request is to be called when a request has completed and
* returned something to the requesting process, be it an error or data.
*/
-static void as_completed_request(request_queue_t *q, struct request *rq)
+static void as_completed_request(struct request_queue *q, struct request *rq)
{
struct as_data *ad = q->elevator->elevator_data;
@@ -853,7 +853,8 @@ out:
* reference unless it replaces the request at somepart of the elevator
* (ie. the dispatch queue)
*/
-static void as_remove_queued_request(request_queue_t *q, struct request *rq)
+static void as_remove_queued_request(struct request_queue *q,
+ struct request *rq)
{
const int data_dir = rq_is_sync(rq);
struct as_data *ad = q->elevator->elevator_data;
@@ -978,7 +979,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
* read/write expire, batch expire, etc, and moves it to the dispatch
* queue. Returns 1 if a request was found, 0 otherwise.
*/
-static int as_dispatch_request(request_queue_t *q, int force)
+static int as_dispatch_request(struct request_queue *q, int force)
{
struct as_data *ad = q->elevator->elevator_data;
const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
@@ -1139,7 +1140,7 @@ fifo_expired:
/*
* add rq to rbtree and fifo
*/
-static void as_add_request(request_queue_t *q, struct request *rq)
+static void as_add_request(struct request_queue *q, struct request *rq)
{
struct as_data *ad = q->elevator->elevator_data;
int data_dir;
@@ -1167,7 +1168,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
RQ_SET_STATE(rq, AS_RQ_QUEUED);
}
-static void as_activate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(struct request_queue *q, struct request *rq)
{
WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
RQ_SET_STATE(rq, AS_RQ_REMOVED);
@@ -1175,7 +1176,7 @@ static void as_activate_request(request_queue_t *q, struct request *rq)
atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
}
-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_deactivate_request(struct request_queue *q, struct request *rq)
{
WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
@@ -1189,7 +1190,7 @@ static void as_deactivate_request(request_queue_t *q, struct request *rq)
* is not empty - it is used in the block layer to check for plugging and
* merging opportunities
*/
-static int as_queue_empty(request_queue_t *q)
+static int as_queue_empty(struct request_queue *q)
{
struct as_data *ad = q->elevator->elevator_data;
@@ -1198,7 +1199,7 @@ static int as_queue_empty(request_queue_t *q)
}
static int
-as_merge(request_queue_t *q, struct request **req, struct bio *bio)
+as_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
struct as_data *ad = q->elevator->elevator_data;
sector_t rb_key = bio->bi_sector + bio_sectors(bio);
@@ -1216,7 +1217,8 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
-static void as_merged_request(request_queue_t *q, struct request *req, int type)
+static void as_merged_request(struct request_queue *q, struct request *req,
+ int type)
{
struct as_data *ad = q->elevator->elevator_data;
@@ -1234,7 +1236,7 @@ static void as_merged_request(request_queue_t *q, struct request *req, int type)
}
}
-static void as_merged_requests(request_queue_t *q, struct request *req,
+static void as_merged_requests(struct request_queue *q, struct request *req,
struct request *next)
{
/*
@@ -1285,7 +1287,7 @@ static void as_work_handler(struct work_struct *work)
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static int as_may_queue(request_queue_t *q, int rw)
+static int as_may_queue(struct request_queue *q, int rw)
{
int ret = ELV_MQUEUE_MAY;
struct as_data *ad = q->elevator->elevator_data;
@@ -1318,7 +1320,7 @@ static void as_exit_queue(elevator_t *e)
/*
* initialize elevator private data (as_data).
*/
-static void *as_init_queue(request_queue_t *q)
+static void *as_init_queue(struct request_queue *q)
{
struct as_data *ad;
diff --git a/block/blktrace.c b/block/blktrace.c
index 3f0e7c37c05..20fa034ea4a 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -41,7 +41,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
const int cpu = smp_processor_id();
t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
- t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+ t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
t->device = bt->dev;
t->action = action;
t->pid = pid;
@@ -159,7 +159,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
t->sequence = ++(*sequence);
- t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+ t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
t->sector = sector;
t->bytes = bytes;
t->action = what;
@@ -231,7 +231,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
kfree(bt);
}
-static int blk_trace_remove(request_queue_t *q)
+static int blk_trace_remove(struct request_queue *q)
{
struct blk_trace *bt;
@@ -312,7 +312,7 @@ static struct rchan_callbacks blk_relay_callbacks = {
/*
* Setup everything required to start tracing
*/
-static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
+static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
char __user *arg)
{
struct blk_user_trace_setup buts;
@@ -401,7 +401,7 @@ err:
return ret;
}
-static int blk_trace_startstop(request_queue_t *q, int start)
+static int blk_trace_startstop(struct request_queue *q, int start)
{
struct blk_trace *bt;
int ret;
@@ -444,7 +444,7 @@ static int blk_trace_startstop(request_queue_t *q, int start)
**/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
- request_queue_t *q;
+ struct request_queue *q;
int ret, start = 0;
q = bdev_get_queue(bdev);
@@ -479,7 +479,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
* @q: the request queue associated with the device
*
**/
-void blk_trace_shutdown(request_queue_t *q)
+void blk_trace_shutdown(struct request_queue *q)
{
if (q->blk_trace) {
blk_trace_startstop(q, 0);
@@ -488,17 +488,17 @@ void blk_trace_shutdown(request_queue_t *q)
}
/*
- * Average offset over two calls to sched_clock() with a gettimeofday()
+ * Average offset over two calls to cpu_clock() with a gettimeofday()
* in the middle
*/
-static void blk_check_time(unsigned long long *t)
+static void blk_check_time(unsigned long long *t, int this_cpu)
{
unsigned long long a, b;
struct timeval tv;
- a = sched_clock();
+ a = cpu_clock(this_cpu);
do_gettimeofday(&tv);
- b = sched_clock();
+ b = cpu_clock(this_cpu);
*t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
*t -= (a + b) / 2;
@@ -510,16 +510,16 @@ static void blk_check_time(unsigned long long *t)
static void blk_trace_check_cpu_time(void *data)
{
unsigned long long *t;
- int cpu = get_cpu();
+ int this_cpu = get_cpu();
- t = &per_cpu(blk_trace_cpu_offset, cpu);
+ t = &per_cpu(blk_trace_cpu_offset, this_cpu);
/*
* Just call it twice, hopefully the second call will be cache hot
* and a little more precise
*/
- blk_check_time(t);
- blk_check_time(t);
+ blk_check_time(t, this_cpu);
+ blk_check_time(t, this_cpu);
put_cpu();
}
diff --git a/block/bsg.c b/block/bsg.c
index 12c287b9862..d60eee54940 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -31,7 +31,7 @@
#define BSG_VERSION "0.4"
struct bsg_device {
- request_queue_t *queue;
+ struct request_queue *queue;
spinlock_t lock;
struct list_head busy_list;
struct list_head done_list;
@@ -172,7 +172,7 @@ unlock:
return ret;
}
-static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
+static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, int has_write_perm)
{
memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
@@ -206,7 +206,7 @@ static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
* Check if sg_io_v4 from user is allowed and valid
*/
static int
-bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
int ret = 0;
@@ -242,7 +242,7 @@ bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
- request_queue_t *q = bd->queue;
+ struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL;
int ret, rw;
unsigned int dxfer_len;
@@ -337,7 +337,7 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
* do final setup of a 'bc' and submit the matching 'rq' to the block
* layer for io
*/
-static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
+static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
struct bsg_command *bc, struct request *rq)
{
rq->sense = bc->sense;
@@ -603,7 +603,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
bc = NULL;
ret = 0;
while (nr_commands) {
- request_queue_t *q = bd->queue;
+ struct request_queue *q = bd->queue;
bc = bsg_alloc_command(bd);
if (IS_ERR(bc)) {
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d148ccbc36d..54dc0543900 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -71,7 +71,7 @@ struct cfq_rb_root {
* Per block device queue structure
*/
struct cfq_data {
- request_queue_t *queue;
+ struct request_queue *queue;
/*
* rr list of queues with requests and the count of them
@@ -197,7 +197,7 @@ CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS
-static void cfq_dispatch_insert(request_queue_t *, struct request *);
+static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
struct task_struct *, gfp_t);
static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
@@ -237,7 +237,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
kblockd_schedule_work(&cfqd->unplug_work);
}
-static int cfq_queue_empty(request_queue_t *q)
+static int cfq_queue_empty(struct request_queue *q)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -623,7 +623,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
return NULL;
}
-static void cfq_activate_request(request_queue_t *q, struct request *rq)
+static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -641,7 +641,7 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}
-static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -665,7 +665,8 @@ static void cfq_remove_request(struct request *rq)
}
}
-static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
+static int cfq_merge(struct request_queue *q, struct request **req,
+ struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct request *__rq;
@@ -679,7 +680,7 @@ static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
-static void cfq_merged_request(request_queue_t *q, struct request *req,
+static void cfq_merged_request(struct request_queue *q, struct request *req,
int type)
{
if (type == ELEVATOR_FRONT_MERGE) {
@@ -690,7 +691,7 @@ static void cfq_merged_request(request_queue_t *q, struct request *req,
}
static void
-cfq_merged_requests(request_queue_t *q, struct request *rq,
+cfq_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
/*
@@ -703,7 +704,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
cfq_remove_request(next);
}
-static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+static int cfq_allow_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -913,7 +914,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
/*
* Move request from internal lists to the request queue dispatch list.
*/
-static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
+static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1093,7 +1094,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
return dispatched;
}
-static int cfq_dispatch_requests(request_queue_t *q, int force)
+static int cfq_dispatch_requests(struct request_queue *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
@@ -1214,7 +1215,7 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
struct cfq_data *cfqd = cic->key;
if (cfqd) {
- request_queue_t *q = cfqd->queue;
+ struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
__cfq_exit_single_io_context(cfqd, cic);
@@ -1775,7 +1776,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
}
-static void cfq_insert_request(request_queue_t *q, struct request *rq)
+static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1789,7 +1790,7 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq)
cfq_rq_enqueued(cfqd, cfqq, rq);
}
-static void cfq_completed_request(request_queue_t *q, struct request *rq)
+static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
struct cfq_data *cfqd = cfqq->cfqd;
@@ -1868,7 +1869,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
return ELV_MQUEUE_MAY;
}
-static int cfq_may_queue(request_queue_t *q, int rw)
+static int cfq_may_queue(struct request_queue *q, int rw)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
@@ -1922,7 +1923,7 @@ static void cfq_put_request(struct request *rq)
* Allocate cfq data structures associated with this request.
*/
static int
-cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
+cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
@@ -1974,7 +1975,7 @@ static void cfq_kick_queue(struct work_struct *work)
{
struct cfq_data *cfqd =
container_of(work, struct cfq_data, unplug_work);
- request_queue_t *q = cfqd->queue;
+ struct request_queue *q = cfqd->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
@@ -2072,7 +2073,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
static void cfq_exit_queue(elevator_t *e)
{
struct cfq_data *cfqd = e->elevator_data;
- request_queue_t *q = cfqd->queue;
+ struct request_queue *q = cfqd->queue;
cfq_shutdown_timer_wq(cfqd);
@@ -2098,7 +2099,7 @@ static void cfq_exit_queue(elevator_t *e)
kfree(cfqd);
}
-static void *cfq_init_queue(request_queue_t *q)
+static void *cfq_init_queue(struct request_queue *q)
{
struct cfq_data *cfqd;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 87ca02ac84c..1a511ffaf8a 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
/*
* remove rq from rbtree and fifo.
*/
-static void deadline_remove_request(request_queue_t *q, struct request *rq)
+static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
struct deadline_data *dd = q->elevator->elevator_data;
@@ -115,7 +115,7 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
}
static int
-deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
+deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
struct deadline_data *dd = q->elevator->elevator_data;
struct request *__rq;
@@ -144,8 +144,8 @@ out:
return ret;
}
-static void deadline_merged_request(request_queue_t *q, struct request *req,
- int type)
+static void deadline_merged_request(struct request_queue *q,
+ struct request *req, int type)
{
struct deadline_data *dd = q->elevator->elevator_data;
@@ -159,7 +159,7 @@ static void deadline_merged_request(request_queue_t *q, struct request *req,
}
static void
-deadline_merged_requests(request_queue_t *q, struct request *req,
+deadline_merged_requests(struct request_queue *q, struct request *req,
struct request *next)
{
/*
@@ -185,7 +185,7 @@ deadline_merged_requests(request_queue_t *q, struct request *req,
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
deadline_remove_request(q, rq);
elv_dispatch_add_tail(q, rq);
@@ -236,7 +236,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
* deadline_dispatch_requests selects the best request according to
* read/write expire, fifo_batch, etc
*/
-static int deadline_dispatch_requests(request_queue_t *q, int force)
+static int deadline_dispatch_requests(struct request_queue *q, int force)
{
struct deadline_data *dd = q->elevator->elevator_data;
const int reads = !list_empty(&dd->fifo_list[READ]);
@@ -335,7 +335,7 @@ dispatch_request:
return 1;
}
-static int deadline_queue_empty(request_queue_t *q)
+static int deadline_queue_empty(struct request_queue *q)
{
struct deadline_data *dd = q->elevator->elevator_data;
@@ -356,7 +356,7 @@ static void deadline_exit_queue(elevator_t *e)
/*
* initialize elevator private data (deadline_data).
*/
-static void *deadline_init_queue(request_queue_t *q)
+static void *deadline_init_queue(struct request_queue *q)
{
struct deadline_data *dd;
diff --git a/block/elevator.c b/block/elevator.c
index d265963d1ed..c6d153de9fd 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -56,7 +56,7 @@ static const int elv_hash_shift = 6;
*/
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
elevator_t *e = q->elevator;
if (e->ops->elevator_allow_merge_fn)
@@ -141,12 +141,13 @@ static struct elevator_type *elevator_get(const char *name)
return e;
}
-static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
+static void *elevator_init_queue(struct request_queue *q,
+ struct elevator_queue *eq)
{
return eq->ops->elevator_init_fn(q);
}
-static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
void *data)
{
q->elevator = eq;
@@ -172,7 +173,8 @@ __setup("elevator=", elevator_setup);
static struct kobj_type elv_ktype;
-static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
+static elevator_t *elevator_alloc(struct request_queue *q,
+ struct elevator_type *e)
{
elevator_t *eq;
int i;
@@ -212,7 +214,7 @@ static void elevator_release(struct kobject *kobj)
kfree(e);
}
-int elevator_init(request_queue_t *q, char *name)
+int elevator_init(struct request_queue *q, char *name)
{
struct elevator_type *e = NULL;
struct elevator_queue *eq;
@@ -264,7 +266,7 @@ void elevator_exit(elevator_t *e)
EXPORT_SYMBOL(elevator_exit);
-static void elv_activate_rq(request_queue_t *q, struct request *rq)
+static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -272,7 +274,7 @@ static void elv_activate_rq(request_queue_t *q, struct request *rq)
e->ops->elevator_activate_req_fn(q, rq);
}
-static void elv_deactivate_rq(request_queue_t *q, struct request *rq)
+static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -285,13 +287,13 @@ static inline void __elv_rqhash_del(struct request *rq)
hlist_del_init(&rq->hash);
}
-static void elv_rqhash_del(request_queue_t *q, struct request *rq)
+static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
if (ELV_ON_HASH(rq))
__elv_rqhash_del(rq);
}
-static void elv_rqhash_add(request_queue_t *q, struct request *rq)
+static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -299,13 +301,13 @@ static void elv_rqhash_add(request_queue_t *q, struct request *rq)
hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}
-static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
+static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
__elv_rqhash_del(rq);
elv_rqhash_add(q, rq);
}
-static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
+static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
elevator_t *e = q->elevator;
struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
@@ -391,7 +393,7 @@ EXPORT_SYMBOL(elv_rb_find);
* entry. rq is sort insted into the dispatch queue. To be used by
* specific elevators.
*/
-void elv_dispatch_sort(request_queue_t *q, struct request *rq)
+void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
sector_t boundary;
struct list_head *entry;
@@ -449,7 +451,7 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
EXPORT_SYMBOL(elv_dispatch_add_tail);
-int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
+int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
elevator_t *e = q->elevator;
struct request *__rq;
@@ -481,7 +483,7 @@ int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
-void elv_merged_request(request_queue_t *q, struct request *rq, int type)
+void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
elevator_t *e = q->elevator;
@@ -494,7 +496,7 @@ void elv_merged_request(request_queue_t *q, struct request *rq, int type)
q->last_merge = rq;
}
-void elv_merge_requests(request_queue_t *q, struct request *rq,
+void elv_merge_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
elevator_t *e = q->elevator;
@@ -509,7 +511,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
q->last_merge = rq;
}
-void elv_requeue_request(request_queue_t *q, struct request *rq)
+void elv_requeue_request(struct request_queue *q, struct request *rq)
{
/*
* it already went through dequeue, we need to decrement the
@@ -526,7 +528,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}
-static void elv_drain_elevator(request_queue_t *q)
+static void elv_drain_elevator(struct request_queue *q)
{
static int printed;
while (q->elevator->ops->elevator_dispatch_fn(q, 1))
@@ -540,7 +542,7 @@ static void elv_drain_elevator(request_queue_t *q)
}
}
-void elv_insert(request_queue_t *q, struct request *rq, int where)
+void elv_insert(struct request_queue *q, struct request *rq, int where)
{
struct list_head *pos;
unsigned ordseq;
@@ -638,7 +640,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
}
}
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+void __elv_add_request(struct request_queue *q, struct request *rq, int where,
int plug)
{
if (q->ordcolor)
@@ -676,7 +678,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
EXPORT_SYMBOL(__elv_add_request);
-void elv_add_request(request_queue_t *q, struct request *rq, int where,
+void elv_add_request(struct request_queue *q, struct request *rq, int where,
int plug)
{
unsigned long flags;
@@ -688,7 +690,7 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
EXPORT_SYMBOL(elv_add_request);
-static inline struct request *__elv_next_request(request_queue_t *q)
+static inline struct request *__elv_next_request(struct request_queue *q)
{
struct request *rq;
@@ -704,7 +706,7 @@ static inline struct request *__elv_next_request(request_queue_t *q)
}
}
-struct request *elv_next_request(request_queue_t *q)
+struct request *elv_next_request(struct request_queue *q)
{
struct request *rq;
int ret;
@@ -770,7 +772,7 @@ struct request *elv_next_request(request_queue_t *q)
EXPORT_SYMBOL(elv_next_request);
-void elv_dequeue_request(request_queue_t *q, struct request *rq)
+void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
BUG_ON(list_empty(&rq->queuelist));
BUG_ON(ELV_ON_HASH(rq));
@@ -788,7 +790,7 @@ void elv_dequeue_request(request_queue_t *q, struct request *rq)
EXPORT_SYMBOL(elv_dequeue_request);
-int elv_queue_empty(request_queue_t *q)
+int elv_queue_empty(struct request_queue *q)
{
elevator_t *e = q->elevator;
@@ -803,7 +805,7 @@ int elv_queue_empty(request_queue_t *q)
EXPORT_SYMBOL(elv_queue_empty);
-struct request *elv_latter_request(request_queue_t *q, struct request *rq)
+struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -812,7 +814,7 @@ struct request *elv_latter_request(request_queue_t *q, struct request *rq)
return NULL;
}
-struct request *elv_former_request(request_queue_t *q, struct request *rq)
+struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -821,7 +823,7 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
return NULL;
}
-int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
+int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
elevator_t *e = q->elevator;
@@ -832,7 +834,7 @@ int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
return 0;
}
-void elv_put_request(request_queue_t *q, struct request *rq)
+void elv_put_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -840,7 +842,7 @@ void elv_put_request(request_queue_t *q, struct request *rq)
e->ops->elevator_put_req_fn(rq);
}
-int elv_may_queue(request_queue_t *q, int rw)
+int elv_may_queue(struct request_queue *q, int rw)
{
elevator_t *e = q->elevator;
@@ -850,7 +852,7 @@ int elv_may_queue(request_queue_t *q, int rw)
return ELV_MQUEUE_MAY;
}
-void elv_completed_request(request_queue_t *q, struct request *rq)
+void elv_completed_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
@@ -1006,7 +1008,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
* need for the new one. this way we have a chance of going back to the old
* one, if the new one fails init for some reason.
*/
-static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
+static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
elevator_t *old_elevator, *e;
void *data;
@@ -1078,7 +1080,8 @@ fail_register:
return 0;
}
-ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+ size_t count)
{
char elevator_name[ELV_NAME_MAX];
size_t len;
@@ -1107,7 +1110,7 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
return count;
}
-ssize_t elv_iosched_show(request_queue_t *q, char *name)
+ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
elevator_t *e = q->elevator;
struct elevator_type *elv = e->elevator_type;
@@ -1127,7 +1130,8 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
return len;
}
-struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
+struct request *elv_rb_former_request(struct request_queue *q,
+ struct request *rq)
{
struct rb_node *rbprev = rb_prev(&rq->rb_node);
@@ -1139,7 +1143,8 @@ struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
EXPORT_SYMBOL(elv_rb_former_request);
-struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
+struct request *elv_rb_latter_request(struct request_queue *q,
+ struct request *rq)
{
struct rb_node *rbnext = rb_next(&rq->rb_node);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 66056ca5e63..8c2caff87cc 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -40,7 +40,7 @@ static void blk_unplug_work(struct work_struct *work);
static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
-static int __make_request(request_queue_t *q, struct bio *bio);
+static int __make_request(struct request_queue *q, struct bio *bio);
static struct io_context *current_io_context(gfp_t gfp_flags, int node);
/*
@@ -121,7 +121,7 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
struct backing_dev_info *ret = NULL;
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
if (q)
ret = &q->backing_dev_info;
@@ -140,7 +140,7 @@ EXPORT_SYMBOL(blk_get_backing_dev_info);
* cdb from the request data for instance.
*
*/
-void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
+void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
q->prep_rq_fn = pfn;
}
@@ -163,14 +163,14 @@ EXPORT_SYMBOL(blk_queue_prep_rq);
* no merge_bvec_fn is defined for a queue, and only the fixed limits are
* honored.
*/
-void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
+void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
-void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
+void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
q->softirq_done_fn = fn;
}
@@ -199,7 +199,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
* __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
* blk_queue_bounce() to create a buffer in normal memory.
**/
-void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
{
/*
* set defaults
@@ -235,7 +235,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
EXPORT_SYMBOL(blk_queue_make_request);
-static void rq_init(request_queue_t *q, struct request *rq)
+static void rq_init(struct request_queue *q, struct request *rq)
{
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->donelist);
@@ -272,7 +272,7 @@ static void rq_init(request_queue_t *q, struct request *rq)
* feature should call this function and indicate so.
*
**/
-int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
prepare_flush_fn *prepare_flush_fn)
{
if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
@@ -311,7 +311,7 @@ EXPORT_SYMBOL(blk_queue_ordered);
* to the block layer by defining it through this call.
*
**/
-void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
+void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
{
q->issue_flush_fn = iff;
}
@@ -321,7 +321,7 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
/*
* Cache flushing for ordered writes handling
*/
-inline unsigned blk_ordered_cur_seq(request_queue_t *q)
+inline unsigned blk_ordered_cur_seq(struct request_queue *q)
{
if (!q->ordseq)
return 0;
@@ -330,7 +330,7 @@ inline unsigned blk_ordered_cur_seq(request_queue_t *q)
unsigned blk_ordered_req_seq(struct request *rq)
{
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
BUG_ON(q->ordseq == 0);
@@ -357,7 +357,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
return QUEUE_ORDSEQ_DONE;
}
-void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
+void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
struct request *rq;
int uptodate;
@@ -401,7 +401,7 @@ static void post_flush_end_io(struct request *rq, int error)
blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}
-static void queue_flush(request_queue_t *q, unsigned which)
+static void queue_flush(struct request_queue *q, unsigned which)
{
struct request *rq;
rq_end_io_fn *end_io;
@@ -425,7 +425,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
-static inline struct request *start_ordered(request_queue_t *q,
+static inline struct request *start_ordered(struct request_queue *q,
struct request *rq)
{
q->bi_size = 0;
@@ -476,7 +476,7 @@ static inline struct request *start_ordered(request_queue_t *q,
return rq;
}
-int blk_do_ordered(request_queue_t *q, struct request **rqp)
+int blk_do_ordered(struct request_queue *q, struct request **rqp)
{
struct request *rq = *rqp;
int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
@@ -527,7 +527,7 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
{
- request_queue_t *q = bio->bi_private;
+ struct request_queue *q = bio->bi_private;
/*
* This is dry run, restore bio_sector and size. We'll finish
@@ -551,7 +551,7 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
static int ordered_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
{
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
bio_end_io_t *endio;
void *private;
@@ -588,7 +588,7 @@ static int ordered_bio_endio(struct request *rq, struct bio *bio,
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
* buffers for doing I/O to pages residing above @page.
**/
-void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
{
unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
int dma = 0;
@@ -624,7 +624,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* Enables a low level driver to set an upper limit on the size of
* received requests.
**/
-void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
+void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -651,7 +651,8 @@ EXPORT_SYMBOL(blk_queue_max_sectors);
* physical data segments in a request. This would be the largest sized
* scatter list the driver could handle.
**/
-void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_phys_segments(struct request_queue *q,
+ unsigned short max_segments)
{
if (!max_segments) {
max_segments = 1;
@@ -674,7 +675,8 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments);
* address/length pairs the host adapter can actually give as once
* to the device.
**/
-void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_hw_segments(struct request_queue *q,
+ unsigned short max_segments)
{
if (!max_segments) {
max_segments = 1;
@@ -695,7 +697,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments);
* Enables a low level driver to set an upper limit on the size of a
* coalesced segment
**/
-void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
+void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
if (max_size < PAGE_CACHE_SIZE) {
max_size = PAGE_CACHE_SIZE;
@@ -718,7 +720,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
* even internal read-modify-write operations). Usually the default
* of 512 covers most hardware.
**/
-void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
+void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
{
q->hardsect_size = size;
}
@@ -735,7 +737,7 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
* @t: the stacking driver (top)
* @b: the underlying device (bottom)
**/
-void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
+void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
/* zero is "infinity" */
t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
@@ -756,7 +758,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
* @q: the request queue for the device
* @mask: the memory boundary mask
**/
-void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
+void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
if (mask < PAGE_CACHE_SIZE - 1) {
mask = PAGE_CACHE_SIZE - 1;
@@ -778,7 +780,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
* this is used when buiding direct io requests for the queue.
*
**/
-void blk_queue_dma_alignment(request_queue_t *q, int mask)
+void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
q->dma_alignment = mask;
}
@@ -796,7 +798,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
*
* no locks need be held.
**/
-struct request *blk_queue_find_tag(request_queue_t *q, int tag)
+struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
return blk_map_queue_find_tag(q->queue_tags, tag);
}
@@ -840,7 +842,7 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
* blk_cleanup_queue() will take care of calling this function, if tagging
* has been used. So there's no need to call this directly.
**/
-static void __blk_queue_free_tags(request_queue_t *q)
+static void __blk_queue_free_tags(struct request_queue *q)
{
struct blk_queue_tag *bqt = q->queue_tags;
@@ -877,7 +879,7 @@ EXPORT_SYMBOL(blk_free_tags);
* This is used to disabled tagged queuing to a device, yet leave
* queue in function.
**/
-void blk_queue_free_tags(request_queue_t *q)
+void blk_queue_free_tags(struct request_queue *q)
{
clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}
@@ -885,7 +887,7 @@ void blk_queue_free_tags(request_queue_t *q)
EXPORT_SYMBOL(blk_queue_free_tags);
static int
-init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
+init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
struct request **tag_index;
unsigned long *tag_map;
@@ -955,7 +957,7 @@ EXPORT_SYMBOL(blk_init_tags);
* @depth: the maximum queue depth supported
* @tags: the tag to use
**/
-int blk_queue_init_tags(request_queue_t *q, int depth,
+int blk_queue_init_tags(struct request_queue *q, int depth,
struct blk_queue_tag *tags)
{
int rc;
@@ -996,7 +998,7 @@ EXPORT_SYMBOL(blk_queue_init_tags);
* Notes:
* Must be called with the queue lock held.
**/
-int blk_queue_resize_tags(request_queue_t *q, int new_depth)
+int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
struct blk_queue_tag *bqt = q->queue_tags;
struct request **tag_index;
@@ -1059,7 +1061,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
* Notes:
* queue lock must be held.
**/
-void blk_queue_end_tag(request_queue_t *q, struct request *rq)
+void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
int tag = rq->tag;
@@ -1111,7 +1113,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
* Notes:
* queue lock must be held.
**/
-int blk_queue_start_tag(request_queue_t *q, struct request *rq)
+int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
int tag;
@@ -1158,7 +1160,7 @@ EXPORT_SYMBOL(blk_queue_start_tag);
* Notes:
* queue lock must be held.
**/
-void blk_queue_invalidate_tags(request_queue_t *q)
+void blk_queue_invalidate_tags(struct request_queue *q)
{
struct blk_queue_tag *bqt = q->queue_tags;
struct list_head *tmp, *n;
@@ -1205,7 +1207,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
EXPORT_SYMBOL(blk_dump_rq_flags);
-void blk_recount_segments(request_queue_t *q, struct bio *bio)
+void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
struct bio_vec *bv, *bvprv = NULL;
int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
@@ -1267,7 +1269,7 @@ new_hw_segment:
}
EXPORT_SYMBOL(blk_recount_segments);
-static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
@@ -1288,7 +1290,7 @@ static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
return 0;
}
-static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
@@ -1308,7 +1310,8 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
* map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries
*/
-int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sg)
{
struct bio_vec *bvec, *bvprv;
struct bio *bio;
@@ -1361,7 +1364,7 @@ EXPORT_SYMBOL(blk_rq_map_sg);
* specific ones if so desired
*/
-static inline int ll_new_mergeable(request_queue_t *q,
+static inline int ll_new_mergeable(struct request_queue *q,
struct request *req,
struct bio *bio)
{
@@ -1382,7 +1385,7 @@ static inline int ll_new_mergeable(request_queue_t *q,
return 1;
}
-static inline int ll_new_hw_segment(request_queue_t *q,
+static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
{
@@ -1406,7 +1409,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
return 1;
}
-int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
+int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio)
{
unsigned short max_sectors;
int len;
@@ -1444,7 +1447,7 @@ int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
}
EXPORT_SYMBOL(ll_back_merge_fn);
-static int ll_front_merge_fn(request_queue_t *q, struct request *req,
+static int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
unsigned short max_sectors;
@@ -1483,7 +1486,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
return ll_new_hw_segment(q, req, bio);
}
-static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
+static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
struct request *next)
{
int total_phys_segments;
@@ -1539,7 +1542,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
* This is called with interrupts off and no requests on the queue and
* with the queue lock held.
*/
-void blk_plug_device(request_queue_t *q)
+void blk_plug_device(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
@@ -1562,7 +1565,7 @@ EXPORT_SYMBOL(blk_plug_device);
* remove the queue from the plugged list, if present. called with
* queue lock held and interrupts disabled.
*/
-int blk_remove_plug(request_queue_t *q)
+int blk_remove_plug(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
@@ -1578,7 +1581,7 @@ EXPORT_SYMBOL(blk_remove_plug);
/*
* remove the plug and let it rip..
*/
-void __generic_unplug_device(request_queue_t *q)
+void __generic_unplug_device(struct request_queue *q)
{
if (unlikely(blk_queue_stopped(q)))
return;
@@ -1592,7 +1595,7 @@ EXPORT_SYMBOL(__generic_unplug_device);
/**
* generic_unplug_device - fire a request queue
- * @q: The &request_queue_t in question
+ * @q: The &struct request_queue in question
*
* Description:
* Linux uses plugging to build bigger requests queues before letting
@@ -1601,7 +1604,7 @@ EXPORT_SYMBOL(__generic_unplug_device);
* gets unplugged, the request_fn defined for the queue is invoked and
* transfers started.
**/
-void generic_unplug_device(request_queue_t *q)
+void generic_unplug_device(struct request_queue *q)
{
spin_lock_irq(q->queue_lock);
__generic_unplug_device(q);
@@ -1612,7 +1615,7 @@ EXPORT_SYMBOL(generic_unplug_device);
static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
struct page *page)
{
- request_queue_t *q = bdi->unplug_io_data;
+ struct request_queue *q = bdi->unplug_io_data;
/*
* devices don't necessarily have an ->unplug_fn defined
@@ -1627,7 +1630,8 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
static void blk_unplug_work(struct work_struct *work)
{
- request_queue_t *q = container_of(work, request_queue_t, unplug_work);
+ struct request_queue *q =
+ container_of(work, struct request_queue, unplug_work);
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
@@ -1637,7 +1641,7 @@ static void blk_unplug_work(struct work_struct *work)
static void blk_unplug_timeout(unsigned long data)
{
- request_queue_t *q = (request_queue_t *)data;
+ struct request_queue *q = (struct request_queue *)data;
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
@@ -1647,14 +1651,14 @@ static void blk_unplug_timeout(unsigned long data)
/**
* blk_start_queue - restart a previously stopped queue
- * @q: The &request_queue_t in question
+ * @q: The &struct request_queue in question
*
* Description:
* blk_start_queue() will clear the stop flag on the queue, and call
* the request_fn for the queue if it was in a stopped state when
* entered. Also see blk_stop_queue(). Queue lock must be held.
**/
-void blk_start_queue(request_queue_t *q)
+void blk_start_queue(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
@@ -1677,7 +1681,7 @@ EXPORT_SYMBOL(blk_start_queue);
/**
* blk_stop_queue - stop a queue
- * @q: The &request_queue_t in question
+ * @q: The &struct request_queue in question
*
* Description:
* The Linux block layer assumes that a block driver will consume all
@@ -1689,7 +1693,7 @@ EXPORT_SYMBOL(blk_start_queue);
* the driver has signalled it's ready to go again. This happens by calling
* blk_start_queue() to restart queue operations. Queue lock must be held.
**/
-void blk_stop_queue(request_queue_t *q)
+void blk_stop_queue(struct request_queue *q)
{
blk_remove_plug(q);
set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
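Editor's note, not part of the patch: the blk_stop_queue()/blk_start_queue() pair documented above is normally driven from a driver's request_fn and its completion path. A minimal sketch of that pattern follows; my_hw_busy() and my_hw_start() are hypothetical hardware hooks, and the era's end_that_request_* helpers are used because the request was already dequeued in the request_fn (which runs with q->queue_lock held).

#include <linux/blkdev.h>

/* Invoked by the block layer with q->queue_lock held. */
static void mydev_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                if (my_hw_busy()) {             /* hypothetical busy check */
                        blk_stop_queue(q);      /* stop calling us ... */
                        return;                 /* ... until an IRQ frees a slot */
                }
                blkdev_dequeue_request(rq);     /* request now owned by the hw */
                my_hw_start(rq);                /* hypothetical hand-off */
        }
}

/* Per-request completion, called from the driver's interrupt handler. */
static void mydev_complete(struct request_queue *q, struct request *rq, int ok)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (!end_that_request_first(rq, ok, rq->hard_nr_sectors))
                end_that_request_last(rq, ok);
        blk_start_queue(q);                     /* re-arm mydev_request_fn() */
        spin_unlock_irqrestore(q->queue_lock, flags);
}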
@@ -1746,7 +1750,7 @@ void blk_run_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_run_queue);
/**
- * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
+ * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
* @kobj: the kobj belonging of the request queue to be released
*
* Description:
@@ -1762,7 +1766,8 @@ EXPORT_SYMBOL(blk_run_queue);
**/
static void blk_release_queue(struct kobject *kobj)
{
- request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
struct request_list *rl = &q->rq;
blk_sync_queue(q);
@@ -1778,13 +1783,13 @@ static void blk_release_queue(struct kobject *kobj)
kmem_cache_free(requestq_cachep, q);
}
-void blk_put_queue(request_queue_t *q)
+void blk_put_queue(struct request_queue *q)
{
kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);
-void blk_cleanup_queue(request_queue_t * q)
+void blk_cleanup_queue(struct request_queue * q)
{
mutex_lock(&q->sysfs_lock);
set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -1798,7 +1803,7 @@ void blk_cleanup_queue(request_queue_t * q)
EXPORT_SYMBOL(blk_cleanup_queue);
-static int blk_init_free_list(request_queue_t *q)
+static int blk_init_free_list(struct request_queue *q)
{
struct request_list *rl = &q->rq;
@@ -1817,7 +1822,7 @@ static int blk_init_free_list(request_queue_t *q)
return 0;
}
-request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
+struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
return blk_alloc_queue_node(gfp_mask, -1);
}
@@ -1825,9 +1830,9 @@ EXPORT_SYMBOL(blk_alloc_queue);
static struct kobj_type queue_ktype;
-request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
- request_queue_t *q;
+ struct request_queue *q;
q = kmem_cache_alloc_node(requestq_cachep,
gfp_mask | __GFP_ZERO, node_id);
@@ -1882,16 +1887,16 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
* when the block device is deactivated (such as at module unload).
**/
-request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);
-request_queue_t *
+struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
- request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+ struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
if (!q)
return NULL;
@@ -1940,7 +1945,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
}
EXPORT_SYMBOL(blk_init_queue_node);
-int blk_get_queue(request_queue_t *q)
+int blk_get_queue(struct request_queue *q)
{
if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
kobject_get(&q->kobj);
@@ -1952,7 +1957,7 @@ int blk_get_queue(request_queue_t *q)
EXPORT_SYMBOL(blk_get_queue);
-static inline void blk_free_request(request_queue_t *q, struct request *rq)
+static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
if (rq->cmd_flags & REQ_ELVPRIV)
elv_put_request(q, rq);
@@ -1960,7 +1965,7 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
}
static struct request *
-blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
@@ -1988,7 +1993,7 @@ blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
* ioc_batching returns true if the ioc is a valid batching request and
* should be given priority access to a request.
*/
-static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
+static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
if (!ioc)
return 0;
@@ -2009,7 +2014,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
* is the behaviour we want though - once it gets a wakeup it should be given
* a nice run.
*/
-static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
if (!ioc || ioc_batching(q, ioc))
return;
@@ -2018,7 +2023,7 @@ static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
ioc->last_waited = jiffies;
}
-static void __freed_request(request_queue_t *q, int rw)
+static void __freed_request(struct request_queue *q, int rw)
{
struct request_list *rl = &q->rq;
@@ -2037,7 +2042,7 @@ static void __freed_request(request_queue_t *q, int rw)
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
-static void freed_request(request_queue_t *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int rw, int priv)
{
struct request_list *rl = &q->rq;
@@ -2057,7 +2062,7 @@ static void freed_request(request_queue_t *q, int rw, int priv)
* Returns NULL on failure, with queue_lock held.
* Returns !NULL on success, with queue_lock *not held*.
*/
-static struct request *get_request(request_queue_t *q, int rw_flags,
+static struct request *get_request(struct request_queue *q, int rw_flags,
struct bio *bio, gfp_t gfp_mask)
{
struct request *rq = NULL;
@@ -2162,7 +2167,7 @@ out:
*
* Called with q->queue_lock held, and returns with it unlocked.
*/
-static struct request *get_request_wait(request_queue_t *q, int rw_flags,
+static struct request *get_request_wait(struct request_queue *q, int rw_flags,
struct bio *bio)
{
const int rw = rw_flags & 0x01;
@@ -2204,7 +2209,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw_flags,
return rq;
}
-struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
struct request *rq;
@@ -2234,7 +2239,7 @@ EXPORT_SYMBOL(blk_get_request);
*
* The queue lock must be held with interrupts disabled.
*/
-void blk_start_queueing(request_queue_t *q)
+void blk_start_queueing(struct request_queue *q)
{
if (!blk_queue_plugged(q))
q->request_fn(q);
@@ -2253,7 +2258,7 @@ EXPORT_SYMBOL(blk_start_queueing);
* more, when that condition happens we need to put the request back
* on the queue. Must be called with queue lock held.
*/
-void blk_requeue_request(request_queue_t *q, struct request *rq)
+void blk_requeue_request(struct request_queue *q, struct request *rq)
{
blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
@@ -2284,7 +2289,7 @@ EXPORT_SYMBOL(blk_requeue_request);
* of the queue for things like a QUEUE_FULL message from a device, or a
* host that is unable to accept a particular command.
*/
-void blk_insert_request(request_queue_t *q, struct request *rq,
+void blk_insert_request(struct request_queue *q, struct request *rq,
int at_head, void *data)
{
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
@@ -2330,7 +2335,7 @@ static int __blk_rq_unmap_user(struct bio *bio)
return ret;
}
-static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
void __user *ubuf, unsigned int len)
{
unsigned long uaddr;
@@ -2403,8 +2408,8 @@ unmap_bio:
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
-int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
- unsigned long len)
+int blk_rq_map_user(struct request_queue *q, struct request *rq,
+ void __user *ubuf, unsigned long len)
{
unsigned long bytes_read = 0;
struct bio *bio = NULL;
@@ -2470,7 +2475,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
-int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct sg_iovec *iov, int iov_count, unsigned int len)
{
struct bio *bio;
@@ -2540,7 +2545,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
* @len: length of user data
* @gfp_mask: memory allocation flags
*/
-int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
{
struct bio *bio;
@@ -2577,7 +2582,7 @@ EXPORT_SYMBOL(blk_rq_map_kern);
* Insert a fully prepared request at the back of the io scheduler queue
* for execution. Don't wait for completion.
*/
-void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq, int at_head,
rq_end_io_fn *done)
{
@@ -2605,7 +2610,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
* Insert a fully prepared request at the back of the io scheduler queue
* for execution and wait for completion.
*/
-int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
+int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq, int at_head)
{
DECLARE_COMPLETION_ONSTACK(wait);
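Editor's note, not part of the patch: blk_execute_rq() above (and the _nowait variant) is how drivers push a fully prepared request through the elevator. A rough usage sketch, loosely modelled on the scsi_ioctl.c paths further down in this diff; the command setup is illustrative rather than a drop-in implementation.

#include <linux/blkdev.h>
#include <linux/string.h>

/* Issue a 6-byte TEST UNIT READY and wait for completion. */
static int mydev_test_unit_ready(struct request_queue *q, struct gendisk *disk)
{
        struct request *rq;
        int err;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_BLOCK_PC;       /* raw command, no data transfer */
        memset(rq->cmd, 0, sizeof(rq->cmd));    /* opcode 0x00 == TEST UNIT READY */
        rq->cmd_len = 6;
        rq->timeout = 10 * HZ;

        err = blk_execute_rq(q, disk, rq, 0);   /* sleeps until the request completes */

        blk_put_request(rq);
        return err;
}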
@@ -2648,7 +2653,7 @@ EXPORT_SYMBOL(blk_execute_rq);
*/
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
- request_queue_t *q;
+ struct request_queue *q;
if (bdev->bd_disk == NULL)
return -ENXIO;
@@ -2684,7 +2689,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
* queue lock is held and interrupts disabled, as we muck with the
* request queue list.
*/
-static inline void add_request(request_queue_t * q, struct request * req)
+static inline void add_request(struct request_queue * q, struct request * req)
{
drive_stat_acct(req, req->nr_sectors, 1);
@@ -2730,7 +2735,7 @@ EXPORT_SYMBOL_GPL(disk_round_stats);
/*
* queue lock must be held
*/
-void __blk_put_request(request_queue_t *q, struct request *req)
+void __blk_put_request(struct request_queue *q, struct request *req)
{
if (unlikely(!q))
return;
@@ -2760,7 +2765,7 @@ EXPORT_SYMBOL_GPL(__blk_put_request);
void blk_put_request(struct request *req)
{
unsigned long flags;
- request_queue_t *q = req->q;
+ struct request_queue *q = req->q;
/*
* Gee, IDE calls in w/ NULL q. Fix IDE and remove the
@@ -2798,7 +2803,7 @@ EXPORT_SYMBOL(blk_end_sync_rq);
/*
* Has to be called with the request spinlock acquired
*/
-static int attempt_merge(request_queue_t *q, struct request *req,
+static int attempt_merge(struct request_queue *q, struct request *req,
struct request *next)
{
if (!rq_mergeable(req) || !rq_mergeable(next))
@@ -2851,7 +2856,8 @@ static int attempt_merge(request_queue_t *q, struct request *req,
return 1;
}
-static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
+static inline int attempt_back_merge(struct request_queue *q,
+ struct request *rq)
{
struct request *next = elv_latter_request(q, rq);
@@ -2861,7 +2867,8 @@ static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
return 0;
}
-static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
+static inline int attempt_front_merge(struct request_queue *q,
+ struct request *rq)
{
struct request *prev = elv_former_request(q, rq);
@@ -2905,7 +2912,7 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
req->start_time = jiffies;
}
-static int __make_request(request_queue_t *q, struct bio *bio)
+static int __make_request(struct request_queue *q, struct bio *bio)
{
struct request *req;
int el_ret, nr_sectors, barrier, err;
@@ -3119,7 +3126,7 @@ static inline int should_fail_request(struct bio *bio)
*/
static inline void __generic_make_request(struct bio *bio)
{
- request_queue_t *q;
+ struct request_queue *q;
sector_t maxsector;
sector_t old_sector;
int ret, nr_sectors = bio_sectors(bio);
@@ -3312,7 +3319,7 @@ static void blk_recalc_rq_segments(struct request *rq)
struct bio *bio, *prevbio = NULL;
int nr_phys_segs, nr_hw_segs;
unsigned int phys_size, hw_size;
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
if (!rq->bio)
return;
@@ -3658,7 +3665,8 @@ void end_request(struct request *req, int uptodate)
EXPORT_SYMBOL(end_request);
-void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
+void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+ struct bio *bio)
{
/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
rq->cmd_flags |= (bio->bi_rw & 3);
@@ -3701,7 +3709,7 @@ int __init blk_dev_init(void)
sizeof(struct request), 0, SLAB_PANIC, NULL);
requestq_cachep = kmem_cache_create("blkdev_queue",
- sizeof(request_queue_t), 0, SLAB_PANIC, NULL);
+ sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
iocontext_cachep = kmem_cache_create("blkdev_ioc",
sizeof(struct io_context), 0, SLAB_PANIC, NULL);
@@ -4021,7 +4029,8 @@ static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct queue_sysfs_entry *entry = to_queue(attr);
- request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
ssize_t res;
if (!entry->show)
@@ -4041,7 +4050,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct queue_sysfs_entry *entry = to_queue(attr);
- request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+ struct request_queue *q = container_of(kobj, struct request_queue, kobj);
ssize_t res;
@@ -4072,7 +4081,7 @@ int blk_register_queue(struct gendisk *disk)
{
int ret;
- request_queue_t *q = disk->queue;
+ struct request_queue *q = disk->queue;
if (!q || !q->request_fn)
return -ENXIO;
@@ -4097,7 +4106,7 @@ int blk_register_queue(struct gendisk *disk)
void blk_unregister_queue(struct gendisk *disk)
{
- request_queue_t *q = disk->queue;
+ struct request_queue *q = disk->queue;
if (q && q->request_fn) {
elv_unregister_queue(q);
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 1c3de2b9a6b..7563d8aa394 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -11,13 +11,13 @@ struct noop_data {
struct list_head queue;
};
-static void noop_merged_requests(request_queue_t *q, struct request *rq,
+static void noop_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
list_del_init(&next->queuelist);
}
-static int noop_dispatch(request_queue_t *q, int force)
+static int noop_dispatch(struct request_queue *q, int force)
{
struct noop_data *nd = q->elevator->elevator_data;
@@ -31,14 +31,14 @@ static int noop_dispatch(request_queue_t *q, int force)
return 0;
}
-static void noop_add_request(request_queue_t *q, struct request *rq)
+static void noop_add_request(struct request_queue *q, struct request *rq)
{
struct noop_data *nd = q->elevator->elevator_data;
list_add_tail(&rq->queuelist, &nd->queue);
}
-static int noop_queue_empty(request_queue_t *q)
+static int noop_queue_empty(struct request_queue *q)
{
struct noop_data *nd = q->elevator->elevator_data;
@@ -46,7 +46,7 @@ static int noop_queue_empty(request_queue_t *q)
}
static struct request *
-noop_former_request(request_queue_t *q, struct request *rq)
+noop_former_request(struct request_queue *q, struct request *rq)
{
struct noop_data *nd = q->elevator->elevator_data;
@@ -56,7 +56,7 @@ noop_former_request(request_queue_t *q, struct request *rq)
}
static struct request *
-noop_latter_request(request_queue_t *q, struct request *rq)
+noop_latter_request(struct request_queue *q, struct request *rq)
{
struct noop_data *nd = q->elevator->elevator_data;
@@ -65,7 +65,7 @@ noop_latter_request(request_queue_t *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static void *noop_init_queue(request_queue_t *q)
+static void *noop_init_queue(struct request_queue *q)
{
struct noop_data *nd;
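Editor's note, for context only (not part of this patch): the noop hooks above are tied together by a struct elevator_type and handed to the elevator core with elv_register() from the module init function. The sketch below is reconstructed from the same-era noop-iosched.c, so treat the exact field names as approximate; noop_exit_queue() is not shown in this hunk.

static struct elevator_type elevator_noop = {
        .ops = {
                .elevator_merge_req_fn   = noop_merged_requests,
                .elevator_dispatch_fn    = noop_dispatch,
                .elevator_add_req_fn     = noop_add_request,
                .elevator_queue_empty_fn = noop_queue_empty,
                .elevator_former_req_fn  = noop_former_request,
                .elevator_latter_req_fn  = noop_latter_request,
                .elevator_init_fn        = noop_init_queue,
                .elevator_exit_fn        = noop_exit_queue,
        },
        .elevator_name = "noop",
        .elevator_owner = THIS_MODULE,
};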
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index d359a715bbc..91c73224f4c 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -49,22 +49,22 @@ static int sg_get_version(int __user *p)
return put_user(sg_version_num, p);
}
-static int scsi_get_idlun(request_queue_t *q, int __user *p)
+static int scsi_get_idlun(struct request_queue *q, int __user *p)
{
return put_user(0, p);
}
-static int scsi_get_bus(request_queue_t *q, int __user *p)
+static int scsi_get_bus(struct request_queue *q, int __user *p)
{
return put_user(0, p);
}
-static int sg_get_timeout(request_queue_t *q)
+static int sg_get_timeout(struct request_queue *q)
{
return q->sg_timeout / (HZ / USER_HZ);
}
-static int sg_set_timeout(request_queue_t *q, int __user *p)
+static int sg_set_timeout(struct request_queue *q, int __user *p)
{
int timeout, err = get_user(timeout, p);
@@ -74,14 +74,14 @@ static int sg_set_timeout(request_queue_t *q, int __user *p)
return err;
}
-static int sg_get_reserved_size(request_queue_t *q, int __user *p)
+static int sg_get_reserved_size(struct request_queue *q, int __user *p)
{
unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
return put_user(val, p);
}
-static int sg_set_reserved_size(request_queue_t *q, int __user *p)
+static int sg_set_reserved_size(struct request_queue *q, int __user *p)
{
int size, err = get_user(size, p);
@@ -101,7 +101,7 @@ static int sg_set_reserved_size(request_queue_t *q, int __user *p)
* will always return that we are ATAPI even for a real SCSI drive, I'm not
* so sure this is worth doing anything about (why would you care??)
*/
-static int sg_emulated_host(request_queue_t *q, int __user *p)
+static int sg_emulated_host(struct request_queue *q, int __user *p)
{
return put_user(1, p);
}
@@ -214,7 +214,7 @@ int blk_verify_command(unsigned char *cmd, int has_write_perm)
}
EXPORT_SYMBOL_GPL(blk_verify_command);
-static int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq,
+static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_hdr *hdr, int has_write_perm)
{
memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
@@ -286,7 +286,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
return r;
}
-static int sg_io(struct file *file, request_queue_t *q,
+static int sg_io(struct file *file, struct request_queue *q,
struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
unsigned long start_time;
@@ -519,7 +519,8 @@ error:
EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
/* Send basic block requests */
-static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data)
+static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
+ int cmd, int data)
{
struct request *rq;
int err;
@@ -539,7 +540,8 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
return err;
}
-static inline int blk_send_start_stop(request_queue_t *q, struct gendisk *bd_disk, int data)
+static inline int blk_send_start_stop(struct request_queue *q,
+ struct gendisk *bd_disk, int data)
{
return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
}
diff --git a/drivers/acorn/block/fd1772.c b/drivers/acorn/block/fd1772.c
index 423ed08fb6f..d7e18ce8dad 100644
--- a/drivers/acorn/block/fd1772.c
+++ b/drivers/acorn/block/fd1772.c
@@ -372,7 +372,7 @@ static int fd_test_drive_present(int drive);
static void config_types(void);
static int floppy_open(struct inode *inode, struct file *filp);
static int floppy_release(struct inode *inode, struct file *filp);
-static void do_fd_request(request_queue_t *);
+static void do_fd_request(struct request_queue *);
/************************* End of Prototypes **************************/
@@ -1271,7 +1271,7 @@ static void fd1772_checkint(void)
}
}
-static void do_fd_request(request_queue_t* q)
+static void do_fd_request(struct request_queue* q)
{
unsigned long flags;
diff --git a/drivers/acorn/block/mfmhd.c b/drivers/acorn/block/mfmhd.c
index d85520f78e6..74058db674d 100644
--- a/drivers/acorn/block/mfmhd.c
+++ b/drivers/acorn/block/mfmhd.c
@@ -924,7 +924,7 @@ static void mfm_request(void)
DBG("mfm_request: Dropping out bottom\n");
}
-static void do_mfm_request(request_queue_t *q)
+static void do_mfm_request(struct request_queue *q)
{
DBG("do_mfm_request: about to mfm_request\n");
mfm_request();
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 408b45168ab..934d639b368 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -43,51 +43,39 @@ menuconfig ACPI
if ACPI
config ACPI_SLEEP
- bool "Sleep States"
- depends on X86 && (!SMP || SUSPEND_SMP)
+ bool
+ depends on PM_SLEEP
default y
- ---help---
- This option adds support for ACPI suspend states.
- With this option, you will be able to put the system "to sleep".
- Sleep states are low power states for the system and devices. All
- of the system operating state is saved to either memory or disk
- (depending on the state), to allow the system to resume operation
- quickly at your request.
+config ACPI_PROCFS
+ bool "Deprecated /proc/acpi files"
+ depends on PROC_FS
+ ---help---
+ For backwards compatibility, this option allows
+ deprecated /proc/acpi/ files to exist, even when
+ they have been replaced by functions in /sys.
+ The deprecated files (and their replacements) include:
- Although this option sounds really nifty, barely any of the device
- drivers have been converted to the new driver model and hence few
- have proper power management support.
+ /proc/acpi/sleep (/sys/power/state)
+ /proc/acpi/info (/sys/module/acpi/parameters/acpica_version)
+ /proc/acpi/dsdt (/sys/firmware/acpi/tables/DSDT)
+ /proc/acpi/fadt (/sys/firmware/acpi/tables/FACP)
+ /proc/acpi/debug_layer (/sys/module/acpi/parameters/debug_layer)
+ /proc/acpi/debug_level (/sys/module/acpi/parameters/debug_level)
- This option is not recommended for anyone except those doing driver
- power management development.
+ This option has no effect on /proc/acpi/ files
+ and functions which do not yet exist in /sys.
-config ACPI_SLEEP_PROC_FS
- bool
- depends on ACPI_SLEEP && PROC_FS
- default y
+ Say N to delete /proc/acpi/ files that have moved to /sys/
-config ACPI_SLEEP_PROC_SLEEP
+config ACPI_PROCFS_SLEEP
bool "/proc/acpi/sleep (deprecated)"
- depends on ACPI_SLEEP_PROC_FS
+ depends on PM_SLEEP && ACPI_PROCFS
default n
---help---
Create /proc/acpi/sleep
Deprecated by /sys/power/state
-config ACPI_PROCFS
- bool "Procfs interface (deprecated)"
- default y
- ---help---
- The Procfs interface for ACPI is made optional for backward compatibility.
- As the same functions are duplicated in the sysfs interface
- and this proc interface will be removed some time later,
- it's marked as deprecated.
- ( /proc/acpi/debug_layer && debug_level are deprecated by
- /sys/module/acpi/parameters/debug_layer && debug_level.
- /proc/acpi/info is deprecated by
- /sys/module/acpi/parameters/acpica_version )
-
config ACPI_AC
tristate "AC Adapter"
depends on X86
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 37c7dc4f9fe..d8b35093527 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -34,7 +34,6 @@
#define ACPI_AC_COMPONENT 0x00020000
#define ACPI_AC_CLASS "ac_adapter"
-#define ACPI_AC_HID "ACPI0003"
#define ACPI_AC_DEVICE_NAME "AC Adapter"
#define ACPI_AC_FILE_STATE "state"
#define ACPI_AC_NOTIFY_STATUS 0x80
@@ -56,10 +55,16 @@ static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device, int type);
static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+static const struct acpi_device_id ac_device_ids[] = {
+ {"ACPI0003", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, ac_device_ids);
+
static struct acpi_driver acpi_ac_driver = {
.name = "ac",
.class = ACPI_AC_CLASS,
- .ids = ACPI_AC_HID,
+ .ids = ac_device_ids,
.ops = {
.add = acpi_ac_add,
.remove = acpi_ac_remove,
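Editor's note, not part of the patch: the ac.c hunk above is representative of the ACPI half of this diff. The comma-separated .ids string becomes a struct acpi_device_id table, and MODULE_DEVICE_TABLE(acpi, ...) turns the same table into autoload data that userspace matches against the MODALIAS uevent introduced in the scan.c hunk further down. A condensed sketch of the converted pattern, with a hypothetical driver name and stubbed callbacks:

#include <linux/module.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>

static int example_add(struct acpi_device *device) { return 0; }
static int example_remove(struct acpi_device *device, int type) { return 0; }

static const struct acpi_device_id example_device_ids[] = {
        {"ACPI0003", 0},        /* one entry per supported _HID/_CID */
        {"", 0},                /* empty string terminates the table */
};
MODULE_DEVICE_TABLE(acpi, example_device_ids);

static struct acpi_driver example_driver = {
        .name = "example",
        .class = "ac_adapter",
        .ids = example_device_ids,
        .ops = {
                .add = example_add,
                .remove = example_remove,
        },
};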
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index e65628a0308..5f1127ad5a9 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -53,10 +53,16 @@ static int acpi_memory_device_add(struct acpi_device *device);
static int acpi_memory_device_remove(struct acpi_device *device, int type);
static int acpi_memory_device_start(struct acpi_device *device);
+static const struct acpi_device_id memory_device_ids[] = {
+ {ACPI_MEMORY_DEVICE_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, memory_device_ids);
+
static struct acpi_driver acpi_memory_device_driver = {
.name = "acpi_memhotplug",
.class = ACPI_MEMORY_DEVICE_CLASS,
- .ids = ACPI_MEMORY_DEVICE_HID,
+ .ids = memory_device_ids,
.ops = {
.add = acpi_memory_device_add,
.remove = acpi_memory_device_remove,
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index 3cd79caad70..9c4bd220c44 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -56,7 +56,6 @@
#define ACPI_HOTK_NAME "Asus Laptop ACPI Extras Driver"
#define ACPI_HOTK_CLASS "hotkey"
#define ACPI_HOTK_DEVICE_NAME "Hotkey"
-#define ACPI_HOTK_HID "ATK0100"
/*
* Some events we use, same for all Asus
@@ -426,14 +425,20 @@ static struct acpi_table_header *asus_info;
static struct asus_hotk *hotk;
/*
- * The hotkey driver declaration
+ * The hotkey driver and autoloading declaration
*/
static int asus_hotk_add(struct acpi_device *device);
static int asus_hotk_remove(struct acpi_device *device, int type);
+static const struct acpi_device_id asus_device_ids[] = {
+ {"ATK0100", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, asus_device_ids);
+
static struct acpi_driver asus_hotk_driver = {
.name = "asus_acpi",
.class = ACPI_HOTK_CLASS,
- .ids = ACPI_HOTK_HID,
+ .ids = asus_device_ids,
.ops = {
.add = asus_hotk_add,
.remove = asus_hotk_remove,
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index cad932de383..81651032791 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -41,7 +41,6 @@
#define ACPI_BATTERY_COMPONENT 0x00040000
#define ACPI_BATTERY_CLASS "battery"
-#define ACPI_BATTERY_HID "PNP0C0A"
#define ACPI_BATTERY_DEVICE_NAME "Battery"
#define ACPI_BATTERY_NOTIFY_STATUS 0x80
#define ACPI_BATTERY_NOTIFY_INFO 0x81
@@ -74,10 +73,16 @@ static int acpi_battery_add(struct acpi_device *device);
static int acpi_battery_remove(struct acpi_device *device, int type);
static int acpi_battery_resume(struct acpi_device *device);
+static const struct acpi_device_id battery_device_ids[] = {
+ {"PNP0C0A", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, battery_device_ids);
+
static struct acpi_driver acpi_battery_driver = {
.name = "battery",
.class = ACPI_BATTERY_CLASS,
- .ids = ACPI_BATTERY_HID,
+ .ids = battery_device_ids,
.ops = {
.add = acpi_battery_add,
.resume = acpi_battery_resume,
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index cb4110b50cd..540581338ef 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -66,6 +66,16 @@ MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Button Driver");
MODULE_LICENSE("GPL");
+static const struct acpi_device_id button_device_ids[] = {
+ {ACPI_BUTTON_HID_LID, 0},
+ {ACPI_BUTTON_HID_SLEEP, 0},
+ {ACPI_BUTTON_HID_SLEEPF, 0},
+ {ACPI_BUTTON_HID_POWER, 0},
+ {ACPI_BUTTON_HID_POWERF, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, button_device_ids);
+
static int acpi_button_add(struct acpi_device *device);
static int acpi_button_remove(struct acpi_device *device, int type);
static int acpi_button_info_open_fs(struct inode *inode, struct file *file);
@@ -74,7 +84,7 @@ static int acpi_button_state_open_fs(struct inode *inode, struct file *file);
static struct acpi_driver acpi_button_driver = {
.name = "button",
.class = ACPI_BUTTON_CLASS,
- .ids = "button_power,button_sleep,PNP0C0D,PNP0C0C,PNP0C0E",
+ .ids = button_device_ids,
.ops = {
.add = acpi_button_add,
.remove = acpi_button_remove,
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 0dd3bf7c0ed..3c25ec7a187 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -52,10 +52,18 @@ MODULE_LICENSE("GPL");
static int acpi_container_add(struct acpi_device *device);
static int acpi_container_remove(struct acpi_device *device, int type);
+static const struct acpi_device_id container_device_ids[] = {
+ {"ACPI0004", 0},
+ {"PNP0A05", 0},
+ {"PNP0A06", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, container_device_ids);
+
static struct acpi_driver acpi_container_driver = {
.name = "container",
.class = ACPI_CONTAINER_CLASS,
- .ids = "ACPI0004,PNP0A05,PNP0A06",
+ .ids = container_device_ids,
.ops = {
.add = acpi_container_add,
.remove = acpi_container_remove,
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 10e851021ec..469f3f57f88 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -41,7 +41,6 @@
#include <acpi/actypes.h>
#define ACPI_EC_CLASS "embedded_controller"
-#define ACPI_EC_HID "PNP0C09"
#define ACPI_EC_DEVICE_NAME "Embedded Controller"
#define ACPI_EC_FILE_INFO "info"
@@ -82,10 +81,15 @@ static int acpi_ec_start(struct acpi_device *device);
static int acpi_ec_stop(struct acpi_device *device, int type);
static int acpi_ec_add(struct acpi_device *device);
+static const struct acpi_device_id ec_device_ids[] = {
+ {"PNP0C09", 0},
+ {"", 0},
+};
+
static struct acpi_driver acpi_ec_driver = {
.name = "ec",
.class = ACPI_EC_CLASS,
- .ids = ACPI_EC_HID,
+ .ids = ec_device_ids,
.ops = {
.add = acpi_ec_add,
.remove = acpi_ec_remove,
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index 23ee7bc4a70..b1aaa0e8458 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -378,7 +378,7 @@ static u8 acpi_ev_match_pci_root_bridge(char *id)
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
{
acpi_status status;
- struct acpi_device_id hid;
+ struct acpica_device_id hid;
struct acpi_compatible_id_list *cid;
acpi_native_uint i;
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index ec655c53949..c81f6bdb68b 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -50,10 +50,16 @@ static int acpi_fan_remove(struct acpi_device *device, int type);
static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state);
static int acpi_fan_resume(struct acpi_device *device);
+static const struct acpi_device_id fan_device_ids[] = {
+ {"PNP0C0B", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, fan_device_ids);
+
static struct acpi_driver acpi_fan_driver = {
.name = "fan",
.class = ACPI_FAN_CLASS,
- .ids = "PNP0C0B",
+ .ids = fan_device_ids,
.ops = {
.add = acpi_fan_add,
.remove = acpi_fan_remove,
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index be4f2899de7..ab65b2c2560 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -440,7 +440,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
acpi_status status;
struct acpi_namespace_node *node;
u32 flags;
- struct acpi_device_id hid;
+ struct acpica_device_id hid;
struct acpi_compatible_id_list *cid;
acpi_native_uint i;
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 3448edd61dc..c9f526e5539 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -46,7 +46,6 @@
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_link");
#define ACPI_PCI_LINK_CLASS "pci_irq_routing"
-#define ACPI_PCI_LINK_HID "PNP0C0F"
#define ACPI_PCI_LINK_DEVICE_NAME "PCI Interrupt Link"
#define ACPI_PCI_LINK_FILE_INFO "info"
#define ACPI_PCI_LINK_FILE_STATUS "state"
@@ -54,10 +53,16 @@ ACPI_MODULE_NAME("pci_link");
static int acpi_pci_link_add(struct acpi_device *device);
static int acpi_pci_link_remove(struct acpi_device *device, int type);
+static struct acpi_device_id link_device_ids[] = {
+ {"PNP0C0F", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, link_device_ids);
+
static struct acpi_driver acpi_pci_link_driver = {
.name = "pci_link",
.class = ACPI_PCI_LINK_CLASS,
- .ids = ACPI_PCI_LINK_HID,
+ .ids = link_device_ids,
.ops = {
.add = acpi_pci_link_add,
.remove = acpi_pci_link_remove,
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ad4145a3778..f14ff1ffab2 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -38,16 +38,21 @@
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_root");
#define ACPI_PCI_ROOT_CLASS "pci_bridge"
-#define ACPI_PCI_ROOT_HID "PNP0A03"
#define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge"
static int acpi_pci_root_add(struct acpi_device *device);
static int acpi_pci_root_remove(struct acpi_device *device, int type);
static int acpi_pci_root_start(struct acpi_device *device);
+static struct acpi_device_id root_device_ids[] = {
+ {"PNP0A03", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, root_device_ids);
+
static struct acpi_driver acpi_pci_root_driver = {
.name = "pci_root",
.class = ACPI_PCI_ROOT_CLASS,
- .ids = ACPI_PCI_ROOT_HID,
+ .ids = root_device_ids,
.ops = {
.add = acpi_pci_root_add,
.remove = acpi_pci_root_remove,
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 4ffecd17970..57b9a2998fd 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -59,10 +59,16 @@ static int acpi_power_remove(struct acpi_device *device, int type);
static int acpi_power_resume(struct acpi_device *device);
static int acpi_power_open_fs(struct inode *inode, struct file *file);
+static struct acpi_device_id power_device_ids[] = {
+ {ACPI_POWER_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, power_device_ids);
+
static struct acpi_driver acpi_power_driver = {
.name = "power",
.class = ACPI_POWER_CLASS,
- .ids = ACPI_POWER_HID,
+ .ids = power_device_ids,
.ops = {
.add = acpi_power_add,
.remove = acpi_power_remove,
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 81aceb5da7c..498422343f3 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -88,10 +88,16 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr);
extern int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
+static const struct acpi_device_id processor_device_ids[] = {
+ {ACPI_PROCESSOR_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
static struct acpi_driver acpi_processor_driver = {
.name = "processor",
.class = ACPI_PROCESSOR_CLASS,
- .ids = ACPI_PROCESSOR_HID,
+ .ids = processor_device_ids,
.ops = {
.add = acpi_processor_add,
.remove = acpi_processor_remove,
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 3f55d1f90c1..0b8204e7082 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -47,6 +47,9 @@ ACPI_MODULE_NAME("processor_throttling");
static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+/*
+ * _TPC - Throttling Present Capabilities
+ */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
acpi_status status = 0;
@@ -55,8 +58,10 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
if (!pr)
return -EINVAL;
status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
+ }
return -ENODEV;
}
pr->throttling_platform_limit = (int)tpc;
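Editor's note, not part of the patch: the _TPC hunk above sets the error-handling convention that the _PTC, _TSS and _TSD hunks below repeat. AE_NOT_FOUND only means the optional object is absent, so nothing is logged for it, while any other failure is reported. A stripped-down sketch of the pattern; "_XYZ" is a placeholder, not a real ACPI method.

#include <acpi/acpi_bus.h>

static int evaluate_optional_integer(acpi_handle handle, unsigned long *value)
{
        acpi_status status;

        status = acpi_evaluate_integer(handle, "_XYZ", NULL, value);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND)     /* absence is not an error */
                        ACPI_EXCEPTION((AE_INFO, status, "Evaluating _XYZ"));
                return -ENODEV;
        }
        return 0;
}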
@@ -68,9 +73,9 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
return acpi_processor_get_platform_limit(pr);
}
-/* --------------------------------------------------------------------------
- _PTC, _TSS, _TSD support
- -------------------------------------------------------------------------- */
+/*
+ * _PTC - Processor Throttling Control (and status) register location
+ */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
int result = 0;
@@ -81,7 +86,9 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
+ if (status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
+ }
return -ENODEV;
}
@@ -132,6 +139,10 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
return result;
}
+
+/*
+ * _TSS - Throttling Supported States
+ */
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
int result = 0;
@@ -144,7 +155,9 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
+ if (status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
+ }
return -ENODEV;
}
@@ -201,6 +214,10 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
return result;
}
+
+/*
+ * _TSD - T-State Dependencies
+ */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
int result = 0;
@@ -213,6 +230,9 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
+ }
return -ENODEV;
}
@@ -525,9 +545,6 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
int result = 0;
int step = 0;
int i = 0;
- int no_ptc = 0;
- int no_tss = 0;
- int no_tsd = 0;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
@@ -538,12 +555,14 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
if (!pr)
return -EINVAL;
- /* TBD: Support ACPI 2.0 objects */
- no_ptc = acpi_processor_get_throttling_control(pr);
- no_tss = acpi_processor_get_throttling_states(pr);
- no_tsd = acpi_processor_get_tsd(pr);
-
- if (no_ptc || no_tss) {
+ /*
+ * Evaluate _PTC, _TSS and _TPC
+ * They must all be present or none of them can be used.
+ */
+ if (acpi_processor_get_throttling_control(pr) ||
+ acpi_processor_get_throttling_states(pr) ||
+ acpi_processor_get_platform_limit(pr))
+ {
pr->throttling.acpi_processor_get_throttling =
&acpi_processor_get_throttling_fadt;
pr->throttling.acpi_processor_set_throttling =
@@ -555,6 +574,8 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
&acpi_processor_set_throttling_ptc;
}
+ acpi_processor_get_tsd(pr);
+
if (!pr->throttling.address) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
return 0;
@@ -658,18 +679,20 @@ static int acpi_processor_throttling_seq_show(struct seq_file *seq,
pr->throttling.state_count - 1);
seq_puts(seq, "states:\n");
- if (acpi_processor_get_throttling == acpi_processor_get_throttling_fadt)
+ if (pr->throttling.acpi_processor_get_throttling ==
+ acpi_processor_get_throttling_fadt) {
for (i = 0; i < pr->throttling.state_count; i++)
seq_printf(seq, " %cT%d: %02d%%\n",
(i == pr->throttling.state ? '*' : ' '), i,
(pr->throttling.states[i].performance ? pr->
throttling.states[i].performance / 10 : 0));
- else
+ } else {
for (i = 0; i < pr->throttling.state_count; i++)
seq_printf(seq, " %cT%d: %02d%%\n",
(i == pr->throttling.state ? '*' : ' '), i,
(int)pr->throttling.states_tss[i].
freqpercentage);
+ }
end:
return 0;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 974d00ccfe8..7d8e78ea13a 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -38,7 +38,6 @@
#define ACPI_SBS_CLASS "sbs"
#define ACPI_AC_CLASS "ac_adapter"
#define ACPI_BATTERY_CLASS "battery"
-#define ACPI_SBS_HID "ACPI0002"
#define ACPI_SBS_DEVICE_NAME "Smart Battery System"
#define ACPI_SBS_FILE_INFO "info"
#define ACPI_SBS_FILE_STATE "state"
@@ -124,10 +123,17 @@ static int acpi_sbs_add(struct acpi_device *device);
static int acpi_sbs_remove(struct acpi_device *device, int type);
static int acpi_sbs_resume(struct acpi_device *device);
+static const struct acpi_device_id sbs_device_ids[] = {
+ {"ACPI0001", 0},
+ {"ACPI0005", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
+
static struct acpi_driver acpi_sbs_driver = {
.name = "sbs",
.class = ACPI_SBS_CLASS,
- .ids = "ACPI0001,ACPI0005",
+ .ids = sbs_device_ids,
.ops = {
.add = acpi_sbs_add,
.remove = acpi_sbs_remove,
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 6b3b8a52247..be74347d135 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -16,7 +16,7 @@ ACPI_MODULE_NAME("scan");
extern struct acpi_device *acpi_root;
#define ACPI_BUS_CLASS "system_bus"
-#define ACPI_BUS_HID "ACPI_BUS"
+#define ACPI_BUS_HID "LNXSYBUS"
#define ACPI_BUS_DEVICE_NAME "System Bus"
static LIST_HEAD(acpi_device_list);
@@ -29,6 +29,62 @@ struct acpi_device_bus_id{
unsigned int instance_no;
struct list_head node;
};
+
+/*
+ * Creates hid/cid(s) string needed for modalias and uevent
+ * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
+ * char *modalias: "acpi:IBM0001:ACPI0001"
+*/
+int create_modalias(struct acpi_device *acpi_dev, char *modalias, int size){
+
+ int len;
+
+ if (!acpi_dev->flags.hardware_id)
+ return -ENODEV;
+
+ len = snprintf(modalias, size, "acpi:%s:",
+ acpi_dev->pnp.hardware_id);
+ if (len < 0 || len >= size)
+ return -EINVAL;
+ size -= len;
+
+ if (acpi_dev->flags.compatible_ids) {
+ struct acpi_compatible_id_list *cid_list;
+ int i;
+ int count;
+
+ cid_list = acpi_dev->pnp.cid_list;
+ for (i = 0; i < cid_list->count; i++) {
+ count = snprintf(&modalias[len], size, "%s:",
+ cid_list->id[i].value);
+ if (count < 0 || count >= size) {
+ printk(KERN_ERR "acpi: %s cid[%i] exceeds event buffer size",
+ acpi_dev->pnp.device_name, i);
+ break;
+ }
+ len += count;
+ size -= count;
+ }
+ }
+
+ modalias[len] = '\0';
+ return len;
+}
+
+static ssize_t
+acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ int len;
+
+ /* Device has no HID and no CID or string is >1024 */
+ len = create_modalias(acpi_dev, buf, 1024);
+ if (len <= 0)
+ return 0;
+ buf[len++] = '\n';
+ return len;
+}
+static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
+
static int acpi_eject_operation(acpi_handle handle, int lockable)
{
struct acpi_object_list arg_list;
@@ -154,6 +210,12 @@ static int acpi_device_setup_files(struct acpi_device *dev)
goto end;
}
+ if (dev->flags.hardware_id || dev->flags.compatible_ids){
+ result = device_create_file(&dev->dev, &dev_attr_modalias);
+ if(result)
+ goto end;
+ }
+
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
* hot-removal function from userland.
@@ -178,6 +240,9 @@ static void acpi_device_remove_files(struct acpi_device *dev)
if (ACPI_SUCCESS(status))
device_remove_file(&dev->dev, &dev_attr_eject);
+ if (dev->flags.hardware_id || dev->flags.compatible_ids)
+ device_remove_file(&dev->dev, &dev_attr_modalias);
+
if(dev->flags.hardware_id)
device_remove_file(&dev->dev, &dev_attr_hid);
if(dev->handle)
@@ -186,6 +251,37 @@ static void acpi_device_remove_files(struct acpi_device *dev)
/* --------------------------------------------------------------------------
ACPI Bus operations
-------------------------------------------------------------------------- */
+
+int acpi_match_device_ids(struct acpi_device *device,
+ const struct acpi_device_id *ids)
+{
+ const struct acpi_device_id *id;
+
+ if (device->flags.hardware_id) {
+ for (id = ids; id->id[0]; id++) {
+ if (!strcmp((char*)id->id, device->pnp.hardware_id))
+ return 0;
+ }
+ }
+
+ if (device->flags.compatible_ids) {
+ struct acpi_compatible_id_list *cid_list = device->pnp.cid_list;
+ int i;
+
+ for (id = ids; id->id[0]; id++) {
+ /* compare multiple _CID entries against driver ids */
+ for (i = 0; i < cid_list->count; i++) {
+ if (!strcmp((char*)id->id,
+ cid_list->id[i].value))
+ return 0;
+ }
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(acpi_match_device_ids);
+
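Editor's note, not part of the patch: besides backing acpi_bus_match(), the exported acpi_match_device_ids() can be used for ad-hoc checks, as the wakeup-flags hunk later in this diff does for the button HIDs. A small usage sketch along those lines:

#include <acpi/acpi_bus.h>

static const struct acpi_device_id button_ids[] = {
        {"PNP0C0C", 0},         /* power button */
        {"PNP0C0D", 0},         /* lid switch */
        {"PNP0C0E", 0},         /* sleep button */
        {"", 0},
};

static int device_is_button(struct acpi_device *device)
{
        /* acpi_match_device_ids() returns 0 on a _HID/_CID match,
         * -ENOENT otherwise. */
        return acpi_match_device_ids(device, button_ids) == 0;
}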
static void acpi_device_release(struct device *dev)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -219,37 +315,19 @@ static int acpi_bus_match(struct device *dev, struct device_driver *drv)
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_driver *acpi_drv = to_acpi_driver(drv);
- return !acpi_match_ids(acpi_dev, acpi_drv->ids);
+ return !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
}
static int acpi_device_uevent(struct device *dev, char **envp, int num_envp,
- char *buffer, int buffer_size)
+ char *buffer, int buffer_size)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
- int i = 0, length = 0, ret = 0;
-
- if (acpi_dev->flags.hardware_id)
- ret = add_uevent_var(envp, num_envp, &i,
- buffer, buffer_size, &length,
- "HWID=%s", acpi_dev->pnp.hardware_id);
- if (ret)
- return -ENOMEM;
- if (acpi_dev->flags.compatible_ids) {
- int j;
- struct acpi_compatible_id_list *cid_list;
- cid_list = acpi_dev->pnp.cid_list;
-
- for (j = 0; j < cid_list->count; j++) {
- ret = add_uevent_var(envp, num_envp, &i, buffer,
- buffer_size, &length, "COMPTID=%s",
- cid_list->id[j].value);
- if (ret)
- return -ENOMEM;
- }
+ strcpy(buffer, "MODALIAS=");
+ if (create_modalias(acpi_dev, buffer + 9, buffer_size - 9) > 0) {
+ envp[0] = buffer;
+ envp[1] = NULL;
}
-
- envp[i] = NULL;
return 0;
}
@@ -543,25 +621,6 @@ void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context)
return;
}
-int acpi_match_ids(struct acpi_device *device, char *ids)
-{
- if (device->flags.hardware_id)
- if (strstr(ids, device->pnp.hardware_id))
- return 0;
-
- if (device->flags.compatible_ids) {
- struct acpi_compatible_id_list *cid_list = device->pnp.cid_list;
- int i;
-
- /* compare multiple _CID entries against driver ids */
- for (i = 0; i < cid_list->count; i++) {
- if (strstr(ids, cid_list->id[i].value))
- return 0;
- }
- }
- return -ENOENT;
-}
-
static int acpi_bus_get_perf_flags(struct acpi_device *device)
{
device->performance.state = ACPI_STATE_UNKNOWN;
@@ -624,6 +683,13 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package = NULL;
+ struct acpi_device_id button_device_ids[] = {
+ {"PNP0C0D", 0},
+ {"PNP0C0C", 0},
+ {"PNP0C0E", 0},
+ {"", 0},
+ };
+
/* _PRW */
status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer);
@@ -643,7 +709,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
device->wakeup.flags.valid = 1;
/* Power button, Lid switch always enable wakeup */
- if (!acpi_match_ids(device, "PNP0C0D,PNP0C0C,PNP0C0E"))
+ if (!acpi_match_device_ids(device, button_device_ids))
device->wakeup.flags.run_wake = 1;
end:
diff --git a/drivers/acpi/sleep/Makefile b/drivers/acpi/sleep/Makefile
index d6c017709c8..195a4f69c0f 100644
--- a/drivers/acpi/sleep/Makefile
+++ b/drivers/acpi/sleep/Makefile
@@ -1,5 +1,5 @@
obj-y := poweroff.o wakeup.o
obj-$(CONFIG_ACPI_SLEEP) += main.o
-obj-$(CONFIG_ACPI_SLEEP_PROC_FS) += proc.o
+obj-$(CONFIG_ACPI_SLEEP) += proc.o
EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 3279e72a94f..e8cff5dd4cb 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -21,6 +21,9 @@
u8 sleep_states[ACPI_S_STATE_COUNT];
+static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+
+#ifdef CONFIG_SUSPEND
static struct pm_ops acpi_pm_ops;
extern void do_suspend_lowlevel(void);
@@ -35,34 +38,49 @@ static u32 acpi_suspend_states[] = {
static int init_8259A_after_S1;
/**
+ * acpi_pm_set_target - Set the target system sleep state to the state
+ * associated with given @pm_state, if supported.
+ */
+
+static int acpi_pm_set_target(suspend_state_t pm_state)
+{
+ u32 acpi_state = acpi_suspend_states[pm_state];
+ int error = 0;
+
+ if (sleep_states[acpi_state]) {
+ acpi_target_sleep_state = acpi_state;
+ } else {
+ printk(KERN_ERR "ACPI does not support this state: %d\n",
+ pm_state);
+ error = -ENOSYS;
+ }
+ return error;
+}
+
+/**
* acpi_pm_prepare - Do preliminary suspend work.
- * @pm_state: suspend state we're entering.
+ * @pm_state: ignored
*
- * Make sure we support the state. If we do, and we need it, set the
- * firmware waking vector and do arch-specific nastiness to get the
- * wakeup code to the waking vector.
+ * If necessary, set the firmware waking vector and do arch-specific
+ * nastiness to get the wakeup code to the waking vector.
*/
-extern int acpi_sleep_prepare(u32 acpi_state);
-extern void acpi_power_off(void);
-
static int acpi_pm_prepare(suspend_state_t pm_state)
{
- u32 acpi_state = acpi_suspend_states[pm_state];
+ int error = acpi_sleep_prepare(acpi_target_sleep_state);
- if (!sleep_states[acpi_state]) {
- printk("acpi_pm_prepare does not support %d \n", pm_state);
- return -EPERM;
- }
- return acpi_sleep_prepare(acpi_state);
+ if (error)
+ acpi_target_sleep_state = ACPI_STATE_S0;
+
+ return error;
}
/**
* acpi_pm_enter - Actually enter a sleep state.
- * @pm_state: State we're entering.
+ * @pm_state: ignored
*
- * Flush caches and go to sleep. For STR or STD, we have to call
- * arch-specific assembly, which in turn call acpi_enter_sleep_state().
+ * Flush caches and go to sleep. For STR we have to call arch-specific
+ * assembly, which in turn call acpi_enter_sleep_state().
* It's unfortunate, but it works. Please fix if you're feeling frisky.
*/
@@ -70,31 +88,31 @@ static int acpi_pm_enter(suspend_state_t pm_state)
{
acpi_status status = AE_OK;
unsigned long flags = 0;
- u32 acpi_state = acpi_suspend_states[pm_state];
+ u32 acpi_state = acpi_target_sleep_state;
ACPI_FLUSH_CPU_CACHE();
/* Do arch specific saving of state. */
- if (pm_state > PM_SUSPEND_STANDBY) {
+ if (acpi_state == ACPI_STATE_S3) {
int error = acpi_save_state_mem();
- if (error)
+
+ if (error) {
+ acpi_target_sleep_state = ACPI_STATE_S0;
return error;
+ }
}
local_irq_save(flags);
acpi_enable_wakeup_device(acpi_state);
- switch (pm_state) {
- case PM_SUSPEND_STANDBY:
+ switch (acpi_state) {
+ case ACPI_STATE_S1:
barrier();
status = acpi_enter_sleep_state(acpi_state);
break;
- case PM_SUSPEND_MEM:
+ case ACPI_STATE_S3:
do_suspend_lowlevel();
break;
-
- default:
- return -EINVAL;
}
/* ACPI 3.0 specs (P62) says that it's the responsabilty
@@ -107,12 +125,8 @@ static int acpi_pm_enter(suspend_state_t pm_state)
local_irq_restore(flags);
printk(KERN_DEBUG "Back to C!\n");
- /* restore processor state
- * We should only be here if we're coming back from STR or STD.
- * And, in the case of the latter, the memory image should have already
- * been loaded from disk.
- */
- if (pm_state > PM_SUSPEND_STANDBY)
+ /* restore processor state */
+ if (acpi_state == ACPI_STATE_S3)
acpi_restore_state_mem();
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
@@ -120,7 +134,7 @@ static int acpi_pm_enter(suspend_state_t pm_state)
/**
* acpi_pm_finish - Finish up suspend sequence.
- * @pm_state: State we're coming out of.
+ * @pm_state: ignored
*
* This is called after we wake back up (or if entering the sleep state
* failed).
@@ -128,7 +142,7 @@ static int acpi_pm_enter(suspend_state_t pm_state)
static int acpi_pm_finish(suspend_state_t pm_state)
{
- u32 acpi_state = acpi_suspend_states[pm_state];
+ u32 acpi_state = acpi_target_sleep_state;
acpi_leave_sleep_state(acpi_state);
acpi_disable_wakeup_device(acpi_state);
@@ -136,28 +150,17 @@ static int acpi_pm_finish(suspend_state_t pm_state)
/* reset firmware waking vector */
acpi_set_firmware_waking_vector((acpi_physical_address) 0);
+ acpi_target_sleep_state = ACPI_STATE_S0;
+
+#ifdef CONFIG_X86
if (init_8259A_after_S1) {
printk("Broken toshiba laptop -> kicking interrupts\n");
init_8259A(0);
}
+#endif
return 0;
}
-int acpi_suspend(u32 acpi_state)
-{
- suspend_state_t states[] = {
- [1] = PM_SUSPEND_STANDBY,
- [3] = PM_SUSPEND_MEM,
- [5] = PM_SUSPEND_MAX
- };
-
- if (acpi_state < 6 && states[acpi_state])
- return pm_suspend(states[acpi_state]);
- if (acpi_state == 4)
- return hibernate();
- return -EINVAL;
-}
-
static int acpi_pm_state_valid(suspend_state_t pm_state)
{
u32 acpi_state;
@@ -176,12 +179,34 @@ static int acpi_pm_state_valid(suspend_state_t pm_state)
static struct pm_ops acpi_pm_ops = {
.valid = acpi_pm_state_valid,
+ .set_target = acpi_pm_set_target,
.prepare = acpi_pm_prepare,
.enter = acpi_pm_enter,
.finish = acpi_pm_finish,
};
-#ifdef CONFIG_SOFTWARE_SUSPEND
+/*
+ * Toshiba fails to preserve interrupts over S1, reinitialization
+ * of 8259 is needed after S1 resume.
+ */
+static int __init init_ints_after_s1(struct dmi_system_id *d)
+{
+ printk(KERN_WARNING "%s with broken S1 detected.\n", d->ident);
+ init_8259A_after_S1 = 1;
+ return 0;
+}
+
+static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+ {
+ .callback = init_ints_after_s1,
+ .ident = "Toshiba Satellite 4030cdt",
+ .matches = {DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),},
+ },
+ {},
+};
+#endif /* CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
static int acpi_hibernation_prepare(void)
{
return acpi_sleep_prepare(ACPI_STATE_S4);
@@ -233,41 +258,114 @@ static struct hibernation_ops acpi_hibernation_ops = {
.pre_restore = acpi_hibernation_pre_restore,
.restore_cleanup = acpi_hibernation_restore_cleanup,
};
-#endif /* CONFIG_SOFTWARE_SUSPEND */
+#endif /* CONFIG_HIBERNATION */
-/*
- * Toshiba fails to preserve interrupts over S1, reinitialization
- * of 8259 is needed after S1 resume.
- */
-static int __init init_ints_after_s1(struct dmi_system_id *d)
+int acpi_suspend(u32 acpi_state)
{
- printk(KERN_WARNING "%s with broken S1 detected.\n", d->ident);
- init_8259A_after_S1 = 1;
- return 0;
+ suspend_state_t states[] = {
+ [1] = PM_SUSPEND_STANDBY,
+ [3] = PM_SUSPEND_MEM,
+ [5] = PM_SUSPEND_MAX
+ };
+
+ if (acpi_state < 6 && states[acpi_state])
+ return pm_suspend(states[acpi_state]);
+ if (acpi_state == 4)
+ return hibernate();
+ return -EINVAL;
}
-static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
- {
- .callback = init_ints_after_s1,
- .ident = "Toshiba Satellite 4030cdt",
- .matches = {DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),},
- },
- {},
-};
+/**
+ * acpi_pm_device_sleep_state - return preferred power state of ACPI device
+ * in the system sleep state given by %acpi_target_sleep_state
+ * @dev: device to examine
+ * @wake: if set, the device should be able to wake up the system
+ * @d_min_p: used to store the upper limit of allowed states range
+ * Return value: preferred power state of the device on success, -ENODEV on
+ * failure (ie. if there's no 'struct acpi_device' for @dev)
+ *
+ * Find the lowest power (highest number) ACPI device power state that
+ * device @dev can be in while the system is in the sleep state represented
+ * by %acpi_target_sleep_state. If @wake is nonzero, the device should be
+ * able to wake up the system from this sleep state. If @d_min_p is set,
+ * the highest power (lowest number) device power state of @dev allowed
+ * in this system sleep state is stored at the location pointed to by it.
+ *
+ * The caller must ensure that @dev is valid before using this function.
+ * The caller is also responsible for figuring out if the device is
+ * supposed to be able to wake up the system and passing this information
+ * via @wake.
+ */
+
+int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p)
+{
+ acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+ struct acpi_device *adev;
+ char acpi_method[] = "_SxD";
+ unsigned long d_min, d_max;
+
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ printk(KERN_ERR "ACPI handle has no context!\n");
+ return -ENODEV;
+ }
+
+ acpi_method[2] = '0' + acpi_target_sleep_state;
+ /*
+ * If the sleep state is S0, we will return D3, but if the device has
+ * _S0W, we will use the value from _S0W
+ */
+ d_min = ACPI_STATE_D0;
+ d_max = ACPI_STATE_D3;
+
+ /*
+ * If present, _SxD methods return the minimum D-state (highest power
+ * state) we can use for the corresponding S-states. Otherwise, the
+ * minimum D-state is D0 (ACPI 3.x).
+ *
+ * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer
+ * provided -- that's our fault recovery, we ignore retval.
+ */
+ if (acpi_target_sleep_state > ACPI_STATE_S0)
+ acpi_evaluate_integer(handle, acpi_method, NULL, &d_min);
+
+ /*
+ * If _PRW says we can wake up the system from the target sleep state,
+ * the D-state returned by _SxD is sufficient for that (we assume a
+ * wakeup-aware driver if wake is set). Still, if _SxW exists
+ * (ACPI 3.x), it should return the maximum (lowest power) D-state that
+ * can wake the system. _S0W may be valid, too.
+ */
+ if (acpi_target_sleep_state == ACPI_STATE_S0 ||
+ (wake && adev->wakeup.state.enabled &&
+ adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+ acpi_method[3] = 'W';
+ acpi_evaluate_integer(handle, acpi_method, NULL, &d_max);
+ /* Sanity check */
+ if (d_max < d_min)
+ d_min = d_max;
+ }
+
+ if (d_min_p)
+ *d_min_p = d_min;
+ return d_max;
+}
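/*
 * Minimal usage sketch (hypothetical driver code, not part of this patch;
 * example_pci_suspend and the fallback policy are assumptions, and the
 * usual <linux/pci.h> / ACPI sleep declarations are taken as given): a PCI
 * driver's suspend path could ask for the preferred D-state and fall back
 * to D3hot when ACPI has nothing to say about the device.
 */
static int example_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	int d_target;

	/* wake is nonzero if the device should be able to wake the system */
	d_target = acpi_pm_device_sleep_state(&pdev->dev,
					      device_may_wakeup(&pdev->dev),
					      NULL /* upper limit not needed */);
	if (d_target < 0)
		d_target = PCI_D3hot;	/* no ACPI data for this device */

	pci_save_state(pdev);
	/* ACPI D-state numbers line up with PCI_D0..PCI_D3hot here */
	return pci_set_power_state(pdev, (pci_power_t)d_target);
}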
int __init acpi_sleep_init(void)
{
+ acpi_status status;
+ u8 type_a, type_b;
+#ifdef CONFIG_SUSPEND
int i = 0;
dmi_check_system(acpisleep_dmi_table);
+#endif
if (acpi_disabled)
return 0;
+#ifdef CONFIG_SUSPEND
printk(KERN_INFO PREFIX "(supports");
- for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
- acpi_status status;
- u8 type_a, type_b;
+ for (i = ACPI_STATE_S0; i < ACPI_STATE_S4; i++) {
status = acpi_get_sleep_type_data(i, &type_a, &type_b);
if (ACPI_SUCCESS(status)) {
sleep_states[i] = 1;
@@ -277,10 +375,14 @@ int __init acpi_sleep_init(void)
printk(")\n");
pm_set_ops(&acpi_pm_ops);
+#endif
-#ifdef CONFIG_SOFTWARE_SUSPEND
- if (sleep_states[ACPI_STATE_S4])
+#ifdef CONFIG_HIBERNATION
+ status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
+ if (ACPI_SUCCESS(status)) {
hibernation_set_ops(&acpi_hibernation_ops);
+ sleep_states[ACPI_STATE_S4] = 1;
+ }
#else
sleep_states[ACPI_STATE_S4] = 0;
#endif
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 61f1822cc35..66b62b0d360 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -14,8 +14,16 @@
#include "sleep.h"
#define _COMPONENT ACPI_SYSTEM_COMPONENT
+
+/*
+ * This file provides support for:
+ * /proc/acpi/sleep
+ * /proc/acpi/alarm
+ * /proc/acpi/wakeup
+ */
+
ACPI_MODULE_NAME("sleep")
-#ifdef CONFIG_ACPI_SLEEP_PROC_SLEEP
+#ifdef CONFIG_ACPI_PROCFS_SLEEP
static int acpi_system_sleep_seq_show(struct seq_file *seq, void *offset)
{
int i;
@@ -58,7 +66,7 @@ acpi_system_write_sleep(struct file *file,
goto Done;
}
state = simple_strtoul(str, NULL, 0);
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_HIBERNATION
if (state == 4) {
error = hibernate();
goto Done;
@@ -68,9 +76,9 @@ acpi_system_write_sleep(struct file *file,
Done:
return error ? error : count;
}
-#endif /* CONFIG_ACPI_SLEEP_PROC_SLEEP */
+#endif /* CONFIG_ACPI_PROCFS_SLEEP */
-#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86)
/* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */
#else
#define HAVE_ACPI_LEGACY_ALARM
@@ -463,7 +471,7 @@ static const struct file_operations acpi_system_wakeup_device_fops = {
.release = single_release,
};
-#ifdef CONFIG_ACPI_SLEEP_PROC_SLEEP
+#ifdef CONFIG_ACPI_PROCFS_SLEEP
static const struct file_operations acpi_system_sleep_fops = {
.open = acpi_system_sleep_open_fs,
.read = seq_read,
@@ -471,7 +479,7 @@ static const struct file_operations acpi_system_sleep_fops = {
.llseek = seq_lseek,
.release = single_release,
};
-#endif /* CONFIG_ACPI_SLEEP_PROC_SLEEP */
+#endif /* CONFIG_ACPI_PROCFS_SLEEP */
#ifdef HAVE_ACPI_LEGACY_ALARM
static const struct file_operations acpi_system_alarm_fops = {
@@ -498,14 +506,14 @@ static int __init acpi_sleep_proc_init(void)
if (acpi_disabled)
return 0;
-#ifdef CONFIG_ACPI_SLEEP_PROC_SLEEP
+#ifdef CONFIG_ACPI_PROCFS_SLEEP
/* 'sleep' [R/W] */
entry =
create_proc_entry("sleep", S_IFREG | S_IRUGO | S_IWUSR,
acpi_root_dir);
if (entry)
entry->proc_fops = &acpi_system_sleep_fops;
-#endif
+#endif /* CONFIG_ACPI_PROCFS_SLEEP */
#ifdef HAVE_ACPI_LEGACY_ALARM
/* 'alarm' [R/W] */
diff --git a/drivers/acpi/sleep/sleep.h b/drivers/acpi/sleep/sleep.h
index f3e70397a7d..ff1f8504f49 100644
--- a/drivers/acpi/sleep/sleep.h
+++ b/drivers/acpi/sleep/sleep.h
@@ -6,3 +6,5 @@ extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
extern void acpi_enable_wakeup_device(u8 sleep_state);
extern void acpi_disable_wakeup_device(u8 sleep_state);
extern void acpi_gpe_sleep_prepare(u32 sleep_state);
+
+extern int acpi_sleep_prepare(u32 acpi_state);
diff --git a/drivers/acpi/sleep/wakeup.c b/drivers/acpi/sleep/wakeup.c
index fab8f2694f0..97c27ddb144 100644
--- a/drivers/acpi/sleep/wakeup.c
+++ b/drivers/acpi/sleep/wakeup.c
@@ -17,7 +17,6 @@ ACPI_MODULE_NAME("wakeup_devices")
extern struct list_head acpi_wakeup_device_list;
extern spinlock_t acpi_device_lock;
-#ifdef CONFIG_ACPI_SLEEP
/**
* acpi_enable_wakeup_device_prep - prepare wakeup devices
* @sleep_state: ACPI state
@@ -180,7 +179,6 @@ static int __init acpi_wakeup_device_init(void)
}
late_initcall(acpi_wakeup_device_init);
-#endif
/*
* Disable all wakeup GPEs before entering requested sleep state.
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 58f1338981b..5a62de1b7f2 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -92,10 +92,16 @@ static int acpi_thermal_polling_open_fs(struct inode *inode, struct file *file);
static ssize_t acpi_thermal_write_polling(struct file *, const char __user *,
size_t, loff_t *);
+static const struct acpi_device_id thermal_device_ids[] = {
+ {ACPI_THERMAL_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
+
static struct acpi_driver acpi_thermal_driver = {
.name = "thermal",
.class = ACPI_THERMAL_CLASS,
- .ids = ACPI_THERMAL_HID,
+ .ids = thermal_device_ids,
.ops = {
.add = acpi_thermal_add,
.remove = acpi_thermal_remove,
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c
index f112af433e3..0042b7e78b2 100644
--- a/drivers/acpi/utilities/uteval.c
+++ b/drivers/acpi/utilities/uteval.c
@@ -407,7 +407,7 @@ acpi_ut_copy_id_string(char *destination, char *source, acpi_size max_length)
acpi_status
acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
- struct acpi_device_id *hid)
+ struct acpica_device_id *hid)
{
union acpi_operand_object *obj_desc;
acpi_status status;
@@ -609,7 +609,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
acpi_status
acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
- struct acpi_device_id *uid)
+ struct acpica_device_id *uid)
{
union acpi_operand_object *obj_desc;
acpi_status status;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 04ea697f72b..d9870194198 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -74,10 +74,16 @@ MODULE_LICENSE("GPL");
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device, int type);
+static const struct acpi_device_id video_device_ids[] = {
+ {ACPI_VIDEO_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, video_device_ids);
+
static struct acpi_driver acpi_video_bus = {
.name = "video",
.class = ACPI_VIDEO_CLASS,
- .ids = ACPI_VIDEO_HID,
+ .ids = video_device_ids,
.ops = {
.add = acpi_video_bus_add,
.remove = acpi_video_bus_remove,
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d9fa329fd15..ad070861bb5 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -91,6 +91,7 @@
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
+#include <linux/dmi.h>
#define DRV_NAME "ata_piix"
#define DRV_VERSION "2.11"
@@ -140,6 +141,9 @@ enum {
RV = -3, /* reserved */
PIIX_AHCI_DEVICE = 6,
+
+ /* host->flags bits */
+ PIIX_HOST_BROKEN_SUSPEND = (1 << 24),
};
struct piix_map_db {
@@ -159,6 +163,10 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev);
static int ich_pata_cable_detect(struct ata_port *ap);
+#ifdef CONFIG_PM
+static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+static int piix_pci_device_resume(struct pci_dev *pdev);
+#endif
static unsigned int in_module_init = 1;
@@ -255,8 +263,8 @@ static struct pci_driver piix_pci_driver = {
.probe = piix_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
- .suspend = ata_pci_device_suspend,
- .resume = ata_pci_device_resume,
+ .suspend = piix_pci_device_suspend,
+ .resume = piix_pci_device_resume,
#endif
};
@@ -881,6 +889,107 @@ static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev)
do_pata_set_dmamode(ap, adev, 1);
}
+#ifdef CONFIG_PM
+static struct dmi_system_id piix_broken_suspend_dmi_table[] = {
+ {
+ .ident = "TECRA M5",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
+ },
+ },
+ {
+ .ident = "Satellite U200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
+ },
+ },
+ {
+ .ident = "Satellite U205",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
+ },
+ },
+ {
+ .ident = "Portege M500",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
+ },
+ },
+ { }
+};
+
+static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ unsigned long flags;
+ int rc = 0;
+
+ rc = ata_host_suspend(host, mesg);
+ if (rc)
+ return rc;
+
+ /* Some braindamaged ACPI suspend implementations expect the
+ * controller to be awake on entry; otherwise, it burns cpu
+ * cycles and power trying to do something to the sleeping
+ * beauty.
+ */
+ if (dmi_check_system(piix_broken_suspend_dmi_table) &&
+ mesg.event == PM_EVENT_SUSPEND) {
+ pci_save_state(pdev);
+
+ /* mark its power state as "unknown", since we don't
+ * know if e.g. the BIOS will change its device state
+ * when we suspend.
+ */
+ if (pdev->current_state == PCI_D0)
+ pdev->current_state = PCI_UNKNOWN;
+
+ /* tell resume that it's waking up from broken suspend */
+ spin_lock_irqsave(&host->lock, flags);
+ host->flags |= PIIX_HOST_BROKEN_SUSPEND;
+ spin_unlock_irqrestore(&host->lock, flags);
+ } else
+ ata_pci_device_do_suspend(pdev, mesg);
+
+ return 0;
+}
+
+static int piix_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ unsigned long flags;
+ int rc;
+
+ if (host->flags & PIIX_HOST_BROKEN_SUSPEND) {
+ spin_lock_irqsave(&host->lock, flags);
+ host->flags &= ~PIIX_HOST_BROKEN_SUSPEND;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ /* PCI device wasn't disabled during suspend. Use
+ * __pci_reenable_device() to avoid affecting the
+ * enable count.
+ */
+ rc = __pci_reenable_device(pdev);
+ if (rc)
+ dev_printk(KERN_ERR, &pdev->dev, "failed to enable "
+ "device after resume (%d)\n", rc);
+ } else
+ rc = ata_pci_device_do_resume(pdev);
+
+ if (rc == 0)
+ ata_host_resume(host);
+
+ return rc;
+}
+#endif
+
#define AHCI_PCI_BAR 5
#define AHCI_GLOBAL_CTL 0x04
#define AHCI_ENABLE (1 << 31)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 12ac0b511f7..e83647651b3 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -768,7 +768,7 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
* Decrement max hw segments accordingly.
*/
if (dev->class == ATA_DEV_ATAPI) {
- request_queue_t *q = sdev->request_queue;
+ struct request_queue *q = sdev->request_queue;
blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
}
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 010436795d2..e8a28e94fe4 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -45,7 +45,7 @@ static struct dmi_system_id cable_dmi_table[] = {
.ident = "HP Pavilion N5430",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_BOARD_NAME, "OmniBook N32N-736"),
+ DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
},
},
{ }
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index b0af65aadde..84d9c556856 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -26,7 +26,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt37x"
-#define DRV_VERSION "0.6.6"
+#define DRV_VERSION "0.6.7"
struct hpt_clock {
u8 xfer_speed;
@@ -1103,17 +1103,17 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* Select the DPLL clock. */
pci_write_config_byte(dev, 0x5b, 0x21);
- pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
+ pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
for(adjust = 0; adjust < 8; adjust++) {
if (hpt37x_calibrate_dpll(dev))
break;
/* See if it'll settle at a fractionally different clock */
- if ((adjust & 3) == 3) {
- f_low --;
- f_high ++;
- }
- pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
+ if (adjust & 1)
+ f_low -= adjust >> 1;
+ else
+ f_high += adjust >> 1;
+ pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
}
if (adjust == 8) {
printk(KERN_WARNING "hpt37x: DPLL did not stabilize.\n");
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 966a5e28741..9caeaea753a 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,5 +1,5 @@
obj-y := shutdown.o
-obj-$(CONFIG_PM) += main.o suspend.o resume.o sysfs.o
+obj-$(CONFIG_PM_SLEEP) += main.o suspend.o resume.o sysfs.o
obj-$(CONFIG_PM_TRACE) += trace.o
ifeq ($(CONFIG_DEBUG_DRIVER),y)
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 591a0dd5dee..8ba0830cbc0 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -5,7 +5,7 @@
extern void device_shutdown(void);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/*
* main.c
@@ -62,7 +62,7 @@ extern int resume_device(struct device *);
*/
extern int suspend_device(struct device *, pm_message_t);
-#else /* CONFIG_PM */
+#else /* CONFIG_PM_SLEEP */
static inline int device_pm_add(struct device * dev)
diff --git a/drivers/base/power/shutdown.c b/drivers/base/power/shutdown.c
index a47ee1b70d2..56e8eaaac01 100644
--- a/drivers/base/power/shutdown.c
+++ b/drivers/base/power/shutdown.c
@@ -44,7 +44,5 @@ void device_shutdown(void)
dev->driver->shutdown(dev);
}
}
-
- sysdev_shutdown();
}
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 6ce8b897e26..c9751b2b57e 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1422,7 +1422,7 @@ static void redo_fd_request(void)
goto repeat;
}
-static void do_fd_request(request_queue_t * q)
+static void do_fd_request(struct request_queue * q)
{
redo_fd_request();
}
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 1d846681794..ba07f762c4c 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -138,7 +138,7 @@ struct aoedev {
u16 maxbcnt;
struct work_struct work;/* disk create work struct */
struct gendisk *gd;
- request_queue_t blkq;
+ struct request_queue blkq;
struct hd_geometry geo;
sector_t ssize;
struct timer_list timer;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 4f598270fa3..007faaf008e 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -125,7 +125,7 @@ aoeblk_release(struct inode *inode, struct file *filp)
}
static int
-aoeblk_make_request(request_queue_t *q, struct bio *bio)
+aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
struct aoedev *d;
struct buf *buf;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 14d6b949275..94268c75d04 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1466,7 +1466,7 @@ repeat:
}
-void do_fd_request(request_queue_t * q)
+void do_fd_request(struct request_queue * q)
{
unsigned long flags;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index a2d6612b80d..1be82d544dc 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -139,7 +139,7 @@ static struct board_type products[] = {
static ctlr_info_t *hba[MAX_CTLR];
-static void do_cciss_request(request_queue_t *q);
+static void do_cciss_request(struct request_queue *q);
static irqreturn_t do_cciss_intr(int irq, void *dev_id);
static int cciss_open(struct inode *inode, struct file *filep);
static int cciss_release(struct inode *inode, struct file *filep);
@@ -1584,7 +1584,7 @@ static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
*/
if (h->gendisk[0] != disk) {
if (disk) {
- request_queue_t *q = disk->queue;
+ struct request_queue *q = disk->queue;
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
if (q) {
@@ -2511,7 +2511,7 @@ after_error_processing:
/*
* Get a request and submit it to the controller.
*/
-static void do_cciss_request(request_queue_t *q)
+static void do_cciss_request(struct request_queue *q)
{
ctlr_info_t *h = q->queuedata;
CommandList_struct *c;
@@ -3380,7 +3380,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
do {
drive_info_struct *drv = &(hba[i]->drv[j]);
struct gendisk *disk = hba[i]->gendisk[j];
- request_queue_t *q;
+ struct request_queue *q;
/* Check if the disk was allocated already */
if (!disk){
@@ -3523,7 +3523,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
for (j = 0; j < CISS_MAX_LUN; j++) {
struct gendisk *disk = hba[i]->gendisk[j];
if (disk) {
- request_queue_t *q = disk->queue;
+ struct request_queue *q = disk->queue;
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index b94cd1c3213..be4e3477d83 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -161,7 +161,7 @@ static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
-static void do_ida_request(request_queue_t *q);
+static void do_ida_request(struct request_queue *q);
static void start_io(ctlr_info_t *h);
static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
@@ -391,7 +391,7 @@ static void __devexit cpqarray_remove_one_eisa (int i)
/* pdev is NULL for eisa */
static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
{
- request_queue_t *q;
+ struct request_queue *q;
int j;
/*
@@ -886,7 +886,7 @@ static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
* are in here (either via the dummy do_ida_request functions or by being
* called from the interrupt handler
*/
-static void do_ida_request(request_queue_t *q)
+static void do_ida_request(struct request_queue *q)
{
ctlr_info_t *h = q->queuedata;
cmdlist_t *c;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index fe088045dd0..085b7794fb3 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -251,7 +251,7 @@ static int irqdma_allocated;
static struct request *current_req;
static struct request_queue *floppy_queue;
-static void do_fd_request(request_queue_t * q);
+static void do_fd_request(struct request_queue * q);
#ifndef fd_get_dma_residue
#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
@@ -2981,7 +2981,7 @@ static void process_fd_request(void)
schedule_bh(redo_fd_request);
}
-static void do_fd_request(request_queue_t * q)
+static void do_fd_request(struct request_queue * q)
{
if (max_buffer_sectors == 0) {
printk("VFS: do_fd_request called on non-open device\n");
diff --git a/drivers/block/lguest_blk.c b/drivers/block/lguest_blk.c
index 1634c2dd25e..93e3c4001bf 100644
--- a/drivers/block/lguest_blk.c
+++ b/drivers/block/lguest_blk.c
@@ -1,6 +1,12 @@
-/* A simple block driver for lguest.
+/*D:400
+ * The Guest block driver
*
- * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ * This is a simple block driver, which appears as /dev/lgba, lgbb, lgbc etc.
+ * The mechanism is simple: we place the information about the request in the
+ * device page, then use SEND_DMA (containing the data for a write, or an empty
+ * "ping" DMA for a read).
+ :*/
+/* Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,27 +31,50 @@
static char next_block_index = 'a';
+/*D:420 Here is the structure which holds all the information we need about
+ * each Guest block device.
+ *
+ * I'm sure at this stage, you're wondering "hey, where was the adventure I was
+ * promised?" and thinking "Rusty sucks, I shall say nasty things about him on
+ * my blog". I think Real adventures have boring bits, too, and you're in the
+ * middle of one. But it gets better. Just not quite yet. */
struct blockdev
{
+ /* The block queue infrastructure wants a spinlock: it is held while it
+ * calls our block request function. We grab it in our interrupt
+ * handler so the responses don't mess with new requests. */
spinlock_t lock;
- /* The disk structure for the kernel. */
+ /* The disk structure registered with the kernel. */
struct gendisk *disk;
- /* The major number for this disk. */
+ /* The major device number for this disk, and the interrupt. We only
+ * really keep them here for completeness; we'd need them if we
+ * supported device unplugging. */
int major;
int irq;
+ /* The physical address of this device's memory page */
unsigned long phys_addr;
- /* The mapped block page. */
+ /* The mapped memory page for convenient access. */
struct lguest_block_page *lb_page;
- /* We only have a single request outstanding at a time. */
+ /* We only have a single request outstanding at a time: this is it. */
struct lguest_dma dma;
struct request *req;
};
-/* Jens gave me this nice helper to end all chunks of a request. */
+/*D:495 We originally used end_request() throughout the driver, but it turns
+ * out that end_request() is deprecated, and doesn't actually end the request
+ * (which seems like a good reason to deprecate it!). It simply ends the first
+ * bio. So if we had 3 bios in a "struct request" we would do all 3,
+ * end_request(), do 2, end_request(), do 1 and end_request(): twice as much
+ * work as we needed to do.
+ *
+ * This reinforced to me that I do not understand the block layer.
+ *
+ * Nonetheless, Jens Axboe gave me this nice helper to end all chunks of a
+ * request. This improved disk speed by 130%. */
static void end_entire_request(struct request *req, int uptodate)
{
if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
@@ -55,30 +84,62 @@ static void end_entire_request(struct request *req, int uptodate)
end_that_request_last(req, uptodate);
}
+/* I'm told there are only two stories in the world worth telling: love and
+ * hate. So there used to be a love scene here like this:
+ *
+ * Launcher: We could make beautiful I/O together, you and I.
+ * Guest: My, that's a big disk!
+ *
+ * Unfortunately, it was just too raunchy for our otherwise-gentle tale. */
+
+/*D:490 This is the interrupt handler, called when a block read or write has
+ * been completed for us. */
static irqreturn_t lgb_irq(int irq, void *_bd)
{
+ /* We handed our "struct blockdev" as the argument to request_irq(), so
+ * it is passed through to us here. This tells us which device we're
+ * dealing with in case we have more than one. */
struct blockdev *bd = _bd;
unsigned long flags;
+ /* We weren't doing anything? Strange, but could happen if we shared
+ * interrupts (we don't!). */
if (!bd->req) {
pr_debug("No work!\n");
return IRQ_NONE;
}
+ /* Not done yet? That's equally strange. */
if (!bd->lb_page->result) {
pr_debug("No result!\n");
return IRQ_NONE;
}
+ /* We have to grab the lock before ending the request. */
spin_lock_irqsave(&bd->lock, flags);
+ /* "result" is 1 for success, 2 for failure: end_entire_request() wants
+ * to know whether this succeeded or not. */
end_entire_request(bd->req, bd->lb_page->result == 1);
+ /* Clear out request, it's done. */
bd->req = NULL;
+ /* Reset incoming DMA for next time. */
bd->dma.used_len = 0;
+ /* Ready for more reads or writes */
blk_start_queue(bd->disk->queue);
spin_unlock_irqrestore(&bd->lock, flags);
+
+ /* The interrupt was for us, we dealt with it. */
return IRQ_HANDLED;
}
+/*D:480 The block layer's "struct request" contains a number of "struct bio"s,
+ * each of which contains "struct bio_vec"s, each of which contains a page, an
+ * offset and a length.
+ *
+ * Fortunately there are iterators to help us walk through the "struct
+ * request". Even more fortunately, there were plenty of places to steal the
+ * code from. We pack the "struct request" into our "struct lguest_dma" and
+ * return the total length. */
static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
{
unsigned int i = 0, idx, len = 0;
@@ -87,8 +148,13 @@ static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
rq_for_each_bio(bio, req) {
struct bio_vec *bvec;
bio_for_each_segment(bvec, bio, idx) {
+ /* We told the block layer not to give us too many. */
BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
+ /* If we had a zero-length segment, it would look like
+ * the end of the data referred to by the "struct
+ * lguest_dma", so make sure that doesn't happen. */
BUG_ON(!bvec->bv_len);
+ /* Convert page & offset to a physical address */
dma->addr[i] = page_to_phys(bvec->bv_page)
+ bvec->bv_offset;
dma->len[i] = bvec->bv_len;
@@ -96,26 +162,39 @@ static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
i++;
}
}
+ /* If the array isn't full, we mark the end with a 0 length */
if (i < LGUEST_MAX_DMA_SECTIONS)
dma->len[i] = 0;
return len;
}
+/* This creates an empty DMA, useful for prodding the Host without sending data
+ * (ie. when we want to do a read) */
static void empty_dma(struct lguest_dma *dma)
{
dma->len[0] = 0;
}
+/*D:470 Setting up a request is fairly easy: */
static void setup_req(struct blockdev *bd,
int type, struct request *req, struct lguest_dma *dma)
{
+ /* The type is 1 (write) or 0 (read). */
bd->lb_page->type = type;
+ /* The sector on disk where the read or write starts. */
bd->lb_page->sector = req->sector;
+ /* The result is initialized to 0 (unfinished). */
bd->lb_page->result = 0;
+ /* The current request (so we can end it in the interrupt handler). */
bd->req = req;
+ /* The number of bytes: returned as a side-effect of req_to_dma(),
+ * which packs the block layer's "struct request" into our "struct
+ * lguest_dma" */
bd->lb_page->bytes = req_to_dma(req, dma);
}
+/*D:450 Write is pretty straightforward: we pack the request into a "struct
+ * lguest_dma", then use SEND_DMA to send the request. */
static void do_write(struct blockdev *bd, struct request *req)
{
struct lguest_dma send;
@@ -126,6 +205,9 @@ static void do_write(struct blockdev *bd, struct request *req)
lguest_send_dma(bd->phys_addr, &send);
}
+/* Read is similar to write, except we pack the request into our receive
+ * "struct lguest_dma" and send through an empty DMA just to tell the Host that
+ * there's a request pending. */
static void do_read(struct blockdev *bd, struct request *req)
{
struct lguest_dma ping;
@@ -137,21 +219,30 @@ static void do_read(struct blockdev *bd, struct request *req)
lguest_send_dma(bd->phys_addr, &ping);
}
-static void do_lgb_request(request_queue_t *q)
+/*D:440 This is where requests come in: we get handed the request queue and are
+ * expected to pull a "struct request" off it until we've finished them or
+ * we're waiting for a reply: */
+static void do_lgb_request(struct request_queue *q)
{
struct blockdev *bd;
struct request *req;
again:
+ /* This sometimes returns NULL even on the very first time around. I
+ * wonder if it's something to do with letting elves handle the request
+ * queue... */
req = elv_next_request(q);
if (!req)
return;
+ /* We attached the struct blockdev to the disk: get it back */
bd = req->rq_disk->private_data;
- /* Sometimes we get repeated requests after blk_stop_queue. */
+ /* Sometimes we get repeated requests after blk_stop_queue(), but we
+ * can only handle one at a time. */
if (bd->req)
return;
+ /* We only do reads and writes: no tricky business! */
if (!blk_fs_request(req)) {
pr_debug("Got non-command 0x%08x\n", req->cmd_type);
req->errors++;
@@ -164,20 +255,31 @@ again:
else
do_read(bd, req);
- /* Wait for interrupt to tell us it's done. */
+ /* We've put out the request, so stop any more coming in until we get
+ * an interrupt, which takes us to lgb_irq() to re-enable the queue. */
blk_stop_queue(q);
}
+/*D:430 This is the "struct block_device_operations" we attach to the disk at
+ * the end of lguestblk_probe(). It doesn't seem to want much. */
static struct block_device_operations lguestblk_fops = {
.owner = THIS_MODULE,
};
+/*D:425 Setting up a disk device seems to involve a lot of code. I'm not sure
+ * quite why. I do know that the IDE code sent two or three of the maintainers
+ * insane; perhaps this is the fringe of the same disease?
+ *
+ * As in the console code, the probe function gets handed the generic
+ * lguest_device from lguest_bus.c: */
static int lguestblk_probe(struct lguest_device *lgdev)
{
struct blockdev *bd;
int err;
int irqflags = IRQF_SHARED;
+ /* First we allocate our own "struct blockdev" and initialize the easy
+ * fields. */
bd = kmalloc(sizeof(*bd), GFP_KERNEL);
if (!bd)
return -ENOMEM;
@@ -187,59 +289,100 @@ static int lguestblk_probe(struct lguest_device *lgdev)
bd->req = NULL;
bd->dma.used_len = 0;
bd->dma.len[0] = 0;
+ /* The descriptor in the lguest_devices array provided by the Host
+ * gives the Guest the physical page number of the device's page. */
bd->phys_addr = (lguest_devices[lgdev->index].pfn << PAGE_SHIFT);
+ /* We use lguest_map() to get a pointer to the device page */
bd->lb_page = lguest_map(bd->phys_addr, 1);
if (!bd->lb_page) {
err = -ENOMEM;
goto out_free_bd;
}
+ /* We need a major device number: 0 means "assign one dynamically". */
bd->major = register_blkdev(0, "lguestblk");
if (bd->major < 0) {
err = bd->major;
goto out_unmap;
}
+ /* This allocates a "struct gendisk" where we pack all the information
+ * about the disk which the rest of Linux sees. We ask for one minor
+ * number; I do wonder if we should be asking for more. */
bd->disk = alloc_disk(1);
if (!bd->disk) {
err = -ENOMEM;
goto out_unregister_blkdev;
}
+ /* Every disk needs a queue for requests to come in: we set up the
+ * queue with a callback function (the core of our driver) and the lock
+ * to use. */
bd->disk->queue = blk_init_queue(do_lgb_request, &bd->lock);
if (!bd->disk->queue) {
err = -ENOMEM;
goto out_put_disk;
}
- /* We can only handle a certain number of sg entries */
+ /* We can only handle a certain number of pointers in our SEND_DMA
+ * call, so we set that with blk_queue_max_hw_segments(). This is not
+ * to be confused with blk_queue_max_phys_segments() of course! I
+ * know, who could possibly confuse the two?
+ *
+ * Well, it's simple to tell them apart: this one seems to work and the
+ * other one didn't. */
blk_queue_max_hw_segments(bd->disk->queue, LGUEST_MAX_DMA_SECTIONS);
- /* Buffers must not cross page boundaries */
+
+ /* Due to technical limitations of our Host (and simple coding) we
+ * can't have a single buffer which crosses a page boundary. Tell it
+ * here. This means that our maximum request size is 16
+ * (LGUEST_MAX_DMA_SECTIONS) pages. */
blk_queue_segment_boundary(bd->disk->queue, PAGE_SIZE-1);
+ /* We name our disk: this becomes the device name when udev does its
+ * magic thing and creates the device node, such as /dev/lgba.
+ * next_block_index is a global which starts at 'a'. Unfortunately
+ * this simple increment logic means that the 27th disk will be called
+ * "/dev/lgb{". In that case, I recommend having at least 29 disks, so
+ * your /dev directory will be balanced. */
sprintf(bd->disk->disk_name, "lgb%c", next_block_index++);
+
+ /* We look to the device descriptor again to see if this device's
+ * interrupts are expected to be random. If they are, we tell the irq
+ * subsystem. At the moment this bit is always set. */
if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS)
irqflags |= IRQF_SAMPLE_RANDOM;
+
+ /* Now we have the name and irqflags, we can request the interrupt; we
+ * give it the "struct blockdev" we have set up to pass to lgb_irq()
+ * when there is an interrupt. */
err = request_irq(bd->irq, lgb_irq, irqflags, bd->disk->disk_name, bd);
if (err)
goto out_cleanup_queue;
+ /* We bind our one-entry DMA pool to the key for this block device so
+ * the Host can reply to our requests. The key is equal to the
+ * physical address of the device's page, which is conveniently
+ * unique. */
err = lguest_bind_dma(bd->phys_addr, &bd->dma, 1, bd->irq);
if (err)
goto out_free_irq;
+ /* We finish our disk initialization and add the disk to the system. */
bd->disk->major = bd->major;
bd->disk->first_minor = 0;
bd->disk->private_data = bd;
bd->disk->fops = &lguestblk_fops;
- /* This is initialized to the disk size by the other end. */
+ /* This is initialized to the disk size by the Launcher. */
set_capacity(bd->disk, bd->lb_page->num_sectors);
add_disk(bd->disk);
printk(KERN_INFO "%s: device %i at major %d\n",
bd->disk->disk_name, lgdev->index, bd->major);
+ /* We don't need to keep the "struct blockdev" around, but if we ever
+ * implemented device removal, we'd need this. */
lgdev->private = bd;
return 0;
@@ -258,6 +401,8 @@ out_free_bd:
return err;
}
+/*D:410 The boilerplate code for registering the lguest block driver is just
+ * like the console: */
static struct lguest_driver lguestblk_drv = {
.name = "lguestblk",
.owner = THIS_MODULE,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e425daa1eac..9f015fce413 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -529,7 +529,7 @@ static struct bio *loop_get_bio(struct loop_device *lo)
return bio;
}
-static int loop_make_request(request_queue_t *q, struct bio *old_bio)
+static int loop_make_request(struct request_queue *q, struct bio *old_bio)
{
struct loop_device *lo = q->queuedata;
int rw = bio_rw(old_bio);
@@ -558,7 +558,7 @@ out:
/*
* kick off io on the underlying address space
*/
-static void loop_unplug(request_queue_t *q)
+static void loop_unplug(struct request_queue *q)
{
struct loop_device *lo = q->queuedata;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index c1295102409..be92c658f06 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -100,7 +100,7 @@ static const char *nbdcmd_to_ascii(int cmd)
static void nbd_end_request(struct request *req)
{
int uptodate = (req->errors == 0) ? 1 : 0;
- request_queue_t *q = req->q;
+ struct request_queue *q = req->q;
unsigned long flags;
dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
@@ -410,7 +410,7 @@ static void nbd_clear_que(struct nbd_device *lo)
* { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
*/
-static void do_nbd_request(request_queue_t * q)
+static void do_nbd_request(struct request_queue * q)
{
struct request *req;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 1eeb8f2cde7..b8a994a2b01 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -183,7 +183,7 @@ static int pcd_packet(struct cdrom_device_info *cdi,
static int pcd_detect(void);
static void pcd_probe_capabilities(void);
static void do_pcd_read_drq(void);
-static void do_pcd_request(request_queue_t * q);
+static void do_pcd_request(struct request_queue * q);
static void do_pcd_read(void);
struct pcd_unit {
@@ -713,7 +713,7 @@ static int pcd_detect(void)
/* I/O request processing */
static struct request_queue *pcd_queue;
-static void do_pcd_request(request_queue_t * q)
+static void do_pcd_request(struct request_queue * q)
{
if (pcd_busy)
return;
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 31e01488eb5..df819f8a95a 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -698,7 +698,7 @@ static enum action pd_identify(struct pd_unit *disk)
/* end of io request engine */
-static void do_pd_request(request_queue_t * q)
+static void do_pd_request(struct request_queue * q)
{
if (pd_req)
return;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 5826508f673..ceffa6034e2 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -202,7 +202,7 @@ module_param_array(drive3, int, NULL, 0);
#define ATAPI_WRITE_10 0x2a
static int pf_open(struct inode *inode, struct file *file);
-static void do_pf_request(request_queue_t * q);
+static void do_pf_request(struct request_queue * q);
static int pf_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg);
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -760,7 +760,7 @@ static void pf_end_request(int uptodate)
}
}
-static void do_pf_request(request_queue_t * q)
+static void do_pf_request(struct request_queue * q)
{
if (pf_busy)
return;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 31be33e4f11..fadbfd880ba 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -752,7 +752,7 @@ static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio
*/
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
- request_queue_t *q = bdev_get_queue(pd->bdev);
+ struct request_queue *q = bdev_get_queue(pd->bdev);
struct request *rq;
int ret = 0;
@@ -979,7 +979,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
* Special care is needed if the underlying block device has a small
* max_phys_segments value.
*/
-static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q)
+static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
/*
@@ -2314,7 +2314,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
{
int ret;
long lba;
- request_queue_t *q;
+ struct request_queue *q;
/*
* We need to re-open the cdrom device without O_NONBLOCK to be able
@@ -2477,7 +2477,7 @@ static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int
return 0;
}
-static int pkt_make_request(request_queue_t *q, struct bio *bio)
+static int pkt_make_request(struct request_queue *q, struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
@@ -2626,7 +2626,7 @@ end_io:
-static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
+static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec)
{
struct pktcdvd_device *pd = q->queuedata;
sector_t zone = ZONE(bio->bi_sector, pd);
@@ -2647,7 +2647,7 @@ static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *b
static void pkt_init_queue(struct pktcdvd_device *pd)
{
- request_queue_t *q = pd->disk->queue;
+ struct request_queue *q = pd->disk->queue;
blk_queue_make_request(q, pkt_make_request);
blk_queue_hardsect_size(q, CD_FRAMESIZE);
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
index 688a4fb0dc9..3c796e23625 100644
--- a/drivers/block/ps2esdi.c
+++ b/drivers/block/ps2esdi.c
@@ -64,7 +64,7 @@ static void reset_ctrl(void);
static int ps2esdi_geninit(void);
-static void do_ps2esdi_request(request_queue_t * q);
+static void do_ps2esdi_request(struct request_queue * q);
static void ps2esdi_readwrite(int cmd, struct request *req);
@@ -473,7 +473,7 @@ static void __init ps2esdi_get_device_cfg(void)
}
/* strategy routine that handles most of the IO requests */
-static void do_ps2esdi_request(request_queue_t * q)
+static void do_ps2esdi_request(struct request_queue * q)
{
struct request *req;
/* since, this routine is called with interrupts cleared - they
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 170fb33dba9..aa8b890c80d 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -190,7 +190,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
}
static void ps3disk_do_request(struct ps3_storage_device *dev,
- request_queue_t *q)
+ struct request_queue *q)
{
struct request *req;
@@ -211,7 +211,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
}
}
-static void ps3disk_request(request_queue_t *q)
+static void ps3disk_request(struct request_queue *q)
{
struct ps3_storage_device *dev = q->queuedata;
struct ps3disk_private *priv = dev->sbd.core.driver_data;
@@ -404,7 +404,7 @@ static int ps3disk_identify(struct ps3_storage_device *dev)
return 0;
}
-static void ps3disk_prepare_flush(request_queue_t *q, struct request *req)
+static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
{
struct ps3_storage_device *dev = q->queuedata;
@@ -414,7 +414,7 @@ static void ps3disk_prepare_flush(request_queue_t *q, struct request *req)
req->cmd_type = REQ_TYPE_FLUSH;
}
-static int ps3disk_issue_flush(request_queue_t *q, struct gendisk *gendisk,
+static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
sector_t *sector)
{
struct ps3_storage_device *dev = q->queuedata;
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index a1512da3241..65150b548f3 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -264,7 +264,7 @@ static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector,
* 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Added devfs support
*
*/
-static int rd_make_request(request_queue_t *q, struct bio *bio)
+static int rd_make_request(struct request_queue *q, struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
struct address_space * mapping = bdev->bd_inode->i_mapping;
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index d50b8238115..4dff49256ac 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -444,7 +444,7 @@ out:
return err;
}
-static void do_vdc_request(request_queue_t *q)
+static void do_vdc_request(struct request_queue *q)
{
while (1) {
struct request *req = elv_next_request(q);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 1a65979f1f0..b4e462f154e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -225,7 +225,7 @@ static unsigned short write_postamble[] = {
static void swim3_select(struct floppy_state *fs, int sel);
static void swim3_action(struct floppy_state *fs, int action);
static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(request_queue_t * q);
+static void do_fd_request(struct request_queue * q);
static void start_request(struct floppy_state *fs);
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long));
@@ -290,7 +290,7 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
return (stat & DATA) == 0;
}
-static void do_fd_request(request_queue_t * q)
+static void do_fd_request(struct request_queue * q)
{
int i;
for(i=0;i<floppy_count;i++)
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 949ae93499e..402209fec59 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -278,7 +278,7 @@ struct carm_host {
unsigned int state;
u32 fw_ver;
- request_queue_t *oob_q;
+ struct request_queue *oob_q;
unsigned int n_oob;
unsigned int hw_sg_used;
@@ -287,7 +287,7 @@ struct carm_host {
unsigned int wait_q_prod;
unsigned int wait_q_cons;
- request_queue_t *wait_q[CARM_MAX_WAIT_Q];
+ struct request_queue *wait_q[CARM_MAX_WAIT_Q];
unsigned int n_msgs;
u64 msg_alloc;
@@ -756,7 +756,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
assert(rc == 0);
}
-static inline void carm_push_q (struct carm_host *host, request_queue_t *q)
+static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
{
unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
@@ -768,7 +768,7 @@ static inline void carm_push_q (struct carm_host *host, request_queue_t *q)
BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
}
-static inline request_queue_t *carm_pop_q(struct carm_host *host)
+static inline struct request_queue *carm_pop_q(struct carm_host *host)
{
unsigned int idx;
@@ -783,7 +783,7 @@ static inline request_queue_t *carm_pop_q(struct carm_host *host)
static inline void carm_round_robin(struct carm_host *host)
{
- request_queue_t *q = carm_pop_q(host);
+ struct request_queue *q = carm_pop_q(host);
if (q) {
blk_start_queue(q);
VPRINTK("STARTED QUEUE %p\n", q);
@@ -802,7 +802,7 @@ static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
}
}
-static void carm_oob_rq_fn(request_queue_t *q)
+static void carm_oob_rq_fn(struct request_queue *q)
{
struct carm_host *host = q->queuedata;
struct carm_request *crq;
@@ -833,7 +833,7 @@ static void carm_oob_rq_fn(request_queue_t *q)
}
}
-static void carm_rq_fn(request_queue_t *q)
+static void carm_rq_fn(struct request_queue *q)
{
struct carm_port *port = q->queuedata;
struct carm_host *host = port->host;
@@ -1494,7 +1494,7 @@ static int carm_init_disks(struct carm_host *host)
for (i = 0; i < CARM_MAX_PORTS; i++) {
struct gendisk *disk;
- request_queue_t *q;
+ struct request_queue *q;
struct carm_port *port;
port = &host->port[i];
@@ -1538,7 +1538,7 @@ static void carm_free_disks(struct carm_host *host)
for (i = 0; i < CARM_MAX_PORTS; i++) {
struct gendisk *disk = host->port[i].disk;
if (disk) {
- request_queue_t *q = disk->queue;
+ struct request_queue *q = disk->queue;
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
@@ -1571,7 +1571,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
struct carm_host *host;
unsigned int pci_dac;
int rc;
- request_queue_t *q;
+ struct request_queue *q;
unsigned int i;
if (!printed_version++)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 8b13d7d2cb6..c57dd2b3a0c 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -503,7 +503,7 @@ static void ub_cleanup(struct ub_dev *sc)
{
struct list_head *p;
struct ub_lun *lun;
- request_queue_t *q;
+ struct request_queue *q;
while (!list_empty(&sc->luns)) {
p = sc->luns.next;
@@ -619,7 +619,7 @@ static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
* The request function is our main entry point
*/
-static void ub_request_fn(request_queue_t *q)
+static void ub_request_fn(struct request_queue *q)
{
struct ub_lun *lun = q->queuedata;
struct request *rq;
@@ -2273,7 +2273,7 @@ err_core:
static int ub_probe_lun(struct ub_dev *sc, int lnum)
{
struct ub_lun *lun;
- request_queue_t *q;
+ struct request_queue *q;
struct gendisk *disk;
int rc;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index dec74bd2349..6b7c02d6360 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -114,7 +114,7 @@ struct cardinfo {
*/
struct bio *bio, *currentbio, **biotail;
- request_queue_t *queue;
+ struct request_queue *queue;
struct mm_page {
dma_addr_t page_dma;
@@ -357,7 +357,7 @@ static inline void reset_page(struct mm_page *page)
page->biotail = & page->bio;
}
-static void mm_unplug_device(request_queue_t *q)
+static void mm_unplug_device(struct request_queue *q)
{
struct cardinfo *card = q->queuedata;
unsigned long flags;
@@ -541,7 +541,7 @@ static void process_page(unsigned long data)
-- mm_make_request
-----------------------------------------------------------------------------------
*/
-static int mm_make_request(request_queue_t *q, struct bio *bio)
+static int mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
pr_debug("mm_make_request %llu %u\n",
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index dae39911a11..85916e2665d 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -400,7 +400,7 @@ error_ret:
/*
* This is the external request processing routine
*/
-static void do_viodasd_request(request_queue_t *q)
+static void do_viodasd_request(struct request_queue *q)
{
struct request *req;
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 0d97b7eb818..624d30f7da3 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -298,7 +298,7 @@ static u_char __init xd_detect (u_char *controller, unsigned int *address)
}
/* do_xd_request: handle an incoming request */
-static void do_xd_request (request_queue_t * q)
+static void do_xd_request (struct request_queue * q)
{
struct request *req;
diff --git a/drivers/block/xd.h b/drivers/block/xd.h
index 82e090fea95..cffd44a2038 100644
--- a/drivers/block/xd.h
+++ b/drivers/block/xd.h
@@ -104,7 +104,7 @@ static int xd_manual_geo_init (char *command);
static u_char xd_detect (u_char *controller, unsigned int *address);
static u_char xd_initdrives (void (*init_drive)(u_char drive));
-static void do_xd_request (request_queue_t * q);
+static void do_xd_request (struct request_queue * q);
static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg);
static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count);
static void xd_recalibrate (u_char drive);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6746c29181f..964e51634f2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -241,7 +241,7 @@ static inline void flush_requests(struct blkfront_info *info)
* do_blkif_request
* read a block; request is in a request queue
*/
-static void do_blkif_request(request_queue_t *rq)
+static void do_blkif_request(struct request_queue *rq)
{
struct blkfront_info *info = NULL;
struct request *req;
@@ -287,7 +287,7 @@ wait:
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
- request_queue_t *rq;
+ struct request_queue *rq;
rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
if (rq == NULL)
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 732ec63b6e9..cb27e8863d7 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -458,7 +458,7 @@ static inline void ace_fsm_yieldirq(struct ace_device *ace)
}
/* Get the next read/write request; ending requests that we don't handle */
-struct request *ace_get_next_request(request_queue_t * q)
+struct request *ace_get_next_request(struct request_queue * q)
{
struct request *req;
@@ -825,7 +825,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
/* ---------------------------------------------------------------------
* Block ops
*/
-static void ace_request(request_queue_t * q)
+static void ace_request(struct request_queue * q)
{
struct request *req;
struct ace_device *ace;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index e40fa98842e..2d5853cbd4b 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -67,7 +67,7 @@ static DEFINE_SPINLOCK(z2ram_lock);
static struct block_device_operations z2_fops;
static struct gendisk *z2ram_gendisk;
-static void do_z2_request(request_queue_t *q)
+static void do_z2_request(struct request_queue *q)
{
struct request *req;
while ((req = elv_next_request(q)) != NULL) {
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 499019bf8f4..67ee3d4b287 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2094,7 +2094,7 @@ out:
static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
int lba, int nframes)
{
- request_queue_t *q = cdi->disk->queue;
+ struct request_queue *q = cdi->disk->queue;
struct request *rq;
struct bio *bio;
unsigned int len;
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 44cd7b2ddf0..e51550db157 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -398,7 +398,7 @@ static void viocd_end_request(struct request *req, int uptodate)
static int rwreq;
-static void do_viocd_request(request_queue_t *q)
+static void do_viocd_request(struct request_queue *q)
{
struct request *req;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index c8dfd18bea4..b391776e5bf 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -130,6 +130,7 @@ config ROCKETPORT
config CYCLADES
tristate "Cyclades async mux support"
depends on SERIAL_NONSTANDARD && (PCI || ISA)
+ select FW_LOADER
---help---
This driver supports Cyclades Z and Y multiserial boards.
You would need something like this to connect more than two modems to
@@ -726,7 +727,7 @@ config NVRAM
config RTC
tristate "Enhanced Real Time Clock Support"
- depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC64 && (!SPARC32 || PCI) && !FRV && !ARM && !SUPERH && !S390
+ depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV && !ARM && !SUPERH && !S390
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -750,6 +751,28 @@ config RTC
To compile this driver as a module, choose M here: the
module will be called rtc.
+config JS_RTC
+ tristate "Enhanced Real Time Clock Support"
+ depends on SPARC32 && PCI
+ ---help---
+ If you say Y here and create a character special file /dev/rtc with
+ major number 10 and minor number 135 using mknod ("man mknod"), you
+ will get access to the real time clock (or hardware clock) built
+ into your computer.
+
+ Every PC has such a clock built in. It can be used to generate
+ signals from as low as 1Hz up to 8192Hz, and can also be used
+ as a 24 hour alarm. It reports status information via the file
+ /proc/driver/rtc and its behaviour is set by various ioctls on
+ /dev/rtc.
+
+ If you think you have a use for such a device (such as periodic data
+ sampling), then say Y here, and read <file:Documentation/rtc.txt>
+ for details.
+
+ To compile this driver as a module, choose M here: the
+ module will be called js-rtc.
+
config SGI_DS1286
tristate "SGI DS1286 RTC support"
depends on SGI_IP22
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 8fecaf4010b..23b26b87cc3 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -109,6 +109,9 @@ obj-$(CONFIG_TCG_TPM) += tpm/
obj-$(CONFIG_PS3_FLASH) += ps3flash.o
+obj-$(CONFIG_JS_RTC) += js-rtc.o
+js-rtc-y = rtc.o
+
# Files generated that shall be removed upon make clean
clean-files := consolemap_deftbl.c defkeymap.c
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index a9f9c48c242..713533d8a86 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -50,7 +50,7 @@ config AGP_ATI
config AGP_AMD
tristate "AMD Irongate, 761, and 762 chipset support"
- depends on AGP && X86_32
+ depends on AGP && (X86_32 || ALPHA)
help
This option gives you AGP support for the GLX component of
X on AMD Irongate, 761, and 762 chipsets.
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 780e59e588a..da7513d7b4e 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -123,21 +123,16 @@ static int ati_create_gatt_pages(int nr_tables)
for (i = 0; i < nr_tables; i++) {
entry = kzalloc(sizeof(struct ati_page_map), GFP_KERNEL);
+ tables[i] = entry;
if (entry == NULL) {
- while (i > 0) {
- kfree(tables[i-1]);
- i--;
- }
- kfree(tables);
retval = -ENOMEM;
break;
}
- tables[i] = entry;
retval = ati_create_page_map(entry);
if (retval != 0)
break;
}
- ati_generic_private.num_tables = nr_tables;
+ ati_generic_private.num_tables = i;
ati_generic_private.gatt_pages = tables;
if (retval != 0)
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
index fcb4b1bf0d4..ecd4248861b 100644
--- a/drivers/char/agp/compat_ioctl.c
+++ b/drivers/char/agp/compat_ioctl.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/fs.h>
#include <linux/agpgart.h>
#include <asm/uaccess.h>
#include "agp.h"
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index c7ed617aa7f..7791e98de51 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -37,6 +37,7 @@
#include <linux/agpgart.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/sched.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index d535c406b31..3db4f4076ed 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -1170,7 +1170,6 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
map_page_into_agp(page);
get_page(page);
- SetPageLocked(page);
atomic_inc(&agp_bridge->current_memory_agp);
return page_address(page);
}
@@ -1187,7 +1186,6 @@ void agp_generic_destroy_page(void *addr)
page = virt_to_page(addr);
unmap_page_from_agp(page);
put_page(page);
- unlock_page(page);
free_page((unsigned long)addr);
atomic_dec(&agp_bridge->current_memory_agp);
}
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index a1240603912..294cdbf4d44 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -20,7 +20,9 @@
#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
+#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
+#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
@@ -33,7 +35,8 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_1_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
@@ -213,7 +216,6 @@ static void *i8xx_alloc_pages(void)
}
global_flush_tlb();
get_page(page);
- SetPageLocked(page);
atomic_inc(&agp_bridge->current_memory_agp);
return page_address(page);
}
@@ -229,7 +231,6 @@ static void i8xx_destroy_pages(void *addr)
change_page_attr(page, 4, PAGE_KERNEL);
global_flush_tlb();
put_page(page);
- unlock_page(page);
__free_pages(page, 2);
atomic_dec(&agp_bridge->current_memory_agp);
}
@@ -527,6 +528,7 @@ static void intel_i830_init_gtt_entries(void)
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB ||
IS_I965 || IS_G33)
gtt_entries = MB(48) - KB(size);
else
@@ -538,6 +540,7 @@ static void intel_i830_init_gtt_entries(void)
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB ||
IS_I965 || IS_G33)
gtt_entries = MB(64) - KB(size);
else
@@ -1848,9 +1851,9 @@ static const struct intel_driver_description {
NULL, &intel_915_driver },
{ PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 1, "945GM",
+ { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM",
NULL, &intel_915_driver },
- { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME",
+ { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME",
NULL, &intel_915_driver },
{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ",
NULL, &intel_i965_driver },
@@ -1860,9 +1863,9 @@ static const struct intel_driver_description {
NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 1, "965GM",
+ { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE",
+ { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE",
NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL },
{ PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL },
@@ -2051,11 +2054,13 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945G_HB),
ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
ID(PCI_DEVICE_ID_INTEL_82965G_1_HB),
ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
ID(PCI_DEVICE_ID_INTEL_82965G_HB),
ID(PCI_DEVICE_ID_INTEL_82965GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965GME_HB),
ID(PCI_DEVICE_ID_INTEL_G33_HB),
ID(PCI_DEVICE_ID_INTEL_Q35_HB),
ID(PCI_DEVICE_ID_INTEL_Q33_HB),
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index cda608c42be..98cf8abb3e5 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -51,7 +51,6 @@ static void *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
return NULL;
get_page(page);
- SetPageLocked(page);
atomic_inc(&agp_bridge->current_memory_agp);
return page_address(page);
}
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index ba0e74ad74b..77bf4aa217a 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -73,7 +73,7 @@ static struct clocksource clocksource_hpet = {
.name = "hpet",
.rating = 250,
.read = read_hpet,
- .mask = 0xffffffffffffffff,
+ .mask = CLOCKSOURCE_MASK(64),
.mult = 0, /* to be calculated */
.shift = 10,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
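
Replacing the bare 0xffffffffffffffff literal with CLOCKSOURCE_MASK(64) uses the clocksource helper that builds an all-ones mask of a given width. A quick userspace rendering of the same idea (the helper below only mimics the macro; it is not the kernel's definition):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the idea behind CLOCKSOURCE_MASK(bits). */
static uint64_t clocksource_mask(unsigned int bits)
{
	return bits < 64 ? (1ULL << bits) - 1 : ~0ULL;
}

int main(void)
{
	printf("64-bit mask: %#llx\n", (unsigned long long)clocksource_mask(64));
	printf("32-bit mask: %#llx\n", (unsigned long long)clocksource_mask(32));
	return 0;
}
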
@@ -1007,9 +1007,15 @@ static int hpet_acpi_remove(struct acpi_device *device, int type)
return -EINVAL;
}
+static const struct acpi_device_id hpet_device_ids[] = {
+ {"PNP0103", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, hpet_device_ids);
+
static struct acpi_driver hpet_acpi_driver = {
.name = "hpet",
- .ids = "PNP0103",
+ .ids = hpet_device_ids,
.ops = {
.add = hpet_acpi_add,
.remove = hpet_acpi_remove,
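
This hpet change, like the atlas_btns.c change later in the series, replaces a single HID string with an acpi_device_id table plus MODULE_DEVICE_TABLE(), so the device alias is exported and the module can be autoloaded. A sketch of the same pattern for a hypothetical driver (the "FOO0001" HID and all names here are invented for illustration):

#include <linux/acpi.h>
#include <linux/module.h>

static const struct acpi_device_id foo_device_ids[] = {
	{"FOO0001", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, foo_device_ids);

static struct acpi_driver foo_acpi_driver = {
	.name = "foo",
	.ids = foo_device_ids,
	/* .ops.add and .ops.remove filled in as in the drivers in this series */
};
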
diff --git a/drivers/char/hvc_lguest.c b/drivers/char/hvc_lguest.c
index e7b889e404a..feeccbaec43 100644
--- a/drivers/char/hvc_lguest.c
+++ b/drivers/char/hvc_lguest.c
@@ -1,6 +1,22 @@
-/* Simple console for lguest.
+/*D:300
+ * The Guest console driver
*
- * Copyright (C) 2006 Rusty Russell, IBM Corporation
+ * This is a trivial console driver: we use lguest's DMA mechanism to send
+ * bytes out, and register a DMA buffer to receive bytes in. It is assumed to
+ * be present and available from the very beginning of boot.
+ *
+ * Writing console drivers is one of the few remaining Dark Arts in Linux.
+ * Fortunately for us, the path of virtual consoles has been well-trodden by
+ * the PowerPC folks, who wrote "hvc_console.c" to generically support any
+ * virtual console. We use that infrastructure which only requires us to write
+ * the basic put_chars and get_chars functions and call the right register
+ * functions.
+ :*/
+
+/*M:002 The console can be flooded: while the Guest is processing input the
+ * Host can send more. Buffering in the Host could alleviate this, but it is a
+ * difficult problem in general. :*/
+/* Copyright (C) 2006 Rusty Russell, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,49 +37,81 @@
#include <linux/lguest_bus.h>
#include "hvc_console.h"
+/*D:340 This is our single console input buffer, with associated "struct
+ * lguest_dma" referring to it. Note the 0-terminated length array, and the
+ * use of physical address for the buffer itself. */
static char inbuf[256];
static struct lguest_dma cons_input = { .used_len = 0,
.addr[0] = __pa(inbuf),
.len[0] = sizeof(inbuf),
.len[1] = 0 };
+/*D:310 The put_chars() callback is pretty straightforward.
+ *
+ * First we put the pointer and length in a "struct lguest_dma": we only have
+ * one pointer, so we set the second length to 0. Then we use SEND_DMA to send
+ * the data to (Host) buffers attached to the console key. Usually a device's
+ * key is a physical address within the device's memory, but because the
+ * console device doesn't have any associated physical memory, we use the
+ * LGUEST_CONSOLE_DMA_KEY constant (aka 0). */
static int put_chars(u32 vtermno, const char *buf, int count)
{
struct lguest_dma dma;
- /* FIXME: what if it's over a page boundary? */
+ /* FIXME: DMA buffers in a "struct lguest_dma" are not allowed
+ * to go over page boundaries. This never seems to happen,
+ * but if it did we'd need to fix this code. */
dma.len[0] = count;
dma.len[1] = 0;
dma.addr[0] = __pa(buf);
lguest_send_dma(LGUEST_CONSOLE_DMA_KEY, &dma);
+ /* We're expected to return the amount of data we wrote: all of it. */
return count;
}
+/*D:350 get_chars() is the callback from the hvc_console infrastructure when
+ * an interrupt is received.
+ *
+ * Firstly we see if our buffer has been filled: if not, we return. The rest
+ * of the code deals with the fact that the hvc_console() infrastructure only
+ * asks us for 16 bytes at a time. We keep a "cons_offset" variable for
+ * partially-read buffers. */
static int get_chars(u32 vtermno, char *buf, int count)
{
static int cons_offset;
+ /* Nothing left to see here... */
if (!cons_input.used_len)
return 0;
+ /* You want more than we have to give? Well, try wanting less! */
if (cons_input.used_len - cons_offset < count)
count = cons_input.used_len - cons_offset;
+ /* Copy across to their buffer and increment offset. */
memcpy(buf, inbuf + cons_offset, count);
cons_offset += count;
+
+ /* Finished? Zero offset, and reset cons_input so Host will use it
+ * again. */
if (cons_offset == cons_input.used_len) {
cons_offset = 0;
cons_input.used_len = 0;
}
return count;
}
+/*:*/
static struct hv_ops lguest_cons = {
.get_chars = get_chars,
.put_chars = put_chars,
};
+/*D:320 Console drivers are initialized very early so boot messages can go
+ * out. At this stage, the console is output-only. Our driver checks we're a
+ * Guest, and if so hands hvc_instantiate() the console number (0), priority
+ * (0), and the struct hv_ops containing the put_chars() function. */
static int __init cons_init(void)
{
if (strcmp(paravirt_ops.name, "lguest") != 0)
@@ -73,21 +121,46 @@ static int __init cons_init(void)
}
console_initcall(cons_init);
+/*D:370 To set up and manage our virtual console, we call hvc_alloc() and
+ * stash the result in the private pointer of the "struct lguest_device".
+ * Since we never remove the console device we never need this pointer again,
+ * but using ->private is considered good form, and you never know who's going
+ * to copy your driver.
+ *
+ * Once the console is set up, we bind our input buffer ready for input. */
static int lguestcons_probe(struct lguest_device *lgdev)
{
int err;
+ /* The first argument of hvc_alloc() is the virtual console number, so
+ * we use zero. The second argument is the interrupt number.
+ *
+ * The third argument is a "struct hv_ops" containing the put_chars()
+ * and get_chars() pointers. The final argument is the output buffer
+ * size: we use 256 and expect the Host to have room for us to send
+ * that much. */
lgdev->private = hvc_alloc(0, lgdev_irq(lgdev), &lguest_cons, 256);
if (IS_ERR(lgdev->private))
return PTR_ERR(lgdev->private);
+ /* We bind a single DMA buffer at key LGUEST_CONSOLE_DMA_KEY.
+ * "cons_input" is that statically-initialized global DMA buffer we saw
+ * above, and we also give the interrupt we want. */
err = lguest_bind_dma(LGUEST_CONSOLE_DMA_KEY, &cons_input, 1,
lgdev_irq(lgdev));
if (err)
printk("lguest console: failed to bind buffer.\n");
return err;
}
+/* Note the use of lgdev_irq() for the interrupt number. We tell hvc_alloc()
+ * to expect input when this interrupt is triggered, and then tell
+ * lguest_bind_dma() that is the interrupt to send us when input comes in. */
+/*D:360 From now on the console driver follows standard Guest driver form:
+ * register_lguest_driver() registers the device type and probe function, and
+ * the probe function sets up the device.
+ *
+ * The standard "struct lguest_driver": */
static struct lguest_driver lguestcons_drv = {
.name = "lguestcons",
.owner = THIS_MODULE,
@@ -95,6 +168,7 @@ static struct lguest_driver lguestcons_drv = {
.probe = lguestcons_probe,
};
+/* The standard init function */
static int __init hvc_lguest_init(void)
{
return register_lguest_driver(&lguestcons_drv);
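
The D:350 comment above notes that hvc_console asks for at most 16 bytes per call, so a larger receive buffer drains over several calls via cons_offset. A standalone sketch of that bookkeeping with a 40-byte buffer (plain userspace C, illustrative only):

#include <stdio.h>
#include <string.h>

static char inbuf[256] = "0123456789012345678901234567890123456789"; /* 40 bytes */
static unsigned int used_len = 40, offset;

static int get_chars(char *buf, int count)
{
	if (!used_len)
		return 0;
	if (used_len - offset < (unsigned int)count)
		count = used_len - offset;
	memcpy(buf, inbuf + offset, count);
	offset += count;
	if (offset == used_len)
		offset = used_len = 0;	/* buffer is free for refill */
	return count;
}

int main(void)
{
	char buf[16];
	int n;

	/* Prints: read 16, read 16, read 8. */
	while ((n = get_chars(buf, sizeof(buf))) > 0)
		printf("read %d bytes\n", n);
	return 0;
}
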
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index 6e55cfb9c65..e60a74c66e3 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -25,6 +25,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/mmtimer.h>
#include <linux/miscdevice.h>
#include <linux/posix-timers.h>
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index c716ef0dd37..c08a4152ee8 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -38,6 +38,7 @@
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 1724c41d241..98b6b4fb425 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -8,7 +8,7 @@ menuconfig EDAC
bool "EDAC - error detection and reporting (EXPERIMENTAL)"
depends on HAS_IOMEM
depends on EXPERIMENTAL
- depends on X86 || MIPS || PPC
+ depends on X86 || PPC
help
EDAC is designed to report errors in the core system.
These are low-level errors that are reported in the CPU or
@@ -126,7 +126,7 @@ config EDAC_I5000
config EDAC_PASEMI
tristate "PA Semi PWRficient"
depends on EDAC_MM_EDAC && PCI
- depends on PPC
+ depends on PPC_PASEMI
help
Support for error detection and correction on PA Semi
PWRficient.
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 4471be36259..063a1bffe38 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -214,6 +214,13 @@ void edac_mc_free(struct mem_ctl_info *mci)
}
EXPORT_SYMBOL_GPL(edac_mc_free);
+
+/*
+ * find_mci_by_dev
+ *
+ * scan list of controllers looking for the one that manages
+ * the 'dev' device
+ */
static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
struct mem_ctl_info *mci;
@@ -268,12 +275,6 @@ static void edac_mc_workq_function(struct work_struct *work_req)
if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
mci->edac_check(mci);
- /*
- * FIXME: temp place holder for PCI checks,
- * goes away when we break out PCI
- */
- edac_pci_do_parity_check();
-
mutex_unlock(&mem_ctls_mutex);
/* Reschedule */
@@ -314,36 +315,55 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{
int status;
- /* if not running POLL, leave now */
- if (mci->op_state == OP_RUNNING_POLL) {
- status = cancel_delayed_work(&mci->work);
- if (status == 0) {
- debugf0("%s() not canceled, flush the queue\n",
- __func__);
+ status = cancel_delayed_work(&mci->work);
+ if (status == 0) {
+ debugf0("%s() not canceled, flush the queue\n",
+ __func__);
- /* workq instance might be running, wait for it */
- flush_workqueue(edac_workqueue);
- }
+ /* workq instance might be running, wait for it */
+ flush_workqueue(edac_workqueue);
}
}
/*
- * edac_reset_delay_period
+ * edac_mc_reset_delay_period(unsigned long value)
+ *
+ * user space has updated our poll period value, need to
+ * reset our workq delays
*/
-static void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value)
+void edac_mc_reset_delay_period(int value)
{
- /* cancel the current workq request */
- edac_mc_workq_teardown(mci);
+ struct mem_ctl_info *mci;
+ struct list_head *item;
- /* lock the list of devices for the new setup */
mutex_lock(&mem_ctls_mutex);
- /* restart the workq request, with new delay value */
- edac_mc_workq_setup(mci, value);
+ /* scan the list and turn off all workq timers, doing so under lock
+ */
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+ if (mci->op_state == OP_RUNNING_POLL)
+ cancel_delayed_work(&mci->work);
+ }
+
+ mutex_unlock(&mem_ctls_mutex);
+
+
+ /* re-walk the list, and reset the poll delay */
+ mutex_lock(&mem_ctls_mutex);
+
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+ edac_mc_workq_setup(mci, (unsigned long) value);
+ }
mutex_unlock(&mem_ctls_mutex);
}
+
+
/* Return 0 on success, 1 on failure.
* Before calling this function, caller must
* assign a unique value to mci->mc_idx.
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index cd090b0677a..4a0576bd06f 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -122,6 +122,23 @@ static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
return count;
}
+/*
+ * mc poll_msec time value
+ */
+static ssize_t poll_msec_int_store(void *ptr, const char *buffer, size_t count)
+{
+ int *value = (int *)ptr;
+
+ if (isdigit(*buffer)) {
+ *value = simple_strtoul(buffer, NULL, 0);
+
+ /* notify edac_mc engine to reset the poll period */
+ edac_mc_reset_delay_period(*value);
+ }
+
+ return count;
+}
+
/* EDAC sysfs CSROW data structures and methods
*/
@@ -704,7 +721,7 @@ MEMCTRL_ATTR(edac_mc_log_ce,
S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
MEMCTRL_ATTR(edac_mc_poll_msec,
- S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+ S_IRUGO | S_IWUSR, memctrl_int_show, poll_msec_int_store);
/* Base Attributes of the memory ECC object */
static struct memctrl_dev_attribute *memctrl_attr[] = {
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index a2134dfc3cc..cbc419c8ebc 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -52,6 +52,8 @@ extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
extern void edac_device_reset_delay_period(struct edac_device_ctl_info
*edac_dev, unsigned long value);
+extern void edac_mc_reset_delay_period(int value);
+
extern void *edac_align_ptr(void *ptr, unsigned size);
/*
@@ -64,6 +66,10 @@ extern int edac_sysfs_pci_setup(void);
extern void edac_sysfs_pci_teardown(void);
extern int edac_pci_get_check_errors(void);
extern int edac_pci_get_poll_msec(void);
+extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
+extern void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg);
+extern void edac_pci_handle_npe(struct edac_pci_ctl_info *pci,
+ const char *msg);
#else /* CONFIG_PCI */
/* pre-process these away */
#define edac_pci_do_parity_check()
@@ -72,6 +78,8 @@ extern int edac_pci_get_poll_msec(void);
#define edac_sysfs_pci_teardown()
#define edac_pci_get_check_errors()
#define edac_pci_get_poll_msec()
+#define edac_pci_handle_pe()
+#define edac_pci_handle_npe()
#endif /* CONFIG_PCI */
#endif /* __EDAC_MODULE_H__ */
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index d9cd5e048ce..5dee9f50414 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -31,20 +31,12 @@
static DEFINE_MUTEX(edac_pci_ctls_mutex);
static struct list_head edac_pci_list = LIST_HEAD_INIT(edac_pci_list);
-static inline void edac_lock_pci_list(void)
-{
- mutex_lock(&edac_pci_ctls_mutex);
-}
-
-static inline void edac_unlock_pci_list(void)
-{
- mutex_unlock(&edac_pci_ctls_mutex);
-}
-
/*
- * The alloc() and free() functions for the 'edac_pci' control info
- * structure. The chip driver will allocate one of these for each
- * edac_pci it is going to control/register with the EDAC CORE.
+ * edac_pci_alloc_ctl_info
+ *
+ * The alloc() function for the 'edac_pci' control info
+ * structure. The chip driver will allocate one of these for each
+ * edac_pci it is going to control/register with the EDAC CORE.
*/
struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
const char *edac_pci_name)
@@ -53,47 +45,59 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
void *pvt;
unsigned int size;
+ debugf1("%s()\n", __func__);
+
pci = (struct edac_pci_ctl_info *)0;
pvt = edac_align_ptr(&pci[1], sz_pvt);
size = ((unsigned long)pvt) + sz_pvt;
- if ((pci = kzalloc(size, GFP_KERNEL)) == NULL)
+ /* Alloc the needed control struct memory */
+ pci = kzalloc(size, GFP_KERNEL);
+ if (pci == NULL)
return NULL;
+ /* Now set up the private space */
pvt = sz_pvt ? ((char *)pci) + ((unsigned long)pvt) : NULL;
pci->pvt_info = pvt;
-
pci->op_state = OP_ALLOC;
snprintf(pci->name, strlen(edac_pci_name) + 1, "%s", edac_pci_name);
return pci;
}
-
EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
/*
* edac_pci_free_ctl_info()
- * frees the memory allocated by edac_pci_alloc_ctl_info() function
+ *
+ * Last action on the pci control structure.
+ *
+ * call the sysfs remove routine, which will unregister
+ * this control struct's kobj. When that kobj's ref count
+ * goes to zero, its release function will be called and then
+ * kfree() the memory.
*/
void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
{
- kfree(pci);
-}
+ debugf1("%s()\n", __func__);
+ edac_pci_remove_sysfs(pci);
+}
EXPORT_SYMBOL_GPL(edac_pci_free_ctl_info);
/*
* find_edac_pci_by_dev()
* scans the edac_pci list for a specific 'struct device *'
+ *
+ * return NULL if not found, or return control struct pointer
*/
static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev)
{
struct edac_pci_ctl_info *pci;
struct list_head *item;
- debugf3("%s()\n", __func__);
+ debugf1("%s()\n", __func__);
list_for_each(item, &edac_pci_list) {
pci = list_entry(item, struct edac_pci_ctl_info, link);
@@ -118,10 +122,13 @@ static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci)
struct list_head *item, *insert_before;
struct edac_pci_ctl_info *rover;
+ debugf1("%s()\n", __func__);
+
insert_before = &edac_pci_list;
/* Determine if already on the list */
- if (unlikely((rover = find_edac_pci_by_dev(pci->dev)) != NULL))
+ rover = find_edac_pci_by_dev(pci->dev);
+ if (unlikely(rover != NULL))
goto fail0;
/* Insert in ascending order by 'pci_idx', so find position */
@@ -157,6 +164,8 @@ fail1:
/*
* complete_edac_pci_list_del
+ *
+ * RCU completion callback to indicate item is deleted
*/
static void complete_edac_pci_list_del(struct rcu_head *head)
{
@@ -169,6 +178,8 @@ static void complete_edac_pci_list_del(struct rcu_head *head)
/*
* del_edac_pci_from_global_list
+ *
+ * remove the PCI control struct from the global list
*/
static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
{
@@ -207,35 +218,52 @@ struct edac_pci_ctl_info *edac_pci_find(int idx)
return NULL;
}
-
EXPORT_SYMBOL_GPL(edac_pci_find);
/*
* edac_pci_workq_function()
- * performs the operation scheduled by a workq request
+ *
+ * periodic function that performs the operation
+ * scheduled by a workq request, for a given PCI control struct
*/
static void edac_pci_workq_function(struct work_struct *work_req)
{
struct delayed_work *d_work = (struct delayed_work *)work_req;
struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work);
+ int msec;
+ unsigned long delay;
- edac_lock_pci_list();
+ debugf3("%s() checking\n", __func__);
- if ((pci->op_state == OP_RUNNING_POLL) &&
- (pci->edac_check != NULL) && (edac_pci_get_check_errors()))
- pci->edac_check(pci);
+ mutex_lock(&edac_pci_ctls_mutex);
- edac_unlock_pci_list();
+ if (pci->op_state == OP_RUNNING_POLL) {
+ /* we might be in POLL mode, but there may NOT be a poll func
+ */
+ if ((pci->edac_check != NULL) && edac_pci_get_check_errors())
+ pci->edac_check(pci);
+
+ /* if we are on a one second period, then use round */
+ msec = edac_pci_get_poll_msec();
+ if (msec == 1000)
+ delay = round_jiffies(msecs_to_jiffies(msec));
+ else
+ delay = msecs_to_jiffies(msec);
+
+ /* Reschedule only if we are in POLL mode */
+ queue_delayed_work(edac_workqueue, &pci->work, delay);
+ }
- /* Reschedule */
- queue_delayed_work(edac_workqueue, &pci->work,
- msecs_to_jiffies(edac_pci_get_poll_msec()));
+ mutex_unlock(&edac_pci_ctls_mutex);
}
/*
* edac_pci_workq_setup()
* initialize a workq item for this edac_pci instance
* passing in the new delay period in msec
+ *
+ * locking model:
+ * called when 'edac_pci_ctls_mutex' is locked
*/
static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
unsigned int msec)
@@ -255,6 +283,8 @@ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
{
int status;
+ debugf0("%s()\n", __func__);
+
status = cancel_delayed_work(&pci->work);
if (status == 0)
flush_workqueue(edac_workqueue);
@@ -262,19 +292,25 @@ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
/*
* edac_pci_reset_delay_period
+ *
+ * called with a new period value for the workq period
+ * a) stop current workq timer
+ * b) restart workq timer with new value
*/
void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
unsigned long value)
{
- edac_lock_pci_list();
+ debugf0("%s()\n", __func__);
edac_pci_workq_teardown(pci);
+ /* need to lock for the setup */
+ mutex_lock(&edac_pci_ctls_mutex);
+
edac_pci_workq_setup(pci, value);
- edac_unlock_pci_list();
+ mutex_unlock(&edac_pci_ctls_mutex);
}
-
EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
/*
@@ -294,14 +330,13 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
debugf0("%s()\n", __func__);
pci->pci_idx = edac_idx;
+ pci->start_time = jiffies;
- edac_lock_pci_list();
+ mutex_lock(&edac_pci_ctls_mutex);
if (add_edac_pci_to_global_list(pci))
goto fail0;
- pci->start_time = jiffies;
-
if (edac_pci_create_sysfs(pci)) {
edac_pci_printk(pci, KERN_WARNING,
"failed to create sysfs pci\n");
@@ -323,16 +358,16 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
pci->ctl_name,
dev_name(pci), edac_op_state_to_string(pci->op_state));
- edac_unlock_pci_list();
+ mutex_unlock(&edac_pci_ctls_mutex);
return 0;
+ /* error unwind stack */
fail1:
del_edac_pci_from_global_list(pci);
fail0:
- edac_unlock_pci_list();
+ mutex_unlock(&edac_pci_ctls_mutex);
return 1;
}
-
EXPORT_SYMBOL_GPL(edac_pci_add_device);
/*
@@ -354,22 +389,25 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
debugf0("%s()\n", __func__);
- edac_lock_pci_list();
+ mutex_lock(&edac_pci_ctls_mutex);
- if ((pci = find_edac_pci_by_dev(dev)) == NULL) {
- edac_unlock_pci_list();
+ /* ensure the control struct is on the global list
+ * if not, then leave
+ */
+ pci = find_edac_pci_by_dev(dev);
+ if (pci == NULL) {
+ mutex_unlock(&edac_pci_ctls_mutex);
return NULL;
}
pci->op_state = OP_OFFLINE;
- edac_pci_workq_teardown(pci);
-
- edac_pci_remove_sysfs(pci);
-
del_edac_pci_from_global_list(pci);
- edac_unlock_pci_list();
+ mutex_unlock(&edac_pci_ctls_mutex);
+
+ /* stop the workq timer */
+ edac_pci_workq_teardown(pci);
edac_printk(KERN_INFO, EDAC_PCI,
"Removed device %d for %s %s: DEV %s\n",
@@ -377,14 +415,20 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
return pci;
}
-
EXPORT_SYMBOL_GPL(edac_pci_del_device);
+/*
+ * edac_pci_generic_check
+ *
+ * a Generic parity check API
+ */
void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
{
+ debugf4("%s()\n", __func__);
edac_pci_do_parity_check();
}
+/* free running instance index counter */
static int edac_pci_idx;
#define EDAC_PCI_GENCTL_NAME "EDAC PCI controller"
@@ -392,6 +436,17 @@ struct edac_pci_gen_data {
int edac_idx;
};
+/*
+ * edac_pci_create_generic_ctl
+ *
+ * A generic constructor for a PCI parity polling device
+ * Some systems have more than one domain of PCI busses.
+ * For systems with one domain, then this API will
+ * provide for a generic poller.
+ *
+ * This routine calls the edac_pci_alloc_ctl_info() for
+ * the generic device, with default values
+ */
struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
const char *mod_name)
{
@@ -421,13 +476,18 @@ struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
return pci;
}
-
EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
+/*
+ * edac_pci_release_generic_ctl
+ *
+ * The release function of a generic EDAC PCI polling device
+ */
void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
{
+ debugf0("%s() pci mod=%s\n", __func__, pci->mod_name);
+
edac_pci_del_device(pci->dev);
edac_pci_free_ctl_info(pci);
}
-
EXPORT_SYMBOL_GPL(edac_pci_release_generic_ctl);
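
One reading of the reordering in edac_pci_del_device() above: edac_pci_workq_function() now takes edac_pci_ctls_mutex itself, so the workq teardown (which may flush_workqueue() and wait for a running poll) has to run after the mutex is dropped, or the flush could wait on a work item that is blocked on that same mutex. A condensed sketch of the resulting order (not a literal excerpt):

	mutex_lock(&edac_pci_ctls_mutex);
	pci->op_state = OP_OFFLINE;
	del_edac_pci_from_global_list(pci);
	mutex_unlock(&edac_pci_ctls_mutex);

	/* Only now, with the mutex released, cancel and flush the poll work. */
	edac_pci_workq_teardown(pci);
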
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index fac94cae2c3..69f5dddabdd 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -13,22 +13,25 @@
#include "edac_core.h"
#include "edac_module.h"
+/* Turn off this whole feature if PCI is not configured */
#ifdef CONFIG_PCI
#define EDAC_PCI_SYMLINK "device"
-static int check_pci_errors; /* default YES check PCI parity */
-static int edac_pci_panic_on_pe; /* default no panic on PCI Parity */
-static int edac_pci_log_pe = 1; /* log PCI parity errors */
+/* data variables exported via sysfs */
+static int check_pci_errors; /* default NO check PCI parity */
+static int edac_pci_panic_on_pe; /* default NO panic on PCI Parity */
+static int edac_pci_log_pe = 1; /* log PCI parity errors */
static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
+static int edac_pci_poll_msec = 1000; /* one second workq period */
+
static atomic_t pci_parity_count = ATOMIC_INIT(0);
static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
-static int edac_pci_poll_msec = 1000;
-static struct kobject edac_pci_kobj; /* /sys/devices/system/edac/pci */
-static struct completion edac_pci_kobj_complete;
+static struct kobject edac_pci_top_main_kobj;
static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
+/* getter functions for the data variables */
int edac_pci_get_check_errors(void)
{
return check_pci_errors;
@@ -74,17 +77,22 @@ static void edac_pci_instance_release(struct kobject *kobj)
{
struct edac_pci_ctl_info *pci;
- debugf1("%s()\n", __func__);
+ debugf0("%s()\n", __func__);
+ /* Form pointer to containing struct, the pci control struct */
pci = to_instance(kobj);
- complete(&pci->kobj_complete);
+
+ /* decrement reference count on top main kobj */
+ kobject_put(&edac_pci_top_main_kobj);
+
+ kfree(pci); /* Free the control struct */
}
/* instance specific attribute structure */
struct instance_attribute {
struct attribute attr;
- ssize_t(*show) (struct edac_pci_ctl_info *, char *);
- ssize_t(*store) (struct edac_pci_ctl_info *, const char *, size_t);
+ ssize_t(*show) (struct edac_pci_ctl_info *, char *);
+ ssize_t(*store) (struct edac_pci_ctl_info *, const char *, size_t);
};
/* Function to 'show' fields from the edac_pci 'instance' structure */
@@ -112,6 +120,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
return -EIO;
}
+/* fs_ops table */
static struct sysfs_ops pci_instance_ops = {
.show = edac_pci_instance_show,
.store = edac_pci_instance_store
@@ -134,48 +143,82 @@ static struct instance_attribute *pci_instance_attr[] = {
NULL
};
-/* the ktype for pci instance */
+/* the ktype for a pci instance */
static struct kobj_type ktype_pci_instance = {
.release = edac_pci_instance_release,
.sysfs_ops = &pci_instance_ops,
.default_attrs = (struct attribute **)pci_instance_attr,
};
+/*
+ * edac_pci_create_instance_kobj
+ *
+ * construct one EDAC PCI instance's kobject for use
+ */
static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
{
+ struct kobject *main_kobj;
int err;
- pci->kobj.parent = &edac_pci_kobj;
+ debugf0("%s()\n", __func__);
+
+ /* Set the parent and the instance's ktype */
+ pci->kobj.parent = &edac_pci_top_main_kobj;
pci->kobj.ktype = &ktype_pci_instance;
err = kobject_set_name(&pci->kobj, "pci%d", idx);
if (err)
return err;
+ /* First bump the ref count on the top main kobj, which will
+ * track the number of PCI instances we have, and thus nest
+ * properly on keeping the module loaded
+ */
+ main_kobj = kobject_get(&edac_pci_top_main_kobj);
+ if (!main_kobj) {
+ err = -ENODEV;
+ goto error_out;
+ }
+
+ /* And now register this new kobject under the main kobj */
err = kobject_register(&pci->kobj);
if (err != 0) {
debugf2("%s() failed to register instance pci%d\n",
__func__, idx);
- return err;
+ kobject_put(&edac_pci_top_main_kobj);
+ goto error_out;
}
debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx);
return 0;
+
+ /* Error unwind stack */
+error_out:
+ return err;
}
-static void
-edac_pci_delete_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
+/*
+ * edac_pci_unregister_sysfs_instance_kobj
+ *
+ * unregister the kobj for the EDAC PCI instance
+ */
+void edac_pci_unregister_sysfs_instance_kobj(struct edac_pci_ctl_info *pci)
{
- init_completion(&pci->kobj_complete);
+ debugf0("%s()\n", __func__);
+
+ /* Unregister the instance kobject and allow its release
+ * function to release the main reference count and then
+ * kfree the memory
+ */
kobject_unregister(&pci->kobj);
- wait_for_completion(&pci->kobj_complete);
}
/***************************** EDAC PCI sysfs root **********************/
#define to_edacpci(k) container_of(k, struct edac_pci_ctl_info, kobj)
#define to_edacpci_attr(a) container_of(a, struct edac_pci_attr, attr)
+/* simple show/store functions for attributes */
static ssize_t edac_pci_int_show(void *ptr, char *buffer)
{
int *value = ptr;
@@ -267,118 +310,189 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = {
NULL,
};
-/* No memory to release */
-static void edac_pci_release(struct kobject *kobj)
+/*
+ * edac_pci_release_main_kobj
+ *
+ * This release function is called when the reference count to the
+ * passed kobj goes to zero.
+ *
+ * This kobj is the 'main' kobject that EDAC PCI instances
+ * link to, and thus provide for proper nesting counts
+ */
+static void edac_pci_release_main_kobj(struct kobject *kobj)
{
- struct edac_pci_ctl_info *pci;
- pci = to_edacpci(kobj);
+ debugf0("%s() here to module_put(THIS_MODULE)\n", __func__);
- debugf1("%s()\n", __func__);
- complete(&pci->kobj_complete);
+ /* last reference to top EDAC PCI kobject has been removed,
+ * NOW release our ref count on the core module
+ */
+ module_put(THIS_MODULE);
}
-static struct kobj_type ktype_edac_pci = {
- .release = edac_pci_release,
+/* ktype struct for the EDAC PCI main kobj */
+static struct kobj_type ktype_edac_pci_main_kobj = {
+ .release = edac_pci_release_main_kobj,
.sysfs_ops = &edac_pci_sysfs_ops,
.default_attrs = (struct attribute **)edac_pci_attr,
};
/**
- * edac_sysfs_pci_setup()
+ * edac_pci_main_kobj_setup()
*
* setup the sysfs for EDAC PCI attributes
* assumes edac_class has already been initialized
*/
-int edac_pci_register_main_kobj(void)
+int edac_pci_main_kobj_setup(void)
{
int err;
struct sysdev_class *edac_class;
- debugf1("%s()\n", __func__);
+ debugf0("%s()\n", __func__);
+
+ /* check and count if we have already created the main kobject */
+ if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1)
+ return 0;
+ /* First time, so create the main kobject and its
+ * controls and atributes
+ */
edac_class = edac_get_edac_class();
if (edac_class == NULL) {
debugf1("%s() no edac_class\n", __func__);
- return -ENODEV;
+ err = -ENODEV;
+ goto decrement_count_fail;
}
- edac_pci_kobj.ktype = &ktype_edac_pci;
+ /* Need the kobject hook ups, and name setting */
+ edac_pci_top_main_kobj.ktype = &ktype_edac_pci_main_kobj;
+ edac_pci_top_main_kobj.parent = &edac_class->kset.kobj;
- edac_pci_kobj.parent = &edac_class->kset.kobj;
-
- err = kobject_set_name(&edac_pci_kobj, "pci");
+ err = kobject_set_name(&edac_pci_top_main_kobj, "pci");
if (err)
- return err;
+ goto decrement_count_fail;
+
+ /* Bump the reference count on this module to ensure the
+ * module isn't unloaded until we deconstruct the top
+ * level main kobj for EDAC PCI
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ debugf1("%s() try_module_get() failed\n", __func__);
+ err = -ENODEV;
+ goto decrement_count_fail;
+ }
/* Instantiate the pci object */
/* FIXME: maybe new sysdev_create_subdir() */
- err = kobject_register(&edac_pci_kobj);
-
+ err = kobject_register(&edac_pci_top_main_kobj);
if (err) {
debugf1("Failed to register '.../edac/pci'\n");
- return err;
+ goto kobject_register_fail;
}
+ /* At this point, the top level kobject for EDAC PCI must be
+ * 'released' via edac_pci_main_kobj_teardown() so that its
+ * resources are cleaned up properly
+ */
debugf1("Registered '.../edac/pci' kobject\n");
return 0;
+
+ /* Error unwind stack */
+kobject_register_fail:
+ module_put(THIS_MODULE);
+
+decrement_count_fail:
+ /* if we are on this error exit, nothing to tear down */
+ atomic_dec(&edac_pci_sysfs_refcount);
+
+ return err;
}
/*
- * edac_pci_unregister_main_kobj()
+ * edac_pci_main_kobj_teardown()
*
- * perform the sysfs teardown for the PCI attributes
+ * if no longer linked (needed) remove the top level EDAC PCI
+ * kobject with its controls and attributes
*/
-void edac_pci_unregister_main_kobj(void)
+static void edac_pci_main_kobj_teardown(void)
{
debugf0("%s()\n", __func__);
- init_completion(&edac_pci_kobj_complete);
- kobject_unregister(&edac_pci_kobj);
- wait_for_completion(&edac_pci_kobj_complete);
+
+ /* Decrement the count and only if no more controller instances
+ * are connected, perform the unregistration of the top level
+ * main kobj
+ */
+ if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
+ debugf0("%s() called kobject_unregister on main kobj\n",
+ __func__);
+ kobject_unregister(&edac_pci_top_main_kobj);
+ }
}
+/*
+ *
+ * edac_pci_create_sysfs
+ *
+ * Create the controls/attributes for the specified EDAC PCI device
+ */
int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
{
int err;
struct kobject *edac_kobj = &pci->kobj;
- if (atomic_inc_return(&edac_pci_sysfs_refcount) == 1) {
- err = edac_pci_register_main_kobj();
- if (err) {
- atomic_dec(&edac_pci_sysfs_refcount);
- return err;
- }
- }
+ debugf0("%s() idx=%d\n", __func__, pci->pci_idx);
- err = edac_pci_create_instance_kobj(pci, pci->pci_idx);
- if (err) {
- if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0)
- edac_pci_unregister_main_kobj();
- }
+ /* create the top main EDAC PCI kobject, IF needed */
+ err = edac_pci_main_kobj_setup();
+ if (err)
+ return err;
- debugf0("%s() idx=%d\n", __func__, pci->pci_idx);
+ /* Create this instance's kobject under the MAIN kobject */
+ err = edac_pci_create_instance_kobj(pci, pci->pci_idx);
+ if (err)
+ goto unregister_cleanup;
err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK);
if (err) {
debugf0("%s() sysfs_create_link() returned err= %d\n",
__func__, err);
- return err;
+ goto symlink_fail;
}
return 0;
+
+ /* Error unwind stack */
+symlink_fail:
+ edac_pci_unregister_sysfs_instance_kobj(pci);
+
+unregister_cleanup:
+ edac_pci_main_kobj_teardown();
+
+ return err;
}
+/*
+ * edac_pci_remove_sysfs
+ *
+ * remove the controls and attributes for this EDAC PCI device
+ */
void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
{
- debugf0("%s()\n", __func__);
-
- edac_pci_delete_instance_kobj(pci, pci->pci_idx);
+ debugf0("%s() index=%d\n", __func__, pci->pci_idx);
+ /* Remove the symlink */
sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK);
- if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0)
- edac_pci_unregister_main_kobj();
+ /* remove this PCI instance's sysfs entries */
+ edac_pci_unregister_sysfs_instance_kobj(pci);
+
+ /* Call the main unregister function, which will determine
+ * if this 'pci' is the last instance.
+ * If it is, the main kobject will be unregistered as a result
+ */
+ debugf0("%s() calling edac_pci_main_kobj_teardown()\n", __func__);
+ edac_pci_main_kobj_teardown();
}
/************************ PCI error handling *************************/
@@ -414,13 +528,14 @@ static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
return status;
}
-typedef void (*pci_parity_check_fn_t) (struct pci_dev * dev);
/* Clear any PCI parity errors logged by this device. */
static void edac_pci_dev_parity_clear(struct pci_dev *dev)
{
u8 header_type;
+ debugf0("%s()\n", __func__);
+
get_pci_parity_status(dev, 0);
/* read the device TYPE, looking for bridges */
@@ -433,17 +548,28 @@ static void edac_pci_dev_parity_clear(struct pci_dev *dev)
/*
* PCI Parity polling
*
+ * Function to retrieve the current parity status
+ * and decode it
+ *
*/
static void edac_pci_dev_parity_test(struct pci_dev *dev)
{
+ unsigned long flags;
u16 status;
u8 header_type;
- /* read the STATUS register on this device
- */
+ /* stop any interrupts until we can acquire the status */
+ local_irq_save(flags);
+
+ /* read the STATUS register on this device */
status = get_pci_parity_status(dev, 0);
- debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
+ /* read the device TYPE, looking for bridges */
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+ local_irq_restore(flags);
+
+ debugf4("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
/* check the status reg for errors */
if (status) {
@@ -471,16 +597,14 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
}
}
- /* read the device TYPE, looking for bridges */
- pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
- debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id);
+ debugf4("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id);
if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
/* On bridges, need to examine secondary status register */
status = get_pci_parity_status(dev, 1);
- debugf2("PCI SEC_STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
+ debugf4("PCI SEC_STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
/* check the secondary status reg for errors */
if (status) {
@@ -510,9 +634,12 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
}
}
+/* reduce some complexity in definition of the iterator */
+typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
+
/*
* pci_dev parity list iterator
- * Scan the PCI device list for one iteration, looking for SERRORs
+ * Scan the PCI device list for one pass, looking for SERRORs
* Master Parity ERRORS or Parity ERRORs on primary or secondary devices
*/
static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
@@ -535,22 +662,22 @@ static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
*/
void edac_pci_do_parity_check(void)
{
- unsigned long flags;
int before_count;
debugf3("%s()\n", __func__);
+ /* if policy has PCI check off, leave now */
if (!check_pci_errors)
return;
before_count = atomic_read(&pci_parity_count);
/* scan all PCI devices looking for a Parity Error on devices and
- * bridges
+ * bridges.
+ * The iterator calls pci_get_device() which might sleep, thus
+ * we cannot disable interrupts in this scan.
*/
- local_irq_save(flags);
edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
- local_irq_restore(flags);
/* Only if operator has selected panic on PCI Error */
if (edac_pci_get_panic_on_pe()) {
@@ -560,6 +687,12 @@ void edac_pci_do_parity_check(void)
}
}
+/*
+ * edac_pci_clear_parity_errors
+ *
+ * function to perform an iteration over the PCI devices
+ * and clear their current status
+ */
void edac_pci_clear_parity_errors(void)
{
/* Clear any PCI bus parity errors that devices initially have logged
@@ -567,6 +700,12 @@ void edac_pci_clear_parity_errors(void)
*/
edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
}
+
+/*
+ * edac_pci_handle_pe
+ *
+ * Called to handle a PARITY ERROR event
+ */
void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg)
{
@@ -584,9 +723,14 @@ void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg)
*/
edac_pci_do_parity_check();
}
-
EXPORT_SYMBOL_GPL(edac_pci_handle_pe);
+
+/*
+ * edac_pci_handle_npe
+ *
+ * Called to handle a NON-PARITY ERROR event
+ */
void edac_pci_handle_npe(struct edac_pci_ctl_info *pci, const char *msg)
{
@@ -604,7 +748,6 @@ void edac_pci_handle_npe(struct edac_pci_ctl_info *pci, const char *msg)
*/
edac_pci_do_parity_check();
}
-
EXPORT_SYMBOL_GPL(edac_pci_handle_npe);
/*
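
The sysfs rework above replaces the completion-based teardown with plain reference counting; summarized (illustrative, not a literal excerpt), each object pins the one above it, so nothing is freed or unloaded out of order:

/*
 *   pci instance kobj --kobject_get--> edac_pci_top_main_kobj --try_module_get--> THIS_MODULE
 *
 *   instance release:  kobject_put(&edac_pci_top_main_kobj); kfree(pci);
 *   main kobj release: module_put(THIS_MODULE);
 */
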
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 0ecfdc432f8..e895f9f887a 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -275,7 +275,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL];
unsigned long mchbar;
- void *window;
+ void __iomem *window;
debugf0("MC: %s()\n", __func__);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index da1647869f9..1842f523c23 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -92,9 +92,9 @@ config I2C_AU1550
config I2C_BLACKFIN_TWI
tristate "Blackfin TWI I2C support"
- depends on BF534 || BF536 || BF537
+ depends on BF534 || BF536 || BF537 || BF54x
help
- This is the TWI I2C device driver for Blackfin 534/536/537.
+ This is the TWI I2C device driver for Blackfin 534/536/537/54x.
This driver can also be built as a module. If so, the module
will be called i2c-bfin-twi.
diff --git a/drivers/i2c/chips/ds1682.c b/drivers/i2c/chips/ds1682.c
index 5879f0f2549..9e94542c18a 100644
--- a/drivers/i2c/chips/ds1682.c
+++ b/drivers/i2c/chips/ds1682.c
@@ -75,7 +75,8 @@ static ssize_t ds1682_show(struct device *dev, struct device_attribute *attr,
/* Special case: the 32 bit regs are time values with 1/4s
* resolution, scale them up to milliseconds */
if (sattr->nr == 4)
- return sprintf(buf, "%llu\n", ((u64) le32_to_cpu(val)) * 250);
+ return sprintf(buf, "%llu\n",
+ ((unsigned long long)le32_to_cpu(val)) * 250);
/* Format the output string and return # of bytes */
return sprintf(buf, "%li\n", (long)le32_to_cpu(val));
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index 3c3f2ebf3fc..503ffec2ce0 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -352,7 +352,7 @@ static void tps65010_interrupt(struct tps65010 *tps)
/* REVISIT: this might need its own workqueue
* plus tweaks including deadlock avoidance ...
* also needs to get error handling and probably
- * an #ifdef CONFIG_SOFTWARE_SUSPEND
+ * an #ifdef CONFIG_HIBERNATION
*/
hibernate();
#endif
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 1486eb212cc..ca843522f91 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -3071,7 +3071,7 @@ static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
/*
* standard prep_rq_fn that builds 10 byte cmds
*/
-static int ide_cdrom_prep_fs(request_queue_t *q, struct request *rq)
+static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
{
int hard_sect = queue_hardsect_size(q);
long block = (long)rq->hard_sector / (hard_sect >> 9);
@@ -3137,7 +3137,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
return BLKPREP_OK;
}
-static int ide_cdrom_prep_fn(request_queue_t *q, struct request *rq)
+static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
{
if (blk_fs_request(rq))
return ide_cdrom_prep_fs(q, rq);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index b1304a7f3e0..5ce4216f72a 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -679,7 +679,7 @@ static ide_proc_entry_t idedisk_proc[] = {
};
#endif /* CONFIG_IDE_PROC_FS */
-static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
+static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
{
ide_drive_t *drive = q->queuedata;
@@ -697,7 +697,7 @@ static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
rq->buffer = rq->cmd;
}
-static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
ide_drive_t *drive = q->queuedata;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 484c50e7144..aa9f5f0b1e6 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -1327,7 +1327,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
/*
* Passes the stuff to ide_do_request
*/
-void do_ide_request(request_queue_t *q)
+void do_ide_request(struct request_queue *q)
{
ide_drive_t *drive = q->queuedata;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 5a4c5ea12f8..3a2a9a338fd 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -945,7 +945,7 @@ static void save_match(ide_hwif_t *hwif, ide_hwif_t *new, ide_hwif_t **match)
*/
static int ide_init_queue(ide_drive_t *drive)
{
- request_queue_t *q;
+ struct request_queue *q;
ide_hwif_t *hwif = HWIF(drive);
int max_sectors = 256;
int max_sg_entries = PRD_ENTRIES;
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 8f2db8dd35f..8e05d88e81b 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -652,7 +652,7 @@ repeat:
}
}
-static void do_hd_request (request_queue_t * q)
+static void do_hd_request (struct request_queue * q)
{
disable_irq(HD_IRQ);
hd_request();
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index f668d235e6b..bf19ddfa6cd 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -551,8 +551,8 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
unsigned long dma_base = pci_resource_start(dev, 1);
unsigned long ctl_size = pci_resource_len(dev, 0);
unsigned long dma_size = pci_resource_len(dev, 1);
- void *ctl_addr;
- void *dma_addr;
+ void __iomem *ctl_addr;
+ void __iomem *dma_addr;
int i;
for (i = 0; i < MAX_HWIFS; i++) {
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 336e5ff4cfc..cadf0479cce 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2677,7 +2677,7 @@ static long raw1394_iso_xmit_recv_packets32(struct file *file, unsigned int cmd,
struct raw1394_iso_packets32 __user *arg)
{
compat_uptr_t infos32;
- void *infos;
+ void __user *infos;
long err = -EFAULT;
struct raw1394_iso_packets __user *dst = compat_alloc_user_space(sizeof(struct raw1394_iso_packets));
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index a698f1949d1..cf25cdab02f 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -44,6 +44,7 @@
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
+#include <linux/fs.h>
#include <asm/uaccess.h>
#include "ipath_kernel.h"
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 0acc3a12360..e43e92fd9e2 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -31,7 +31,6 @@
#define ACPI_ATLAS_NAME "Atlas ACPI"
#define ACPI_ATLAS_CLASS "Atlas"
-#define ACPI_ATLAS_BUTTON_HID "ASIM0000"
static struct input_dev *input_dev;
@@ -130,10 +129,16 @@ static int atlas_acpi_button_remove(struct acpi_device *device, int type)
return status;
}
+static const struct acpi_device_id atlas_device_ids[] = {
+ {"ASIM0000", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, atlas_device_ids);
+
static struct acpi_driver atlas_acpi_driver = {
.name = ACPI_ATLAS_NAME,
.class = ACPI_ATLAS_CLASS,
- .ids = ACPI_ATLAS_BUTTON_HID,
+ .ids = atlas_device_ids,
.ops = {
.add = atlas_acpi_button_add,
.remove = atlas_acpi_button_remove,
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index adef447f23e..5ce632ca681 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -21,7 +21,7 @@ if SERIO
config SERIO_I8042
tristate "i8042 PC Keyboard controller" if EMBEDDED || !X86
default y
- depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && !M68K
+ depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && !M68K && !BFIN
---help---
i8042 is the chip over which the standard AT keyboard and PS/2
mouse are connected to the computer. If you use these devices,
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index bcbe6835beb..96856097d15 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -297,9 +297,6 @@ static struct kvm *kvm_create_vm(void)
kvm_io_bus_init(&kvm->pio_bus);
spin_lock_init(&kvm->lock);
INIT_LIST_HEAD(&kvm->active_mmu_pages);
- spin_lock(&kvm_lock);
- list_add(&kvm->vm_list, &vm_list);
- spin_unlock(&kvm_lock);
kvm_io_bus_init(&kvm->mmio_bus);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
struct kvm_vcpu *vcpu = &kvm->vcpus[i];
@@ -309,6 +306,9 @@ static struct kvm *kvm_create_vm(void)
vcpu->kvm = kvm;
vcpu->mmu.root_hpa = INVALID_PAGE;
}
+ spin_lock(&kvm_lock);
+ list_add(&kvm->vm_list, &vm_list);
+ spin_unlock(&kvm_lock);
return kvm;
}
@@ -1070,18 +1070,16 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
return 0;
mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
virt = kmap_atomic(page, KM_USER0);
- if (memcmp(virt + offset_in_page(gpa), val, bytes)) {
- kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
- memcpy(virt + offset_in_page(gpa), val, bytes);
- }
+ kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
+ memcpy(virt + offset_in_page(gpa), val, bytes);
kunmap_atomic(virt, KM_USER0);
return 1;
}
-static int emulator_write_emulated(unsigned long addr,
- const void *val,
- unsigned int bytes,
- struct x86_emulate_ctxt *ctxt)
+static int emulator_write_emulated_onepage(unsigned long addr,
+ const void *val,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
struct kvm_io_device *mmio_dev;
@@ -1113,6 +1111,26 @@ static int emulator_write_emulated(unsigned long addr,
return X86EMUL_CONTINUE;
}
+static int emulator_write_emulated(unsigned long addr,
+ const void *val,
+ unsigned int bytes,
+ struct x86_emulate_ctxt *ctxt)
+{
+ /* Crossing a page boundary? */
+ if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
+ int rc, now;
+
+ now = -addr & ~PAGE_MASK;
+ rc = emulator_write_emulated_onepage(addr, val, now, ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ addr += now;
+ val += now;
+ bytes -= now;
+ }
+ return emulator_write_emulated_onepage(addr, val, bytes, ctxt);
+}
+
static int emulator_cmpxchg_emulated(unsigned long addr,
const void *old,
const void *new,
@@ -2414,9 +2432,9 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
break;
}
}
- if (entry && (entry->edx & EFER_NX) && !(efer & EFER_NX)) {
+ if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
entry->edx &= ~(1 << 20);
- printk(KERN_INFO ": guest NX capability removed\n");
+ printk(KERN_INFO "kvm: guest NX capability removed\n");
}
}
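
The emulator_write_emulated() wrapper above splits a write that crosses a page boundary: the XOR test fires when the first and last byte differ in any bit above the page offset, and -addr & ~PAGE_MASK gives the bytes remaining in the current page. A worked userspace example with a 16-byte write starting 8 bytes below a boundary:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x1ff8;	/* 8 bytes below the 0x2000 boundary */
	unsigned long bytes = 16;

	/* Same test as emulator_write_emulated(): does the last byte land in
	 * a different page than the first? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		unsigned long now = -addr & ~PAGE_MASK;	/* bytes left in page */

		/* Prints: split: 8 bytes at 0x1ff8, then 8 at 0x2000 */
		printf("split: %lu bytes at %#lx, then %lu at %#lx\n",
		       now, addr, bytes - now, addr + now);
	}
	return 0;
}
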
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 1b800fc0034..1f979cb0df3 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -1178,6 +1178,8 @@ pop_instruction:
twobyte_insn:
switch (b) {
case 0x01: /* lgdt, lidt, lmsw */
+ /* Disable writeback. */
+ no_wb = 1;
switch (modrm_reg) {
u16 size;
unsigned long address;
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile
index 55382c7d799..e5047471c33 100644
--- a/drivers/lguest/Makefile
+++ b/drivers/lguest/Makefile
@@ -5,3 +5,15 @@ obj-$(CONFIG_LGUEST_GUEST) += lguest.o lguest_asm.o lguest_bus.o
obj-$(CONFIG_LGUEST) += lg.o
lg-y := core.o hypercalls.o page_tables.o interrupts_and_traps.o \
segments.o io.o lguest_user.o switcher.o
+
+Preparation Preparation!: PREFIX=P
+Guest: PREFIX=G
+Drivers: PREFIX=D
+Launcher: PREFIX=L
+Host: PREFIX=H
+Switcher: PREFIX=S
+Mastery: PREFIX=M
+Beer:
+ @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}"
+Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery:
+ @sh ../../Documentation/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'`
diff --git a/drivers/lguest/README b/drivers/lguest/README
new file mode 100644
index 00000000000..b7db39a64c6
--- /dev/null
+++ b/drivers/lguest/README
@@ -0,0 +1,47 @@
+Welcome, friend reader, to lguest.
+
+Lguest is an adventure, with you, the reader, as Hero. I can't think of many
+5000-line projects which offer both such capability and glimpses of future
+potential; it is an exciting time to be delving into the source!
+
+But be warned; this is an arduous journey of several hours or more! And as we
+know, all true Heroes are driven by a Noble Goal. Thus I offer a Beer (or
+equivalent) to anyone I meet who has completed this documentation.
+
+So get comfortable and keep your wits about you (both quick and humorous).
+Along your way to the Noble Goal, you will also gain masterly insight into
+lguest, and hypervisors and x86 virtualization in general.
+
+Our Quest is in seven parts: (best read with C highlighting turned on)
+
+I) Preparation
+ - In which our potential hero is flown quickly over the landscape for a
+ taste of its scope. Suitable for the armchair coders and other such
+ persons of faint constitution.
+
+II) Guest
+ - Where we encounter the first tantalising wisps of code, and come to
+ understand the details of the life of a Guest kernel.
+
+III) Drivers
+ - Whereby the Guest finds its voice and becomes useful, and our
+ understanding of the Guest is completed.
+
+IV) Launcher
+ - Where we trace back to the creation of the Guest, and thus begin our
+ understanding of the Host.
+
+V) Host
+ - Where we master the Host code, through a long and tortuous journey.
+ Indeed, it is here that our hero is tested in the Bit of Despair.
+
+VI) Switcher
+ - Where our understanding of the intertwined nature of Guests and Hosts
+ is completed.
+
+VII) Mastery
+ - Where our fully fledged hero grapples with the Great Question:
+ "What next?"
+
+make Preparation!
+Rusty Russell.
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index ce909ec5749..0a46e8837d9 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -1,5 +1,8 @@
-/* World's simplest hypervisor, to test paravirt_ops and show
- * unbelievers that virtualization is the future. Plus, it's fun! */
+/*P:400 This contains run_guest() which actually calls into the Host<->Guest
+ * Switcher and analyzes the return, such as determining if the Guest wants the
+ * Host to do something. This file also contains useful helper routines, and a
+ * couple of non-obvious setup and teardown pieces which were implemented after
+ * days of debugging pain. :*/
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/stddef.h>
@@ -61,11 +64,33 @@ static struct lguest_pages *lguest_pages(unsigned int cpu)
(SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
}
+/*H:010 We need to set up the Switcher at a high virtual address. Remember the
+ * Switcher is a few hundred bytes of assembler code which actually changes the
+ * CPU to run the Guest, and then changes back to the Host when a trap or
+ * interrupt happens.
+ *
+ * The Switcher code must be at the same virtual address in the Guest as the
+ * Host since it will be running as the switchover occurs.
+ *
+ * Trying to map memory at a particular address is an unusual thing to do, so
+ * it's not a simple one-liner. We also set up the per-cpu parts of the
+ * Switcher here.
+ */
static __init int map_switcher(void)
{
int i, err;
struct page **pagep;
+ /*
+ * Map the Switcher in to high memory.
+ *
+ * It turns out that if we choose the address 0xFFC00000 (4MB under the
+ * top virtual address), it makes setting up the page tables really
+ * easy.
+ */
+
+ /* We allocate an array of "struct page"s. map_vm_area() wants the
+ * pages in this form, rather than just an array of pointers. */
switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES,
GFP_KERNEL);
if (!switcher_page) {
@@ -73,6 +98,8 @@ static __init int map_switcher(void)
goto out;
}
+ /* Now we actually allocate the pages. The Guest will see these pages,
+ * so we make sure they're zeroed. */
for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
unsigned long addr = get_zeroed_page(GFP_KERNEL);
if (!addr) {
@@ -82,6 +109,9 @@ static __init int map_switcher(void)
switcher_page[i] = virt_to_page(addr);
}
+ /* Now we reserve the "virtual memory area" we want: 0xFFC00000
+ * (SWITCHER_ADDR). We might not get it in theory, but in practice
+ * it's worked so far. */
switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
VM_ALLOC, SWITCHER_ADDR, VMALLOC_END);
if (!switcher_vma) {
@@ -90,49 +120,105 @@ static __init int map_switcher(void)
goto free_pages;
}
+ /* This code actually sets up the pages we've allocated to appear at
+ * SWITCHER_ADDR. map_vm_area() takes the vma we allocated above, the
+ * kind of pages we're mapping (kernel pages), and a pointer to our
+ * array of struct pages. It increments that pointer, but we don't
+ * care. */
pagep = switcher_page;
err = map_vm_area(switcher_vma, PAGE_KERNEL, &pagep);
if (err) {
printk("lguest: map_vm_area failed: %i\n", err);
goto free_vma;
}
+
+ /* Now the switcher is mapped at the right address, we can't fail!
+ * Copy in the compiled-in Switcher code (from switcher.S). */
memcpy(switcher_vma->addr, start_switcher_text,
end_switcher_text - start_switcher_text);
- /* Fix up IDT entries to point into copied text. */
+ /* Most of the switcher.S doesn't care that it's been moved; on Intel,
+ * jumps are relative, and it doesn't access any references to external
+ * code or data.
+ *
+ * The only exception is the interrupt handlers in switcher.S: their
+ * addresses are placed in a table (default_idt_entries), so we need to
+ * update the table with the new addresses. switcher_offset() is a
+ * convenience function which returns the distance between the builtin
+ * switcher code and the high-mapped copy we just made. */
for (i = 0; i < IDT_ENTRIES; i++)
default_idt_entries[i] += switcher_offset();
+ /*
+ * Set up the Switcher's per-cpu areas.
+ *
+ * Each CPU gets two pages of its own within the high-mapped region
+ * (aka. "struct lguest_pages"). Much of this can be initialized now,
+ * but some depends on what Guest we are running (which is set up in
+ * copy_in_guest_info()).
+ */
for_each_possible_cpu(i) {
+ /* lguest_pages() returns this CPU's two pages. */
struct lguest_pages *pages = lguest_pages(i);
+ /* This is a convenience pointer to make the code fit one
+ * statement to a line. */
struct lguest_ro_state *state = &pages->state;
- /* These fields are static: rest done in copy_in_guest_info */
+ /* The Global Descriptor Table: the Host has a different one
+ * for each CPU. We keep a descriptor for the GDT which says
+ * where it is and how big it is (the size field actually holds the
+ * offset of the last byte, hence the "-1"). */
state->host_gdt_desc.size = GDT_SIZE-1;
state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);
+
+ /* All CPUs on the Host use the same Interrupt Descriptor
+ * Table, so we just use store_idt(), which gets this CPU's IDT
+ * descriptor. */
store_idt(&state->host_idt_desc);
+
+ /* The descriptors for the Guest's GDT and IDT can be filled
+ * out now, too. We copy the GDT & IDT into ->guest_gdt and
+ * ->guest_idt before actually running the Guest. */
state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
state->guest_idt_desc.address = (long)&state->guest_idt;
state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
state->guest_gdt_desc.address = (long)&state->guest_gdt;
+
+ /* We know where we want the stack to be when the Guest enters
+ * the switcher: in pages->regs. The stack grows downwards, so
+ * we start it at the end of that structure. */
state->guest_tss.esp0 = (long)(&pages->regs + 1);
+ /* And this is the GDT entry to use for the stack: we keep a
+ * couple of special LGUEST entries. */
state->guest_tss.ss0 = LGUEST_DS;
- /* No I/O for you! */
+
+ /* x86 can have a fine-grained bitmap which indicates what I/O
+ * ports the process can use. We set it to the end of our
+ * structure, meaning "none". */
state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);
+
+ /* Some GDT entries are the same across all Guests, so we can
+ * set them up now. */
setup_default_gdt_entries(state);
+ /* Most IDT entries are the same for all Guests, too.*/
setup_default_idt_entries(state, default_idt_entries);
- /* Setup LGUEST segments on all cpus */
+ /* The Host needs to be able to use the LGUEST segments on this
+ * CPU, too, so put them in the Host GDT. */
get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
}
- /* Initialize entry point into switcher. */
+ /* In the Switcher, we want the %cs segment register to use the
+ * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so
+ * it will be undisturbed when we switch. To change %cs and jump we
+ * need this structure to feed to Intel's "lcall" instruction. */
lguest_entry.offset = (long)switch_to_guest + switcher_offset();
lguest_entry.segment = LGUEST_CS;
printk(KERN_INFO "lguest: mapped switcher at %p\n",
switcher_vma->addr);
+ /* And we succeeded... */
return 0;
free_vma:
@@ -146,35 +232,58 @@ free_some_pages:
out:
return err;
}
+/*:*/
+/* Cleaning up the mapping when the module is unloaded is almost...
+ * too easy. */
static void unmap_switcher(void)
{
unsigned int i;
+ /* vunmap() undoes *both* map_vm_area() and __get_vm_area(). */
vunmap(switcher_vma->addr);
+ /* Now we just need to free the pages we copied the switcher into */
for (i = 0; i < TOTAL_SWITCHER_PAGES; i++)
__free_pages(switcher_page[i], 0);
}
-/* IN/OUT insns: enough to get us past boot-time probing. */
+/*H:130 Our Guest is usually so well behaved; it never tries to do things it
+ * isn't allowed to. Unfortunately, "struct paravirt_ops" isn't quite
+ * complete, because it doesn't contain replacements for the Intel I/O
+ * instructions. As a result, the Guest sometimes fumbles across one during
+ * the boot process as it probes for various things which are usually attached
+ * to a PC.
+ *
+ * When the Guest uses one of these instructions, we get trap #13 (General
+ * Protection Fault) and come here. We see if it's one of those troublesome
+ * instructions and skip over it. We return true if we did. */
static int emulate_insn(struct lguest *lg)
{
u8 insn;
unsigned int insnlen = 0, in = 0, shift = 0;
+ /* The eip contains the *virtual* address of the Guest's instruction:
+ * guest_pa just subtracts the Guest's page_offset. */
unsigned long physaddr = guest_pa(lg, lg->regs->eip);
- /* This only works for addresses in linear mapping... */
+ /* The guest_pa() function only works for Guest kernel addresses, but
+ * that's all we're trying to do anyway. */
if (lg->regs->eip < lg->page_offset)
return 0;
+
+ /* Decoding x86 instructions is icky. */
lgread(lg, &insn, physaddr, 1);
- /* Operand size prefix means it's actually for ax. */
+ /* 0x66 is an "operand prefix". It means it's using the upper 16 bits
+ of the eax register. */
if (insn == 0x66) {
shift = 16;
+ /* The instruction is 1 byte so far, read the next byte. */
insnlen = 1;
lgread(lg, &insn, physaddr + insnlen, 1);
}
+ /* We can ignore the lower bit for the moment and decode the 4 opcodes
+ * we need to emulate. */
switch (insn & 0xFE) {
case 0xE4: /* in <next byte>,%al */
insnlen += 2;
@@ -191,9 +300,13 @@ static int emulate_insn(struct lguest *lg)
insnlen += 1;
break;
default:
+ /* OK, we don't know what this is, can't emulate. */
return 0;
}
+ /* If it was an "IN" instruction, they expect the result to be read
+ * into %eax, so we change %eax. We always return all-ones, which
+ * traditionally means "there's nothing there". */
if (in) {
/* Lower bit tells is whether it's a 16 or 32 bit access */
if (insn & 0x1)
@@ -201,28 +314,46 @@ static int emulate_insn(struct lguest *lg)
else
lg->regs->eax |= (0xFFFF << shift);
}
+ /* Finally, we've "done" the instruction, so move past it. */
lg->regs->eip += insnlen;
+ /* Success! */
return 1;
}
-
+/*:*/
+
+/*L:305
+ * Dealing With Guest Memory.
+ *
+ * When the Guest gives us (what it thinks is) a physical address, we can use
+ * the normal copy_from_user() & copy_to_user() on that address: remember,
+ * Guest physical == Launcher virtual.
+ *
+ * But we can't trust the Guest: it might be trying to access the Launcher
+ * code. We have to check that the range is below the pfn_limit the Launcher
+ * gave us. We have to make sure that addr + len doesn't give us a false
+ * positive by overflowing, too. */
int lguest_address_ok(const struct lguest *lg,
unsigned long addr, unsigned long len)
{
return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
}
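
As an aside, the second "(addr+len >= addr)" term is the whole point of that check: on i386 the addition can wrap. A minimal standalone sketch (not lguest code; uint32_t stands in for the 32-bit unsigned long, and the pfn_limit of 1024 is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define GUEST_PAGE_SIZE 4096u

    /* Same shape as lguest_address_ok(): the range must end below the
     * limit and must not wrap around. */
    static int address_ok(uint32_t pfn_limit, uint32_t addr, uint32_t len)
    {
            return (addr + len) / GUEST_PAGE_SIZE < pfn_limit
                    && (addr + len >= addr);
    }

    int main(void)
    {
            uint32_t addr = 0xFFFFF000u, len = 0x2000u; /* wraps to 0x1000 */

            /* The limit test alone would accept this range... */
            printf("limit test only: %d\n",
                   (addr + len) / GUEST_PAGE_SIZE < 1024u);
            /* ...but the overflow test rejects it. */
            printf("full check:      %d\n", address_ok(1024u, addr, len));
            return 0;
    }
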
-/* Just like get_user, but don't let guest access lguest binary. */
+/* This is a convenient routine to get a 32-bit value from the Guest (a very
+ * common operation). Here we can see how useful the kill_lguest() routine we
+ * met in the Launcher can be: we return a random value (0) instead of needing
+ * to return an error. */
u32 lgread_u32(struct lguest *lg, unsigned long addr)
{
u32 val = 0;
- /* Don't let them access lguest binary */
+ /* Don't let them access lguest binary. */
if (!lguest_address_ok(lg, addr, sizeof(val))
|| get_user(val, (u32 __user *)addr) != 0)
kill_guest(lg, "bad read address %#lx", addr);
return val;
}
+/* Same thing for writing a value. */
void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val)
{
if (!lguest_address_ok(lg, addr, sizeof(val))
@@ -230,6 +361,9 @@ void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val)
kill_guest(lg, "bad write address %#lx", addr);
}
+/* This routine is more generic, and copies a range of Guest bytes into a
+ * buffer. If the copy_from_user() fails, we fill the buffer with zeroes, so
+ * the caller doesn't end up using uninitialized kernel memory. */
void lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes)
{
if (!lguest_address_ok(lg, addr, bytes)
@@ -240,6 +374,7 @@ void lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes)
}
}
+/* Similarly, our generic routine to copy into a range of Guest bytes. */
void lgwrite(struct lguest *lg, unsigned long addr, const void *b,
unsigned bytes)
{
@@ -247,6 +382,7 @@ void lgwrite(struct lguest *lg, unsigned long addr, const void *b,
|| copy_to_user((void __user *)addr, b, bytes) != 0)
kill_guest(lg, "bad write address %#lx len %u", addr, bytes);
}
+/* (end of memory access helper routines) :*/
static void set_ts(void)
{
@@ -257,54 +393,108 @@ static void set_ts(void)
write_cr0(cr0|8);
}
+/*S:010
+ * We are getting close to the Switcher.
+ *
+ * Remember that each CPU has two pages which are visible to the Guest when it
+ * runs on that CPU. This has to contain the state for that Guest: we copy the
+ * state in just before we run the Guest.
+ *
+ * Each Guest has "changed" flags which indicate what has changed in the Guest
+ * since it last ran. We saw this set in interrupts_and_traps.c and
+ * segments.c.
+ */
static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
{
+ /* Copying all this data can be quite expensive. We usually run the
+ * same Guest we ran last time (and that Guest hasn't run anywhere else
+ * meanwhile). If that's not the case, we pretend everything in the
+ * Guest has changed. */
if (__get_cpu_var(last_guest) != lg || lg->last_pages != pages) {
__get_cpu_var(last_guest) = lg;
lg->last_pages = pages;
lg->changed = CHANGED_ALL;
}
- /* These are pretty cheap, so we do them unconditionally. */
+ /* These copies are pretty cheap, so we do them unconditionally: */
+ /* Save the current Host top-level page directory. */
pages->state.host_cr3 = __pa(current->mm->pgd);
+ /* Set up the Guest's page tables to see this CPU's pages (and no
+ * other CPU's pages). */
map_switcher_in_guest(lg, pages);
+ /* Set up the two "TSS" members which tell the CPU what stack to use
+ * for traps which do directly into the Guest (ie. traps at privilege
+ * level 1). */
pages->state.guest_tss.esp1 = lg->esp1;
pages->state.guest_tss.ss1 = lg->ss1;
- /* Copy direct trap entries. */
+ /* Copy direct-to-Guest trap entries. */
if (lg->changed & CHANGED_IDT)
copy_traps(lg, pages->state.guest_idt, default_idt_entries);
- /* Copy all GDT entries but the TSS. */
+ /* Copy all GDT entries which the Guest can change. */
if (lg->changed & CHANGED_GDT)
copy_gdt(lg, pages->state.guest_gdt);
/* If only the TLS entries have changed, copy them. */
else if (lg->changed & CHANGED_GDT_TLS)
copy_gdt_tls(lg, pages->state.guest_gdt);
+ /* Mark the Guest as unchanged for next time. */
lg->changed = 0;
}
+/* Finally: the code to actually call into the Switcher to run the Guest. */
static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
{
+ /* This is a dummy value we need for GCC's sake. */
unsigned int clobber;
+ /* Copy the guest-specific information into this CPU's "struct
+ * lguest_pages". */
copy_in_guest_info(lg, pages);
- /* Put eflags on stack, lcall does rest: suitable for iret return. */
+ /* Now: we push the "eflags" register on the stack, then do an "lcall".
+ * This is how we change from using the kernel code segment to using
+ * the dedicated lguest code segment, as well as jumping into the
+ * Switcher.
+ *
+ * The lcall also pushes the old code segment (KERNEL_CS) onto the
+ * stack, then the address of this call. This stack layout happens to
+ * exactly match the stack of an interrupt... */
asm volatile("pushf; lcall *lguest_entry"
+ /* This is how we tell GCC that %eax ("a") and %ebx ("b")
+ * are changed by this routine. The "=" means output. */
: "=a"(clobber), "=b"(clobber)
+ /* %eax contains the pages pointer. ("0" refers to the
+ * 0-th argument above, ie "a"). %ebx contains the
+ * physical address of the Guest's top-level page
+ * directory. */
: "0"(pages), "1"(__pa(lg->pgdirs[lg->pgdidx].pgdir))
+ /* We tell gcc that all these registers could change,
+ * which means we don't have to save and restore them in
+ * the Switcher. */
: "memory", "%edx", "%ecx", "%edi", "%esi");
}
+/*:*/
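
For readers new to GCC's extended asm, here is a trivial, self-contained illustration of the same constraint idiom used above ("=" marks an output, a digit ties an input to that output's register, and the clobber list names anything else the asm may touch). It is only an illustration, not lguest code:

    static inline unsigned int double_it(unsigned int x)
    {
            unsigned int out;

            /* Output 0 is "out"; the "0" constraint forces the input x
             * into the same register, so the asm doubles it in place. */
            asm("addl %0, %0" : "=r"(out) : "0"(x) : "cc");
            return out;
    }
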
+/*H:030 Let's jump straight to the main loop which runs the Guest.
+ * Remember, this is called by the Launcher reading /dev/lguest, and we keep
+ * going around and around until something interesting happens. */
int run_guest(struct lguest *lg, unsigned long __user *user)
{
+ /* We stop running once the Guest is dead. */
while (!lg->dead) {
+ /* We need to initialize this, otherwise gcc complains. It's
+ * not (yet) clever enough to see that it's initialized when we
+ * need it. */
unsigned int cr2 = 0; /* Damn gcc */
- /* Hypercalls first: we might have been out to userspace */
+ /* First we run any hypercalls the Guest wants done: either in
+ * the hypercall ring in "struct lguest_data", or directly by
+ * using int 31 (LGUEST_TRAP_ENTRY). */
do_hypercalls(lg);
+ /* It's possible the Guest did a SEND_DMA hypercall to the
+ * Launcher, in which case we return from the read() now. */
if (lg->dma_is_pending) {
if (put_user(lg->pending_dma, user) ||
put_user(lg->pending_key, user+1))
@@ -312,6 +502,7 @@ int run_guest(struct lguest *lg, unsigned long __user *user)
return sizeof(unsigned long)*2;
}
+ /* Check for signals */
if (signal_pending(current))
return -ERESTARTSYS;
@@ -319,77 +510,154 @@ int run_guest(struct lguest *lg, unsigned long __user *user)
if (lg->break_out)
return -EAGAIN;
+ /* Check if there are any interrupts which can be delivered
+ * now: if so, this sets up the handler to be executed when we
+ * next run the Guest. */
maybe_do_interrupt(lg);
+ /* All long-lived kernel loops need to check with this horrible
+ * thing called the freezer. If the Host is trying to suspend,
+ * it stops us. */
try_to_freeze();
+ /* Just make absolutely sure the Guest is still alive. One of
+ * those hypercalls could have been fatal, for example. */
if (lg->dead)
break;
+ /* If the Guest asked to be stopped, we sleep. The Guest's
+ * clock timer or LHCALL_BREAK from the Waker will wake us. */
if (lg->halted) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
continue;
}
+ /* OK, now we're ready to jump into the Guest. First we put up
+ * the "Do Not Disturb" sign: */
local_irq_disable();
- /* Even if *we* don't want FPU trap, guest might... */
+ /* Remember the awfully-named TS bit? If the Guest has asked
+ * to set it we set it now, so we can trap and pass that trap
+ * to the Guest if it uses the FPU. */
if (lg->ts)
set_ts();
- /* Don't let Guest do SYSENTER: we can't handle it. */
+ /* SYSENTER is an optimized way of doing system calls. We
+ * can't allow it because it always jumps to privilege level 0.
+ * A normal Guest won't try it because we don't advertise it in
+ * CPUID, but a malicious Guest (or malicious Guest userspace
+ * program) could, so we tell the CPU to disable it before
+ * running the Guest. */
if (boot_cpu_has(X86_FEATURE_SEP))
wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
+ /* Now we actually run the Guest. It will pop back out when
+ * something interesting happens, and we can examine its
+ * registers to see what it was doing. */
run_guest_once(lg, lguest_pages(raw_smp_processor_id()));
- /* Save cr2 now if we page-faulted. */
+ /* The "regs" pointer contains two extra entries which are not
+ * really registers: a trap number which says what interrupt or
+ * trap made the switcher code come back, and an error code
+ * which some traps set. */
+
+ /* If the Guest page faulted, then the cr2 register will tell
+ * us the bad virtual address. We have to grab this now,
+ * because once we re-enable interrupts an interrupt could
+ * fault and thus overwrite cr2, or we could even move off to a
+ * different CPU. */
if (lg->regs->trapnum == 14)
cr2 = read_cr2();
+ /* Similarly, if we took a trap because the Guest used the FPU,
+ * we have to restore the FPU it expects to see. */
else if (lg->regs->trapnum == 7)
math_state_restore();
+ /* Restore SYSENTER if it's supposed to be on. */
if (boot_cpu_has(X86_FEATURE_SEP))
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+
+ /* Now we're ready to be interrupted or moved to other CPUs */
local_irq_enable();
+ /* OK, so what happened? */
switch (lg->regs->trapnum) {
case 13: /* We've intercepted a GPF. */
+ /* Check if this was one of those annoying IN or OUT
+ * instructions which we need to emulate. If so, we
+ * just go back into the Guest after we've done it. */
if (lg->regs->errcode == 0) {
if (emulate_insn(lg))
continue;
}
break;
case 14: /* We've intercepted a page fault. */
+ /* The Guest accessed a virtual address that wasn't
+ * mapped. This happens a lot: we don't actually set
+ * up most of the page tables for the Guest at all when
+ * we start: as it runs it asks for more and more, and
+ * we set them up as required. In this case, we don't
+ * even tell the Guest that the fault happened.
+ *
+ * The errcode tells whether this was a read or a
+ * write, and whether kernel or userspace code. */
if (demand_page(lg, cr2, lg->regs->errcode))
continue;
- /* If lguest_data is NULL, this won't hurt. */
+ /* OK, it's really not there (or not OK): the Guest
+ * needs to know. We write out the cr2 value so it
+ * knows where the fault occurred.
+ *
+ * Note that if the Guest were really messed up, this
+ * could happen before it's done the INITIALIZE
+ * hypercall, so lg->lguest_data will be NULL, so
+ * &lg->lguest_data->cr2 will be address 8. Writing
+ * into that address won't hurt the Host at all,
+ * though. */
if (put_user(cr2, &lg->lguest_data->cr2))
kill_guest(lg, "Writing cr2");
break;
case 7: /* We've intercepted a Device Not Available fault. */
- /* If they don't want to know, just absorb it. */
+ /* If the Guest doesn't want to know, we already
+ * restored the Floating Point Unit, so we just
+ * continue without telling it. */
if (!lg->ts)
continue;
break;
- case 32 ... 255: /* Real interrupt, fall thru */
+ case 32 ... 255:
+ /* These values mean a real interrupt occurred, in
+ * which case the Host handler has already been run.
+ * We just do a friendly check if another process
+ * should now be run, then fall through to loop
+ * around: */
cond_resched();
case LGUEST_TRAP_ENTRY: /* Handled at top of loop */
continue;
}
+ /* If we get here, it's a trap the Guest wants to know
+ * about. */
if (deliver_trap(lg, lg->regs->trapnum))
continue;
+ /* If the Guest doesn't have a handler (either it hasn't
+ * registered any yet, or it's one of the faults we don't let
+ * it handle), it dies with a cryptic error message. */
kill_guest(lg, "unhandled trap %li at %#lx (%#lx)",
lg->regs->trapnum, lg->regs->eip,
lg->regs->trapnum == 14 ? cr2 : lg->regs->errcode);
}
+ /* The Guest is dead => "No such file or directory" */
return -ENOENT;
}
+/* Now we can look at each of the routines this calls, in increasing order of
+ * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(),
+ * deliver_trap() and demand_page(). After all those, we'll be ready to
+ * examine the Switcher, and our philosophical understanding of the Host/Guest
+ * duality will be complete. :*/
+
int find_free_guest(void)
{
unsigned int i;
@@ -407,55 +675,96 @@ static void adjust_pge(void *on)
write_cr4(read_cr4() & ~X86_CR4_PGE);
}
+/*H:000
+ * Welcome to the Host!
+ *
+ * By this point your brain has been tickled by the Guest code and numbed by
+ * the Launcher code; prepare for it to be stretched by the Host code. This is
+ * the heart. Let's begin at the initialization routine for the Host's lg
+ * module.
+ */
static int __init init(void)
{
int err;
+ /* Lguest can't run under Xen, VMI or itself. It does Tricky Stuff. */
if (paravirt_enabled()) {
printk("lguest is afraid of %s\n", paravirt_ops.name);
return -EPERM;
}
+ /* First we put the Switcher up in very high virtual memory. */
err = map_switcher();
if (err)
return err;
+ /* Now we set up the pagetable implementation for the Guests. */
err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES);
if (err) {
unmap_switcher();
return err;
}
+
+ /* The I/O subsystem needs some things initialized. */
lguest_io_init();
+ /* /dev/lguest needs to be registered. */
err = lguest_device_init();
if (err) {
free_pagetables();
unmap_switcher();
return err;
}
+
+ /* Finally, we need to turn off "Page Global Enable". PGE is an
+ * optimization where page table entries are specially marked to show
+ * they never change. The Host kernel marks all the kernel pages this
+ * way because it's always present, even when userspace is running.
+ *
+ * Lguest breaks this: unbeknownst to the rest of the Host kernel, we
+ * switch to the Guest kernel. If you don't disable this on all CPUs,
+ * you'll get really weird bugs that you'll chase for two days.
+ *
+ * I used to turn PGE off every time we switched to the Guest and back
+ * on when we return, but that slowed the Switcher down noticeably. */
+
+ /* We don't need the complexity of CPUs coming and going while we're
+ * doing this. */
lock_cpu_hotplug();
if (cpu_has_pge) { /* We have a broader idea of "global". */
+ /* Remember that this was originally set (for cleanup). */
cpu_had_pge = 1;
+ /* adjust_pge is a helper function which sets or unsets the PGE
+ * bit on its CPU, depending on the argument (0 == unset). */
on_each_cpu(adjust_pge, (void *)0, 0, 1);
+ /* Turn off the feature in the global feature set. */
clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
}
unlock_cpu_hotplug();
+
+ /* All good! */
return 0;
}
+/* Cleaning up is just the same code, backwards. With a little French. */
static void __exit fini(void)
{
lguest_device_remove();
free_pagetables();
unmap_switcher();
+
+ /* If we had PGE before we started, turn it back on now. */
lock_cpu_hotplug();
if (cpu_had_pge) {
set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
+ /* adjust_pge's argument "1" means set PGE. */
on_each_cpu(adjust_pge, (void *)1, 0, 1);
}
unlock_cpu_hotplug();
}
+/* The Host side of lguest can be a module. This is a nice way for people to
+ * play with it. */
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index ea52ca451f7..db6caace3b9 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -1,5 +1,10 @@
-/* Actual hypercalls, which allow guests to actually do something.
- Copyright (C) 2006 Rusty Russell IBM Corporation
+/*P:500 Just as userspace programs request kernel operations through a system
+ * call, the Guest requests Host operations through a "hypercall". You might
+ * notice this nomenclature doesn't really follow any logic, but the name has
+ * been around for long enough that we're stuck with it. As you'd expect, this
+ * code is basically one big switch statement. :*/
+
+/* Copyright (C) 2006 Rusty Russell IBM Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -23,37 +28,55 @@
#include <irq_vectors.h>
#include "lg.h"
+/*H:120 This is the core hypercall routine: where the Guest gets what it
+ * wants. Or gets killed. Or, in the case of LHCALL_CRASH, both.
+ *
+ * Remember from the Guest: %eax == which call to make, and the arguments are
+ * packed into %edx, %ebx and %ecx if needed. */
static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
{
switch (regs->eax) {
case LHCALL_FLUSH_ASYNC:
+ /* This call does nothing, except by breaking out of the Guest
+ * it makes us process all the asynchronous hypercalls. */
break;
case LHCALL_LGUEST_INIT:
+ /* You can't get here unless you're already initialized. Don't
+ * do that. */
kill_guest(lg, "already have lguest_data");
break;
case LHCALL_CRASH: {
+ /* Crash is such a trivial hypercall that we do it in four
+ * lines right here. */
char msg[128];
+ /* If the lgread fails, it will call kill_guest() itself; the
+ * kill_guest() with the message will be ignored. */
lgread(lg, msg, regs->edx, sizeof(msg));
msg[sizeof(msg)-1] = '\0';
kill_guest(lg, "CRASH: %s", msg);
break;
}
case LHCALL_FLUSH_TLB:
+ /* FLUSH_TLB comes in two flavors, depending on the
+ * argument: */
if (regs->edx)
guest_pagetable_clear_all(lg);
else
guest_pagetable_flush_user(lg);
break;
- case LHCALL_GET_WALLCLOCK: {
- struct timespec ts;
- ktime_get_real_ts(&ts);
- regs->eax = ts.tv_sec;
- break;
- }
case LHCALL_BIND_DMA:
+ /* BIND_DMA really wants four arguments, but it's the only call
+ * which does. So the Guest packs the number of buffers and
+ * the interrupt number into the final argument, and we decode
+ * it here. This can legitimately fail, since we currently
+ * place a limit on the number of DMA pools a Guest can have.
+ * So we return true or false from this call. */
regs->eax = bind_dma(lg, regs->edx, regs->ebx,
regs->ecx >> 8, regs->ecx & 0xFF);
break;
+
+ /* All these calls simply pass the arguments through to the right
+ * routines. */
case LHCALL_SEND_DMA:
send_dma(lg, regs->edx, regs->ebx);
break;
@@ -81,10 +104,13 @@ static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
case LHCALL_SET_CLOCKEVENT:
guest_set_clockevent(lg, regs->edx);
break;
+
case LHCALL_TS:
+ /* This sets the TS flag, as we saw used in run_guest(). */
lg->ts = regs->edx;
break;
case LHCALL_HALT:
+ /* Similarly, this sets the halted flag for run_guest(). */
lg->halted = 1;
break;
default:
@@ -92,25 +118,42 @@ static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
}
}
-/* We always do queued calls before actual hypercall. */
+/* Asynchronous hypercalls are easy: we just look in the array in the Guest's
+ * "struct lguest_data" and see if there are any new ones marked "ready".
+ *
+ * We are careful to do these in order: obviously we respect the order the
+ * Guest put them in the ring, but we also promise the Guest that they will
+ * happen before any normal hypercall (which is why we check this before
+ * checking for a normal hcall). */
static void do_async_hcalls(struct lguest *lg)
{
unsigned int i;
u8 st[LHCALL_RING_SIZE];
+ /* For simplicity, we copy the entire call status array in at once. */
if (copy_from_user(&st, &lg->lguest_data->hcall_status, sizeof(st)))
return;
+
+ /* We process "struct lguest_data"s hcalls[] ring once. */
for (i = 0; i < ARRAY_SIZE(st); i++) {
struct lguest_regs regs;
+ /* We remember where we were up to from last time. This makes
+ * sure that the hypercalls are done in the order the Guest
+ * places them in the ring. */
unsigned int n = lg->next_hcall;
+ /* 0xFF means there's no call here (yet). */
if (st[n] == 0xFF)
break;
+ /* OK, we have a hypercall. Increment the "next_hcall" cursor,
+ * and wrap back to 0 if we reach the end. */
if (++lg->next_hcall == LHCALL_RING_SIZE)
lg->next_hcall = 0;
+ /* We copy the hypercall arguments into a fake register
+ * structure. This makes life simple for do_hcall(). */
if (get_user(regs.eax, &lg->lguest_data->hcalls[n].eax)
|| get_user(regs.edx, &lg->lguest_data->hcalls[n].edx)
|| get_user(regs.ecx, &lg->lguest_data->hcalls[n].ecx)
@@ -119,74 +162,139 @@ static void do_async_hcalls(struct lguest *lg)
break;
}
+ /* Do the hypercall, same as a normal one. */
do_hcall(lg, &regs);
+
+ /* Mark the hypercall done. */
if (put_user(0xFF, &lg->lguest_data->hcall_status[n])) {
kill_guest(lg, "Writing result for async hypercall");
break;
}
+ /* Stop doing hypercalls if we've just done a DMA to the
+ * Launcher: it needs to service this first. */
if (lg->dma_is_pending)
break;
}
}
+/* Last of all, we look at what happens first of all. The very first time the
+ * Guest makes a hypercall, we end up here to set things up: */
static void initialize(struct lguest *lg)
{
u32 tsc_speed;
+ /* You can't do anything until you're initialized. The Guest knows the
+ * rules, so we're unforgiving here. */
if (lg->regs->eax != LHCALL_LGUEST_INIT) {
kill_guest(lg, "hypercall %li before LGUEST_INIT",
lg->regs->eax);
return;
}
- /* We only tell the guest to use the TSC if it's reliable. */
+ /* We insist that the Time Stamp Counter exists and doesn't change with
+ * cpu frequency. Some devious chip manufacturers decided that TSC
+ * changes could be handled in software. I decided that time going
+ * backwards might be good for benchmarks, but it's bad for users.
+ *
+ * We also insist that the TSC be stable: the kernel detects unreliable
+ * TSCs for its own purposes, and we use that here. */
if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
tsc_speed = tsc_khz;
else
tsc_speed = 0;
+ /* The pointer to the Guest's "struct lguest_data" is the only
+ * argument. */
lg->lguest_data = (struct lguest_data __user *)lg->regs->edx;
- /* We check here so we can simply copy_to_user/from_user */
+ /* If we check the address they gave is OK now, we can simply
+ * copy_to_user/from_user from now on rather than using lgread/lgwrite.
+ * I put this in to show that I'm not immune to writing stupid
+ * optimizations. */
if (!lguest_address_ok(lg, lg->regs->edx, sizeof(*lg->lguest_data))) {
kill_guest(lg, "bad guest page %p", lg->lguest_data);
return;
}
+ /* The Guest tells us where we're not to deliver interrupts by putting
+ * the range of addresses into "struct lguest_data". */
if (get_user(lg->noirq_start, &lg->lguest_data->noirq_start)
|| get_user(lg->noirq_end, &lg->lguest_data->noirq_end)
- /* We reserve the top pgd entry. */
+ /* We tell the Guest that it can't use the top 4MB of virtual
+ * addresses used by the Switcher. */
|| put_user(4U*1024*1024, &lg->lguest_data->reserve_mem)
|| put_user(tsc_speed, &lg->lguest_data->tsc_khz)
+ /* We also give the Guest a unique id, as used in lguest_net.c. */
|| put_user(lg->guestid, &lg->lguest_data->guestid))
kill_guest(lg, "bad guest page %p", lg->lguest_data);
- /* This is the one case where the above accesses might have
- * been the first write to a Guest page. This may have caused
- * a copy-on-write fault, but the Guest might be referring to
- * the old (read-only) page. */
+ /* We write the current time into the Guest's data page once now. */
+ write_timestamp(lg);
+
+ /* This is the one case where the above accesses might have been the
+ * first write to a Guest page. This may have caused a copy-on-write
+ * fault, but the Guest might be referring to the old (read-only)
+ * page. */
guest_pagetable_clear_all(lg);
}
+/* Now we've examined the hypercall code; our Guest can make requests. There
+ * is one other way we can do things for the Guest, as we see in
+ * emulate_insn(). */
-/* Even if we go out to userspace and come back, we don't want to do
- * the hypercall again. */
+/*H:110 Tricky point: we mark the hypercall as "done" once we've done it.
+ * Normally we don't need to do this: the Guest will run again and update the
+ * trap number before we come back around the run_guest() loop to
+ * do_hypercalls().
+ *
+ * However, if we are signalled or the Guest sends DMA to the Launcher, that
+ * loop will exit without running the Guest. When it comes back it would try
+ * to re-run the hypercall. */
static void clear_hcall(struct lguest *lg)
{
lg->regs->trapnum = 255;
}
+/*H:100
+ * Hypercalls
+ *
+ * Remember from the Guest, hypercalls come in two flavors: normal and
+ * asynchronous. This file handles both types.
+ */
void do_hypercalls(struct lguest *lg)
{
+ /* Not initialized yet? */
if (unlikely(!lg->lguest_data)) {
+ /* Did the Guest make a hypercall? We might have come back for
+ * some other reason (an interrupt, a different trap). */
if (lg->regs->trapnum == LGUEST_TRAP_ENTRY) {
+ /* Set up the "struct lguest_data" */
initialize(lg);
+ /* The hypercall is done. */
clear_hcall(lg);
}
return;
}
+ /* The Guest has initialized.
+ *
+ * Look in the hypercall ring for the async hypercalls: */
do_async_hcalls(lg);
+
+ /* If we stopped reading the hypercall ring because the Guest did a
+ * SEND_DMA to the Launcher, we want to return now. Otherwise if the
+ * Guest asked us to do a hypercall, we do it. */
if (!lg->dma_is_pending && lg->regs->trapnum == LGUEST_TRAP_ENTRY) {
do_hcall(lg, lg->regs);
+ /* The hypercall is done. */
clear_hcall(lg);
}
}
+
+/* This routine supplies the Guest with time: it's used for wallclock time at
+ * initial boot and as a rough time source if the TSC isn't available. */
+void write_timestamp(struct lguest *lg)
+{
+ struct timespec now;
+ ktime_get_real_ts(&now);
+ if (put_user(now, &lg->lguest_data->time))
+ kill_guest(lg, "Writing timestamp");
+}
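
For orientation before moving on: the register convention described in the H:120 comment above means the Guest side of a synchronous hypercall looks roughly like the following sketch (based on that convention and on the "int 31 (LGUEST_TRAP_ENTRY)" note in run_guest(); the real routine lives in the Guest's lguest.c):

    /* Sketch of a Guest-side hypercall: %eax picks the call, %edx/%ebx/%ecx
     * carry the arguments, and the software interrupt lands us in
     * do_hcall().  0x1F is LGUEST_TRAP_ENTRY (trap 31). */
    static inline unsigned long hcall(unsigned long call, unsigned long arg1,
                                      unsigned long arg2, unsigned long arg3)
    {
            asm volatile("int $0x1F"
                         : "=a"(call)
                         : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
                         : "memory");
            return call;
    }
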
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index bee029bb2c7..49787e964a0 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -1,100 +1,160 @@
+/*P:800 Interrupts (traps) are complicated enough to earn their own file.
+ * There are three classes of interrupts:
+ *
+ * 1) Real hardware interrupts which occur while we're running the Guest,
+ * 2) Interrupts for virtual devices attached to the Guest, and
+ * 3) Traps and faults from the Guest.
+ *
+ * Real hardware interrupts must be delivered to the Host, not the Guest.
+ * Virtual interrupts must be delivered to the Guest, but we make them look
+ * just like real hardware would deliver them. Traps from the Guest can be set
+ * up to go directly back into the Guest, but sometimes the Host wants to see
+ * them first, so we also have a way of "reflecting" them into the Guest as if
+ * they had been delivered to it directly. :*/
#include <linux/uaccess.h>
#include "lg.h"
+/* The address of the interrupt handler is split into two bits: */
static unsigned long idt_address(u32 lo, u32 hi)
{
return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
}
+/* The "type" of the interrupt handler is a 4 bit field: we only support a
+ * couple of types. */
static int idt_type(u32 lo, u32 hi)
{
return (hi >> 8) & 0xF;
}
+/* An IDT entry can't be used unless the "present" bit is set. */
static int idt_present(u32 lo, u32 hi)
{
return (hi & 0x8000);
}
+/* We need a helper to "push" a value onto the Guest's stack, since that's a
+ * big part of what delivering an interrupt does. */
static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
{
+ /* The stack grows downwards: move the stack pointer, then write the value. */
*gstack -= 4;
lgwrite_u32(lg, *gstack, val);
}
+/*H:210 The set_guest_interrupt() routine actually delivers the interrupt or
+ * trap. The mechanics of delivering traps and interrupts to the Guest are the
+ * same, except some traps have an "error code" which gets pushed onto the
+ * stack as well: the caller tells us if this is one.
+ *
+ * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
+ * interrupt or trap. It's split into two parts for traditional reasons: gcc
+ * on i386 used to be frightened by 64 bit numbers.
+ *
+ * We set up the stack just like the CPU does for a real interrupt, so it's
+ * identical for the Guest (and the standard "iret" instruction will undo
+ * it). */
static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
{
unsigned long gstack;
u32 eflags, ss, irq_enable;
- /* If they want a ring change, we use new stack and push old ss/esp */
+ /* There are two cases for interrupts: one where the Guest is already
+ * in the kernel, and a more complex one where the Guest is in
+ * userspace. We check the privilege level to find out. */
if ((lg->regs->ss&0x3) != GUEST_PL) {
+ /* The Guest told us their kernel stack with the SET_STACK
+ * hypercall: both the virtual address and the segment */
gstack = guest_pa(lg, lg->esp1);
ss = lg->ss1;
+ /* We push the old stack segment and pointer onto the new
+ * stack: when the Guest does an "iret" back from the interrupt
+ * handler the CPU will notice they're dropping privilege
+ * levels and expect these here. */
push_guest_stack(lg, &gstack, lg->regs->ss);
push_guest_stack(lg, &gstack, lg->regs->esp);
} else {
+ /* We're staying on the same Guest (kernel) stack. */
gstack = guest_pa(lg, lg->regs->esp);
ss = lg->regs->ss;
}
- /* We use IF bit in eflags to indicate whether irqs were enabled
- (it's always 1, since irqs are enabled when guest is running). */
+ /* Remember that we never let the Guest actually disable interrupts, so
+ * the "Interrupt Flag" bit is always set. We copy that bit from the
+ * Guest's "irq_enabled" field into the eflags word: the Guest copies
+ * it back in "lguest_iret". */
eflags = lg->regs->eflags;
if (get_user(irq_enable, &lg->lguest_data->irq_enabled) == 0
&& !(irq_enable & X86_EFLAGS_IF))
eflags &= ~X86_EFLAGS_IF;
+ /* An interrupt is expected to push three things on the stack: the old
+ * "eflags" word, the old code segment, and the old instruction
+ * pointer. */
push_guest_stack(lg, &gstack, eflags);
push_guest_stack(lg, &gstack, lg->regs->cs);
push_guest_stack(lg, &gstack, lg->regs->eip);
+ /* For the six traps which supply an error code, we push that, too. */
if (has_err)
push_guest_stack(lg, &gstack, lg->regs->errcode);
- /* Change the real stack so switcher returns to trap handler */
+ /* Now we've pushed all the old state, we change the stack, the code
+ * segment and the address to execute. */
lg->regs->ss = ss;
lg->regs->esp = gstack + lg->page_offset;
lg->regs->cs = (__KERNEL_CS|GUEST_PL);
lg->regs->eip = idt_address(lo, hi);
- /* Disable interrupts for an interrupt gate. */
+ /* There are two kinds of interrupt handlers: 0xE is an "interrupt
+ * gate" which expects interrupts to be disabled on entry. */
if (idt_type(lo, hi) == 0xE)
if (put_user(0, &lg->lguest_data->irq_enabled))
kill_guest(lg, "Disabling interrupts");
}
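
Putting those pushes together: after a ring change with an error code, the Guest's handler starts with a stack frame shaped roughly like this (highest address first, since each push moves the pointer down; the ss/esp pair and the error code are only present in the cases described above):

    [ old %ss     ]   only pushed on a privilege-level change
    [ old %esp    ]   likewise
    [ eflags      ]   IF copied from the Guest's irq_enabled field
    [ old %cs     ]
    [ old %eip    ]
    [ error code  ]   only for the traps which supply one
                  <-- new %esp; the Guest's "iret" unwinds all of this
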
+/*H:200
+ * Virtual Interrupts.
+ *
+ * maybe_do_interrupt() gets called before every entry to the Guest, to see if
+ * we should divert the Guest to running an interrupt handler. */
void maybe_do_interrupt(struct lguest *lg)
{
unsigned int irq;
DECLARE_BITMAP(blk, LGUEST_IRQS);
struct desc_struct *idt;
+ /* If the Guest hasn't even initialized yet, we can do nothing. */
if (!lg->lguest_data)
return;
- /* Mask out any interrupts they have blocked. */
+ /* Take our "irqs_pending" array and remove any interrupts the Guest
+ * wants blocked: the result ends up in "blk". */
if (copy_from_user(&blk, lg->lguest_data->blocked_interrupts,
sizeof(blk)))
return;
bitmap_andnot(blk, lg->irqs_pending, blk, LGUEST_IRQS);
+ /* Find the first interrupt. */
irq = find_first_bit(blk, LGUEST_IRQS);
+ /* None? Nothing to do */
if (irq >= LGUEST_IRQS)
return;
+ /* They may be in the middle of an iret, where they asked us never to
+ * deliver interrupts. */
if (lg->regs->eip >= lg->noirq_start && lg->regs->eip < lg->noirq_end)
return;
- /* If they're halted, we re-enable interrupts. */
+ /* If they're halted, interrupts restart them. */
if (lg->halted) {
/* Re-enable interrupts. */
if (put_user(X86_EFLAGS_IF, &lg->lguest_data->irq_enabled))
kill_guest(lg, "Re-enabling interrupts");
lg->halted = 0;
} else {
- /* Maybe they have interrupts disabled? */
+ /* Otherwise we check if they have interrupts disabled. */
u32 irq_enabled;
if (get_user(irq_enabled, &lg->lguest_data->irq_enabled))
irq_enabled = 0;
@@ -102,112 +162,218 @@ void maybe_do_interrupt(struct lguest *lg)
return;
}
+ /* Look at the IDT entry the Guest gave us for this interrupt. The
+ * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
+ * over them. */
idt = &lg->idt[FIRST_EXTERNAL_VECTOR+irq];
+ /* If they don't have a handler (yet?), we just ignore it */
if (idt_present(idt->a, idt->b)) {
+ /* OK, mark it no longer pending and deliver it. */
clear_bit(irq, lg->irqs_pending);
+ /* set_guest_interrupt() takes the interrupt descriptor and a
+ * flag to say whether this interrupt pushes an error code onto
+ * the stack as well: virtual interrupts never do. */
set_guest_interrupt(lg, idt->a, idt->b, 0);
}
+
+ /* Every time we deliver an interrupt, we update the timestamp in the
+ * Guest's lguest_data struct. It would be better for the Guest if we
+ * did this more often, but it can actually be quite slow: doing it
+ * here is a compromise which means at least it gets updated every
+ * timer interrupt. */
+ write_timestamp(lg);
}
+/*H:220 Now we've got the routines to deliver interrupts, delivering traps
+ * like page fault is easy. The only trick is that Intel decided that some
+ * traps should have error codes: */
static int has_err(unsigned int trap)
{
return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17);
}
+/* deliver_trap() returns true if it could deliver the trap. */
int deliver_trap(struct lguest *lg, unsigned int num)
{
u32 lo = lg->idt[num].a, hi = lg->idt[num].b;
+ /* Early on the Guest hasn't set the IDT entries (or maybe it put a
+ * bogus one in): if we fail here, the Guest will be killed. */
if (!idt_present(lo, hi))
return 0;
set_guest_interrupt(lg, lo, hi, has_err(num));
return 1;
}
+/*H:250 Here's the hard part: returning to the Host every time a trap happens
+ * and then calling deliver_trap() and re-entering the Guest is slow.
+ * Particularly because Guest userspace system calls are traps (trap 128).
+ *
+ * So we'd like to set up the IDT to tell the CPU to deliver traps directly
+ * into the Guest. This is possible, but the complexities cause the size of
+ * this file to double! However, 150 lines of code is worth writing for taking
+ * system calls down from 1750ns to 270ns. Plus, if lguest didn't do it, all
+ * the other hypervisors would tease it.
+ *
+ * This routine determines if a trap can be delivered directly. */
static int direct_trap(const struct lguest *lg,
const struct desc_struct *trap,
unsigned int num)
{
- /* Hardware interrupts don't go to guest (except syscall). */
+ /* Hardware interrupts don't go to the Guest at all (except system
+ * call). */
if (num >= FIRST_EXTERNAL_VECTOR && num != SYSCALL_VECTOR)
return 0;
- /* We intercept page fault (demand shadow paging & cr2 saving)
- protection fault (in/out emulation) and device not
- available (TS handling), and hypercall */
+ /* The Host needs to see page faults (for shadow paging and to save the
+ * fault address), general protection faults (in/out emulation) and
+ * device not available (TS handling), and of course, the hypercall
+ * trap. */
if (num == 14 || num == 13 || num == 7 || num == LGUEST_TRAP_ENTRY)
return 0;
- /* Interrupt gates (0xE) or not present (0x0) can't go direct. */
+ /* Only trap gates (type 15) can go direct to the Guest. Interrupt
+ * gates (type 14) disable interrupts as they are entered, which we
+ * never let the Guest do. Not present entries (type 0x0) also can't
+ * go direct, of course 8) */
return idt_type(trap->a, trap->b) == 0xF;
}
-
+/*:*/
+
+/*M:005 The Guest has the ability to turn its interrupt gates into trap gates,
+ * if it is careful. The Host will let trap gates can go directly to the
+ * Guest, but the Guest needs the interrupts atomically disabled for an
+ * interrupt gate. It can do this by pointing the trap gate at instructions
+ * within noirq_start and noirq_end, where it can safely disable interrupts. */
+
+/*M:006 The Guests do not use the sysenter (fast system call) instruction,
+ * because it's hardcoded to enter privilege level 0 and so can't go direct.
+ * It's about twice as fast as the older "int 0x80" system call, so it might
+ * still be worthwhile to handle it in the Switcher and lcall down to the
+ * Guest. The sysenter semantics are hairy tho: search for that keyword in
+ * entry.S :*/
+
+/*H:260 When we make traps go directly into the Guest, we need to make sure
+ * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the
+ * CPU trying to deliver the trap will fault while trying to push the interrupt
+ * words on the stack: this is called a double fault, and it forces us to kill
+ * the Guest.
+ *
+ * Which is deeply unfair, because (literally!) it wasn't the Guest's fault. */
void pin_stack_pages(struct lguest *lg)
{
unsigned int i;
+ /* Depending on the CONFIG_4KSTACKS option, the Guest can have one or
+ * two pages of stack space. */
for (i = 0; i < lg->stack_pages; i++)
+ /* The stack grows *downwards*, hence the subtraction */
pin_page(lg, lg->esp1 - i * PAGE_SIZE);
}
+/* Direct traps also mean that we need to know whenever the Guest wants to use
+ * a different kernel stack, so we can change the IDT entries to use that
+ * stack. The IDT entries expect a virtual address, so unlike most addresses
+ * the Guest gives us, the "esp" (stack pointer) value here is virtual, not
+ * physical.
+ *
+ * In Linux each process has its own kernel stack, so this happens a lot: we
+ * change stacks on each context switch. */
void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages)
{
- /* You cannot have a stack segment with priv level 0. */
+ /* You are not allowed to have a stack segment with privilege level 0: bad
+ * Guest! */
if ((seg & 0x3) != GUEST_PL)
kill_guest(lg, "bad stack segment %i", seg);
+ /* We only expect one or two stack pages. */
if (pages > 2)
kill_guest(lg, "bad stack pages %u", pages);
+ /* Save where the stack is, and how many pages */
lg->ss1 = seg;
lg->esp1 = esp;
lg->stack_pages = pages;
+ /* Make sure the new stack pages are mapped */
pin_stack_pages(lg);
}
-/* Set up trap in IDT. */
+/* All these references to mapping stacks lead us neatly into the other complex
+ * part of the Host: page table handling. */
+
+/*H:235 This is the routine which actually checks the Guest's IDT entry and
+ * transfers it into our entry in "struct lguest": */
static void set_trap(struct lguest *lg, struct desc_struct *trap,
unsigned int num, u32 lo, u32 hi)
{
u8 type = idt_type(lo, hi);
+ /* We zero-out a not-present entry */
if (!idt_present(lo, hi)) {
trap->a = trap->b = 0;
return;
}
+ /* We only support interrupt and trap gates. */
if (type != 0xE && type != 0xF)
kill_guest(lg, "bad IDT type %i", type);
+ /* We only copy the handler address, present bit, privilege level and
+ * type. The privilege level controls who is allowed to trigger the trap
+ * manually with an "int" instruction. This is usually GUEST_PL,
+ * except for system calls which userspace can use. */
trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF);
trap->b = (hi&0xFFFFEF00);
}
+/*H:230 While we're here, dealing with delivering traps and interrupts to the
+ * Guest, we might as well complete the picture: how the Guest tells us where
+ * it wants them to go. This would be simple, except making traps fast
+ * requires some tricks.
+ *
+ * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the
+ * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. */
void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
{
- /* Guest never handles: NMI, doublefault, hypercall, spurious irq. */
+ /* Guest never handles: NMI, doublefault, spurious interrupt or
+ * hypercall. We ignore when it tries to set them. */
if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY)
return;
+ /* Mark the IDT as changed: next time the Guest runs we'll know we have
+ * to copy this again. */
lg->changed |= CHANGED_IDT;
+
+ /* The IDT which we keep in "struct lguest" only contains 32 entries
+ * for the traps and LGUEST_IRQS (32) entries for interrupts. We
+ * ignore attempts to set handlers for higher interrupt numbers, except
+ * for the system call "interrupt" at 128: we have a special IDT entry
+ * for that. */
if (num < ARRAY_SIZE(lg->idt))
set_trap(lg, &lg->idt[num], num, lo, hi);
else if (num == SYSCALL_VECTOR)
set_trap(lg, &lg->syscall_idt, num, lo, hi);
}
+/* The default entry for each interrupt points into the Switcher routines which
+ * simply return to the Host. The run_guest() loop will then call
+ * deliver_trap() to bounce it back into the Guest. */
static void default_idt_entry(struct desc_struct *idt,
int trap,
const unsigned long handler)
{
+ /* A present interrupt gate. */
u32 flags = 0x8e00;
- /* They can't "int" into any of them except hypercall. */
+ /* Set the privilege level on the entry for the hypercall: this allows
+ * the Guest to use the "int" instruction to trigger it. */
if (trap == LGUEST_TRAP_ENTRY)
flags |= (GUEST_PL << 13);
+ /* Now pack it into the IDT entry in its weird format. */
idt->a = (LGUEST_CS<<16) | (handler&0x0000FFFF);
idt->b = (handler&0xFFFF0000) | flags;
}
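
To make that "weird format" concrete, here is a standalone sketch (not lguest code) of a round trip through the encoding; the handler address and the 0x0060 selector are made-up values standing in for a real handler and LGUEST_CS:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t handler  = 0xC0102468u;  /* hypothetical handler address */
            uint32_t selector = 0x0060u;      /* stand-in for LGUEST_CS */
            uint32_t flags    = 0x8e00u;      /* present, DPL 0, interrupt gate */

            /* Pack the two halves the way default_idt_entry() does... */
            uint32_t a = (selector << 16) | (handler & 0x0000FFFFu);
            uint32_t b = (handler & 0xFFFF0000u) | flags;

            /* ...and pull them apart again the way idt_address(), idt_type()
             * and idt_present() do. */
            printf("address=%#x type=%#x present=%d\n",
                   (a & 0x0000FFFFu) | (b & 0xFFFF0000u),
                   (b >> 8) & 0xF, !!(b & 0x8000u));
            return 0;
    }
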
+/* When the Guest first starts, we put default entries into the IDT. */
void setup_default_idt_entries(struct lguest_ro_state *state,
const unsigned long *def)
{
@@ -217,19 +383,25 @@ void setup_default_idt_entries(struct lguest_ro_state *state,
default_idt_entry(&state->guest_idt[i], i, def[i]);
}
+/*H:240 We don't use the IDT entries in the "struct lguest" directly, instead
+ * we copy them into the IDT which we've set up for Guests on this CPU, just
+ * before we run the Guest. This routine does that copy. */
void copy_traps(const struct lguest *lg, struct desc_struct *idt,
const unsigned long *def)
{
unsigned int i;
- /* All hardware interrupts are same whatever the guest: only the
- * traps might be different. */
+ /* We can simply copy the direct traps, otherwise we use the default
+ * ones in the Switcher: they will return to the Host. */
for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) {
if (direct_trap(lg, &lg->idt[i], i))
idt[i] = lg->idt[i];
else
default_idt_entry(&idt[i], i, def[i]);
}
+
+ /* Don't forget the system call trap! The IDT entries for other
+ * interrupts never change, so no need to copy them. */
i = SYSCALL_VECTOR;
if (direct_trap(lg, &lg->syscall_idt, i))
idt[i] = lg->syscall_idt;
diff --git a/drivers/lguest/io.c b/drivers/lguest/io.c
index c8eb7926699..ea68613b43f 100644
--- a/drivers/lguest/io.c
+++ b/drivers/lguest/io.c
@@ -1,5 +1,9 @@
-/* Simple I/O model for guests, based on shared memory.
- * Copyright (C) 2006 Rusty Russell IBM Corporation
+/*P:300 The I/O mechanism in lguest is simple yet flexible, allowing the Guest
+ * to talk to the Launcher or directly to another Guest. It uses familiar
+ * concepts of DMA and interrupts, plus some neat code stolen from
+ * futexes... :*/
+
+/* Copyright (C) 2006 Rusty Russell IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,8 +27,36 @@
#include <linux/uaccess.h>
#include "lg.h"
+/*L:300
+ * I/O
+ *
+ * Getting data in and out of the Guest is quite an art. There are numerous
+ * ways to do it, and they all suck differently. We try to keep things fairly
+ * close to "real" hardware so our Guest's drivers don't look like an alien
+ * visitation in the middle of the Linux code, and yet make sure that Guests
+ * can talk directly to other Guests, not just the Launcher.
+ *
+ * To do this, the Guest gives us a key when it binds or sends DMA buffers.
+ * The key corresponds to a "physical" address inside the Guest (ie. a virtual
+ * address inside the Launcher process). We don't, however, use this key
+ * directly.
+ *
+ * We want Guests which share memory to be able to DMA to each other: two
+ * Launchers can mmap the same file, then the Guests can communicate.
+ * Fortunately, the futex code provides us with a way to get a "union
+ * futex_key" corresponding to the memory lying at a virtual address: if the
+ * two processes share memory, the "union futex_key" for that memory will match
+ * even if the memory is mapped at different addresses in each. So we always
+ * convert the keys to "union futex_key"s to compare them.
+ *
+ * Before we dive into this though, we need to look at another set of helper
+ * routines used throughout the Host kernel code to access Guest memory.
+ :*/
static struct list_head dma_hash[61];
+/* An unfortunate side effect of the Linux doubly-linked list implementation is
+ * that there's no good way to statically initialize an array of linked
+ * lists. */
void lguest_io_init(void)
{
unsigned int i;
@@ -56,6 +88,19 @@ kill:
return 0;
}
+/*L:330 This is our hash function, using the wonderful Jenkins hash.
+ *
+ * The futex key is a union with three parts: an unsigned long word, a pointer,
+ * and an int "offset". We could use jhash_2words() which takes three u32s.
+ * (Ok, the hash functions are great: the naming sucks though).
+ *
+ * It's nice to be portable to 64-bit platforms, so we use the more generic
+ * jhash2(), which takes an array of u32, the number of u32s, and an initial
+ * u32 to roll in. This is uglier, but breaks down to almost the same code on
+ * 32-bit platforms like this one.
+ *
+ * We want a position in the array, so we modulo ARRAY_SIZE(dma_hash) (ie. 61).
+ */
static unsigned int hash(const union futex_key *key)
{
return jhash2((u32*)&key->both.word,
@@ -64,6 +109,9 @@ static unsigned int hash(const union futex_key *key)
% ARRAY_SIZE(dma_hash);
}
+/* This is a convenience routine to compare two keys. It's a much bemoaned C
+ * weakness that it doesn't allow '==' on structures or unions, so we have to
+ * open-code it like this. */
static inline int key_eq(const union futex_key *a, const union futex_key *b)
{
return (a->both.word == b->both.word
@@ -71,22 +119,36 @@ static inline int key_eq(const union futex_key *a, const union futex_key *b)
&& a->both.offset == b->both.offset);
}
-/* Must hold read lock on dmainfo owner's current->mm->mmap_sem */
+/*L:360 OK, when we need to actually free up a Guest's DMA array we do several
+ * things, so we have a convenient function to do it.
+ *
+ * The caller must hold a read lock on dmainfo owner's current->mm->mmap_sem
+ * for the drop_futex_key_refs(). */
static void unlink_dma(struct lguest_dma_info *dmainfo)
{
+ /* You locked this too, right? */
BUG_ON(!mutex_is_locked(&lguest_lock));
+ /* This is how we know that the entry is free. */
dmainfo->interrupt = 0;
+ /* Remove it from the hash table. */
list_del(&dmainfo->list);
+ /* Drop the references we were holding (to the inode or mm). */
drop_futex_key_refs(&dmainfo->key);
}
+/*L:350 This is the routine which we call when the Guest asks to unregister a
+ * DMA array attached to a given key. Returns true if the array was found. */
static int unbind_dma(struct lguest *lg,
const union futex_key *key,
unsigned long dmas)
{
int i, ret = 0;
+ /* We don't bother with the hash table, just look through all this
+ * Guest's DMA arrays. */
for (i = 0; i < LGUEST_MAX_DMA; i++) {
+ /* In theory it could have more than one array on the same key,
+ * or one array on multiple keys, so we check both */
if (key_eq(key, &lg->dma[i].key) && dmas == lg->dma[i].dmas) {
unlink_dma(&lg->dma[i]);
ret = 1;
@@ -96,51 +158,91 @@ static int unbind_dma(struct lguest *lg,
return ret;
}
+/*L:340 BIND_DMA: this is the hypercall which sets up an array of "struct
+ * lguest_dma" for receiving I/O.
+ *
+ * The Guest wants to bind an array of "struct lguest_dma"s to a particular key
+ * to receive input. This only happens when the Guest is setting up a new
+ * device, so it doesn't have to be very fast.
+ *
+ * It returns 1 on a successful registration (it can fail if we hit the limit
+ * of registrations for this Guest).
+ */
int bind_dma(struct lguest *lg,
unsigned long ukey, unsigned long dmas, u16 numdmas, u8 interrupt)
{
unsigned int i;
int ret = 0;
union futex_key key;
+ /* Futex code needs the mmap_sem. */
struct rw_semaphore *fshared = &current->mm->mmap_sem;
+ /* Invalid interrupt? (We could kill the guest here). */
if (interrupt >= LGUEST_IRQS)
return 0;
+ /* We need to grab the Big Lguest Lock, because other Guests may be
+ * trying to look through this Guest's DMAs to send something while
+ * we're doing this. */
mutex_lock(&lguest_lock);
down_read(fshared);
if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
kill_guest(lg, "bad dma key %#lx", ukey);
goto unlock;
}
+
+ /* We want to keep this key valid once we drop mmap_sem, so we have to
+ * hold a reference. */
get_futex_key_refs(&key);
+ /* If the Guest specified an interrupt of 0, that means they want to
+ * unregister this array of "struct lguest_dma"s. */
if (interrupt == 0)
ret = unbind_dma(lg, &key, dmas);
else {
+ /* Look through this Guest's dma array for an unused entry. */
for (i = 0; i < LGUEST_MAX_DMA; i++) {
+ /* If the interrupt is non-zero, the entry is already
+ * used. */
if (lg->dma[i].interrupt)
continue;
+ /* OK, a free one! Fill in our details. */
lg->dma[i].dmas = dmas;
lg->dma[i].num_dmas = numdmas;
lg->dma[i].next_dma = 0;
lg->dma[i].key = key;
lg->dma[i].guestid = lg->guestid;
lg->dma[i].interrupt = interrupt;
+
+ /* Now we add it to the hash table: the position
+ * depends on the futex key that we got. */
list_add(&lg->dma[i].list, &dma_hash[hash(&key)]);
+ /* Success! */
ret = 1;
goto unlock;
}
}
+ /* If we didn't find a slot to put the key in, drop the reference
+ * again. */
drop_futex_key_refs(&key);
unlock:
+ /* Unlock and out. */
up_read(fshared);
mutex_unlock(&lguest_lock);
return ret;
}
-/* lgread from another guest */
+/*L:385 Note that our routines to access a different Guest's memory are called
+ * lgread_other() and lgwrite_other(): these names emphasize that they are only
+ * used when the Guest is *not* the current Guest.
+ *
+ * The interface for copying from another process's memory is called
+ * access_process_vm(), with a final argument of 0 for a read, and 1 for a
+ * write.
+ *
+ * We need lgread_other() to read the destination Guest's "struct lguest_dma"
+ * array. */
static int lgread_other(struct lguest *lg,
void *buf, u32 addr, unsigned bytes)
{
@@ -153,7 +255,8 @@ static int lgread_other(struct lguest *lg,
return 1;
}
-/* lgwrite to another guest */
+/* "lgwrite()" to another Guest: used to update the destination "used_len" once
+ * we've transferred data into the buffer. */
static int lgwrite_other(struct lguest *lg, u32 addr,
const void *buf, unsigned bytes)
{
@@ -166,6 +269,15 @@ static int lgwrite_other(struct lguest *lg, u32 addr,
return 1;
}
+/*L:400 This is the generic engine which copies a source "struct
+ * lguest_dma" from this Guest into another Guest's "struct lguest_dma". The
+ * destination Guest's pages have already been mapped, as contained in the
+ * pages array.
+ *
+ * If you're wondering if there's a nice "copy from one process to another"
+ * routine, so was I. But Linux isn't really set up to copy between two
+ * unrelated processes, so we have to write it ourselves.
+ */
static u32 copy_data(struct lguest *srclg,
const struct lguest_dma *src,
const struct lguest_dma *dst,
@@ -174,33 +286,59 @@ static u32 copy_data(struct lguest *srclg,
unsigned int totlen, si, di, srcoff, dstoff;
void *maddr = NULL;
+ /* We return the total length transferred. */
totlen = 0;
+
+ /* We keep indexes into the source and destination "struct lguest_dma",
+ * and an offset within each region. */
si = di = 0;
srcoff = dstoff = 0;
+
+ /* We loop until the source or destination is exhausted. */
while (si < LGUEST_MAX_DMA_SECTIONS && src->len[si]
&& di < LGUEST_MAX_DMA_SECTIONS && dst->len[di]) {
+ /* We can only transfer the rest of the src buffer, or as much
+ * as will fit into the destination buffer. */
u32 len = min(src->len[si] - srcoff, dst->len[di] - dstoff);
+ /* For systems using "highmem" we need to use kmap() to access
+ * the page we want. We often use the same page over and over,
+ * so rather than kmap() it on every loop, we set the maddr
+ * pointer to NULL when we need to move to the next
+ * destination page. */
if (!maddr)
maddr = kmap(pages[di]);
- /* FIXME: This is not completely portable, since
- archs do different things for copy_to_user_page. */
+ /* Copy directly from (this Guest's) source address to the
+ * destination Guest's kmap()ed buffer. Note that maddr points
+ * to the start of the page: we need to add the offset of the
+ * destination address and offset within the buffer. */
+
+ /* FIXME: This is not completely portable. I looked at
+ * copy_to_user_page(), and some archs seem to need special
+ * flushes. x86 is fine. */
if (copy_from_user(maddr + (dst->addr[di] + dstoff)%PAGE_SIZE,
(void __user *)src->addr[si], len) != 0) {
+ /* If a copy failed, it's the source's fault. */
kill_guest(srclg, "bad address in sending DMA");
totlen = 0;
break;
}
+ /* Increment the total and src & dst offsets */
totlen += len;
srcoff += len;
dstoff += len;
+
+ /* Presumably we reached the end of the src or dest buffers: */
if (srcoff == src->len[si]) {
+ /* Move to the next buffer at offset 0 */
si++;
srcoff = 0;
}
if (dstoff == dst->len[di]) {
+ /* We need to unmap that destination page and reset
+ * maddr ready for the next one. */
kunmap(pages[di]);
maddr = NULL;
di++;
@@ -208,13 +346,15 @@ static u32 copy_data(struct lguest *srclg,
}
}
+ /* If we still had a page mapped at the end, unmap now. */
if (maddr)
kunmap(pages[di]);
return totlen;
}
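The shape of that loop is easier to see without the kmap()/copy_from_user() plumbing. Below is a minimal userspace sketch of the same walk over two section lists, with memcpy() standing in for the user copy and the struct layout invented purely for illustration.

#include <stdio.h>
#include <string.h>

#define MAX_SECTIONS 4

/* Both source and destination are lists of (pointer, length) sections. */
struct sg { char *addr[MAX_SECTIONS]; unsigned int len[MAX_SECTIONS]; };

static unsigned int copy_sg(const struct sg *src, const struct sg *dst)
{
	unsigned int totlen = 0, si = 0, di = 0, srcoff = 0, dstoff = 0;

	while (si < MAX_SECTIONS && src->len[si]
	       && di < MAX_SECTIONS && dst->len[di]) {
		/* Only the rest of this src section, or what fits in dst. */
		unsigned int len = src->len[si] - srcoff;
		if (dst->len[di] - dstoff < len)
			len = dst->len[di] - dstoff;

		memcpy(dst->addr[di] + dstoff, src->addr[si] + srcoff, len);
		totlen += len;
		srcoff += len;
		dstoff += len;

		/* Step to the next section when one side is exhausted. */
		if (srcoff == src->len[si]) { si++; srcoff = 0; }
		if (dstoff == dst->len[di]) { di++; dstoff = 0; }
	}
	return totlen;
}

int main(void)
{
	char a[] = "hell", b[] = "o!";            /* two source pieces */
	char out1[3], out2[8];                    /* two destination pieces */
	struct sg src = { { a, b }, { 4, 2 } };
	struct sg dst = { { out1, out2 }, { 3, 8 } };
	unsigned int n = copy_sg(&src, &dst);

	printf("copied %u bytes: %.3s%.*s\n", n, out1, (int)(n - 3), out2);
	return 0;
}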
-/* Src is us, ie. current. */
+/*L:390 This is how we transfer a "struct lguest_dma" from the source Guest
+ * (the current Guest which called SEND_DMA) to another Guest. */
static u32 do_dma(struct lguest *srclg, const struct lguest_dma *src,
struct lguest *dstlg, const struct lguest_dma *dst)
{
@@ -222,23 +362,31 @@ static u32 do_dma(struct lguest *srclg, const struct lguest_dma *src,
u32 ret;
struct page *pages[LGUEST_MAX_DMA_SECTIONS];
+ /* We check that both source and destination "struct lguest_dma"s are
+ * within the bounds of the source and destination Guests */
if (!check_dma_list(dstlg, dst) || !check_dma_list(srclg, src))
return 0;
- /* First get the destination pages */
+ /* We need to map the pages which correspond to each part of the
+ * destination buffer. */
for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
if (dst->len[i] == 0)
break;
+ /* get_user_pages() is a complicated function, especially since
+ * we only want a single page. But it works, and returns the
+ * number of pages. Note that we're holding the destination's
+ * mmap_sem, as get_user_pages() requires. */
if (get_user_pages(dstlg->tsk, dstlg->mm,
dst->addr[i], 1, 1, 1, pages+i, NULL)
!= 1) {
+ /* This means the destination gave us a bogus buffer */
kill_guest(dstlg, "Error mapping DMA pages");
ret = 0;
goto drop_pages;
}
}
- /* Now copy until we run out of src or dst. */
+ /* Now copy the data until we run out of src or dst. */
ret = copy_data(srclg, src, dst, pages);
drop_pages:
@@ -247,6 +395,11 @@ drop_pages:
return ret;
}
+/*L:380 Transferring data from one Guest to another is not as simple as I'd
+ * like. Once we've found the "struct lguest_dma_info" bound to the same
+ * address as the send, we need to copy into it.
+ *
+ * This function returns true if the destination array was empty. */
static int dma_transfer(struct lguest *srclg,
unsigned long udma,
struct lguest_dma_info *dst)
@@ -255,15 +408,23 @@ static int dma_transfer(struct lguest *srclg,
struct lguest *dstlg;
u32 i, dma = 0;
+ /* From the "struct lguest_dma_info" we found in the hash, grab the
+ * Guest. */
dstlg = &lguests[dst->guestid];
- /* Get our dma list. */
+ /* Read in the source "struct lguest_dma" handed to SEND_DMA. */
lgread(srclg, &src_dma, udma, sizeof(src_dma));
- /* We can't deadlock against them dmaing to us, because this
- * is all under the lguest_lock. */
+ /* We need the destination's mmap_sem, and we already hold the source's
+ * mmap_sem for the futex key lookup. Normally this would suggest that
+ * we could deadlock if the destination Guest was trying to send to
+ * this source Guest at the same time, which is another reason that all
+ * I/O is done under the big lguest_lock. */
down_read(&dstlg->mm->mmap_sem);
+ /* Look through the destination DMA array for an available buffer. */
for (i = 0; i < dst->num_dmas; i++) {
+ /* We keep a "next_dma" pointer which often helps us avoid
+ * looking at lots of previously-filled entries. */
dma = (dst->next_dma + i) % dst->num_dmas;
if (!lgread_other(dstlg, &dst_dma,
dst->dmas + dma * sizeof(struct lguest_dma),
@@ -273,30 +434,46 @@ static int dma_transfer(struct lguest *srclg,
if (!dst_dma.used_len)
break;
}
+
+ /* If we found a buffer, we do the actual data copy. */
if (i != dst->num_dmas) {
unsigned long used_lenp;
unsigned int ret;
ret = do_dma(srclg, &src_dma, dstlg, &dst_dma);
- /* Put used length in src. */
+ /* Put used length in the source "struct lguest_dma"'s used_len
+ * field. It's a little tricky to figure out where that is,
+ * though. */
lgwrite_u32(srclg,
udma+offsetof(struct lguest_dma, used_len), ret);
+ /* Transferring 0 bytes is OK if the source buffer was empty. */
if (ret == 0 && src_dma.len[0] != 0)
goto fail;
- /* Make sure destination sees contents before length. */
+ /* The destination Guest might be running on a different CPU:
+ * we have to make sure that it will see the "used_len" field
+ * change to non-zero *after* it sees the data we copied into
+ * the buffer. Hence a write memory barrier. */
wmb();
+ /* Figuring out where the used_len field of this "struct lguest_dma"
+ * lives in the destination's array is also a little ugly. */
used_lenp = dst->dmas
+ dma * sizeof(struct lguest_dma)
+ offsetof(struct lguest_dma, used_len);
lgwrite_other(dstlg, used_lenp, &ret, sizeof(ret));
+ /* Move the cursor for next time. */
dst->next_dma++;
}
up_read(&dstlg->mm->mmap_sem);
- /* Do this last so dst doesn't simply sleep on lock. */
+ /* We trigger the destination interrupt, even if the destination was
+ * empty and we didn't transfer anything: this gives them a chance to
+ * wake up and refill. */
set_bit(dst->interrupt, dstlg->irqs_pending);
+ /* Wake up the destination process. */
wake_up_process(dstlg->tsk);
+ /* If we passed the last "struct lguest_dma", the receiver had no
+ * buffers left. */
return i == dst->num_dmas;
fail:
@@ -304,6 +481,8 @@ fail:
return 0;
}
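The used_len address arithmetic is the usual base + index * sizeof + offsetof pattern. A tiny userspace sketch, with a made-up struct layout (the real "struct lguest_dma" lives in the lguest headers):

#include <stdio.h>
#include <stddef.h>

/* Invented stand-in for the real DMA descriptor layout. */
struct dma_like {
	unsigned int used_len;
	unsigned long addr[16];
	unsigned short len[16];
};

int main(void)
{
	unsigned long dmas = 0x100000;   /* hypothetical Guest-side array base */
	unsigned int i = 3;              /* which element we filled */
	unsigned long used_lenp = dmas
		+ i * sizeof(struct dma_like)
		+ offsetof(struct dma_like, used_len);

	printf("element %u's used_len lives at %#lx\n", i, used_lenp);
	return 0;
}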
+/*L:370 This is the counterpart to the BIND_DMA hypercall: the SEND_DMA
+ * hypercall. We find out who's listening, and send to them. */
void send_dma(struct lguest *lg, unsigned long ukey, unsigned long udma)
{
union futex_key key;
@@ -313,31 +492,43 @@ void send_dma(struct lguest *lg, unsigned long ukey, unsigned long udma)
again:
mutex_lock(&lguest_lock);
down_read(fshared);
+ /* Get the futex key for the key the Guest gave us */
if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
kill_guest(lg, "bad sending DMA key");
goto unlock;
}
- /* Shared mapping? Look for other guests... */
+ /* Since the key must be a multiple of 4, the futex key uses the lower
+ * bit of the "offset" field (which would always be 0) to indicate a
+ * mapping which is shared with other processes (ie. Guests). */
if (key.shared.offset & 1) {
struct lguest_dma_info *i;
+ /* Look through the hash for other Guests. */
list_for_each_entry(i, &dma_hash[hash(&key)], list) {
+ /* Don't send to ourselves. */
if (i->guestid == lg->guestid)
continue;
if (!key_eq(&key, &i->key))
continue;
+ /* If dma_transfer() tells us the destination has no
+ * available buffers, we increment "empty". */
empty += dma_transfer(lg, udma, i);
break;
}
+ /* If the destination is empty, we release our locks and
+ * give the destination Guest a brief chance to restock. */
if (empty == 1) {
/* Give any recipients one chance to restock. */
up_read(&current->mm->mmap_sem);
mutex_unlock(&lguest_lock);
+ /* Next time, we won't try again. */
empty++;
goto again;
}
} else {
- /* Private mapping: tell our userspace. */
+ /* Private mapping: Guest is sending to its Launcher. We set
+ * the "dma_is_pending" flag so that the main loop will exit
+ * and the Launcher's read() from /dev/lguest will return. */
lg->dma_is_pending = 1;
lg->pending_dma = udma;
lg->pending_key = ukey;
@@ -346,6 +537,7 @@ unlock:
up_read(fshared);
mutex_unlock(&lguest_lock);
}
+/*:*/
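That low-bit trick deserves a tiny illustration: because the underlying address is always a multiple of 4, the bottom bit of the offset is normally zero and can be borrowed as a "shared" flag. A userspace sketch of the idea, not the real futex_key layout:

#include <stdio.h>

int main(void)
{
	unsigned long offset = 0x128;            /* hypothetical, multiple of 4 */
	unsigned long key_private = offset;      /* flag clear: private mapping */
	unsigned long key_shared  = offset | 1;  /* flag set: shared mapping */

	printf("private key: shared bit %lu\n", key_private & 1);
	printf("shared key:  shared bit %lu, real offset %#lx\n",
	       key_shared & 1, key_shared & ~1UL);
	return 0;
}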
void release_all_dma(struct lguest *lg)
{
@@ -361,7 +553,18 @@ void release_all_dma(struct lguest *lg)
up_read(&lg->mm->mmap_sem);
}
-/* Userspace wants a dma buffer from this guest. */
+/*M:007 We only return a single DMA buffer to the Launcher, but it would be
+ * more efficient to return a pointer to the entire array of DMA buffers, which
+ * it can cache and choose one whenever it wants.
+ *
+ * Currently the Launcher uses a write to /dev/lguest, and the return value is
+ * the address of the DMA structure with the interrupt number placed in
+ * dma->used_len. If we wanted to return the entire array, we would need to return
+ * the address, array size and interrupt number: this seems to require an
+ * ioctl(). :*/
+
+/*L:320 This routine looks for a DMA buffer registered by the Guest on the
+ * given key (using the BIND_DMA hypercall). */
unsigned long get_dma_buffer(struct lguest *lg,
unsigned long ukey, unsigned long *interrupt)
{
@@ -370,15 +573,29 @@ unsigned long get_dma_buffer(struct lguest *lg,
struct lguest_dma_info *i;
struct rw_semaphore *fshared = &current->mm->mmap_sem;
+ /* Take the Big Lguest Lock to stop other Guests sending this Guest DMA
+ * at the same time. */
mutex_lock(&lguest_lock);
+ /* To match between Guests sharing the same underlying memory we steal
+ * code from the futex infrastructure. This requires that we hold the
+ * "mmap_sem" for our process (the Launcher), and pass it to the futex
+ * code. */
down_read(fshared);
+
+ /* This can fail if it's not a valid address, or if the address is not
+ * divisible by 4 (the futex code needs that, we don't really). */
if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
kill_guest(lg, "bad registered DMA buffer");
goto unlock;
}
+ /* Search the hash table for matching entries (the Launcher can only
+ * send to its own Guest for the moment, so the entry must be for this
+ * Guest) */
list_for_each_entry(i, &dma_hash[hash(&key)], list) {
if (key_eq(&key, &i->key) && i->guestid == lg->guestid) {
unsigned int j;
+ /* Look through the registered DMA array for an
+ * available buffer. */
for (j = 0; j < i->num_dmas; j++) {
struct lguest_dma dma;
@@ -387,6 +604,8 @@ unsigned long get_dma_buffer(struct lguest *lg,
if (dma.used_len == 0)
break;
}
+ /* Store the interrupt the Guest wants when the buffer
+ * is used. */
*interrupt = i->interrupt;
break;
}
@@ -396,4 +615,12 @@ unlock:
mutex_unlock(&lguest_lock);
return ret;
}
+/*:*/
+/*L:410 That really does complete the Launcher section. Not only have we now finished
+ * the longest chapter in our journey, but this also means we are over halfway
+ * through!
+ *
+ * Enough prevaricating around the bush: it is time for us to dive into the
+ * core of the Host, in "make Host".
+ */
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 3e2ddfbc816..64f0abed317 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -58,9 +58,18 @@ struct lguest_dma_info
u8 interrupt; /* 0 when not registered */
};
-/* We have separate types for the guest's ptes & pgds and the shadow ptes &
- * pgds. Since this host might use three-level pagetables and the guest and
- * shadow pagetables don't, we can't use the normal pte_t/pgd_t. */
+/*H:310 The page-table code owes a great debt of gratitude to Andi Kleen. He
+ * reviewed the original code which used "u32" for all page table entries, and
+ * insisted that it would be far clearer with explicit typing. I thought it
+ * was overkill, but he was right: it is much clearer than it was before.
+ *
+ * We have separate types for the Guest's ptes & pgds and the shadow ptes &
+ * pgds. There's already a Linux type for these (pte_t and pgd_t) but they
+ * change depending on kernel config options (PAE). */
+
+/* Each entry is identical: lower 12 bits of flags and upper 20 bits for the
+ * "page frame number" (0 == first physical page, etc). They are different
+ * types so the compiler will warn us if we mix them improperly. */
typedef union {
struct { unsigned flags:12, pfn:20; };
struct { unsigned long val; } raw;
@@ -77,8 +86,12 @@ typedef union {
struct { unsigned flags:12, pfn:20; };
struct { unsigned long val; } raw;
} gpte_t;
+
+/* We have two convenient macros to convert a "raw" value as handed to us by
+ * the Guest into the correct Guest PGD or PTE type. */
#define mkgpte(_val) ((gpte_t){.raw.val = _val})
#define mkgpgd(_val) ((gpgd_t){.raw.val = _val})
+/*:*/
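If the flags/pfn split seems abstract, this little userspace program pulls a 32-bit entry apart the same way. It assumes a little-endian x86-style layout, like the kernel typedefs it imitates (anonymous structs inside unions are GNU C / C11), and is only a sketch:

#include <stdio.h>

typedef union {
	struct { unsigned flags:12, pfn:20; };   /* low 12 bits, high 20 bits */
	struct { unsigned long val; } raw;
} entry_t;

int main(void)
{
	entry_t e = { .raw.val = 0x12345067 };   /* arbitrary example entry */

	/* On little-endian x86 this prints pfn 0x12345, flags 0x67. */
	printf("pfn %#x, flags %#x\n", (unsigned)e.pfn, (unsigned)e.flags);
	return 0;
}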
struct pgdir
{
@@ -243,7 +256,32 @@ unsigned long get_dma_buffer(struct lguest *lg, unsigned long key,
/* hypercalls.c: */
void do_hypercalls(struct lguest *lg);
-
+void write_timestamp(struct lguest *lg);
+
+/*L:035
+ * Let's step aside for the moment, to study one important routine that's used
+ * widely in the Host code.
+ *
+ * There are many cases where the Guest does something invalid, like pass crap
+ * to a hypercall. Since only the Guest kernel can make hypercalls, it's quite
+ * acceptable to simply terminate the Guest and give the Launcher a nicely
+ * formatted reason. It's also simpler for the Guest itself, which doesn't
+ * need to check most hypercalls for "success"; if you're still running, it
+ * succeeded.
+ *
+ * Once this is called, the Guest will never run again, so most Host code can
+ * call this then continue as if nothing had happened. This means many
+ * functions don't have to explicitly return an error code, which keeps the
+ * code simple.
+ *
+ * It also means that this can be called more than once: only the first one is
+ * remembered. The only trick is that we still need to kill the Guest even if
+ * we can't allocate memory to store the reason. Linux has a neat way of
+ * packing error codes into invalid pointers, so we use that here.
+ *
+ * Like any macro which uses an "if", it is safely wrapped in a run-once "do {
+ * } while(0)".
+ */
#define kill_guest(lg, fmt...) \
do { \
if (!(lg)->dead) { \
@@ -252,6 +290,7 @@ do { \
(lg)->dead = ERR_PTR(-ENOMEM); \
} \
} while(0)
+/* (End of aside) :*/
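The "error code packed into an invalid pointer" trick is the kernel's ERR_PTR() family. Here is a self-contained userspace re-implementation of the idea, just to show the mechanics:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095   /* errnos live in the last page of the address space */

static inline void *err_ptr(long error)      { return (void *)error; }
static inline long  ptr_err(const void *ptr) { return (long)ptr; }
static inline int   is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *reason = err_ptr(-ENOMEM);   /* "couldn't even allocate the reason" */

	if (is_err(reason))
		printf("dead, error %ld\n", ptr_err(reason));   /* prints -12 */
	return 0;
}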
static inline unsigned long guest_pa(struct lguest *lg, unsigned long vaddr)
{
diff --git a/drivers/lguest/lguest.c b/drivers/lguest/lguest.c
index 18dade06d4a..1bc1546c7fd 100644
--- a/drivers/lguest/lguest.c
+++ b/drivers/lguest/lguest.c
@@ -1,6 +1,32 @@
-/*
- * Lguest specific paravirt-ops implementation
+/*P:010
+ * A hypervisor allows multiple Operating Systems to run on a single machine.
+ * To quote David Wheeler: "Any problem in computer science can be solved with
+ * another layer of indirection."
+ *
+ * We keep things simple in two ways. First, we start with a normal Linux
+ * kernel and insert a module (lg.ko) which allows us to run other Linux
+ * kernels the same way we'd run processes. We call the first kernel the Host,
+ * and the others the Guests. The program which sets up and configures Guests
+ * (such as the example in Documentation/lguest/lguest.c) is called the
+ * Launcher.
+ *
+ * Secondly, we only run specially modified Guests, not normal kernels. When
+ * you set CONFIG_LGUEST to 'y' or 'm', this automatically sets
+ * CONFIG_LGUEST_GUEST=y, which compiles this file into the kernel so it knows
+ * how to be a Guest. This means that you can use the same kernel you boot
+ * normally (ie. as a Host) as a Guest.
*
+ * These Guests know that they cannot do privileged operations, such as disable
+ * interrupts, and that they have to ask the Host to do such things explicitly.
+ * This file consists of all the replacements for such low-level native
+ * hardware operations: these special Guest versions call the Host.
+ *
+ * So how does the kernel know it's a Guest? The Guest starts at a special
+ * entry point marked with a magic string, which sets up a few things then
+ * calls here. We replace the native functions in "struct paravirt_ops"
+ * with our Guest versions, then boot like normal. :*/
+
+/*
* Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify
@@ -40,6 +66,12 @@
#include <asm/mce.h>
#include <asm/io.h>
+/*G:010 Welcome to the Guest!
+ *
+ * The Guest in our tale is a simple creature: identical to the Host but
+ * behaving in simplified but equivalent ways. In particular, the Guest is the
+ * same kernel as the Host (or at least, built from the same source code). :*/
+
/* Declarations for definitions in lguest_guest.S */
extern char lguest_noirq_start[], lguest_noirq_end[];
extern const char lgstart_cli[], lgend_cli[];
@@ -58,7 +90,26 @@ struct lguest_data lguest_data = {
struct lguest_device_desc *lguest_devices;
static cycle_t clock_base;
-static enum paravirt_lazy_mode lazy_mode;
+/*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
+ * real optimization trick!
+ *
+ * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
+ * them as a batch when lazy_mode is eventually turned off. Because hypercalls
+ * are reasonably expensive, batching them up makes sense. For example, a
+ * large mmap might update dozens of page table entries: that code calls
+ * lguest_lazy_mode(PARAVIRT_LAZY_MMU), does the dozen updates, then calls
+ * lguest_lazy_mode(PARAVIRT_LAZY_NONE).
+ *
+ * So, when we're in lazy mode, we call async_hypercall() to store the call for
+ * future processing. When lazy mode is turned off we issue a hypercall to
+ * flush the stored calls.
+ *
+ * There's also a hack where "mode" is set to "PARAVIRT_LAZY_FLUSH" which
+ * indicates we're to flush any outstanding calls immediately. This is used
+ * when an interrupt handler does a kmap_atomic(): the page table changes must
+ * happen immediately even if we're in the middle of a batch. Usually we're
+ * not, though, so there's nothing to do. */
+static enum paravirt_lazy_mode lazy_mode; /* Note: not SMP-safe! */
static void lguest_lazy_mode(enum paravirt_lazy_mode mode)
{
if (mode == PARAVIRT_LAZY_FLUSH) {
@@ -82,6 +133,16 @@ static void lazy_hcall(unsigned long call,
async_hcall(call, arg1, arg2, arg3);
}
+/* async_hcall() is pretty simple: I'm quite proud of it really. We have a
+ * ring buffer of stored hypercalls which the Host will run through next time we
+ * do a normal hypercall. Each entry in the ring has 4 slots for the hypercall
+ * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
+ * and 255 once the Host has finished with it.
+ *
+ * If we come around to a slot which hasn't been finished, then the table is
+ * full and we just make the hypercall directly. This has the nice side
+ * effect of causing the Host to run all the stored calls in the ring buffer
+ * which empties it for next time! */
void async_hcall(unsigned long call,
unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
@@ -89,6 +150,9 @@ void async_hcall(unsigned long call,
static unsigned int next_call;
unsigned long flags;
+ /* Disable interrupts if not already disabled: we don't want an
+ * interrupt handler making a hypercall while we're already doing
+ * one! */
local_irq_save(flags);
if (lguest_data.hcall_status[next_call] != 0xFF) {
/* Table full, so do normal hcall which will flush table. */
@@ -98,7 +162,7 @@ void async_hcall(unsigned long call,
lguest_data.hcalls[next_call].edx = arg1;
lguest_data.hcalls[next_call].ebx = arg2;
lguest_data.hcalls[next_call].ecx = arg3;
- /* Make sure host sees arguments before "valid" flag. */
+ /* Arguments must all be written before we mark it to go */
wmb();
lguest_data.hcall_status[next_call] = 0;
if (++next_call == LHCALL_RING_SIZE)
@@ -106,9 +170,14 @@ void async_hcall(unsigned long call,
}
local_irq_restore(flags);
}
+/*:*/
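Stripped of the hypercall machinery, the ring discipline is simple enough to demonstrate in userspace: one status byte per slot, fill the slot before flagging it ready, and fall back to a direct call (which drains the ring) when the next slot is still busy. Everything below is an illustrative stand-in, not the lguest ABI.

#include <stdio.h>

#define RING_SIZE 8
#define SLOT_FREE  0xFF   /* the consumer has finished with this slot */
#define SLOT_READY 0x00   /* filled in and ready for the consumer */

struct call { unsigned long nr, arg1, arg2, arg3; };

static struct call ring[RING_SIZE];
static unsigned char status[RING_SIZE];
static unsigned int next_call;

/* Stand-in for a real (synchronous) hypercall: it also drains the ring,
 * which is the side effect the code above relies on when the ring is full. */
static void call_now(struct call c)
{
	for (unsigned int i = 0; i < RING_SIZE; i++)
		if (status[i] == SLOT_READY) {
			printf("drained queued call %lu\n", ring[i].nr);
			status[i] = SLOT_FREE;
		}
	printf("ran call %lu directly\n", c.nr);
}

static void queue_call(unsigned long nr, unsigned long a1,
		       unsigned long a2, unsigned long a3)
{
	struct call c = { nr, a1, a2, a3 };

	if (status[next_call] != SLOT_FREE) {
		call_now(c);              /* ring full: fall back to a direct call */
		return;
	}
	ring[next_call] = c;
	/* The kernel issues a write barrier here, before flagging the slot. */
	status[next_call] = SLOT_READY;
	next_call = (next_call + 1) % RING_SIZE;
}

int main(void)
{
	for (unsigned int i = 0; i < RING_SIZE; i++)
		status[i] = SLOT_FREE;
	for (unsigned long n = 1; n <= 12; n++)
		queue_call(n, 0, 0, 0);
	call_now((struct call){ 0, 0, 0, 0 });   /* flush whatever is left */
	return 0;
}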
+/* Wrappers for the SEND_DMA and BIND_DMA hypercalls. This is mainly because
+ * Jeff Garzik complained that __pa() should never appear in drivers, and this
+ * helps remove most of them. But also, it wraps some ugliness. */
void lguest_send_dma(unsigned long key, struct lguest_dma *dma)
{
+ /* The hcall might not write this if something goes wrong */
dma->used_len = 0;
hcall(LHCALL_SEND_DMA, key, __pa(dma), 0);
}
@@ -116,11 +185,16 @@ void lguest_send_dma(unsigned long key, struct lguest_dma *dma)
int lguest_bind_dma(unsigned long key, struct lguest_dma *dmas,
unsigned int num, u8 irq)
{
+ /* This is the only hypercall which actually wants 5 arguments, and we
+ * only support 4. Fortunately the interrupt number is always less
+ * than 256, so we can pack it with the number of dmas in the final
+ * argument. */
if (!hcall(LHCALL_BIND_DMA, key, __pa(dmas), (num << 8) | irq))
return -ENOMEM;
return 0;
}
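The pack-two-values-into-one-argument trick works because the interrupt number always fits in a byte. A tiny demonstration with example values:

#include <stdio.h>

int main(void)
{
	unsigned int num = 16, irq = 9;                /* example values */
	unsigned long packed = (num << 8) | irq;       /* count above, irq below */

	printf("packed %#lx -> num %lu, irq %lu\n",
	       packed, packed >> 8, packed & 0xFF);
	return 0;
}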
+/* Unbinding is the same hypercall as binding, but with 0 num & irq. */
void lguest_unbind_dma(unsigned long key, struct lguest_dma *dmas)
{
hcall(LHCALL_BIND_DMA, key, __pa(dmas), 0);
@@ -138,35 +212,73 @@ void lguest_unmap(void *addr)
iounmap((__force void __iomem *)addr);
}
+/*G:033
+ * Here are our first native-instruction replacements: four functions for
+ * interrupt control.
+ *
+ * The simplest way of implementing these would be to have "turn interrupts
+ * off" and "turn interrupts on" hypercalls. Unfortunately, this is too slow:
+ * these are by far the most commonly called functions of those we override.
+ *
+ * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
+ * which the Guest can update with a single instruction. The Host knows to
+ * check there when it wants to deliver an interrupt.
+ */
+
+/* save_flags() is expected to return the processor state (ie. "eflags"). The
+ * eflags word contains all kinds of stuff, but in practice Linux only cares
+ * about the interrupt flag. Our "save_flags()" just returns that. */
static unsigned long save_fl(void)
{
return lguest_data.irq_enabled;
}
+/* "restore_flags" just sets the flags back to the value given. */
static void restore_fl(unsigned long flags)
{
- /* FIXME: Check if interrupt pending... */
lguest_data.irq_enabled = flags;
}
+/* Interrupts go off... */
static void irq_disable(void)
{
lguest_data.irq_enabled = 0;
}
+/* Interrupts go on... */
static void irq_enable(void)
{
- /* FIXME: Check if interrupt pending... */
lguest_data.irq_enabled = X86_EFLAGS_IF;
}
-
+/*:*/
+/*M:003 Note that we don't check for outstanding interrupts when we re-enable
+ * them (or when we unmask an interrupt). This seems to work for the moment,
+ * since interrupts are rare and we'll just get the interrupt on the next timer
+ * tick, but when we turn on CONFIG_NO_HZ, we should revisit this. One way
+ * would be to put the "irq_enabled" field in a page by itself, and have the
+ * Host write-protect it when an interrupt comes in when irqs are disabled.
+ * There will then be a page fault as soon as interrupts are re-enabled. :*/
+
+/*G:034
+ * The Interrupt Descriptor Table (IDT).
+ *
+ * The IDT tells the processor what to do when an interrupt comes in. Each
+ * entry in the table is a 64-bit descriptor: this holds the privilege level,
+ * address of the handler, and... well, who cares? The Guest just asks the
+ * Host to make the change anyway, because the Host controls the real IDT.
+ */
static void lguest_write_idt_entry(struct desc_struct *dt,
int entrynum, u32 low, u32 high)
{
+ /* Keep the local copy up to date. */
write_dt_entry(dt, entrynum, low, high);
+ /* Tell Host about this new entry. */
hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, low, high);
}
+/* Changing to a different IDT is very rare: we keep the IDT up-to-date every
+ * time it is written, so we can simply loop through all entries and tell the
+ * Host about them. */
static void lguest_load_idt(const struct Xgt_desc_struct *desc)
{
unsigned int i;
@@ -176,12 +288,29 @@ static void lguest_load_idt(const struct Xgt_desc_struct *desc)
hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
}
+/*
+ * The Global Descriptor Table.
+ *
+ * The Intel architecture defines another table, called the Global Descriptor
+ * Table (GDT). You tell the CPU where it is (and its size) using the "lgdt"
+ * instruction, and then several other instructions refer to entries in the
+ * table. There are three entries which the Switcher needs, so the Host simply
+ * controls the entire thing and the Guest asks it to make changes using the
+ * LOAD_GDT hypercall.
+ *
+ * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY
+ * hypercall and use that repeatedly to load a new IDT. I don't think it
+ * really matters, but wouldn't it be nice if they were the same?
+ */
static void lguest_load_gdt(const struct Xgt_desc_struct *desc)
{
BUG_ON((desc->size+1)/8 != GDT_ENTRIES);
hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0);
}
+/* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
+ * then tell the Host to reload the entire thing. This operation is so rare
+ * that this naive implementation is reasonable. */
static void lguest_write_gdt_entry(struct desc_struct *dt,
int entrynum, u32 low, u32 high)
{
@@ -189,19 +318,58 @@ static void lguest_write_gdt_entry(struct desc_struct *dt,
hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0);
}
+/* OK, I lied. There are three "thread local storage" GDT entries which change
+ * on every context switch (these three entries are how glibc implements
+ * __thread variables). So we have a hypercall specifically for this case. */
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
}
+/*:*/
+/*G:038 That's enough excitement for now, back to ploughing through each of
+ * the paravirt_ops (we're about 1/3 of the way through).
+ *
+ * This is the Local Descriptor Table, another weird Intel thingy. Linux only
+ * uses this for some strange applications like Wine. We don't do anything
+ * here, so they'll get an informative and friendly Segmentation Fault. */
static void lguest_set_ldt(const void *addr, unsigned entries)
{
}
+/* This loads a GDT entry into the "Task Register": that entry points to a
+ * structure called the Task State Segment. Some comments scattered through the
+ * kernel code indicate that this was used for task switching in ages past, along
+ * with blood sacrifice and astrology.
+ *
+ * Now there's nothing interesting in here that we don't get told elsewhere.
+ * But the native version uses the "ltr" instruction, which makes the Host
+ * complain to the Guest about a Segmentation Fault and it'll oops. So we
+ * override the native version with a do-nothing version. */
static void lguest_load_tr_desc(void)
{
}
+/* The "cpuid" instruction is a way of querying both the CPU identity
+ * (manufacturer, model, etc) and its features. It was introduced before the
+ * Pentium in 1993 and keeps getting extended by both Intel and AMD. As you
+ * might imagine, after a decade and a half of this treatment, it is now a giant
+ * ball of hair. Its entry in the current Intel manual runs to 28 pages.
+ *
+ * This instruction even has its own Wikipedia entry. The Wikipedia entry
+ * has been translated into 4 languages. I am not making this up!
+ *
+ * We could get funky here and identify ourselves as "GenuineLguest", but
+ * instead we just use the real "cpuid" instruction. Then I pretty much turned
+ * off feature bits until the Guest booted. (Don't say that: you'll damage
+ * lguest sales!) Shut up, inner voice! (Hey, just pointing out that this is
+ * hardly future-proof.) No one's listening! They don't like you anyway,
+ * parenthetic weirdo!
+ *
+ * Replacing the cpuid so we can turn features off is great for the kernel, but
+ * anyone (including userspace) can just use the raw "cpuid" instruction and
+ * the Host won't even notice since it isn't privileged. So we try not to get
+ * too worked up about it. */
static void lguest_cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
@@ -214,21 +382,43 @@ static void lguest_cpuid(unsigned int *eax, unsigned int *ebx,
*ecx &= 0x00002201;
/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, FPU. */
*edx &= 0x07808101;
- /* Host wants to know when we flush kernel pages: set PGE. */
+ /* The Host can do a nice optimization if it knows that the
+ * kernel mappings (addresses above 0xC0000000 or whatever
+ * PAGE_OFFSET is set to) haven't changed. But Linux calls
+ * flush_tlb_user() for both user and kernel mappings unless
+ * the Page Global Enable (PGE) feature bit is set. */
*edx |= 0x00002000;
break;
case 0x80000000:
/* Futureproof this a little: if they ask how much extended
- * processor information, limit it to known fields. */
+ * processor information there is, limit it to known fields. */
if (*eax > 0x80000008)
*eax = 0x80000008;
break;
}
}
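The masking itself is just bitwise arithmetic; here it is in isolation as a userspace sketch, using the same constants as the code above but a made-up raw feature word:

#include <stdio.h>

int main(void)
{
	unsigned int edx = 0xBFEBFBFF;   /* hypothetical raw CPUID.1 edx value */

	edx &= 0x07808101;               /* keep only the features we support */
	edx |= 0x00002000;               /* and advertise PGE */

	printf("filtered edx = %#010x\n", edx);
	return 0;
}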
+/* Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
+ * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
+ * it. The Host needs to know when the Guest wants to change them, so we have
+ * a whole series of functions like read_cr0() and write_cr0().
+ *
+ * We start with CR0. CR0 allows you to turn on and off all kinds of basic
+ * features, but Linux only really cares about one: the horrifically-named Task
+ * Switched (TS) bit at bit 3 (ie. 8)
+ *
+ * What does the TS bit do? Well, it causes the CPU to trap (interrupt 7) if
+ * the floating point unit is used. Which allows us to restore FPU state
+ * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
+ * name like "FPUTRAP bit" be a little less cryptic?
+ *
+ * We store cr0 (and cr3) locally, because the Host never changes it. The
+ * Guest sometimes wants to read it and we'd prefer not to bother the Host
+ * unnecessarily. */
static unsigned long current_cr0, current_cr3;
static void lguest_write_cr0(unsigned long val)
{
+ /* 8 == TS bit. */
lazy_hcall(LHCALL_TS, val & 8, 0, 0);
current_cr0 = val;
}
@@ -238,17 +428,25 @@ static unsigned long lguest_read_cr0(void)
return current_cr0;
}
+/* Intel provided a special instruction to clear the TS bit for people too cool
+ * to use write_cr0() to do it. This "clts" instruction is faster, because all
+ * the vowels have been optimized out. */
static void lguest_clts(void)
{
lazy_hcall(LHCALL_TS, 0, 0, 0);
current_cr0 &= ~8U;
}
+/* CR2 is the virtual address of the last page fault, which the Guest only ever
+ * reads. The Host kindly writes this into our "struct lguest_data", so we
+ * just read it out of there. */
static unsigned long lguest_read_cr2(void)
{
return lguest_data.cr2;
}
+/* CR3 is the current toplevel pagetable page: the principle is the same as
+ * cr0. Keep a local copy, and tell the Host when it changes. */
static void lguest_write_cr3(unsigned long cr3)
{
lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0);
@@ -260,7 +458,7 @@ static unsigned long lguest_read_cr3(void)
return current_cr3;
}
-/* Used to enable/disable PGE, but we don't care. */
+/* CR4 is used to enable and disable PGE, but we don't care. */
static unsigned long lguest_read_cr4(void)
{
return 0;
@@ -270,6 +468,59 @@ static void lguest_write_cr4(unsigned long val)
{
}
+/*
+ * Page Table Handling.
+ *
+ * Now would be a good time to take a rest and grab a coffee or similarly
+ * relaxing stimulant. The easy parts are behind us, and the trek gradually
+ * winds uphill from here.
+ *
+ * Quick refresher: memory is divided into "pages" of 4096 bytes each. The CPU
+ * maps virtual addresses to physical addresses using "page tables". We could
+ * use one huge index of 1 million entries: each address is 4 bytes, so that's
+ * 1024 pages just to hold the page tables. But since most virtual addresses
+ * are unused, we use a two level index which saves space. The CR3 register
+ * contains the physical address of the top level "page directory" page, which
+ * contains physical addresses of up to 1024 second-level pages. Each of these
+ * second level pages contains up to 1024 physical addresses of actual pages,
+ * or Page Table Entries (PTEs).
+ *
+ * Here's a diagram, where arrows indicate physical addresses:
+ *
+ * CR3 ---> +---------+
+ *          |         --------->+---------+
+ *          |         |         | PADDR1  |
+ *        Top-level   |         | PADDR2  |
+ *        (PMD) page  |         |         |
+ *          |         |      Lower-level  |
+ *          |         |      (PTE) page   |
+ *          |         |         |         |
+ *            ....                ....
+ *
+ * So to convert a virtual address to a physical address, we look up the top
+ * level, which points us to the second level, which gives us the physical
+ * address of that page. If the top level entry was not present, or the second
+ * level entry was not present, then the virtual address is invalid (we
+ * say "the page was not mapped").
+ *
+ * Put another way, a 32-bit virtual address is divided up like so:
+ *
+ * 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
+ *   Index into top        Index into second      Offset within page
+ *   page directory page   pagetable page
+ *
+ * The kernel spends a lot of time changing both the top-level page directory
+ * and lower-level pagetable pages. The Guest doesn't know physical addresses,
+ * so while it maintains these page tables exactly like normal, it also needs
+ * to keep the Host informed whenever it makes a change: the Host will create
+ * the real page tables based on the Guests'.
+ */
+
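To make the 10/10/12 split concrete, here is the index arithmetic on its own, as a userspace sketch with an arbitrary example address:

#include <stdio.h>

int main(void)
{
	unsigned int vaddr = 0xC0100234;                /* example virtual address */
	unsigned int pgd_index = vaddr >> 22;           /* top 10 bits */
	unsigned int pte_index = (vaddr >> 12) & 0x3FF; /* next 10 bits */
	unsigned int offset    = vaddr & 0xFFF;         /* low 12 bits */

	printf("pgd index %u, pte index %u, offset %#x\n",
	       pgd_index, pte_index, offset);
	return 0;
}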
+/* The Guest calls this to set a second-level entry (pte), ie. to map a page
+ * into a process' address space. We set the entry then tell the Host the
+ * toplevel and address this corresponds to. The Guest uses one pagetable per
+ * process, so we need to tell the Host which one we're changing (mm->pgd). */
static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
@@ -277,7 +528,9 @@ static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low);
}
-/* We only support two-level pagetables at the moment. */
+/* The Guest calls this to set a top-level entry. Again, we set the entry then
+ * tell the Host which top-level page we changed, and the index of the entry we
+ * changed. */
static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
*pmdp = pmdval;
@@ -285,7 +538,15 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
(__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
}
-/* FIXME: Eliminate all callers of this. */
+/* There are a couple of legacy places where the kernel sets a PTE, but we
+ * don't know the top level any more. This is useless for us, since we don't
+ * know which pagetable is changing or what address, so we just tell the Host
+ * to forget all of them. Fortunately, this is very rare.
+ *
+ * ... except in early boot when the kernel sets up the initial pagetables,
+ * which makes booting astonishingly slow. So we don't even tell the Host
+ * anything changed until we've done the first page table switch.
+ */
static void lguest_set_pte(pte_t *ptep, pte_t pteval)
{
*ptep = pteval;
@@ -294,22 +555,51 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval)
lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
}
+/* Unfortunately for Lguest, the paravirt_ops for page tables were based on
+ * native page table operations. On native hardware you can set a new page
+ * table entry whenever you want, but if you want to remove one you have to do
+ * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
+ *
+ * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
+ * called when a valid entry is written, not when it's removed (ie. marked not
+ * present). Instead, this is where we come when the Guest wants to remove a
+ * page table entry: we tell the Host to set that entry to 0 (ie. the present
+ * bit is zero). */
static void lguest_flush_tlb_single(unsigned long addr)
{
- /* Simply set it to zero, and it will fault back in. */
+ /* Simply set it to zero: if it was not, it will fault back in. */
lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0);
}
+/* This is what happens after the Guest has removed a large number of entries.
+ * This tells the Host that any of the page table entries for userspace might
+ * have changed, ie. virtual addresses below PAGE_OFFSET. */
static void lguest_flush_tlb_user(void)
{
lazy_hcall(LHCALL_FLUSH_TLB, 0, 0, 0);
}
+/* This is called when the kernel page tables have changed. That's not very
+ * common (unless the Guest is using highmem, which makes the Guest extremely
+ * slow), so it's worth separating this from the user flushing above. */
static void lguest_flush_tlb_kernel(void)
{
lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
}
+/*
+ * The Unadvanced Programmable Interrupt Controller.
+ *
+ * This is an attempt to implement the simplest possible interrupt controller.
+ * I spent some time looking though routines like set_irq_chip_and_handler,
+ * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
+ * I *think* this is as simple as it gets.
+ *
+ * We can tell the Host which interrupts we want blocked via the
+ * lguest_data.blocked_interrupts bitmap, so disabling (aka "masking") them is as
+ * simple as setting a bit. We don't actually "ack" interrupts as such, we
+ * just mask and unmask them. I wonder if we should be cleverer?
+ */
static void disable_lguest_irq(unsigned int irq)
{
set_bit(irq, lguest_data.blocked_interrupts);
@@ -318,9 +608,9 @@ static void disable_lguest_irq(unsigned int irq)
static void enable_lguest_irq(unsigned int irq)
{
clear_bit(irq, lguest_data.blocked_interrupts);
- /* FIXME: If it's pending? */
}
+/* This structure describes the lguest IRQ controller. */
static struct irq_chip lguest_irq_controller = {
.name = "lguest",
.mask = disable_lguest_irq,
@@ -328,6 +618,10 @@ static struct irq_chip lguest_irq_controller = {
.unmask = enable_lguest_irq,
};
+/* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
+ * interrupt (except 128, which is used for system calls), and then tells the
+ * Linux infrastructure that each interrupt is controlled by our level-based
+ * lguest interrupt controller. */
static void __init lguest_init_IRQ(void)
{
unsigned int i;
@@ -340,20 +634,51 @@ static void __init lguest_init_IRQ(void)
handle_level_irq);
}
}
+ /* This call is required to set up for 4k stacks, where we have
+ * separate stacks for hard and soft interrupts. */
irq_ctx_init(smp_processor_id());
}
+/*
+ * Time.
+ *
+ * It would be far better for everyone if the Guest had its own clock, but
+ * until then the Host gives us the time on every interrupt.
+ */
static unsigned long lguest_get_wallclock(void)
{
- return hcall(LHCALL_GET_WALLCLOCK, 0, 0, 0);
+ return lguest_data.time.tv_sec;
}
static cycle_t lguest_clock_read(void)
{
+ unsigned long sec, nsec;
+
+ /* If the Host tells us the TSC speed, we can trust that. */
if (lguest_data.tsc_khz)
return native_read_tsc();
- else
- return jiffies;
+
+ /* If we can't use the TSC, we read the time value written by the Host.
+ * Since it's in two parts (seconds and nanoseconds), we risk reading
+ * it just as it's changing from 99 & 0.999999999 to 100 and 0, and
+ * getting 99 and 0. As Linux tends to come apart under the stress of
+ * time travel, we must be careful: */
+ do {
+ /* First we read the seconds part. */
+ sec = lguest_data.time.tv_sec;
+ /* This read memory barrier tells the compiler and the CPU that
+ * this can't be reordered: we have to complete the above
+ * before going on. */
+ rmb();
+ /* Now we read the nanoseconds part. */
+ nsec = lguest_data.time.tv_nsec;
+ /* Make sure we've done that. */
+ rmb();
+ /* Now if the seconds part has changed, try again. */
+ } while (unlikely(lguest_data.time.tv_sec != sec));
+
+ /* Our non-TSC clock is in real nanoseconds. */
+ return sec*1000000000ULL + nsec;
}
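The retry loop is the reader half of a seqlock-style pattern. Below is a minimal userspace sketch of the same discipline, using C11 acquire loads where the kernel uses rmb(); the shared structure and values are invented for the demonstration.

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

struct shared_time {
	_Atomic unsigned long tv_sec;
	_Atomic unsigned long tv_nsec;
};

static uint64_t read_time(const struct shared_time *t)
{
	unsigned long sec, nsec;

	do {
		sec  = atomic_load_explicit(&t->tv_sec, memory_order_acquire);
		nsec = atomic_load_explicit(&t->tv_nsec, memory_order_acquire);
		/* If the seconds moved underneath us, the pair may be torn. */
	} while (atomic_load_explicit(&t->tv_sec, memory_order_acquire) != sec);

	return (uint64_t)sec * 1000000000ULL + nsec;
}

int main(void)
{
	struct shared_time t = { 99, 999999999 };   /* the dreaded 99.999999999 */

	printf("%llu ns\n", (unsigned long long)read_time(&t));
	return 0;
}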
/* This is what we tell the kernel is our clocksource. */
@@ -361,8 +686,11 @@ static struct clocksource lguest_clock = {
.name = "lguest",
.rating = 400,
.read = lguest_clock_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .mult = 1,
};
+/* The "scheduler clock" is just our real clock, adjusted to start at zero */
static unsigned long long lguest_sched_clock(void)
{
return cyc2ns(&lguest_clock, lguest_clock_read() - clock_base);
@@ -428,34 +756,55 @@ static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
local_irq_restore(flags);
}
+/* At some point in the boot process, we get asked to set up our timing
+ * infrastructure. The kernel doesn't expect timer interrupts before this, but
+ * we cleverly initialized the "blocked_interrupts" field of "struct
+ * lguest_data" so that timer interrupts were blocked until now. */
static void lguest_time_init(void)
{
+ /* Set up the timer interrupt (0) to go to our simple timer routine */
set_irq_handler(0, lguest_time_irq);
- /* We use the TSC if the Host tells us we can, otherwise a dumb
- * jiffies-based clock. */
+ /* Our clock structure looks like the one in arch/i386/kernel/tsc.c if we can
+ * use the TSC; otherwise it's a dumb nanosecond-resolution clock. Either
+ * way, the "rating" is initialized so high that it's always chosen
+ * over any other clocksource. */
if (lguest_data.tsc_khz) {
lguest_clock.shift = 22;
lguest_clock.mult = clocksource_khz2mult(lguest_data.tsc_khz,
lguest_clock.shift);
- lguest_clock.mask = CLOCKSOURCE_MASK(64);
lguest_clock.flags = CLOCK_SOURCE_IS_CONTINUOUS;
- } else {
- /* To understand this, start at kernel/time/jiffies.c... */
- lguest_clock.shift = 8;
- lguest_clock.mult = (((u64)NSEC_PER_SEC<<8)/ACTHZ) << 8;
- lguest_clock.mask = CLOCKSOURCE_MASK(32);
}
clock_base = lguest_clock_read();
clocksource_register(&lguest_clock);
- /* We can't set cpumask in the initializer: damn C limitations! */
+ /* Now we've set up our clock, we can use it as the scheduler clock */
+ paravirt_ops.sched_clock = lguest_sched_clock;
+
+ /* We can't set cpumask in the initializer: damn C limitations! Set it
+ * here and register our timer device. */
lguest_clockevent.cpumask = cpumask_of_cpu(0);
clockevents_register_device(&lguest_clockevent);
+ /* Finally, we unblock the timer interrupt. */
enable_lguest_irq(0);
}
+/*
+ * Miscellaneous bits and pieces.
+ *
+ * Here is an oddball collection of functions which the Guest needs for things
+ * to work. They're pretty simple.
+ */
+
+/* The Guest needs to tell the Host what stack it expects traps to use. For
+ * native hardware, this is part of the Task State Segment mentioned above in
+ * lguest_load_tr_desc(), but to help hypervisors there's this special call.
+ *
+ * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
+ * segment), the privilege level (we're privilege level 1, the Host is 0 and
+ * will not tolerate us trying to use that), the stack pointer, and the number
+ * of pages in the stack. */
static void lguest_load_esp0(struct tss_struct *tss,
struct thread_struct *thread)
{
@@ -463,15 +812,31 @@ static void lguest_load_esp0(struct tss_struct *tss,
THREAD_SIZE/PAGE_SIZE);
}
+/* Let's just say, I wouldn't do debugging under a Guest. */
static void lguest_set_debugreg(int regno, unsigned long value)
{
/* FIXME: Implement */
}
+/* There are times when the kernel wants to make sure that no memory writes are
+ * caught in the cache (that they've all reached real hardware devices). This
+ * doesn't matter for the Guest which has virtual hardware.
+ *
+ * On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
+ * (clflush) instruction is available and the kernel uses that. Otherwise, it
+ * uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
+ * Unlike clflush, wbinvd can only be run at privilege level 0. So we can
+ * ignore clflush, but replace wbinvd.
+ */
static void lguest_wbinvd(void)
{
}
+/* If the Guest expects to have an Advanced Programmable Interrupt Controller,
+ * we play dumb by ignoring writes and returning 0 for reads. So it's no
+ * longer Programmable nor Controlling anything, and I don't think 8 lines of
+ * code qualifies for Advanced. It will also never interrupt anything. It
+ * does, however, allow us to get through the Linux boot code. */
#ifdef CONFIG_X86_LOCAL_APIC
static void lguest_apic_write(unsigned long reg, unsigned long v)
{
@@ -483,19 +848,32 @@ static unsigned long lguest_apic_read(unsigned long reg)
}
#endif
+/* STOP! Until an interrupt comes in. */
static void lguest_safe_halt(void)
{
hcall(LHCALL_HALT, 0, 0, 0);
}
+/* Perhaps CRASH isn't the best name for this hypercall, but we use it to get a
+ * message out when we're crashing, as well as for elegant termination like powering
+ * off.
+ *
+ * Note that the Host always prefers that the Guest speak in physical addresses
+ * rather than virtual addresses, so we use __pa() here. */
static void lguest_power_off(void)
{
hcall(LHCALL_CRASH, __pa("Power down"), 0, 0);
}
+/*
+ * Panicing.
+ *
+ * Don't. But if you did, this is what happens.
+ */
static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
{
hcall(LHCALL_CRASH, __pa(p), 0, 0);
+ /* The hcall won't return, but to keep gcc happy, we're "done". */
return NOTIFY_DONE;
}
@@ -503,15 +881,45 @@ static struct notifier_block paniced = {
.notifier_call = lguest_panic
};
+/* Setting up memory is fairly easy. */
static __init char *lguest_memory_setup(void)
{
- /* We do this here because lockcheck barfs if before start_kernel */
+ /* We do this here and not earlier because lockcheck barfs if we do it
+ * before start_kernel() */
atomic_notifier_chain_register(&panic_notifier_list, &paniced);
+ /* The Linux bootloader header contains an "e820" memory map: the
+ * Launcher populated the first entry with our memory limit. */
add_memory_region(E820_MAP->addr, E820_MAP->size, E820_MAP->type);
+
+ /* This string is for the boot messages. */
return "LGUEST";
}
+/*G:050
+ * Patching (Powerfully Placating Performance Pedants)
+ *
+ * We have already seen that "struct paravirt_ops" lets us replace simple
+ * native instructions with calls to the appropriate back end all throughout
+ * the kernel. This allows the same kernel to run as a Guest and as a native
+ * kernel, but it's slow because of all the indirect branches.
+ *
+ * Remember that David Wheeler quote about "Any problem in computer science can
+ * be solved with another layer of indirection"? The rest of that quote is
+ * "... But that usually will create another problem." This is the first of
+ * those problems.
+ *
+ * Our current solution is to allow the paravirt back end to optionally patch
+ * over the indirect calls to replace them with something more efficient. We
+ * patch the four most commonly called functions: disable interrupts, enable
+ * interrupts, restore interrupts and save interrupts. We usually have 10
+ * bytes to patch into: the Guest versions of these operations are small enough
+ * that we can fit comfortably.
+ *
+ * First we need assembly templates of each of the patchable Guest operations,
+ * and these are in lguest_asm.S. */
+
+/*G:060 We construct a table from the assembler templates: */
static const struct lguest_insns
{
const char *start, *end;
@@ -521,35 +929,52 @@ static const struct lguest_insns
[PARAVIRT_PATCH(restore_fl)] = { lgstart_popf, lgend_popf },
[PARAVIRT_PATCH(save_fl)] = { lgstart_pushf, lgend_pushf },
};
+
+/* Now our patch routine is fairly simple (based on the native one in
+ * paravirt.c). If we have a replacement, we copy it in and return how much of
+ * the available space we used. */
static unsigned lguest_patch(u8 type, u16 clobber, void *insns, unsigned len)
{
unsigned int insn_len;
- /* Don't touch it if we don't have a replacement */
+ /* Don't do anything special if we don't have a replacement */
if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
return paravirt_patch_default(type, clobber, insns, len);
insn_len = lguest_insns[type].end - lguest_insns[type].start;
- /* Similarly if we can't fit replacement. */
+ /* Similarly if we can't fit the replacement (shouldn't happen, but let's
+ * be thorough). */
if (len < insn_len)
return paravirt_patch_default(type, clobber, insns, len);
+ /* Copy in our instructions. */
memcpy(insns, lguest_insns[type].start, insn_len);
return insn_len;
}
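The decision logic boils down to "copy the template if it exists and fits, otherwise leave the call alone". A userspace sketch, with arbitrary bytes standing in for the real instruction templates:

#include <stdio.h>
#include <string.h>

struct template { const unsigned char *start; unsigned int len; };

/* If we have a template and it fits in the patch site, copy it in and report
 * its length; otherwise report 0 (the kernel would fall back to the default
 * indirect-call patching instead). */
static unsigned int patch_site(unsigned char *site, unsigned int room,
			       const struct template *t)
{
	if (!t->start || t->len > room)
		return 0;
	memcpy(site, t->start, t->len);
	return t->len;
}

int main(void)
{
	unsigned char site[10] = { 0 };
	static const unsigned char cli_like[] = { 0xAA, 0xBB, 0xCC };
	struct template t = { cli_like, sizeof(cli_like) };

	unsigned int used = patch_site(site, sizeof(site), &t);
	printf("patched %u of %u bytes\n", used, (unsigned int)sizeof(site));
	return 0;
}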
+/*G:030 Once we get to lguest_init(), we know we're a Guest. The paravirt_ops
+ * structure in the kernel provides a single point for (almost) every routine
+ * we have to override to avoid privileged instructions. */
__init void lguest_init(void *boot)
{
- /* Copy boot parameters first. */
+ /* Copy boot parameters first: the Launcher put the physical location
+ * in %esi, and head.S converted that to a virtual address and handed
+ * it to us. */
memcpy(&boot_params, boot, PARAM_SIZE);
+ /* The boot parameters also tell us where the command-line is: save
+ * that, too. */
memcpy(boot_command_line, __va(boot_params.hdr.cmd_line_ptr),
COMMAND_LINE_SIZE);
+ /* We're under lguest, paravirt is enabled, and we're running at
+ * privilege level 1, not 0 as normal. */
paravirt_ops.name = "lguest";
paravirt_ops.paravirt_enabled = 1;
paravirt_ops.kernel_rpl = 1;
+ /* We set up all the lguest overrides for sensitive operations. These
+ * are detailed with the operations themselves. */
paravirt_ops.save_fl = save_fl;
paravirt_ops.restore_fl = restore_fl;
paravirt_ops.irq_disable = irq_disable;
@@ -592,21 +1017,50 @@ __init void lguest_init(void *boot)
paravirt_ops.time_init = lguest_time_init;
paravirt_ops.set_lazy_mode = lguest_lazy_mode;
paravirt_ops.wbinvd = lguest_wbinvd;
- paravirt_ops.sched_clock = lguest_sched_clock;
-
+ /* Now is a good time to look at the implementations of these functions
+ * before returning to the rest of lguest_init(). */
+
+ /*G:070 Now we've seen all the paravirt_ops, we return to
+ * lguest_init() where the rest of the fairly chaotic boot setup
+ * occurs.
+ *
+ * The Host expects our first hypercall to tell it where our "struct
+ * lguest_data" is, so we do that first. */
hcall(LHCALL_LGUEST_INIT, __pa(&lguest_data), 0, 0);
- /* We use top of mem for initial pagetables. */
+ /* The native boot code sets up initial page tables immediately after
+ * the kernel itself, and sets init_pg_tables_end so they're not
+ * clobbered. The Launcher places our initial pagetables somewhere at
+ * the top of our physical memory, so we don't need extra space: set
+ * init_pg_tables_end to the end of the kernel. */
init_pg_tables_end = __pa(pg0);
+ /* Load the %fs segment register (the per-cpu segment register) with
+ * the normal data segment to get through booting. */
asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");
+ /* Clear the part of the kernel data which is expected to be zero.
+ * Normally it will be anyway, but if we're loading from a bzImage with
+ * CONFIG_RELOCATABLE=y, the relocations will be sitting here. */
+ memset(__bss_start, 0, __bss_stop - __bss_start);
+
+ /* The Host uses the top of the Guest's virtual address space for the
+ * Host<->Guest Switcher, and it tells us how much it needs in
+ * lguest_data.reserve_mem, set up on the LGUEST_INIT hypercall. */
reserve_top_address(lguest_data.reserve_mem);
+ /* If we don't initialize the lock dependency checker now, it crashes
+ * paravirt_disable_iospace. */
lockdep_init();
+ /* The IDE code spends about 3 seconds probing for disks: if we reserve
+ * all the I/O ports up front it can't get them and so doesn't probe.
+ * Other device drivers are similar (but less severe). This cuts the
+ * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. */
paravirt_disable_iospace();
+ /* This is messy CPU setup stuff which the native boot code does before
+ * start_kernel, so we have to do it, too: */
cpu_detect(&new_cpu_data);
/* head.S usually sets up the first capability word, so do it here. */
new_cpu_data.x86_capability[0] = cpuid_edx(1);
@@ -617,14 +1071,27 @@ __init void lguest_init(void *boot)
#ifdef CONFIG_X86_MCE
mce_disabled = 1;
#endif
-
#ifdef CONFIG_ACPI
acpi_disabled = 1;
acpi_ht = 0;
#endif
+ /* We set the preferred console to "hvc". This is the "hypervisor
+ * virtual console" driver written by the PowerPC people, which we also
+ * adapted for lguest's use. */
add_preferred_console("hvc", 0, NULL);
+ /* Last of all, we set the power management poweroff hook to point to
+ * the Guest routine to power off. */
pm_power_off = lguest_power_off;
+
+ /* Now we're set up, call start_kernel() in init/main.c and we proceed
+ * to boot as normal. It never returns. */
start_kernel();
}
+/*
+ * This marks the end of stage II of our journey, The Guest.
+ *
+ * It is now time for us to explore the nooks and crannies of the three Guest
+ * devices and complete our understanding of the Guest in "make Drivers".
+ */
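Before moving on to the assembler file: a minimal sketch of the four interrupt-flag overrides installed above. Judging by the LGUEST_PATCH templates in lguest_asm.S below (which write lguest_data+LGUEST_DATA_irq_enabled directly), they simply mirror the interrupt flag into lguest_data.irq_enabled rather than touching the real one.

/* Sketch of the interrupt-flag paravirt overrides: the Guest never really
 * disables interrupts, it just asks the Host not to deliver them by
 * clearing lguest_data.irq_enabled (the inline patches below do the same
 * thing without the function call). */
static unsigned long save_fl(void)
{
        return lguest_data.irq_enabled;
}

static void restore_fl(unsigned long flags)
{
        lguest_data.irq_enabled = flags;
}

static void irq_disable(void)
{
        lguest_data.irq_enabled = 0;
}

static void irq_enable(void)
{
        lguest_data.irq_enabled = X86_EFLAGS_IF;
}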
diff --git a/drivers/lguest/lguest_asm.S b/drivers/lguest/lguest_asm.S
index a3dbf22ee36..f182c6a3620 100644
--- a/drivers/lguest/lguest_asm.S
+++ b/drivers/lguest/lguest_asm.S
@@ -4,15 +4,15 @@
#include <asm/thread_info.h>
#include <asm/processor-flags.h>
-/*
- * This is where we begin: we have a magic signature which the launcher looks
- * for. The plan is that the Linux boot protocol will be extended with a
+/*G:020 This is where we begin: we have a magic signature which the launcher
+ * looks for. The plan is that the Linux boot protocol will be extended with a
* "platform type" field which will guide us here from the normal entry point,
- * but for the moment this suffices. We pass the virtual address of the boot
- * info to lguest_init().
+ * but for the moment this suffices. The normal boot code uses %esi for the
+ * boot header, so we do too. We convert it to a virtual address by adding
+ * PAGE_OFFSET, and hand it to lguest_init() as its argument (ie. %eax).
*
- * We put it in .init.text will be discarded after boot.
- */
+ * The .section line puts this code in .init.text so it will be discarded after
+ * boot. */
.section .init.text, "ax", @progbits
.ascii "GenuineLguest"
/* Set up initial stack. */
@@ -21,7 +21,9 @@
addl $__PAGE_OFFSET, %eax
jmp lguest_init
-/* The templates for inline patching. */
+/*G:055 We create a macro which puts the assembler code between lgstart_ and
+ * lgend_ markers. These templates end up in the .init.text section, so they
+ * are discarded after boot. */
#define LGUEST_PATCH(name, insns...) \
lgstart_##name: insns; lgend_##name:; \
.globl lgstart_##name; .globl lgend_##name
@@ -30,24 +32,61 @@ LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(sti, movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
+/*:*/
.text
/* These demark the EIP range where host should never deliver interrupts. */
.global lguest_noirq_start
.global lguest_noirq_end
-/*
- * We move eflags word to lguest_data.irq_enabled to restore interrupt state.
- * For page faults, gpfs and virtual interrupts, the hypervisor has saved
- * eflags manually, otherwise it was delivered directly and so eflags reflects
- * the real machine IF state, ie. interrupts on. Since the kernel always dies
- * if it takes such a trap with interrupts disabled anyway, turning interrupts
- * back on unconditionally here is OK.
- */
+/*M:004 When the Host reflects a trap or injects an interrupt into the Guest,
+ * it sets the eflags interrupt bit on the stack based on
+ * lguest_data.irq_enabled, so the Guest iret logic does the right thing when
+ * restoring it. However, when the Host sets the Guest up for direct traps,
+ * such as system calls, the processor is the one to push eflags onto the
+ * stack, and the interrupt bit will be 1 (in reality, interrupts are always
+ * enabled in the Guest).
+ *
+ * This turns out to be harmless: the only trap which should happen under Linux
+ * with interrupts disabled is Page Fault (due to our lazy mapping of vmalloc
+ * regions), which has to be reflected through the Host anyway. If another
+ * trap *does* go off when interrupts are disabled, the Guest will panic, and
+ * we'll never get to this iret! :*/
+
+/*G:045 There is one final paravirt_op that the Guest implements, and glancing
+ * at it you can see why I left it to last. It's *cool*! It's in *assembler*!
+ *
+ * The "iret" instruction is used to return from an interrupt or trap. The
+ * stack looks like this:
+ * old address
+ * old code segment & privilege level
+ * old processor flags ("eflags")
+ *
+ * The "iret" instruction pops those values off the stack and restores them all
+ * at once. The only problem is that eflags includes the Interrupt Flag which
+ * the Guest can't change: the CPU will simply ignore it when we do an "iret".
+ * So we have to copy eflags from the stack to lguest_data.irq_enabled before
+ * we do the "iret".
+ *
+ * There are two problems with this: firstly, we need to use a register to do
+ * the copy and secondly, the whole thing needs to be atomic. The first
+ * problem is easy to solve: push %eax on the stack so we can use it, and then
+ * restore it at the end just before the real "iret".
+ *
+ * The second is harder: copying eflags to lguest_data.irq_enabled will turn
+ * interrupts on before we're finished, so we could be interrupted before we
+ * return to userspace or wherever. Our solution to this is to surround the
+ * code with lguest_noirq_start: and lguest_noirq_end: labels. We tell the
+ * Host that it is *never* to interrupt us there, even if interrupts seem to be
+ * enabled. */
ENTRY(lguest_iret)
pushl %eax
movl 12(%esp), %eax
lguest_noirq_start:
+ /* Note the %ss: segment prefix here. Normal data accesses use the
+ * "ds" segment, but that will have already been restored for whatever
+ * we're returning to (such as userspace): we can't trust it. The %ss:
+ * prefix makes sure we use the stack segment, which is still valid. */
movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled
popl %eax
iret
diff --git a/drivers/lguest/lguest_bus.c b/drivers/lguest/lguest_bus.c
index 18d6ab21a43..55a7940ca73 100644
--- a/drivers/lguest/lguest_bus.c
+++ b/drivers/lguest/lguest_bus.c
@@ -1,3 +1,6 @@
+/*P:050 Lguest guests use a very simple bus for devices. It's a simple array
+ * of device descriptors contained just above the top of normal memory. The
+ * lguest bus is 80% tedious boilerplate code. :*/
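A minimal sketch of what one entry in that array needs to hold for the code in this file to work; only the "type" and "status" members are used below, and the real "struct lguest_device_desc" (defined in the lguest launcher header) carries more than this.

/* Sketch only: the two members this file relies on. */
struct lguest_device_desc_sketch {
        u16 type;       /* 0 means "no device"; otherwise selects a driver */
        u16 status;     /* LGUEST_DEVICE_S_* bits, updated as probing runs */
};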
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/lguest_bus.h>
@@ -43,6 +46,10 @@ static struct device_attribute lguest_dev_attrs[] = {
__ATTR_NULL
};
+/*D:130 The generic bus infrastructure requires a function which says whether a
+ * device matches a driver. For us, it is simple: "struct lguest_driver"
+ * contains a "device_type" field which indicates what type of device it can
+ * handle, so we just cast the args and compare: */
static int lguest_dev_match(struct device *_dev, struct device_driver *_drv)
{
struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
@@ -50,6 +57,7 @@ static int lguest_dev_match(struct device *_dev, struct device_driver *_drv)
return (drv->device_type == lguest_devices[dev->index].type);
}
+/*:*/
struct lguest_bus {
struct bus_type bus;
@@ -68,11 +76,24 @@ static struct lguest_bus lguest_bus = {
}
};
+/*D:140 This is the callback which occurs once the bus infrastructure matches
+ * up a device and driver, ie. in response to add_lguest_device() calling
+ * device_register(), or register_lguest_driver() calling driver_register().
+ *
+ * At the moment it's always the latter: the devices are added first, since
+ * scan_devices() is called from a "core_initcall", and the drivers themselves
+ * called later as a normal "initcall". But it would work the other way too.
+ *
+ * So now we have the happy couple, we add the status bit to indicate that we
+ * found a driver. If the driver truly loves the device, it will return
+ * happiness from its probe function (ok, perhaps this wasn't my greatest
+ * analogy), and we set the final "driver ok" bit so the Host sees it's all
+ * green. */
static int lguest_dev_probe(struct device *_dev)
{
int ret;
- struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
- struct lguest_driver *drv = container_of(dev->dev.driver,
+ struct lguest_device*dev = container_of(_dev,struct lguest_device,dev);
+ struct lguest_driver*drv = container_of(dev->dev.driver,
struct lguest_driver, drv);
lguest_devices[dev->index].status |= LGUEST_DEVICE_S_DRIVER;
@@ -82,6 +103,10 @@ static int lguest_dev_probe(struct device *_dev)
return ret;
}
+/* The last part of the bus infrastructure is the function lguest drivers use
+ * to register themselves. Firstly, we do nothing if there's no lguest bus
+ * (ie. this is not a Guest), otherwise we fill in the embedded generic "struct
+ * driver" fields and call the generic driver_register(). */
int register_lguest_driver(struct lguest_driver *drv)
{
if (!lguest_devices)
@@ -94,12 +119,36 @@ int register_lguest_driver(struct lguest_driver *drv)
return driver_register(&drv->drv);
}
+
+/* At the moment we build all the drivers into the kernel because they're so
+ * simple: 8144 bytes for all three of them as I type this. And as the console
+ * really needs to be built in, it's actually only 3527 bytes for the network
+ * and block drivers.
+ *
+ * If they get complex it will make sense for them to be modularized, so we
+ * need to explicitly export the symbol.
+ *
+ * I don't think non-GPL modules make sense, so it's a GPL-only export.
+ */
EXPORT_SYMBOL_GPL(register_lguest_driver);
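As a usage sketch, a driver registers itself roughly like this. The "device_type" member and the embedded "drv" are visible in this file; the "name", "owner" and "probe" members are assumptions based on how the bus uses the structure, and the device type number is made up.

/* Illustrative only: a minimal lguest driver registering with the bus. */
static int example_probe(struct lguest_device *lgdev)
{
        /* Returning 0 here lets the bus set the final "driver ok" bit. */
        return 0;
}

static struct lguest_driver example_driver = {
        .name = "lguest_example",
        .owner = THIS_MODULE,
        .device_type = 42,      /* made-up descriptor type */
        .probe = example_probe,
};

static int __init example_init(void)
{
        return register_lguest_driver(&example_driver);
}
module_init(example_init);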
+/*D:120 This is the core of the lguest bus: actually adding a new device.
+ * It's a separate function because it's neater that way, and because an
+ * earlier version of the code supported hotplug and unplug. They were removed
+ * early on because they were never used.
+ *
+ * As Andrew Tridgell says, "Untested code is buggy code".
+ *
+ * It's worth reading this carefully: we start with an index into the array of
+ * "struct lguest_device_desc"s indicating the device which is new: */
static void add_lguest_device(unsigned int index)
{
struct lguest_device *new;
+ /* Each "struct lguest_device_desc" has a "status" field, which the
+ * Guest updates as the device is probed. In the worst case, the Host
+ * can look at these bits to tell what part of device setup failed,
+ * even if the console isn't available. */
lguest_devices[index].status |= LGUEST_DEVICE_S_ACKNOWLEDGE;
new = kmalloc(sizeof(struct lguest_device), GFP_KERNEL);
if (!new) {
@@ -108,12 +157,17 @@ static void add_lguest_device(unsigned int index)
return;
}
+ /* The "struct lguest_device" setup is pretty straight-forward example
+ * code. */
new->index = index;
new->private = NULL;
memset(&new->dev, 0, sizeof(new->dev));
new->dev.parent = &lguest_bus.dev;
new->dev.bus = &lguest_bus.bus;
sprintf(new->dev.bus_id, "%u", index);
+
+ /* device_register() causes the bus infrastructure to look for a
+ * matching driver. */
if (device_register(&new->dev) != 0) {
printk(KERN_EMERG "Cannot register lguest device %u\n", index);
lguest_devices[index].status |= LGUEST_DEVICE_S_FAILED;
@@ -121,6 +175,9 @@ static void add_lguest_device(unsigned int index)
}
}
+/*D:110 scan_devices() simply iterates through the device array. The type 0
+ * is reserved to mean "no device", and anything else means we have found a
+ * device: add it. */
static void scan_devices(void)
{
unsigned int i;
@@ -130,12 +187,23 @@ static void scan_devices(void)
add_lguest_device(i);
}
+/*D:100 Fairly early in boot, lguest_bus_init() is called to set up the lguest
+ * bus. We check that we are a Guest by checking paravirt_ops.name: there are
+ * other ways of checking, but this seems most obvious to me.
+ *
+ * So we can access the array of "struct lguest_device_desc"s easily, we map
+ * that memory and store the pointer in the global "lguest_devices". Then we
+ * register the bus with the core. Doing two registrations seems clunky to me,
+ * but it seems to be the correct sysfs incantation.
+ *
+ * Finally we call scan_devices() which adds all the devices found in the
+ * "struct lguest_device_desc" array. */
static int __init lguest_bus_init(void)
{
if (strcmp(paravirt_ops.name, "lguest") != 0)
return 0;
- /* Devices are in page above top of "normal" mem. */
+ /* Devices are in a single page above top of "normal" mem */
lguest_devices = lguest_map(max_pfn<<PAGE_SHIFT, 1);
if (bus_register(&lguest_bus.bus) != 0
@@ -145,4 +213,5 @@ static int __init lguest_bus_init(void)
scan_devices();
return 0;
}
+/* Do this after core stuff, before devices. */
postcore_initcall(lguest_bus_init);
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index e90d7a783da..80d1b58c769 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -1,36 +1,70 @@
-/* Userspace control of the guest, via /dev/lguest. */
+/*P:200 This contains all the /dev/lguest code, whereby the userspace launcher
+ * controls and communicates with the Guest. For example, the first write will
+ * tell us the memory size, pagetable, entry point and kernel address offset.
+ * A read will run the Guest until a signal is pending (-EINTR), or the Guest
+ * does a DMA out to the Launcher. Writes are also used to get a DMA buffer
+ * registered by the Guest and to send the Guest an interrupt. :*/
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include "lg.h"
+/*L:030 setup_regs() doesn't really belong in this file, but it gives us an
+ * early glimpse deeper into the Host so it's worth having here.
+ *
+ * Most of the Guest's registers are left alone: we used get_zeroed_page() to
+ * allocate the structure, so they will be 0. */
static void setup_regs(struct lguest_regs *regs, unsigned long start)
{
- /* Write out stack in format lguest expects, so we can switch to it. */
+ /* There are four "segment" registers which the Guest needs to boot:
+ * The "code segment" register (cs) refers to the kernel code segment
+ * __KERNEL_CS, and the "data", "extra" and "stack" segment registers
+ * refer to the kernel data segment __KERNEL_DS.
+ *
+ * The privilege level is packed into the lower bits. The Guest runs
+ * at privilege level 1 (GUEST_PL).*/
regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL;
regs->cs = __KERNEL_CS|GUEST_PL;
- regs->eflags = 0x202; /* Interrupts enabled. */
+
+ /* The "eflags" register contains miscellaneous flags. Bit 1 (0x002)
+ * is supposed to always be "1". Bit 9 (0x200) controls whether
+ * interrupts are enabled. We always leave interrupts enabled while
+ * running the Guest. */
+ regs->eflags = 0x202;
+
+ /* The "Extended Instruction Pointer" register says where the Guest is
+ * running. */
regs->eip = start;
- /* esi points to our boot information (physical address 0) */
+
+ /* %esi points to our boot information, at physical address 0, so don't
+ * touch it. */
}
-/* + addr */
+/*L:310 To send DMA into the Guest, the Launcher needs to be able to ask for a
+ * DMA buffer. This is done by writing LHREQ_GETDMA and the key to
+ * /dev/lguest. */
static long user_get_dma(struct lguest *lg, const u32 __user *input)
{
unsigned long key, udma, irq;
+ /* Fetch the key they wrote to us. */
if (get_user(key, input) != 0)
return -EFAULT;
+ /* Look for a free Guest DMA buffer bound to that key. */
udma = get_dma_buffer(lg, key, &irq);
if (!udma)
return -ENOENT;
- /* We put irq number in udma->used_len. */
+ /* We need to tell the Launcher what interrupt the Guest expects after
+ * the buffer is filled. We stash it in udma->used_len. */
lgwrite_u32(lg, udma + offsetof(struct lguest_dma, used_len), irq);
+
+ /* The (guest-physical) address of the DMA buffer is returned from
+ * the write(). */
return udma;
}
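Seen from the Launcher's side this is nothing more than a two-word write(); a userspace sketch, assuming the LHREQ_GETDMA constant comes from the launcher ABI header:

#include <stdint.h>
#include <unistd.h>

/* Ask the Host for a Guest DMA buffer bound to "key".  On success the
 * write() returns the buffer's Guest-physical address; if no buffer is
 * bound to that key it fails with ENOENT (see user_get_dma() above). */
static long launcher_get_dma(int lguest_fd, uint32_t key)
{
        uint32_t req[2] = { LHREQ_GETDMA, key };

        return write(lguest_fd, req, sizeof(req));
}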
-/* To force the Guest to stop running and return to the Launcher, the
+/*L:315 To force the Guest to stop running and return to the Launcher, the
* Waker sets writes LHREQ_BREAK and the value "1" to /dev/lguest. The
* Launcher then writes LHREQ_BREAK and "0" to release the Waker. */
static int break_guest_out(struct lguest *lg, const u32 __user *input)
@@ -54,7 +88,8 @@ static int break_guest_out(struct lguest *lg, const u32 __user *input)
}
}
-/* + irq */
+/*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
+ * number to /dev/lguest. */
static int user_send_irq(struct lguest *lg, const u32 __user *input)
{
u32 irq;
@@ -63,14 +98,19 @@ static int user_send_irq(struct lguest *lg, const u32 __user *input)
return -EFAULT;
if (irq >= LGUEST_IRQS)
return -EINVAL;
+ /* Next time the Guest runs, the core code will see if it can deliver
+ * this interrupt. */
set_bit(irq, lg->irqs_pending);
return 0;
}
+/*L:040 Once our Guest is initialized, the Launcher makes it run by reading
+ * from /dev/lguest. */
static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
{
struct lguest *lg = file->private_data;
+ /* You must write LHREQ_INITIALIZE first! */
if (!lg)
return -EINVAL;
@@ -78,27 +118,52 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
if (current != lg->tsk)
return -EPERM;
+ /* If the guest is already dead, we indicate why */
if (lg->dead) {
size_t len;
+ /* lg->dead either contains an error code, or a string. */
if (IS_ERR(lg->dead))
return PTR_ERR(lg->dead);
+ /* We can only return as much as the buffer they read with. */
len = min(size, strlen(lg->dead)+1);
if (copy_to_user(user, lg->dead, len) != 0)
return -EFAULT;
return len;
}
+ /* If we returned from read() last time because the Guest sent DMA,
+ * clear the flag. */
if (lg->dma_is_pending)
lg->dma_is_pending = 0;
+ /* Run the Guest until something interesting happens. */
return run_guest(lg, (unsigned long __user *)user);
}
-/* Take: pfnlimit, pgdir, start, pageoffset. */
+/*L:020 The initialization write supplies 4 32-bit values (in addition to the
+ * 32-bit LHREQ_INITIALIZE value). These are:
+ *
+ * pfnlimit: The highest (Guest-physical) page number the Guest should be
+ * allowed to access. The Launcher has to live in Guest memory, so it sets
+ * this to ensure the Guest can't reach it.
+ *
+ * pgdir: The (Guest-physical) address of the top of the initial Guest
+ * pagetables (which are set up by the Launcher).
+ *
+ * start: The first instruction to execute ("eip" in x86-speak).
+ *
+ * page_offset: The PAGE_OFFSET constant in the Guest kernel. We should
+ * probably wean the code off this, but it's a very useful constant! Any
+ * address above this is within the Guest kernel, and any kernel address can
+ * be quickly converted from physical to virtual by adding PAGE_OFFSET. It's
+ * 0xC0000000 (3G) by default, but it's configurable at kernel build time.
+ */
static int initialize(struct file *file, const u32 __user *input)
{
+ /* "struct lguest" contains everything we (the Host) know about a
+ * Guest. */
struct lguest *lg;
int err, i;
u32 args[4];
@@ -106,7 +171,7 @@ static int initialize(struct file *file, const u32 __user *input)
/* We grab the Big Lguest lock, which protects the global array
* "lguests" and multiple simultaneous initializations. */
mutex_lock(&lguest_lock);
-
+ /* You can't initialize twice! Close the device and start again... */
if (file->private_data) {
err = -EBUSY;
goto unlock;
@@ -117,37 +182,70 @@ static int initialize(struct file *file, const u32 __user *input)
goto unlock;
}
+ /* Find an unused guest. */
i = find_free_guest();
if (i < 0) {
err = -ENOSPC;
goto unlock;
}
+ /* OK, we have an index into the "lguest" array: "lg" is a convenient
+ * pointer. */
lg = &lguests[i];
+
+ /* Populate the easy fields of our "struct lguest" */
lg->guestid = i;
lg->pfn_limit = args[0];
lg->page_offset = args[3];
+
+ /* We need a complete page for the Guest registers: they are accessible
+ * to the Guest and we can only grant it access to whole pages. */
lg->regs_page = get_zeroed_page(GFP_KERNEL);
if (!lg->regs_page) {
err = -ENOMEM;
goto release_guest;
}
+ /* We actually put the registers at the bottom of the page. */
lg->regs = (void *)lg->regs_page + PAGE_SIZE - sizeof(*lg->regs);
+ /* Initialize the Guest's shadow page tables, using the toplevel
+ * address the Launcher gave us. This allocates memory, so can
+ * fail. */
err = init_guest_pagetable(lg, args[1]);
if (err)
goto free_regs;
+ /* Now we initialize the Guest's registers, handing it the start
+ * address. */
setup_regs(lg->regs, args[2]);
+
+ /* There are a couple of GDT entries the Guest expects when first
+ * booting. */
setup_guest_gdt(lg);
+
+ /* The timer for lguest's clock needs initialization. */
init_clockdev(lg);
+
+ /* We keep a pointer to the Launcher task (ie. current task) for when
+ * other Guests want to wake this one (inter-Guest I/O). */
lg->tsk = current;
+ /* We need to keep a pointer to the Launcher's memory map, because if
+ * the Launcher dies we need to clean it up. If we don't keep a
+ * reference, it is destroyed before close() is called. */
lg->mm = get_task_mm(lg->tsk);
+
+ /* Initialize the queue for the waker to wait on */
init_waitqueue_head(&lg->break_wq);
+
+ /* We remember which CPU's pages this Guest used last, for optimization
+ * when the same Guest runs on the same CPU twice. */
lg->last_pages = NULL;
+
+ /* We keep our "struct lguest" in the file's private_data. */
file->private_data = lg;
mutex_unlock(&lguest_lock);
+ /* And because this is a write() call, we return the length used. */
return sizeof(args);
free_regs:
@@ -159,9 +257,15 @@ unlock:
return err;
}
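From the Launcher's side the whole initialization is a single write() of five 32-bit words: LHREQ_INITIALIZE followed by the four values documented above. A userspace sketch (the constant is assumed to come from the launcher ABI header):

#include <stdint.h>
#include <unistd.h>

/* The Launcher's very first operation: one write() carrying
 * LHREQ_INITIALIZE and then pfnlimit, pgdir, start and page_offset. */
static int launcher_initialize(int lguest_fd, uint32_t pfnlimit,
                               uint32_t pgdir, uint32_t start,
                               uint32_t page_offset)
{
        uint32_t args[5] = { LHREQ_INITIALIZE,
                             pfnlimit, pgdir, start, page_offset };

        return write(lguest_fd, args, sizeof(args)) < 0 ? -1 : 0;
}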
+/*L:010 The first operation the Launcher does must be a write. All writes
+ * start with a 32 bit number: for the first write this must be
+ * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use
+ * writes of other values to get DMA buffers and send interrupts. */
static ssize_t write(struct file *file, const char __user *input,
size_t size, loff_t *off)
{
+ /* Once the guest is initialized, we hold the "struct lguest" in the
+ * file private data. */
struct lguest *lg = file->private_data;
u32 req;
@@ -169,8 +273,11 @@ static ssize_t write(struct file *file, const char __user *input,
return -EFAULT;
input += sizeof(req);
+ /* If you haven't initialized, you must do that first. */
if (req != LHREQ_INITIALIZE && !lg)
return -EINVAL;
+
+ /* Once the Guest is dead, all you can do is read() why it died. */
if (lg && lg->dead)
return -ENOENT;
@@ -192,33 +299,72 @@ static ssize_t write(struct file *file, const char __user *input,
}
}
+/*L:060 The final piece of interface code is the close() routine. It reverses
+ * everything done in initialize(). This is usually called because the
+ * Launcher exited.
+ *
+ * Note that the close routine returns 0 or a negative error number: it can't
+ * really fail, but it can whine. I blame Sun for this wart, and K&R C for
+ * letting them do it. :*/
static int close(struct inode *inode, struct file *file)
{
struct lguest *lg = file->private_data;
+ /* If we never successfully initialized, there's nothing to clean up */
if (!lg)
return 0;
+ /* We need the big lock, to protect from inter-guest I/O and other
+ * Launchers initializing guests. */
mutex_lock(&lguest_lock);
/* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
hrtimer_cancel(&lg->hrt);
+ /* Free any DMA buffers the Guest had bound. */
release_all_dma(lg);
+ /* Free up the shadow page tables for the Guest. */
free_guest_pagetable(lg);
+ /* Now all the memory cleanups are done, it's safe to release the
+ * Launcher's memory management structure. */
mmput(lg->mm);
+ /* If lg->dead doesn't contain an error code it will be NULL or a
+ * kmalloc()ed string, either of which is ok to hand to kfree(). */
if (!IS_ERR(lg->dead))
kfree(lg->dead);
+ /* We can free up the register page we allocated. */
free_page(lg->regs_page);
+ /* We clear the entire structure, which also marks it as free for the
+ * next user. */
memset(lg, 0, sizeof(*lg));
+ /* Release lock and exit. */
mutex_unlock(&lguest_lock);
+
return 0;
}
+/*L:000
+ * Welcome to our journey through the Launcher!
+ *
+ * The Launcher is the Host userspace program which sets up, runs and services
+ * the Guest. In fact, many comments in the Drivers which refer to "the Host"
+ * doing things are inaccurate: the Launcher does all the device handling for
+ * the Guest. The Guest can't tell what's done by the Launcher and what by
+ * the Host.
+ *
+ * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we
+ * shall see more of that later.
+ *
+ * We begin our understanding with the Host kernel interface which the Launcher
+ * uses: reading and writing a character device called /dev/lguest. All the
+ * work happens in the read(), write() and close() routines: */
static struct file_operations lguest_fops = {
.owner = THIS_MODULE,
.release = close,
.write = write,
.read = read,
};
+
+/* This is a textbook example of a "misc" character device. Populate a "struct
+ * miscdevice" and register it with misc_register(). */
static struct miscdevice lguest_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "lguest",
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 1b0ba09b126..b7a924ace68 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -1,5 +1,11 @@
-/* Shadow page table operations.
- * Copyright (C) Rusty Russell IBM Corporation 2006.
+/*P:700 The pagetable code, on the other hand, still shows the scars of
+ * previous encounters. It's functional, and as neat as it can be in the
+ * circumstances, but be wary, for these things are subtle and break easily.
+ * The Guest provides a virtual to physical mapping, but we can neither trust
+ * it nor use it: we verify and convert it here to point the hardware to the
+ * actual Guest pages when running the Guest. :*/
+
+/* Copyright (C) Rusty Russell IBM Corporation 2006.
* GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
@@ -9,38 +15,96 @@
#include <asm/tlbflush.h>
#include "lg.h"
+/*M:008 We hold reference to pages, which prevents them from being swapped.
+ * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
+ * to swap out. If we had this, and a shrinker callback to trim PTE pages, we
+ * could probably consider launching Guests as non-root. :*/
+
+/*H:300
+ * The Page Table Code
+ *
+ * We use two-level page tables for the Guest. If you're not entirely
+ * comfortable with virtual addresses, physical addresses and page tables then
+ * I recommend you review lguest.c's "Page Table Handling" (with diagrams!).
+ *
+ * The Guest keeps page tables, but we maintain the actual ones here: these are
+ * called "shadow" page tables. Which is a very Guest-centric name: these are
+ * the real page tables the CPU uses, although we keep them up to date to
+ * reflect the Guest's. (See what I mean about weird naming? Since when do
+ * shadows reflect anything?)
+ *
+ * Anyway, this is the most complicated part of the Host code. There are seven
+ * parts to this:
+ * (i) Setting up a page table entry for the Guest when it faults,
+ * (ii) Setting up the page table entry for the Guest stack,
+ * (iii) Setting up a page table entry when the Guest tells us it has changed,
+ * (iv) Switching page tables,
+ * (v) Flushing (throwing away) page tables,
+ * (vi) Mapping the Switcher when the Guest is about to run,
+ * (vii) Setting up the page tables initially.
+ :*/
+
+/* Pages are 4k long, and each page table entry is 4 bytes long, giving us 1024
+ * (or 2^10) entries per page. */
#define PTES_PER_PAGE_SHIFT 10
#define PTES_PER_PAGE (1 << PTES_PER_PAGE_SHIFT)
+
+/* 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is
+ * conveniently placed at the top 4MB, so it uses a separate, complete PTE
+ * page. */
#define SWITCHER_PGD_INDEX (PTES_PER_PAGE - 1)
+/* We actually need a separate PTE page for each CPU. Remember that after the
+ * Switcher code itself comes two pages for each CPU, and we don't want this
+ * CPU's guest to see the pages of any other CPU. */
static DEFINE_PER_CPU(spte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
+/*H:320 With our shadow and Guest types established, we need to deal with
+ * them: the page table code is curly enough to need helper functions to keep
+ * it clear and clean.
+ *
+ * The first helper takes a virtual address, and says which entry in the top
+ * level page table deals with that address. Since each top level entry deals
+ * with 4M, this effectively divides by 4M. */
static unsigned vaddr_to_pgd_index(unsigned long vaddr)
{
return vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
}
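A quick worked example of that divide-by-4MB, assuming the usual 3G/1G split:

/* PAGE_SHIFT (12) + PTES_PER_PAGE_SHIFT (10) == 22, so each top-level
 * entry covers 4MB.  With the common page_offset of 0xC0000000:
 *
 *      0xC0000000 >> 22 == 768 (the first kernel PGD slot)
 *      0x08048000 >> 22 ==  32 (a typical userspace text address)
 *
 * so flush_user_mappings() below releases entries 0..767 and leaves the
 * kernel's (and the Switcher's) slots alone. */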
-/* These access the shadow versions (ie. the ones used by the CPU). */
+/* There are two functions which return pointers to the shadow (aka "real")
+ * page tables.
+ *
+ * spgd_addr() takes the virtual address and returns a pointer to the top-level
+ * page directory entry for that address. Since we keep track of several page
+ * tables, the "i" argument tells us which one we're interested in (it's
+ * usually the current one). */
static spgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
{
unsigned int index = vaddr_to_pgd_index(vaddr);
+ /* We kill any Guest trying to touch the Switcher addresses. */
if (index >= SWITCHER_PGD_INDEX) {
kill_guest(lg, "attempt to access switcher pages");
index = 0;
}
+ /* Return a pointer to the index'th pgd entry for the i'th page table. */
return &lg->pgdirs[i].pgdir[index];
}
+/* This routine then takes the PGD entry given above, which contains the
+ * address of the PTE page. It then returns a pointer to the PTE entry for the
+ * given address. */
static spte_t *spte_addr(struct lguest *lg, spgd_t spgd, unsigned long vaddr)
{
spte_t *page = __va(spgd.pfn << PAGE_SHIFT);
+ /* You should never call this if the PGD entry wasn't valid */
BUG_ON(!(spgd.flags & _PAGE_PRESENT));
return &page[(vaddr >> PAGE_SHIFT) % PTES_PER_PAGE];
}
-/* These access the guest versions. */
+/* These two functions are just like the above two, except they access the Guest
+ * page tables. Hence they return a Guest address. */
static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
{
unsigned int index = vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
@@ -55,12 +119,24 @@ static unsigned long gpte_addr(struct lguest *lg,
return gpage + ((vaddr>>PAGE_SHIFT) % PTES_PER_PAGE) * sizeof(gpte_t);
}
-/* Do a virtual -> physical mapping on a user page. */
+/*H:350 This routine takes a page number given by the Guest and converts it to
+ * an actual, physical page number. It can fail for several reasons: the
+ * virtual address might not be mapped by the Launcher, the write flag is set
+ * and the page is read-only, or the write flag was set and the page was
+ * shared so had to be copied, but we ran out of memory.
+ *
+ * This holds a reference to the page, so release_pte() is careful to
+ * put that back. */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
struct page *page;
+ /* This value indicates failure. */
unsigned long ret = -1UL;
+ /* get_user_pages() is a complex interface: it gets the "struct
+ * vm_area_struct" and "struct page" assocated with a range of pages.
+ * It also needs the task's mmap_sem held, and is not very quick.
+ * It returns the number of pages it got. */
down_read(&current->mm->mmap_sem);
if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
1, write, 1, &page, NULL) == 1)
@@ -69,28 +145,47 @@ static unsigned long get_pfn(unsigned long virtpfn, int write)
return ret;
}
+/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
+ * entry can be a little tricky. The flags are (almost) the same, but the
+ * Guest PTE contains a virtual page number: the CPU needs the real page
+ * number. */
static spte_t gpte_to_spte(struct lguest *lg, gpte_t gpte, int write)
{
spte_t spte;
unsigned long pfn;
- /* We ignore the global flag. */
+ /* The Guest sets the global flag, because it thinks that it is using
+ * PGE. We only told it to use PGE so it would tell us whether it was
+ * flushing a kernel mapping or a userspace mapping. We don't actually
+ * use the global bit, so throw it away. */
spte.flags = (gpte.flags & ~_PAGE_GLOBAL);
+
+ /* We need a temporary "unsigned long" variable to hold the answer from
+ * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
+ * fit in spte.pfn. get_pfn() finds the real physical number of the
+ * page, given the virtual number. */
pfn = get_pfn(gpte.pfn, write);
if (pfn == -1UL) {
kill_guest(lg, "failed to get page %u", gpte.pfn);
- /* Must not put_page() bogus page on cleanup. */
+ /* When we destroy the Guest, we'll go through the shadow page
+ * tables and release_pte() them. Make sure we don't think
+ * this one is valid! */
spte.flags = 0;
}
+ /* Now we assign the page number, and our shadow PTE is complete. */
spte.pfn = pfn;
return spte;
}
+/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(spte_t pte)
{
+ /* Remember that get_user_pages() took a reference to the page, in
+ * get_pfn()? We have to put it back now. */
if (pte.flags & _PAGE_PRESENT)
put_page(pfn_to_page(pte.pfn));
}
+/*:*/
static void check_gpte(struct lguest *lg, gpte_t gpte)
{
@@ -104,11 +199,16 @@ static void check_gpgd(struct lguest *lg, gpgd_t gpgd)
kill_guest(lg, "bad page directory entry");
}
-/* FIXME: We hold reference to pages, which prevents them from being
- swapped. It'd be nice to have a callback when Linux wants to swap out. */
-
-/* We fault pages in, which allows us to update accessed/dirty bits.
- * Return true if we got page. */
+/*H:330
+ * (i) Setting up a page table entry for the Guest when it faults
+ *
+ * We saw this call in run_guest(): when we see a page fault in the Guest, we
+ * come here. That's because we only set up the shadow page tables lazily as
+ * they're needed, so we get page faults all the time and quietly fix them up
+ * and return to the Guest without it knowing.
+ *
+ * If we fixed up the fault (ie. we mapped the address), this routine returns
+ * true. */
int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
{
gpgd_t gpgd;
@@ -117,106 +217,161 @@ int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
gpte_t gpte;
spte_t *spte;
+ /* First step: get the top-level Guest page table entry. */
gpgd = mkgpgd(lgread_u32(lg, gpgd_addr(lg, vaddr)));
+ /* Toplevel not present? We can't map it in. */
if (!(gpgd.flags & _PAGE_PRESENT))
return 0;
+ /* Now look at the matching shadow entry. */
spgd = spgd_addr(lg, lg->pgdidx, vaddr);
if (!(spgd->flags & _PAGE_PRESENT)) {
- /* Get a page of PTEs for them. */
+ /* No shadow entry: allocate a new shadow PTE page. */
unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
- /* FIXME: Steal from self in this case? */
+ /* This is not really the Guest's fault, but killing it is
+ * simple for this corner case. */
if (!ptepage) {
kill_guest(lg, "out of memory allocating pte page");
return 0;
}
+ /* We check that the Guest pgd is OK. */
check_gpgd(lg, gpgd);
+ /* And we copy the flags to the shadow PGD entry. The page
+ * number in the shadow PGD is the page we just allocated. */
spgd->raw.val = (__pa(ptepage) | gpgd.flags);
}
+ /* OK, now we look at the lower level in the Guest page table: keep its
+ * address, because we might update it later. */
gpte_ptr = gpte_addr(lg, gpgd, vaddr);
gpte = mkgpte(lgread_u32(lg, gpte_ptr));
- /* No page? */
+ /* If this page isn't in the Guest page tables, we can't page it in. */
if (!(gpte.flags & _PAGE_PRESENT))
return 0;
- /* Write to read-only page? */
+ /* Check they're not trying to write to a page the Guest wants
+ * read-only (bit 2 of errcode == write). */
if ((errcode & 2) && !(gpte.flags & _PAGE_RW))
return 0;
- /* User access to a non-user page? */
+ /* User access to a kernel page? (bit 3 == user access) */
if ((errcode & 4) && !(gpte.flags & _PAGE_USER))
return 0;
+ /* Check that the Guest PTE flags are OK, and the page number is below
+ * the pfn_limit (ie. not mapping the Launcher binary). */
check_gpte(lg, gpte);
+ /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
gpte.flags |= _PAGE_ACCESSED;
if (errcode & 2)
gpte.flags |= _PAGE_DIRTY;
- /* We're done with the old pte. */
+ /* Get the pointer to the shadow PTE entry we're going to set. */
spte = spte_addr(lg, *spgd, vaddr);
+ /* If there was a valid shadow PTE entry here before, we release it.
+ * This can happen with a write to a previously read-only entry. */
release_pte(*spte);
- /* We don't make it writable if this isn't a write: later
- * write will fault so we can set dirty bit in guest. */
+ /* If this is a write, we insist that the Guest page is writable (the
+ * final arg to gpte_to_spte()). */
if (gpte.flags & _PAGE_DIRTY)
*spte = gpte_to_spte(lg, gpte, 1);
else {
+ /* If this is a read, don't set the "writable" bit in the page
+ * table entry, even if the Guest says it's writable. That way
+ * we come back here when a write does actually occur, so we can
+ * update the Guest's _PAGE_DIRTY flag. */
gpte_t ro_gpte = gpte;
ro_gpte.flags &= ~_PAGE_RW;
*spte = gpte_to_spte(lg, ro_gpte, 0);
}
- /* Now we update dirty/accessed on guest. */
+ /* Finally, we write the Guest PTE entry back: we've set the
+ * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
lgwrite_u32(lg, gpte_ptr, gpte.raw.val);
+
+ /* We succeeded in mapping the page! */
return 1;
}
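The error-code tests above use the bare values 2 and 4; as a sketch, here they are named. This is the standard x86 page-fault error code layout, nothing lguest-specific:

/* x86 page-fault error code bits used by demand_page(): value 2 means
 * the faulting access was a write, value 4 means it came from userspace. */
#define FAULT_WRITE     2
#define FAULT_USER      4

static inline int fault_was_write(int errcode)
{
        return errcode & FAULT_WRITE;
}

static inline int fault_was_user(int errcode)
{
        return errcode & FAULT_USER;
}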
-/* This is much faster than the full demand_page logic. */
+/*H:360 (ii) Setting up the page table entry for the Guest stack.
+ *
+ * Remember pin_stack_pages() which makes sure the stack is mapped? It could
+ * simply call demand_page(), but as we've seen that logic is quite long, and
+ * usually the stack pages are already mapped anyway, so it's not required.
+ *
+ * This is a quick version which answers the question: is this virtual address
+ * mapped by the shadow page tables, and is it writable? */
static int page_writable(struct lguest *lg, unsigned long vaddr)
{
spgd_t *spgd;
unsigned long flags;
+ /* Look at the top level entry: is it present? */
spgd = spgd_addr(lg, lg->pgdidx, vaddr);
if (!(spgd->flags & _PAGE_PRESENT))
return 0;
+ /* Check the flags on the pte entry itself: it must be present and
+ * writable. */
flags = spte_addr(lg, *spgd, vaddr)->flags;
return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}
+/* So, when pin_stack_pages() asks us to pin a page, we check if it's already
+ * in the page tables, and if not, we call demand_page() with error code 2
+ * (meaning "write"). */
void pin_page(struct lguest *lg, unsigned long vaddr)
{
if (!page_writable(lg, vaddr) && !demand_page(lg, vaddr, 2))
kill_guest(lg, "bad stack page %#lx", vaddr);
}
+/*H:450 If we chase down the release_pgd() code, it looks like this: */
static void release_pgd(struct lguest *lg, spgd_t *spgd)
{
+ /* If the entry's not present, there's nothing to release. */
if (spgd->flags & _PAGE_PRESENT) {
unsigned int i;
+ /* Converting the pfn to find the actual PTE page is easy: turn
+ * the page number into a physical address, then convert to a
+ * virtual address (easy for kernel pages like this one). */
spte_t *ptepage = __va(spgd->pfn << PAGE_SHIFT);
+ /* For each entry in the page, we might need to release it. */
for (i = 0; i < PTES_PER_PAGE; i++)
release_pte(ptepage[i]);
+ /* Now we can free the page of PTEs */
free_page((long)ptepage);
+ /* And zero out the PGD entry so we never release it twice. */
spgd->raw.val = 0;
}
}
+/*H:440 (v) Flushing (throwing away) page tables,
+ *
+ * We saw flush_user_mappings() called when we re-used a top-level pgdir page.
+ * It simply releases every PTE page from 0 up to the kernel address. */
static void flush_user_mappings(struct lguest *lg, int idx)
{
unsigned int i;
+ /* Release every pgd entry up to the kernel's address. */
for (i = 0; i < vaddr_to_pgd_index(lg->page_offset); i++)
release_pgd(lg, lg->pgdirs[idx].pgdir + i);
}
+/* The Guest also has a hypercall to do this manually: it's used when a large
+ * number of mappings have been changed. */
void guest_pagetable_flush_user(struct lguest *lg)
{
+ /* Drop the userspace part of the current page table. */
flush_user_mappings(lg, lg->pgdidx);
}
+/*:*/
+/* We keep several page tables. This is a simple routine to find the page
+ * table (if any) corresponding to this top-level address the Guest has given
+ * us. */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
unsigned int i;
@@ -226,21 +381,30 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
return i;
}
+/*H:435 And this is us, creating the new page directory. If we really do
+ * allocate a new one (and so the kernel parts are not there), we set
+ * blank_pgdir. */
static unsigned int new_pgdir(struct lguest *lg,
unsigned long cr3,
int *blank_pgdir)
{
unsigned int next;
+ /* We pick one entry at random to throw out. Choosing the Least
+ * Recently Used might be better, but this is easy. */
next = random32() % ARRAY_SIZE(lg->pgdirs);
+ /* If it's never been allocated at all before, try now. */
if (!lg->pgdirs[next].pgdir) {
lg->pgdirs[next].pgdir = (spgd_t *)get_zeroed_page(GFP_KERNEL);
+ /* If the allocation fails, just keep using the one we have */
if (!lg->pgdirs[next].pgdir)
next = lg->pgdidx;
else
- /* There are no mappings: you'll need to re-pin */
+ /* This is a blank page, so there are no kernel
+ * mappings: caller must map the stack! */
*blank_pgdir = 1;
}
+ /* Record which Guest toplevel this shadows. */
lg->pgdirs[next].cr3 = cr3;
/* Release all the non-kernel mappings. */
flush_user_mappings(lg, next);
@@ -248,82 +412,161 @@ static unsigned int new_pgdir(struct lguest *lg,
return next;
}
+/*H:430 (iv) Switching page tables
+ *
+ * This is what happens when the Guest changes page tables (ie. changes the
+ * top-level pgdir). This happens on almost every context switch. */
void guest_new_pagetable(struct lguest *lg, unsigned long pgtable)
{
int newpgdir, repin = 0;
+ /* Look to see if we have this one already. */
newpgdir = find_pgdir(lg, pgtable);
+ /* If not, we allocate or mug an existing one: if it's a fresh one,
+ * repin gets set to 1. */
if (newpgdir == ARRAY_SIZE(lg->pgdirs))
newpgdir = new_pgdir(lg, pgtable, &repin);
+ /* Change the current pgd index to the new one. */
lg->pgdidx = newpgdir;
+ /* If it was completely blank, we map in the Guest kernel stack */
if (repin)
pin_stack_pages(lg);
}
+/*H:470 Finally, a routine which throws away everything: all PGD entries in all
+ * the shadow page tables. This is used when we destroy the Guest. */
static void release_all_pagetables(struct lguest *lg)
{
unsigned int i, j;
+ /* Every shadow pagetable this Guest has */
for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
if (lg->pgdirs[i].pgdir)
+ /* Every PGD entry except the Switcher at the top */
for (j = 0; j < SWITCHER_PGD_INDEX; j++)
release_pgd(lg, lg->pgdirs[i].pgdir + j);
}
+/* We also throw away everything when a Guest tells us it's changed a kernel
+ * mapping. Since kernel mappings are in every page table, it's easiest to
+ * throw them all away. This is amazingly slow, but thankfully rare. */
void guest_pagetable_clear_all(struct lguest *lg)
{
release_all_pagetables(lg);
+ /* We need the Guest kernel stack mapped again. */
pin_stack_pages(lg);
}
+/*H:420 This is the routine which actually sets the page table entry for the
+ * "idx"'th shadow page table.
+ *
+ * Normally, we can just throw out the old entry and replace it with 0: if they
+ * use it demand_page() will put the new entry in. We need to do this anyway:
+ * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
+ * is read from, and _PAGE_DIRTY when it's written to.
+ *
+ * But Avi Kivity pointed out that most Operating Systems (Linux included) set
+ * these bits on PTEs immediately anyway. This is done to save the CPU from
+ * having to update them, but it helps us the same way: if they set
+ * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
+ * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
+ */
static void do_set_pte(struct lguest *lg, int idx,
unsigned long vaddr, gpte_t gpte)
{
+ /* Look up the matching shadow page directory entry. */
spgd_t *spgd = spgd_addr(lg, idx, vaddr);
+
+ /* If the top level isn't present, there's no entry to update. */
if (spgd->flags & _PAGE_PRESENT) {
+ /* Otherwise, we start by releasing the existing entry. */
spte_t *spte = spte_addr(lg, *spgd, vaddr);
release_pte(*spte);
+
+ /* If they're setting this entry as dirty or accessed, we might
+ * as well put that entry they've given us in now. This shaves
+ * 10% off a copy-on-write micro-benchmark. */
if (gpte.flags & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
check_gpte(lg, gpte);
*spte = gpte_to_spte(lg, gpte, gpte.flags&_PAGE_DIRTY);
} else
+ /* Otherwise we can demand_page() it in later. */
spte->raw.val = 0;
}
}
+/*H:410 Updating a PTE entry is a little trickier.
+ *
+ * We keep track of several different page tables (the Guest uses one for each
+ * process, so it makes sense to cache at least a few). Each of these has
+ * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
+ * all processes. So when the page table above that address changes, we update
+ * all the page tables, not just the current one. This is rare.
+ *
+ * The benefit is that when we have to track a new page table, we can keep
+ * all the kernel mappings. This speeds up context switching immensely. */
void guest_set_pte(struct lguest *lg,
unsigned long cr3, unsigned long vaddr, gpte_t gpte)
{
- /* Kernel mappings must be changed on all top levels. */
+ /* Kernel mappings must be changed on all top levels. Slow, but
+ * doesn't happen often. */
if (vaddr >= lg->page_offset) {
unsigned int i;
for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
if (lg->pgdirs[i].pgdir)
do_set_pte(lg, i, vaddr, gpte);
} else {
+ /* Is this page table one we have a shadow for? */
int pgdir = find_pgdir(lg, cr3);
if (pgdir != ARRAY_SIZE(lg->pgdirs))
+ /* If so, do the update. */
do_set_pte(lg, pgdir, vaddr, gpte);
}
}
+/*H:400
+ * (iii) Setting up a page table entry when the Guest tells us it has changed.
+ *
+ * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
+ * with the other side of page tables while we're here: what happens when the
+ * Guest asks for a page table to be updated?
+ *
+ * We already saw that demand_page() will fill in the shadow page tables when
+ * needed, so we can simply remove shadow page table entries whenever the Guest
+ * tells us they've changed. When the Guest tries to use the new entry it will
+ * fault and demand_page() will fix it up.
+ *
+ * So with that in mind, here's our code to update a (top-level) PGD entry:
+ */
void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx)
{
int pgdir;
+ /* The kernel seems to try to initialize this early on: we ignore its
+ * attempts to map over the Switcher. */
if (idx >= SWITCHER_PGD_INDEX)
return;
+ /* If they're talking about a page table we have a shadow for... */
pgdir = find_pgdir(lg, cr3);
if (pgdir < ARRAY_SIZE(lg->pgdirs))
+ /* ... throw it away. */
release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
}
+/*H:500 (vii) Setting up the page tables initially.
+ *
+ * When a Guest is first created, the Launcher tells us where the toplevel of
+ * its first page table is. We set some things up here: */
int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
{
- /* We assume this in flush_user_mappings, so check now */
+ /* In flush_user_mappings() we loop from 0 to
+ * "vaddr_to_pgd_index(lg->page_offset)". This assumes it won't hit
+ * the Switcher mappings, so check that now. */
if (vaddr_to_pgd_index(lg->page_offset) >= SWITCHER_PGD_INDEX)
return -EINVAL;
+ /* We start on the first shadow page table, and give it a blank PGD
+ * page. */
lg->pgdidx = 0;
lg->pgdirs[lg->pgdidx].cr3 = pgtable;
lg->pgdirs[lg->pgdidx].pgdir = (spgd_t*)get_zeroed_page(GFP_KERNEL);
@@ -332,33 +575,48 @@ int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
return 0;
}
+/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
unsigned int i;
+ /* Throw away all page table pages. */
release_all_pagetables(lg);
+ /* Now free the top levels: free_page() can handle 0 just fine. */
for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
free_page((long)lg->pgdirs[i].pgdir);
}
-/* Caller must be preempt-safe */
+/*H:480 (vi) Mapping the Switcher when the Guest is about to run.
+ *
+ * The Switcher and the two pages for this CPU need to be available to the
+ * Guest (and not the pages for other CPUs). We have the appropriate PTE pages
+ * for each CPU already set up, we just need to hook them in. */
void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
{
spte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
spgd_t switcher_pgd;
spte_t regs_pte;
- /* Since switcher less that 4MB, we simply mug top pte page. */
+ /* Make the last PGD entry for this Guest point to the Switcher's PTE
+ * page for this CPU (with appropriate flags). */
switcher_pgd.pfn = __pa(switcher_pte_page) >> PAGE_SHIFT;
switcher_pgd.flags = _PAGE_KERNEL;
lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
- /* Map our regs page over stack page. */
+ /* We also change the Switcher PTE page. When we're running the Guest,
+ * we want the Guest's "regs" page to appear where the first Switcher
+ * page for this CPU is. This is an optimization: when the Switcher
+ * saves the Guest registers, it saves them into the first page of this
+ * CPU's "struct lguest_pages": if we make sure the Guest's register
+ * page is already mapped there, we don't have to copy them out
+ * again. */
regs_pte.pfn = __pa(lg->regs_page) >> PAGE_SHIFT;
regs_pte.flags = _PAGE_KERNEL;
switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTES_PER_PAGE]
= regs_pte;
}
+/*:*/
static void free_switcher_pte_pages(void)
{
@@ -368,6 +626,10 @@ static void free_switcher_pte_pages(void)
free_page((long)switcher_pte_page(i));
}
+/*H:520 Setting up the Switcher PTE page for given CPU is fairly easy, given
+ * the CPU number and the "struct page"s for the Switcher code itself.
+ *
+ * Currently the Switcher is less than a page long, so "pages" is always 1. */
static __init void populate_switcher_pte_page(unsigned int cpu,
struct page *switcher_page[],
unsigned int pages)
@@ -375,21 +637,26 @@ static __init void populate_switcher_pte_page(unsigned int cpu,
unsigned int i;
spte_t *pte = switcher_pte_page(cpu);
+ /* The first entries are easy: they map the Switcher code. */
for (i = 0; i < pages; i++) {
pte[i].pfn = page_to_pfn(switcher_page[i]);
pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
}
- /* We only map this CPU's pages, so guest can't see others. */
+ /* The only other thing we map is this CPU's pair of pages. */
i = pages + cpu*2;
- /* First page (regs) is rw, second (state) is ro. */
+ /* First page (Guest registers) is writable from the Guest */
pte[i].pfn = page_to_pfn(switcher_page[i]);
pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW;
+ /* The second page contains the "struct lguest_ro_state", and is
+ * read-only. */
pte[i+1].pfn = page_to_pfn(switcher_page[i+1]);
pte[i+1].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
}
+/*H:510 At boot or module load time, init_pagetables() allocates and populates
+ * the Switcher PTE page for each CPU. */
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
unsigned int i;
@@ -404,7 +671,9 @@ __init int init_pagetables(struct page **switcher_page, unsigned int pages)
}
return 0;
}
+/*:*/
+/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
free_switcher_pte_pages();
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 1b2cfe89dcd..f675a41a80d 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -1,16 +1,68 @@
+/*P:600 The x86 architecture has segments, which involve a table of descriptors
+ * which can be used to do funky things with virtual address interpretation.
+ * We originally used to use segments so the Guest couldn't alter the
+ * Guest<->Host Switcher, and then we had to trim Guest segments, and restore
+ * for userspace per-thread segments, but trim again on userspace->kernel
+ * transitions... This nightmarish creation was contained within this file,
+ * where we knew not to tread without heavy armament and a change of underwear.
+ *
+ * In these modern times, the segment handling code consists of simple sanity
+ * checks, and the worst you'll experience reading this code is butterfly-rash
+ * from frolicking through its parklike serenity. :*/
#include "lg.h"
+/*H:600
+ * We've almost completed the Host; there's just one file to go!
+ *
+ * Segments & The Global Descriptor Table
+ *
+ * (That title sounds like a bad Nerdcore group. Not to suggest that there are
+ * any good Nerdcore groups, but in high school a friend of mine had a band
+ * called Joe Fish and the Chips, so there are definitely worse band names).
+ *
+ * To refresh: the GDT is a table of 8-byte values describing segments. Once
+ * set up, these segments can be loaded into one of the 6 "segment registers".
+ *
+ * GDT entries are passed around as "struct desc_struct"s, which like IDT
+ * entries are split into two 32-bit members, "a" and "b". One day, someone
+ * will clean that up, and be declared a Hero. (No pressure, I'm just saying).
+ *
+ * Anyway, the GDT entry contains a base (the start address of the segment), a
+ * limit (the size of the segment - 1), and some flags. Sounds simple, and it
+ * would be, except those zany Intel engineers decided that it was too boring
+ * to put the base at one end, the limit at the other, and the flags in
+ * between. They decided to shotgun the bits at random throughout the 8 bytes,
+ * like so:
+ *
+ * 0 16 40 48 52 56 63
+ * [ limit part 1 ][ base part 1 ][ flags ][li][fl][base ]
+ * mit ags part 2
+ * part 2
+ *
+ * As a result, this file contains a certain amount of magic numeracy. Let's
+ * begin.
+ */
+
+/* Is the descriptor the Guest wants us to put in OK?
+ *
+ * The flag which Intel says must be zero: must be zero. The descriptor must
+ * be present (this is actually checked earlier but is here for thoroughness),
+ * and the descriptor type must be 1 (a memory segment). */
static int desc_ok(const struct desc_struct *gdt)
{
- /* MBZ=0, P=1, DT=1 */
return ((gdt->b & 0x00209000) == 0x00009000);
}
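To make the scattered layout above concrete, a hedged pair of helpers that pull the base and limit back out of the two 32-bit words (standard x86 descriptor packing, written against the "a"/"b" members described earlier):

/* Sketch: reassembling a descriptor's base and limit from the a/b words. */
static inline unsigned long desc_base(const struct desc_struct *d)
{
        return (d->a >> 16) | ((d->b & 0xFF) << 16) | (d->b & 0xFF000000);
}

static inline unsigned long desc_limit(const struct desc_struct *d)
{
        /* 20 bits of limit: the low 16 in "a", the high 4 in "b". */
        return (d->a & 0xFFFF) | (d->b & 0x000F0000);
}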
+/* Is the segment present? (Otherwise it can't be used by the Guest). */
static int segment_present(const struct desc_struct *gdt)
{
return gdt->b & 0x8000;
}
+/* There are several entries we don't let the Guest set. The TSS entry is the
+ * "Task State Segment" which controls all kinds of delicate things. The
+ * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and
+ * the Guest can't be trusted to deal with double faults. */
static int ignored_gdt(unsigned int num)
{
return (num == GDT_ENTRY_TSS
@@ -19,9 +71,18 @@ static int ignored_gdt(unsigned int num)
|| num == GDT_ENTRY_DOUBLEFAULT_TSS);
}
-/* We don't allow removal of CS, DS or SS; it doesn't make sense. */
+/* If the Guest asks us to remove an entry from the GDT, we have to be careful.
+ * If one of the segment registers is pointing at that entry the Switcher will
+ * crash when it tries to reload the segment registers for the Guest.
+ *
+ * It doesn't make much sense for the Guest to try to remove its own code, data
+ * or stack segments while they're in use: assume that's a Guest bug. If it's
+ * one of the lesser segment registers using the removed entry, we simply set
+ * that register to 0 (unusable). */
static void check_segment_use(struct lguest *lg, unsigned int desc)
{
+ /* GDT entries are 8 bytes long, so we divide to get the index and
+ * ignore the bottom bits. */
if (lg->regs->gs / 8 == desc)
lg->regs->gs = 0;
if (lg->regs->fs / 8 == desc)
@@ -33,13 +94,21 @@ static void check_segment_use(struct lguest *lg, unsigned int desc)
|| lg->regs->ss / 8 == desc)
kill_guest(lg, "Removed live GDT entry %u", desc);
}
-
+/*:*/
+/*M:009 We wouldn't need to check for removal of in-use segments if we handled
+ * faults in the Switcher. However, it's probably not a worthwhile
+ * optimization. :*/
+
+/*H:610 Once the GDT has been changed, we look through the changed entries and
+ * see if they're OK. If not, we'll call kill_guest() and the Guest will never
+ * get to use the invalid entries. */
static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end)
{
unsigned int i;
for (i = start; i < end; i++) {
- /* We never copy these ones to real gdt */
+ /* We never copy these ones to the real GDT, so we don't care what
+ * they say */
if (ignored_gdt(i))
continue;
@@ -53,41 +122,57 @@ static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end)
if (!desc_ok(&lg->gdt[i]))
kill_guest(lg, "Bad GDT descriptor %i", i);
- /* DPL 0 presumably means "for use by guest". */
+ /* Segment descriptors contain a privilege level: the Guest is
+ * sometimes careless and leaves this as 0, even though it's
+ * running at privilege level 1. If so, we fix it here. */
if ((lg->gdt[i].b & 0x00006000) == 0)
lg->gdt[i].b |= (GUEST_PL << 13);
- /* Set accessed bit, since gdt isn't writable. */
+ /* Each descriptor has an "accessed" bit. If we don't set it
+ * now, the CPU will try to set it when the Guest first loads
+ * that entry into a segment register. But the GDT isn't
+ * writable by the Guest, so bad things can happen. */
lg->gdt[i].b |= 0x00000100;
}
}
+/* This routine is called at boot or modprobe time for each CPU to set up the
+ * "constant" GDT entries for Guests running on that CPU. */
void setup_default_gdt_entries(struct lguest_ro_state *state)
{
struct desc_struct *gdt = state->guest_gdt;
unsigned long tss = (unsigned long)&state->guest_tss;
- /* Hypervisor segments. */
+ /* The hypervisor segments are full 0-4G segments, privilege level 0 */
gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
- /* This is the one which we *cannot* copy from guest, since tss
- is depended on this lguest_ro_state, ie. this cpu. */
+ /* The TSS segment refers to the TSS entry for this CPU, so we cannot
+ * copy it from the Guest. Forgive the magic flags. */
gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16);
gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000)
| ((tss >> 16) & 0x000000FF);
}
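
For the curious, those magic numbers decompose roughly as in the sketch below: the same two assignments with the fields spelled out. The breakdown is a best-effort reading of the descriptor layout described at the top of this file.

/* 0x67 is the limit: the 104-byte hardware TSS minus one.  0x89 is the
 * access byte: present, DPL 0, type 9 (available 32-bit TSS). */
gdt[GDT_ENTRY_TSS].a = (0x67 & 0xFFFF)            /* limit bits 0-15  */
		     | ((tss & 0xFFFF) << 16);    /* base bits 0-15   */
gdt[GDT_ENTRY_TSS].b = ((tss >> 16) & 0xFF)       /* base bits 16-23  */
		     | (0x89 << 8)                /* access byte      */
		     | (tss & 0xFF000000);        /* base bits 24-31  */
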
+/* This routine is called before the Guest is run for the first time. */
void setup_guest_gdt(struct lguest *lg)
{
+ /* Start with full 0-4G segments... */
lg->gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
lg->gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
+ /* ...except the Guest is allowed to use them, so set the privilege
+ * level appropriately in the flags. */
lg->gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
lg->gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
}
-/* This is a fast version for the common case where only the three TLS entries
- * have changed. */
+/* Like the IDT, we never simply use the GDT the Guest gives us. We set up the
+ * GDTs for each CPU, then we copy across the entries each time we want to run
+ * a different Guest on that CPU. */
+
+/* A partial GDT load, for the three "thread-local storage" entries. Otherwise
+ * it's just like load_guest_gdt(): so similar, in fact, that it would probably
+ * be neater to have a single hypercall to cover both. */
void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt)
{
unsigned int i;
@@ -96,22 +181,31 @@ void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt)
gdt[i] = lg->gdt[i];
}
+/* This is the full version */
void copy_gdt(const struct lguest *lg, struct desc_struct *gdt)
{
unsigned int i;
+ /* The default entries from setup_default_gdt_entries() are not
+ * replaced. See ignored_gdt() above. */
for (i = 0; i < GDT_ENTRIES; i++)
if (!ignored_gdt(i))
gdt[i] = lg->gdt[i];
}
+/* This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT). */
void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
{
+ /* We assume the Guest has the same number of GDT entries as the
+ * Host, otherwise we'd have to dynamically allocate the Guest GDT. */
if (num > ARRAY_SIZE(lg->gdt))
kill_guest(lg, "too many gdt entries %i", num);
+ /* We read the whole thing in, then fix it up. */
lgread(lg, lg->gdt, table, num * sizeof(lg->gdt[0]));
fixup_gdt_table(lg, 0, ARRAY_SIZE(lg->gdt));
+ /* Mark that the GDT changed so the core knows it has to copy it again,
+ * even if the Guest is run on the same CPU. */
lg->changed |= CHANGED_GDT;
}
@@ -123,3 +217,13 @@ void guest_load_tls(struct lguest *lg, unsigned long gtls)
fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
lg->changed |= CHANGED_GDT_TLS;
}
+
+/*
+ * With this, we have finished the Host.
+ *
+ * Five of the seven parts of our task are complete. You have made it through
+ * the Bit of Despair (I think that's somewhere in the page table code,
+ * myself).
+ *
+ * Next, we examine "make Switcher". It's short, but intense.
+ */
diff --git a/drivers/lguest/switcher.S b/drivers/lguest/switcher.S
index eadd4cc299d..d418179ea6b 100644
--- a/drivers/lguest/switcher.S
+++ b/drivers/lguest/switcher.S
@@ -1,45 +1,136 @@
-/* This code sits at 0xFFC00000 to do the low-level guest<->host switch.
+/*P:900 This is the Switcher: code which sits at 0xFFC00000 to do the low-level
+ * Guest<->Host switch. It is as simple as it can be made, but it's naturally
+ * very specific to x86.
+ *
+ * You have now completed Preparation. If this has whetted your appetite; if you
+ * are feeling invigorated and refreshed then the next, more challenging stage
+ * can be found in "make Guest". :*/
- There is are two pages above us for this CPU (struct lguest_pages).
- The second page (struct lguest_ro_state) becomes read-only after the
- context switch. The first page (the stack for traps) remains writable,
- but while we're in here, the guest cannot be running.
-*/
+/*S:100
+ * Welcome to the Switcher itself!
+ *
+ * This file contains the low-level code which changes the CPU to run the Guest
+ * code, and returns to the Host when something happens. Understand this, and
+ * you understand the heart of our journey.
+ *
+ * Because this is in assembler rather than C, our tale switches from prose to
+ * verse. First I tried limericks:
+ *
+ * There once was an eax reg,
+ * To which our pointer was fed,
+ * It needed an add,
+ * Which asm-offsets.h had
+ * But this limerick is hurting my head.
+ *
+ * Next I tried haikus, but fitting the required reference to the seasons in
+ * every stanza was quickly becoming tiresome:
+ *
+ * The %eax reg
+ * Holds "struct lguest_pages" now:
+ * Cherry blossoms fall.
+ *
+ * Then I started with Heroic Verse, but the rhyming requirement leeched away
+ * the content density and led to some uniquely awful oblique rhymes:
+ *
+ * These constants are coming from struct offsets
+ * For use within the asm switcher text.
+ *
+ * Finally, I settled for something between heroic hexameter and normal prose
+ * with inappropriate linebreaks. Anyway, it ain't no Shakespeare.
+ */
+
+// Not all kernel headers work from assembler
+// But these ones are needed: the ENTRY() define
+// And constants extracted from struct offsets
+// To avoid magic numbers and breakage:
+// Should they change the compiler can't save us
+// Down here in the depths of assembler code.
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include "lg.h"
+// We mark the start of the code to copy
+// It's placed in .text though it's never run here
+// You'll see the trick macro at the end
+// Which interleaves data and text to effect.
.text
ENTRY(start_switcher_text)
-/* %eax points to lguest pages for this CPU. %ebx contains cr3 value.
- All normal registers can be clobbered! */
+// When we reach switch_to_guest we have just left
+// The safe and comforting shores of C code
+// %eax has the "struct lguest_pages" to use
+// Where we save state and still see it from the Guest
+// And %ebx holds the Guest shadow pagetable:
+// Once set we have truly left Host behind.
ENTRY(switch_to_guest)
- /* Save host segments on host stack. */
+ // We told gcc all its regs could fade,
+ // Clobbered by our journey into the Guest
+ // We could have saved them, if we tried
+ // But time is our master and cycles count.
+
+ // Segment registers must be saved for the Host
+ // We push them on the Host stack for later
pushl %es
pushl %ds
pushl %gs
pushl %fs
- /* With CONFIG_FRAME_POINTER, gcc doesn't let us clobber this! */
+ // But the compiler is fickle, and heeds
+ // No warning of %ebp clobbers
+ // When frame pointers are used. That register
+ // Must be saved and restored or chaos strikes.
pushl %ebp
- /* Save host stack. */
+ // The Host's stack is done, now save it away
+ // In our "struct lguest_pages" at offset
+ // Distilled into asm-offsets.h
movl %esp, LGUEST_PAGES_host_sp(%eax)
- /* Switch to guest stack: if we get NMI we expect to be there. */
+
+ // All saved and there's now five steps before us:
+ // Stack, GDT, IDT, TSS
+ // And last of all the page tables are flipped.
+
+ // Yet beware that our stack pointer must be
+ // Always valid lest an NMI hit
+ // %edx does the duty here as we juggle
+ // %eax is lguest_pages: our stack lies within.
movl %eax, %edx
addl $LGUEST_PAGES_regs, %edx
movl %edx, %esp
- /* Switch to guest's GDT, IDT. */
+
+ // The Guest's GDT we so carefully
+ // Placed in the "struct lguest_pages" before
lgdt LGUEST_PAGES_guest_gdt_desc(%eax)
+
+ // The Guest's IDT we did partially
+ // Move to the "struct lguest_pages" as well.
lidt LGUEST_PAGES_guest_idt_desc(%eax)
- /* Switch to guest's TSS while GDT still writable. */
+
+ // The TSS entry which controls traps
+ // Must be loaded up with "ltr" now:
+ // For after we switch over our page tables
+ // It (as the rest) will be writable no more.
+ // (The GDT entry for the TSS
+ // Changes type when we load it: damn Intel!)
movl $(GDT_ENTRY_TSS*8), %edx
ltr %dx
- /* Set host's TSS GDT entry to available (clear byte 5 bit 2). */
+
+ // Look back now, before we take this last step!
+ // The Host's TSS entry was also marked used;
+ // Let's clear it again, ere we return.
+ // The GDT descriptor of the Host
+ // Points to the table after two "size" bytes
movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
+ // Clear the type field of "used" (byte 5, bit 2)
andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
- /* Switch to guest page tables: lguest_pages->state now read-only. */
+
+ // Once our page table's switched, the Guest is live!
+ // The Host fades as we run this final step.
+ // Our "struct lguest_pages" is now read-only.
movl %ebx, %cr3
- /* Restore guest regs */
+
+ // The page table change did one tricky thing:
+ // The Guest's register page has been mapped
+ // Writable onto our %esp (stack) --
+ // We can simply pop off all Guest regs.
popl %ebx
popl %ecx
popl %edx
@@ -51,12 +142,27 @@ ENTRY(switch_to_guest)
popl %fs
popl %ds
popl %es
- /* Skip error code and trap number */
+
+ // Near the base of the stack lurk two strange fields
+ // Which we fill as we exit the Guest
+ // These are the trap number and its error
+ // We can simply step past them on our way.
addl $8, %esp
+
+ // The last five stack slots hold return address
+ // And everything needed to change privilege
+ // Into the Guest privilege level of 1,
+ // And the stack where the Guest had last left it.
+ // Interrupts are turned back on: we are Guest.
iret
+// There are two paths where we switch to the Host
+// So we put the routine in a macro.
+// We are on our way home, back to the Host
+// Interrupted out of the Guest, we come here.
#define SWITCH_TO_HOST \
- /* Save guest state */ \
+ /* We save the Guest state: all registers first \
+ * Laid out just as "struct lguest_regs" defines */ \
pushl %es; \
pushl %ds; \
pushl %fs; \
@@ -68,58 +174,119 @@ ENTRY(switch_to_guest)
pushl %edx; \
pushl %ecx; \
pushl %ebx; \
- /* Load lguest ds segment for convenience. */ \
+ /* Our stack and our code are using segments \
+ * Set in the TSS and IDT \
+ * Yet if we were to touch data we'd use \
+ * Whatever data segment the Guest had. \
+ * Load the lguest ds segment for now. */ \
movl $(LGUEST_DS), %eax; \
movl %eax, %ds; \
- /* Figure out where we are, based on stack (at top of regs). */ \
+ /* So where are we? Which CPU, which struct? \
+ * The stack is our clue: our TSS sets \
+ * It at the end of "struct lguest_pages" \
+ * And we then pushed and pushed and pushed Guest regs: \
+ * Now stack points atop the "struct lguest_regs". \
+ * Subtract that offset, and we find our struct. */ \
movl %esp, %eax; \
subl $LGUEST_PAGES_regs, %eax; \
- /* Put trap number in %ebx before we switch cr3 and lose it. */ \
+ /* Save our trap number: the switch will obscure it \
+ * (The Guest regs are not mapped here in the Host) \
+ * %ebx holds it safe for deliver_to_host */ \
movl LGUEST_PAGES_regs_trapnum(%eax), %ebx; \
- /* Switch to host page tables (host GDT, IDT and stack are in host \
- mem, so need this first) */ \
+ /* The Host GDT, IDT and stack! \
+ * All these lie safely hidden from the Guest: \
+ * We must return to the Host page tables \
+ * (Hence that was saved in struct lguest_pages) */ \
movl LGUEST_PAGES_host_cr3(%eax), %edx; \
movl %edx, %cr3; \
- /* Set guest's TSS to available (clear byte 5 bit 2). */ \
+ /* As before, when we looked back at the Host \
+ * As we left and marked TSS unused \
+ * So must we now for the Guest left behind. */ \
andb $0xFD, (LGUEST_PAGES_guest_gdt+GDT_ENTRY_TSS*8+5)(%eax); \
- /* Switch to host's GDT & IDT. */ \
+ /* Switch to Host's GDT, IDT. */ \
lgdt LGUEST_PAGES_host_gdt_desc(%eax); \
lidt LGUEST_PAGES_host_idt_desc(%eax); \
- /* Switch to host's stack. */ \
+ /* Restore the Host's stack where its saved regs lie */ \
movl LGUEST_PAGES_host_sp(%eax), %esp; \
- /* Switch to host's TSS */ \
+ /* Last the TSS: our Host is complete */ \
movl $(GDT_ENTRY_TSS*8), %edx; \
ltr %dx; \
+ /* Restore now the regs saved right at the first. */ \
popl %ebp; \
popl %fs; \
popl %gs; \
popl %ds; \
popl %es
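
In C terms, the pointer arithmetic buried in the middle of that macro amounts to the sketch below. The field names are only approximations of what asm-offsets.h encodes as the LGUEST_PAGES_* constants, and the real work has to stay in assembler because the stack and page tables are switched mid-stream.

/* Sketch: the TSS parks the stack at the end of "struct lguest_pages",
 * and the pushes above lay a "struct lguest_regs" on top of it, so
 * subtracting the regs offset from the stack pointer finds the base.
 * "stack_ptr" stands in for %esp; field names are approximate. */
static unsigned long find_trapnum(unsigned long stack_ptr)
{
	struct lguest_pages *pages;

	pages = (struct lguest_pages *)(stack_ptr - LGUEST_PAGES_regs);
	/* Read the trap number before cr3 is switched: the Guest's
	 * register page is not mapped under the Host page tables. */
	return pages->regs.trapnum;
}
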
-/* Return to run_guest_once. */
+// Here's where we come when the Guest has just trapped:
+// (Which trap we'll see has been pushed on the stack).
+// We need only switch back, and the Host will decode
+// Why we came home, and what needs to be done.
return_to_host:
SWITCH_TO_HOST
iret
+// An interrupt, with some cause external
+// Has jerked us rudely from the Guest's code
+// Again we must return home to the Host
deliver_to_host:
SWITCH_TO_HOST
- /* Decode IDT and jump to hosts' irq handler. When that does iret, it
- * will return to run_guest_once. This is a feature. */
+ // But now we must go home via that place
+ // Where that interrupt was supposed to go
+ // Had we not been ensconced, running the Guest.
+ // Here we see the cleverness of our stack:
+ // The Host stack is formed like an interrupt
+ // With EIP, CS and EFLAGS layered.
+ // Interrupt handlers end with "iret"
+ // And that will take us home at long long last.
+
+ // But first we must find the handler to call!
+ // The IDT descriptor for the Host
+ // Has two bytes for size, and four for address:
+ // %edx will hold it for us for now.
movl (LGUEST_PAGES_host_idt_desc+2)(%eax), %edx
+ // We now know the table address we need,
+ // And saved the trap's number inside %ebx.
+ // Yet the pointer to the handler is smeared
+ // Across the bits of the table entry.
+ // What oracle can tell us how to extract
+ // From such a convoluted encoding?
+ // I consulted gcc, and it gave
+ // These instructions, which I gladly credit:
leal (%edx,%ebx,8), %eax
movzwl (%eax),%edx
movl 4(%eax), %eax
xorw %ax, %ax
orl %eax, %edx
+ // Now the address of the handler's in %edx
+ // We call it now: its "iret" takes us home.
jmp *%edx
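
Decoded back into C, that gcc-derived sequence is roughly the sketch below: an IDT gate keeps the handler offset in two halves, and the two dwords of the gate are combined accordingly (the helper name is invented).

/* Sketch: bits 0-15 of the handler live in the low word of the gate's
 * first dword, bits 16-31 in the high word of the second dword. */
static unsigned long idt_handler_address(unsigned int lo, unsigned int hi)
{
	return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
}
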
-/* Real hardware interrupts are delivered straight to the host. Others
- cause us to return to run_guest_once so it can decide what to do. Note
- that some of these are overridden by the guest to deliver directly, and
- never enter here (see load_guest_idt_entry). */
+// Every interrupt can come to us here
+// But we must truly tell each apart.
+// They number two hundred and fifty six
+// And each must land in a different spot,
+// Push its number on stack, and join the stream.
+
+// And worse, a mere seven of the traps stand apart
+// And push on their stack an addition:
+// An error number, thirty-two bits long
+// So we punish the other two forty-nine
+// And make them push a zero so they match.
+
+// Yet two fifty six entries is long
+// And all will look most the same as the last
+// So we create a macro which can make
+// As many entries as we need to fill.
+
+// Note the change to .data then .text:
+// We plant the address of each entry
+// Into a (data) table for the Host
+// To know where each Guest interrupt should go.
.macro IRQ_STUB N TARGET
.data; .long 1f; .text; 1:
- /* Make an error number for most traps, which don't have one. */
+ // Trap eight, ten through fourteen and seventeen
+ // Supply an error number. Else zero.
.if (\N <> 8) && (\N < 10 || \N > 14) && (\N <> 17)
pushl $0
.endif
@@ -128,6 +295,8 @@ deliver_to_host:
ALIGN
.endm
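
The same rule, written out as a C predicate for reference; the helper name is invented, and the trap list matches the .if above and the Intel manuals.

/* Traps 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF) and 17 (#AC) get an
 * error code pushed by the CPU; for every other vector the stub pushes
 * a zero so the stack layout is always the same. */
static int cpu_pushes_error_code(unsigned int trap)
{
	return trap == 8 || (trap >= 10 && trap <= 14) || trap == 17;
}
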
+// This macro creates numerous entries
+// Using GAS macros which out-power C's.
.macro IRQ_STUBS FIRST LAST TARGET
irq=\FIRST
.rept \LAST-\FIRST+1
@@ -136,24 +305,43 @@ deliver_to_host:
.endr
.endm
-/* We intercept every interrupt, because we may need to switch back to
- * host. Unfortunately we can't tell them apart except by entry
- * point, so we need 256 entry points.
- */
+// Here's the marker for our pointer table
+// Laid in the data section just before
+// Each macro places the address of code
+// Forming an array: each one points to text
+// Which handles interrupt in its turn.
.data
.global default_idt_entries
default_idt_entries:
.text
- IRQ_STUBS 0 1 return_to_host /* First two traps */
- IRQ_STUB 2 handle_nmi /* NMI */
- IRQ_STUBS 3 31 return_to_host /* Rest of traps */
- IRQ_STUBS 32 127 deliver_to_host /* Real interrupts */
- IRQ_STUB 128 return_to_host /* System call (overridden) */
- IRQ_STUBS 129 255 deliver_to_host /* Other real interrupts */
-
-/* We ignore NMI and return. */
+ // The first two traps go straight back to the Host
+ IRQ_STUBS 0 1 return_to_host
+ // We'll say nothing, yet, about NMI
+ IRQ_STUB 2 handle_nmi
+ // Other traps also return to the Host
+ IRQ_STUBS 3 31 return_to_host
+ // All interrupts go via their handlers
+ IRQ_STUBS 32 127 deliver_to_host
+ // 'Cept system calls coming from userspace
+ // Are to go to the Guest, never the Host.
+ IRQ_STUB 128 return_to_host
+ IRQ_STUBS 129 255 deliver_to_host
+
+// The NMI, what a fabulous beast
+// Which swoops in and stops us no matter that
+// We're suspended between heaven and hell,
+// (Or more likely between the Host and Guest)
+// When in it comes! We are dazed and confused
+// So we do the simplest thing which one can.
+// Though we've pushed the trap number and zero
+// We discard them, return, and hope we live.
handle_nmi:
addl $8, %esp
iret
+// We are done; all that's left is Mastery
+// And "make Mastery" is a journey long
+// Designed to make your fingers itch to code.
+
+// Here ends the text, the file and poem.
ENTRY(end_switcher_text)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2fc199b0016..2bcde5798b5 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -526,7 +526,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
struct io_restrictions *rs = &ti->limits;
/*
@@ -979,7 +979,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
devices = dm_table_get_devices(t);
for (d = devices->next; d != devices; d = d->next) {
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->bdev);
r |= bdi_congested(&q->backing_dev_info, bdi_bits);
}
@@ -992,7 +992,7 @@ void dm_table_unplug_all(struct dm_table *t)
for (d = devices->next; d != devices; d = d->next) {
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->bdev);
if (q->unplug_fn)
q->unplug_fn(q);
@@ -1011,7 +1011,7 @@ int dm_table_flush_all(struct dm_table *t)
for (d = devices->next; d != devices; d = d->next) {
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->bdev);
int err;
if (!q->issue_flush_fn)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 846614e676c..141ff9fa296 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -80,7 +80,7 @@ struct mapped_device {
unsigned long flags;
- request_queue_t *queue;
+ struct request_queue *queue;
struct gendisk *disk;
char name[16];
@@ -792,7 +792,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
-static int dm_request(request_queue_t *q, struct bio *bio)
+static int dm_request(struct request_queue *q, struct bio *bio)
{
int r;
int rw = bio_data_dir(bio);
@@ -844,7 +844,7 @@ static int dm_request(request_queue_t *q, struct bio *bio)
return 0;
}
-static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
+static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
struct mapped_device *md = q->queuedata;
@@ -859,7 +859,7 @@ static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
return ret;
}
-static void dm_unplug_all(request_queue_t *q)
+static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_table(md);
@@ -1110,7 +1110,7 @@ static void __set_size(struct mapped_device *md, sector_t size)
static int __bind(struct mapped_device *md, struct dm_table *t)
{
- request_queue_t *q = md->queue;
+ struct request_queue *q = md->queue;
sector_t size;
size = dm_table_get_size(t);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 4ebd0f2a75e..cb059cf14c2 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -167,7 +167,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
conf->nfaults = n+1;
}
-static int make_request(request_queue_t *q, struct bio *bio)
+static int make_request(struct request_queue *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = (conf_t*)mddev->private;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 19274108319..17f795c3e0a 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -55,7 +55,7 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
*
* Return amount of bytes we can take at this offset
*/
-static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
dev_info_t *dev0;
@@ -79,20 +79,20 @@ static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio
return maxsectors << 9;
}
-static void linear_unplug(request_queue_t *q)
+static void linear_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
linear_conf_t *conf = mddev_to_conf(mddev);
int i;
for (i=0; i < mddev->raid_disks; i++) {
- request_queue_t *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
if (r_queue->unplug_fn)
r_queue->unplug_fn(r_queue);
}
}
-static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -101,7 +101,7 @@ static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
for (i=0; i < mddev->raid_disks && ret == 0; i++) {
struct block_device *bdev = conf->disks[i].rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -118,7 +118,7 @@ static int linear_congested(void *data, int bits)
int i, ret = 0;
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
- request_queue_t *q = bdev_get_queue(conf->disks[i].rdev->bdev);
+ struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
return ret;
@@ -330,7 +330,7 @@ static int linear_stop (mddev_t *mddev)
return 0;
}
-static int linear_make_request (request_queue_t *q, struct bio *bio)
+static int linear_make_request (struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 65ddc887dfd..f883b7e37f3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -211,7 +211,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
)
-static int md_fail_request (request_queue_t *q, struct bio *bio)
+static int md_fail_request (struct request_queue *q, struct bio *bio)
{
bio_io_error(bio, bio->bi_size);
return 0;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 14da37fee37..1e2af43a73b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -125,7 +125,7 @@ static void unplug_slaves(mddev_t *mddev)
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)
&& atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -140,13 +140,13 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void multipath_unplug(request_queue_t *q)
+static void multipath_unplug(struct request_queue *q)
{
unplug_slaves(q->queuedata);
}
-static int multipath_make_request (request_queue_t *q, struct bio * bio)
+static int multipath_make_request (struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
multipath_conf_t *conf = mddev_to_conf(mddev);
@@ -199,7 +199,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
seq_printf (seq, "]");
}
-static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -211,7 +211,7 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -238,7 +238,7 @@ static int multipath_congested(void *data, int bits)
for (i = 0; i < mddev->raid_disks ; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
- request_queue_t *q = bdev_get_queue(rdev->bdev);
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
/* Just like multipath_map, we just check the
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 2c404f73a37..b8216bc6db4 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,7 +25,7 @@
#define MD_DRIVER
#define MD_PERSONALITY
-static void raid0_unplug(request_queue_t *q)
+static void raid0_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid0_conf_t *conf = mddev_to_conf(mddev);
@@ -33,14 +33,14 @@ static void raid0_unplug(request_queue_t *q)
int i;
for (i=0; i<mddev->raid_disks; i++) {
- request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev);
+ struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
if (r_queue->unplug_fn)
r_queue->unplug_fn(r_queue);
}
}
-static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -50,7 +50,7 @@ static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
struct block_device *bdev = devlist[i]->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -68,7 +68,7 @@ static int raid0_congested(void *data, int bits)
int i, ret = 0;
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
- request_queue_t *q = bdev_get_queue(devlist[i]->bdev);
+ struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
@@ -268,7 +268,7 @@ static int create_strip_zones (mddev_t *mddev)
*
* Return amount of bytes we can accept at this offset
*/
-static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -408,7 +408,7 @@ static int raid0_stop (mddev_t *mddev)
return 0;
}
-static int raid0_make_request (request_queue_t *q, struct bio *bio)
+static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 00c78b77b13..650991bddd8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -552,7 +552,7 @@ static void unplug_slaves(mddev_t *mddev)
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -567,7 +567,7 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void raid1_unplug(request_queue_t *q)
+static void raid1_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
@@ -575,7 +575,7 @@ static void raid1_unplug(request_queue_t *q)
md_wakeup_thread(mddev->thread);
}
-static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -587,7 +587,7 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -615,7 +615,7 @@ static int raid1_congested(void *data, int bits)
for (i = 0; i < mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
- request_queue_t *q = bdev_get_queue(rdev->bdev);
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
/* Note the '|| 1' - when read_balance prefers
* non-congested targets, it can be removed
@@ -765,7 +765,7 @@ do_sync_io:
return NULL;
}
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = mddev_to_conf(mddev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a95ada1cfac..f730a144baf 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -453,7 +453,7 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
* If near_copies == raid_disk, there are no striping issues,
* but in that case, the function isn't called at all.
*/
-static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio,
+static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
struct bio_vec *bio_vec)
{
mddev_t *mddev = q->queuedata;
@@ -595,7 +595,7 @@ static void unplug_slaves(mddev_t *mddev)
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -610,7 +610,7 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void raid10_unplug(request_queue_t *q)
+static void raid10_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
@@ -618,7 +618,7 @@ static void raid10_unplug(request_queue_t *q)
md_wakeup_thread(mddev->thread);
}
-static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -630,7 +630,7 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -658,7 +658,7 @@ static int raid10_congested(void *data, int bits)
for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
- request_queue_t *q = bdev_get_queue(rdev->bdev);
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
@@ -772,7 +772,7 @@ static void unfreeze_array(conf_t *conf)
spin_unlock_irq(&conf->resync_lock);
}
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = mddev_to_conf(mddev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d90ee145eff..2aff4be35dc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -289,7 +289,7 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
}
static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(request_queue_t *q);
+static void raid5_unplug_device(struct request_queue *q);
static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
int pd_idx, int noblock)
@@ -3182,7 +3182,7 @@ static void unplug_slaves(mddev_t *mddev)
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
@@ -3197,7 +3197,7 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_unlock();
}
-static void raid5_unplug_device(request_queue_t *q)
+static void raid5_unplug_device(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3216,7 +3216,7 @@ static void raid5_unplug_device(request_queue_t *q)
unplug_slaves(mddev);
}
-static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
@@ -3228,7 +3228,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
@@ -3267,7 +3267,7 @@ static int raid5_congested(void *data, int bits)
/* We want read requests to align with chunks where possible,
* but write requests don't need to.
*/
-static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
@@ -3377,7 +3377,7 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
static int bio_fits_rdev(struct bio *bi)
{
- request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+ struct request_queue *q = bdev_get_queue(bi->bi_bdev);
if ((bi->bi_size>>9) > q->max_sectors)
return 0;
@@ -3396,7 +3396,7 @@ static int bio_fits_rdev(struct bio *bi)
}
-static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
+static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3466,7 +3466,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
}
-static int make_request(request_queue_t *q, struct bio * bi)
+static int make_request(struct request_queue *q, struct bio * bi)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 9dcbffd0aa1..e204e7b4028 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -509,7 +509,7 @@ config VIDEO_VINO
config VIDEO_STRADIS
tristate "Stradis 4:2:2 MPEG-2 video driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL && PCI && VIDEO_V4L1 && !PPC64
+ depends on EXPERIMENTAL && PCI && VIDEO_V4L1 && VIRT_TO_BUS
help
Say Y here to enable support for the Stradis 4:2:2 MPEG-2 video
driver for PCI. There is a product page at
@@ -520,7 +520,7 @@ config VIDEO_ZORAN_ZR36060
config VIDEO_ZORAN
tristate "Zoran ZR36057/36067 Video For Linux"
- depends on PCI && I2C_ALGOBIT && VIDEO_V4L1 && !PPC64
+ depends on PCI && I2C_ALGOBIT && VIDEO_V4L1 && VIRT_TO_BUS
help
Say Y for support for MJPEG capture cards based on the Zoran
36057/36067 PCI controller chipset. This includes the Iomega
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 988c8ce47f5..5e1c99f83ab 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -159,7 +159,7 @@ static int i2o_block_device_flush(struct i2o_device *dev)
* Returns 0 on success or negative error code on failure.
*/
-static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
+static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk,
sector_t * error_sector)
{
struct i2o_block_device *i2o_blk_dev = queue->queuedata;
@@ -445,7 +445,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
{
struct i2o_block_request *ireq = req->special;
struct i2o_block_device *dev = ireq->i2o_blk_dev;
- request_queue_t *q = req->q;
+ struct request_queue *q = req->q;
unsigned long flags;
if (end_that_request_chunk(req, uptodate, nr_bytes)) {
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
index f7530605997..d0fc4fd212e 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/misc/asus-laptop.c
@@ -53,7 +53,6 @@
#define ASUS_HOTK_NAME "Asus Laptop Support"
#define ASUS_HOTK_CLASS "hotkey"
#define ASUS_HOTK_DEVICE_NAME "Hotkey"
-#define ASUS_HOTK_HID "ATK0100"
#define ASUS_HOTK_FILE "asus-laptop"
#define ASUS_HOTK_PREFIX "\\_SB.ATKD."
@@ -197,12 +196,18 @@ static struct asus_hotk *hotk;
/*
* The hotkey driver declaration
*/
+static const struct acpi_device_id asus_device_ids[] = {
+ {"ATK0100", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, asus_device_ids);
+
static int asus_hotk_add(struct acpi_device *device);
static int asus_hotk_remove(struct acpi_device *device, int type);
static struct acpi_driver asus_hotk_driver = {
.name = ASUS_HOTK_NAME,
.class = ASUS_HOTK_CLASS,
- .ids = ASUS_HOTK_HID,
+ .ids = asus_device_ids,
.ops = {
.add = asus_hotk_add,
.remove = asus_hotk_remove,
@@ -1067,19 +1072,16 @@ static void asus_backlight_exit(void)
}
#define ASUS_LED_UNREGISTER(object) \
- if(object##_led.class_dev \
- && !IS_ERR(object##_led.class_dev)) \
- led_classdev_unregister(&object##_led)
+ led_classdev_unregister(&object##_led)
static void asus_led_exit(void)
{
+ destroy_workqueue(led_workqueue);
ASUS_LED_UNREGISTER(mled);
ASUS_LED_UNREGISTER(tled);
ASUS_LED_UNREGISTER(pled);
ASUS_LED_UNREGISTER(rled);
ASUS_LED_UNREGISTER(gled);
-
- destroy_workqueue(led_workqueue);
}
static void __exit asus_laptop_exit(void)
@@ -1135,29 +1137,42 @@ static int asus_led_init(struct device *dev)
rv = ASUS_LED_REGISTER(mled, dev);
if (rv)
- return rv;
+ goto out;
rv = ASUS_LED_REGISTER(tled, dev);
if (rv)
- return rv;
+ goto out1;
rv = ASUS_LED_REGISTER(rled, dev);
if (rv)
- return rv;
+ goto out2;
rv = ASUS_LED_REGISTER(pled, dev);
if (rv)
- return rv;
+ goto out3;
rv = ASUS_LED_REGISTER(gled, dev);
if (rv)
- return rv;
+ goto out4;
led_workqueue = create_singlethread_workqueue("led_workqueue");
if (!led_workqueue)
- return -ENOMEM;
+ goto out5;
return 0;
+out5:
+ rv = -ENOMEM;
+ ASUS_LED_UNREGISTER(gled);
+out4:
+ ASUS_LED_UNREGISTER(pled);
+out3:
+ ASUS_LED_UNREGISTER(rled);
+out2:
+ ASUS_LED_UNREGISTER(tled);
+out1:
+ ASUS_LED_UNREGISTER(mled);
+out:
+ return rv;
}
static int __init asus_laptop_init(void)
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index 303e48ca0e8..14ee06c8f12 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -1124,10 +1124,22 @@ static int sony_nc_remove(struct acpi_device *device, int type)
return 0;
}
+static const struct acpi_device_id sony_device_ids[] = {
+ {SONY_NC_HID, 0},
+ {SONY_PIC_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, sony_device_ids);
+
+static const struct acpi_device_id sony_nc_device_ids[] = {
+ {SONY_NC_HID, 0},
+ {"", 0},
+};
+
static struct acpi_driver sony_nc_driver = {
.name = SONY_NC_DRIVER_NAME,
.class = SONY_NC_CLASS,
- .ids = SONY_NC_HID,
+ .ids = sony_nc_device_ids,
.owner = THIS_MODULE,
.ops = {
.add = sony_nc_add,
@@ -2470,10 +2482,15 @@ static int sony_pic_resume(struct acpi_device *device)
return 0;
}
+static const struct acpi_device_id sony_pic_device_ids[] = {
+ {SONY_PIC_HID, 0},
+ {"", 0},
+};
+
static struct acpi_driver sony_pic_driver = {
.name = SONY_PIC_DRIVER_NAME,
.class = SONY_PIC_CLASS,
- .ids = SONY_PIC_HID,
+ .ids = sony_pic_device_ids,
.owner = THIS_MODULE,
.ops = {
.add = sony_pic_add,
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index f15a58f7403..fa80f355e52 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -411,12 +411,13 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
sprintf(ibm->acpi->driver->name, "%s_%s", IBM_NAME, ibm->name);
ibm->acpi->driver->ids = ibm->acpi->hid;
+
ibm->acpi->driver->ops.add = &tpacpi_device_add;
rc = acpi_bus_register_driver(ibm->acpi->driver);
if (rc < 0) {
printk(IBM_ERR "acpi_bus_register_driver(%s) failed: %d\n",
- ibm->acpi->hid, rc);
+ ibm->name, rc);
kfree(ibm->acpi->driver);
ibm->acpi->driver = NULL;
} else if (!rc)
@@ -1316,8 +1317,13 @@ errexit:
return res;
}
+static const struct acpi_device_id ibm_htk_device_ids[] = {
+ {IBM_HKEY_HID, 0},
+ {"", 0},
+};
+
static struct tp_acpi_drv_struct ibm_hotkey_acpidriver = {
- .hid = IBM_HKEY_HID,
+ .hid = ibm_htk_device_ids,
.notify = hotkey_notify,
.handle = &hkey_handle,
.type = ACPI_DEVICE_NOTIFY,
@@ -2080,6 +2086,11 @@ IBM_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */
/* don't list other alternatives as we install a notify handler on the 570 */
IBM_HANDLE(pci, root, "\\_SB.PCI"); /* 570 */
+static const struct acpi_device_id ibm_pci_device_ids[] = {
+ {PCI_ROOT_HID_STRING, 0},
+ {"", 0},
+};
+
static struct tp_acpi_drv_struct ibm_dock_acpidriver[2] = {
{
.notify = dock_notify,
@@ -2090,7 +2101,7 @@ static struct tp_acpi_drv_struct ibm_dock_acpidriver[2] = {
/* THIS ONE MUST NEVER BE USED FOR DRIVER AUTOLOADING.
* We just use it to get notifications of dock hotplug
* in very old thinkpads */
- .hid = PCI_ROOT_HID_STRING,
+ .hid = ibm_pci_device_ids,
.notify = dock_notify,
.handle = &pci_handle,
.type = ACPI_SYSTEM_NOTIFY,
@@ -2149,7 +2160,8 @@ static int __init dock_init2(struct ibm_init_struct *iibm)
static void dock_notify(struct ibm_struct *ibm, u32 event)
{
int docked = dock_docked();
- int pci = ibm->acpi->hid && strstr(ibm->acpi->hid, PCI_ROOT_HID_STRING);
+ int pci = ibm->acpi->hid && ibm->acpi->device &&
+ acpi_match_device_ids(ibm->acpi->device, ibm_pci_device_ids);
if (event == 1 && !pci) /* 570 */
acpi_bus_generate_event(ibm->acpi->device, event, 1); /* button */
diff --git a/drivers/misc/thinkpad_acpi.h b/drivers/misc/thinkpad_acpi.h
index b7a4a888cc8..88af089d649 100644
--- a/drivers/misc/thinkpad_acpi.h
+++ b/drivers/misc/thinkpad_acpi.h
@@ -193,7 +193,7 @@ static void thinkpad_acpi_module_exit(void);
struct ibm_struct;
struct tp_acpi_drv_struct {
- char *hid;
+ const struct acpi_device_id *hid;
struct acpi_driver *driver;
void (*notify) (struct ibm_struct *, u32);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index b53dac8d1b6..c9a289c6c13 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/queue.c
+ * linux/drivers/mmc/card/queue.c
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright 2006-2007 Pierre Ossman
@@ -83,7 +83,7 @@ static int mmc_queue_thread(void *d)
* on any queue on this host, and attempt to issue it. This may
* not be the queue we were asked to process.
*/
-static void mmc_request(request_queue_t *q)
+static void mmc_request(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
@@ -211,7 +211,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
void mmc_cleanup_queue(struct mmc_queue *mq)
{
- request_queue_t *q = mq->queue;
+ struct request_queue *q = mq->queue;
unsigned long flags;
/* Mark that we should start throwing out stragglers */
@@ -252,7 +252,7 @@ EXPORT_SYMBOL(mmc_cleanup_queue);
*/
void mmc_queue_suspend(struct mmc_queue *mq)
{
- request_queue_t *q = mq->queue;
+ struct request_queue *q = mq->queue;
unsigned long flags;
if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
@@ -272,7 +272,7 @@ void mmc_queue_suspend(struct mmc_queue *mq)
*/
void mmc_queue_resume(struct mmc_queue *mq)
{
- request_queue_t *q = mq->queue;
+ struct request_queue *q = mq->queue;
unsigned long flags;
if (mq->flags & MMC_QUEUE_SUSPENDED) {
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 348b566bf4f..fe0e785ed7d 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -209,10 +209,30 @@ struct mmc_card *mmc_alloc_card(struct mmc_host *host)
int mmc_add_card(struct mmc_card *card)
{
int ret;
+ const char *type;
snprintf(card->dev.bus_id, sizeof(card->dev.bus_id),
"%s:%04x", mmc_hostname(card->host), card->rca);
+ switch (card->type) {
+ case MMC_TYPE_MMC:
+ type = "MMC";
+ break;
+ case MMC_TYPE_SD:
+ type = "SD";
+ if (mmc_card_blockaddr(card))
+ type = "SDHC";
+ break;
+ default:
+ type = "?";
+ break;
+ }
+
+ printk(KERN_INFO "%s: new %s%s card at address %04x\n",
+ mmc_hostname(card->host),
+ mmc_card_highspeed(card) ? "high speed " : "",
+ type, card->rca);
+
card->dev.uevent_suppress = 1;
ret = device_add(&card->dev);
@@ -243,6 +263,9 @@ int mmc_add_card(struct mmc_card *card)
void mmc_remove_card(struct mmc_card *card)
{
if (mmc_card_present(card)) {
+ printk(KERN_INFO "%s: card %04x removed\n",
+ mmc_hostname(card->host), card->rca);
+
if (card->host->bus_ops->sysfs_remove)
card->host->bus_ops->sysfs_remove(card->host, card);
device_del(&card->dev);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index b5d8a6d90cc..bfd2ae5bd66 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -68,32 +68,41 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
struct mmc_command *cmd = mrq->cmd;
int err = cmd->error;
- pr_debug("%s: req done (CMD%u): %d/%d/%d: %08x %08x %08x %08x\n",
- mmc_hostname(host), cmd->opcode, err,
- mrq->data ? mrq->data->error : 0,
- mrq->stop ? mrq->stop->error : 0,
- cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
-
if (err && cmd->retries) {
+ pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
+ mmc_hostname(host), cmd->opcode, err);
+
cmd->retries--;
cmd->error = 0;
host->ops->request(host, mrq);
- } else if (mrq->done) {
- mrq->done(mrq);
+ } else {
+ pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
+ mmc_hostname(host), cmd->opcode, err,
+ cmd->resp[0], cmd->resp[1],
+ cmd->resp[2], cmd->resp[3]);
+
+ if (mrq->data) {
+ pr_debug("%s: %d bytes transferred: %d\n",
+ mmc_hostname(host),
+ mrq->data->bytes_xfered, mrq->data->error);
+ }
+
+ if (mrq->stop) {
+ pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
+ mmc_hostname(host), mrq->stop->opcode,
+ mrq->stop->error,
+ mrq->stop->resp[0], mrq->stop->resp[1],
+ mrq->stop->resp[2], mrq->stop->resp[3]);
+ }
+
+ if (mrq->done)
+ mrq->done(mrq);
}
}
EXPORT_SYMBOL(mmc_request_done);
-/**
- * mmc_start_request - start a command on a host
- * @host: MMC host to start command on
- * @mrq: MMC request to start
- *
- * Queue a command on the specified host. We expect the
- * caller to be holding the host lock with interrupts disabled.
- */
-void
+static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
@@ -104,6 +113,21 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mmc_hostname(host), mrq->cmd->opcode,
mrq->cmd->arg, mrq->cmd->flags);
+ if (mrq->data) {
+ pr_debug("%s: blksz %d blocks %d flags %08x "
+ "tsac %d ms nsac %d\n",
+ mmc_hostname(host), mrq->data->blksz,
+ mrq->data->blocks, mrq->data->flags,
+ mrq->data->timeout_ns / 10000000,
+ mrq->data->timeout_clks);
+ }
+
+ if (mrq->stop) {
+ pr_debug("%s: CMD%u arg %08x flags %08x\n",
+ mmc_hostname(host), mrq->stop->opcode,
+ mrq->stop->arg, mrq->stop->flags);
+ }
+
WARN_ON(!host->claimed);
mrq->cmd->error = 0;
@@ -133,14 +157,21 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
host->ops->request(host, mrq);
}
-EXPORT_SYMBOL(mmc_start_request);
-
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(mrq->done_data);
}
-int mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
+/**
+ * mmc_wait_for_req - start a request and wait for completion
+ * @host: MMC host to start command
+ * @mrq: MMC request to start
+ *
+ * Start a new MMC custom command request for a host, and wait
+ * for the command to complete. Does not attempt to parse the
+ * response.
+ */
+void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
DECLARE_COMPLETION_ONSTACK(complete);
@@ -150,8 +181,6 @@ int mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
mmc_start_request(host, mrq);
wait_for_completion(&complete);
-
- return 0;
}
EXPORT_SYMBOL(mmc_wait_for_req);
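
A typical caller builds the request on the stack and blocks here until the controller is done. A hedged sketch follows: the opcode and flags are illustrative, error handling is elided, and "host" and "card" are assumed to be in scope with the host already claimed.

struct mmc_command cmd = {};
struct mmc_request mrq = {};

cmd.opcode = MMC_SEND_STATUS;          /* example: query card status */
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

mrq.cmd = &cmd;                        /* no data or stop phase here */
mmc_wait_for_req(host, &mrq);

if (cmd.error != MMC_ERR_NONE)
	/* deal with the failure */;

For a single command with retries, the nearby mmc_wait_for_cmd() wraps exactly this pattern.
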
@@ -192,6 +221,9 @@ EXPORT_SYMBOL(mmc_wait_for_cmd);
* @data: data phase for command
* @card: the MMC card associated with the data transfer
* @write: flag to differentiate reads from writes
+ *
+ * Computes the data timeout parameters according to the
+ * correct algorithm given the card type.
*/
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
int write)
@@ -240,21 +272,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
EXPORT_SYMBOL(mmc_set_data_timeout);
/**
- * __mmc_claim_host - exclusively claim a host
+ * mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
- * @card: mmc card to claim host for
- *
- * Claim a host for a set of operations. If a valid card
- * is passed and this wasn't the last card selected, select
- * the card before returning.
*
- * Note: you should use mmc_card_claim_host or mmc_claim_host.
+ * Claim a host for a set of operations.
*/
void mmc_claim_host(struct mmc_host *host)
{
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
+ might_sleep();
+
add_wait_queue(&host->wq, &wait);
spin_lock_irqsave(&host->lock, flags);
while (1) {
@@ -433,6 +462,45 @@ static void mmc_power_off(struct mmc_host *host)
}
/*
+ * Cleanup when the last reference to the bus operator is dropped.
+ */
+void __mmc_release_bus(struct mmc_host *host)
+{
+ BUG_ON(!host);
+ BUG_ON(host->bus_refs);
+ BUG_ON(!host->bus_dead);
+
+ host->bus_ops = NULL;
+}
+
+/*
+ * Increase reference count of bus operator
+ */
+static inline void mmc_bus_get(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_refs++;
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * Decrease reference count of bus operator and free it if
+ * it is the last reference.
+ */
+static inline void mmc_bus_put(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_refs--;
+ if ((host->bus_refs == 0) && host->bus_ops)
+ __mmc_release_bus(host);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
*/
@@ -481,25 +549,15 @@ void mmc_detach_bus(struct mmc_host *host)
mmc_bus_put(host);
}
-/*
- * Cleanup when the last reference to the bus operator is dropped.
- */
-void __mmc_release_bus(struct mmc_host *host)
-{
- BUG_ON(!host);
- BUG_ON(host->bus_refs);
- BUG_ON(!host->bus_dead);
-
- host->bus_ops = NULL;
-}
-
/**
* mmc_detect_change - process change of state on a MMC socket
* @host: host which changed state.
* @delay: optional delay to wait before detection (jiffies)
*
- * All we know is that card(s) have been inserted or removed
- * from the socket(s). We don't know which socket or cards.
+ * MMC drivers should call this when they detect a card has been
+ * inserted or removed. The MMC layer will confirm that any
+ * present card is still functional, and initialize any newly
+ * inserted card.
*/
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index ae006b30dd8..bb2774af9ea 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -27,28 +27,6 @@ struct mmc_bus_ops {
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
void mmc_detach_bus(struct mmc_host *host);
-void __mmc_release_bus(struct mmc_host *host);
-
-static inline void mmc_bus_get(struct mmc_host *host)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
- host->bus_refs++;
- spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static inline void mmc_bus_put(struct mmc_host *host)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
- host->bus_refs--;
- if ((host->bus_refs == 0) && host->bus_ops)
- __mmc_release_bus(host);
- spin_unlock_irqrestore(&host->lock, flags);
-}
-
void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1433d95c40b..6a7e2984960 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -93,6 +93,10 @@ EXPORT_SYMBOL(mmc_alloc_host);
/**
* mmc_add_host - initialise host hardware
* @host: mmc host
+ *
+ * Register the host with the driver model. The host must be
+ * prepared to start servicing requests before this function
+ * completes.
*/
int mmc_add_host(struct mmc_host *host)
{
@@ -126,7 +130,8 @@ EXPORT_SYMBOL(mmc_add_host);
* @host: mmc host
*
* Unregister and remove all cards associated with this host,
- * and power down the MMC bus.
+ * and power down the MMC bus. No new requests will be issued
+ * after this function has returned.
*/
void mmc_remove_host(struct mmc_host *host)
{
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 66f85bfa8db..21d7f48e1d4 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/mmc.c
+ * linux/drivers/mmc/core/mmc.c
*
* Copyright (C) 2003-2004 Russell King, All Rights Reserved.
* Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
@@ -100,7 +100,7 @@ static int mmc_decode_cid(struct mmc_card *card)
break;
default:
- printk("%s: card has unknown MMCA version %d\n",
+ printk(KERN_ERR "%s: card has unknown MMCA version %d\n",
mmc_hostname(card->host), card->csd.mmca_vsn);
return -EINVAL;
}
@@ -123,7 +123,7 @@ static int mmc_decode_csd(struct mmc_card *card)
*/
csd_struct = UNSTUFF_BITS(resp, 126, 2);
if (csd_struct != 1 && csd_struct != 2) {
- printk("%s: unrecognised CSD structure version %d\n",
+ printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
mmc_hostname(card->host), csd_struct);
return -EINVAL;
}
@@ -499,14 +499,17 @@ static void mmc_resume(struct mmc_host *host)
BUG_ON(!host->card);
mmc_claim_host(host);
-
err = mmc_init_card(host, host->ocr, host->card);
+ mmc_release_host(host);
+
if (err != MMC_ERR_NONE) {
mmc_remove(host);
+
+ mmc_claim_host(host);
mmc_detach_bus(host);
+ mmc_release_host(host);
}
- mmc_release_host(host);
}
#else
@@ -553,8 +556,10 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
/*
* Can we support the voltage of the card?
*/
- if (!host->ocr)
+ if (!host->ocr) {
+ err = -EINVAL;
goto err;
+ }
/*
* Detect and init the card.
@@ -567,18 +572,21 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
err = mmc_add_card(host->card);
if (err)
- goto reclaim_host;
+ goto remove_card;
return 0;
-reclaim_host:
- mmc_claim_host(host);
+remove_card:
mmc_remove_card(host->card);
host->card = NULL;
+ mmc_claim_host(host);
err:
mmc_detach_bus(host);
mmc_release_host(host);
+ printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
+ mmc_hostname(host), err);
+
return 0;
}
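The mmc_resume() and mmc_attach_mmc() hunks above rework the host-claim handling on their error paths: resume now drops the claim as soon as the card has been re-initialised and re-takes it only around mmc_detach_bus(), and a failed mmc_add_card() now removes the card before reclaiming the host, with the error logged on the way out. The same rework is applied to the SD side in sd.c further down. A rough user-space sketch of that claim/operate/release shape with goto-based cleanup follows; all the function names are stand-ins, not the MMC core API.

#include <errno.h>
#include <stdio.h>

/* Stand-ins for the real operations; the int ones return 0 on success. */
static void claim(void)       { }
static void release(void)     { }
static int init_card(void)    { return 0; }
static int add_card(void)     { return -EIO; }   /* pretend registration fails */
static void remove_card(void) { }
static void detach_bus(void)  { }

static int attach(void)
{
	int err;

	claim();

	err = init_card();
	if (err)
		goto err;

	/* Card registration runs without the host claimed. */
	release();
	err = add_card();
	if (err)
		goto err_remove;

	return 0;

err_remove:
	remove_card();
	claim();             /* reclaim only for the teardown that needs it */
err:
	detach_bus();
	release();
	fprintf(stderr, "error %d while initialising card\n", err);
	return 0;            /* the attach paths above also return 0 here */
}

int main(void)
{
	return attach();
}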
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 7dd720fa589..913e75f0084 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/mmc_ops.h
+ * linux/drivers/mmc/core/mmc_ops.h
*
* Copyright 2006-2007 Pierre Ossman
*
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 7a481e8ca5e..76d09a93c5d 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/mmc_ops.h
+ * linux/drivers/mmc/core/mmc_ops.h
*
* Copyright 2006-2007 Pierre Ossman
*
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 1240684083f..1edc62b1e5c 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/sd.c
+ * linux/drivers/mmc/core/sd.c
*
* Copyright (C) 2003-2004 Russell King, All Rights Reserved.
* SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
@@ -149,7 +149,7 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->write_partial = 0;
break;
default:
- printk("%s: unrecognised CSD structure version %d\n",
+ printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
mmc_hostname(card->host), csd_struct);
return -EINVAL;
}
@@ -173,7 +173,7 @@ static int mmc_decode_scr(struct mmc_card *card)
scr_struct = UNSTUFF_BITS(resp, 60, 4);
if (scr_struct != 0) {
- printk("%s: unrecognised SCR structure version %d\n",
+ printk(KERN_ERR "%s: unrecognised SCR structure version %d\n",
mmc_hostname(card->host), scr_struct);
return -EINVAL;
}
@@ -206,9 +206,8 @@ static int mmc_read_switch(struct mmc_card *card)
status = kmalloc(64, GFP_KERNEL);
if (!status) {
- printk("%s: could not allocate a buffer for switch "
- "capabilities.\n",
- mmc_hostname(card->host));
+ printk(KERN_ERR "%s: could not allocate a buffer for "
+ "switch capabilities.\n", mmc_hostname(card->host));
return err;
}
@@ -254,9 +253,8 @@ static int mmc_switch_hs(struct mmc_card *card)
status = kmalloc(64, GFP_KERNEL);
if (!status) {
- printk("%s: could not allocate a buffer for switch "
- "capabilities.\n",
- mmc_hostname(card->host));
+ printk(KERN_ERR "%s: could not allocate a buffer for "
+ "switch capabilities.\n", mmc_hostname(card->host));
return err;
}
@@ -573,14 +571,17 @@ static void mmc_sd_resume(struct mmc_host *host)
BUG_ON(!host->card);
mmc_claim_host(host);
-
err = mmc_sd_init_card(host, host->ocr, host->card);
+ mmc_release_host(host);
+
if (err != MMC_ERR_NONE) {
mmc_sd_remove(host);
+
+ mmc_claim_host(host);
mmc_detach_bus(host);
+ mmc_release_host(host);
}
- mmc_release_host(host);
}
#else
@@ -634,8 +635,10 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
/*
* Can we support the voltage(s) of the card(s)?
*/
- if (!host->ocr)
+ if (!host->ocr) {
+ err = -EINVAL;
goto err;
+ }
/*
* Detect and init the card.
@@ -648,18 +651,21 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
err = mmc_add_card(host->card);
if (err)
- goto reclaim_host;
+ goto remove_card;
return 0;
-reclaim_host:
- mmc_claim_host(host);
+remove_card:
mmc_remove_card(host->card);
host->card = NULL;
+ mmc_claim_host(host);
err:
mmc_detach_bus(host);
mmc_release_host(host);
+ printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
+ mmc_hostname(host), err);
+
return 0;
}
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 9697ce58110..342f340ebc2 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/sd_ops.h
+ * linux/drivers/mmc/core/sd_ops.h
*
* Copyright 2006-2007 Pierre Ossman
*
@@ -21,11 +21,40 @@
#include "core.h"
#include "sd_ops.h"
+static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
+{
+ int err;
+ struct mmc_command cmd;
+
+ BUG_ON(!host);
+ BUG_ON(card && (card->host != host));
+
+ cmd.opcode = MMC_APP_CMD;
+
+ if (card) {
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_BCR;
+ }
+
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err != MMC_ERR_NONE)
+ return err;
+
+ /* Check that the card supports application commands */
+ if (!(cmd.resp[0] & R1_APP_CMD))
+ return MMC_ERR_FAILED;
+
+ return MMC_ERR_NONE;
+}
+
/**
* mmc_wait_for_app_cmd - start an application command and wait for
completion
* @host: MMC host to start command
- * @rca: RCA to send MMC_APP_CMD to
+ * @card: Card to send MMC_APP_CMD to
* @cmd: MMC command to start
* @retries: maximum number of retries
*
@@ -77,35 +106,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
EXPORT_SYMBOL(mmc_wait_for_app_cmd);
-int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
-{
- int err;
- struct mmc_command cmd;
-
- BUG_ON(!host);
- BUG_ON(card && (card->host != host));
-
- cmd.opcode = MMC_APP_CMD;
-
- if (card) {
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- } else {
- cmd.arg = 0;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_BCR;
- }
-
- err = mmc_wait_for_cmd(host, &cmd, 0);
- if (err != MMC_ERR_NONE)
- return err;
-
- /* Check that card supported application commands */
- if (!(cmd.resp[0] & R1_APP_CMD))
- return MMC_ERR_FAILED;
-
- return MMC_ERR_NONE;
-}
-
int mmc_app_set_bus_width(struct mmc_card *card, int width)
{
int err;
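mmc_app_cmd() is moved up here and made static: it issues the APP_CMD prefix (CMD55, with the card's RCA in the upper half of the argument) and checks the R1 status bit that tells us the card will treat the next command as an application command. A toy sketch of that prefix-then-command sequence is below; the bit position, RCA and response handling are placeholders chosen for the example, not taken from a real card or the MMC core.

#include <stdio.h>

#define R1_APP_CMD (1u << 5)   /* placeholder for the APP_CMD status bit */

/* Stand-in for issuing a command and returning its R1 response. */
static unsigned int send_cmd(unsigned int opcode, unsigned int arg)
{
	printf("CMD%u arg=0x%08x\n", opcode, arg);
	return R1_APP_CMD;                  /* pretend the card accepted it */
}

/* Application commands must be prefixed with CMD55 (APP_CMD). */
static int send_app_cmd(unsigned int rca, unsigned int acmd, unsigned int arg)
{
	unsigned int r1 = send_cmd(55, rca << 16);

	if (!(r1 & R1_APP_CMD))
		return -1;                  /* card did not enter app-cmd state */
	send_cmd(acmd, arg);
	return 0;
}

int main(void)
{
	/* e.g. ACMD6 (SET_BUS_WIDTH), argument 2 selects 4-bit mode */
	return send_app_cmd(0x0001, 6, 2);
}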
diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h
index 1240fddba5e..9742d8a3066 100644
--- a/drivers/mmc/core/sd_ops.h
+++ b/drivers/mmc/core/sd_ops.h
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/sd_ops.h
+ * linux/drivers/mmc/core/sd_ops.h
*
* Copyright 2006-2007 Pierre Ossman
*
@@ -12,7 +12,6 @@
#ifndef _MMC_SD_OPS_H
#define _MMC_SD_OPS_H
-int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card);
int mmc_app_set_bus_width(struct mmc_card *card, int width);
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_send_if_cond(struct mmc_host *host, u32 ocr);
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 15aab374127..62564ccde03 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/at91_mci.c - ATMEL AT91 MCI Driver
+ * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
*
* Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
*
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 52b63f11ddd..34c99d4ea04 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/au1xmmc.c - AU1XX0 MMC driver
+ * linux/drivers/mmc/host/au1xmmc.c - AU1XX0 MMC driver
*
* Copyright (c) 2005, Advanced Micro Devices, Inc.
*
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 7ee2045acbe..54bfc9f2559 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/imxmmc.c - Motorola i.MX MMCI driver
+ * linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver
*
* Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
* Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index d11c2d23cee..be730c0a035 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/mmci.c - ARM PrimeCell MMCI PL180/1 driver
+ * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
*
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 6d7eadc9a67..000e6a91978 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/mmci.h - ARM PrimeCell MMCI PL180/1 driver
+ * linux/drivers/mmc/host/mmci.h - ARM PrimeCell MMCI PL180/1 driver
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
*
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index b0824a38f42..0cf97edc5f5 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/media/mmc/omap.c
+ * linux/drivers/mmc/host/omap.c
*
* Copyright (C) 2004 Nokia Corporation
* Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com>
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index f8985c508bb..ff960334b33 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/pxa.c - PXA MMCI driver
+ * linux/drivers/mmc/host/pxa.c - PXA MMCI driver
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
*
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 4a24db028d8..f2bc87ac24f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/sdhci.c - Secure Digital Host Controller Interface driver
+ * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
*
* Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
*
@@ -34,6 +34,7 @@ static unsigned int debug_quirks = 0;
/* Controller doesn't like some resets when there is no card inserted. */
#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
+#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
static const struct pci_device_id pci_ids[] __devinitdata = {
{
@@ -78,6 +79,24 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE,
},
+ {
+ .vendor = PCI_VENDOR_ID_ENE,
+ .device = PCI_DEVICE_ID_ENE_CB714_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_ENE,
+ .device = PCI_DEVICE_ID_ENE_CB714_SD_2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
+ },
+
{ /* Generic SD host controller */
PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
},
@@ -361,11 +380,6 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
if (data == NULL)
return;
- DBG("blksz %04x blks %04x flags %08x\n",
- data->blksz, data->blocks, data->flags);
- DBG("tsac %d ms nsac %d clk\n",
- data->timeout_ns / 1000000, data->timeout_clks);
-
/* Sanity checks */
BUG_ON(data->blksz * data->blocks > 524288);
BUG_ON(data->blksz > host->mmc->max_blk_size);
@@ -476,8 +490,6 @@ static void sdhci_finish_data(struct sdhci_host *host)
data->error = MMC_ERR_FAILED;
}
- DBG("Ending data transfer (%d bytes)\n", data->bytes_xfered);
-
if (data->stop) {
/*
* The controller needs a reset of internal state machines
@@ -501,8 +513,6 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
WARN_ON(host->cmd);
- DBG("Sending cmd (%x)\n", cmd->opcode);
-
/* Wait max 10 ms */
timeout = 10;
@@ -590,8 +600,6 @@ static void sdhci_finish_command(struct sdhci_host *host)
host->cmd->error = MMC_ERR_NONE;
- DBG("Ending cmd (%x)\n", host->cmd->opcode);
-
if (host->cmd->data)
host->data = host->cmd->data;
else
@@ -759,6 +767,14 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
+ /*
+ * Some (ENE) controllers go apeshit on some ios operation,
+ * signalling timeout and CRC errors even on CMD0. Resetting
+ * it on each ios seems to solve the problem.
+ */
+ if(host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -835,8 +851,6 @@ static void sdhci_tasklet_finish(unsigned long param)
mrq = host->mrq;
- DBG("Ending request, cmd (%x)\n", mrq->cmd->opcode);
-
/*
* The controller needs a reset of internal state machines
* upon error conditions.
@@ -922,20 +936,17 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
return;
}
- if (intmask & SDHCI_INT_RESPONSE)
- sdhci_finish_command(host);
- else {
- if (intmask & SDHCI_INT_TIMEOUT)
- host->cmd->error = MMC_ERR_TIMEOUT;
- else if (intmask & SDHCI_INT_CRC)
- host->cmd->error = MMC_ERR_BADCRC;
- else if (intmask & (SDHCI_INT_END_BIT | SDHCI_INT_INDEX))
- host->cmd->error = MMC_ERR_FAILED;
- else
- host->cmd->error = MMC_ERR_INVALID;
+ if (intmask & SDHCI_INT_TIMEOUT)
+ host->cmd->error = MMC_ERR_TIMEOUT;
+ else if (intmask & SDHCI_INT_CRC)
+ host->cmd->error = MMC_ERR_BADCRC;
+ else if (intmask & (SDHCI_INT_END_BIT | SDHCI_INT_INDEX))
+ host->cmd->error = MMC_ERR_FAILED;
+ if (host->cmd->error != MMC_ERR_NONE)
tasklet_schedule(&host->finish_tasklet);
- }
+ else if (intmask & SDHCI_INT_RESPONSE)
+ sdhci_finish_command(host);
}
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
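The ENE CB714 entries added to the PCI ID table above carry their quirk bits in driver_data, and sdhci_set_ios() now tests the new SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS flag to decide whether to reset the command and data circuits on every ios change. The per-device quirk-bitmask idiom looks roughly like the stand-alone sketch below; the vendor/device numbers and flag names are placeholders, not the real sdhci definitions.

#include <stdint.h>
#include <stdio.h>

#define QUIRK_SINGLE_POWER_WRITE   (1u << 0)
#define QUIRK_RESET_ON_IOS         (1u << 1)

struct id_entry {
	uint16_t vendor, device;
	uint32_t quirks;          /* plays the role of driver_data */
};

static const struct id_entry ids[] = {
	{ 0x1234, 0x0001, QUIRK_SINGLE_POWER_WRITE | QUIRK_RESET_ON_IOS },
	{ 0x1234, 0x0002, QUIRK_SINGLE_POWER_WRITE | QUIRK_RESET_ON_IOS },
	{ 0, 0, 0 },              /* terminator */
};

static uint32_t lookup_quirks(uint16_t vendor, uint16_t device)
{
	for (const struct id_entry *e = ids; e->vendor; e++)
		if (e->vendor == vendor && e->device == device)
			return e->quirks;
	return 0;
}

int main(void)
{
	uint32_t quirks = lookup_quirks(0x1234, 0x0001);

	if (quirks & QUIRK_RESET_ON_IOS)
		printf("reset CMD/DATA circuits on every ios change\n");
	return 0;
}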
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index a6c870480b8..d157776c114 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/sdhci.h - Secure Digital Host Controller Interface driver
+ * linux/drivers/mmc/host/sdhci.h - Secure Digital Host Controller Interface driver
*
* Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
*
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 867ca6a6929..e0c9808fd42 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
+ * linux/drivers/mmc/host/wbsd.c - Winbond W83L51xD SD/MMC driver
*
* Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved.
*
@@ -207,8 +207,6 @@ static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
{
unsigned long dmaflags;
- DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);
-
if (host->dma >= 0) {
/*
* Release ISA DMA controller.
@@ -360,8 +358,6 @@ static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
int i;
u8 status, isr;
- DBGF("Sending cmd (%x)\n", cmd->opcode);
-
/*
* Clear accumulated ISR. The interrupt routine
* will fill this one with events that occur during
@@ -411,8 +407,6 @@ static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
wbsd_get_short_reply(host, cmd);
}
}
-
- DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
}
/*
@@ -550,11 +544,6 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
unsigned long dmaflags;
unsigned int size;
- DBGF("blksz %04x blks %04x flags %08x\n",
- data->blksz, data->blocks, data->flags);
- DBGF("tsac %d ms nsac %d clk\n",
- data->timeout_ns / 1000000, data->timeout_clks);
-
/*
* Calculate size.
*/
@@ -752,8 +741,6 @@ static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
}
}
- DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
-
wbsd_request_end(host, host->mrq);
}
diff --git a/drivers/mmc/host/wbsd.h b/drivers/mmc/host/wbsd.h
index 873bda1e59b..0877866f8d2 100644
--- a/drivers/mmc/host/wbsd.h
+++ b/drivers/mmc/host/wbsd.h
@@ -1,5 +1,5 @@
/*
- * linux/drivers/mmc/wbsd.h - Winbond W83L51xD SD/MMC driver
+ * linux/drivers/mmc/host/wbsd.h - Winbond W83L51xD SD/MMC driver
*
* Copyright (C) 2004-2007 Pierre Ossman, All Rights Reserved.
*
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index f88ebc5b685..cc6c7344243 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -103,7 +103,7 @@ config MTD_PMC_MSP_RAMROOT
config MTD_SUN_UFLASH
tristate "Sun Microsystems userflash support"
- depends on SPARC && MTD_CFI
+ depends on SPARC && MTD_CFI && PCI
help
This provides a 'mapping' driver which supports the way in
which user-programmable flash chips are connected on various
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 3ff1155459a..d915837193c 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -57,6 +57,7 @@
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
static char version[] __initdata =
"82596.c $Revision: 1.5 $\n";
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 336af0635df..94b78cc5fe8 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -18,7 +18,7 @@ gianfar_driver-objs := gianfar.o \
gianfar_sysfs.o
obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
-ucc_geth_driver-objs := ucc_geth.o ucc_geth_mii.o
+ucc_geth_driver-objs := ucc_geth.o ucc_geth_mii.o ucc_geth_ethtool.o
#
# link order important here
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index b78a4e5ceeb..62e660a7938 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -3128,12 +3128,6 @@ static int __devinit read_eeprom_byte(struct net_device *dev,
int result = 0;
short i;
- if (!dev) {
- printk(KERN_ERR "No device!\n");
- result = -ENODEV;
- goto out;
- }
-
/*
* Don't take interrupts on this CPU will bit banging
* the %#%#@$ I2C device
diff --git a/drivers/net/atl1/atl1_hw.h b/drivers/net/atl1/atl1_hw.h
index 100c09c66e6..939aa0f53f6 100644
--- a/drivers/net/atl1/atl1_hw.h
+++ b/drivers/net/atl1/atl1_hw.h
@@ -680,11 +680,6 @@ void atl1_check_options(struct atl1_adapter *adapter);
#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
-/* The size (in bytes) of a ethernet packet */
-#define ENET_HEADER_SIZE 14
-#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */
-#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */
-#define ETHERNET_FCS_SIZE 4
#define MAX_JUMBO_FRAME_SIZE 0x2800
#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
@@ -929,8 +924,8 @@ enum atl1_dma_req_block {
atl1_dma_req_128 = 0,
atl1_dma_req_256 = 1,
atl1_dma_req_512 = 2,
- atl1_dam_req_1024 = 3,
- atl1_dam_req_2048 = 4,
+ atl1_dma_req_1024 = 3,
+ atl1_dma_req_2048 = 4,
atl1_dma_req_4096 = 5
};
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index fd1e156f174..56f6389a300 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -59,6 +59,7 @@
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
#include <linux/irqreturn.h>
#include <linux/workqueue.h>
#include <linux/timer.h>
@@ -120,8 +121,8 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
struct atl1_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
- hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+ hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
adapter->wol = 0;
adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
@@ -314,7 +315,7 @@ err_nomem:
return -ENOMEM;
}
-void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
+static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
{
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
@@ -688,9 +689,9 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
int old_mtu = netdev->mtu;
- int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+ if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
return -EINVAL;
@@ -908,8 +909,8 @@ static u32 atl1_configure(struct atl1_adapter *adapter)
/* config DMA Engine */
value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
<< DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
- ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
- << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
+ ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
+ << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
DMA_CTRL_DMAW_EN;
value |= (u32) hw->dma_ord;
if (atl1_rcb_128 == hw->rcb_value)
@@ -917,7 +918,10 @@ static u32 atl1_configure(struct atl1_adapter *adapter)
iowrite32(value, hw->hw_addr + REG_DMA_CTRL);
/* config CMB / SMB */
- value = hw->cmb_rrd | ((u32) hw->cmb_tpd << 16);
+ value = (hw->cmb_tpd > adapter->tpd_ring.count) ?
+ hw->cmb_tpd : adapter->tpd_ring.count;
+ value <<= 16;
+ value |= hw->cmb_rrd;
iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
@@ -1334,7 +1338,7 @@ rrd_ok:
skb = buffer_info->skb;
length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
- skb_put(skb, length - ETHERNET_FCS_SIZE);
+ skb_put(skb, length - ETH_FCS_LEN);
/* Receive Checksum Offload */
atl1_rx_checksum(adapter, rrd, skb);
@@ -1422,7 +1426,7 @@ static void atl1_intr_tx(struct atl1_adapter *adapter)
netif_wake_queue(adapter->netdev);
}
-static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring)
+static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
@@ -1453,7 +1457,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
iph->daddr, 0, IPPROTO_TCP, 0);
ipofst = skb_network_offset(skb);
- if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */
+ if (ipofst != ETH_HLEN) /* 802.3 frame */
tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
tso->tsopl |= (iph->ihl &
@@ -1708,7 +1712,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_LOCKED;
}
- if (tpd_avail(&adapter->tpd_ring) < count) {
+ if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
/* not enough descriptors */
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->lock, flags);
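The atl1 hunks above drop the driver's private ENET_HEADER_SIZE, ETHERNET_FCS_SIZE and MINIMUM_ETHERNET_FRAME_SIZE constants in favour of the standard ETH_HLEN, ETH_FCS_LEN and ETH_ZLEN definitions from <linux/if_ether.h>, so for a default MTU of 1500 the maximum frame becomes 1500 + 14 + 4 = 1518 bytes and the minimum stays 60 + 4 = 64 bytes. A tiny stand-alone illustration of that arithmetic (the macro values are repeated here only so the example builds outside the kernel):

#include <stdio.h>

/* Standard Ethernet sizes, as defined in <linux/if_ether.h>. */
#define ETH_HLEN     14   /* destination + source MAC + ethertype   */
#define ETH_FCS_LEN   4   /* frame check sequence (CRC)             */
#define ETH_ZLEN     60   /* minimum frame length, FCS not included */

int main(void)
{
	int mtu = 1500;

	int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;     /* 1518 */
	int min_frame = ETH_ZLEN + ETH_FCS_LEN;           /* 64   */

	printf("max_frame=%d min_frame=%d\n", max_frame, min_frame);
	return 0;
}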
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 1d882360b34..e43e8047b90 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -819,7 +819,7 @@ static int ax_probe(struct platform_device *pdev)
}
ei_status.mem = ioremap(res->start, size);
- dev->base_addr = (long)ei_status.mem;
+ dev->base_addr = (unsigned long)ei_status.mem;
if (ei_status.mem == NULL) {
dev_err(&pdev->dev, "Cannot ioremap area (%08zx,%08zx)\n",
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 9a08d656f1c..2bb97d46468 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -798,6 +798,7 @@ static void bf537mac_shutdown(struct net_device *dev)
*/
static int bf537mac_open(struct net_device *dev)
{
+ int retval;
pr_debug("%s: %s\n", dev->name, __FUNCTION__);
/*
@@ -811,7 +812,10 @@ static int bf537mac_open(struct net_device *dev)
}
/* initial rx and tx list */
- desc_list_init();
+ retval = desc_list_init();
+
+ if (retval)
+ return retval;
bf537mac_setphy(dev);
setup_system_regs(dev);
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index ebcf35e4cf5..e620ed4c3ff 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -699,7 +699,7 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
* the buffer.
*/
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
- int gfp)
+ gfp_t gfp)
{
if (likely(!skb_cloned(skb))) {
BUG_ON(skb->len < len);
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 7df23dc2819..9c8e3f9f5e5 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -200,6 +200,7 @@
/* Include files */
#include <linux/bitops.h>
+#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/eisa.h>
@@ -240,8 +241,6 @@ static char version[] __devinitdata =
*/
#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
-#define __unused __attribute__ ((unused))
-
#ifdef CONFIG_PCI
#define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
#else
@@ -375,7 +374,7 @@ static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
{
- struct device __unused *bdev = bp->bus_dev;
+ struct device __maybe_unused *bdev = bp->bus_dev;
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -399,7 +398,7 @@ static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
{
- struct device __unused *bdev = bp->bus_dev;
+ struct device __maybe_unused *bdev = bp->bus_dev;
int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
@@ -866,7 +865,7 @@ static void __devinit dfx_bus_uninit(struct net_device *dev)
static void __devinit dfx_bus_config_check(DFX_board_t *bp)
{
- struct device __unused *bdev = bp->bus_dev;
+ struct device __maybe_unused *bdev = bp->bus_dev;
int dfx_bus_eisa = DFX_BUS_EISA(bdev);
int status; /* return code from adapter port control call */
u32 host_data; /* LW data returned from port control call */
@@ -3624,8 +3623,8 @@ static void __devexit dfx_unregister(struct device *bdev)
}
-static int __devinit __unused dfx_dev_register(struct device *);
-static int __devexit __unused dfx_dev_unregister(struct device *);
+static int __devinit __maybe_unused dfx_dev_register(struct device *);
+static int __devexit __maybe_unused dfx_dev_unregister(struct device *);
#ifdef CONFIG_PCI
static int __devinit dfx_pci_register(struct pci_dev *,
@@ -3699,7 +3698,7 @@ static struct tc_driver dfx_tc_driver = {
};
#endif /* CONFIG_TC */
-static int __devinit __unused dfx_dev_register(struct device *dev)
+static int __devinit __maybe_unused dfx_dev_register(struct device *dev)
{
int status;
@@ -3709,7 +3708,7 @@ static int __devinit __unused dfx_dev_register(struct device *dev)
return status;
}
-static int __devexit __unused dfx_dev_unregister(struct device *dev)
+static int __devexit __maybe_unused dfx_dev_unregister(struct device *dev)
{
put_device(dev);
dfx_unregister(dev);
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 489c8b260dd..8ee2c2c86b4 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0071"
+#define DRV_VERSION "EHEA_0072"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 4c70a9301c1..58702f54c3f 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -589,6 +589,23 @@ static int ehea_poll(struct net_device *dev, int *budget)
return 1;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ehea_netpoll(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+
+ netif_rx_schedule(port->port_res[0].d_netdev);
+}
+#endif
+
+static int ehea_poll_firstqueue(struct net_device *dev, int *budget)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct net_device *d_dev = port->port_res[0].d_netdev;
+
+ return ehea_poll(d_dev, budget);
+}
+
static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
struct ehea_port_res *pr = param;
@@ -2626,7 +2643,10 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
dev->open = ehea_open;
- dev->poll = ehea_poll;
+ dev->poll = ehea_poll_firstqueue;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ehea_netpoll;
+#endif
dev->weight = 64;
dev->stop = ehea_stop;
dev->hard_start_xmit = ehea_start_xmit;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 6d1d50a1978..661c747389e 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5546,6 +5546,22 @@ static struct pci_device_id pci_tbl[] = {
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
+ { /* MCP73 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP73 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP73 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP73 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
{0,},
};
diff --git a/drivers/net/lguest_net.c b/drivers/net/lguest_net.c
index 112778652f7..cab57911a80 100644
--- a/drivers/net/lguest_net.c
+++ b/drivers/net/lguest_net.c
@@ -1,6 +1,13 @@
-/* A simple network driver for lguest.
+/*D:500
+ * The Guest network driver.
*
- * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ * This is a very simple virtual network driver, and our last Guest driver.
+ * The only trick is that it can talk directly to multiple other recipients
+ * (ie. other Guests on the same network). It can also be used with only the
+ * Host on the network.
+ :*/
+
+/* Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,23 +35,47 @@
#define MAX_LANS 4
#define NUM_SKBS 8
+/*M:011 Network code master Jeff Garzik points out numerous shortcomings in
+ * this driver if it aspires to greatness.
+ *
+ * Firstly, it doesn't use "NAPI": the networking's New API, and is poorer for
+ * it. As he says "NAPI means system-wide load leveling, across multiple
+ * network interfaces. Lack of NAPI can mean competition at higher loads."
+ *
+ * He also points out that we don't implement set_mac_address, so users cannot
+ * change the device's hardware address. When I asked why one would want to:
+ * "Bonding, and situations where you /do/ want the MAC address to "leak" out
+ * of the host onto the wider net."
+ *
+ * Finally, he would like module unloading: "It is not unrealistic to think of
+ * [un|re|]loading the net support module in an lguest guest. And, adding
+ * module support makes the programmer more responsible, because they now have
+ * to learn to clean up after themselves. Any driver that cannot clean up
+ * after itself is an incomplete driver in my book."
+ :*/
+
+/*D:530 The "struct lguestnet_info" contains all the information we need to
+ * know about the network device. */
struct lguestnet_info
{
- /* The shared page(s). */
+ /* The mapped device page(s) (an array of "struct lguest_net"). */
struct lguest_net *peer;
+ /* The physical address of the device page(s) */
unsigned long peer_phys;
+ /* The size of the device page(s). */
unsigned long mapsize;
/* The lguest_device I come from */
struct lguest_device *lgdev;
- /* My peerid. */
+ /* My peerid (ie. my slot in the array). */
unsigned int me;
- /* Receive queue. */
+ /* Receive queue: the network packets waiting to be filled. */
struct sk_buff *skb[NUM_SKBS];
struct lguest_dma dma[NUM_SKBS];
};
+/*:*/
/* How many bytes left in this page. */
static unsigned int rest_of_page(void *data)
@@ -52,39 +83,82 @@ static unsigned int rest_of_page(void *data)
return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE);
}
-/* Simple convention: offset 4 * peernum. */
+/*D:570 Each peer (ie. Guest or Host) on the network binds their receive
+ * buffers to a different key: we simply use the physical address of the
+ * device's memory page plus the peer number. The Host insists that all keys
+ * be a multiple of 4, so we multiply the peer number by 4. */
static unsigned long peer_key(struct lguestnet_info *info, unsigned peernum)
{
return info->peer_phys + 4 * peernum;
}
+/* This is the routine which sets up a "struct lguest_dma" to point to a
+ * network packet, similar to req_to_dma() in lguest_blk.c. The structure of a
+ * "struct sk_buff" has grown complex over the years: it consists of a "head"
+ * linear section pointed to by "skb->data", and possibly an array of
+ * "fragments" in the case of a non-linear packet.
+ *
+ * Our receive buffers don't use fragments at all but outgoing skbs might, so
+ * we handle it. */
static void skb_to_dma(const struct sk_buff *skb, unsigned int headlen,
struct lguest_dma *dma)
{
unsigned int i, seg;
+ /* First, we put the linear region into the "struct lguest_dma". Each
+ * entry can't go over a page boundary, so even though all our packets
+ * are 1514 bytes or less, we might need to use two entries here: */
for (i = seg = 0; i < headlen; seg++, i += rest_of_page(skb->data+i)) {
dma->addr[seg] = virt_to_phys(skb->data + i);
dma->len[seg] = min((unsigned)(headlen - i),
rest_of_page(skb->data + i));
}
+
+ /* Now we handle the fragments: at least they're guaranteed not to go
+ * over a page. skb_shinfo(skb) returns a pointer to the structure
+ * which tells us about the number of fragments and the fragment
+ * array. */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, seg++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
/* Should not happen with MTU less than 64k - 2 * PAGE_SIZE. */
if (seg == LGUEST_MAX_DMA_SECTIONS) {
+ /* We will end up sending a truncated packet should
+ * this ever happen. Plus, a cool log message! */
printk("Woah dude! Megapacket!\n");
break;
}
dma->addr[seg] = page_to_phys(f->page) + f->page_offset;
dma->len[seg] = f->size;
}
+
+ /* If after all that we didn't use the entire "struct lguest_dma"
+ * array, we terminate it with a 0 length. */
if (seg < LGUEST_MAX_DMA_SECTIONS)
dma->len[seg] = 0;
}
-/* We overload multicast bit to show promiscuous mode. */
+/*
+ * Packet transmission.
+ *
+ * Our packet transmission is a little unusual. A real network card would just
+ * send out the packet and leave the receivers to decide if they're interested.
+ * Instead, we look through the network device memory page and see if any of
+ * the ethernet addresses match the packet destination, and if so we send it to
+ * that Guest.
+ *
+ * This is made a little more complicated in two cases. The first case is
+ * broadcast packets: for that we send the packet to all Guests on the network,
+ * one at a time. The second case is "promiscuous" mode, where a Guest wants
+ * to see all the packets on the network. We need a way for the Guest to tell
+ * us it wants to see all packets, so it sets the "multicast" bit on its
+ * published MAC address, which is never valid in a real ethernet address.
+ */
#define PROMISC_BIT 0x01
+/* This is the callback which is summoned whenever the network device's
+ * multicast or promiscuous state changes. If the card is in promiscuous mode,
+ * we advertise that in our ethernet address in the device's memory. We do the
+ * same if Linux wants any or all multicast traffic. */
static void lguestnet_set_multicast(struct net_device *dev)
{
struct lguestnet_info *info = netdev_priv(dev);
@@ -95,11 +169,14 @@ static void lguestnet_set_multicast(struct net_device *dev)
info->peer[info->me].mac[0] &= ~PROMISC_BIT;
}
+/* A simple test function to see if a peer wants to see all packets.*/
static int promisc(struct lguestnet_info *info, unsigned int peer)
{
return info->peer[peer].mac[0] & PROMISC_BIT;
}
+/* Another simple function to see if a peer's advertised ethernet address
+ * matches a packet's destination ethernet address. */
static int mac_eq(const unsigned char mac[ETH_ALEN],
struct lguestnet_info *info, unsigned int peer)
{
@@ -109,6 +186,8 @@ static int mac_eq(const unsigned char mac[ETH_ALEN],
return memcmp(mac+1, info->peer[peer].mac+1, ETH_ALEN-1) == 0;
}
+/* This is the function which actually sends a packet once we've decided a
+ * peer wants it: */
static void transfer_packet(struct net_device *dev,
struct sk_buff *skb,
unsigned int peernum)
@@ -116,76 +195,134 @@ static void transfer_packet(struct net_device *dev,
struct lguestnet_info *info = netdev_priv(dev);
struct lguest_dma dma;
+ /* We use our handy "struct lguest_dma" packing function to prepare
+ * the skb for sending. */
skb_to_dma(skb, skb_headlen(skb), &dma);
pr_debug("xfer length %04x (%u)\n", htons(skb->len), skb->len);
+ /* This is the actual send call which copies the packet. */
lguest_send_dma(peer_key(info, peernum), &dma);
+
+ /* Check that the entire packet was transmitted. If not, it could mean
+ * that the other Guest registered a short receive buffer, but this
+ * driver should never do that. More likely, the peer is dead. */
if (dma.used_len != skb->len) {
dev->stats.tx_carrier_errors++;
pr_debug("Bad xfer to peer %i: %i of %i (dma %p/%i)\n",
peernum, dma.used_len, skb->len,
(void *)dma.addr[0], dma.len[0]);
} else {
+ /* On success we update the stats. */
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
}
}
+/* Another helper function to tell us if a slot in the device memory is unused.
+ * Since we always set the Local Assignment bit in the ethernet address, the
+ * first byte can never be 0. */
static int unused_peer(const struct lguest_net peer[], unsigned int num)
{
return peer[num].mac[0] == 0;
}
+/* Finally, here is the routine which handles an outgoing packet. It's called
+ * "start_xmit" for traditional reasons. */
static int lguestnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned int i;
int broadcast;
struct lguestnet_info *info = netdev_priv(dev);
+ /* Extract the destination ethernet address from the packet. */
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
pr_debug("%s: xmit %02x:%02x:%02x:%02x:%02x:%02x\n",
dev->name, dest[0],dest[1],dest[2],dest[3],dest[4],dest[5]);
+ /* If it's a multicast packet, we broadcast to everyone. That's not
+ * very efficient, but there are very few applications which actually
+ * use multicast, which is a shame really.
+ *
+ * As etherdevice.h points out: "By definition the broadcast address is
+ * also a multicast address." So we don't have to test for broadcast
+ * packets separately. */
broadcast = is_multicast_ether_addr(dest);
+
+ /* Look through all the published ethernet addresses to see if we
+ * should send this packet. */
for (i = 0; i < info->mapsize/sizeof(struct lguest_net); i++) {
+ /* We don't send to ourselves (we actually can't SEND_DMA to
+ * ourselves anyway), and don't send to unused slots.*/
if (i == info->me || unused_peer(info->peer, i))
continue;
+ /* If it's broadcast we send it. If they want every packet we
+ * send it. If the destination matches their address we send
+ * it. Otherwise we go to the next peer. */
if (!broadcast && !promisc(info, i) && !mac_eq(dest, info, i))
continue;
pr_debug("lguestnet %s: sending from %i to %i\n",
dev->name, info->me, i);
+ /* Our routine which actually does the transfer. */
transfer_packet(dev, skb, i);
}
+
+ /* An xmit routine is expected to dispose of the packet, so we do. */
dev_kfree_skb(skb);
+
+ /* As per kernel convention, 0 means success. This is why I love
+ * networking: even if we never sent to anyone, that's still
+ * success! */
return 0;
}
-/* Find a new skb to put in this slot in shared mem. */
+/*D:560
+ * Packet receiving.
+ *
+ * First, here's a helper routine which fills one of our array of receive
+ * buffers: */
static int fill_slot(struct net_device *dev, unsigned int slot)
{
struct lguestnet_info *info = netdev_priv(dev);
- /* Try to create and register a new one. */
+
+ /* We can receive ETH_DATA_LEN (1500) byte packets, plus a standard
+ * ethernet header of ETH_HLEN (14) bytes. */
info->skb[slot] = netdev_alloc_skb(dev, ETH_HLEN + ETH_DATA_LEN);
if (!info->skb[slot]) {
printk("%s: could not fill slot %i\n", dev->name, slot);
return -ENOMEM;
}
+ /* skb_to_dma() is a helper which sets up the "struct lguest_dma" to
+ * point to the data in the skb: we also use it for sending out a
+ * packet. */
skb_to_dma(info->skb[slot], ETH_HLEN + ETH_DATA_LEN, &info->dma[slot]);
+
+ /* This is a Write Memory Barrier: it ensures that the entry in the
+ * receive buffer array is written *before* we set the "used_len" entry
+ * to 0. If the Host were looking at the receive buffer array from a
+ * different CPU, it could potentially see "used_len = 0" and not see
+ * the updated receive buffer information. This would be a horribly
+ * nasty bug, so make sure the compiler and CPU know this has to happen
+ * first. */
wmb();
- /* Now we tell hypervisor it can use the slot. */
+ /* Writing 0 to "used_len" tells the Host it can use this receive
+ * buffer now. */
info->dma[slot].used_len = 0;
return 0;
}
+/* This is the actual receive routine. When we receive an interrupt from the
+ * Host to tell us a packet has been delivered, we arrive here: */
static irqreturn_t lguestnet_rcv(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct lguestnet_info *info = netdev_priv(dev);
unsigned int i, done = 0;
+ /* Look through our entire receive array for an entry which has data
+ * in it. */
for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
unsigned int length;
struct sk_buff *skb;
@@ -194,10 +331,16 @@ static irqreturn_t lguestnet_rcv(int irq, void *dev_id)
if (length == 0)
continue;
+ /* We've found one! Remember the skb (we grabbed the length
+ * above), and immediately refill the slot we've taken it
+ * from. */
done++;
skb = info->skb[i];
fill_slot(dev, i);
+ /* This shouldn't happen: micropackets could be sent by a
+ * badly-behaved Guest on the network, but the Host will never
+ * stuff more data in the buffer than the buffer length. */
if (length < ETH_HLEN || length > ETH_HLEN + ETH_DATA_LEN) {
pr_debug(KERN_WARNING "%s: unbelievable skb len: %i\n",
dev->name, length);
@@ -205,36 +348,72 @@ static irqreturn_t lguestnet_rcv(int irq, void *dev_id)
continue;
}
+ /* skb_put(), what a great function! I've ranted about this
+ * function before (http://lkml.org/lkml/1999/9/26/24). You
+ * call it after you've added data to the end of an skb (in
+ * this case, it was the Host which wrote the data). */
skb_put(skb, length);
+
+ /* The ethernet header contains a protocol field: we use the
+ * standard helper to extract it, and place the result in
+ * skb->protocol. The helper also sets up skb->pkt_type and
+ * eats up the ethernet header from the front of the packet. */
skb->protocol = eth_type_trans(skb, dev);
- /* This is a reliable transport. */
+
+ /* If this device doesn't need checksums for sending, we also
+ * don't need to check the packets when they come in. */
if (dev->features & NETIF_F_NO_CSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* As a last resort for debugging the driver or the lguest I/O
+ * subsystem, you can uncomment the "#define DEBUG" at the top
+ * of this file, which turns all the pr_debug() into printk()
+ * and floods the logs. */
pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
ntohs(skb->protocol), skb->len, skb->pkt_type);
+ /* Update the packet and byte counts (visible from ifconfig,
+ * and good for debugging). */
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
+
+ /* Hand our fresh network packet into the stack's "network
+ * interface receive" routine. That will free the packet
+ * itself when it's finished. */
netif_rx(skb);
}
+
+ /* If we found any packets, we assume the interrupt was for us. */
return done ? IRQ_HANDLED : IRQ_NONE;
}
+/*D:550 This is where we start: when the device is brought up by dhcpd or
+ * ifconfig. At this point we advertise our MAC address to the rest of the
+ * network, and register receive buffers ready for incoming packets. */
static int lguestnet_open(struct net_device *dev)
{
int i;
struct lguestnet_info *info = netdev_priv(dev);
- /* Set up our MAC address */
+ /* Copy our MAC address into the device page, so others on the network
+ * can find us. */
memcpy(info->peer[info->me].mac, dev->dev_addr, ETH_ALEN);
- /* Turn on promisc mode if needed */
+ /* We might already be in promisc mode (dev->flags & IFF_PROMISC). Our
+ * set_multicast callback handles this already, so we call it now. */
lguestnet_set_multicast(dev);
+ /* Allocate packets and put them into our "struct lguest_dma" array.
+ * If we fail to allocate all the packets we could still limp along,
+ * but it's a sign of real stress so we should probably give up now. */
for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
if (fill_slot(dev, i) != 0)
goto cleanup;
}
+
+ /* Finally we tell the Host where our array of "struct lguest_dma"
+ * receive buffers is, binding it to the key corresponding to the
+ * device's physical memory plus our peerid. */
if (lguest_bind_dma(peer_key(info,info->me), info->dma,
NUM_SKBS, lgdev_irq(info->lgdev)) != 0)
goto cleanup;
@@ -245,22 +424,29 @@ cleanup:
dev_kfree_skb(info->skb[i]);
return -ENOMEM;
}
+/*:*/
+/* The close routine is called when the device is no longer in use: we clean up
+ * elegantly. */
static int lguestnet_close(struct net_device *dev)
{
unsigned int i;
struct lguestnet_info *info = netdev_priv(dev);
- /* Clear all trace: others might deliver packets, we'll ignore it. */
+ /* Clear all trace of our existence out of the device memory by setting
+ * the slot which held our MAC address to 0 (unused). */
memset(&info->peer[info->me], 0, sizeof(info->peer[info->me]));
- /* Deregister sg lists. */
+ /* Unregister our array of receive buffers */
lguest_unbind_dma(peer_key(info, info->me), info->dma);
for (i = 0; i < ARRAY_SIZE(info->dma); i++)
dev_kfree_skb(info->skb[i]);
return 0;
}
+/*D:510 The network device probe function is basically a standard ethernet
+ * device setup. It reads the "struct lguest_device_desc" and sets the "struct
+ * net_device". Oh, the line-by-line excitement! Let's skip over it. :*/
static int lguestnet_probe(struct lguest_device *lgdev)
{
int err, irqf = IRQF_SHARED;
@@ -290,10 +476,16 @@ static int lguestnet_probe(struct lguest_device *lgdev)
dev->stop = lguestnet_close;
dev->hard_start_xmit = lguestnet_start_xmit;
- /* Turning on/off promisc will call dev->set_multicast_list.
- * We don't actually support multicast yet */
+ /* We don't actually support multicast yet, but turning on/off
+ * promisc also calls dev->set_multicast_list. */
dev->set_multicast_list = lguestnet_set_multicast;
SET_NETDEV_DEV(dev, &lgdev->dev);
+
+ /* The network code complains if you have "scatter-gather" capability
+ * if you don't also handle checksums (it seems that would be
+ * "illogical"). So we use a lie of omission and don't tell it that we
+ * can handle scattered packets unless we also don't want checksums,
+ * even though to us they're completely independent. */
if (desc->features & LGUEST_NET_F_NOCSUM)
dev->features = NETIF_F_SG|NETIF_F_NO_CSUM;
@@ -325,6 +517,9 @@ static int lguestnet_probe(struct lguest_device *lgdev)
}
pr_debug("lguestnet: registered device %s\n", dev->name);
+ /* Finally, we put the "struct net_device" in the generic "struct
+ * lguest_device"s private pointer. Again, it's not necessary, but
+ * makes sure the cool kernel kids don't tease us. */
lgdev->private = dev;
return 0;
@@ -352,3 +547,11 @@ module_init(lguestnet_init);
MODULE_DESCRIPTION("Lguest network driver");
MODULE_LICENSE("GPL");
+
+/*D:580
+ * This is the last of the Drivers, and with this we have covered the many and
+ * wondrous and fine (and boring) details of the Guest.
+ *
+ * "make Launcher" beckons, where we answer questions like "Where do Guests
+ * come from?", and "What do you do when someone asks for optimization?"
+ */
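The commentary added to skb_to_dma() above describes how the packet's linear area is chopped into DMA entries that never cross a page boundary, with rest_of_page() capping each segment at the end of the current page. Here is a small user-space sketch of that page-bounded segmentation loop; PAGE_SIZE, the segment structure and the alignment trick are stand-ins for the sake of a runnable example.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define MAX_SEGS  16

struct seg { uintptr_t addr; unsigned int len; };

/* Bytes left before 'p' reaches the end of its page. */
static unsigned int rest_of_page(const void *p)
{
	return PAGE_SIZE - ((uintptr_t)p % PAGE_SIZE);
}

/* Split [data, data + len) into segments that never cross a page boundary. */
static unsigned int split(const char *data, unsigned int len, struct seg *segs)
{
	unsigned int done = 0, nseg = 0;

	while (done < len && nseg < MAX_SEGS) {
		unsigned int chunk = rest_of_page(data + done);

		if (chunk > len - done)
			chunk = len - done;
		segs[nseg].addr = (uintptr_t)(data + done);
		segs[nseg].len  = chunk;
		nseg++;
		done += chunk;
	}
	return nseg;
}

int main(void)
{
	static char buf[2 * PAGE_SIZE] __attribute__((aligned(4096)));
	struct seg segs[MAX_SEGS];

	/* Start 100 bytes before a page boundary: a 1514-byte frame splits in two. */
	unsigned int n = split(buf + PAGE_SIZE - 100, 1514, segs);

	for (unsigned int i = 0; i < n; i++)
		printf("segment %u: %u bytes\n", i, segs[i].len);
	return 0;
}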
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 5c86e737f95..721ee38d224 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -219,15 +219,6 @@ static void ei_tx_timeout(struct net_device *dev)
int txsr, isr, tickssofar = jiffies - dev->trans_start;
unsigned long flags;
-#if defined(CONFIG_M32R) && defined(CONFIG_SMP)
- unsigned long icucr;
-
- local_irq_save(flags);
- icucr = inl(M32R_ICU_CR1_PORTL);
- icucr |= M32R_ICUCR_ISMOD11;
- outl(icucr, M32R_ICU_CR1_PORTL);
- local_irq_restore(flags);
-#endif
ei_local->stat.tx_errors++;
spin_lock_irqsave(&ei_local->page_lock, flags);
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 325269d8ae3..d4c92cc879d 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1179,8 +1179,7 @@ dma_watchdog_shutdown_poll_result(struct netxen_adapter *adapter)
NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4))
printk(KERN_ERR "failed to read dma watchdog status\n");
- return ((netxen_get_dma_watchdog_enabled(ctrl) == 0) &&
- (netxen_get_dma_watchdog_disabled(ctrl) == 0));
+ return (netxen_get_dma_watchdog_enabled(ctrl) == 0);
}
static inline int
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index b703ccfe040..19e2fa940ac 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -46,7 +46,7 @@ MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-char netxen_nic_driver_name[] = "netxen-nic";
+char netxen_nic_driver_name[] = "netxen_nic";
static char netxen_nic_driver_string[] = "NetXen Network Driver version "
NETXEN_NIC_LINUX_VERSIONID;
@@ -640,6 +640,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETXEN_CRB_NORMALIZE(adapter,
NETXEN_ROMUSB_GLB_PEGTUNE_DONE));
/* Handshake with the card before we register the devices. */
+ writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
+ netxen_pinit_from_rom(adapter, 0);
+ msleep(1);
+ netxen_load_firmware(adapter);
netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
}
@@ -782,19 +786,18 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
if (adapter->portnum == 0) {
if (init_firmware_done) {
- dma_watchdog_shutdown_request(adapter);
- msleep(100);
i = 100;
- while ((dma_watchdog_shutdown_poll_result(adapter) != 1) && i) {
- printk(KERN_INFO "dma_watchdog_shutdown_poll still in progress\n");
+ do {
+ if (dma_watchdog_shutdown_request(adapter) == 1)
+ break;
msleep(100);
- i--;
- }
+ if (dma_watchdog_shutdown_poll_result(adapter) == 1)
+ break;
+ } while (--i);
- if (i == 0) {
- printk(KERN_ERR "dma_watchdog_shutdown_request failed\n");
- return;
- }
+ if (i == 0)
+ printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
+ netdev->name);
/* clear the register for future unloads/loads */
writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_CAM_RAM(0x1fc)));
@@ -803,11 +806,9 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
/* leave the hw in the same state as reboot */
writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
- if (netxen_pinit_from_rom(adapter, 0))
- return;
+ netxen_pinit_from_rom(adapter, 0);
msleep(1);
- if (netxen_load_firmware(adapter))
- return;
+ netxen_load_firmware(adapter);
netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
}
@@ -816,22 +817,21 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
printk(KERN_INFO "State: 0x%0x\n",
readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)));
- dma_watchdog_shutdown_request(adapter);
- msleep(100);
i = 100;
- while ((dma_watchdog_shutdown_poll_result(adapter) != 1) && i) {
- printk(KERN_INFO "dma_watchdog_shutdown_poll still in progress\n");
+ do {
+ if (dma_watchdog_shutdown_request(adapter) == 1)
+ break;
msleep(100);
- i--;
- }
+ if (dma_watchdog_shutdown_poll_result(adapter) == 1)
+ break;
+ } while (--i);
if (i) {
netxen_free_adapter_offload(adapter);
} else {
- printk(KERN_ERR "failed to dma shutdown\n");
- return;
+ printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
+ netdev->name);
}
-
}
iounmap(adapter->ahw.db_base);
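The netxen_nic_remove() hunks above replace the old request-once-then-poll shutdown with a bounded do/while loop that alternates a shutdown request and a poll, sleeps 100 ms between attempts, and after 100 iterations merely logs the failure instead of returning early and skipping the rest of the teardown. A user-space sketch of that bounded two-phase retry pattern follows; the handshake functions are dummies that stand in for the hardware calls.

#include <stdio.h>
#include <unistd.h>

/* Stand-ins for the hardware handshakes; each returns 1 once complete. */
static int shutdown_request(void) { return 0; }
static int shutdown_poll(void)    { static int n; return ++n >= 3; }

int main(void)
{
	int i = 100;

	do {
		if (shutdown_request() == 1)
			break;
		usleep(100 * 1000);            /* msleep(100) equivalent */
		if (shutdown_poll() == 1)
			break;
	} while (--i);

	if (i == 0)
		fprintf(stderr, "dma watchdog shutdown failed\n");
	else
		printf("shut down after %d attempts\n", 100 - i + 1);

	/* unlike the old code, cleanup would continue here even on failure */
	return 0;
}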
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 6a538564791..8874497b6bb 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -109,7 +109,7 @@ static int vsc824x_config_intr(struct phy_device *phydev)
*/
err = phy_read(phydev, MII_VSC8244_ISTAT);
- if (err)
+ if (err < 0)
return err;
err = phy_write(phydev, MII_VSC8244_IMASK, 0);
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index f87176055d0..266e8b38fe1 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -2054,7 +2054,7 @@ end:
*/
static int pppol2tp_tunnel_getsockopt(struct sock *sk,
struct pppol2tp_tunnel *tunnel,
- int optname, int __user *val)
+ int optname, int *val)
{
int err = 0;
@@ -2077,7 +2077,7 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk,
*/
static int pppol2tp_session_getsockopt(struct sock *sk,
struct pppol2tp_session *session,
- int optname, int __user *val)
+ int optname, int *val)
{
int err = 0;
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 08d25066f05..13d1c0a2a25 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -290,7 +290,8 @@ static void gelic_net_release_rx_chain(struct gelic_net_card *card)
descr->buf_addr = 0;
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
- descr->dmac_cmd_status = GELIC_NET_DESCR_NOT_IN_USE;
+ gelic_net_set_descr_status(descr,
+ GELIC_NET_DESCR_NOT_IN_USE);
}
descr = descr->next;
} while (descr != card->rx_chain.head);
@@ -374,7 +375,7 @@ static void gelic_net_release_tx_descr(struct gelic_net_card *card,
descr->skb = NULL;
/* set descr status */
- descr->dmac_cmd_status = GELIC_NET_DMAC_CMDSTAT_NOT_IN_USE;
+ gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE);
}
/**
@@ -403,26 +404,29 @@ static void gelic_net_release_tx_chain(struct gelic_net_card *card, int stop)
"%s: forcing end of tx descriptor " \
"with status %x\n",
__func__, status);
- card->netdev_stats.tx_dropped++;
+ card->netdev->stats.tx_dropped++;
break;
case GELIC_NET_DESCR_COMPLETE:
- card->netdev_stats.tx_packets++;
- card->netdev_stats.tx_bytes +=
- tx_chain->tail->skb->len;
+ if (tx_chain->tail->skb) {
+ card->netdev->stats.tx_packets++;
+ card->netdev->stats.tx_bytes +=
+ tx_chain->tail->skb->len;
+ }
break;
case GELIC_NET_DESCR_CARDOWNED:
/* pending tx request */
default:
/* any other value (== GELIC_NET_DESCR_NOT_IN_USE) */
- goto out;
+ if (!stop)
+ goto out;
}
gelic_net_release_tx_descr(card, tx_chain->tail);
- release = 1;
+ release ++;
}
out:
- if (!stop && release)
+ if (!stop && (2 < release))
netif_wake_queue(card->netdev);
}
@@ -659,19 +663,21 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
{
dma_addr_t buf[2];
unsigned int vlan_len;
+ struct gelic_net_descr *sec_descr = descr->next;
if (skb->len < GELIC_NET_VLAN_POS)
return -EINVAL;
- memcpy(&descr->vlan, skb->data, GELIC_NET_VLAN_POS);
+ vlan_len = GELIC_NET_VLAN_POS;
+ memcpy(&descr->vlan, skb->data, vlan_len);
if (card->vlan_index != -1) {
+ /* internal vlan tag used */
descr->vlan.h_vlan_proto = htons(ETH_P_8021Q); /* vlan 0x8100*/
descr->vlan.h_vlan_TCI = htons(card->vlan_id[card->vlan_index]);
- vlan_len = GELIC_NET_VLAN_POS + VLAN_HLEN; /* VLAN_HLEN=4 */
- } else
- vlan_len = GELIC_NET_VLAN_POS; /* no vlan tag */
+ vlan_len += VLAN_HLEN; /* added for above two lines */
+ }
- /* first descr */
+ /* map data area */
buf[0] = dma_map_single(ctodev(card), &descr->vlan,
vlan_len, DMA_TO_DEVICE);
@@ -682,20 +688,6 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
return -ENOMEM;
}
- descr->buf_addr = buf[0];
- descr->buf_size = vlan_len;
- descr->skb = skb; /* not used */
- descr->data_status = 0;
- gelic_net_set_txdescr_cmdstat(descr, skb, 1); /* not the frame end */
-
- /* second descr */
- card->tx_chain.head = card->tx_chain.head->next;
- descr->next_descr_addr = descr->next->bus_addr;
- descr = descr->next;
- if (gelic_net_get_descr_status(descr) != GELIC_NET_DESCR_NOT_IN_USE)
- /* XXX will be removed */
- dev_err(ctodev(card), "descr is not free!\n");
-
buf[1] = dma_map_single(ctodev(card), skb->data + GELIC_NET_VLAN_POS,
skb->len - GELIC_NET_VLAN_POS,
DMA_TO_DEVICE);
@@ -710,13 +702,24 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
return -ENOMEM;
}
- descr->buf_addr = buf[1];
- descr->buf_size = skb->len - GELIC_NET_VLAN_POS;
- descr->skb = skb;
+ /* first descr */
+ descr->buf_addr = buf[0];
+ descr->buf_size = vlan_len;
+ descr->skb = NULL; /* not used */
descr->data_status = 0;
- descr->next_descr_addr = 0; /* terminate hw descr */
- gelic_net_set_txdescr_cmdstat(descr, skb, 0);
+ descr->next_descr_addr = descr->next->bus_addr;
+ gelic_net_set_txdescr_cmdstat(descr, skb, 1); /* not the frame end */
+ /* second descr */
+ sec_descr->buf_addr = buf[1];
+ sec_descr->buf_size = skb->len - GELIC_NET_VLAN_POS;
+ sec_descr->skb = skb;
+ sec_descr->data_status = 0;
+ sec_descr->next_descr_addr = 0; /* terminate hw descr */
+ gelic_net_set_txdescr_cmdstat(sec_descr, skb, 0);
+
+ /* bump free descriptor pointer */
+ card->tx_chain.head = sec_descr->next;
return 0;
}
@@ -729,7 +732,7 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
static int gelic_net_kick_txdma(struct gelic_net_card *card,
struct gelic_net_descr *descr)
{
- int status = -ENXIO;
+ int status = 0;
int count = 10;
if (card->tx_dma_progress)
@@ -763,47 +766,62 @@ static int gelic_net_kick_txdma(struct gelic_net_card *card,
static int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct gelic_net_card *card = netdev_priv(netdev);
- struct gelic_net_descr *descr = NULL;
+ struct gelic_net_descr *descr;
int result;
unsigned long flags;
spin_lock_irqsave(&card->tx_dma_lock, flags);
gelic_net_release_tx_chain(card, 0);
- if (!skb)
- goto kick;
+
descr = gelic_net_get_next_tx_descr(card);
if (!descr) {
+ /*
+ * no more descriptors free
+ */
netif_stop_queue(netdev);
spin_unlock_irqrestore(&card->tx_dma_lock, flags);
return NETDEV_TX_BUSY;
}
- result = gelic_net_prepare_tx_descr_v(card, descr, skb);
-
- if (result)
- goto error;
- card->tx_chain.head = card->tx_chain.head->next;
-
- if (descr->prev)
- descr->prev->next_descr_addr = descr->bus_addr;
-kick:
+ result = gelic_net_prepare_tx_descr_v(card, descr, skb);
+ if (result) {
+ /*
+ * DMA map failed. As chances are that the failure
+ * would persist, just release the skb and return
+ */
+ card->netdev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&card->tx_dma_lock, flags);
+ return NETDEV_TX_OK;
+ }
+ /*
+ * link this prepared descriptor to the previous one
+ * to achieve high performance
+ */
+ descr->prev->next_descr_addr = descr->bus_addr;
/*
* as hardware descriptor is modified in the above lines,
* ensure that the hardware sees it
*/
wmb();
- if (gelic_net_kick_txdma(card, card->tx_chain.tail))
- goto error;
+ if (gelic_net_kick_txdma(card, descr)) {
+ /*
+ * kick failed.
+ * release descriptors which were just prepared
+ */
+ card->netdev->stats.tx_dropped++;
+ gelic_net_release_tx_descr(card, descr);
+ gelic_net_release_tx_descr(card, descr->next);
+ card->tx_chain.tail = descr->next->next;
+ dev_info(ctodev(card), "%s: kick failure\n", __func__);
+ } else {
+ /* OK, DMA started/reserved */
+ netdev->trans_start = jiffies;
+ }
- netdev->trans_start = jiffies;
spin_unlock_irqrestore(&card->tx_dma_lock, flags);
return NETDEV_TX_OK;
-
-error:
- card->netdev_stats.tx_dropped++;
- spin_unlock_irqrestore(&card->tx_dma_lock, flags);
- return NETDEV_TX_LOCKED;
}
/**
@@ -854,8 +872,8 @@ static void gelic_net_pass_skb_up(struct gelic_net_descr *descr,
skb->ip_summed = CHECKSUM_NONE;
/* update netdevice statistics */
- card->netdev_stats.rx_packets++;
- card->netdev_stats.rx_bytes += skb->len;
+ card->netdev->stats.rx_packets++;
+ card->netdev->stats.rx_bytes += skb->len;
/* pass skb up to stack */
netif_receive_skb(skb);
@@ -895,38 +913,67 @@ static int gelic_net_decode_one_descr(struct gelic_net_card *card)
(status == GELIC_NET_DESCR_FORCE_END)) {
dev_info(ctodev(card), "dropping RX descriptor with state %x\n",
status);
- card->netdev_stats.rx_dropped++;
+ card->netdev->stats.rx_dropped++;
goto refill;
}
- if ((status != GELIC_NET_DESCR_COMPLETE) &&
- (status != GELIC_NET_DESCR_FRAME_END)) {
+ if (status == GELIC_NET_DESCR_BUFFER_FULL) {
+ /*
+ * Buffer full would occur if and only if
+ * the frame length was longer than the size of this
+ * descriptor's buffer. If the frame length was equal
+ * to or shorter than the buffer's size, the FRAME_END
+ * condition would occur.
+ * Either way, this frame was longer than the MTU,
+ * so just drop it.
+ */
+ dev_info(ctodev(card), "overlength frame\n");
+ goto refill;
+ }
+ /*
+ * descriptors other than FRAME_END here should
+ * be treated as an error.
+ */
+ if (status != GELIC_NET_DESCR_FRAME_END) {
dev_dbg(ctodev(card), "RX descriptor with state %x\n",
status);
goto refill;
}
/* ok, we've got a packet in descr */
- gelic_net_pass_skb_up(descr, card); /* 1: skb_up sccess */
-
+ gelic_net_pass_skb_up(descr, card);
refill:
- descr->next_descr_addr = 0; /* unlink the descr */
+ /*
+ * Always let the DMAC see the end of the
+ * descriptor chain so that it cannot run
+ * past it (unwanted DMAC overrun).
+ */
+ descr->next_descr_addr = 0;
/* change the descriptor state: */
gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE);
- /* refill one desc
- * FIXME: this can fail, but for now, just leave this
- * descriptor without skb
+ /*
+ * this call can fail, but for now, just leave this
+ * descriptor without an skb
*/
gelic_net_prepare_rx_descr(card, descr);
+
chain->head = descr;
chain->tail = descr->next;
+
+ /*
+ * Make this descriptor the end of the chain.
+ */
descr->prev->next_descr_addr = descr->bus_addr;
+ /*
+ * If the end of the dmac chain was met, the DMAC
+ * stopped, so schedule it to be re-enabled.
+ */
if (dmac_chain_ended) {
- gelic_net_enable_rxdmac(card);
- dev_dbg(ctodev(card), "reenable rx dma\n");
+ card->rx_dma_restart_required = 1;
+ dev_dbg(ctodev(card), "reenable rx dma scheduled\n");
}
return 1;
@@ -968,20 +1015,6 @@ static int gelic_net_poll(struct net_device *netdev, int *budget)
} else
return 1;
}
-
-/**
- * gelic_net_get_stats - get interface statistics
- * @netdev: interface device structure
- *
- * returns the interface statistics residing in the gelic_net_card struct
- */
-static struct net_device_stats *gelic_net_get_stats(struct net_device *netdev)
-{
- struct gelic_net_card *card = netdev_priv(netdev);
-
- return &card->netdev_stats;
-}
-
/**
* gelic_net_change_mtu - changes the MTU of an interface
* @netdev: interface device structure
@@ -1016,6 +1049,11 @@ static irqreturn_t gelic_net_interrupt(int irq, void *ptr)
if (!status)
return IRQ_NONE;
+ if (card->rx_dma_restart_required) {
+ card->rx_dma_restart_required = 0;
+ gelic_net_enable_rxdmac(card);
+ }
+
if (status & GELIC_NET_RXINT) {
gelic_net_rx_irq_off(card);
netif_rx_schedule(netdev);
@@ -1024,9 +1062,10 @@ static irqreturn_t gelic_net_interrupt(int irq, void *ptr)
if (status & GELIC_NET_TXINT) {
spin_lock_irqsave(&card->tx_dma_lock, flags);
card->tx_dma_progress = 0;
+ gelic_net_release_tx_chain(card, 0);
+ /* kick outstanding tx descriptor if any */
+ gelic_net_kick_txdma(card, card->tx_chain.tail);
spin_unlock_irqrestore(&card->tx_dma_lock, flags);
- /* start pending DMA */
- gelic_net_xmit(NULL, netdev);
}
return IRQ_HANDLED;
}
@@ -1068,7 +1107,7 @@ static int gelic_net_open_device(struct gelic_net_card *card)
}
result = request_irq(card->netdev->irq, gelic_net_interrupt,
- IRQF_DISABLED, "gelic network", card->netdev);
+ IRQF_DISABLED, card->netdev->name, card->netdev);
if (result) {
dev_info(ctodev(card), "%s:%d: request_irq failed (%d)\n",
@@ -1107,7 +1146,7 @@ static int gelic_net_open(struct net_device *netdev)
card->descr, GELIC_NET_TX_DESCRIPTORS))
goto alloc_tx_failed;
if (gelic_net_init_chain(card, &card->rx_chain,
- card->descr + GELIC_NET_RX_DESCRIPTORS,
+ card->descr + GELIC_NET_TX_DESCRIPTORS,
GELIC_NET_RX_DESCRIPTORS))
goto alloc_rx_failed;
@@ -1129,7 +1168,6 @@ static int gelic_net_open(struct net_device *netdev)
netif_start_queue(netdev);
netif_carrier_on(netdev);
- netif_poll_enable(netdev);
return 0;
@@ -1141,7 +1179,6 @@ alloc_tx_failed:
return -ENOMEM;
}
-#ifdef GELIC_NET_ETHTOOL
static void gelic_net_get_drvinfo (struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -1261,7 +1298,6 @@ static struct ethtool_ops gelic_net_ethtool_ops = {
.get_rx_csum = gelic_net_get_rx_csum,
.set_rx_csum = gelic_net_set_rx_csum,
};
-#endif
/**
* gelic_net_tx_timeout_task - task scheduled by the watchdog timeout
@@ -1320,7 +1356,6 @@ static void gelic_net_setup_netdev_ops(struct net_device *netdev)
netdev->open = &gelic_net_open;
netdev->stop = &gelic_net_stop;
netdev->hard_start_xmit = &gelic_net_xmit;
- netdev->get_stats = &gelic_net_get_stats;
netdev->set_multicast_list = &gelic_net_set_multi;
netdev->change_mtu = &gelic_net_change_mtu;
/* tx watchdog */
@@ -1329,9 +1364,7 @@ static void gelic_net_setup_netdev_ops(struct net_device *netdev)
/* NAPI */
netdev->poll = &gelic_net_poll;
netdev->weight = GELIC_NET_NAPI_WEIGHT;
-#ifdef GELIC_NET_ETHTOOL
netdev->ethtool_ops = &gelic_net_ethtool_ops;
-#endif
}
/**
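The statistics changes in this file replace the driver-private netdev_stats block with the counters embedded in struct net_device and delete the get_stats hook; the networking core of this kernel generation installs a default get_stats that simply returns &dev->stats when a driver leaves the hook NULL, so no replacement is needed. A minimal hedged sketch of the resulting pattern (foo_xmit is an illustrative name, not part of this driver):

	/* Count through the net_device-embedded stats; no private
	 * net_device_stats struct and no get_stats hook required. */
	static int foo_xmit(struct sk_buff *skb, struct net_device *netdev)
	{
		/* ...hand the skb to the hardware here... */
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
		return NETDEV_TX_OK;
	}

With this in place, dropping netdev->get_stats in gelic_net_setup_netdev_ops above is safe: readers of /proc/net/dev still see the same counters through the core's default accessor.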
diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ps3_gelic_net.h
index 5e1c28654e1..a9c4c4fc254 100644
--- a/drivers/net/ps3_gelic_net.h
+++ b/drivers/net/ps3_gelic_net.h
@@ -28,21 +28,12 @@
#ifndef _GELIC_NET_H
#define _GELIC_NET_H
-#define GELIC_NET_DRV_NAME "Gelic Network Driver"
-#define GELIC_NET_DRV_VERSION "1.0"
-
-#define GELIC_NET_ETHTOOL /* use ethtool */
-
-/* ioctl */
-#define GELIC_NET_GET_MODE (SIOCDEVPRIVATE + 0)
-#define GELIC_NET_SET_MODE (SIOCDEVPRIVATE + 1)
-
/* descriptors */
#define GELIC_NET_RX_DESCRIPTORS 128 /* num of descriptors */
#define GELIC_NET_TX_DESCRIPTORS 128 /* num of descriptors */
-#define GELIC_NET_MAX_MTU 2308
-#define GELIC_NET_MIN_MTU 64
+#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN
+#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN
#define GELIC_NET_RXBUF_ALIGN 128
#define GELIC_NET_RX_CSUM_DEFAULT 1 /* hw chksum */
#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
@@ -90,7 +81,8 @@ enum gelic_net_int1_status {
*/
#define GELIC_NET_RXVLNPKT 0x00200000 /* VLAN packet */
/* bit 20..16 reserved */
-#define GELIC_NET_RXRECNUM 0x0000ff00 /* reception receipt number */
+#define GELIC_NET_RXRRECNUM 0x0000ff00 /* reception receipt number */
+#define GELIC_NET_RXRRECNUM_SHIFT 8
/* bit 7..0 reserved */
#define GELIC_NET_TXDESC_TAIL 0
@@ -133,19 +125,19 @@ enum gelic_net_int1_status {
* interrupt status */
#define GELIC_NET_DMAC_CMDSTAT_CHAIN_END 0x00000002 /* RXDCEIS:DMA stopped */
-#define GELIC_NET_DMAC_CMDSTAT_NOT_IN_USE 0xb0000000
#define GELIC_NET_DESCR_IND_PROC_SHIFT 28
#define GELIC_NET_DESCR_IND_PROC_MASKO 0x0fffffff
enum gelic_net_descr_status {
- GELIC_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
+ GELIC_NET_DESCR_COMPLETE = 0x00, /* used in tx */
+ GELIC_NET_DESCR_BUFFER_FULL = 0x00, /* used in rx */
GELIC_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
GELIC_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
GELIC_NET_DESCR_FRAME_END = 0x04, /* used in rx */
GELIC_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
GELIC_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
- GELIC_NET_DESCR_NOT_IN_USE /* any other value */
+ GELIC_NET_DESCR_NOT_IN_USE = 0x0b /* any other value */
};
/* for lv1_net_control */
#define GELIC_NET_GET_MAC_ADDRESS 0x0000000000000001
@@ -216,10 +208,10 @@ struct gelic_net_card {
struct gelic_net_descr_chain tx_chain;
struct gelic_net_descr_chain rx_chain;
+ int rx_dma_restart_required;
/* gurad dmac descriptor chain*/
spinlock_t chain_lock;
- struct net_device_stats netdev_stats;
int rx_csum;
/* guard tx_dma_progress */
spinlock_t tx_dma_lock;
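For reference, the dmac_cmd_status layout implied by the constants above: the descriptor state lives in the top nibble (GELIC_NET_DESCR_IND_PROC_SHIFT is 28, GELIC_NET_DESCR_IND_PROC_MASKO keeps bits 27..0), which is why the dedicated 0xb0000000 "not in use" command/status value can be dropped in favour of GELIC_NET_DESCR_NOT_IN_USE = 0x0b written through the common setter. A hedged sketch of what gelic_net_set_descr_status amounts to; the real helper is defined elsewhere in ps3_gelic_net.c and is not part of this diff:

	static void gelic_net_set_descr_status(struct gelic_net_descr *descr,
					       enum gelic_net_descr_status status)
	{
		u32 cmd_status = descr->dmac_cmd_status;

		/* keep bits 27..0, rewrite the state nibble in bits 31..28 */
		cmd_status &= GELIC_NET_DESCR_IND_PROC_MASKO;
		cmd_status |= (u32)status << GELIC_NET_DESCR_IND_PROC_SHIFT;
		descr->dmac_cmd_status = cmd_status;
	}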
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index e4736a3b1b7..12e01b24105 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -43,10 +43,6 @@
#undef DEBUG
-#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
-#define DRV_NAME "ucc_geth"
-#define DRV_VERSION "1.1"
-
#define ugeth_printk(level, format, arg...) \
printk(level format "\n", ## arg)
@@ -64,9 +60,19 @@
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif /* UGETH_VERBOSE_DEBUG */
+#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1
+void uec_set_ethtool_ops(struct net_device *netdev);
+
static DEFINE_SPINLOCK(ugeth_lock);
+static struct {
+ u32 msg_enable;
+} debug = { -1 };
+
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
+
static struct ucc_geth_info ugeth_primary_info = {
.uf_info = {
.bd_mem_part = MEM_PART_SYSTEM,
@@ -104,6 +110,7 @@ static struct ucc_geth_info ugeth_primary_info = {
.maxRetransmission = 0xf,
.collisionWindow = 0x37,
.receiveFlowControl = 1,
+ .transmitFlowControl = 1,
.maxGroupAddrInHash = 4,
.maxIndAddrInHash = 4,
.prel = 7,
@@ -139,7 +146,9 @@ static struct ucc_geth_info ugeth_primary_info = {
.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
.largestexternallookupkeysize =
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
- .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
+ .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
@@ -281,7 +290,8 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
for (i = 0; i < num_entries; i++) {
if ((snum = qe_get_snum()) < 0) {
- ugeth_err("fill_init_enet_entries: Can not get SNUM.");
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("fill_init_enet_entries: Can not get SNUM.");
return snum;
}
if ((i == 0) && skip_page_for_first_entry)
@@ -291,8 +301,8 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
init_enet_offset =
qe_muram_alloc(thread_size, thread_alignment);
if (IS_ERR_VALUE(init_enet_offset)) {
- ugeth_err
- ("fill_init_enet_entries: Can not allocate DPRAM memory.");
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory.");
qe_put_snum((u8) snum);
return -ENOMEM;
}
@@ -1200,7 +1210,7 @@ static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
return 0;
}
-static int init_flow_control_params(u32 automatic_flow_control_mode,
+int init_flow_control_params(u32 automatic_flow_control_mode,
int rx_flow_control_enable,
int tx_flow_control_enable,
u16 pause_period,
@@ -1486,9 +1496,9 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
if (ret_val != 0) {
- ugeth_err
- ("%s: Preamble length must be between 3 and 7 inclusive.",
- __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
+ __FUNCTION__);
return ret_val;
}
@@ -1726,7 +1736,8 @@ static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
/* check if the UCC number is in range. */
if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
- ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
return -EINVAL;
}
@@ -1754,7 +1765,8 @@ static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode)
/* check if the UCC number is in range. */
if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
- ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
return -EINVAL;
}
@@ -2306,7 +2318,9 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
(uf_info->bd_mem_part == MEM_PART_MURAM))) {
- ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: Bad memory partition value.",
+ __FUNCTION__);
return -EINVAL;
}
@@ -2315,9 +2329,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
(ug_info->bdRingLenRx[i] %
UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
- ugeth_err
- ("%s: Rx BD ring length must be multiple of 4,"
- " no smaller than 8.", __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err
+ ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
+ __FUNCTION__);
return -EINVAL;
}
}
@@ -2325,9 +2340,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* Tx BD lengths */
for (i = 0; i < ug_info->numQueuesTx; i++) {
if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
- ugeth_err
- ("%s: Tx BD ring length must be no smaller than 2.",
- __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err
+ ("%s: Tx BD ring length must be no smaller than 2.",
+ __FUNCTION__);
return -EINVAL;
}
}
@@ -2335,31 +2351,35 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* mrblr */
if ((uf_info->max_rx_buf_length == 0) ||
(uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
- ugeth_err
- ("%s: max_rx_buf_length must be non-zero multiple of 128.",
- __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err
+ ("%s: max_rx_buf_length must be non-zero multiple of 128.",
+ __FUNCTION__);
return -EINVAL;
}
/* num Tx queues */
if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
- ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
return -EINVAL;
}
/* num Rx queues */
if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
- ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
return -EINVAL;
}
/* l2qt */
for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
- ugeth_err
- ("%s: VLAN priority table entry must not be"
- " larger than number of Rx queues.",
- __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err
+ ("%s: VLAN priority table entry must not be"
+ " larger than number of Rx queues.",
+ __FUNCTION__);
return -EINVAL;
}
}
@@ -2367,26 +2387,29 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* l3qt */
for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
- ugeth_err
- ("%s: IP priority table entry must not be"
- " larger than number of Rx queues.",
- __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err
+ ("%s: IP priority table entry must not be"
+ " larger than number of Rx queues.",
+ __FUNCTION__);
return -EINVAL;
}
}
if (ug_info->cam && !ug_info->ecamptr) {
- ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
- __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
+ __FUNCTION__);
return -EINVAL;
}
if ((ug_info->numStationAddresses !=
UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
&& ug_info->rxExtendedFiltering) {
- ugeth_err("%s: Number of station addresses greater than 1 "
- "not allowed in extended parsing mode.",
- __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: Number of station addresses greater than 1 "
+ "not allowed in extended parsing mode.",
+ __FUNCTION__);
return -EINVAL;
}
@@ -2399,7 +2422,8 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
/* Initialize the general fast UCC block. */
if (ucc_fast_init(uf_info, &ugeth->uccf)) {
- ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2452,7 +2476,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
numThreadsRxNumerical = 8;
break;
default:
- ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Bad number of Rx threads value.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -EINVAL;
break;
@@ -2475,7 +2501,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
numThreadsTxNumerical = 8;
break;
default:
- ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Bad number of Tx threads value.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -EINVAL;
break;
@@ -2507,7 +2535,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* For more details see the hardware spec. */
init_flow_control_params(ug_info->aufc,
ug_info->receiveFlowControl,
- 1,
+ ug_info->transmitFlowControl,
ug_info->pausePeriod,
ug_info->extensionField,
&uf_regs->upsmr,
@@ -2527,8 +2555,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ug_info->backToBackInterFrameGap,
&ug_regs->ipgifg);
if (ret_val != 0) {
- ugeth_err("%s: IPGIFG initialization parameter too large.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: IPGIFG initialization parameter too large.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return ret_val;
}
@@ -2544,7 +2573,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ug_info->collisionWindow,
&ug_regs->hafdup);
if (ret_val != 0) {
- ugeth_err("%s: Half Duplex initialization parameter too large.",
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Half Duplex initialization parameter too large.",
__FUNCTION__);
ucc_geth_memclean(ugeth);
return ret_val;
@@ -2597,9 +2627,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
tx_bd_ring_offset[j]);
}
if (!ugeth->p_tx_bd_ring[j]) {
- ugeth_err
- ("%s: Can not allocate memory for Tx bd rings.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate memory for Tx bd rings.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2632,9 +2663,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
rx_bd_ring_offset[j]);
}
if (!ugeth->p_rx_bd_ring[j]) {
- ugeth_err
- ("%s: Can not allocate memory for Rx bd rings.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate memory for Rx bd rings.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2648,8 +2680,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
GFP_KERNEL);
if (ugeth->tx_skbuff[j] == NULL) {
- ugeth_err("%s: Could not allocate tx_skbuff",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Could not allocate tx_skbuff",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2679,8 +2712,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
GFP_KERNEL);
if (ugeth->rx_skbuff[j] == NULL) {
- ugeth_err("%s: Could not allocate rx_skbuff",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Could not allocate rx_skbuff",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2711,9 +2745,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2733,9 +2768,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
32 * (numThreadsTxNumerical == 1),
UCC_GETH_THREAD_DATA_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2761,9 +2797,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
sizeof(struct ucc_geth_send_queue_qd),
UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2804,9 +2841,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
UCC_GETH_SCHEDULER_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_scheduler.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_scheduler.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2852,9 +2890,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
(struct ucc_geth_tx_firmware_statistics_pram),
UCC_GETH_TX_STATISTICS_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
- ugeth_err
- ("%s: Can not allocate DPRAM memory for"
- " p_tx_fw_statistics_pram.", __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_tx_fw_statistics_pram.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2891,9 +2931,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2912,9 +2953,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
sizeof(struct ucc_geth_thread_data_rx),
UCC_GETH_THREAD_DATA_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2935,9 +2977,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
(struct ucc_geth_rx_firmware_statistics_pram),
UCC_GETH_RX_STATISTICS_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
- ugeth_err
- ("%s: Can not allocate DPRAM memory for"
- " p_rx_fw_statistics_pram.", __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_rx_fw_statistics_pram.", __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -2957,9 +3000,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
+ 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
- ugeth_err
- ("%s: Can not allocate DPRAM memory for"
- " p_rx_irq_coalescing_tbl.", __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_rx_irq_coalescing_tbl.", __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -3025,9 +3069,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
sizeof(struct ucc_geth_rx_prefetched_bds)),
UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -3102,8 +3147,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* initialize extended filtering */
if (ug_info->rxExtendedFiltering) {
if (!ug_info->extendedFilteringChainPointer) {
- ugeth_err("%s: Null Extended Filtering Chain Pointer.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Null Extended Filtering Chain Pointer.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -EINVAL;
}
@@ -3114,9 +3160,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
- ugeth_err
- ("%s: Can not allocate DPRAM memory for"
- " p_exf_glbl_param.", __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_exf_glbl_param.", __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -3161,9 +3208,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
*/
if (!(ugeth->p_init_enet_param_shadow =
kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
- ugeth_err
- ("%s: Can not allocate memory for"
- " p_UccInitEnetParamShadows.", __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate memory for"
+ " p_UccInitEnetParamShadows.", __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -3196,8 +3244,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
&& (ug_info->largestexternallookupkeysize !=
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
- ugeth_err("%s: Invalid largest External Lookup Key Size.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Invalid largest External Lookup Key Size.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -EINVAL;
}
@@ -3222,8 +3271,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* Rx needs one extra for terminator */
, size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
ug_info->riscRx, 1)) != 0) {
- ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return ret_val;
}
@@ -3237,8 +3287,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
sizeof(struct ucc_geth_thread_tx_pram),
UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
ug_info->riscTx, 0)) != 0) {
- ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return ret_val;
}
@@ -3246,8 +3297,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* Load Rx bds with buffers */
for (i = 0; i < ug_info->numQueuesRx; i++) {
if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
- ugeth_err("%s: Can not fill Rx bds with buffers.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Can not fill Rx bds with buffers.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return ret_val;
}
@@ -3256,9 +3308,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* Allocate InitEnet command parameter structure */
init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
if (IS_ERR_VALUE(init_enet_pram_offset)) {
- ugeth_err
- ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
- __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
+ __FUNCTION__);
ucc_geth_memclean(ugeth);
return -ENOMEM;
}
@@ -3428,8 +3481,9 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
if (!skb ||
(!(bd_status & (R_F | R_L))) ||
(bd_status & R_ERRORS_FATAL)) {
- ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x",
- __FUNCTION__, __LINE__, (u32) skb);
+ if (netif_msg_rx_err(ugeth))
+ ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
+ __FUNCTION__, __LINE__, (u32) skb);
if (skb)
dev_kfree_skb_any(skb);
@@ -3458,7 +3512,8 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
skb = get_new_skb(ugeth, bd);
if (!skb) {
- ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
+ if (netif_msg_rx_err(ugeth))
+ ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
ugeth->stats.rx_dropped++;
break;
}
@@ -3649,28 +3704,32 @@ static int ucc_geth_open(struct net_device *dev)
/* Test station address */
if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
- ugeth_err("%s: Multicast address used for station address"
- " - is this what you wanted?", __FUNCTION__);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Multicast address used for station address"
+ " - is this what you wanted?", __FUNCTION__);
return -EINVAL;
}
err = ucc_struct_init(ugeth);
if (err) {
- ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name);
return err;
}
err = ucc_geth_startup(ugeth);
if (err) {
- ugeth_err("%s: Cannot configure net device, aborting.",
- dev->name);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot configure net device, aborting.",
+ dev->name);
return err;
}
err = adjust_enet_interface(ugeth);
if (err) {
- ugeth_err("%s: Cannot configure net device, aborting.",
- dev->name);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot configure net device, aborting.",
+ dev->name);
return err;
}
@@ -3687,7 +3746,8 @@ static int ucc_geth_open(struct net_device *dev)
err = init_phy(dev);
if (err) {
- ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
return err;
}
@@ -3697,15 +3757,17 @@ static int ucc_geth_open(struct net_device *dev)
request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
"UCC Geth", dev);
if (err) {
- ugeth_err("%s: Cannot get IRQ for net device, aborting.",
- dev->name);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot get IRQ for net device, aborting.",
+ dev->name);
ucc_geth_stop(ugeth);
return err;
}
err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
if (err) {
- ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
+ if (netif_msg_ifup(ugeth))
+ ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
ucc_geth_stop(ugeth);
return err;
}
@@ -3732,8 +3794,6 @@ static int ucc_geth_close(struct net_device *dev)
return 0;
}
-const struct ethtool_ops ucc_geth_ethtool_ops = { };
-
static phy_interface_t to_phy_interface(const char *phy_connection_type)
{
if (strcasecmp(phy_connection_type, "mii") == 0)
@@ -3790,6 +3850,13 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
return -ENODEV;
ug_info = &ugeth_info[ucc_num];
+ if (ug_info == NULL) {
+ if (netif_msg_probe(&debug))
+ ugeth_err("%s: [%d] Missing additional data!",
+ __FUNCTION__, ucc_num);
+ return -ENODEV;
+ }
+
ug_info->uf_info.ucc_num = ucc_num;
prop = of_get_property(np, "rx-clock", NULL);
@@ -3868,15 +3935,10 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
ug_info->mdio_bus = res.start;
- printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
- ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
- ug_info->uf_info.irq);
-
- if (ug_info == NULL) {
- ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
- ucc_num);
- return -ENODEV;
- }
+ if (netif_msg_probe(&debug))
+ printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
+ ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
+ ug_info->uf_info.irq);
/* Create an ethernet device instance */
dev = alloc_etherdev(sizeof(*ugeth));
@@ -3896,6 +3958,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
SET_NETDEV_DEV(dev, device);
/* Fill in the dev structure */
+ uec_set_ethtool_ops(dev);
dev->open = ucc_geth_open;
dev->hard_start_xmit = ucc_geth_start_xmit;
dev->tx_timeout = ucc_geth_timeout;
@@ -3909,16 +3972,16 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
// dev->change_mtu = ucc_geth_change_mtu;
dev->mtu = 1500;
dev->set_multicast_list = ucc_geth_set_multi;
- dev->ethtool_ops = &ucc_geth_ethtool_ops;
- ugeth->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
+ ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
ugeth->phy_interface = phy_interface;
ugeth->max_speed = max_speed;
err = register_netdev(dev);
if (err) {
- ugeth_err("%s: Cannot register net device, aborting.",
- dev->name);
+ if (netif_msg_probe(ugeth))
+ ugeth_err("%s: Cannot register net device, aborting.",
+ dev->name);
free_netdev(dev);
return err;
}
@@ -3972,7 +4035,8 @@ static int __init ucc_geth_init(void)
if (ret)
return ret;
- printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
+ if (netif_msg_drv(&debug))
+ printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
for (i = 0; i < 8; i++)
memcpy(&(ugeth_info[i]), &ugeth_primary_info,
sizeof(ugeth_primary_info));
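The recurring ucc_geth.c change above is a single pattern: every unconditional ugeth_err() is now gated on the standard msg_enable bits, and the level is taken from a new "debug" module parameter folded in with netif_msg_init(). As the <linux/netdevice.h> helper is commonly implemented, a negative value (the -1 initializer) selects the driver default, 0 silences everything, and a value N sets the lowest N message-class bits; UGETH_MSG_DEFAULT, (NETIF_MSG_IFUP << 1) - 1, therefore enables every class up to and including IFUP. A short sketch of the two ends of the pattern, using names from the patch (the error string is adapted for brevity):

	/* probe time: combine the module parameter with the driver default */
	ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);

	/* error sites: only print if the matching message-class bit is set */
	if (netif_msg_ifup(ugeth))
		ugeth_err("%s: Can not get SNUM.", __FUNCTION__);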
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index a29e1c3ca4b..bb4dac8c0c6 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -30,6 +30,10 @@
#include "ucc_geth_mii.h"
+#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
+#define DRV_NAME "ucc_geth"
+#define DRV_VERSION "1.1"
+
#define NUM_TX_QUEUES 8
#define NUM_RX_QUEUES 8
#define NUM_BDS_IN_PREFETCHED_BDS 4
@@ -896,6 +900,7 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8
#define UCC_GETH_RX_BD_RING_SIZE_MIN 8
#define UCC_GETH_TX_BD_RING_SIZE_MIN 2
+#define UCC_GETH_BD_RING_SIZE_MAX 0xffff
#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD
@@ -1135,6 +1140,7 @@ struct ucc_geth_info {
int bro;
int ecm;
int receiveFlowControl;
+ int transmitFlowControl;
u8 maxGroupAddrInHash;
u8 maxIndAddrInHash;
u8 prel;
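The new transmitFlowControl field mirrors receiveFlowControl so pause handling is configurable per direction: ucc_geth_startup() now passes it to init_flow_control_params() instead of a hard-coded 1, and uec_set_pauseparam() in the new ethtool file updates it at runtime. An illustrative, hypothetical asymmetric setting applied before the interface is brought up:

	struct ucc_geth_info *ug_info = &ugeth_info[ucc_num];

	/* honour incoming pause frames, but never emit them;
	 * takes effect when ucc_geth_startup() programs the MAC */
	ug_info->receiveFlowControl  = 1;
	ug_info->transmitFlowControl = 0;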
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
new file mode 100644
index 00000000000..a8994c7b858
--- /dev/null
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Description: QE UCC Gigabit Ethernet Ethtool API Set
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ *
+ * Limitation:
+ * Can only get/set settings of the first queue.
+ * Need to re-open the interface manually after changing some parameters.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/types.h>
+#include <asm/uaccess.h>
+
+#include "ucc_geth.h"
+#include "ucc_geth_mii.h"
+
+static char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
+ "tx-64-frames",
+ "tx-65-127-frames",
+ "tx-128-255-frames",
+ "rx-64-frames",
+ "rx-65-127-frames",
+ "rx-128-255-frames",
+ "tx-bytes-ok",
+ "tx-pause-frames",
+ "tx-multicast-frames",
+ "tx-broadcast-frames",
+ "rx-frames",
+ "rx-bytes-ok",
+ "rx-bytes-all",
+ "rx-multicast-frames",
+ "rx-broadcast-frames",
+ "stats-counter-carry",
+ "stats-counter-mask",
+ "rx-dropped-frames",
+};
+
+static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
+ "tx-single-collision",
+ "tx-multiple-collision",
+ "tx-late-collsion",
+ "tx-aborted-frames",
+ "tx-lost-frames",
+ "tx-carrier-sense-errors",
+ "tx-frames-ok",
+ "tx-excessive-differ-frames",
+ "tx-256-511-frames",
+ "tx-1024-1518-frames",
+ "tx-jumbo-frames",
+};
+
+static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
+ "rx-crc-errors",
+ "rx-alignment-errors",
+ "rx-in-range-length-errors",
+ "rx-out-of-range-length-errors",
+ "rx-too-long-frames",
+ "rx-runt",
+ "rx-very-long-event",
+ "rx-symbol-errors",
+ "rx-busy-drop-frames",
+ "reserved",
+ "reserved",
+ "rx-mismatch-drop-frames",
+ "rx-small-than-64",
+ "rx-256-511-frames",
+ "rx-512-1023-frames",
+ "rx-1024-1518-frames",
+ "rx-jumbo-frames",
+ "rx-mac-error-loss",
+ "rx-pause-frames",
+ "reserved",
+ "rx-vlan-removed",
+ "rx-vlan-replaced",
+ "rx-vlan-inserted",
+ "rx-ip-checksum-errors",
+};
+
+#define UEC_HW_STATS_LEN ARRAY_SIZE(hw_stat_gstrings)
+#define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings)
+#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
+
+extern int init_flow_control_params(u32 automatic_flow_control_mode,
+ int rx_flow_control_enable,
+ int tx_flow_control_enable, u16 pause_period,
+ u16 extension_field, volatile u32 *upsmr_register,
+ volatile u32 *uempr_register, volatile u32 *maccfg1_register);
+
+static int
+uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+
+ if (!phydev)
+ return -ENODEV;
+
+ ecmd->maxtxpkt = 1;
+ ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
+
+ return phy_ethtool_gset(phydev, ecmd);
+}
+
+static int
+uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, ecmd);
+}
+
+static void
+uec_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+
+ pause->autoneg = ugeth->phydev->autoneg;
+
+ if (ugeth->ug_info->receiveFlowControl)
+ pause->rx_pause = 1;
+ if (ugeth->ug_info->transmitFlowControl)
+ pause->tx_pause = 1;
+}
+
+static int
+uec_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ int ret = 0;
+
+ ugeth->ug_info->receiveFlowControl = pause->rx_pause;
+ ugeth->ug_info->transmitFlowControl = pause->tx_pause;
+
+ if (ugeth->phydev->autoneg) {
+ if (netif_running(netdev)) {
+ /* FIXME: automatically restart */
+ printk(KERN_INFO
+ "Please re-open the interface.\n");
+ }
+ } else {
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+
+ ret = init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
+
+ return ret;
+}
+
+static uint32_t
+uec_get_msglevel(struct net_device *netdev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ return ugeth->msg_enable;
+}
+
+static void
+uec_set_msglevel(struct net_device *netdev, uint32_t data)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ ugeth->msg_enable = data;
+}
+
+static int
+uec_get_regs_len(struct net_device *netdev)
+{
+ return sizeof(struct ucc_geth);
+}
+
+static void
+uec_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ int i;
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 __iomem *ug_regs = (u32 __iomem *)ugeth->ug_regs;
+ u32 *buff = p;
+
+ for (i = 0; i < sizeof(struct ucc_geth) / sizeof(u32); i++)
+ buff[i] = in_be32(&ug_regs[i]);
+}
+
+static void
+uec_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ int queue = 0;
+
+ ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+ ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+ ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+ ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+
+ ring->rx_pending = ug_info->bdRingLenRx[queue];
+ ring->rx_mini_pending = ug_info->bdRingLenRx[queue];
+ ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue];
+ ring->tx_pending = ug_info->bdRingLenTx[queue];
+}
+
+static int
+uec_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ int queue = 0, ret = 0;
+
+ if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) {
+ printk("%s: RxBD ring size must be no smaller than %d.\n",
+ netdev->name, UCC_GETH_RX_BD_RING_SIZE_MIN);
+ return -EINVAL;
+ }
+ if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) {
+ printk("%s: RxBD ring size must be multiple of %d.\n",
+ netdev->name, UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT);
+ return -EINVAL;
+ }
+ if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) {
+ printk("%s: TxBD ring size must be no smaller than %d.\n",
+ netdev->name, UCC_GETH_TX_BD_RING_SIZE_MIN);
+ return -EINVAL;
+ }
+
+ ug_info->bdRingLenRx[queue] = ring->rx_pending;
+ ug_info->bdRingLenTx[queue] = ring->tx_pending;
+
+ if (netif_running(netdev)) {
+ /* FIXME: restart automatically */
+ printk(KERN_INFO
+ "Please re-open the interface.\n");
+ }
+
+ return ret;
+}
+
+static int uec_get_stats_count(struct net_device *netdev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 stats_mode = ugeth->ug_info->statisticsMode;
+ int len = 0;
+
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE)
+ len += UEC_HW_STATS_LEN;
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX)
+ len += UEC_TX_FW_STATS_LEN;
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
+ len += UEC_RX_FW_STATS_LEN;
+
+ return len;
+}
+
+static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 stats_mode = ugeth->ug_info->statisticsMode;
+
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
+ memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN *
+ ETH_GSTRING_LEN);
+ buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN *
+ ETH_GSTRING_LEN);
+ buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
+ memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN *
+ ETH_GSTRING_LEN);
+}
+
+static void uec_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 stats_mode = ugeth->ug_info->statisticsMode;
+ u32 __iomem *base;
+ int i, j = 0;
+
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
+ base = (u32 __iomem *)&ugeth->ug_regs->tx64;
+ for (i = 0; i < UEC_HW_STATS_LEN; i++)
+ data[j++] = (u64)in_be32(&base[i]);
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram;
+ for (i = 0; i < UEC_TX_FW_STATS_LEN; i++)
+ data[j++] = (u64)in_be32(&base[i]);
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
+ base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram;
+ for (i = 0; i < UEC_RX_FW_STATS_LEN; i++)
+ data[j++] = (u64)in_be32(&base[i]);
+ }
+}
+
+static int uec_nway_reset(struct net_device *netdev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+
+ return phy_start_aneg(ugeth->phydev);
+}
+
+/* Report driver information */
+static void
+uec_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strncpy(drvinfo->driver, DRV_NAME, 32);
+ strncpy(drvinfo->version, DRV_VERSION, 32);
+ strncpy(drvinfo->fw_version, "N/A", 32);
+ strncpy(drvinfo->bus_info, "QUICC ENGINE", 32);
+ drvinfo->n_stats = uec_get_stats_count(netdev);
+ drvinfo->testinfo_len = 0;
+ drvinfo->eedump_len = 0;
+ drvinfo->regdump_len = uec_get_regs_len(netdev);
+}
+
+static const struct ethtool_ops uec_ethtool_ops = {
+ .get_settings = uec_get_settings,
+ .set_settings = uec_set_settings,
+ .get_drvinfo = uec_get_drvinfo,
+ .get_regs_len = uec_get_regs_len,
+ .get_regs = uec_get_regs,
+ .get_msglevel = uec_get_msglevel,
+ .set_msglevel = uec_set_msglevel,
+ .nway_reset = uec_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = uec_get_ringparam,
+ .set_ringparam = uec_set_ringparam,
+ .get_pauseparam = uec_get_pauseparam,
+ .set_pauseparam = uec_set_pauseparam,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .get_stats_count = uec_get_stats_count,
+ .get_strings = uec_get_strings,
+ .get_ethtool_stats = uec_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
+};
+
+void uec_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops);
+}
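With SET_ETHTOOL_OPS() wired up from ucc_geth_probe() (the uec_set_ethtool_ops() call added earlier in this series), the usual ethtool front-ends start working against this driver: -S is backed by get_strings/get_ethtool_stats/get_stats_count, -g/-G by the ringparam pair, -a/-A by the pauseparam pair, and -r by nway_reset. Note that get_stats_count(), get_strings() and get_ethtool_stats() all key off the same statisticsMode mask, so the string list and the value list stay the same length and in the same order by construction.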
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index 7bcb82f50cf..5f8c2d30a32 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -54,8 +54,8 @@
#define vdbg(format, arg...) do {} while(0)
#endif
-#define DRV_DESC "QE UCC Ethernet Controller MII Bus"
-#define DRV_NAME "fsl-uec_mdio"
+#define MII_DRV_DESC "QE UCC Ethernet Controller MII Bus"
+#define MII_DRV_NAME "fsl-uec_mdio"
/* Write value to the PHY for this device to the register at regnum, */
/* waiting until the write is done before it returns. All PHY */
@@ -261,7 +261,7 @@ static struct of_device_id uec_mdio_match[] = {
};
static struct of_platform_driver uec_mdio_driver = {
- .name = DRV_NAME,
+ .name = MII_DRV_NAME,
.probe = uec_mdio_probe,
.remove = uec_mdio_remove,
.match_table = uec_mdio_match,
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index c8062494009..5c6a5d04300 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -220,6 +220,7 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
}
EXPORT_SYMBOL(pci_osc_control_set);
+#ifdef CONFIG_ACPI_SLEEP
/*
* _SxD returns the D-state with the highest power
* (lowest D-state number) supported in the S-state "x".
@@ -245,16 +246,34 @@ EXPORT_SYMBOL(pci_osc_control_set);
* currently we simply return _SxD, if present.
*/
-static int acpi_pci_choose_state(struct pci_dev *pdev, pm_message_t state)
+static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev,
+ pm_message_t state)
{
- /* TBD */
-
- return -ENODEV;
+ int acpi_state;
+
+ acpi_state = acpi_pm_device_sleep_state(&pdev->dev,
+ device_may_wakeup(&pdev->dev), NULL);
+ if (acpi_state < 0)
+ return PCI_POWER_ERROR;
+
+ switch (acpi_state) {
+ case ACPI_STATE_D0:
+ return PCI_D0;
+ case ACPI_STATE_D1:
+ return PCI_D1;
+ case ACPI_STATE_D2:
+ return PCI_D2;
+ case ACPI_STATE_D3:
+ return PCI_D3hot;
+ }
+ return PCI_POWER_ERROR;
}
+#endif
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ acpi_handle tmp;
static int state_conv[] = {
[0] = 0,
[1] = 1,
@@ -266,6 +285,9 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
if (!handle)
return -ENODEV;
+ /* If the ACPI device has _EJ0, ignore the device */
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
+ return 0;
return acpi_bus_set_power(handle, acpi_state);
}
@@ -320,7 +342,9 @@ static int __init acpi_pci_init(void)
ret = register_acpi_bus_type(&acpi_pci_bus);
if (ret)
return 0;
+#ifdef CONFIG_ACPI_SLEEP
platform_pci_choose_state = acpi_pci_choose_state;
+#endif
platform_pci_set_power_state = acpi_pci_set_power_state;
return 0;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 03fd59e80fe..fba319d6fcc 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -499,7 +499,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0;
}
-int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);
+pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);
/**
* pci_choose_state - Choose the power state of a PCI device
@@ -513,15 +513,15 @@ int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
- int ret;
+ pci_power_t ret;
if (!pci_find_capability(dev, PCI_CAP_ID_PM))
return PCI_D0;
if (platform_pci_choose_state) {
ret = platform_pci_choose_state(dev, state);
- if (ret >= 0)
- state.event = ret;
+ if (ret != PCI_POWER_ERROR)
+ return ret;
}
switch (state.event) {
@@ -1604,6 +1604,7 @@ early_param("pci", pci_setup);
device_initcall(pci_init);
EXPORT_SYMBOL_GPL(pci_restore_bars);
+EXPORT_SYMBOL(__pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
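Taken together, the pci-acpi.c and pci.c hunks above change the platform_pci_choose_state() contract from "return an ACPI-flavoured int and patch it back into state.event" to "return a pci_power_t directly, or PCI_POWER_ERROR when the firmware has no opinion". A hedged sketch of the usual consumer, a driver suspend hook (foo_suspend is an illustrative name, not from this patch):

	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		pci_save_state(pdev);
		pci_disable_device(pdev);
		/* pci_choose_state() now returns the ACPI-derived D-state
		 * directly when one is available, and otherwise falls back
		 * to the state.event mapping */
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
		return 0;
	}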
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 3fec13d3add..c6e132d7c0f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,6 +1,5 @@
/* Functions internal to the PCI core code */
-extern int __must_check __pci_reenable_device(struct pci_dev *);
extern int pci_uevent(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size);
extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
@@ -13,7 +12,7 @@ extern int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
resource_size_t, resource_size_t),
void *alignf_data);
/* Firmware callbacks */
-extern int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);
+extern pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);
extern int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t state);
extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index dd6384b1efc..b6a4f02b01d 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -2,7 +2,6 @@
* card.c - contains functions for managing groups of PnP devices
*
* Copyright 2002 Adam Belay <ambx1@neo.rr.com>
- *
*/
#include <linux/module.h>
@@ -13,26 +12,31 @@
LIST_HEAD(pnp_cards);
static LIST_HEAD(pnp_card_drivers);
-
-static const struct pnp_card_device_id * match_card(struct pnp_card_driver * drv, struct pnp_card * card)
+static const struct pnp_card_device_id *match_card(struct pnp_card_driver *drv,
+ struct pnp_card *card)
{
- const struct pnp_card_device_id * drv_id = drv->id_table;
- while (*drv_id->id){
- if (compare_pnp_id(card->id,drv_id->id)) {
+ const struct pnp_card_device_id *drv_id = drv->id_table;
+
+ while (*drv_id->id) {
+ if (compare_pnp_id(card->id, drv_id->id)) {
int i = 0;
+
for (;;) {
int found;
struct pnp_dev *dev;
- if (i == PNP_MAX_DEVICES || ! *drv_id->devs[i].id)
+
+ if (i == PNP_MAX_DEVICES
+ || !*drv_id->devs[i].id)
return drv_id;
found = 0;
card_for_each_dev(card, dev) {
- if (compare_pnp_id(dev->id, drv_id->devs[i].id)) {
+ if (compare_pnp_id
+ (dev->id, drv_id->devs[i].id)) {
found = 1;
break;
}
}
- if (! found)
+ if (!found)
break;
i++;
}
@@ -42,14 +46,15 @@ static const struct pnp_card_device_id * match_card(struct pnp_card_driver * drv
return NULL;
}
-static void card_remove(struct pnp_dev * dev)
+static void card_remove(struct pnp_dev *dev)
{
dev->card_link = NULL;
}
-static void card_remove_first(struct pnp_dev * dev)
+static void card_remove_first(struct pnp_dev *dev)
{
- struct pnp_card_driver * drv = to_pnp_card_driver(dev->driver);
+ struct pnp_card_driver *drv = to_pnp_card_driver(dev->driver);
+
if (!dev->card || !drv)
return;
if (drv->remove)
@@ -67,7 +72,7 @@ static int card_probe(struct pnp_card *card, struct pnp_card_driver *drv)
if (!drv->probe)
return 0;
- id = match_card(drv,card);
+ id = match_card(drv, card);
if (!id)
return 0;
@@ -94,12 +99,11 @@ static int card_probe(struct pnp_card *card, struct pnp_card_driver *drv)
* pnp_add_card_id - adds an EISA id to the specified card
* @id: pointer to a pnp_id structure
* @card: pointer to the desired card
- *
*/
-
-int pnp_add_card_id(struct pnp_id *id, struct pnp_card * card)
+int pnp_add_card_id(struct pnp_id *id, struct pnp_card *card)
{
- struct pnp_id * ptr;
+ struct pnp_id *ptr;
+
if (!id)
return -EINVAL;
if (!card)
@@ -115,10 +119,11 @@ int pnp_add_card_id(struct pnp_id *id, struct pnp_card * card)
return 0;
}
-static void pnp_free_card_ids(struct pnp_card * card)
+static void pnp_free_card_ids(struct pnp_card *card)
{
- struct pnp_id * id;
+ struct pnp_id *id;
struct pnp_id *next;
+
if (!card)
return;
id = card->id;
@@ -131,49 +136,55 @@ static void pnp_free_card_ids(struct pnp_card * card)
static void pnp_release_card(struct device *dmdev)
{
- struct pnp_card * card = to_pnp_card(dmdev);
+ struct pnp_card *card = to_pnp_card(dmdev);
+
pnp_free_card_ids(card);
kfree(card);
}
-
-static ssize_t pnp_show_card_name(struct device *dmdev, struct device_attribute *attr, char *buf)
+static ssize_t pnp_show_card_name(struct device *dmdev,
+ struct device_attribute *attr, char *buf)
{
char *str = buf;
struct pnp_card *card = to_pnp_card(dmdev);
- str += sprintf(str,"%s\n", card->name);
+
+ str += sprintf(str, "%s\n", card->name);
return (str - buf);
}
-static DEVICE_ATTR(name,S_IRUGO,pnp_show_card_name,NULL);
+static DEVICE_ATTR(name, S_IRUGO, pnp_show_card_name, NULL);
-static ssize_t pnp_show_card_ids(struct device *dmdev, struct device_attribute *attr, char *buf)
+static ssize_t pnp_show_card_ids(struct device *dmdev,
+ struct device_attribute *attr, char *buf)
{
char *str = buf;
struct pnp_card *card = to_pnp_card(dmdev);
- struct pnp_id * pos = card->id;
+ struct pnp_id *pos = card->id;
while (pos) {
- str += sprintf(str,"%s\n", pos->id);
+ str += sprintf(str, "%s\n", pos->id);
pos = pos->next;
}
return (str - buf);
}
-static DEVICE_ATTR(card_id,S_IRUGO,pnp_show_card_ids,NULL);
+static DEVICE_ATTR(card_id, S_IRUGO, pnp_show_card_ids, NULL);
static int pnp_interface_attach_card(struct pnp_card *card)
{
- int rc = device_create_file(&card->dev,&dev_attr_name);
- if (rc) return rc;
+ int rc = device_create_file(&card->dev, &dev_attr_name);
- rc = device_create_file(&card->dev,&dev_attr_card_id);
- if (rc) goto err_name;
+ if (rc)
+ return rc;
+
+ rc = device_create_file(&card->dev, &dev_attr_card_id);
+ if (rc)
+ goto err_name;
return 0;
-err_name:
- device_remove_file(&card->dev,&dev_attr_name);
+ err_name:
+ device_remove_file(&card->dev, &dev_attr_name);
return rc;
}
@@ -181,15 +192,16 @@ err_name:
* pnp_add_card - adds a PnP card to the PnP Layer
* @card: pointer to the card to add
*/
-
-int pnp_add_card(struct pnp_card * card)
+int pnp_add_card(struct pnp_card *card)
{
int error;
- struct list_head * pos, * temp;
+ struct list_head *pos, *temp;
+
if (!card || !card->protocol)
return -EINVAL;
- sprintf(card->dev.bus_id, "%02x:%02x", card->protocol->number, card->number);
+ sprintf(card->dev.bus_id, "%02x:%02x", card->protocol->number,
+ card->number);
card->dev.parent = &card->protocol->dev;
card->dev.bus = NULL;
card->dev.release = &pnp_release_card;
@@ -205,18 +217,21 @@ int pnp_add_card(struct pnp_card * card)
/* we wait until now to add devices in order to ensure the drivers
* will be able to use all of the related devices on the card
 * without waiting any unreasonable length of time */
- list_for_each(pos,&card->devices){
+ list_for_each(pos, &card->devices) {
struct pnp_dev *dev = card_to_pnp_dev(pos);
__pnp_add_device(dev);
}
/* match with card drivers */
- list_for_each_safe(pos,temp,&pnp_card_drivers){
- struct pnp_card_driver * drv = list_entry(pos, struct pnp_card_driver, global_list);
- card_probe(card,drv);
+ list_for_each_safe(pos, temp, &pnp_card_drivers) {
+ struct pnp_card_driver *drv =
+ list_entry(pos, struct pnp_card_driver,
+ global_list);
+ card_probe(card, drv);
}
} else
- pnp_err("sysfs failure, card '%s' will be unavailable", card->dev.bus_id);
+ pnp_err("sysfs failure, card '%s' will be unavailable",
+ card->dev.bus_id);
return error;
}
@@ -224,10 +239,10 @@ int pnp_add_card(struct pnp_card * card)
* pnp_remove_card - removes a PnP card from the PnP Layer
* @card: pointer to the card to remove
*/
-
-void pnp_remove_card(struct pnp_card * card)
+void pnp_remove_card(struct pnp_card *card)
{
struct list_head *pos, *temp;
+
if (!card)
return;
device_unregister(&card->dev);
@@ -235,7 +250,7 @@ void pnp_remove_card(struct pnp_card * card)
list_del(&card->global_list);
list_del(&card->protocol_list);
spin_unlock(&pnp_lock);
- list_for_each_safe(pos,temp,&card->devices){
+ list_for_each_safe(pos, temp, &card->devices) {
struct pnp_dev *dev = card_to_pnp_dev(pos);
pnp_remove_card_device(dev);
}
@@ -246,15 +261,14 @@ void pnp_remove_card(struct pnp_card * card)
* @card: pointer to the card to add to
* @dev: pointer to the device to add
*/
-
-int pnp_add_card_device(struct pnp_card * card, struct pnp_dev * dev)
+int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev)
{
if (!card || !dev || !dev->protocol)
return -EINVAL;
dev->dev.parent = &card->dev;
dev->card_link = NULL;
- snprintf(dev->dev.bus_id, BUS_ID_SIZE, "%02x:%02x.%02x", dev->protocol->number,
- card->number,dev->number);
+ snprintf(dev->dev.bus_id, BUS_ID_SIZE, "%02x:%02x.%02x",
+ dev->protocol->number, card->number, dev->number);
spin_lock(&pnp_lock);
dev->card = card;
list_add_tail(&dev->card_list, &card->devices);
@@ -266,8 +280,7 @@ int pnp_add_card_device(struct pnp_card * card, struct pnp_dev * dev)
 * pnp_remove_card_device - removes a device from the specified card
* @dev: pointer to the device to remove
*/
-
-void pnp_remove_card_device(struct pnp_dev * dev)
+void pnp_remove_card_device(struct pnp_dev *dev)
{
spin_lock(&pnp_lock);
dev->card = NULL;
@@ -282,13 +295,14 @@ void pnp_remove_card_device(struct pnp_dev * dev)
* @id: pointer to a PnP ID structure that explains the rules for finding the device
 * @from: Starting place to search from. If NULL it will start from the beginning.
*/
-
-struct pnp_dev * pnp_request_card_device(struct pnp_card_link *clink, const char * id, struct pnp_dev * from)
+struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink,
+ const char *id, struct pnp_dev *from)
{
- struct list_head * pos;
- struct pnp_dev * dev;
- struct pnp_card_driver * drv;
- struct pnp_card * card;
+ struct list_head *pos;
+ struct pnp_dev *dev;
+ struct pnp_card_driver *drv;
+ struct pnp_card *card;
+
if (!clink || !id)
goto done;
card = clink->card;
@@ -302,15 +316,15 @@ struct pnp_dev * pnp_request_card_device(struct pnp_card_link *clink, const char
}
while (pos != &card->devices) {
dev = card_to_pnp_dev(pos);
- if ((!dev->card_link) && compare_pnp_id(dev->id,id))
+ if ((!dev->card_link) && compare_pnp_id(dev->id, id))
goto found;
pos = pos->next;
}
-done:
+ done:
return NULL;
-found:
+ found:
dev->card_link = clink;
dev->dev.driver = &drv->link.driver;
if (pnp_bus_type.probe(&dev->dev))
@@ -320,7 +334,7 @@ found:
return dev;
-err_out:
+ err_out:
dev->dev.driver = NULL;
dev->card_link = NULL;
return NULL;
@@ -330,10 +344,10 @@ err_out:
* pnp_release_card_device - call this when the driver no longer needs the device
 * @dev: pointer to the PnP device structure
*/
-
-void pnp_release_card_device(struct pnp_dev * dev)
+void pnp_release_card_device(struct pnp_dev *dev)
{
- struct pnp_card_driver * drv = dev->card_link->driver;
+ struct pnp_card_driver *drv = dev->card_link->driver;
+
if (!drv)
return;
drv->link.remove = &card_remove;
@@ -347,6 +361,7 @@ void pnp_release_card_device(struct pnp_dev * dev)
static int card_suspend(struct pnp_dev *dev, pm_message_t state)
{
struct pnp_card_link *link = dev->card_link;
+
if (link->pm_state.event == state.event)
return 0;
link->pm_state = state;
@@ -356,6 +371,7 @@ static int card_suspend(struct pnp_dev *dev, pm_message_t state)
static int card_resume(struct pnp_dev *dev)
{
struct pnp_card_link *link = dev->card_link;
+
if (link->pm_state.event == PM_EVENT_ON)
return 0;
link->pm_state = PMSG_ON;
@@ -367,8 +383,7 @@ static int card_resume(struct pnp_dev *dev)
* pnp_register_card_driver - registers a PnP card driver with the PnP Layer
* @drv: pointer to the driver to register
*/
-
-int pnp_register_card_driver(struct pnp_card_driver * drv)
+int pnp_register_card_driver(struct pnp_card_driver *drv)
{
int error;
struct list_head *pos, *temp;
@@ -389,9 +404,10 @@ int pnp_register_card_driver(struct pnp_card_driver * drv)
list_add_tail(&drv->global_list, &pnp_card_drivers);
spin_unlock(&pnp_lock);
- list_for_each_safe(pos,temp,&pnp_cards){
- struct pnp_card *card = list_entry(pos, struct pnp_card, global_list);
- card_probe(card,drv);
+ list_for_each_safe(pos, temp, &pnp_cards) {
+ struct pnp_card *card =
+ list_entry(pos, struct pnp_card, global_list);
+ card_probe(card, drv);
}
return 0;
}
@@ -400,8 +416,7 @@ int pnp_register_card_driver(struct pnp_card_driver * drv)
* pnp_unregister_card_driver - unregisters a PnP card driver from the PnP Layer
* @drv: pointer to the driver to unregister
*/
-
-void pnp_unregister_card_driver(struct pnp_card_driver * drv)
+void pnp_unregister_card_driver(struct pnp_card_driver *drv)
{
spin_lock(&pnp_lock);
list_del(&drv->global_list);
@@ -409,13 +424,6 @@ void pnp_unregister_card_driver(struct pnp_card_driver * drv)
pnp_unregister_driver(&drv->link);
}
-#if 0
-EXPORT_SYMBOL(pnp_add_card);
-EXPORT_SYMBOL(pnp_remove_card);
-EXPORT_SYMBOL(pnp_add_card_device);
-EXPORT_SYMBOL(pnp_remove_card_device);
-EXPORT_SYMBOL(pnp_add_card_id);
-#endif /* 0 */
EXPORT_SYMBOL(pnp_request_card_device);
EXPORT_SYMBOL(pnp_release_card_device);
EXPORT_SYMBOL(pnp_register_card_driver);
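
For reference, a minimal sketch — not part of this diff — of how the card.c interface reformatted above is consumed by a card driver. All names and EISA ids here (example_*, "CSC0100", "CSC0000") are hypothetical placeholders; only pnp_card_driver, pnp_register_card_driver(), pnp_request_card_device() and pnp_unregister_card_driver() come from the code above.

	#include <linux/module.h>
	#include <linux/pnp.h>

	static const struct pnp_card_device_id example_card_ids[] = {
		/* a card id followed by the logical devices that must all be
		 * present, exactly what match_card() above walks through */
		{ .id = "CSC0100", .devs = { { .id = "CSC0000" } } },
		{ .id = "" }	/* terminator: match_card() stops at the empty id */
	};
	MODULE_DEVICE_TABLE(pnp_card, example_card_ids);

	static int example_card_probe(struct pnp_card_link *clink,
				      const struct pnp_card_device_id *id)
	{
		struct pnp_dev *dev;

		/* claim the first unclaimed logical device matching the first id */
		dev = pnp_request_card_device(clink, id->devs[0].id, NULL);
		if (!dev)
			return -ENODEV;
		/* ... set up the hardware behind 'dev' ... */
		return 0;
	}

	static void example_card_remove(struct pnp_card_link *clink)
	{
		/* tear down whatever example_card_probe() set up */
	}

	static struct pnp_card_driver example_card_driver = {
		.name		= "example_card",
		.id_table	= example_card_ids,
		.probe		= example_card_probe,
		.remove		= example_card_remove,
	};

	static int __init example_card_init(void)
	{
		return pnp_register_card_driver(&example_card_driver);
	}

	static void __exit example_card_exit(void)
	{
		pnp_unregister_card_driver(&example_card_driver);
	}

	module_init(example_card_init);
	module_exit(example_card_exit);
	MODULE_LICENSE("GPL");
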
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 8e7b2dd3881..61066fdb9e6 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -2,7 +2,6 @@
* core.c - contains all core device and protocol registration functions
*
* Copyright 2002 Adam Belay <ambx1@neo.rr.com>
- *
*/
#include <linux/pnp.h>
@@ -18,7 +17,6 @@
#include "base.h"
-
static LIST_HEAD(pnp_protocols);
LIST_HEAD(pnp_global);
DEFINE_SPINLOCK(pnp_lock);
@@ -36,7 +34,7 @@ void *pnp_alloc(long size)
void *result;
result = kzalloc(size, GFP_KERNEL);
- if (!result){
+ if (!result) {
printk(KERN_ERR "pnp: Out of Memory\n");
return NULL;
}
@@ -49,11 +47,10 @@ void *pnp_alloc(long size)
*
* Ex protocols: ISAPNP, PNPBIOS, etc
*/
-
int pnp_register_protocol(struct pnp_protocol *protocol)
{
int nodenum;
- struct list_head * pos;
+ struct list_head *pos;
if (!protocol)
return -EINVAL;
@@ -64,9 +61,9 @@ int pnp_register_protocol(struct pnp_protocol *protocol)
spin_lock(&pnp_lock);
/* assign the lowest unused number */
- list_for_each(pos,&pnp_protocols) {
- struct pnp_protocol * cur = to_pnp_protocol(pos);
- if (cur->number == nodenum){
+ list_for_each(pos, &pnp_protocols) {
+ struct pnp_protocol *cur = to_pnp_protocol(pos);
+ if (cur->number == nodenum) {
pos = &pnp_protocols;
nodenum++;
}
@@ -83,7 +80,6 @@ int pnp_register_protocol(struct pnp_protocol *protocol)
/**
* pnp_protocol_unregister - removes a pnp protocol from the pnp layer
* @protocol: pointer to the corresponding pnp_protocol structure
- *
*/
void pnp_unregister_protocol(struct pnp_protocol *protocol)
{
@@ -93,11 +89,11 @@ void pnp_unregister_protocol(struct pnp_protocol *protocol)
device_unregister(&protocol->dev);
}
-
static void pnp_free_ids(struct pnp_dev *dev)
{
- struct pnp_id * id;
- struct pnp_id * next;
+ struct pnp_id *id;
+ struct pnp_id *next;
+
if (!dev)
return;
id = dev->id;
@@ -110,7 +106,8 @@ static void pnp_free_ids(struct pnp_dev *dev)
static void pnp_release_device(struct device *dmdev)
{
- struct pnp_dev * dev = to_pnp_dev(dmdev);
+ struct pnp_dev *dev = to_pnp_dev(dmdev);
+
pnp_free_option(dev->independent);
pnp_free_option(dev->dependent);
pnp_free_ids(dev);
@@ -120,6 +117,7 @@ static void pnp_release_device(struct device *dmdev)
int __pnp_add_device(struct pnp_dev *dev)
{
int ret;
+
pnp_fixup_device(dev);
dev->dev.bus = &pnp_bus_type;
dev->dev.dma_mask = &dev->dma_mask;
@@ -143,13 +141,13 @@ int __pnp_add_device(struct pnp_dev *dev)
*
* adds to driver model, name database, fixups, interface, etc.
*/
-
int pnp_add_device(struct pnp_dev *dev)
{
if (!dev || !dev->protocol || dev->card)
return -EINVAL;
dev->dev.parent = &dev->protocol->dev;
- sprintf(dev->dev.bus_id, "%02x:%02x", dev->protocol->number, dev->number);
+ sprintf(dev->dev.bus_id, "%02x:%02x", dev->protocol->number,
+ dev->number);
return __pnp_add_device(dev);
}
@@ -162,21 +160,6 @@ void __pnp_remove_device(struct pnp_dev *dev)
device_unregister(&dev->dev);
}
-/**
- * pnp_remove_device - removes a pnp device from the pnp layer
- * @dev: pointer to dev to add
- *
- * this function will free all mem used by dev
- */
-#if 0
-void pnp_remove_device(struct pnp_dev *dev)
-{
- if (!dev || dev->card)
- return;
- __pnp_remove_device(dev);
-}
-#endif /* 0 */
-
static int __init pnp_init(void)
{
printk(KERN_INFO "Linux Plug and Play Support v0.97 (c) Adam Belay\n");
@@ -184,10 +167,3 @@ static int __init pnp_init(void)
}
subsys_initcall(pnp_init);
-
-#if 0
-EXPORT_SYMBOL(pnp_register_protocol);
-EXPORT_SYMBOL(pnp_unregister_protocol);
-EXPORT_SYMBOL(pnp_add_device);
-EXPORT_SYMBOL(pnp_remove_device);
-#endif /* 0 */
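
A protocol backend is the other consumer of core.c. The sketch below — hypothetical names, not part of this diff — shows the sequence that ends in pnp_add_device(): allocate a pnp_dev, attach an EISA id with pnp_add_id() (declared in drivers/pnp/base.h), and let __pnp_add_device() above do the fixups and sysfs registration. "PNP0c01" is only a placeholder id.

	#include <linux/pnp.h>
	#include <linux/slab.h>
	#include <linux/errno.h>
	#include "base.h"

	static int example_announce(struct pnp_protocol *proto, int number)
	{
		struct pnp_dev *dev;
		struct pnp_id *id;

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		id = kzalloc(sizeof(*id), GFP_KERNEL);
		if (!dev || !id) {
			kfree(dev);
			kfree(id);
			return -ENOMEM;
		}

		dev->protocol = proto;		/* checked by pnp_add_device() */
		dev->number = number;		/* becomes the "%02x:%02x" bus_id */
		dev->capabilities = PNP_READ;

		strcpy(id->id, "PNP0c01");	/* placeholder EISA id */
		pnp_add_id(id, dev);

		return pnp_add_device(dev);	/* card devices are rejected with -EINVAL */
	}
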
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index e161423b430..30b8f6f3258 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -2,7 +2,6 @@
* driver.c - device id matching, driver model, etc.
*
* Copyright 2002 Adam Belay <ambx1@neo.rr.com>
- *
*/
#include <linux/string.h>
@@ -16,12 +15,11 @@
static int compare_func(const char *ida, const char *idb)
{
int i;
+
/* we only need to compare the last 4 chars */
- for (i=3; i<7; i++)
- {
+ for (i = 3; i < 7; i++) {
if (ida[i] != 'X' &&
- idb[i] != 'X' &&
- toupper(ida[i]) != toupper(idb[i]))
+ idb[i] != 'X' && toupper(ida[i]) != toupper(idb[i]))
return 0;
}
return 1;
@@ -31,20 +29,22 @@ int compare_pnp_id(struct pnp_id *pos, const char *id)
{
if (!pos || !id || (strlen(id) != 7))
return 0;
- if (memcmp(id,"ANYDEVS",7)==0)
+ if (memcmp(id, "ANYDEVS", 7) == 0)
return 1;
- while (pos){
- if (memcmp(pos->id,id,3)==0)
- if (compare_func(pos->id,id)==1)
+ while (pos) {
+ if (memcmp(pos->id, id, 3) == 0)
+ if (compare_func(pos->id, id) == 1)
return 1;
pos = pos->next;
}
return 0;
}
-static const struct pnp_device_id * match_device(struct pnp_driver *drv, struct pnp_dev *dev)
+static const struct pnp_device_id *match_device(struct pnp_driver *drv,
+ struct pnp_dev *dev)
{
const struct pnp_device_id *drv_id = drv->id_table;
+
if (!drv_id)
return NULL;
@@ -59,7 +59,7 @@ static const struct pnp_device_id * match_device(struct pnp_driver *drv, struct
int pnp_device_attach(struct pnp_dev *pnp_dev)
{
spin_lock(&pnp_lock);
- if(pnp_dev->status != PNP_READY){
+ if (pnp_dev->status != PNP_READY) {
spin_unlock(&pnp_lock);
return -EBUSY;
}
@@ -86,7 +86,8 @@ static int pnp_device_probe(struct device *dev)
pnp_dev = to_pnp_dev(dev);
pnp_drv = to_pnp_driver(dev->driver);
- pnp_dbg("match found with the PnP device '%s' and the driver '%s'", dev->bus_id,pnp_drv->name);
+ pnp_dbg("match found with the PnP device '%s' and the driver '%s'",
+ dev->bus_id, pnp_drv->name);
error = pnp_device_attach(pnp_dev);
if (error < 0)
@@ -99,7 +100,7 @@ static int pnp_device_probe(struct device *dev)
return error;
}
} else if ((pnp_drv->flags & PNP_DRIVER_RES_DISABLE)
- == PNP_DRIVER_RES_DISABLE) {
+ == PNP_DRIVER_RES_DISABLE) {
error = pnp_disable_dev(pnp_dev);
if (error < 0)
return error;
@@ -110,22 +111,22 @@ static int pnp_device_probe(struct device *dev)
if (dev_id != NULL)
error = pnp_drv->probe(pnp_dev, dev_id);
}
- if (error >= 0){
+ if (error >= 0) {
pnp_dev->driver = pnp_drv;
error = 0;
} else
goto fail;
return error;
-fail:
+ fail:
pnp_device_detach(pnp_dev);
return error;
}
static int pnp_device_remove(struct device *dev)
{
- struct pnp_dev * pnp_dev = to_pnp_dev(dev);
- struct pnp_driver * drv = pnp_dev->driver;
+ struct pnp_dev *pnp_dev = to_pnp_dev(dev);
+ struct pnp_driver *drv = pnp_dev->driver;
if (drv) {
if (drv->remove)
@@ -138,8 +139,9 @@ static int pnp_device_remove(struct device *dev)
static int pnp_bus_match(struct device *dev, struct device_driver *drv)
{
- struct pnp_dev * pnp_dev = to_pnp_dev(dev);
- struct pnp_driver * pnp_drv = to_pnp_driver(drv);
+ struct pnp_dev *pnp_dev = to_pnp_dev(dev);
+ struct pnp_driver *pnp_drv = to_pnp_driver(drv);
+
if (match_device(pnp_drv, pnp_dev) == NULL)
return 0;
return 1;
@@ -147,8 +149,8 @@ static int pnp_bus_match(struct device *dev, struct device_driver *drv)
static int pnp_bus_suspend(struct device *dev, pm_message_t state)
{
- struct pnp_dev * pnp_dev = to_pnp_dev(dev);
- struct pnp_driver * pnp_drv = pnp_dev->driver;
+ struct pnp_dev *pnp_dev = to_pnp_dev(dev);
+ struct pnp_driver *pnp_drv = pnp_dev->driver;
int error;
if (!pnp_drv)
@@ -162,23 +164,28 @@ static int pnp_bus_suspend(struct device *dev, pm_message_t state)
if (!(pnp_drv->flags & PNP_DRIVER_RES_DO_NOT_CHANGE) &&
pnp_can_disable(pnp_dev)) {
- error = pnp_stop_dev(pnp_dev);
- if (error)
- return error;
+ error = pnp_stop_dev(pnp_dev);
+ if (error)
+ return error;
}
+ if (pnp_dev->protocol && pnp_dev->protocol->suspend)
+ pnp_dev->protocol->suspend(pnp_dev, state);
return 0;
}
static int pnp_bus_resume(struct device *dev)
{
- struct pnp_dev * pnp_dev = to_pnp_dev(dev);
- struct pnp_driver * pnp_drv = pnp_dev->driver;
+ struct pnp_dev *pnp_dev = to_pnp_dev(dev);
+ struct pnp_driver *pnp_drv = pnp_dev->driver;
int error;
if (!pnp_drv)
return 0;
+ if (pnp_dev->protocol && pnp_dev->protocol->resume)
+ pnp_dev->protocol->resume(pnp_dev);
+
if (!(pnp_drv->flags & PNP_DRIVER_RES_DO_NOT_CHANGE)) {
error = pnp_start_dev(pnp_dev);
if (error)
@@ -192,12 +199,12 @@ static int pnp_bus_resume(struct device *dev)
}
struct bus_type pnp_bus_type = {
- .name = "pnp",
- .match = pnp_bus_match,
- .probe = pnp_device_probe,
- .remove = pnp_device_remove,
+ .name = "pnp",
+ .match = pnp_bus_match,
+ .probe = pnp_device_probe,
+ .remove = pnp_device_remove,
.suspend = pnp_bus_suspend,
- .resume = pnp_bus_resume,
+ .resume = pnp_bus_resume,
};
int pnp_register_driver(struct pnp_driver *drv)
@@ -220,12 +227,11 @@ void pnp_unregister_driver(struct pnp_driver *drv)
* pnp_add_id - adds an EISA id to the specified device
* @id: pointer to a pnp_id structure
* @dev: pointer to the desired device
- *
*/
-
int pnp_add_id(struct pnp_id *id, struct pnp_dev *dev)
{
struct pnp_id *ptr;
+
if (!id)
return -EINVAL;
if (!dev)
@@ -243,8 +249,5 @@ int pnp_add_id(struct pnp_id *id, struct pnp_dev *dev)
EXPORT_SYMBOL(pnp_register_driver);
EXPORT_SYMBOL(pnp_unregister_driver);
-#if 0
-EXPORT_SYMBOL(pnp_add_id);
-#endif
EXPORT_SYMBOL(pnp_device_attach);
EXPORT_SYMBOL(pnp_device_detach);
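
For context, a sketch of the driver side that pnp_bus_match() and pnp_device_probe() above serve; names are hypothetical and "PNPb02f" is only an illustrative EISA id. An 'X' in the last four characters of a table entry acts as a wildcard, per compare_func() above.

	#include <linux/pnp.h>

	static const struct pnp_device_id example_dev_ids[] = {
		{ .id = "PNPb02f", .driver_data = 0 },
		{ .id = "" }	/* terminator */
	};
	MODULE_DEVICE_TABLE(pnp, example_dev_ids);

	static int example_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
	{
		if (!pnp_port_valid(dev, 0))
			return -ENODEV;
		/* resources were already activated by pnp_device_probe() unless
		 * PNP_DRIVER_RES_DO_NOT_CHANGE was set in .flags */
		return 0;
	}

	static void example_remove(struct pnp_dev *dev)
	{
		/* undo example_probe() */
	}

	static struct pnp_driver example_driver = {
		.name		= "example",
		.id_table	= example_dev_ids,
		.probe		= example_probe,
		.remove		= example_remove,
	};

	/* registered/unregistered with pnp_register_driver(&example_driver)
	 * and pnp_unregister_driver(&example_driver) */
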
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index ac9fcd499f3..fe6684e13e8 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -3,7 +3,6 @@
*
* Some code, especially possible resource dumping is based on isapnp_proc.c (c) Jaroslav Kysela <perex@suse.cz>
* Copyright 2002 Adam Belay <ambx1@neo.rr.com>
- *
*/
#include <linux/pnp.h>
@@ -29,7 +28,7 @@ struct pnp_info_buffer {
typedef struct pnp_info_buffer pnp_info_buffer_t;
-static int pnp_printf(pnp_info_buffer_t * buffer, char *fmt,...)
+static int pnp_printf(pnp_info_buffer_t * buffer, char *fmt, ...)
{
va_list args;
int res;
@@ -48,14 +47,18 @@ static int pnp_printf(pnp_info_buffer_t * buffer, char *fmt,...)
return res;
}
-static void pnp_print_port(pnp_info_buffer_t *buffer, char *space, struct pnp_port *port)
+static void pnp_print_port(pnp_info_buffer_t * buffer, char *space,
+ struct pnp_port *port)
{
- pnp_printf(buffer, "%sport 0x%x-0x%x, align 0x%x, size 0x%x, %i-bit address decoding\n",
- space, port->min, port->max, port->align ? (port->align-1) : 0, port->size,
- port->flags & PNP_PORT_FLAG_16BITADDR ? 16 : 10);
+ pnp_printf(buffer,
+ "%sport 0x%x-0x%x, align 0x%x, size 0x%x, %i-bit address decoding\n",
+ space, port->min, port->max,
+ port->align ? (port->align - 1) : 0, port->size,
+ port->flags & PNP_PORT_FLAG_16BITADDR ? 16 : 10);
}
-static void pnp_print_irq(pnp_info_buffer_t *buffer, char *space, struct pnp_irq *irq)
+static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space,
+ struct pnp_irq *irq)
{
int first = 1, i;
@@ -85,14 +88,15 @@ static void pnp_print_irq(pnp_info_buffer_t *buffer, char *space, struct pnp_irq
pnp_printf(buffer, "\n");
}
-static void pnp_print_dma(pnp_info_buffer_t *buffer, char *space, struct pnp_dma *dma)
+static void pnp_print_dma(pnp_info_buffer_t * buffer, char *space,
+ struct pnp_dma *dma)
{
int first = 1, i;
char *s;
pnp_printf(buffer, "%sdma ", space);
for (i = 0; i < 8; i++)
- if (dma->map & (1<<i)) {
+ if (dma->map & (1 << i)) {
if (!first) {
pnp_printf(buffer, ",");
} else {
@@ -136,12 +140,13 @@ static void pnp_print_dma(pnp_info_buffer_t *buffer, char *space, struct pnp_dma
pnp_printf(buffer, " %s\n", s);
}
-static void pnp_print_mem(pnp_info_buffer_t *buffer, char *space, struct pnp_mem *mem)
+static void pnp_print_mem(pnp_info_buffer_t * buffer, char *space,
+ struct pnp_mem *mem)
{
char *s;
pnp_printf(buffer, "%sMemory 0x%x-0x%x, align 0x%x, size 0x%x",
- space, mem->min, mem->max, mem->align, mem->size);
+ space, mem->min, mem->max, mem->align, mem->size);
if (mem->flags & IORESOURCE_MEM_WRITEABLE)
pnp_printf(buffer, ", writeable");
if (mem->flags & IORESOURCE_MEM_CACHEABLE)
@@ -168,7 +173,7 @@ static void pnp_print_mem(pnp_info_buffer_t *buffer, char *space, struct pnp_mem
pnp_printf(buffer, ", %s\n", s);
}
-static void pnp_print_option(pnp_info_buffer_t *buffer, char *space,
+static void pnp_print_option(pnp_info_buffer_t * buffer, char *space,
struct pnp_option *option, int dep)
{
char *s;
@@ -179,19 +184,19 @@ static void pnp_print_option(pnp_info_buffer_t *buffer, char *space,
if (dep) {
switch (option->priority) {
- case PNP_RES_PRIORITY_PREFERRED:
+ case PNP_RES_PRIORITY_PREFERRED:
s = "preferred";
break;
- case PNP_RES_PRIORITY_ACCEPTABLE:
+ case PNP_RES_PRIORITY_ACCEPTABLE:
s = "acceptable";
break;
- case PNP_RES_PRIORITY_FUNCTIONAL:
+ case PNP_RES_PRIORITY_FUNCTIONAL:
s = "functional";
break;
- default:
+ default:
s = "invalid";
}
- pnp_printf(buffer, "Dependent: %02i - Priority %s\n",dep, s);
+ pnp_printf(buffer, "Dependent: %02i - Priority %s\n", dep, s);
}
for (port = option->port; port; port = port->next)
@@ -204,16 +209,16 @@ static void pnp_print_option(pnp_info_buffer_t *buffer, char *space,
pnp_print_mem(buffer, space, mem);
}
-
-static ssize_t pnp_show_options(struct device *dmdev, struct device_attribute *attr, char *buf)
+static ssize_t pnp_show_options(struct device *dmdev,
+ struct device_attribute *attr, char *buf)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
- struct pnp_option * independent = dev->independent;
- struct pnp_option * dependent = dev->dependent;
+ struct pnp_option *independent = dev->independent;
+ struct pnp_option *dependent = dev->dependent;
int ret, dep = 1;
pnp_info_buffer_t *buffer = (pnp_info_buffer_t *)
- pnp_alloc(sizeof(pnp_info_buffer_t));
+ pnp_alloc(sizeof(pnp_info_buffer_t));
if (!buffer)
return -ENOMEM;
@@ -223,7 +228,7 @@ static ssize_t pnp_show_options(struct device *dmdev, struct device_attribute *a
if (independent)
pnp_print_option(buffer, "", independent, 0);
- while (dependent){
+ while (dependent) {
pnp_print_option(buffer, " ", dependent, dep);
dependent = dependent->next;
dep++;
@@ -233,10 +238,11 @@ static ssize_t pnp_show_options(struct device *dmdev, struct device_attribute *a
return ret;
}
-static DEVICE_ATTR(options,S_IRUGO,pnp_show_options,NULL);
+static DEVICE_ATTR(options, S_IRUGO, pnp_show_options, NULL);
-
-static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_attribute *attr, char *buf)
+static ssize_t pnp_show_current_resources(struct device *dmdev,
+ struct device_attribute *attr,
+ char *buf)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
int i, ret;
@@ -252,52 +258,56 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at
buffer->buffer = buf;
buffer->curr = buffer->buffer;
- pnp_printf(buffer,"state = ");
+ pnp_printf(buffer, "state = ");
if (dev->active)
- pnp_printf(buffer,"active\n");
+ pnp_printf(buffer, "active\n");
else
- pnp_printf(buffer,"disabled\n");
+ pnp_printf(buffer, "disabled\n");
for (i = 0; i < PNP_MAX_PORT; i++) {
if (pnp_port_valid(dev, i)) {
- pnp_printf(buffer,"io");
+ pnp_printf(buffer, "io");
if (pnp_port_flags(dev, i) & IORESOURCE_DISABLED)
- pnp_printf(buffer," disabled\n");
+ pnp_printf(buffer, " disabled\n");
else
- pnp_printf(buffer," 0x%llx-0x%llx\n",
- (unsigned long long)pnp_port_start(dev, i),
- (unsigned long long)pnp_port_end(dev, i));
+ pnp_printf(buffer, " 0x%llx-0x%llx\n",
+ (unsigned long long)
+ pnp_port_start(dev, i),
+ (unsigned long long)pnp_port_end(dev,
+ i));
}
}
for (i = 0; i < PNP_MAX_MEM; i++) {
if (pnp_mem_valid(dev, i)) {
- pnp_printf(buffer,"mem");
+ pnp_printf(buffer, "mem");
if (pnp_mem_flags(dev, i) & IORESOURCE_DISABLED)
- pnp_printf(buffer," disabled\n");
+ pnp_printf(buffer, " disabled\n");
else
- pnp_printf(buffer," 0x%llx-0x%llx\n",
- (unsigned long long)pnp_mem_start(dev, i),
- (unsigned long long)pnp_mem_end(dev, i));
+ pnp_printf(buffer, " 0x%llx-0x%llx\n",
+ (unsigned long long)
+ pnp_mem_start(dev, i),
+ (unsigned long long)pnp_mem_end(dev,
+ i));
}
}
for (i = 0; i < PNP_MAX_IRQ; i++) {
if (pnp_irq_valid(dev, i)) {
- pnp_printf(buffer,"irq");
+ pnp_printf(buffer, "irq");
if (pnp_irq_flags(dev, i) & IORESOURCE_DISABLED)
- pnp_printf(buffer," disabled\n");
+ pnp_printf(buffer, " disabled\n");
else
- pnp_printf(buffer," %lld\n",
- (unsigned long long)pnp_irq(dev, i));
+ pnp_printf(buffer, " %lld\n",
+ (unsigned long long)pnp_irq(dev, i));
}
}
for (i = 0; i < PNP_MAX_DMA; i++) {
if (pnp_dma_valid(dev, i)) {
- pnp_printf(buffer,"dma");
+ pnp_printf(buffer, "dma");
if (pnp_dma_flags(dev, i) & IORESOURCE_DISABLED)
- pnp_printf(buffer," disabled\n");
+ pnp_printf(buffer, " disabled\n");
else
- pnp_printf(buffer," %lld\n",
- (unsigned long long)pnp_dma(dev, i));
+ pnp_printf(buffer, " %lld\n",
+ (unsigned long long)pnp_dma(dev, i));
}
}
ret = (buffer->curr - buf);
@@ -308,55 +318,57 @@ static ssize_t pnp_show_current_resources(struct device *dmdev, struct device_at
extern struct semaphore pnp_res_mutex;
static ssize_t
-pnp_set_current_resources(struct device * dmdev, struct device_attribute *attr, const char * ubuf, size_t count)
+pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
+ const char *ubuf, size_t count)
{
struct pnp_dev *dev = to_pnp_dev(dmdev);
- char *buf = (void *)ubuf;
- int retval = 0;
+ char *buf = (void *)ubuf;
+ int retval = 0;
if (dev->status & PNP_ATTACHED) {
retval = -EBUSY;
- pnp_info("Device %s cannot be configured because it is in use.", dev->dev.bus_id);
+ pnp_info("Device %s cannot be configured because it is in use.",
+ dev->dev.bus_id);
goto done;
}
while (isspace(*buf))
++buf;
- if (!strnicmp(buf,"disable",7)) {
+ if (!strnicmp(buf, "disable", 7)) {
retval = pnp_disable_dev(dev);
goto done;
}
- if (!strnicmp(buf,"activate",8)) {
+ if (!strnicmp(buf, "activate", 8)) {
retval = pnp_activate_dev(dev);
goto done;
}
- if (!strnicmp(buf,"fill",4)) {
+ if (!strnicmp(buf, "fill", 4)) {
if (dev->active)
goto done;
retval = pnp_auto_config_dev(dev);
goto done;
}
- if (!strnicmp(buf,"auto",4)) {
+ if (!strnicmp(buf, "auto", 4)) {
if (dev->active)
goto done;
pnp_init_resource_table(&dev->res);
retval = pnp_auto_config_dev(dev);
goto done;
}
- if (!strnicmp(buf,"clear",5)) {
+ if (!strnicmp(buf, "clear", 5)) {
if (dev->active)
goto done;
pnp_init_resource_table(&dev->res);
goto done;
}
- if (!strnicmp(buf,"get",3)) {
+ if (!strnicmp(buf, "get", 3)) {
down(&pnp_res_mutex);
if (pnp_can_read(dev))
dev->protocol->get(dev, &dev->res);
up(&pnp_res_mutex);
goto done;
}
- if (!strnicmp(buf,"set",3)) {
+ if (!strnicmp(buf, "set", 3)) {
int nport = 0, nmem = 0, nirq = 0, ndma = 0;
if (dev->active)
goto done;
@@ -366,65 +378,77 @@ pnp_set_current_resources(struct device * dmdev, struct device_attribute *attr,
while (1) {
while (isspace(*buf))
++buf;
- if (!strnicmp(buf,"io",2)) {
+ if (!strnicmp(buf, "io", 2)) {
buf += 2;
while (isspace(*buf))
++buf;
- dev->res.port_resource[nport].start = simple_strtoul(buf,&buf,0);
+ dev->res.port_resource[nport].start =
+ simple_strtoul(buf, &buf, 0);
while (isspace(*buf))
++buf;
- if(*buf == '-') {
+ if (*buf == '-') {
buf += 1;
while (isspace(*buf))
++buf;
- dev->res.port_resource[nport].end = simple_strtoul(buf,&buf,0);
+ dev->res.port_resource[nport].end =
+ simple_strtoul(buf, &buf, 0);
} else
- dev->res.port_resource[nport].end = dev->res.port_resource[nport].start;
- dev->res.port_resource[nport].flags = IORESOURCE_IO;
+ dev->res.port_resource[nport].end =
+ dev->res.port_resource[nport].start;
+ dev->res.port_resource[nport].flags =
+ IORESOURCE_IO;
nport++;
if (nport >= PNP_MAX_PORT)
break;
continue;
}
- if (!strnicmp(buf,"mem",3)) {
+ if (!strnicmp(buf, "mem", 3)) {
buf += 3;
while (isspace(*buf))
++buf;
- dev->res.mem_resource[nmem].start = simple_strtoul(buf,&buf,0);
+ dev->res.mem_resource[nmem].start =
+ simple_strtoul(buf, &buf, 0);
while (isspace(*buf))
++buf;
- if(*buf == '-') {
+ if (*buf == '-') {
buf += 1;
while (isspace(*buf))
++buf;
- dev->res.mem_resource[nmem].end = simple_strtoul(buf,&buf,0);
+ dev->res.mem_resource[nmem].end =
+ simple_strtoul(buf, &buf, 0);
} else
- dev->res.mem_resource[nmem].end = dev->res.mem_resource[nmem].start;
- dev->res.mem_resource[nmem].flags = IORESOURCE_MEM;
+ dev->res.mem_resource[nmem].end =
+ dev->res.mem_resource[nmem].start;
+ dev->res.mem_resource[nmem].flags =
+ IORESOURCE_MEM;
nmem++;
if (nmem >= PNP_MAX_MEM)
break;
continue;
}
- if (!strnicmp(buf,"irq",3)) {
+ if (!strnicmp(buf, "irq", 3)) {
buf += 3;
while (isspace(*buf))
++buf;
dev->res.irq_resource[nirq].start =
- dev->res.irq_resource[nirq].end = simple_strtoul(buf,&buf,0);
- dev->res.irq_resource[nirq].flags = IORESOURCE_IRQ;
+ dev->res.irq_resource[nirq].end =
+ simple_strtoul(buf, &buf, 0);
+ dev->res.irq_resource[nirq].flags =
+ IORESOURCE_IRQ;
nirq++;
if (nirq >= PNP_MAX_IRQ)
break;
continue;
}
- if (!strnicmp(buf,"dma",3)) {
+ if (!strnicmp(buf, "dma", 3)) {
buf += 3;
while (isspace(*buf))
++buf;
dev->res.dma_resource[ndma].start =
- dev->res.dma_resource[ndma].end = simple_strtoul(buf,&buf,0);
- dev->res.dma_resource[ndma].flags = IORESOURCE_DMA;
+ dev->res.dma_resource[ndma].end =
+ simple_strtoul(buf, &buf, 0);
+ dev->res.dma_resource[ndma].flags =
+ IORESOURCE_DMA;
ndma++;
if (ndma >= PNP_MAX_DMA)
break;
@@ -435,45 +459,50 @@ pnp_set_current_resources(struct device * dmdev, struct device_attribute *attr,
up(&pnp_res_mutex);
goto done;
}
- done:
+ done:
if (retval < 0)
return retval;
return count;
}
-static DEVICE_ATTR(resources,S_IRUGO | S_IWUSR,
- pnp_show_current_resources,pnp_set_current_resources);
+static DEVICE_ATTR(resources, S_IRUGO | S_IWUSR,
+ pnp_show_current_resources, pnp_set_current_resources);
-static ssize_t pnp_show_current_ids(struct device *dmdev, struct device_attribute *attr, char *buf)
+static ssize_t pnp_show_current_ids(struct device *dmdev,
+ struct device_attribute *attr, char *buf)
{
char *str = buf;
struct pnp_dev *dev = to_pnp_dev(dmdev);
- struct pnp_id * pos = dev->id;
+ struct pnp_id *pos = dev->id;
while (pos) {
- str += sprintf(str,"%s\n", pos->id);
+ str += sprintf(str, "%s\n", pos->id);
pos = pos->next;
}
return (str - buf);
}
-static DEVICE_ATTR(id,S_IRUGO,pnp_show_current_ids,NULL);
+static DEVICE_ATTR(id, S_IRUGO, pnp_show_current_ids, NULL);
int pnp_interface_attach_device(struct pnp_dev *dev)
{
- int rc = device_create_file(&dev->dev,&dev_attr_options);
- if (rc) goto err;
- rc = device_create_file(&dev->dev,&dev_attr_resources);
- if (rc) goto err_opt;
- rc = device_create_file(&dev->dev,&dev_attr_id);
- if (rc) goto err_res;
+ int rc = device_create_file(&dev->dev, &dev_attr_options);
+
+ if (rc)
+ goto err;
+ rc = device_create_file(&dev->dev, &dev_attr_resources);
+ if (rc)
+ goto err_opt;
+ rc = device_create_file(&dev->dev, &dev_attr_id);
+ if (rc)
+ goto err_res;
return 0;
-err_res:
- device_remove_file(&dev->dev,&dev_attr_resources);
-err_opt:
- device_remove_file(&dev->dev,&dev_attr_options);
-err:
+ err_res:
+ device_remove_file(&dev->dev, &dev_attr_resources);
+ err_opt:
+ device_remove_file(&dev->dev, &dev_attr_options);
+ err:
return rc;
}
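
The strings accepted by pnp_set_current_resources() above form a small command language ("disable", "activate", "fill", "auto", "clear", "get", "set io/mem/irq/dma ..."). A userspace sketch — the device path and resource values are made up — driving it through the "resources" attribute created by pnp_interface_attach_device():

	#include <stdio.h>

	/* one command per open/write/close: each store above acts on the first
	 * command it finds in the buffer and then returns */
	static int pnp_cmd(const char *path, const char *cmd)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fprintf(f, "%s\n", cmd);
		return fclose(f);
	}

	int main(void)
	{
		const char *res = "/sys/bus/pnp/devices/00:01/resources";

		pnp_cmd(res, "disable");			/* pnp_disable_dev()   */
		pnp_cmd(res, "set io 0x220-0x22f irq 5 dma 1");	/* fill dev->res       */
		pnp_cmd(res, "activate");			/* pnp_activate_dev()  */
		return 0;
	}
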
diff --git a/drivers/pnp/isapnp/compat.c b/drivers/pnp/isapnp/compat.c
index 0697ab88a9a..10bdcc4d4f7 100644
--- a/drivers/pnp/isapnp/compat.c
+++ b/drivers/pnp/isapnp/compat.c
@@ -3,34 +3,30 @@
* the old isapnp APIs. If possible use the new APIs instead.
*
* Copyright 2002 Adam Belay <ambx1@neo.rr.com>
- *
*/
-
-/* TODO: see if more isapnp functions are needed here */
#include <linux/module.h>
#include <linux/isapnp.h>
#include <linux/string.h>
-static void pnp_convert_id(char *buf, unsigned short vendor, unsigned short device)
+static void pnp_convert_id(char *buf, unsigned short vendor,
+ unsigned short device)
{
sprintf(buf, "%c%c%c%x%x%x%x",
- 'A' + ((vendor >> 2) & 0x3f) - 1,
- 'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
- 'A' + ((vendor >> 8) & 0x1f) - 1,
- (device >> 4) & 0x0f,
- device & 0x0f,
- (device >> 12) & 0x0f,
- (device >> 8) & 0x0f);
+ 'A' + ((vendor >> 2) & 0x3f) - 1,
+ 'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
+ 'A' + ((vendor >> 8) & 0x1f) - 1,
+ (device >> 4) & 0x0f, device & 0x0f,
+ (device >> 12) & 0x0f, (device >> 8) & 0x0f);
}
-struct pnp_card *pnp_find_card(unsigned short vendor,
- unsigned short device,
+struct pnp_card *pnp_find_card(unsigned short vendor, unsigned short device,
struct pnp_card *from)
{
char id[8];
char any[8];
struct list_head *list;
+
pnp_convert_id(id, vendor, device);
pnp_convert_id(any, ISAPNP_ANY_ID, ISAPNP_ANY_ID);
@@ -38,20 +34,20 @@ struct pnp_card *pnp_find_card(unsigned short vendor,
while (list != &pnp_cards) {
struct pnp_card *card = global_to_pnp_card(list);
- if (compare_pnp_id(card->id,id) || (memcmp(id,any,7)==0))
+
+ if (compare_pnp_id(card->id, id) || (memcmp(id, any, 7) == 0))
return card;
list = list->next;
}
return NULL;
}
-struct pnp_dev *pnp_find_dev(struct pnp_card *card,
- unsigned short vendor,
- unsigned short function,
- struct pnp_dev *from)
+struct pnp_dev *pnp_find_dev(struct pnp_card *card, unsigned short vendor,
+ unsigned short function, struct pnp_dev *from)
{
char id[8];
char any[8];
+
pnp_convert_id(id, vendor, function);
pnp_convert_id(any, ISAPNP_ANY_ID, ISAPNP_ANY_ID);
if (card == NULL) { /* look for a logical device from all cards */
@@ -63,7 +59,9 @@ struct pnp_dev *pnp_find_dev(struct pnp_card *card,
while (list != &pnp_global) {
struct pnp_dev *dev = global_to_pnp_dev(list);
- if (compare_pnp_id(dev->id,id) || (memcmp(id,any,7)==0))
+
+ if (compare_pnp_id(dev->id, id) ||
+ (memcmp(id, any, 7) == 0))
return dev;
list = list->next;
}
@@ -78,7 +76,8 @@ struct pnp_dev *pnp_find_dev(struct pnp_card *card,
}
while (list != &card->devices) {
struct pnp_dev *dev = card_to_pnp_dev(list);
- if (compare_pnp_id(dev->id,id))
+
+ if (compare_pnp_id(dev->id, id))
return dev;
list = list->next;
}
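
For reference, the legacy lookup kept alive by compat.c, as an old-style caller might use it; the vendor/device values are placeholders, not a real board. pnp_convert_id() above packs the numeric pair into the same 7-character EISA string that compare_pnp_id() matches on.

	#include <linux/isapnp.h>
	#include <linux/errno.h>

	static int __init example_find(void)
	{
		struct pnp_card *card;
		struct pnp_dev *dev;

		card = pnp_find_card(ISAPNP_VENDOR('C', 'S', 'C'),
				     ISAPNP_DEVICE(0x0100), NULL);
		if (!card)
			return -ENODEV;

		/* logical function 0x0000 on that card */
		dev = pnp_find_dev(card, ISAPNP_VENDOR('C', 'S', 'C'),
				   ISAPNP_FUNCTION(0x0000), NULL);
		return dev ? 0 : -ENODEV;
	}
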
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 914d00c423a..b4e2aa995b5 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -51,10 +51,10 @@
#define ISAPNP_DEBUG
#endif
-int isapnp_disable; /* Disable ISA PnP */
-static int isapnp_rdp; /* Read Data Port */
-static int isapnp_reset = 1; /* reset all PnP cards (deactivate) */
-static int isapnp_verbose = 1; /* verbose mode */
+int isapnp_disable; /* Disable ISA PnP */
+static int isapnp_rdp; /* Read Data Port */
+static int isapnp_reset = 1; /* reset all PnP cards (deactivate) */
+static int isapnp_verbose = 1; /* verbose mode */
MODULE_AUTHOR("Jaroslav Kysela <perex@suse.cz>");
MODULE_DESCRIPTION("Generic ISA Plug & Play support");
@@ -126,7 +126,7 @@ static unsigned short isapnp_read_word(unsigned char idx)
unsigned short val;
val = isapnp_read_byte(idx);
- val = (val << 8) + isapnp_read_byte(idx+1);
+ val = (val << 8) + isapnp_read_byte(idx + 1);
return val;
}
@@ -139,7 +139,7 @@ void isapnp_write_byte(unsigned char idx, unsigned char val)
static void isapnp_write_word(unsigned char idx, unsigned short val)
{
isapnp_write_byte(idx, val >> 8);
- isapnp_write_byte(idx+1, val);
+ isapnp_write_byte(idx + 1, val);
}
static void isapnp_key(void)
@@ -193,7 +193,7 @@ static void isapnp_deactivate(unsigned char logdev)
static void __init isapnp_peek(unsigned char *data, int bytes)
{
int i, j;
- unsigned char d=0;
+ unsigned char d = 0;
for (i = 1; i <= bytes; i++) {
for (j = 0; j < 20; j++) {
@@ -220,19 +220,18 @@ static int isapnp_next_rdp(void)
{
int rdp = isapnp_rdp;
static int old_rdp = 0;
-
- if(old_rdp)
- {
+
+ if (old_rdp) {
release_region(old_rdp, 1);
old_rdp = 0;
}
while (rdp <= 0x3ff) {
/*
- * We cannot use NE2000 probe spaces for ISAPnP or we
- * will lock up machines.
+ * We cannot use NE2000 probe spaces for ISAPnP or we
+ * will lock up machines.
*/
- if ((rdp < 0x280 || rdp > 0x380) && request_region(rdp, 1, "ISAPnP"))
- {
+ if ((rdp < 0x280 || rdp > 0x380)
+ && request_region(rdp, 1, "ISAPnP")) {
isapnp_rdp = rdp;
old_rdp = rdp;
return 0;
@@ -253,7 +252,6 @@ static inline void isapnp_set_rdp(void)
* Perform an isolation. The port selection code now tries to avoid
* "dangerous to read" ports.
*/
-
static int __init isapnp_isolate_rdp_select(void)
{
isapnp_wait();
@@ -282,7 +280,6 @@ static int __init isapnp_isolate_rdp_select(void)
/*
* Isolate (assign uniqued CSN) to all ISA PnP devices.
*/
-
static int __init isapnp_isolate(void)
{
unsigned char checksum = 0x6a;
@@ -305,7 +302,9 @@ static int __init isapnp_isolate(void)
udelay(250);
if (data == 0x55aa)
bit = 0x01;
- checksum = ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7) | (checksum >> 1);
+ checksum =
+ ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7)
+ | (checksum >> 1);
bit = 0x00;
}
for (i = 65; i <= 72; i++) {
@@ -351,13 +350,12 @@ static int __init isapnp_isolate(void)
/*
* Read one tag from stream.
*/
-
static int __init isapnp_read_tag(unsigned char *type, unsigned short *size)
{
unsigned char tag, tmp[2];
isapnp_peek(&tag, 1);
- if (tag == 0) /* invalid tag */
+ if (tag == 0) /* invalid tag */
return -1;
if (tag & 0x80) { /* large item */
*type = tag;
@@ -368,7 +366,8 @@ static int __init isapnp_read_tag(unsigned char *type, unsigned short *size)
*size = tag & 0x07;
}
#if 0
- printk(KERN_DEBUG "tag = 0x%x, type = 0x%x, size = %i\n", tag, *type, *size);
+ printk(KERN_DEBUG "tag = 0x%x, type = 0x%x, size = %i\n", tag, *type,
+ *size);
#endif
if (*type == 0xff && *size == 0xffff) /* probably invalid data */
return -1;
@@ -378,7 +377,6 @@ static int __init isapnp_read_tag(unsigned char *type, unsigned short *size)
/*
* Skip specified number of bytes from stream.
*/
-
static void __init isapnp_skip_bytes(int count)
{
isapnp_peek(NULL, count);
@@ -387,31 +385,30 @@ static void __init isapnp_skip_bytes(int count)
/*
* Parse EISA id.
*/
-
-static void isapnp_parse_id(struct pnp_dev * dev, unsigned short vendor, unsigned short device)
+static void isapnp_parse_id(struct pnp_dev *dev, unsigned short vendor,
+ unsigned short device)
{
- struct pnp_id * id;
+ struct pnp_id *id;
+
if (!dev)
return;
id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
if (!id)
return;
sprintf(id->id, "%c%c%c%x%x%x%x",
- 'A' + ((vendor >> 2) & 0x3f) - 1,
- 'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
- 'A' + ((vendor >> 8) & 0x1f) - 1,
- (device >> 4) & 0x0f,
- device & 0x0f,
- (device >> 12) & 0x0f,
- (device >> 8) & 0x0f);
+ 'A' + ((vendor >> 2) & 0x3f) - 1,
+ 'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
+ 'A' + ((vendor >> 8) & 0x1f) - 1,
+ (device >> 4) & 0x0f,
+ device & 0x0f, (device >> 12) & 0x0f, (device >> 8) & 0x0f);
pnp_add_id(id, dev);
}
/*
* Parse logical device tag.
*/
-
-static struct pnp_dev * __init isapnp_parse_device(struct pnp_card *card, int size, int number)
+static struct pnp_dev *__init isapnp_parse_device(struct pnp_card *card,
+ int size, int number)
{
unsigned char tmp[6];
struct pnp_dev *dev;
@@ -435,13 +432,11 @@ static struct pnp_dev * __init isapnp_parse_device(struct pnp_card *card, int si
return dev;
}
-
/*
* Add IRQ resource to resources list.
*/
-
static void __init isapnp_parse_irq_resource(struct pnp_option *option,
- int size)
+ int size)
{
unsigned char tmp[3];
struct pnp_irq *irq;
@@ -458,15 +453,13 @@ static void __init isapnp_parse_irq_resource(struct pnp_option *option,
else
irq->flags = IORESOURCE_IRQ_HIGHEDGE;
pnp_register_irq_resource(option, irq);
- return;
}
/*
* Add DMA resource to resources list.
*/
-
static void __init isapnp_parse_dma_resource(struct pnp_option *option,
- int size)
+ int size)
{
unsigned char tmp[2];
struct pnp_dma *dma;
@@ -478,15 +471,13 @@ static void __init isapnp_parse_dma_resource(struct pnp_option *option,
dma->map = tmp[0];
dma->flags = tmp[1];
pnp_register_dma_resource(option, dma);
- return;
}
/*
* Add port resource to resources list.
*/
-
static void __init isapnp_parse_port_resource(struct pnp_option *option,
- int size)
+ int size)
{
unsigned char tmp[7];
struct pnp_port *port;
@@ -500,16 +491,14 @@ static void __init isapnp_parse_port_resource(struct pnp_option *option,
port->align = tmp[5];
port->size = tmp[6];
port->flags = tmp[0] ? PNP_PORT_FLAG_16BITADDR : 0;
- pnp_register_port_resource(option,port);
- return;
+ pnp_register_port_resource(option, port);
}
/*
* Add fixed port resource to resources list.
*/
-
static void __init isapnp_parse_fixed_port_resource(struct pnp_option *option,
- int size)
+ int size)
{
unsigned char tmp[3];
struct pnp_port *port;
@@ -522,16 +511,14 @@ static void __init isapnp_parse_fixed_port_resource(struct pnp_option *option,
port->size = tmp[2];
port->align = 0;
port->flags = PNP_PORT_FLAG_FIXED;
- pnp_register_port_resource(option,port);
- return;
+ pnp_register_port_resource(option, port);
}
/*
* Add memory resource to resources list.
*/
-
static void __init isapnp_parse_mem_resource(struct pnp_option *option,
- int size)
+ int size)
{
unsigned char tmp[9];
struct pnp_mem *mem;
@@ -545,16 +532,14 @@ static void __init isapnp_parse_mem_resource(struct pnp_option *option,
mem->align = (tmp[6] << 8) | tmp[5];
mem->size = ((tmp[8] << 8) | tmp[7]) << 8;
mem->flags = tmp[0];
- pnp_register_mem_resource(option,mem);
- return;
+ pnp_register_mem_resource(option, mem);
}
/*
* Add 32-bit memory resource to resources list.
*/
-
static void __init isapnp_parse_mem32_resource(struct pnp_option *option,
- int size)
+ int size)
{
unsigned char tmp[17];
struct pnp_mem *mem;
@@ -565,18 +550,19 @@ static void __init isapnp_parse_mem32_resource(struct pnp_option *option,
return;
mem->min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
mem->max = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5];
- mem->align = (tmp[12] << 24) | (tmp[11] << 16) | (tmp[10] << 8) | tmp[9];
- mem->size = (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13];
+ mem->align =
+ (tmp[12] << 24) | (tmp[11] << 16) | (tmp[10] << 8) | tmp[9];
+ mem->size =
+ (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13];
mem->flags = tmp[0];
- pnp_register_mem_resource(option,mem);
+ pnp_register_mem_resource(option, mem);
}
/*
* Add 32-bit fixed memory resource to resources list.
*/
-
static void __init isapnp_parse_fixed_mem32_resource(struct pnp_option *option,
- int size)
+ int size)
{
unsigned char tmp[9];
struct pnp_mem *mem;
@@ -585,28 +571,29 @@ static void __init isapnp_parse_fixed_mem32_resource(struct pnp_option *option,
mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
if (!mem)
return;
- mem->min = mem->max = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
+ mem->min = mem->max =
+ (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
mem->size = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5];
mem->align = 0;
mem->flags = tmp[0];
- pnp_register_mem_resource(option,mem);
+ pnp_register_mem_resource(option, mem);
}
/*
* Parse card name for ISA PnP device.
- */
-
+ */
static void __init
isapnp_parse_name(char *name, unsigned int name_max, unsigned short *size)
{
if (name[0] == '\0') {
- unsigned short size1 = *size >= name_max ? (name_max - 1) : *size;
+ unsigned short size1 =
+ *size >= name_max ? (name_max - 1) : *size;
isapnp_peek(name, size1);
name[size1] = '\0';
*size -= size1;
/* clean whitespace from end of string */
- while (size1 > 0 && name[--size1] == ' ')
+ while (size1 > 0 && name[--size1] == ' ')
name[size1] = '\0';
}
}
@@ -614,7 +601,6 @@ isapnp_parse_name(char *name, unsigned int name_max, unsigned short *size)
/*
* Parse resource map for logical device.
*/
-
static int __init isapnp_create_device(struct pnp_card *card,
unsigned short size)
{
@@ -622,6 +608,7 @@ static int __init isapnp_create_device(struct pnp_card *card,
unsigned char type, tmp[17];
struct pnp_option *option;
struct pnp_dev *dev;
+
if ((dev = isapnp_parse_device(card, size, number++)) == NULL)
return 1;
option = pnp_register_independent_option(dev);
@@ -629,17 +616,19 @@ static int __init isapnp_create_device(struct pnp_card *card,
kfree(dev);
return 1;
}
- pnp_add_card_device(card,dev);
+ pnp_add_card_device(card, dev);
while (1) {
- if (isapnp_read_tag(&type, &size)<0)
+ if (isapnp_read_tag(&type, &size) < 0)
return 1;
if (skip && type != _STAG_LOGDEVID && type != _STAG_END)
goto __skip;
switch (type) {
case _STAG_LOGDEVID:
if (size >= 5 && size <= 6) {
- if ((dev = isapnp_parse_device(card, size, number++)) == NULL)
+ if ((dev =
+ isapnp_parse_device(card, size,
+ number++)) == NULL)
return 1;
size = 0;
skip = 0;
@@ -648,7 +637,7 @@ static int __init isapnp_create_device(struct pnp_card *card,
kfree(dev);
return 1;
}
- pnp_add_card_device(card,dev);
+ pnp_add_card_device(card, dev);
} else {
skip = 1;
}
@@ -658,7 +647,8 @@ static int __init isapnp_create_device(struct pnp_card *card,
case _STAG_COMPATDEVID:
if (size == 4 && compat < DEVICE_COUNT_COMPATIBLE) {
isapnp_peek(tmp, 4);
- isapnp_parse_id(dev,(tmp[1] << 8) | tmp[0], (tmp[3] << 8) | tmp[2]);
+ isapnp_parse_id(dev, (tmp[1] << 8) | tmp[0],
+ (tmp[3] << 8) | tmp[2]);
compat++;
size = 0;
}
@@ -684,7 +674,7 @@ static int __init isapnp_create_device(struct pnp_card *card,
priority = 0x100 | tmp[0];
size = 0;
}
- option = pnp_register_dependent_option(dev,priority);
+ option = pnp_register_dependent_option(dev, priority);
if (!option)
return 1;
break;
@@ -739,11 +729,13 @@ static int __init isapnp_create_device(struct pnp_card *card,
isapnp_skip_bytes(size);
return 1;
default:
- printk(KERN_ERR "isapnp: unexpected or unknown tag type 0x%x for logical device %i (device %i), ignored\n", type, dev->number, card->number);
+ printk(KERN_ERR
+ "isapnp: unexpected or unknown tag type 0x%x for logical device %i (device %i), ignored\n",
+ type, dev->number, card->number);
}
__skip:
- if (size > 0)
- isapnp_skip_bytes(size);
+ if (size > 0)
+ isapnp_skip_bytes(size);
}
return 0;
}
@@ -751,14 +743,13 @@ static int __init isapnp_create_device(struct pnp_card *card,
/*
* Parse resource map for ISA PnP card.
*/
-
static void __init isapnp_parse_resource_map(struct pnp_card *card)
{
unsigned char type, tmp[17];
unsigned short size;
while (1) {
- if (isapnp_read_tag(&type, &size)<0)
+ if (isapnp_read_tag(&type, &size) < 0)
return;
switch (type) {
case _STAG_PNPVERNO:
@@ -771,7 +762,7 @@ static void __init isapnp_parse_resource_map(struct pnp_card *card)
break;
case _STAG_LOGDEVID:
if (size >= 5 && size <= 6) {
- if (isapnp_create_device(card, size)==1)
+ if (isapnp_create_device(card, size) == 1)
return;
size = 0;
}
@@ -779,7 +770,8 @@ static void __init isapnp_parse_resource_map(struct pnp_card *card)
case _STAG_VENDOR:
break;
case _LTAG_ANSISTR:
- isapnp_parse_name(card->name, sizeof(card->name), &size);
+ isapnp_parse_name(card->name, sizeof(card->name),
+ &size);
break;
case _LTAG_UNICODESTR:
/* silently ignore */
@@ -792,18 +784,19 @@ static void __init isapnp_parse_resource_map(struct pnp_card *card)
isapnp_skip_bytes(size);
return;
default:
- printk(KERN_ERR "isapnp: unexpected or unknown tag type 0x%x for device %i, ignored\n", type, card->number);
+ printk(KERN_ERR
+ "isapnp: unexpected or unknown tag type 0x%x for device %i, ignored\n",
+ type, card->number);
}
__skip:
- if (size > 0)
- isapnp_skip_bytes(size);
+ if (size > 0)
+ isapnp_skip_bytes(size);
}
}
/*
* Compute ISA PnP checksum for first eight bytes.
*/
-
static unsigned char __init isapnp_checksum(unsigned char *data)
{
int i, j;
@@ -815,7 +808,9 @@ static unsigned char __init isapnp_checksum(unsigned char *data)
bit = 0;
if (b & (1 << j))
bit = 1;
- checksum = ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7) | (checksum >> 1);
+ checksum =
+ ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7)
+ | (checksum >> 1);
}
}
return checksum;
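
The loop above and the isolation loop earlier in this file apply the same feedback step (bit 0 XOR bit 1 of the running checksum, XORed with the incoming serial bit). A standalone restatement, purely for reference and not part of this diff:

	static unsigned char lfsr_step(unsigned char checksum, unsigned char bit)
	{
		return ((((checksum ^ (checksum >> 1)) & 0x01) ^ bit) << 7)
		       | (checksum >> 1);
	}

	/* checksum over the first 8 header bytes, seeded with 0x6a as above */
	static unsigned char header_checksum(const unsigned char *data)
	{
		unsigned char checksum = 0x6a;
		int i, j;

		for (i = 0; i < 8; i++)
			for (j = 0; j < 8; j++)
				checksum = lfsr_step(checksum, (data[i] >> j) & 1);
		return checksum;
	}
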
@@ -824,27 +819,25 @@ static unsigned char __init isapnp_checksum(unsigned char *data)
/*
* Parse EISA id for ISA PnP card.
*/
-
-static void isapnp_parse_card_id(struct pnp_card * card, unsigned short vendor, unsigned short device)
+static void isapnp_parse_card_id(struct pnp_card *card, unsigned short vendor,
+ unsigned short device)
{
- struct pnp_id * id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
+ struct pnp_id *id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
+
if (!id)
return;
sprintf(id->id, "%c%c%c%x%x%x%x",
- 'A' + ((vendor >> 2) & 0x3f) - 1,
- 'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
- 'A' + ((vendor >> 8) & 0x1f) - 1,
- (device >> 4) & 0x0f,
- device & 0x0f,
- (device >> 12) & 0x0f,
- (device >> 8) & 0x0f);
- pnp_add_card_id(id,card);
+ 'A' + ((vendor >> 2) & 0x3f) - 1,
+ 'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
+ 'A' + ((vendor >> 8) & 0x1f) - 1,
+ (device >> 4) & 0x0f,
+ device & 0x0f, (device >> 12) & 0x0f, (device >> 8) & 0x0f);
+ pnp_add_card_id(id, card);
}
/*
* Build device list for all present ISA PnP devices.
*/
-
static int __init isapnp_build_device_list(void)
{
int csn;
@@ -858,22 +851,29 @@ static int __init isapnp_build_device_list(void)
isapnp_peek(header, 9);
checksum = isapnp_checksum(header);
#if 0
- printk(KERN_DEBUG "vendor: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- header[0], header[1], header[2], header[3],
- header[4], header[5], header[6], header[7], header[8]);
+ printk(KERN_DEBUG
+ "vendor: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ header[0], header[1], header[2], header[3], header[4],
+ header[5], header[6], header[7], header[8]);
printk(KERN_DEBUG "checksum = 0x%x\n", checksum);
#endif
- if ((card = kzalloc(sizeof(struct pnp_card), GFP_KERNEL)) == NULL)
+ if ((card =
+ kzalloc(sizeof(struct pnp_card), GFP_KERNEL)) == NULL)
continue;
card->number = csn;
INIT_LIST_HEAD(&card->devices);
- isapnp_parse_card_id(card, (header[1] << 8) | header[0], (header[3] << 8) | header[2]);
- card->serial = (header[7] << 24) | (header[6] << 16) | (header[5] << 8) | header[4];
+ isapnp_parse_card_id(card, (header[1] << 8) | header[0],
+ (header[3] << 8) | header[2]);
+ card->serial =
+ (header[7] << 24) | (header[6] << 16) | (header[5] << 8) |
+ header[4];
isapnp_checksum_value = 0x00;
isapnp_parse_resource_map(card);
if (isapnp_checksum_value != 0x00)
- printk(KERN_ERR "isapnp: checksum for device %i is not valid (0x%x)\n", csn, isapnp_checksum_value);
+ printk(KERN_ERR
+ "isapnp: checksum for device %i is not valid (0x%x)\n",
+ csn, isapnp_checksum_value);
card->checksum = isapnp_checksum_value;
card->protocol = &isapnp_protocol;
@@ -890,6 +890,7 @@ static int __init isapnp_build_device_list(void)
int isapnp_present(void)
{
struct pnp_card *card;
+
pnp_for_each_card(card) {
if (card->protocol == &isapnp_protocol)
return 1;
@@ -911,13 +912,13 @@ int isapnp_cfg_begin(int csn, int logdev)
/* it is possible to set RDP only in the isolation phase */
/* Jens Thoms Toerring <Jens.Toerring@physik.fu-berlin.de> */
isapnp_write_byte(0x02, 0x04); /* clear CSN of card */
- mdelay(2); /* is this necessary? */
- isapnp_wake(csn); /* bring card into sleep state */
- isapnp_wake(0); /* bring card into isolation state */
- isapnp_set_rdp(); /* reset the RDP port */
- udelay(1000); /* delay 1000us */
+ mdelay(2); /* is this necessary? */
+ isapnp_wake(csn); /* bring card into sleep state */
+ isapnp_wake(0); /* bring card into isolation state */
+ isapnp_set_rdp(); /* reset the RDP port */
+ udelay(1000); /* delay 1000us */
isapnp_write_byte(0x06, csn); /* reset CSN to previous value */
- udelay(250); /* is this necessary? */
+ udelay(250); /* is this necessary? */
#endif
if (logdev >= 0)
isapnp_device(logdev);
@@ -931,12 +932,10 @@ int isapnp_cfg_end(void)
return 0;
}
-
/*
- * Inititialization.
+ * Initialization.
*/
-
EXPORT_SYMBOL(isapnp_protocol);
EXPORT_SYMBOL(isapnp_present);
EXPORT_SYMBOL(isapnp_cfg_begin);
@@ -946,7 +945,8 @@ EXPORT_SYMBOL(isapnp_read_byte);
#endif
EXPORT_SYMBOL(isapnp_write_byte);
-static int isapnp_read_resources(struct pnp_dev *dev, struct pnp_resource_table *res)
+static int isapnp_read_resources(struct pnp_dev *dev,
+ struct pnp_resource_table *res)
{
int tmp, ret;
@@ -960,31 +960,37 @@ static int isapnp_read_resources(struct pnp_dev *dev, struct pnp_resource_table
res->port_resource[tmp].flags = IORESOURCE_IO;
}
for (tmp = 0; tmp < PNP_MAX_MEM; tmp++) {
- ret = isapnp_read_word(ISAPNP_CFG_MEM + (tmp << 3)) << 8;
+ ret =
+ isapnp_read_word(ISAPNP_CFG_MEM + (tmp << 3)) << 8;
if (!ret)
continue;
res->mem_resource[tmp].start = ret;
res->mem_resource[tmp].flags = IORESOURCE_MEM;
}
for (tmp = 0; tmp < PNP_MAX_IRQ; tmp++) {
- ret = (isapnp_read_word(ISAPNP_CFG_IRQ + (tmp << 1)) >> 8);
+ ret =
+ (isapnp_read_word(ISAPNP_CFG_IRQ + (tmp << 1)) >>
+ 8);
if (!ret)
continue;
- res->irq_resource[tmp].start = res->irq_resource[tmp].end = ret;
+ res->irq_resource[tmp].start =
+ res->irq_resource[tmp].end = ret;
res->irq_resource[tmp].flags = IORESOURCE_IRQ;
}
for (tmp = 0; tmp < PNP_MAX_DMA; tmp++) {
ret = isapnp_read_byte(ISAPNP_CFG_DMA + tmp);
if (ret == 4)
continue;
- res->dma_resource[tmp].start = res->dma_resource[tmp].end = ret;
+ res->dma_resource[tmp].start =
+ res->dma_resource[tmp].end = ret;
res->dma_resource[tmp].flags = IORESOURCE_DMA;
}
}
return 0;
}
-static int isapnp_get_resources(struct pnp_dev *dev, struct pnp_resource_table * res)
+static int isapnp_get_resources(struct pnp_dev *dev,
+ struct pnp_resource_table *res)
{
int ret;
pnp_init_resource_table(res);
@@ -994,24 +1000,44 @@ static int isapnp_get_resources(struct pnp_dev *dev, struct pnp_resource_table *
return ret;
}
-static int isapnp_set_resources(struct pnp_dev *dev, struct pnp_resource_table * res)
+static int isapnp_set_resources(struct pnp_dev *dev,
+ struct pnp_resource_table *res)
{
int tmp;
isapnp_cfg_begin(dev->card->number, dev->number);
dev->active = 1;
- for (tmp = 0; tmp < PNP_MAX_PORT && (res->port_resource[tmp].flags & (IORESOURCE_IO | IORESOURCE_UNSET)) == IORESOURCE_IO; tmp++)
- isapnp_write_word(ISAPNP_CFG_PORT+(tmp<<1), res->port_resource[tmp].start);
- for (tmp = 0; tmp < PNP_MAX_IRQ && (res->irq_resource[tmp].flags & (IORESOURCE_IRQ | IORESOURCE_UNSET)) == IORESOURCE_IRQ; tmp++) {
+ for (tmp = 0;
+ tmp < PNP_MAX_PORT
+ && (res->port_resource[tmp].
+ flags & (IORESOURCE_IO | IORESOURCE_UNSET)) == IORESOURCE_IO;
+ tmp++)
+ isapnp_write_word(ISAPNP_CFG_PORT + (tmp << 1),
+ res->port_resource[tmp].start);
+ for (tmp = 0;
+ tmp < PNP_MAX_IRQ
+ && (res->irq_resource[tmp].
+ flags & (IORESOURCE_IRQ | IORESOURCE_UNSET)) == IORESOURCE_IRQ;
+ tmp++) {
int irq = res->irq_resource[tmp].start;
if (irq == 2)
irq = 9;
- isapnp_write_byte(ISAPNP_CFG_IRQ+(tmp<<1), irq);
+ isapnp_write_byte(ISAPNP_CFG_IRQ + (tmp << 1), irq);
}
- for (tmp = 0; tmp < PNP_MAX_DMA && (res->dma_resource[tmp].flags & (IORESOURCE_DMA | IORESOURCE_UNSET)) == IORESOURCE_DMA; tmp++)
- isapnp_write_byte(ISAPNP_CFG_DMA+tmp, res->dma_resource[tmp].start);
- for (tmp = 0; tmp < PNP_MAX_MEM && (res->mem_resource[tmp].flags & (IORESOURCE_MEM | IORESOURCE_UNSET)) == IORESOURCE_MEM; tmp++)
- isapnp_write_word(ISAPNP_CFG_MEM+(tmp<<3), (res->mem_resource[tmp].start >> 8) & 0xffff);
+ for (tmp = 0;
+ tmp < PNP_MAX_DMA
+ && (res->dma_resource[tmp].
+ flags & (IORESOURCE_DMA | IORESOURCE_UNSET)) == IORESOURCE_DMA;
+ tmp++)
+ isapnp_write_byte(ISAPNP_CFG_DMA + tmp,
+ res->dma_resource[tmp].start);
+ for (tmp = 0;
+ tmp < PNP_MAX_MEM
+ && (res->mem_resource[tmp].
+ flags & (IORESOURCE_MEM | IORESOURCE_UNSET)) == IORESOURCE_MEM;
+ tmp++)
+ isapnp_write_word(ISAPNP_CFG_MEM + (tmp << 3),
+ (res->mem_resource[tmp].start >> 8) & 0xffff);
/* FIXME: We aren't handling 32bit mems properly here */
isapnp_activate(dev->number);
isapnp_cfg_end();
@@ -1030,9 +1056,9 @@ static int isapnp_disable_resources(struct pnp_dev *dev)
}
struct pnp_protocol isapnp_protocol = {
- .name = "ISA Plug and Play",
- .get = isapnp_get_resources,
- .set = isapnp_set_resources,
+ .name = "ISA Plug and Play",
+ .get = isapnp_get_resources,
+ .set = isapnp_set_resources,
.disable = isapnp_disable_resources,
};
@@ -1053,31 +1079,36 @@ static int __init isapnp_init(void)
#endif
#ifdef ISAPNP_REGION_OK
if (!request_region(_PIDXR, 1, "isapnp index")) {
- printk(KERN_ERR "isapnp: Index Register 0x%x already used\n", _PIDXR);
+ printk(KERN_ERR "isapnp: Index Register 0x%x already used\n",
+ _PIDXR);
return -EBUSY;
}
#endif
if (!request_region(_PNPWRP, 1, "isapnp write")) {
- printk(KERN_ERR "isapnp: Write Data Register 0x%x already used\n", _PNPWRP);
+ printk(KERN_ERR
+ "isapnp: Write Data Register 0x%x already used\n",
+ _PNPWRP);
#ifdef ISAPNP_REGION_OK
release_region(_PIDXR, 1);
#endif
return -EBUSY;
}
- if(pnp_register_protocol(&isapnp_protocol)<0)
+ if (pnp_register_protocol(&isapnp_protocol) < 0)
return -EBUSY;
/*
- * Print a message. The existing ISAPnP code is hanging machines
- * so let the user know where.
+ * Print a message. The existing ISAPnP code is hanging machines
+ * so let the user know where.
*/
-
+
printk(KERN_INFO "isapnp: Scanning for PnP cards...\n");
if (isapnp_rdp >= 0x203 && isapnp_rdp <= 0x3ff) {
isapnp_rdp |= 3;
if (!request_region(isapnp_rdp, 1, "isapnp read")) {
- printk(KERN_ERR "isapnp: Read Data Register 0x%x already used\n", isapnp_rdp);
+ printk(KERN_ERR
+ "isapnp: Read Data Register 0x%x already used\n",
+ isapnp_rdp);
#ifdef ISAPNP_REGION_OK
release_region(_PIDXR, 1);
#endif
@@ -1089,14 +1120,14 @@ static int __init isapnp_init(void)
isapnp_detected = 1;
if (isapnp_rdp < 0x203 || isapnp_rdp > 0x3ff) {
cards = isapnp_isolate();
- if (cards < 0 ||
- (isapnp_rdp < 0x203 || isapnp_rdp > 0x3ff)) {
+ if (cards < 0 || (isapnp_rdp < 0x203 || isapnp_rdp > 0x3ff)) {
#ifdef ISAPNP_REGION_OK
release_region(_PIDXR, 1);
#endif
release_region(_PNPWRP, 1);
isapnp_detected = 0;
- printk(KERN_INFO "isapnp: No Plug & Play device found\n");
+ printk(KERN_INFO
+ "isapnp: No Plug & Play device found\n");
return 0;
}
request_region(isapnp_rdp, 1, "isapnp read");
@@ -1104,19 +1135,23 @@ static int __init isapnp_init(void)
isapnp_build_device_list();
cards = 0;
- protocol_for_each_card(&isapnp_protocol,card) {
+ protocol_for_each_card(&isapnp_protocol, card) {
cards++;
if (isapnp_verbose) {
- printk(KERN_INFO "isapnp: Card '%s'\n", card->name[0]?card->name:"Unknown");
+ printk(KERN_INFO "isapnp: Card '%s'\n",
+ card->name[0] ? card->name : "Unknown");
if (isapnp_verbose < 2)
continue;
- card_for_each_dev(card,dev) {
- printk(KERN_INFO "isapnp: Device '%s'\n", dev->name[0]?dev->name:"Unknown");
+ card_for_each_dev(card, dev) {
+ printk(KERN_INFO "isapnp: Device '%s'\n",
+ dev->name[0] ? dev->name : "Unknown");
}
}
}
if (cards) {
- printk(KERN_INFO "isapnp: %i Plug & Play card%s detected total\n", cards, cards>1?"s":"");
+ printk(KERN_INFO
+ "isapnp: %i Plug & Play card%s detected total\n", cards,
+ cards > 1 ? "s" : "");
} else {
printk(KERN_INFO "isapnp: No Plug & Play card found\n");
}
@@ -1141,11 +1176,10 @@ __setup("noisapnp", isapnp_setup_disable);
static int __init isapnp_setup_isapnp(char *str)
{
- (void)((get_option(&str,&isapnp_rdp) == 2) &&
- (get_option(&str,&isapnp_reset) == 2) &&
- (get_option(&str,&isapnp_verbose) == 2));
+ (void)((get_option(&str, &isapnp_rdp) == 2) &&
+ (get_option(&str, &isapnp_reset) == 2) &&
+ (get_option(&str, &isapnp_verbose) == 2));
return 1;
}
__setup("isapnp=", isapnp_setup_isapnp);
-
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 40b724ebe23..3fbc0f9ffc2 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -2,7 +2,6 @@
* ISA Plug & Play support
* Copyright (c) by Jaroslav Kysela <perex@suse.cz>
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -16,7 +15,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/module.h>
@@ -54,7 +52,8 @@ static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence)
return (file->f_pos = new);
}
-static ssize_t isapnp_proc_bus_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
+ size_t nbytes, loff_t * ppos)
{
struct inode *ino = file->f_path.dentry->d_inode;
struct proc_dir_entry *dp = PDE(ino);
@@ -74,7 +73,7 @@ static ssize_t isapnp_proc_bus_read(struct file *file, char __user *buf, size_t
return -EINVAL;
isapnp_cfg_begin(dev->card->number, dev->number);
- for ( ; pos < 256 && cnt > 0; pos++, buf++, cnt--) {
+ for (; pos < 256 && cnt > 0; pos++, buf++, cnt--) {
unsigned char val;
val = isapnp_read_byte(pos);
__put_user(val, buf);
@@ -85,10 +84,9 @@ static ssize_t isapnp_proc_bus_read(struct file *file, char __user *buf, size_t
return nbytes;
}
-static const struct file_operations isapnp_proc_bus_file_operations =
-{
- .llseek = isapnp_proc_bus_lseek,
- .read = isapnp_proc_bus_read,
+static const struct file_operations isapnp_proc_bus_file_operations = {
+ .llseek = isapnp_proc_bus_lseek,
+ .read = isapnp_proc_bus_read,
};
static int isapnp_proc_attach_device(struct pnp_dev *dev)
@@ -139,13 +137,14 @@ static int __exit isapnp_proc_detach_bus(struct pnp_card *bus)
remove_proc_entry(name, isapnp_proc_bus_dir);
return 0;
}
-#endif /* MODULE */
+#endif /* MODULE */
int __init isapnp_proc_init(void)
{
struct pnp_dev *dev;
+
isapnp_proc_bus_dir = proc_mkdir("isapnp", proc_bus);
- protocol_for_each_dev(&isapnp_protocol,dev) {
+ protocol_for_each_dev(&isapnp_protocol, dev) {
isapnp_proc_attach_device(dev);
}
return 0;
@@ -167,4 +166,4 @@ int __exit isapnp_proc_done(void)
remove_proc_entry("isapnp", proc_bus);
return 0;
}
-#endif /* MODULE */
+#endif /* MODULE */
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 57e6ab1004d..3bda513a6bd 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -3,7 +3,6 @@
*
* based on isapnp.c resource management (c) Jaroslav Kysela <perex@suse.cz>
* Copyright 2003 Adam Belay <ambx1@neo.rr.com>
- *
*/
#include <linux/errno.h>
@@ -26,7 +25,8 @@ static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
return -EINVAL;
if (idx >= PNP_MAX_PORT) {
- pnp_err("More than 4 ports is incompatible with pnp specifications.");
+ pnp_err
+ ("More than 4 ports is incompatible with pnp specifications.");
/* pretend we were successful so at least the manager won't try again */
return 1;
}
@@ -41,11 +41,11 @@ static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
/* set the initial values */
*flags |= rule->flags | IORESOURCE_IO;
- *flags &= ~IORESOURCE_UNSET;
+ *flags &= ~IORESOURCE_UNSET;
if (!rule->size) {
*flags |= IORESOURCE_DISABLED;
- return 1; /* skip disabled resource requests */
+ return 1; /* skip disabled resource requests */
}
*start = rule->min;
@@ -70,7 +70,8 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
return -EINVAL;
if (idx >= PNP_MAX_MEM) {
- pnp_err("More than 8 mems is incompatible with pnp specifications.");
+ pnp_err
+ ("More than 8 mems is incompatible with pnp specifications.");
/* pretend we were successful so at least the manager won't try again */
return 1;
}
@@ -85,7 +86,7 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
/* set the initial values */
*flags |= rule->flags | IORESOURCE_MEM;
- *flags &= ~IORESOURCE_UNSET;
+ *flags &= ~IORESOURCE_UNSET;
/* convert pnp flags to standard Linux flags */
if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
@@ -99,11 +100,11 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
if (!rule->size) {
*flags |= IORESOURCE_DISABLED;
- return 1; /* skip disabled resource requests */
+ return 1; /* skip disabled resource requests */
}
*start = rule->min;
- *end = *start + rule->size -1;
+ *end = *start + rule->size - 1;
/* run through until pnp_check_mem is happy */
while (!pnp_check_mem(dev, idx)) {
@@ -115,7 +116,7 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
return 1;
}
-static int pnp_assign_irq(struct pnp_dev * dev, struct pnp_irq *rule, int idx)
+static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
{
resource_size_t *start, *end;
unsigned long *flags;
@@ -130,7 +131,8 @@ static int pnp_assign_irq(struct pnp_dev * dev, struct pnp_irq *rule, int idx)
return -EINVAL;
if (idx >= PNP_MAX_IRQ) {
- pnp_err("More than 2 irqs is incompatible with pnp specifications.");
+ pnp_err
+ ("More than 2 irqs is incompatible with pnp specifications.");
/* pretend we were successful so at least the manager won't try again */
return 1;
}
@@ -145,11 +147,11 @@ static int pnp_assign_irq(struct pnp_dev * dev, struct pnp_irq *rule, int idx)
/* set the initial values */
*flags |= rule->flags | IORESOURCE_IRQ;
- *flags &= ~IORESOURCE_UNSET;
+ *flags &= ~IORESOURCE_UNSET;
if (bitmap_empty(rule->map, PNP_IRQ_NR)) {
*flags |= IORESOURCE_DISABLED;
- return 1; /* skip disabled resource requests */
+ return 1; /* skip disabled resource requests */
}
/* TBD: need check for >16 IRQ */
@@ -159,9 +161,9 @@ static int pnp_assign_irq(struct pnp_dev * dev, struct pnp_irq *rule, int idx)
return 1;
}
for (i = 0; i < 16; i++) {
- if(test_bit(xtab[i], rule->map)) {
+ if (test_bit(xtab[i], rule->map)) {
*start = *end = xtab[i];
- if(pnp_check_irq(dev, idx))
+ if (pnp_check_irq(dev, idx))
return 1;
}
}
@@ -183,7 +185,8 @@ static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
return -EINVAL;
if (idx >= PNP_MAX_DMA) {
- pnp_err("More than 2 dmas is incompatible with pnp specifications.");
+ pnp_err
+ ("More than 2 dmas is incompatible with pnp specifications.");
/* pretend we were successful so at least the manager won't try again */
return 1;
}
@@ -198,17 +201,17 @@ static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
/* set the initial values */
*flags |= rule->flags | IORESOURCE_DMA;
- *flags &= ~IORESOURCE_UNSET;
+ *flags &= ~IORESOURCE_UNSET;
if (!rule->map) {
*flags |= IORESOURCE_DISABLED;
- return 1; /* skip disabled resource requests */
+ return 1; /* skip disabled resource requests */
}
for (i = 0; i < 8; i++) {
- if(rule->map & (1<<xtab[i])) {
+ if (rule->map & (1 << xtab[i])) {
*start = *end = xtab[i];
- if(pnp_check_dma(dev, idx))
+ if (pnp_check_dma(dev, idx))
return 1;
}
}
@@ -218,72 +221,80 @@ static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
/**
* pnp_init_resources - Resets a resource table to default values.
* @table: pointer to the desired resource table
- *
*/
void pnp_init_resource_table(struct pnp_resource_table *table)
{
int idx;
+
for (idx = 0; idx < PNP_MAX_IRQ; idx++) {
table->irq_resource[idx].name = NULL;
table->irq_resource[idx].start = -1;
table->irq_resource[idx].end = -1;
- table->irq_resource[idx].flags = IORESOURCE_IRQ | IORESOURCE_AUTO | IORESOURCE_UNSET;
+ table->irq_resource[idx].flags =
+ IORESOURCE_IRQ | IORESOURCE_AUTO | IORESOURCE_UNSET;
}
for (idx = 0; idx < PNP_MAX_DMA; idx++) {
table->dma_resource[idx].name = NULL;
table->dma_resource[idx].start = -1;
table->dma_resource[idx].end = -1;
- table->dma_resource[idx].flags = IORESOURCE_DMA | IORESOURCE_AUTO | IORESOURCE_UNSET;
+ table->dma_resource[idx].flags =
+ IORESOURCE_DMA | IORESOURCE_AUTO | IORESOURCE_UNSET;
}
for (idx = 0; idx < PNP_MAX_PORT; idx++) {
table->port_resource[idx].name = NULL;
table->port_resource[idx].start = 0;
table->port_resource[idx].end = 0;
- table->port_resource[idx].flags = IORESOURCE_IO | IORESOURCE_AUTO | IORESOURCE_UNSET;
+ table->port_resource[idx].flags =
+ IORESOURCE_IO | IORESOURCE_AUTO | IORESOURCE_UNSET;
}
for (idx = 0; idx < PNP_MAX_MEM; idx++) {
table->mem_resource[idx].name = NULL;
table->mem_resource[idx].start = 0;
table->mem_resource[idx].end = 0;
- table->mem_resource[idx].flags = IORESOURCE_MEM | IORESOURCE_AUTO | IORESOURCE_UNSET;
+ table->mem_resource[idx].flags =
+ IORESOURCE_MEM | IORESOURCE_AUTO | IORESOURCE_UNSET;
}
}
/**
* pnp_clean_resources - clears resources that were not manually set
* @res: the resources to clean
- *
*/
-static void pnp_clean_resource_table(struct pnp_resource_table * res)
+static void pnp_clean_resource_table(struct pnp_resource_table *res)
{
int idx;
+
for (idx = 0; idx < PNP_MAX_IRQ; idx++) {
if (!(res->irq_resource[idx].flags & IORESOURCE_AUTO))
continue;
res->irq_resource[idx].start = -1;
res->irq_resource[idx].end = -1;
- res->irq_resource[idx].flags = IORESOURCE_IRQ | IORESOURCE_AUTO | IORESOURCE_UNSET;
+ res->irq_resource[idx].flags =
+ IORESOURCE_IRQ | IORESOURCE_AUTO | IORESOURCE_UNSET;
}
for (idx = 0; idx < PNP_MAX_DMA; idx++) {
if (!(res->dma_resource[idx].flags & IORESOURCE_AUTO))
continue;
res->dma_resource[idx].start = -1;
res->dma_resource[idx].end = -1;
- res->dma_resource[idx].flags = IORESOURCE_DMA | IORESOURCE_AUTO | IORESOURCE_UNSET;
+ res->dma_resource[idx].flags =
+ IORESOURCE_DMA | IORESOURCE_AUTO | IORESOURCE_UNSET;
}
for (idx = 0; idx < PNP_MAX_PORT; idx++) {
if (!(res->port_resource[idx].flags & IORESOURCE_AUTO))
continue;
res->port_resource[idx].start = 0;
res->port_resource[idx].end = 0;
- res->port_resource[idx].flags = IORESOURCE_IO | IORESOURCE_AUTO | IORESOURCE_UNSET;
+ res->port_resource[idx].flags =
+ IORESOURCE_IO | IORESOURCE_AUTO | IORESOURCE_UNSET;
}
for (idx = 0; idx < PNP_MAX_MEM; idx++) {
if (!(res->mem_resource[idx].flags & IORESOURCE_AUTO))
continue;
res->mem_resource[idx].start = 0;
res->mem_resource[idx].end = 0;
- res->mem_resource[idx].flags = IORESOURCE_MEM | IORESOURCE_AUTO | IORESOURCE_UNSET;
+ res->mem_resource[idx].flags =
+ IORESOURCE_MEM | IORESOURCE_AUTO | IORESOURCE_UNSET;
}
}
@@ -306,7 +317,7 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
return -ENODEV;
down(&pnp_res_mutex);
- pnp_clean_resource_table(&dev->res); /* start with a fresh slate */
+ pnp_clean_resource_table(&dev->res); /* start with a fresh slate */
if (dev->independent) {
port = dev->independent->port;
mem = dev->independent->mem;
@@ -341,10 +352,11 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
if (depnum) {
struct pnp_option *dep;
int i;
- for (i=1,dep=dev->dependent; i<depnum; i++, dep=dep->next)
- if(!dep)
+ for (i = 1, dep = dev->dependent; i < depnum;
+ i++, dep = dep->next)
+ if (!dep)
goto fail;
- port =dep->port;
+ port = dep->port;
mem = dep->mem;
irq = dep->irq;
dma = dep->dma;
@@ -378,7 +390,7 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
up(&pnp_res_mutex);
return 1;
-fail:
+ fail:
pnp_clean_resource_table(&dev->res);
up(&pnp_res_mutex);
return 0;
@@ -392,10 +404,12 @@ fail:
*
* This function can be used by drivers that want to manually set their resources.
*/
-int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table * res, int mode)
+int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res,
+ int mode)
{
int i;
- struct pnp_resource_table * bak;
+ struct pnp_resource_table *bak;
+
if (!dev || !res)
return -EINVAL;
if (!pnp_can_configure(dev))
@@ -409,19 +423,19 @@ int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table * res,
dev->res = *res;
if (!(mode & PNP_CONFIG_FORCE)) {
for (i = 0; i < PNP_MAX_PORT; i++) {
- if(!pnp_check_port(dev,i))
+ if (!pnp_check_port(dev, i))
goto fail;
}
for (i = 0; i < PNP_MAX_MEM; i++) {
- if(!pnp_check_mem(dev,i))
+ if (!pnp_check_mem(dev, i))
goto fail;
}
for (i = 0; i < PNP_MAX_IRQ; i++) {
- if(!pnp_check_irq(dev,i))
+ if (!pnp_check_irq(dev, i))
goto fail;
}
for (i = 0; i < PNP_MAX_DMA; i++) {
- if(!pnp_check_dma(dev,i))
+ if (!pnp_check_dma(dev, i))
goto fail;
}
}
@@ -430,7 +444,7 @@ int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table * res,
kfree(bak);
return 0;
-fail:
+ fail:
dev->res = *bak;
up(&pnp_res_mutex);
kfree(bak);
@@ -440,18 +454,18 @@ fail:
/**
* pnp_auto_config_dev - automatically assigns resources to a device
* @dev: pointer to the desired device
- *
*/
int pnp_auto_config_dev(struct pnp_dev *dev)
{
struct pnp_option *dep;
int i = 1;
- if(!dev)
+ if (!dev)
return -EINVAL;
- if(!pnp_can_configure(dev)) {
- pnp_dbg("Device %s does not support resource configuration.", dev->dev.bus_id);
+ if (!pnp_can_configure(dev)) {
+ pnp_dbg("Device %s does not support resource configuration.",
+ dev->dev.bus_id);
return -ENODEV;
}
@@ -476,23 +490,22 @@ int pnp_auto_config_dev(struct pnp_dev *dev)
* pnp_start_dev - low-level start of the PnP device
* @dev: pointer to the desired device
*
- * assumes that resources have alread been allocated
+ * assumes that resources have already been allocated
*/
-
int pnp_start_dev(struct pnp_dev *dev)
{
if (!pnp_can_write(dev)) {
- pnp_dbg("Device %s does not support activation.", dev->dev.bus_id);
+ pnp_dbg("Device %s does not support activation.",
+ dev->dev.bus_id);
return -EINVAL;
}
- if (dev->protocol->set(dev, &dev->res)<0) {
+ if (dev->protocol->set(dev, &dev->res) < 0) {
pnp_err("Failed to activate device %s.", dev->dev.bus_id);
return -EIO;
}
pnp_info("Device %s activated.", dev->dev.bus_id);
-
return 0;
}
@@ -502,20 +515,19 @@ int pnp_start_dev(struct pnp_dev *dev)
*
* does not free resources
*/
-
int pnp_stop_dev(struct pnp_dev *dev)
{
if (!pnp_can_disable(dev)) {
- pnp_dbg("Device %s does not support disabling.", dev->dev.bus_id);
+ pnp_dbg("Device %s does not support disabling.",
+ dev->dev.bus_id);
return -EINVAL;
}
- if (dev->protocol->disable(dev)<0) {
+ if (dev->protocol->disable(dev) < 0) {
pnp_err("Failed to disable device %s.", dev->dev.bus_id);
return -EIO;
}
pnp_info("Device %s disabled.", dev->dev.bus_id);
-
return 0;
}
@@ -531,9 +543,8 @@ int pnp_activate_dev(struct pnp_dev *dev)
if (!dev)
return -EINVAL;
- if (dev->active) {
- return 0; /* the device is already active */
- }
+ if (dev->active)
+ return 0; /* the device is already active */
/* ensure resources are allocated */
if (pnp_auto_config_dev(dev))
@@ -544,7 +555,6 @@ int pnp_activate_dev(struct pnp_dev *dev)
return error;
dev->active = 1;
-
return 1;
}
@@ -558,11 +568,10 @@ int pnp_disable_dev(struct pnp_dev *dev)
{
int error;
- if (!dev)
- return -EINVAL;
- if (!dev->active) {
- return 0; /* the device is already disabled */
- }
+ if (!dev)
+ return -EINVAL;
+ if (!dev->active)
+ return 0; /* the device is already disabled */
error = pnp_stop_dev(dev);
if (error)
@@ -583,10 +592,9 @@ int pnp_disable_dev(struct pnp_dev *dev)
* @resource: pointer to resource to be changed
* @start: start of region
* @size: size of region
- *
*/
void pnp_resource_change(struct resource *resource, resource_size_t start,
- resource_size_t size)
+ resource_size_t size)
{
if (resource == NULL)
return;
@@ -595,11 +603,7 @@ void pnp_resource_change(struct resource *resource, resource_size_t start,
resource->end = start + size - 1;
}
-
EXPORT_SYMBOL(pnp_manual_config_dev);
-#if 0
-EXPORT_SYMBOL(pnp_auto_config_dev);
-#endif
EXPORT_SYMBOL(pnp_start_dev);
EXPORT_SYMBOL(pnp_stop_dev);
EXPORT_SYMBOL(pnp_activate_dev);
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index a00548799e9..616fc72190b 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -21,7 +21,10 @@
#include <linux/acpi.h>
#include <linux/pnp.h>
+#include <linux/mod_devicetable.h>
#include <acpi/acpi_bus.h>
+#include <acpi/actypes.h>
+
#include "pnpacpi.h"
static int num = 0;
@@ -31,17 +34,19 @@ static int num = 0;
* used by the kernel (PCI root, ...), as it is harmless and they were
* already present in pnpbios. But there is an exception for devices that
* have irqs (PIC, Timer) because we call acpi_register_gsi.
- * Finaly only devices that have a CRS method need to be in this list.
+ * Finally, only devices that have a CRS method need to be in this list.
*/
-static char __initdata excluded_id_list[] =
- "PNP0C09," /* EC */
- "PNP0C0F," /* Link device */
- "PNP0000," /* PIC */
- "PNP0100," /* Timer */
- ;
+static struct __initdata acpi_device_id excluded_id_list[] = {
+ {"PNP0C09", 0}, /* EC */
+ {"PNP0C0F", 0}, /* Link device */
+ {"PNP0000", 0}, /* PIC */
+ {"PNP0100", 0}, /* Timer */
+ {"", 0},
+};
+
static inline int is_exclusive_device(struct acpi_device *dev)
{
- return (!acpi_match_ids(dev, excluded_id_list));
+ return (!acpi_match_device_ids(dev, excluded_id_list));
}
/*
@@ -79,15 +84,18 @@ static void __init pnpidacpi_to_pnpid(char *id, char *str)
str[7] = '\0';
}
-static int pnpacpi_get_resources(struct pnp_dev * dev, struct pnp_resource_table * res)
+static int pnpacpi_get_resources(struct pnp_dev *dev,
+ struct pnp_resource_table *res)
{
acpi_status status;
- status = pnpacpi_parse_allocated_resource((acpi_handle)dev->data,
- &dev->res);
+
+ status = pnpacpi_parse_allocated_resource((acpi_handle) dev->data,
+ &dev->res);
return ACPI_FAILURE(status) ? -ENODEV : 0;
}
-static int pnpacpi_set_resources(struct pnp_dev * dev, struct pnp_resource_table * res)
+static int pnpacpi_set_resources(struct pnp_dev *dev,
+ struct pnp_resource_table *res)
{
acpi_handle handle = dev->data;
struct acpi_buffer buffer;
@@ -114,16 +122,36 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
acpi_status status;
/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
- status = acpi_evaluate_object((acpi_handle)dev->data,
- "_DIS", NULL, NULL);
+ status = acpi_evaluate_object((acpi_handle) dev->data,
+ "_DIS", NULL, NULL);
return ACPI_FAILURE(status) ? -ENODEV : 0;
}
+#ifdef CONFIG_ACPI_SLEEP
+static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
+{
+ return acpi_bus_set_power((acpi_handle) dev->data,
+ acpi_pm_device_sleep_state(&dev->dev,
+ device_may_wakeup
+ (&dev->dev),
+ NULL));
+}
+
+static int pnpacpi_resume(struct pnp_dev *dev)
+{
+ return acpi_bus_set_power((acpi_handle) dev->data, ACPI_STATE_D0);
+}
+#endif
+
static struct pnp_protocol pnpacpi_protocol = {
- .name = "Plug and Play ACPI",
- .get = pnpacpi_get_resources,
- .set = pnpacpi_set_resources,
+ .name = "Plug and Play ACPI",
+ .get = pnpacpi_get_resources,
+ .set = pnpacpi_set_resources,
.disable = pnpacpi_disable_resources,
+#ifdef CONFIG_ACPI_SLEEP
+ .suspend = pnpacpi_suspend,
+ .resume = pnpacpi_resume,
+#endif
};
static int __init pnpacpi_add_device(struct acpi_device *device)
@@ -135,17 +163,17 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
status = acpi_get_handle(device->handle, "_CRS", &temp);
if (ACPI_FAILURE(status) || !ispnpidacpi(acpi_device_hid(device)) ||
- is_exclusive_device(device))
+ is_exclusive_device(device))
return 0;
pnp_dbg("ACPI device : hid %s", acpi_device_hid(device));
- dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
if (!dev) {
pnp_err("Out of memory");
return -ENOMEM;
}
dev->data = device->handle;
- /* .enabled means if the device can decode the resources */
+ /* .enabled means the device can decode the resources */
dev->active = device->status.enabled;
status = acpi_get_handle(device->handle, "_SRS", &temp);
if (ACPI_SUCCESS(status))
@@ -175,20 +203,23 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
pnpidacpi_to_pnpid(acpi_device_hid(device), dev_id->id);
pnp_add_id(dev_id, dev);
- if(dev->active) {
+ if (dev->active) {
/* parse allocated resource */
- status = pnpacpi_parse_allocated_resource(device->handle, &dev->res);
+ status = pnpacpi_parse_allocated_resource(device->handle,
+ &dev->res);
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
- pnp_err("PnPACPI: METHOD_NAME__CRS failure for %s", dev_id->id);
+ pnp_err("PnPACPI: METHOD_NAME__CRS failure for %s",
+ dev_id->id);
goto err1;
}
}
- if(dev->capabilities & PNP_CONFIGURABLE) {
+ if (dev->capabilities & PNP_CONFIGURABLE) {
status = pnpacpi_parse_resource_option_data(device->handle,
- dev);
+ dev);
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
- pnp_err("PnPACPI: METHOD_NAME__PRS failure for %s", dev_id->id);
+ pnp_err("PnPACPI: METHOD_NAME__PRS failure for %s",
+ dev_id->id);
goto err1;
}
}
@@ -214,18 +245,19 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
if (!dev->active)
pnp_init_resource_table(&dev->res);
pnp_add_device(dev);
- num ++;
+ num++;
return AE_OK;
-err1:
+ err1:
kfree(dev_id);
-err:
+ err:
kfree(dev);
return -EINVAL;
}
static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
- u32 lvl, void *context, void **rv)
+ u32 lvl, void *context,
+ void **rv)
{
struct acpi_device *device;
@@ -238,23 +270,22 @@ static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
static int __init acpi_pnp_match(struct device *dev, void *_pnp)
{
- struct acpi_device *acpi = to_acpi_device(dev);
- struct pnp_dev *pnp = _pnp;
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pnp_dev *pnp = _pnp;
/* true means it matched */
return acpi->flags.hardware_id
- && !acpi_get_physical_device(acpi->handle)
- && compare_pnp_id(pnp->id, acpi->pnp.hardware_id);
+ && !acpi_get_physical_device(acpi->handle)
+ && compare_pnp_id(pnp->id, acpi->pnp.hardware_id);
}
-static int __init acpi_pnp_find_device(struct device *dev, acpi_handle *handle)
+static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
{
- struct device *adev;
- struct acpi_device *acpi;
+ struct device *adev;
+ struct acpi_device *acpi;
adev = bus_find_device(&acpi_bus_type, NULL,
- to_pnp_dev(dev),
- acpi_pnp_match);
+ to_pnp_dev(dev), acpi_pnp_match);
if (!adev)
return -ENODEV;
@@ -268,7 +299,7 @@ static int __init acpi_pnp_find_device(struct device *dev, acpi_handle *handle)
* pnpdev->dev.archdata.acpi_handle point to its ACPI sibling.
*/
static struct acpi_bus_type __initdata acpi_pnp_bus = {
- .bus = &pnp_bus_type,
+ .bus = &pnp_bus_type,
.find_device = acpi_pnp_find_device,
};
@@ -288,6 +319,7 @@ static int __init pnpacpi_init(void)
pnp_platform_devices = 1;
return 0;
}
+
subsys_initcall(pnpacpi_init);
static int __init pnpacpi_setup(char *str)
@@ -298,8 +330,5 @@ static int __init pnpacpi_setup(char *str)
pnpacpi_disabled = 1;
return 1;
}
-__setup("pnpacpi=", pnpacpi_setup);
-#if 0
-EXPORT_SYMBOL(pnpacpi_protocol);
-#endif
+__setup("pnpacpi=", pnpacpi_setup);
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 118ac9779b3..ce5027feb3d 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -40,8 +40,7 @@ static int irq_flags(int triggering, int polarity)
flag = IORESOURCE_IRQ_LOWLEVEL;
else
flag = IORESOURCE_IRQ_HIGHLEVEL;
- }
- else {
+ } else {
if (polarity == ACPI_ACTIVE_LOW)
flag = IORESOURCE_IRQ_LOWEDGE;
else
@@ -72,9 +71,9 @@ static void decode_irq_flags(int flag, int *triggering, int *polarity)
}
}
-static void
-pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, u32 gsi,
- int triggering, int polarity, int shareable)
+static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
+ u32 gsi, int triggering,
+ int polarity, int shareable)
{
int i = 0;
int irq;
@@ -83,12 +82,12 @@ pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, u32 gsi,
return;
while (!(res->irq_resource[i].flags & IORESOURCE_UNSET) &&
- i < PNP_MAX_IRQ)
+ i < PNP_MAX_IRQ)
i++;
if (i >= PNP_MAX_IRQ)
return;
- res->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag
+ res->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag
res->irq_resource[i].flags |= irq_flags(triggering, polarity);
irq = acpi_register_gsi(gsi, triggering, polarity);
if (irq < 0) {
@@ -147,17 +146,19 @@ static int dma_flags(int type, int bus_master, int transfer)
return flags;
}
-static void
-pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table *res, u32 dma,
- int type, int bus_master, int transfer)
+static void pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table *res,
+ u32 dma, int type,
+ int bus_master, int transfer)
{
int i = 0;
+
while (i < PNP_MAX_DMA &&
- !(res->dma_resource[i].flags & IORESOURCE_UNSET))
+ !(res->dma_resource[i].flags & IORESOURCE_UNSET))
i++;
if (i < PNP_MAX_DMA) {
- res->dma_resource[i].flags = IORESOURCE_DMA; // Also clears _UNSET flag
- res->dma_resource[i].flags |= dma_flags(type, bus_master, transfer);
+ res->dma_resource[i].flags = IORESOURCE_DMA; // Also clears _UNSET flag
+ res->dma_resource[i].flags |=
+ dma_flags(type, bus_master, transfer);
if (dma == -1) {
res->dma_resource[i].flags |= IORESOURCE_DISABLED;
return;
@@ -167,19 +168,19 @@ pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table *res, u32 dma,
}
}
-static void
-pnpacpi_parse_allocated_ioresource(struct pnp_resource_table *res,
- u64 io, u64 len, int io_decode)
+static void pnpacpi_parse_allocated_ioresource(struct pnp_resource_table *res,
+ u64 io, u64 len, int io_decode)
{
int i = 0;
+
while (!(res->port_resource[i].flags & IORESOURCE_UNSET) &&
- i < PNP_MAX_PORT)
+ i < PNP_MAX_PORT)
i++;
if (i < PNP_MAX_PORT) {
- res->port_resource[i].flags = IORESOURCE_IO; // Also clears _UNSET flag
+ res->port_resource[i].flags = IORESOURCE_IO; // Also clears _UNSET flag
if (io_decode == ACPI_DECODE_16)
res->port_resource[i].flags |= PNP_PORT_FLAG_16BITADDR;
- if (len <= 0 || (io + len -1) >= 0x10003) {
+ if (len <= 0 || (io + len - 1) >= 0x10003) {
res->port_resource[i].flags |= IORESOURCE_DISABLED;
return;
}
@@ -188,21 +189,22 @@ pnpacpi_parse_allocated_ioresource(struct pnp_resource_table *res,
}
}
-static void
-pnpacpi_parse_allocated_memresource(struct pnp_resource_table *res,
- u64 mem, u64 len, int write_protect)
+static void pnpacpi_parse_allocated_memresource(struct pnp_resource_table *res,
+ u64 mem, u64 len,
+ int write_protect)
{
int i = 0;
+
while (!(res->mem_resource[i].flags & IORESOURCE_UNSET) &&
- (i < PNP_MAX_MEM))
+ (i < PNP_MAX_MEM))
i++;
if (i < PNP_MAX_MEM) {
- res->mem_resource[i].flags = IORESOURCE_MEM; // Also clears _UNSET flag
+ res->mem_resource[i].flags = IORESOURCE_MEM; // Also clears _UNSET flag
if (len <= 0) {
res->mem_resource[i].flags |= IORESOURCE_DISABLED;
return;
}
- if(write_protect == ACPI_READ_WRITE_MEMORY)
+ if (write_protect == ACPI_READ_WRITE_MEMORY)
res->mem_resource[i].flags |= IORESOURCE_MEM_WRITEABLE;
res->mem_resource[i].start = mem;
@@ -210,9 +212,8 @@ pnpacpi_parse_allocated_memresource(struct pnp_resource_table *res,
}
}
-static void
-pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res_table,
- struct acpi_resource *res)
+static void pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res_table,
+ struct acpi_resource *res)
{
struct acpi_resource_address64 addr, *p = &addr;
acpi_status status;
@@ -220,7 +221,7 @@ pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res_table,
status = acpi_resource_to_address64(res, p);
if (!ACPI_SUCCESS(status)) {
pnp_warn("PnPACPI: failed to convert resource type %d",
- res->type);
+ res->type);
return;
}
@@ -229,17 +230,20 @@ pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res_table,
if (p->resource_type == ACPI_MEMORY_RANGE)
pnpacpi_parse_allocated_memresource(res_table,
- p->minimum, p->address_length, p->info.mem.write_protect);
+ p->minimum, p->address_length,
+ p->info.mem.write_protect);
else if (p->resource_type == ACPI_IO_RANGE)
pnpacpi_parse_allocated_ioresource(res_table,
- p->minimum, p->address_length,
- p->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16);
+ p->minimum, p->address_length,
+ p->granularity == 0xfff ? ACPI_DECODE_10 :
+ ACPI_DECODE_16);
}
static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
- void *data)
+ void *data)
{
- struct pnp_resource_table *res_table = (struct pnp_resource_table *)data;
+ struct pnp_resource_table *res_table =
+ (struct pnp_resource_table *)data;
int i;
switch (res->type) {
@@ -260,17 +264,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
case ACPI_RESOURCE_TYPE_DMA:
if (res->data.dma.channel_count > 0)
pnpacpi_parse_allocated_dmaresource(res_table,
- res->data.dma.channels[0],
- res->data.dma.type,
- res->data.dma.bus_master,
- res->data.dma.transfer);
+ res->data.dma.channels[0],
+ res->data.dma.type,
+ res->data.dma.bus_master,
+ res->data.dma.transfer);
break;
case ACPI_RESOURCE_TYPE_IO:
pnpacpi_parse_allocated_ioresource(res_table,
- res->data.io.minimum,
- res->data.io.address_length,
- res->data.io.io_decode);
+ res->data.io.minimum,
+ res->data.io.address_length,
+ res->data.io.io_decode);
break;
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
@@ -279,9 +283,9 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
case ACPI_RESOURCE_TYPE_FIXED_IO:
pnpacpi_parse_allocated_ioresource(res_table,
- res->data.fixed_io.address,
- res->data.fixed_io.address_length,
- ACPI_DECODE_10);
+ res->data.fixed_io.address,
+ res->data.fixed_io.address_length,
+ ACPI_DECODE_10);
break;
case ACPI_RESOURCE_TYPE_VENDOR:
@@ -292,21 +296,21 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
case ACPI_RESOURCE_TYPE_MEMORY24:
pnpacpi_parse_allocated_memresource(res_table,
- res->data.memory24.minimum,
- res->data.memory24.address_length,
- res->data.memory24.write_protect);
+ res->data.memory24.minimum,
+ res->data.memory24.address_length,
+ res->data.memory24.write_protect);
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
pnpacpi_parse_allocated_memresource(res_table,
- res->data.memory32.minimum,
- res->data.memory32.address_length,
- res->data.memory32.write_protect);
+ res->data.memory32.minimum,
+ res->data.memory32.address_length,
+ res->data.memory32.write_protect);
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
pnpacpi_parse_allocated_memresource(res_table,
- res->data.fixed_memory32.address,
- res->data.fixed_memory32.address_length,
- res->data.fixed_memory32.write_protect);
+ res->data.fixed_memory32.address,
+ res->data.fixed_memory32.address_length,
+ res->data.fixed_memory32.write_protect);
break;
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
@@ -343,18 +347,21 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
return AE_OK;
}
-acpi_status pnpacpi_parse_allocated_resource(acpi_handle handle, struct pnp_resource_table *res)
+acpi_status pnpacpi_parse_allocated_resource(acpi_handle handle,
+ struct pnp_resource_table * res)
{
/* Blank the resource table values */
pnp_init_resource_table(res);
- return acpi_walk_resources(handle, METHOD_NAME__CRS, pnpacpi_allocated_resource, res);
+ return acpi_walk_resources(handle, METHOD_NAME__CRS,
+ pnpacpi_allocated_resource, res);
}
-static void pnpacpi_parse_dma_option(struct pnp_option *option, struct acpi_resource_dma *p)
+static void pnpacpi_parse_dma_option(struct pnp_option *option,
+ struct acpi_resource_dma *p)
{
int i;
- struct pnp_dma * dma;
+ struct pnp_dma *dma;
if (p->channel_count == 0)
return;
@@ -362,18 +369,16 @@ static void pnpacpi_parse_dma_option(struct pnp_option *option, struct acpi_reso
if (!dma)
return;
- for(i = 0; i < p->channel_count; i++)
+ for (i = 0; i < p->channel_count; i++)
dma->map |= 1 << p->channels[i];
dma->flags = dma_flags(p->type, p->bus_master, p->transfer);
pnp_register_dma_resource(option, dma);
- return;
}
-
static void pnpacpi_parse_irq_option(struct pnp_option *option,
- struct acpi_resource_irq *p)
+ struct acpi_resource_irq *p)
{
int i;
struct pnp_irq *irq;
@@ -384,17 +389,16 @@ static void pnpacpi_parse_irq_option(struct pnp_option *option,
if (!irq)
return;
- for(i = 0; i < p->interrupt_count; i++)
+ for (i = 0; i < p->interrupt_count; i++)
if (p->interrupts[i])
__set_bit(p->interrupts[i], irq->map);
irq->flags = irq_flags(p->triggering, p->polarity);
pnp_register_irq_resource(option, irq);
- return;
}
static void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
- struct acpi_resource_extended_irq *p)
+ struct acpi_resource_extended_irq *p)
{
int i;
struct pnp_irq *irq;
@@ -405,18 +409,16 @@ static void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
if (!irq)
return;
- for(i = 0; i < p->interrupt_count; i++)
+ for (i = 0; i < p->interrupt_count; i++)
if (p->interrupts[i])
__set_bit(p->interrupts[i], irq->map);
irq->flags = irq_flags(p->triggering, p->polarity);
pnp_register_irq_resource(option, irq);
- return;
}
-static void
-pnpacpi_parse_port_option(struct pnp_option *option,
- struct acpi_resource_io *io)
+static void pnpacpi_parse_port_option(struct pnp_option *option,
+ struct acpi_resource_io *io)
{
struct pnp_port *port;
@@ -430,14 +432,12 @@ pnpacpi_parse_port_option(struct pnp_option *option,
port->align = io->alignment;
port->size = io->address_length;
port->flags = ACPI_DECODE_16 == io->io_decode ?
- PNP_PORT_FLAG_16BITADDR : 0;
+ PNP_PORT_FLAG_16BITADDR : 0;
pnp_register_port_resource(option, port);
- return;
}
-static void
-pnpacpi_parse_fixed_port_option(struct pnp_option *option,
- struct acpi_resource_fixed_io *io)
+static void pnpacpi_parse_fixed_port_option(struct pnp_option *option,
+ struct acpi_resource_fixed_io *io)
{
struct pnp_port *port;
@@ -451,12 +451,10 @@ pnpacpi_parse_fixed_port_option(struct pnp_option *option,
port->align = 0;
port->flags = PNP_PORT_FLAG_FIXED;
pnp_register_port_resource(option, port);
- return;
}
-static void
-pnpacpi_parse_mem24_option(struct pnp_option *option,
- struct acpi_resource_memory24 *p)
+static void pnpacpi_parse_mem24_option(struct pnp_option *option,
+ struct acpi_resource_memory24 *p)
{
struct pnp_mem *mem;
@@ -471,15 +469,13 @@ pnpacpi_parse_mem24_option(struct pnp_option *option,
mem->size = p->address_length;
mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
- IORESOURCE_MEM_WRITEABLE : 0;
+ IORESOURCE_MEM_WRITEABLE : 0;
pnp_register_mem_resource(option, mem);
- return;
}
-static void
-pnpacpi_parse_mem32_option(struct pnp_option *option,
- struct acpi_resource_memory32 *p)
+static void pnpacpi_parse_mem32_option(struct pnp_option *option,
+ struct acpi_resource_memory32 *p)
{
struct pnp_mem *mem;
@@ -494,15 +490,13 @@ pnpacpi_parse_mem32_option(struct pnp_option *option,
mem->size = p->address_length;
mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
- IORESOURCE_MEM_WRITEABLE : 0;
+ IORESOURCE_MEM_WRITEABLE : 0;
pnp_register_mem_resource(option, mem);
- return;
}
-static void
-pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
- struct acpi_resource_fixed_memory32 *p)
+static void pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
+ struct acpi_resource_fixed_memory32 *p)
{
struct pnp_mem *mem;
@@ -516,14 +510,13 @@ pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
mem->align = 0;
mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
- IORESOURCE_MEM_WRITEABLE : 0;
+ IORESOURCE_MEM_WRITEABLE : 0;
pnp_register_mem_resource(option, mem);
- return;
}
-static void
-pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
+static void pnpacpi_parse_address_option(struct pnp_option *option,
+ struct acpi_resource *r)
{
struct acpi_resource_address64 addr, *p = &addr;
acpi_status status;
@@ -532,7 +525,8 @@ pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
status = acpi_resource_to_address64(r, p);
if (!ACPI_SUCCESS(status)) {
- pnp_warn("PnPACPI: failed to convert resource type %d", r->type);
+ pnp_warn("PnPACPI: failed to convert resource type %d",
+ r->type);
return;
}
@@ -547,7 +541,8 @@ pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
mem->size = p->address_length;
mem->align = 0;
mem->flags = (p->info.mem.write_protect ==
- ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE : 0;
+ ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE
+ : 0;
pnp_register_mem_resource(option, mem);
} else if (p->resource_type == ACPI_IO_RANGE) {
port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
@@ -568,109 +563,108 @@ struct acpipnp_parse_option_s {
};
static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
- void *data)
+ void *data)
{
int priority = 0;
- struct acpipnp_parse_option_s *parse_data = (struct acpipnp_parse_option_s *)data;
+ struct acpipnp_parse_option_s *parse_data =
+ (struct acpipnp_parse_option_s *)data;
struct pnp_dev *dev = parse_data->dev;
struct pnp_option *option = parse_data->option;
switch (res->type) {
- case ACPI_RESOURCE_TYPE_IRQ:
- pnpacpi_parse_irq_option(option, &res->data.irq);
- break;
+ case ACPI_RESOURCE_TYPE_IRQ:
+ pnpacpi_parse_irq_option(option, &res->data.irq);
+ break;
- case ACPI_RESOURCE_TYPE_DMA:
- pnpacpi_parse_dma_option(option, &res->data.dma);
- break;
+ case ACPI_RESOURCE_TYPE_DMA:
+ pnpacpi_parse_dma_option(option, &res->data.dma);
+ break;
- case ACPI_RESOURCE_TYPE_START_DEPENDENT:
- switch (res->data.start_dpf.compatibility_priority) {
- case ACPI_GOOD_CONFIGURATION:
- priority = PNP_RES_PRIORITY_PREFERRED;
- break;
-
- case ACPI_ACCEPTABLE_CONFIGURATION:
- priority = PNP_RES_PRIORITY_ACCEPTABLE;
- break;
-
- case ACPI_SUB_OPTIMAL_CONFIGURATION:
- priority = PNP_RES_PRIORITY_FUNCTIONAL;
- break;
- default:
- priority = PNP_RES_PRIORITY_INVALID;
- break;
- }
- /* TBD: Considering performace/robustness bits */
- option = pnp_register_dependent_option(dev, priority);
- if (!option)
- return AE_ERROR;
- parse_data->option = option;
+ case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+ switch (res->data.start_dpf.compatibility_priority) {
+ case ACPI_GOOD_CONFIGURATION:
+ priority = PNP_RES_PRIORITY_PREFERRED;
break;
- case ACPI_RESOURCE_TYPE_END_DEPENDENT:
- /*only one EndDependentFn is allowed*/
- if (!parse_data->option_independent) {
- pnp_warn("PnPACPI: more than one EndDependentFn");
- return AE_ERROR;
- }
- parse_data->option = parse_data->option_independent;
- parse_data->option_independent = NULL;
+ case ACPI_ACCEPTABLE_CONFIGURATION:
+ priority = PNP_RES_PRIORITY_ACCEPTABLE;
break;
- case ACPI_RESOURCE_TYPE_IO:
- pnpacpi_parse_port_option(option, &res->data.io);
+ case ACPI_SUB_OPTIMAL_CONFIGURATION:
+ priority = PNP_RES_PRIORITY_FUNCTIONAL;
break;
-
- case ACPI_RESOURCE_TYPE_FIXED_IO:
- pnpacpi_parse_fixed_port_option(option,
- &res->data.fixed_io);
+ default:
+ priority = PNP_RES_PRIORITY_INVALID;
break;
+ }
+ /* TBD: Consider performance/robustness bits */
+ option = pnp_register_dependent_option(dev, priority);
+ if (!option)
+ return AE_ERROR;
+ parse_data->option = option;
+ break;
- case ACPI_RESOURCE_TYPE_VENDOR:
- case ACPI_RESOURCE_TYPE_END_TAG:
- break;
+ case ACPI_RESOURCE_TYPE_END_DEPENDENT:
+ /*only one EndDependentFn is allowed */
+ if (!parse_data->option_independent) {
+ pnp_warn("PnPACPI: more than one EndDependentFn");
+ return AE_ERROR;
+ }
+ parse_data->option = parse_data->option_independent;
+ parse_data->option_independent = NULL;
+ break;
- case ACPI_RESOURCE_TYPE_MEMORY24:
- pnpacpi_parse_mem24_option(option, &res->data.memory24);
- break;
+ case ACPI_RESOURCE_TYPE_IO:
+ pnpacpi_parse_port_option(option, &res->data.io);
+ break;
- case ACPI_RESOURCE_TYPE_MEMORY32:
- pnpacpi_parse_mem32_option(option, &res->data.memory32);
- break;
+ case ACPI_RESOURCE_TYPE_FIXED_IO:
+ pnpacpi_parse_fixed_port_option(option, &res->data.fixed_io);
+ break;
- case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
- pnpacpi_parse_fixed_mem32_option(option,
- &res->data.fixed_memory32);
- break;
+ case ACPI_RESOURCE_TYPE_VENDOR:
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ break;
- case ACPI_RESOURCE_TYPE_ADDRESS16:
- case ACPI_RESOURCE_TYPE_ADDRESS32:
- case ACPI_RESOURCE_TYPE_ADDRESS64:
- pnpacpi_parse_address_option(option, res);
- break;
+ case ACPI_RESOURCE_TYPE_MEMORY24:
+ pnpacpi_parse_mem24_option(option, &res->data.memory24);
+ break;
- case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
- break;
+ case ACPI_RESOURCE_TYPE_MEMORY32:
+ pnpacpi_parse_mem32_option(option, &res->data.memory32);
+ break;
- case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
- pnpacpi_parse_ext_irq_option(option,
- &res->data.extended_irq);
- break;
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ pnpacpi_parse_fixed_mem32_option(option,
+ &res->data.fixed_memory32);
+ break;
- case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
- break;
+ case ACPI_RESOURCE_TYPE_ADDRESS16:
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ pnpacpi_parse_address_option(option, res);
+ break;
- default:
- pnp_warn("PnPACPI: unknown resource type %d", res->type);
- return AE_ERROR;
+ case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
+ break;
+
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ pnpacpi_parse_ext_irq_option(option, &res->data.extended_irq);
+ break;
+
+ case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
+ break;
+
+ default:
+ pnp_warn("PnPACPI: unknown resource type %d", res->type);
+ return AE_ERROR;
}
return AE_OK;
}
acpi_status pnpacpi_parse_resource_option_data(acpi_handle handle,
- struct pnp_dev *dev)
+ struct pnp_dev * dev)
{
acpi_status status;
struct acpipnp_parse_option_s parse_data;
@@ -681,7 +675,7 @@ acpi_status pnpacpi_parse_resource_option_data(acpi_handle handle,
parse_data.option_independent = parse_data.option;
parse_data.dev = dev;
status = acpi_walk_resources(handle, METHOD_NAME__PRS,
- pnpacpi_option_resource, &parse_data);
+ pnpacpi_option_resource, &parse_data);
return status;
}
@@ -709,7 +703,7 @@ static int pnpacpi_supported_resource(struct acpi_resource *res)
* Set resource
*/
static acpi_status pnpacpi_count_resources(struct acpi_resource *res,
- void *data)
+ void *data)
{
int *res_cnt = (int *)data;
@@ -732,14 +726,14 @@ static acpi_status pnpacpi_type_resources(struct acpi_resource *res, void *data)
}
int pnpacpi_build_resource_template(acpi_handle handle,
- struct acpi_buffer *buffer)
+ struct acpi_buffer *buffer)
{
struct acpi_resource *resource;
int res_cnt = 0;
acpi_status status;
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
- pnpacpi_count_resources, &res_cnt);
+ pnpacpi_count_resources, &res_cnt);
if (ACPI_FAILURE(status)) {
pnp_err("Evaluate _CRS failed");
return -EINVAL;
@@ -753,7 +747,7 @@ int pnpacpi_build_resource_template(acpi_handle handle,
pnp_dbg("Res cnt %d", res_cnt);
resource = (struct acpi_resource *)buffer->pointer;
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
- pnpacpi_type_resources, &resource);
+ pnpacpi_type_resources, &resource);
if (ACPI_FAILURE(status)) {
kfree(buffer->pointer);
pnp_err("Evaluate _CRS failed");
@@ -766,7 +760,7 @@ int pnpacpi_build_resource_template(acpi_handle handle,
}
static void pnpacpi_encode_irq(struct acpi_resource *resource,
- struct resource *p)
+ struct resource *p)
{
int triggering, polarity;
@@ -782,7 +776,7 @@ static void pnpacpi_encode_irq(struct acpi_resource *resource,
}
static void pnpacpi_encode_ext_irq(struct acpi_resource *resource,
- struct resource *p)
+ struct resource *p)
{
int triggering, polarity;
@@ -799,32 +793,32 @@ static void pnpacpi_encode_ext_irq(struct acpi_resource *resource,
}
static void pnpacpi_encode_dma(struct acpi_resource *resource,
- struct resource *p)
+ struct resource *p)
{
/* Note: pnp_assign_dma will copy pnp_dma->flags into p->flags */
switch (p->flags & IORESOURCE_DMA_SPEED_MASK) {
- case IORESOURCE_DMA_TYPEA:
- resource->data.dma.type = ACPI_TYPE_A;
- break;
- case IORESOURCE_DMA_TYPEB:
- resource->data.dma.type = ACPI_TYPE_B;
- break;
- case IORESOURCE_DMA_TYPEF:
- resource->data.dma.type = ACPI_TYPE_F;
- break;
- default:
- resource->data.dma.type = ACPI_COMPATIBILITY;
+ case IORESOURCE_DMA_TYPEA:
+ resource->data.dma.type = ACPI_TYPE_A;
+ break;
+ case IORESOURCE_DMA_TYPEB:
+ resource->data.dma.type = ACPI_TYPE_B;
+ break;
+ case IORESOURCE_DMA_TYPEF:
+ resource->data.dma.type = ACPI_TYPE_F;
+ break;
+ default:
+ resource->data.dma.type = ACPI_COMPATIBILITY;
}
switch (p->flags & IORESOURCE_DMA_TYPE_MASK) {
- case IORESOURCE_DMA_8BIT:
- resource->data.dma.transfer = ACPI_TRANSFER_8;
- break;
- case IORESOURCE_DMA_8AND16BIT:
- resource->data.dma.transfer = ACPI_TRANSFER_8_16;
- break;
- default:
- resource->data.dma.transfer = ACPI_TRANSFER_16;
+ case IORESOURCE_DMA_8BIT:
+ resource->data.dma.transfer = ACPI_TRANSFER_8;
+ break;
+ case IORESOURCE_DMA_8AND16BIT:
+ resource->data.dma.transfer = ACPI_TRANSFER_8_16;
+ break;
+ default:
+ resource->data.dma.transfer = ACPI_TRANSFER_16;
}
resource->data.dma.bus_master = !!(p->flags & IORESOURCE_DMA_MASTER);
@@ -833,31 +827,31 @@ static void pnpacpi_encode_dma(struct acpi_resource *resource,
}
static void pnpacpi_encode_io(struct acpi_resource *resource,
- struct resource *p)
+ struct resource *p)
{
/* Note: pnp_assign_port will copy pnp_port->flags into p->flags */
- resource->data.io.io_decode = (p->flags & PNP_PORT_FLAG_16BITADDR)?
- ACPI_DECODE_16 : ACPI_DECODE_10;
+ resource->data.io.io_decode = (p->flags & PNP_PORT_FLAG_16BITADDR) ?
+ ACPI_DECODE_16 : ACPI_DECODE_10;
resource->data.io.minimum = p->start;
resource->data.io.maximum = p->end;
- resource->data.io.alignment = 0; /* Correct? */
+ resource->data.io.alignment = 0; /* Correct? */
resource->data.io.address_length = p->end - p->start + 1;
}
static void pnpacpi_encode_fixed_io(struct acpi_resource *resource,
- struct resource *p)
+ struct resource *p)
{
resource->data.fixed_io.address = p->start;
resource->data.fixed_io.address_length = p->end - p->start + 1;
}
static void pnpacpi_encode_mem24(struct acpi_resource *resource,
- struct resource *p)
+ struct resource *p)
{
/* Note: pnp_assign_mem will copy pnp_mem->flags into p->flags */
resource->data.memory24.write_protect =
- (p->flags & IORESOURCE_MEM_WRITEABLE) ?
- ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
+ (p->flags & IORESOURCE_MEM_WRITEABLE) ?
+ ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
resource->data.memory24.minimum = p->start;
resource->data.memory24.maximum = p->end;
resource->data.memory24.alignment = 0;
@@ -865,11 +859,11 @@ static void pnpacpi_encode_mem24(struct acpi_resource *resource,
}
static void pnpacpi_encode_mem32(struct acpi_resource *resource,
- struct resource *p)
+ struct resource *p)
{
resource->data.memory32.write_protect =
- (p->flags & IORESOURCE_MEM_WRITEABLE) ?
- ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
+ (p->flags & IORESOURCE_MEM_WRITEABLE) ?
+ ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
resource->data.memory32.minimum = p->start;
resource->data.memory32.maximum = p->end;
resource->data.memory32.alignment = 0;
@@ -877,74 +871,77 @@ static void pnpacpi_encode_mem32(struct acpi_resource *resource,
}
static void pnpacpi_encode_fixed_mem32(struct acpi_resource *resource,
- struct resource *p)
+ struct resource *p)
{
resource->data.fixed_memory32.write_protect =
- (p->flags & IORESOURCE_MEM_WRITEABLE) ?
- ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
+ (p->flags & IORESOURCE_MEM_WRITEABLE) ?
+ ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
resource->data.fixed_memory32.address = p->start;
resource->data.fixed_memory32.address_length = p->end - p->start + 1;
}
int pnpacpi_encode_resources(struct pnp_resource_table *res_table,
- struct acpi_buffer *buffer)
+ struct acpi_buffer *buffer)
{
int i = 0;
/* pnpacpi_build_resource_template allocates extra mem */
- int res_cnt = (buffer->length - 1)/sizeof(struct acpi_resource) - 1;
- struct acpi_resource *resource = (struct acpi_resource*)buffer->pointer;
+ int res_cnt = (buffer->length - 1) / sizeof(struct acpi_resource) - 1;
+ struct acpi_resource *resource =
+ (struct acpi_resource *)buffer->pointer;
int port = 0, irq = 0, dma = 0, mem = 0;
pnp_dbg("res cnt %d", res_cnt);
while (i < res_cnt) {
- switch(resource->type) {
+ switch (resource->type) {
case ACPI_RESOURCE_TYPE_IRQ:
pnp_dbg("Encode irq");
pnpacpi_encode_irq(resource,
- &res_table->irq_resource[irq]);
+ &res_table->irq_resource[irq]);
irq++;
break;
case ACPI_RESOURCE_TYPE_DMA:
pnp_dbg("Encode dma");
pnpacpi_encode_dma(resource,
- &res_table->dma_resource[dma]);
+ &res_table->dma_resource[dma]);
dma++;
break;
case ACPI_RESOURCE_TYPE_IO:
pnp_dbg("Encode io");
pnpacpi_encode_io(resource,
- &res_table->port_resource[port]);
+ &res_table->port_resource[port]);
port++;
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
pnp_dbg("Encode fixed io");
pnpacpi_encode_fixed_io(resource,
- &res_table->port_resource[port]);
+ &res_table->
+ port_resource[port]);
port++;
break;
case ACPI_RESOURCE_TYPE_MEMORY24:
pnp_dbg("Encode mem24");
pnpacpi_encode_mem24(resource,
- &res_table->mem_resource[mem]);
+ &res_table->mem_resource[mem]);
mem++;
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
pnp_dbg("Encode mem32");
pnpacpi_encode_mem32(resource,
- &res_table->mem_resource[mem]);
+ &res_table->mem_resource[mem]);
mem++;
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
pnp_dbg("Encode fixed mem32");
pnpacpi_encode_fixed_mem32(resource,
- &res_table->mem_resource[mem]);
+ &res_table->
+ mem_resource[mem]);
mem++;
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
pnp_dbg("Encode ext irq");
pnpacpi_encode_ext_irq(resource,
- &res_table->irq_resource[irq]);
+ &res_table->irq_resource[irq]);
irq++;
break;
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
@@ -956,7 +953,7 @@ int pnpacpi_encode_resources(struct pnp_resource_table *res_table,
case ACPI_RESOURCE_TYPE_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
- default: /* other type */
+ default: /* other type */
pnp_warn("unknown resource type %d", resource->type);
return -EINVAL;
}
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index a1f0b0ba2bf..5dba68fe33f 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -1,6 +1,5 @@
/*
* bioscalls.c - the lowlevel layer of the PnPBIOS driver
- *
*/
#include <linux/types.h>
@@ -26,11 +25,10 @@
#include "pnpbios.h"
static struct {
- u16 offset;
- u16 segment;
+ u16 offset;
+ u16 segment;
} pnp_bios_callpoint;
-
/*
* These are some opcodes for a "static asmlinkage"
* As this code is *not* executed inside the linux kernel segment, but in a
@@ -44,8 +42,7 @@ static struct {
asmlinkage void pnp_bios_callfunc(void);
-__asm__(
- ".text \n"
+__asm__(".text \n"
__ALIGN_STR "\n"
"pnp_bios_callfunc:\n"
" pushl %edx \n"
@@ -55,8 +52,7 @@ __asm__(
" lcallw *pnp_bios_callpoint\n"
" addl $16, %esp \n"
" lret \n"
- ".previous \n"
-);
+ ".previous \n");
#define Q2_SET_SEL(cpu, selname, address, size) \
do { \
@@ -78,7 +74,6 @@ u32 pnp_bios_is_utter_crap = 0;
static spinlock_t pnp_bios_lock;
-
/*
* Support Functions
*/
@@ -97,7 +92,7 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
* PnP BIOSes are generally not terribly re-entrant.
* Also, don't rely on them to save everything correctly.
*/
- if(pnp_bios_is_utter_crap)
+ if (pnp_bios_is_utter_crap)
return PNP_FUNCTION_NOT_SUPPORTED;
cpu = get_cpu();
@@ -113,112 +108,128 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
if (ts2_size)
Q2_SET_SEL(smp_processor_id(), PNP_TS2, ts2_base, ts2_size);
- __asm__ __volatile__(
- "pushl %%ebp\n\t"
- "pushl %%edi\n\t"
- "pushl %%esi\n\t"
- "pushl %%ds\n\t"
- "pushl %%es\n\t"
- "pushl %%fs\n\t"
- "pushl %%gs\n\t"
- "pushfl\n\t"
- "movl %%esp, pnp_bios_fault_esp\n\t"
- "movl $1f, pnp_bios_fault_eip\n\t"
- "lcall %5,%6\n\t"
- "1:popfl\n\t"
- "popl %%gs\n\t"
- "popl %%fs\n\t"
- "popl %%es\n\t"
- "popl %%ds\n\t"
- "popl %%esi\n\t"
- "popl %%edi\n\t"
- "popl %%ebp\n\t"
- : "=a" (status)
- : "0" ((func) | (((u32)arg1) << 16)),
- "b" ((arg2) | (((u32)arg3) << 16)),
- "c" ((arg4) | (((u32)arg5) << 16)),
- "d" ((arg6) | (((u32)arg7) << 16)),
- "i" (PNP_CS32),
- "i" (0)
- : "memory"
- );
+ __asm__ __volatile__("pushl %%ebp\n\t"
+ "pushl %%edi\n\t"
+ "pushl %%esi\n\t"
+ "pushl %%ds\n\t"
+ "pushl %%es\n\t"
+ "pushl %%fs\n\t"
+ "pushl %%gs\n\t"
+ "pushfl\n\t"
+ "movl %%esp, pnp_bios_fault_esp\n\t"
+ "movl $1f, pnp_bios_fault_eip\n\t"
+ "lcall %5,%6\n\t"
+ "1:popfl\n\t"
+ "popl %%gs\n\t"
+ "popl %%fs\n\t"
+ "popl %%es\n\t"
+ "popl %%ds\n\t"
+ "popl %%esi\n\t"
+ "popl %%edi\n\t"
+ "popl %%ebp\n\t":"=a"(status)
+ :"0"((func) | (((u32) arg1) << 16)),
+ "b"((arg2) | (((u32) arg3) << 16)),
+ "c"((arg4) | (((u32) arg5) << 16)),
+ "d"((arg6) | (((u32) arg7) << 16)),
+ "i"(PNP_CS32), "i"(0)
+ :"memory");
spin_unlock_irqrestore(&pnp_bios_lock, flags);
get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
put_cpu();
/* If we get here and this is set then the PnP BIOS faulted on us. */
- if(pnp_bios_is_utter_crap)
- {
- printk(KERN_ERR "PnPBIOS: Warning! Your PnP BIOS caused a fatal error. Attempting to continue\n");
- printk(KERN_ERR "PnPBIOS: You may need to reboot with the \"pnpbios=off\" option to operate stably\n");
- printk(KERN_ERR "PnPBIOS: Check with your vendor for an updated BIOS\n");
+ if (pnp_bios_is_utter_crap) {
+ printk(KERN_ERR
+ "PnPBIOS: Warning! Your PnP BIOS caused a fatal error. Attempting to continue\n");
+ printk(KERN_ERR
+ "PnPBIOS: You may need to reboot with the \"pnpbios=off\" option to operate stably\n");
+ printk(KERN_ERR
+ "PnPBIOS: Check with your vendor for an updated BIOS\n");
}
return status;
}
-void pnpbios_print_status(const char * module, u16 status)
+void pnpbios_print_status(const char *module, u16 status)
{
- switch(status) {
+ switch (status) {
case PNP_SUCCESS:
printk(KERN_ERR "PnPBIOS: %s: function successful\n", module);
break;
case PNP_NOT_SET_STATICALLY:
- printk(KERN_ERR "PnPBIOS: %s: unable to set static resources\n", module);
+ printk(KERN_ERR "PnPBIOS: %s: unable to set static resources\n",
+ module);
break;
case PNP_UNKNOWN_FUNCTION:
- printk(KERN_ERR "PnPBIOS: %s: invalid function number passed\n", module);
+ printk(KERN_ERR "PnPBIOS: %s: invalid function number passed\n",
+ module);
break;
case PNP_FUNCTION_NOT_SUPPORTED:
- printk(KERN_ERR "PnPBIOS: %s: function not supported on this system\n", module);
+ printk(KERN_ERR
+ "PnPBIOS: %s: function not supported on this system\n",
+ module);
break;
case PNP_INVALID_HANDLE:
printk(KERN_ERR "PnPBIOS: %s: invalid handle\n", module);
break;
case PNP_BAD_PARAMETER:
- printk(KERN_ERR "PnPBIOS: %s: invalid parameters were passed\n", module);
+ printk(KERN_ERR "PnPBIOS: %s: invalid parameters were passed\n",
+ module);
break;
case PNP_SET_FAILED:
- printk(KERN_ERR "PnPBIOS: %s: unable to set resources\n", module);
+ printk(KERN_ERR "PnPBIOS: %s: unable to set resources\n",
+ module);
break;
case PNP_EVENTS_NOT_PENDING:
printk(KERN_ERR "PnPBIOS: %s: no events are pending\n", module);
break;
case PNP_SYSTEM_NOT_DOCKED:
- printk(KERN_ERR "PnPBIOS: %s: the system is not docked\n", module);
+ printk(KERN_ERR "PnPBIOS: %s: the system is not docked\n",
+ module);
break;
case PNP_NO_ISA_PNP_CARDS:
- printk(KERN_ERR "PnPBIOS: %s: no isapnp cards are installed on this system\n", module);
+ printk(KERN_ERR
+ "PnPBIOS: %s: no isapnp cards are installed on this system\n",
+ module);
break;
case PNP_UNABLE_TO_DETERMINE_DOCK_CAPABILITIES:
- printk(KERN_ERR "PnPBIOS: %s: cannot determine the capabilities of the docking station\n", module);
+ printk(KERN_ERR
+ "PnPBIOS: %s: cannot determine the capabilities of the docking station\n",
+ module);
break;
case PNP_CONFIG_CHANGE_FAILED_NO_BATTERY:
- printk(KERN_ERR "PnPBIOS: %s: unable to undock, the system does not have a battery\n", module);
+ printk(KERN_ERR
+ "PnPBIOS: %s: unable to undock, the system does not have a battery\n",
+ module);
break;
case PNP_CONFIG_CHANGE_FAILED_RESOURCE_CONFLICT:
- printk(KERN_ERR "PnPBIOS: %s: could not dock due to resource conflicts\n", module);
+ printk(KERN_ERR
+ "PnPBIOS: %s: could not dock due to resource conflicts\n",
+ module);
break;
case PNP_BUFFER_TOO_SMALL:
- printk(KERN_ERR "PnPBIOS: %s: the buffer passed is too small\n", module);
+ printk(KERN_ERR "PnPBIOS: %s: the buffer passed is too small\n",
+ module);
break;
case PNP_USE_ESCD_SUPPORT:
printk(KERN_ERR "PnPBIOS: %s: use ESCD instead\n", module);
break;
case PNP_MESSAGE_NOT_SUPPORTED:
- printk(KERN_ERR "PnPBIOS: %s: the message is unsupported\n", module);
+ printk(KERN_ERR "PnPBIOS: %s: the message is unsupported\n",
+ module);
break;
case PNP_HARDWARE_ERROR:
- printk(KERN_ERR "PnPBIOS: %s: a hardware failure has occured\n", module);
+ printk(KERN_ERR "PnPBIOS: %s: a hardware failure has occured\n",
+ module);
break;
default:
- printk(KERN_ERR "PnPBIOS: %s: unexpected status 0x%x\n", module, status);
+ printk(KERN_ERR "PnPBIOS: %s: unexpected status 0x%x\n", module,
+ status);
break;
}
}
-
/*
* PnP BIOS Low Level Calls
*/
@@ -243,19 +254,22 @@ void pnpbios_print_status(const char * module, u16 status)
static int __pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
{
u16 status;
+
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
- status = call_pnp_bios(PNP_GET_NUM_SYS_DEV_NODES, 0, PNP_TS1, 2, PNP_TS1, PNP_DS, 0, 0,
- data, sizeof(struct pnp_dev_node_info), NULL, 0);
+ status = call_pnp_bios(PNP_GET_NUM_SYS_DEV_NODES, 0, PNP_TS1, 2,
+ PNP_TS1, PNP_DS, 0, 0, data,
+ sizeof(struct pnp_dev_node_info), NULL, 0);
data->no_nodes &= 0xff;
return status;
}
int pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
{
- int status = __pnp_bios_dev_node_info( data );
- if ( status )
- pnpbios_print_status( "dev_node_info", status );
+ int status = __pnp_bios_dev_node_info(data);
+
+ if (status)
+ pnpbios_print_status("dev_node_info", status);
return status;
}
@@ -273,17 +287,20 @@ int pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
* or volatile current (0) config
* Output: *nodenum=next node or 0xff if no more nodes
*/
-static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data)
+static int __pnp_bios_get_dev_node(u8 *nodenum, char boot,
+ struct pnp_bios_node *data)
{
u16 status;
u16 tmp_nodenum;
+
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
- if ( !boot && pnpbios_dont_use_current_config )
+ if (!boot && pnpbios_dont_use_current_config)
return PNP_FUNCTION_NOT_SUPPORTED;
tmp_nodenum = *nodenum;
- status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2, boot ? 2 : 1, PNP_DS, 0,
- &tmp_nodenum, sizeof(tmp_nodenum), data, 65536);
+ status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2,
+ boot ? 2 : 1, PNP_DS, 0, &tmp_nodenum,
+ sizeof(tmp_nodenum), data, 65536);
*nodenum = tmp_nodenum;
return status;
}
@@ -291,104 +308,66 @@ static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node
int pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data)
{
int status;
- status = __pnp_bios_get_dev_node( nodenum, boot, data );
- if ( status )
- pnpbios_print_status( "get_dev_node", status );
+
+ status = __pnp_bios_get_dev_node(nodenum, boot, data);
+ if (status)
+ pnpbios_print_status("get_dev_node", status);
return status;
}
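For reference, a minimal sketch (not part of the patch) of how the next-node contract documented above is consumed: the caller seeds *nodenum with 0 and each successful call advances it, with 0xff meaning the walk is finished. The helper name example_walk_nodes is invented; it assumes the pnpbios.h declarations and that node_info was already filled in by pnp_bios_dev_node_info(), mirroring build_devlist() in core.c further down.

/* Sketch only -- hypothetical caller, not added by this patch. */
static void example_walk_nodes(void)
{
	struct pnp_bios_node *node;
	u8 nodenum = 0;

	node = kzalloc(node_info.max_node_size, GFP_KERNEL);
	if (!node)
		return;
	while (nodenum < 0xff) {
		if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node))
			break;
		/* on success nodenum has advanced to the next node (0xff = done);
		 * node->handle, node->eisa_id and node->data describe this one */
	}
	kfree(node);
}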
-
/*
* Call PnP BIOS with function 0x02, "set system device node"
* Input: *nodenum = desired node,
* boot = whether to set nonvolatile boot (!=0)
* or volatile current (0) config
*/
-static int __pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data)
+static int __pnp_bios_set_dev_node(u8 nodenum, char boot,
+ struct pnp_bios_node *data)
{
u16 status;
+
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
- if ( !boot && pnpbios_dont_use_current_config )
+ if (!boot && pnpbios_dont_use_current_config)
return PNP_FUNCTION_NOT_SUPPORTED;
- status = call_pnp_bios(PNP_SET_SYS_DEV_NODE, nodenum, 0, PNP_TS1, boot ? 2 : 1, PNP_DS, 0, 0,
- data, 65536, NULL, 0);
+ status = call_pnp_bios(PNP_SET_SYS_DEV_NODE, nodenum, 0, PNP_TS1,
+ boot ? 2 : 1, PNP_DS, 0, 0, data, 65536, NULL,
+ 0);
return status;
}
int pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data)
{
int status;
- status = __pnp_bios_set_dev_node( nodenum, boot, data );
- if ( status ) {
- pnpbios_print_status( "set_dev_node", status );
+
+ status = __pnp_bios_set_dev_node(nodenum, boot, data);
+ if (status) {
+ pnpbios_print_status("set_dev_node", status);
return status;
}
- if ( !boot ) { /* Update devlist */
- status = pnp_bios_get_dev_node( &nodenum, boot, data );
- if ( status )
+ if (!boot) { /* Update devlist */
+ status = pnp_bios_get_dev_node(&nodenum, boot, data);
+ if (status)
return status;
}
return status;
}
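A hedged sketch of the read/modify/write cycle these two wrappers exist for, following the pattern of pnpbios_set_resources() in core.c below; example_update_node is an invented name and the buffer size again comes from node_info.

/* Sketch only -- hypothetical get/modify/set cycle. */
static int example_update_node(u8 nodenum)
{
	struct pnp_bios_node *node;
	int ret;

	node = kzalloc(node_info.max_node_size, GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	ret = pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node);
	if (ret)
		goto out;
	/* ... edit the resource data stream in node->data here ... */
	ret = pnp_bios_set_dev_node(node->handle, (char)PNPMODE_DYNAMIC, node);
out:
	kfree(node);
	return ret;
}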
-#if needed
-/*
- * Call PnP BIOS with function 0x03, "get event"
- */
-static int pnp_bios_get_event(u16 *event)
-{
- u16 status;
- if (!pnp_bios_present())
- return PNP_FUNCTION_NOT_SUPPORTED;
- status = call_pnp_bios(PNP_GET_EVENT, 0, PNP_TS1, PNP_DS, 0, 0 ,0 ,0,
- event, sizeof(u16), NULL, 0);
- return status;
-}
-#endif
-
-#if needed
-/*
- * Call PnP BIOS with function 0x04, "send message"
- */
-static int pnp_bios_send_message(u16 message)
-{
- u16 status;
- if (!pnp_bios_present())
- return PNP_FUNCTION_NOT_SUPPORTED;
- status = call_pnp_bios(PNP_SEND_MESSAGE, message, PNP_DS, 0, 0, 0, 0, 0, 0, 0, 0, 0);
- return status;
-}
-#endif
-
/*
* Call PnP BIOS with function 0x05, "get docking station information"
*/
int pnp_bios_dock_station_info(struct pnp_docking_station_info *data)
{
u16 status;
- if (!pnp_bios_present())
- return PNP_FUNCTION_NOT_SUPPORTED;
- status = call_pnp_bios(PNP_GET_DOCKING_STATION_INFORMATION, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0,
- data, sizeof(struct pnp_docking_station_info), NULL, 0);
- return status;
-}
-#if needed
-/*
- * Call PnP BIOS with function 0x09, "set statically allocated resource
- * information"
- */
-static int pnp_bios_set_stat_res(char *info)
-{
- u16 status;
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
- status = call_pnp_bios(PNP_SET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0,
- info, *((u16 *) info), 0, 0);
+ status = call_pnp_bios(PNP_GET_DOCKING_STATION_INFORMATION, 0, PNP_TS1,
+ PNP_DS, 0, 0, 0, 0, data,
+ sizeof(struct pnp_docking_station_info), NULL,
+ 0);
return status;
}
-#endif
/*
* Call PnP BIOS with function 0x0a, "get statically allocated resource
@@ -397,36 +376,23 @@ static int pnp_bios_set_stat_res(char *info)
static int __pnp_bios_get_stat_res(char *info)
{
u16 status;
+
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
- status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0,
- info, 65536, NULL, 0);
+ status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1,
+ PNP_DS, 0, 0, 0, 0, info, 65536, NULL, 0);
return status;
}
int pnp_bios_get_stat_res(char *info)
{
int status;
- status = __pnp_bios_get_stat_res( info );
- if ( status )
- pnpbios_print_status( "get_stat_res", status );
- return status;
-}
-#if needed
-/*
- * Call PnP BIOS with function 0x0b, "get APM id table"
- */
-static int pnp_bios_apm_id_table(char *table, u16 *size)
-{
- u16 status;
- if (!pnp_bios_present())
- return PNP_FUNCTION_NOT_SUPPORTED;
- status = call_pnp_bios(PNP_GET_APM_ID_TABLE, 0, PNP_TS2, 0, PNP_TS1, PNP_DS, 0, 0,
- table, *size, size, sizeof(u16));
+ status = __pnp_bios_get_stat_res(info);
+ if (status)
+ pnpbios_print_status("get_stat_res", status);
return status;
}
-#endif
/*
* Call PnP BIOS with function 0x40, "get isa pnp configuration structure"
@@ -434,19 +400,22 @@ static int pnp_bios_apm_id_table(char *table, u16 *size)
static int __pnp_bios_isapnp_config(struct pnp_isa_config_struc *data)
{
u16 status;
+
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
- status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0,
- data, sizeof(struct pnp_isa_config_struc), NULL, 0);
+ status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS,
+ 0, 0, 0, 0, data,
+ sizeof(struct pnp_isa_config_struc), NULL, 0);
return status;
}
int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data)
{
int status;
- status = __pnp_bios_isapnp_config( data );
- if ( status )
- pnpbios_print_status( "isapnp_config", status );
+
+ status = __pnp_bios_isapnp_config(data);
+ if (status)
+ pnpbios_print_status("isapnp_config", status);
return status;
}
@@ -456,19 +425,22 @@ int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data)
static int __pnp_bios_escd_info(struct escd_info_struc *data)
{
u16 status;
+
if (!pnp_bios_present())
return ESCD_FUNCTION_NOT_SUPPORTED;
- status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4, PNP_TS1, PNP_DS,
- data, sizeof(struct escd_info_struc), NULL, 0);
+ status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4,
+ PNP_TS1, PNP_DS, data,
+ sizeof(struct escd_info_struc), NULL, 0);
return status;
}
int pnp_bios_escd_info(struct escd_info_struc *data)
{
int status;
- status = __pnp_bios_escd_info( data );
- if ( status )
- pnpbios_print_status( "escd_info", status );
+
+ status = __pnp_bios_escd_info(data);
+ if (status)
+ pnpbios_print_status("escd_info", status);
return status;
}
@@ -479,57 +451,42 @@ int pnp_bios_escd_info(struct escd_info_struc *data)
static int __pnp_bios_read_escd(char *data, u32 nvram_base)
{
u16 status;
+
if (!pnp_bios_present())
return ESCD_FUNCTION_NOT_SUPPORTED;
- status = call_pnp_bios(PNP_READ_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0, 0,
- data, 65536, __va(nvram_base), 65536);
+ status = call_pnp_bios(PNP_READ_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0,
+ 0, data, 65536, __va(nvram_base), 65536);
return status;
}
int pnp_bios_read_escd(char *data, u32 nvram_base)
{
int status;
- status = __pnp_bios_read_escd( data, nvram_base );
- if ( status )
- pnpbios_print_status( "read_escd", status );
- return status;
-}
-#if needed
-/*
- * Call PnP BIOS function 0x43, "write ESCD"
- */
-static int pnp_bios_write_escd(char *data, u32 nvram_base)
-{
- u16 status;
- if (!pnp_bios_present())
- return ESCD_FUNCTION_NOT_SUPPORTED;
- status = call_pnp_bios(PNP_WRITE_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0, 0,
- data, 65536, __va(nvram_base), 65536);
+ status = __pnp_bios_read_escd(data, nvram_base);
+ if (status)
+ pnpbios_print_status("read_escd", status);
return status;
}
-#endif
-
-
-/*
- * Initialization
- */
void pnpbios_calls_init(union pnp_bios_install_struct *header)
{
int i;
+
spin_lock_init(&pnp_bios_lock);
pnp_bios_callpoint.offset = header->fields.pm16offset;
pnp_bios_callpoint.segment = PNP_CS16;
set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
- for (i = 0; i < NR_CPUS; i++) {
- struct desc_struct *gdt = get_cpu_gdt_table(i);
- if (!gdt)
- continue;
- set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], &pnp_bios_callfunc);
- set_base(gdt[GDT_ENTRY_PNPBIOS_CS16], __va(header->fields.pm16cseg));
- set_base(gdt[GDT_ENTRY_PNPBIOS_DS], __va(header->fields.pm16dseg));
- }
+ for (i = 0; i < NR_CPUS; i++) {
+ struct desc_struct *gdt = get_cpu_gdt_table(i);
+ if (!gdt)
+ continue;
+ set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], &pnp_bios_callfunc);
+ set_base(gdt[GDT_ENTRY_PNPBIOS_CS16],
+ __va(header->fields.pm16cseg));
+ set_base(gdt[GDT_ENTRY_PNPBIOS_DS],
+ __va(header->fields.pm16dseg));
+ }
}
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index ed112ee1601..3692a099b45 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -32,7 +32,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
+
/* Change Log
*
* Adam Belay - <ambx1@neo.rr.com> - March 16, 2003
@@ -71,14 +71,13 @@
#include "pnpbios.h"
-
/*
*
* PnP BIOS INTERFACE
*
*/
-static union pnp_bios_install_struct * pnp_bios_install = NULL;
+static union pnp_bios_install_struct *pnp_bios_install = NULL;
int pnp_bios_present(void)
{
@@ -101,36 +100,35 @@ static struct completion unload_sem;
/*
* (Much of this belongs in a shared routine somewhere)
*/
-
static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
{
- char *argv [3], **envp, *buf, *scratch;
+ char *argv[3], **envp, *buf, *scratch;
int i = 0, value;
- if (!current->fs->root) {
+ if (!current->fs->root)
return -EAGAIN;
- }
- if (!(envp = kcalloc(20, sizeof (char *), GFP_KERNEL))) {
+ if (!(envp = kcalloc(20, sizeof(char *), GFP_KERNEL)))
return -ENOMEM;
- }
if (!(buf = kzalloc(256, GFP_KERNEL))) {
- kfree (envp);
+ kfree(envp);
return -ENOMEM;
}
- /* FIXME: if there are actual users of this, it should be integrated into
- * the driver core and use the usual infrastructure like sysfs and uevents */
- argv [0] = "/sbin/pnpbios";
- argv [1] = "dock";
- argv [2] = NULL;
+ /* FIXME: if there are actual users of this, it should be
+ * integrated into the driver core and use the usual infrastructure
+ * like sysfs and uevents
+ */
+ argv[0] = "/sbin/pnpbios";
+ argv[1] = "dock";
+ argv[2] = NULL;
/* minimal command environment */
- envp [i++] = "HOME=/";
- envp [i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+ envp[i++] = "HOME=/";
+ envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
#ifdef DEBUG
/* hint that policy agent should enter no-stdout debug mode */
- envp [i++] = "DEBUG=kernel";
+ envp[i++] = "DEBUG=kernel";
#endif
/* extensible set of named bus-specific parameters,
* supporting multiple driver selection algorithms.
@@ -138,33 +136,33 @@ static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
scratch = buf;
/* action: add, remove */
- envp [i++] = scratch;
- scratch += sprintf (scratch, "ACTION=%s", dock?"add":"remove") + 1;
+ envp[i++] = scratch;
+ scratch += sprintf(scratch, "ACTION=%s", dock ? "add" : "remove") + 1;
/* Report the ident for the dock */
- envp [i++] = scratch;
- scratch += sprintf (scratch, "DOCK=%x/%x/%x",
- info->location_id, info->serial, info->capabilities);
+ envp[i++] = scratch;
+ scratch += sprintf(scratch, "DOCK=%x/%x/%x",
+ info->location_id, info->serial, info->capabilities);
envp[i] = NULL;
-
- value = call_usermodehelper (argv [0], argv, envp, UMH_WAIT_EXEC);
- kfree (buf);
- kfree (envp);
+
+	value = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+ kfree(buf);
+ kfree(envp);
return 0;
}
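The helper invoked above receives "dock" as its first argument plus the ACTION and DOCK environment variables built from the scratch buffer. A hypothetical userspace /sbin/pnpbios handler (illustration only, not shipped with the kernel) could look like this:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *action = getenv("ACTION");	/* "add" or "remove" */
	const char *dock = getenv("DOCK");	/* "location/serial/capabilities" */

	if (argc < 2 || strcmp(argv[1], "dock") != 0 || !action || !dock)
		return 1;
	printf("dock event: %s (%s)\n", action, dock);
	return 0;
}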
/*
* Poll the PnP docking at regular intervals
*/
-static int pnp_dock_thread(void * unused)
+static int pnp_dock_thread(void *unused)
{
static struct pnp_docking_station_info now;
int docked = -1, d = 0;
+
set_freezable();
- while (!unloading)
- {
+ while (!unloading) {
int status;
-
+
/*
* Poll every 2 seconds
*/
@@ -175,30 +173,29 @@ static int pnp_dock_thread(void * unused)
status = pnp_bios_dock_station_info(&now);
- switch(status)
- {
+ switch (status) {
/*
* No dock to manage
*/
- case PNP_FUNCTION_NOT_SUPPORTED:
- complete_and_exit(&unload_sem, 0);
- case PNP_SYSTEM_NOT_DOCKED:
- d = 0;
- break;
- case PNP_SUCCESS:
- d = 1;
- break;
- default:
- pnpbios_print_status( "pnp_dock_thread", status );
- continue;
+ case PNP_FUNCTION_NOT_SUPPORTED:
+ complete_and_exit(&unload_sem, 0);
+ case PNP_SYSTEM_NOT_DOCKED:
+ d = 0;
+ break;
+ case PNP_SUCCESS:
+ d = 1;
+ break;
+ default:
+ pnpbios_print_status("pnp_dock_thread", status);
+ continue;
}
- if(d != docked)
- {
- if(pnp_dock_event(d, &now)==0)
- {
+ if (d != docked) {
+ if (pnp_dock_event(d, &now) == 0) {
docked = d;
#if 0
- printk(KERN_INFO "PnPBIOS: Docking station %stached\n", docked?"at":"de");
+ printk(KERN_INFO
+ "PnPBIOS: Docking station %stached\n",
+ docked ? "at" : "de");
#endif
}
}
@@ -206,21 +203,21 @@ static int pnp_dock_thread(void * unused)
complete_and_exit(&unload_sem, 0);
}
-#endif /* CONFIG_HOTPLUG */
+#endif /* CONFIG_HOTPLUG */
-static int pnpbios_get_resources(struct pnp_dev * dev, struct pnp_resource_table * res)
+static int pnpbios_get_resources(struct pnp_dev *dev,
+ struct pnp_resource_table *res)
{
u8 nodenum = dev->number;
- struct pnp_bios_node * node;
+ struct pnp_bios_node *node;
- /* just in case */
- if(!pnpbios_is_dynamic(dev))
+ if (!pnpbios_is_dynamic(dev))
return -EPERM;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -1;
- if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) {
+ if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
kfree(node);
return -ENODEV;
}
@@ -230,24 +227,24 @@ static int pnpbios_get_resources(struct pnp_dev * dev, struct pnp_resource_table
return 0;
}
-static int pnpbios_set_resources(struct pnp_dev * dev, struct pnp_resource_table * res)
+static int pnpbios_set_resources(struct pnp_dev *dev,
+ struct pnp_resource_table *res)
{
u8 nodenum = dev->number;
- struct pnp_bios_node * node;
+ struct pnp_bios_node *node;
int ret;
- /* just in case */
if (!pnpbios_is_dynamic(dev))
return -EPERM;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -1;
- if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) {
+ if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
kfree(node);
return -ENODEV;
}
- if(pnpbios_write_resources_to_node(res, node)<0) {
+ if (pnpbios_write_resources_to_node(res, node) < 0) {
kfree(node);
return -1;
}
@@ -258,18 +255,19 @@ static int pnpbios_set_resources(struct pnp_dev * dev, struct pnp_resource_table
return ret;
}
-static void pnpbios_zero_data_stream(struct pnp_bios_node * node)
+static void pnpbios_zero_data_stream(struct pnp_bios_node *node)
{
- unsigned char * p = (char *)node->data;
- unsigned char * end = (char *)(node->data + node->size);
+ unsigned char *p = (char *)node->data;
+ unsigned char *end = (char *)(node->data + node->size);
unsigned int len;
int i;
+
while ((char *)p < (char *)end) {
- if(p[0] & 0x80) { /* large tag */
+ if (p[0] & 0x80) { /* large tag */
len = (p[2] << 8) | p[1];
p += 3;
} else {
- if (((p[0]>>3) & 0x0f) == 0x0f)
+ if (((p[0] >> 3) & 0x0f) == 0x0f)
return;
len = p[0] & 0x07;
p += 1;
@@ -278,24 +276,24 @@ static void pnpbios_zero_data_stream(struct pnp_bios_node * node)
p[i] = 0;
p += len;
}
- printk(KERN_ERR "PnPBIOS: Resource structure did not contain an end tag.\n");
+ printk(KERN_ERR
+ "PnPBIOS: Resource structure did not contain an end tag.\n");
}
static int pnpbios_disable_resources(struct pnp_dev *dev)
{
- struct pnp_bios_node * node;
+ struct pnp_bios_node *node;
u8 nodenum = dev->number;
int ret;
- /* just in case */
- if(dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev))
+ if (dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev))
return -EPERM;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -ENOMEM;
- if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) {
+ if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
kfree(node);
return -ENODEV;
}
@@ -311,22 +309,22 @@ static int pnpbios_disable_resources(struct pnp_dev *dev)
/* PnP Layer support */
struct pnp_protocol pnpbios_protocol = {
- .name = "Plug and Play BIOS",
- .get = pnpbios_get_resources,
- .set = pnpbios_set_resources,
+ .name = "Plug and Play BIOS",
+ .get = pnpbios_get_resources,
+ .set = pnpbios_set_resources,
.disable = pnpbios_disable_resources,
};
-static int insert_device(struct pnp_dev *dev, struct pnp_bios_node * node)
+static int insert_device(struct pnp_dev *dev, struct pnp_bios_node *node)
{
- struct list_head * pos;
- struct pnp_dev * pnp_dev;
+ struct list_head *pos;
+ struct pnp_dev *pnp_dev;
struct pnp_id *dev_id;
char id[8];
/* check if the device is already added */
dev->number = node->handle;
- list_for_each (pos, &pnpbios_protocol.devices){
+ list_for_each(pos, &pnpbios_protocol.devices) {
pnp_dev = list_entry(pos, struct pnp_dev, protocol_list);
if (dev->number == pnp_dev->number)
return -1;
@@ -336,8 +334,8 @@ static int insert_device(struct pnp_dev *dev, struct pnp_bios_node * node)
dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
if (!dev_id)
return -1;
- pnpid32_to_pnpid(node->eisa_id,id);
- memcpy(dev_id->id,id,7);
+ pnpid32_to_pnpid(node->eisa_id, id);
+ memcpy(dev_id->id, id, 7);
pnp_add_id(dev_id, dev);
pnpbios_parse_data_stream(dev, node);
dev->active = pnp_is_active(dev);
@@ -375,35 +373,41 @@ static void __init build_devlist(void)
if (!node)
return;
- for(nodenum=0; nodenum<0xff; ) {
+ for (nodenum = 0; nodenum < 0xff;) {
u8 thisnodenum = nodenum;
/* eventually we will want to use PNPMODE_STATIC here but for now
* dynamic will help us catch buggy bioses to add to the blacklist.
*/
if (!pnpbios_dont_use_current_config) {
- if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node))
+			if (pnp_bios_get_dev_node(&nodenum,
+			    (char)PNPMODE_DYNAMIC, node))
break;
} else {
- if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_STATIC, node))
+			if (pnp_bios_get_dev_node(&nodenum,
+			    (char)PNPMODE_STATIC, node))
break;
}
nodes_got++;
- dev = kzalloc(sizeof (struct pnp_dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
if (!dev)
break;
- if(insert_device(dev,node)<0)
+ if (insert_device(dev, node) < 0)
kfree(dev);
else
devs++;
if (nodenum <= thisnodenum) {
- printk(KERN_ERR "PnPBIOS: build_devlist: Node number 0x%x is out of sequence following node 0x%x. Aborting.\n", (unsigned int)nodenum, (unsigned int)thisnodenum);
+ printk(KERN_ERR
+ "PnPBIOS: build_devlist: Node number 0x%x is out of sequence following node 0x%x. Aborting.\n",
+ (unsigned int)nodenum,
+ (unsigned int)thisnodenum);
break;
}
}
kfree(node);
- printk(KERN_INFO "PnPBIOS: %i node%s reported by PnP BIOS; %i recorded by driver\n",
- nodes_got, nodes_got != 1 ? "s" : "", devs);
+ printk(KERN_INFO
+ "PnPBIOS: %i node%s reported by PnP BIOS; %i recorded by driver\n",
+ nodes_got, nodes_got != 1 ? "s" : "", devs);
}
/*
@@ -412,8 +416,8 @@ static void __init build_devlist(void)
*
*/
-static int pnpbios_disabled; /* = 0 */
-int pnpbios_dont_use_current_config; /* = 0 */
+static int pnpbios_disabled;
+int pnpbios_dont_use_current_config;
#ifndef MODULE
static int __init pnpbios_setup(char *str)
@@ -422,9 +426,9 @@ static int __init pnpbios_setup(char *str)
while ((str != NULL) && (*str != '\0')) {
if (strncmp(str, "off", 3) == 0)
- pnpbios_disabled=1;
+ pnpbios_disabled = 1;
if (strncmp(str, "on", 2) == 0)
- pnpbios_disabled=0;
+ pnpbios_disabled = 0;
invert = (strncmp(str, "no-", 3) == 0);
if (invert)
str += 3;
@@ -453,35 +457,41 @@ static int __init pnpbios_probe_system(void)
printk(KERN_INFO "PnPBIOS: Scanning system for PnP BIOS support...\n");
/*
- * Search the defined area (0xf0000-0xffff0) for a valid PnP BIOS
+ * Search the defined area (0xf0000-0xffff0) for a valid PnP BIOS
* structure and, if one is found, sets up the selectors and
* entry points
*/
- for (check = (union pnp_bios_install_struct *) __va(0xf0000);
- check < (union pnp_bios_install_struct *) __va(0xffff0);
+ for (check = (union pnp_bios_install_struct *)__va(0xf0000);
+ check < (union pnp_bios_install_struct *)__va(0xffff0);
check = (void *)check + 16) {
if (check->fields.signature != PNP_SIGNATURE)
continue;
- printk(KERN_INFO "PnPBIOS: Found PnP BIOS installation structure at 0x%p\n", check);
+ printk(KERN_INFO
+ "PnPBIOS: Found PnP BIOS installation structure at 0x%p\n",
+ check);
length = check->fields.length;
if (!length) {
- printk(KERN_ERR "PnPBIOS: installation structure is invalid, skipping\n");
+ printk(KERN_ERR
+ "PnPBIOS: installation structure is invalid, skipping\n");
continue;
}
for (sum = 0, i = 0; i < length; i++)
sum += check->chars[i];
if (sum) {
- printk(KERN_ERR "PnPBIOS: installation structure is corrupted, skipping\n");
+ printk(KERN_ERR
+ "PnPBIOS: installation structure is corrupted, skipping\n");
continue;
}
if (check->fields.version < 0x10) {
- printk(KERN_WARNING "PnPBIOS: PnP BIOS version %d.%d is not supported\n",
+ printk(KERN_WARNING
+ "PnPBIOS: PnP BIOS version %d.%d is not supported\n",
check->fields.version >> 4,
check->fields.version & 15);
continue;
}
- printk(KERN_INFO "PnPBIOS: PnP BIOS version %d.%d, entry 0x%x:0x%x, dseg 0x%x\n",
- check->fields.version >> 4, check->fields.version & 15,
+ printk(KERN_INFO
+ "PnPBIOS: PnP BIOS version %d.%d, entry 0x%x:0x%x, dseg 0x%x\n",
+ check->fields.version >> 4, check->fields.version & 15,
check->fields.pm16cseg, check->fields.pm16offset,
check->fields.pm16dseg);
pnp_bios_install = check;
@@ -499,25 +509,25 @@ static int __init exploding_pnp_bios(struct dmi_system_id *d)
}
static struct dmi_system_id pnpbios_dmi_table[] __initdata = {
- { /* PnPBIOS GPF on boot */
- .callback = exploding_pnp_bios,
- .ident = "Higraded P14H",
- .matches = {
- DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
- DMI_MATCH(DMI_BIOS_VERSION, "07.00T"),
- DMI_MATCH(DMI_SYS_VENDOR, "Higraded"),
- DMI_MATCH(DMI_PRODUCT_NAME, "P14H"),
- },
- },
- { /* PnPBIOS GPF on boot */
- .callback = exploding_pnp_bios,
- .ident = "ASUS P4P800",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_BOARD_NAME, "P4P800"),
- },
- },
- { }
+ { /* PnPBIOS GPF on boot */
+ .callback = exploding_pnp_bios,
+ .ident = "Higraded P14H",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+ DMI_MATCH(DMI_BIOS_VERSION, "07.00T"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Higraded"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P14H"),
+ },
+ },
+ { /* PnPBIOS GPF on boot */
+ .callback = exploding_pnp_bios,
+ .ident = "ASUS P4P800",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "P4P800"),
+ },
+ },
+ {}
};
static int __init pnpbios_init(void)
@@ -533,14 +543,13 @@ static int __init pnpbios_init(void)
printk(KERN_INFO "PnPBIOS: Disabled\n");
return -ENODEV;
}
-
#ifdef CONFIG_PNPACPI
if (!acpi_disabled && !pnpacpi_disabled) {
pnpbios_disabled = 1;
printk(KERN_INFO "PnPBIOS: Disabled by ACPI PNP\n");
return -ENODEV;
}
-#endif /* CONFIG_ACPI */
+#endif /* CONFIG_ACPI */
/* scan the system for pnpbios support */
if (!pnpbios_probe_system())
@@ -552,14 +561,16 @@ static int __init pnpbios_init(void)
/* read the node info */
ret = pnp_bios_dev_node_info(&node_info);
if (ret) {
- printk(KERN_ERR "PnPBIOS: Unable to get node info. Aborting.\n");
+ printk(KERN_ERR
+ "PnPBIOS: Unable to get node info. Aborting.\n");
return ret;
}
/* register with the pnp layer */
ret = pnp_register_protocol(&pnpbios_protocol);
if (ret) {
- printk(KERN_ERR "PnPBIOS: Unable to register driver. Aborting.\n");
+ printk(KERN_ERR
+ "PnPBIOS: Unable to register driver. Aborting.\n");
return ret;
}
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index 8027073f791..9c8c07701b6 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -18,9 +18,6 @@
* The other files are human-readable.
*/
-//#include <pcmcia/config.h>
-//#include <pcmcia/k_compat.h>
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -37,42 +34,37 @@ static struct proc_dir_entry *proc_pnp = NULL;
static struct proc_dir_entry *proc_pnp_boot = NULL;
static int proc_read_pnpconfig(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+ int count, int *eof, void *data)
{
struct pnp_isa_config_struc pnps;
if (pnp_bios_isapnp_config(&pnps))
return -EIO;
return snprintf(buf, count,
- "structure_revision %d\n"
- "number_of_CSNs %d\n"
- "ISA_read_data_port 0x%x\n",
- pnps.revision,
- pnps.no_csns,
- pnps.isa_rd_data_port
- );
+ "structure_revision %d\n"
+ "number_of_CSNs %d\n"
+ "ISA_read_data_port 0x%x\n",
+ pnps.revision, pnps.no_csns, pnps.isa_rd_data_port);
}
static int proc_read_escdinfo(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+ int count, int *eof, void *data)
{
struct escd_info_struc escd;
if (pnp_bios_escd_info(&escd))
return -EIO;
return snprintf(buf, count,
- "min_ESCD_write_size %d\n"
- "ESCD_size %d\n"
- "NVRAM_base 0x%x\n",
- escd.min_escd_write_size,
- escd.escd_size,
- escd.nv_storage_base
- );
+ "min_ESCD_write_size %d\n"
+ "ESCD_size %d\n"
+ "NVRAM_base 0x%x\n",
+ escd.min_escd_write_size,
+ escd.escd_size, escd.nv_storage_base);
}
#define MAX_SANE_ESCD_SIZE (32*1024)
static int proc_read_escd(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+ int count, int *eof, void *data)
{
struct escd_info_struc escd;
char *tmpbuf;
@@ -83,30 +75,36 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
/* sanity check */
if (escd.escd_size > MAX_SANE_ESCD_SIZE) {
- printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by BIOS escd_info call is too great\n");
+ printk(KERN_ERR
+ "PnPBIOS: proc_read_escd: ESCD size reported by BIOS escd_info call is too great\n");
return -EFBIG;
}
tmpbuf = kzalloc(escd.escd_size, GFP_KERNEL);
- if (!tmpbuf) return -ENOMEM;
+ if (!tmpbuf)
+ return -ENOMEM;
if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) {
kfree(tmpbuf);
return -EIO;
}
- escd_size = (unsigned char)(tmpbuf[0]) + (unsigned char)(tmpbuf[1])*256;
+ escd_size =
+ (unsigned char)(tmpbuf[0]) + (unsigned char)(tmpbuf[1]) * 256;
/* sanity check */
if (escd_size > MAX_SANE_ESCD_SIZE) {
- printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by BIOS read_escd call is too great\n");
+ printk(KERN_ERR
+ "PnPBIOS: proc_read_escd: ESCD size reported by BIOS read_escd call is too great\n");
return -EFBIG;
}
escd_left_to_read = escd_size - pos;
- if (escd_left_to_read < 0) escd_left_to_read = 0;
- if (escd_left_to_read == 0) *eof = 1;
- n = min(count,escd_left_to_read);
+ if (escd_left_to_read < 0)
+ escd_left_to_read = 0;
+ if (escd_left_to_read == 0)
+ *eof = 1;
+ n = min(count, escd_left_to_read);
memcpy(buf, tmpbuf + pos, n);
kfree(tmpbuf);
*start = buf;
@@ -114,17 +112,17 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
}
static int proc_read_legacyres(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+ int count, int *eof, void *data)
{
/* Assume that the following won't overflow the buffer */
- if (pnp_bios_get_stat_res(buf))
+ if (pnp_bios_get_stat_res(buf))
return -EIO;
- return count; // FIXME: Return actual length
+ return count; // FIXME: Return actual length
}
static int proc_read_devices(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+ int count, int *eof, void *data)
{
struct pnp_bios_node *node;
u8 nodenum;
@@ -134,9 +132,10 @@ static int proc_read_devices(char *buf, char **start, off_t pos,
return 0;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
- if (!node) return -ENOMEM;
+ if (!node)
+ return -ENOMEM;
- for (nodenum=pos; nodenum<0xff; ) {
+ for (nodenum = pos; nodenum < 0xff;) {
u8 thisnodenum = nodenum;
/* 26 = the number of characters per line sprintf'ed */
if ((p - buf + 26) > count)
@@ -148,7 +147,11 @@ static int proc_read_devices(char *buf, char **start, off_t pos,
node->type_code[0], node->type_code[1],
node->type_code[2], node->flags);
if (nodenum <= thisnodenum) {
- printk(KERN_ERR "%s Node number 0x%x is out of sequence following node 0x%x. Aborting.\n", "PnPBIOS: proc_read_devices:", (unsigned int)nodenum, (unsigned int)thisnodenum);
+ printk(KERN_ERR
+ "%s Node number 0x%x is out of sequence following node 0x%x. Aborting.\n",
+ "PnPBIOS: proc_read_devices:",
+ (unsigned int)nodenum,
+ (unsigned int)thisnodenum);
*eof = 1;
break;
}
@@ -156,12 +159,12 @@ static int proc_read_devices(char *buf, char **start, off_t pos,
kfree(node);
if (nodenum == 0xff)
*eof = 1;
- *start = (char *)((off_t)nodenum - pos);
+	*start = (char *)((off_t)nodenum - pos);
return p - buf;
}
static int proc_read_node(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+ int count, int *eof, void *data)
{
struct pnp_bios_node *node;
int boot = (long)data >> 8;
@@ -169,7 +172,8 @@ static int proc_read_node(char *buf, char **start, off_t pos,
int len;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
- if (!node) return -ENOMEM;
+ if (!node)
+ return -ENOMEM;
if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
kfree(node);
return -EIO;
@@ -180,8 +184,8 @@ static int proc_read_node(char *buf, char **start, off_t pos,
return len;
}
-static int proc_write_node(struct file *file, const char __user *buf,
- unsigned long count, void *data)
+static int proc_write_node(struct file *file, const char __user *buf,
+ unsigned long count, void *data)
{
struct pnp_bios_node *node;
int boot = (long)data >> 8;
@@ -208,12 +212,12 @@ static int proc_write_node(struct file *file, const char __user *buf,
goto out;
}
ret = count;
-out:
+ out:
kfree(node);
return ret;
}
-int pnpbios_interface_attach_device(struct pnp_bios_node * node)
+int pnpbios_interface_attach_device(struct pnp_bios_node *node)
{
char name[3];
struct proc_dir_entry *ent;
@@ -222,7 +226,7 @@ int pnpbios_interface_attach_device(struct pnp_bios_node * node)
if (!proc_pnp)
return -EIO;
- if ( !pnpbios_dont_use_current_config ) {
+ if (!pnpbios_dont_use_current_config) {
ent = create_proc_entry(name, 0, proc_pnp);
if (ent) {
ent->read_proc = proc_read_node;
@@ -237,7 +241,7 @@ int pnpbios_interface_attach_device(struct pnp_bios_node * node)
if (ent) {
ent->read_proc = proc_read_node;
ent->write_proc = proc_write_node;
- ent->data = (void *)(long)(node->handle+0x100);
+ ent->data = (void *)(long)(node->handle + 0x100);
return 0;
}
@@ -249,7 +253,7 @@ int pnpbios_interface_attach_device(struct pnp_bios_node * node)
* work and the pnpbios_dont_use_current_config flag
* should already have been set to the appropriate value
*/
-int __init pnpbios_proc_init( void )
+int __init pnpbios_proc_init(void)
{
proc_pnp = proc_mkdir("pnp", proc_bus);
if (!proc_pnp)
@@ -258,10 +262,13 @@ int __init pnpbios_proc_init( void )
if (!proc_pnp_boot)
return -EIO;
create_proc_read_entry("devices", 0, proc_pnp, proc_read_devices, NULL);
- create_proc_read_entry("configuration_info", 0, proc_pnp, proc_read_pnpconfig, NULL);
- create_proc_read_entry("escd_info", 0, proc_pnp, proc_read_escdinfo, NULL);
+ create_proc_read_entry("configuration_info", 0, proc_pnp,
+ proc_read_pnpconfig, NULL);
+ create_proc_read_entry("escd_info", 0, proc_pnp, proc_read_escdinfo,
+ NULL);
create_proc_read_entry("escd", S_IRUSR, proc_pnp, proc_read_escd, NULL);
- create_proc_read_entry("legacy_device_resources", 0, proc_pnp, proc_read_legacyres, NULL);
+ create_proc_read_entry("legacy_device_resources", 0, proc_pnp,
+ proc_read_legacyres, NULL);
return 0;
}
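The entries created above live under /proc/bus/pnp. A trivial userspace reader (illustration only) for the "devices" summary that proc_read_devices() fills one node per line:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/bus/pnp/devices", "r");
	char line[128];

	if (!f) {
		perror("/proc/bus/pnp/devices");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one node per line, as formatted by proc_read_devices() */
	fclose(f);
	return 0;
}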
@@ -274,9 +281,9 @@ void __exit pnpbios_proc_exit(void)
if (!proc_pnp)
return;
- for (i=0; i<0xff; i++) {
+ for (i = 0; i < 0xff; i++) {
sprintf(name, "%02x", i);
- if ( !pnpbios_dont_use_current_config )
+ if (!pnpbios_dont_use_current_config)
remove_proc_entry(name, proc_pnp);
remove_proc_entry(name, proc_pnp_boot);
}
@@ -287,6 +294,4 @@ void __exit pnpbios_proc_exit(void)
remove_proc_entry("devices", proc_pnp);
remove_proc_entry("boot", proc_pnp);
remove_proc_entry("pnp", proc_bus);
-
- return;
}
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index 3c2ab8394e3..04ecd7b6723 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -1,6 +1,5 @@
/*
* rsparser.c - parses and encodes pnpbios resource data streams
- *
*/
#include <linux/ctype.h>
@@ -12,8 +11,10 @@
#ifdef CONFIG_PCI
#include <linux/pci.h>
#else
-inline void pcibios_penalize_isa_irq(int irq, int active) {}
-#endif /* CONFIG_PCI */
+inline void pcibios_penalize_isa_irq(int irq, int active)
+{
+}
+#endif /* CONFIG_PCI */
#include "pnpbios.h"
@@ -52,75 +53,88 @@ inline void pcibios_penalize_isa_irq(int irq, int active) {}
* Allocated Resources
*/
-static void
-pnpbios_parse_allocated_irqresource(struct pnp_resource_table * res, int irq)
+static void pnpbios_parse_allocated_irqresource(struct pnp_resource_table *res,
+ int irq)
{
int i = 0;
- while (!(res->irq_resource[i].flags & IORESOURCE_UNSET) && i < PNP_MAX_IRQ) i++;
+
+ while (!(res->irq_resource[i].flags & IORESOURCE_UNSET)
+ && i < PNP_MAX_IRQ)
+ i++;
if (i < PNP_MAX_IRQ) {
- res->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag
+ res->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag
if (irq == -1) {
res->irq_resource[i].flags |= IORESOURCE_DISABLED;
return;
}
res->irq_resource[i].start =
- res->irq_resource[i].end = (unsigned long) irq;
+ res->irq_resource[i].end = (unsigned long)irq;
pcibios_penalize_isa_irq(irq, 1);
}
}
-static void
-pnpbios_parse_allocated_dmaresource(struct pnp_resource_table * res, int dma)
+static void pnpbios_parse_allocated_dmaresource(struct pnp_resource_table *res,
+ int dma)
{
int i = 0;
+
while (i < PNP_MAX_DMA &&
- !(res->dma_resource[i].flags & IORESOURCE_UNSET))
+ !(res->dma_resource[i].flags & IORESOURCE_UNSET))
i++;
if (i < PNP_MAX_DMA) {
- res->dma_resource[i].flags = IORESOURCE_DMA; // Also clears _UNSET flag
+ res->dma_resource[i].flags = IORESOURCE_DMA; // Also clears _UNSET flag
if (dma == -1) {
res->dma_resource[i].flags |= IORESOURCE_DISABLED;
return;
}
res->dma_resource[i].start =
- res->dma_resource[i].end = (unsigned long) dma;
+ res->dma_resource[i].end = (unsigned long)dma;
}
}
-static void
-pnpbios_parse_allocated_ioresource(struct pnp_resource_table * res, int io, int len)
+static void pnpbios_parse_allocated_ioresource(struct pnp_resource_table *res,
+ int io, int len)
{
int i = 0;
- while (!(res->port_resource[i].flags & IORESOURCE_UNSET) && i < PNP_MAX_PORT) i++;
+
+ while (!(res->port_resource[i].flags & IORESOURCE_UNSET)
+ && i < PNP_MAX_PORT)
+ i++;
if (i < PNP_MAX_PORT) {
- res->port_resource[i].flags = IORESOURCE_IO; // Also clears _UNSET flag
- if (len <= 0 || (io + len -1) >= 0x10003) {
+ res->port_resource[i].flags = IORESOURCE_IO; // Also clears _UNSET flag
+ if (len <= 0 || (io + len - 1) >= 0x10003) {
res->port_resource[i].flags |= IORESOURCE_DISABLED;
return;
}
- res->port_resource[i].start = (unsigned long) io;
+ res->port_resource[i].start = (unsigned long)io;
res->port_resource[i].end = (unsigned long)(io + len - 1);
}
}
-static void
-pnpbios_parse_allocated_memresource(struct pnp_resource_table * res, int mem, int len)
+static void pnpbios_parse_allocated_memresource(struct pnp_resource_table *res,
+ int mem, int len)
{
int i = 0;
- while (!(res->mem_resource[i].flags & IORESOURCE_UNSET) && i < PNP_MAX_MEM) i++;
+
+ while (!(res->mem_resource[i].flags & IORESOURCE_UNSET)
+ && i < PNP_MAX_MEM)
+ i++;
if (i < PNP_MAX_MEM) {
- res->mem_resource[i].flags = IORESOURCE_MEM; // Also clears _UNSET flag
+ res->mem_resource[i].flags = IORESOURCE_MEM; // Also clears _UNSET flag
if (len <= 0) {
res->mem_resource[i].flags |= IORESOURCE_DISABLED;
return;
}
- res->mem_resource[i].start = (unsigned long) mem;
+ res->mem_resource[i].start = (unsigned long)mem;
res->mem_resource[i].end = (unsigned long)(mem + len - 1);
}
}
-static unsigned char *
-pnpbios_parse_allocated_resource_data(unsigned char * p, unsigned char * end, struct pnp_resource_table * res)
+static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
+ unsigned char *end,
+ struct pnp_resource_table *res)
{
unsigned int len, tag;
int io, size, mask, i;
@@ -134,12 +148,12 @@ pnpbios_parse_allocated_resource_data(unsigned char * p, unsigned char * end, st
while ((char *)p < (char *)end) {
/* determine the type of tag */
- if (p[0] & LARGE_TAG) { /* large tag */
+ if (p[0] & LARGE_TAG) { /* large tag */
len = (p[2] << 8) | p[1];
tag = p[0];
- } else { /* small tag */
+ } else { /* small tag */
len = p[0] & 0x07;
- tag = ((p[0]>>3) & 0x0f);
+ tag = ((p[0] >> 3) & 0x0f);
}
switch (tag) {
@@ -147,8 +161,8 @@ pnpbios_parse_allocated_resource_data(unsigned char * p, unsigned char * end, st
case LARGE_TAG_MEM:
if (len != 9)
goto len_err;
- io = *(short *) &p[4];
- size = *(short *) &p[10];
+ io = *(short *)&p[4];
+ size = *(short *)&p[10];
pnpbios_parse_allocated_memresource(res, io, size);
break;
@@ -163,16 +177,16 @@ pnpbios_parse_allocated_resource_data(unsigned char * p, unsigned char * end, st
case LARGE_TAG_MEM32:
if (len != 17)
goto len_err;
- io = *(int *) &p[4];
- size = *(int *) &p[16];
+ io = *(int *)&p[4];
+ size = *(int *)&p[16];
pnpbios_parse_allocated_memresource(res, io, size);
break;
case LARGE_TAG_FIXEDMEM32:
if (len != 9)
goto len_err;
- io = *(int *) &p[4];
- size = *(int *) &p[8];
+ io = *(int *)&p[4];
+ size = *(int *)&p[8];
pnpbios_parse_allocated_memresource(res, io, size);
break;
@@ -180,9 +194,10 @@ pnpbios_parse_allocated_resource_data(unsigned char * p, unsigned char * end, st
if (len < 2 || len > 3)
goto len_err;
io = -1;
- mask= p[1] + p[2]*256;
- for (i=0;i<16;i++, mask=mask>>1)
- if(mask & 0x01) io=i;
+ mask = p[1] + p[2] * 256;
+ for (i = 0; i < 16; i++, mask = mask >> 1)
+ if (mask & 0x01)
+ io = i;
pnpbios_parse_allocated_irqresource(res, io);
break;
@@ -191,15 +206,16 @@ pnpbios_parse_allocated_resource_data(unsigned char * p, unsigned char * end, st
goto len_err;
io = -1;
mask = p[1];
- for (i=0;i<8;i++, mask = mask>>1)
- if(mask & 0x01) io=i;
+ for (i = 0; i < 8; i++, mask = mask >> 1)
+ if (mask & 0x01)
+ io = i;
pnpbios_parse_allocated_dmaresource(res, io);
break;
case SMALL_TAG_PORT:
if (len != 7)
goto len_err;
- io = p[2] + p[3] *256;
+ io = p[2] + p[3] * 256;
size = p[7];
pnpbios_parse_allocated_ioresource(res, io, size);
break;
@@ -218,12 +234,14 @@ pnpbios_parse_allocated_resource_data(unsigned char * p, unsigned char * end, st
case SMALL_TAG_END:
p = p + 2;
- return (unsigned char *)p;
+ return (unsigned char *)p;
break;
- default: /* an unkown tag */
- len_err:
- printk(KERN_ERR "PnPBIOS: Unknown tag '0x%x', length '%d'.\n", tag, len);
+		default:	/* an unknown tag */
+ len_err:
+ printk(KERN_ERR
+ "PnPBIOS: Unknown tag '0x%x', length '%d'.\n",
+ tag, len);
break;
}
@@ -234,20 +252,21 @@ pnpbios_parse_allocated_resource_data(unsigned char * p, unsigned char * end, st
p += len + 1;
}
- printk(KERN_ERR "PnPBIOS: Resource structure does not contain an end tag.\n");
+ printk(KERN_ERR
+ "PnPBIOS: Resource structure does not contain an end tag.\n");
return NULL;
}
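Every parser and encoder in this file repeats the same tag-header decode: large tags (LARGE_TAG bit set) carry a 16-bit little-endian length in the two bytes after the tag byte, while small tags pack a 4-bit tag number and a 3-bit length into a single byte. A compact restatement as a hypothetical helper (illustration only, not added by this patch):

/* Sketch only: decode one resource tag header. */
static unsigned char *example_decode_tag(unsigned char *p,
					 unsigned int *tag, unsigned int *len)
{
	if (p[0] & LARGE_TAG) {
		*tag = p[0];
		*len = (p[2] << 8) | p[1];	/* little-endian 16-bit length */
		return p + 3;			/* data follows the 3-byte header */
	}
	*tag = (p[0] >> 3) & 0x0f;
	*len = p[0] & 0x07;
	return p + 1;				/* data follows the 1-byte header */
}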
-
/*
* Resource Configuration Options
*/
-static void
-pnpbios_parse_mem_option(unsigned char *p, int size, struct pnp_option *option)
+static void pnpbios_parse_mem_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
- struct pnp_mem * mem;
+ struct pnp_mem *mem;
+
mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
if (!mem)
return;
@@ -256,14 +275,14 @@ pnpbios_parse_mem_option(unsigned char *p, int size, struct pnp_option *option)
mem->align = (p[9] << 8) | p[8];
mem->size = ((p[11] << 8) | p[10]) << 8;
mem->flags = p[3];
- pnp_register_mem_resource(option,mem);
- return;
+ pnp_register_mem_resource(option, mem);
}
-static void
-pnpbios_parse_mem32_option(unsigned char *p, int size, struct pnp_option *option)
+static void pnpbios_parse_mem32_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
- struct pnp_mem * mem;
+ struct pnp_mem *mem;
+
mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
if (!mem)
return;
@@ -272,14 +291,13 @@ pnpbios_parse_mem32_option(unsigned char *p, int size, struct pnp_option *option
mem->align = (p[15] << 24) | (p[14] << 16) | (p[13] << 8) | p[12];
mem->size = (p[19] << 24) | (p[18] << 16) | (p[17] << 8) | p[16];
mem->flags = p[3];
- pnp_register_mem_resource(option,mem);
- return;
+ pnp_register_mem_resource(option, mem);
}
-static void
-pnpbios_parse_fixed_mem32_option(unsigned char *p, int size, struct pnp_option *option)
+static void pnpbios_parse_fixed_mem32_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
- struct pnp_mem * mem;
+ struct pnp_mem *mem;
mem = kzalloc(sizeof(struct pnp_mem), GFP_KERNEL);
if (!mem)
return;
@@ -287,14 +305,13 @@ pnpbios_parse_fixed_mem32_option(unsigned char *p, int size, struct pnp_option *
mem->size = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8];
mem->align = 0;
mem->flags = p[3];
- pnp_register_mem_resource(option,mem);
- return;
+ pnp_register_mem_resource(option, mem);
}
-static void
-pnpbios_parse_irq_option(unsigned char *p, int size, struct pnp_option *option)
+static void pnpbios_parse_irq_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
- struct pnp_irq * irq;
+ struct pnp_irq *irq;
unsigned long bits;
irq = kzalloc(sizeof(struct pnp_irq), GFP_KERNEL);
@@ -306,27 +323,27 @@ pnpbios_parse_irq_option(unsigned char *p, int size, struct pnp_option *option)
irq->flags = p[3];
else
irq->flags = IORESOURCE_IRQ_HIGHEDGE;
- pnp_register_irq_resource(option,irq);
- return;
+ pnp_register_irq_resource(option, irq);
}
-static void
-pnpbios_parse_dma_option(unsigned char *p, int size, struct pnp_option *option)
+static void pnpbios_parse_dma_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
- struct pnp_dma * dma;
+ struct pnp_dma *dma;
+
dma = kzalloc(sizeof(struct pnp_dma), GFP_KERNEL);
if (!dma)
return;
dma->map = p[1];
dma->flags = p[2];
- pnp_register_dma_resource(option,dma);
- return;
+ pnp_register_dma_resource(option, dma);
}
-static void
-pnpbios_parse_port_option(unsigned char *p, int size, struct pnp_option *option)
+static void pnpbios_parse_port_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
- struct pnp_port * port;
+ struct pnp_port *port;
+
port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
if (!port)
return;
@@ -335,14 +352,14 @@ pnpbios_parse_port_option(unsigned char *p, int size, struct pnp_option *option)
port->align = p[6];
port->size = p[7];
port->flags = p[1] ? PNP_PORT_FLAG_16BITADDR : 0;
- pnp_register_port_resource(option,port);
- return;
+ pnp_register_port_resource(option, port);
}
-static void
-pnpbios_parse_fixed_port_option(unsigned char *p, int size, struct pnp_option *option)
+static void pnpbios_parse_fixed_port_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
- struct pnp_port * port;
+ struct pnp_port *port;
+
port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
if (!port)
return;
@@ -350,12 +367,12 @@ pnpbios_parse_fixed_port_option(unsigned char *p, int size, struct pnp_option *o
port->size = p[3];
port->align = 0;
port->flags = PNP_PORT_FLAG_FIXED;
- pnp_register_port_resource(option,port);
- return;
+ pnp_register_port_resource(option, port);
}
-static unsigned char *
-pnpbios_parse_resource_option_data(unsigned char * p, unsigned char * end, struct pnp_dev *dev)
+static unsigned char *pnpbios_parse_resource_option_data(unsigned char *p,
+ unsigned char *end,
+ struct pnp_dev *dev)
{
unsigned int len, tag;
int priority = 0;
@@ -371,12 +388,12 @@ pnpbios_parse_resource_option_data(unsigned char * p, unsigned char * end, struc
while ((char *)p < (char *)end) {
/* determine the type of tag */
- if (p[0] & LARGE_TAG) { /* large tag */
+ if (p[0] & LARGE_TAG) { /* large tag */
len = (p[2] << 8) | p[1];
tag = p[0];
- } else { /* small tag */
+ } else { /* small tag */
len = p[0] & 0x07;
- tag = ((p[0]>>3) & 0x0f);
+ tag = ((p[0] >> 3) & 0x0f);
}
switch (tag) {
@@ -442,16 +459,19 @@ pnpbios_parse_resource_option_data(unsigned char * p, unsigned char * end, struc
if (len != 0)
goto len_err;
if (option_independent == option)
- printk(KERN_WARNING "PnPBIOS: Missing SMALL_TAG_STARTDEP tag\n");
+ printk(KERN_WARNING
+ "PnPBIOS: Missing SMALL_TAG_STARTDEP tag\n");
option = option_independent;
break;
case SMALL_TAG_END:
- return p + 2;
+ return p + 2;
- default: /* an unkown tag */
- len_err:
- printk(KERN_ERR "PnPBIOS: Unknown tag '0x%x', length '%d'.\n", tag, len);
+		default:	/* an unknown tag */
+ len_err:
+ printk(KERN_ERR
+ "PnPBIOS: Unknown tag '0x%x', length '%d'.\n",
+ tag, len);
break;
}
@@ -462,19 +482,18 @@ pnpbios_parse_resource_option_data(unsigned char * p, unsigned char * end, struc
p += len + 1;
}
- printk(KERN_ERR "PnPBIOS: Resource structure does not contain an end tag.\n");
+ printk(KERN_ERR
+ "PnPBIOS: Resource structure does not contain an end tag.\n");
return NULL;
}
-
/*
* Compatible Device IDs
*/
#define HEX(id,a) hex[((id)>>a) & 15]
#define CHAR(id,a) (0x40 + (((id)>>a) & 31))
-//
void pnpid32_to_pnpid(u32 id, char *str)
{
@@ -483,21 +502,20 @@ void pnpid32_to_pnpid(u32 id, char *str)
id = be32_to_cpu(id);
str[0] = CHAR(id, 26);
str[1] = CHAR(id, 21);
- str[2] = CHAR(id,16);
+ str[2] = CHAR(id, 16);
str[3] = HEX(id, 12);
str[4] = HEX(id, 8);
str[5] = HEX(id, 4);
str[6] = HEX(id, 0);
str[7] = '\0';
-
- return;
}
-//
+
#undef CHAR
#undef HEX
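The CHAR/HEX macros above document the EISA id packing: three 5-bit letters (stored as an offset from '@') at bits 26, 21 and 16, followed by four hex nibbles. A round-trip sketch (invented helper, illustration only) that hand-packs the PNP0000 id and feeds it back through pnpid32_to_pnpid():

/* Sketch only: hand-packing an EISA id and decoding it again. */
static void example_eisa_id(void)
{
	u32 raw = (('P' - '@') << 26) | (('N' - '@') << 21) |
		  (('P' - '@') << 16) |
		  (0x0 << 12) | (0x0 << 8) | (0x0 << 4) | 0x0;
	char str[8];

	/* pnpid32_to_pnpid() byte-swaps first, so hand it big-endian data */
	pnpid32_to_pnpid(cpu_to_be32(raw), str);
	/* str now reads "PNP0000" */
}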
-static unsigned char *
-pnpbios_parse_compatible_ids(unsigned char *p, unsigned char *end, struct pnp_dev *dev)
+static unsigned char *pnpbios_parse_compatible_ids(unsigned char *p,
+ unsigned char *end,
+ struct pnp_dev *dev)
{
int len, tag;
char id[8];
@@ -509,40 +527,45 @@ pnpbios_parse_compatible_ids(unsigned char *p, unsigned char *end, struct pnp_de
while ((char *)p < (char *)end) {
/* determine the type of tag */
- if (p[0] & LARGE_TAG) { /* large tag */
+ if (p[0] & LARGE_TAG) { /* large tag */
len = (p[2] << 8) | p[1];
tag = p[0];
- } else { /* small tag */
+ } else { /* small tag */
len = p[0] & 0x07;
- tag = ((p[0]>>3) & 0x0f);
+ tag = ((p[0] >> 3) & 0x0f);
}
switch (tag) {
case LARGE_TAG_ANSISTR:
- strncpy(dev->name, p + 3, len >= PNP_NAME_LEN ? PNP_NAME_LEN - 2 : len);
- dev->name[len >= PNP_NAME_LEN ? PNP_NAME_LEN - 1 : len] = '\0';
+ strncpy(dev->name, p + 3,
+ len >= PNP_NAME_LEN ? PNP_NAME_LEN - 2 : len);
+ dev->name[len >=
+ PNP_NAME_LEN ? PNP_NAME_LEN - 1 : len] = '\0';
break;
- case SMALL_TAG_COMPATDEVID: /* compatible ID */
+ case SMALL_TAG_COMPATDEVID: /* compatible ID */
if (len != 4)
goto len_err;
- dev_id = kzalloc(sizeof (struct pnp_id), GFP_KERNEL);
+ dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
if (!dev_id)
return NULL;
- pnpid32_to_pnpid(p[1] | p[2] << 8 | p[3] << 16 | p[4] << 24,id);
+ pnpid32_to_pnpid(p[1] | p[2] << 8 | p[3] << 16 | p[4] <<
+ 24, id);
memcpy(&dev_id->id, id, 7);
pnp_add_id(dev_id, dev);
break;
case SMALL_TAG_END:
p = p + 2;
- return (unsigned char *)p;
+ return (unsigned char *)p;
break;
- default: /* an unkown tag */
- len_err:
- printk(KERN_ERR "PnPBIOS: Unknown tag '0x%x', length '%d'.\n", tag, len);
+		default:	/* an unknown tag */
+ len_err:
+ printk(KERN_ERR
+ "PnPBIOS: Unknown tag '0x%x', length '%d'.\n",
+ tag, len);
break;
}
@@ -553,33 +576,34 @@ pnpbios_parse_compatible_ids(unsigned char *p, unsigned char *end, struct pnp_de
p += len + 1;
}
- printk(KERN_ERR "PnPBIOS: Resource structure does not contain an end tag.\n");
+ printk(KERN_ERR
+ "PnPBIOS: Resource structure does not contain an end tag.\n");
return NULL;
}
-
/*
* Allocated Resource Encoding
*/
-static void pnpbios_encode_mem(unsigned char *p, struct resource * res)
+static void pnpbios_encode_mem(unsigned char *p, struct resource *res)
{
unsigned long base = res->start;
unsigned long len = res->end - res->start + 1;
+
p[4] = (base >> 8) & 0xff;
p[5] = ((base >> 8) >> 8) & 0xff;
p[6] = (base >> 8) & 0xff;
p[7] = ((base >> 8) >> 8) & 0xff;
p[10] = (len >> 8) & 0xff;
p[11] = ((len >> 8) >> 8) & 0xff;
- return;
}
-static void pnpbios_encode_mem32(unsigned char *p, struct resource * res)
+static void pnpbios_encode_mem32(unsigned char *p, struct resource *res)
{
unsigned long base = res->start;
unsigned long len = res->end - res->start + 1;
+
p[4] = base & 0xff;
p[5] = (base >> 8) & 0xff;
p[6] = (base >> 16) & 0xff;
@@ -592,12 +616,13 @@ static void pnpbios_encode_mem32(unsigned char *p, struct resource * res)
p[17] = (len >> 8) & 0xff;
p[18] = (len >> 16) & 0xff;
p[19] = (len >> 24) & 0xff;
- return;
}
-static void pnpbios_encode_fixed_mem32(unsigned char *p, struct resource * res)
-{ unsigned long base = res->start;
+static void pnpbios_encode_fixed_mem32(unsigned char *p, struct resource *res)
+{
+ unsigned long base = res->start;
unsigned long len = res->end - res->start + 1;
+
p[4] = base & 0xff;
p[5] = (base >> 8) & 0xff;
p[6] = (base >> 16) & 0xff;
@@ -606,50 +631,52 @@ static void pnpbios_encode_fixed_mem32(unsigned char *p, struct resource * res)
p[9] = (len >> 8) & 0xff;
p[10] = (len >> 16) & 0xff;
p[11] = (len >> 24) & 0xff;
- return;
}
-static void pnpbios_encode_irq(unsigned char *p, struct resource * res)
+static void pnpbios_encode_irq(unsigned char *p, struct resource *res)
{
unsigned long map = 0;
+
map = 1 << res->start;
p[1] = map & 0xff;
p[2] = (map >> 8) & 0xff;
- return;
}
-static void pnpbios_encode_dma(unsigned char *p, struct resource * res)
+static void pnpbios_encode_dma(unsigned char *p, struct resource *res)
{
unsigned long map = 0;
+
map = 1 << res->start;
p[1] = map & 0xff;
- return;
}
-static void pnpbios_encode_port(unsigned char *p, struct resource * res)
+static void pnpbios_encode_port(unsigned char *p, struct resource *res)
{
unsigned long base = res->start;
unsigned long len = res->end - res->start + 1;
+
p[2] = base & 0xff;
p[3] = (base >> 8) & 0xff;
p[4] = base & 0xff;
p[5] = (base >> 8) & 0xff;
p[7] = len & 0xff;
- return;
}
-static void pnpbios_encode_fixed_port(unsigned char *p, struct resource * res)
+static void pnpbios_encode_fixed_port(unsigned char *p, struct resource *res)
{
unsigned long base = res->start;
unsigned long len = res->end - res->start + 1;
+
p[1] = base & 0xff;
p[2] = (base >> 8) & 0xff;
p[3] = len & 0xff;
- return;
}
-static unsigned char *
-pnpbios_encode_allocated_resource_data(unsigned char * p, unsigned char * end, struct pnp_resource_table * res)
+static unsigned char *pnpbios_encode_allocated_resource_data(unsigned char *p,
+ unsigned char *end,
+ struct pnp_resource_table *res)
{
unsigned int len, tag;
int port = 0, irq = 0, dma = 0, mem = 0;
@@ -660,12 +687,12 @@ pnpbios_encode_allocated_resource_data(unsigned char * p, unsigned char * end, s
while ((char *)p < (char *)end) {
/* determine the type of tag */
- if (p[0] & LARGE_TAG) { /* large tag */
+ if (p[0] & LARGE_TAG) { /* large tag */
len = (p[2] << 8) | p[1];
tag = p[0];
- } else { /* small tag */
+ } else { /* small tag */
len = p[0] & 0x07;
- tag = ((p[0]>>3) & 0x0f);
+ tag = ((p[0] >> 3) & 0x0f);
}
switch (tag) {
@@ -725,12 +752,14 @@ pnpbios_encode_allocated_resource_data(unsigned char * p, unsigned char * end, s
case SMALL_TAG_END:
p = p + 2;
- return (unsigned char *)p;
+ return (unsigned char *)p;
break;
- default: /* an unkown tag */
- len_err:
- printk(KERN_ERR "PnPBIOS: Unknown tag '0x%x', length '%d'.\n", tag, len);
+		default:	/* an unknown tag */
+ len_err:
+ printk(KERN_ERR
+ "PnPBIOS: Unknown tag '0x%x', length '%d'.\n",
+ tag, len);
break;
}
@@ -741,52 +770,52 @@ pnpbios_encode_allocated_resource_data(unsigned char * p, unsigned char * end, s
p += len + 1;
}
- printk(KERN_ERR "PnPBIOS: Resource structure does not contain an end tag.\n");
+ printk(KERN_ERR
+ "PnPBIOS: Resource structure does not contain an end tag.\n");
return NULL;
}
-
/*
* Core Parsing Functions
*/
-int
-pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node * node)
+int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node *node)
{
- unsigned char * p = (char *)node->data;
- unsigned char * end = (char *)(node->data + node->size);
- p = pnpbios_parse_allocated_resource_data(p,end,&dev->res);
+ unsigned char *p = (char *)node->data;
+ unsigned char *end = (char *)(node->data + node->size);
+
+ p = pnpbios_parse_allocated_resource_data(p, end, &dev->res);
if (!p)
return -EIO;
- p = pnpbios_parse_resource_option_data(p,end,dev);
+ p = pnpbios_parse_resource_option_data(p, end, dev);
if (!p)
return -EIO;
- p = pnpbios_parse_compatible_ids(p,end,dev);
+ p = pnpbios_parse_compatible_ids(p, end, dev);
if (!p)
return -EIO;
return 0;
}
-int
-pnpbios_read_resources_from_node(struct pnp_resource_table *res,
- struct pnp_bios_node * node)
+int pnpbios_read_resources_from_node(struct pnp_resource_table *res,
+ struct pnp_bios_node *node)
{
- unsigned char * p = (char *)node->data;
- unsigned char * end = (char *)(node->data + node->size);
- p = pnpbios_parse_allocated_resource_data(p,end,res);
+ unsigned char *p = (char *)node->data;
+ unsigned char *end = (char *)(node->data + node->size);
+
+ p = pnpbios_parse_allocated_resource_data(p, end, res);
if (!p)
return -EIO;
return 0;
}
-int
-pnpbios_write_resources_to_node(struct pnp_resource_table *res,
- struct pnp_bios_node * node)
+int pnpbios_write_resources_to_node(struct pnp_resource_table *res,
+ struct pnp_bios_node *node)
{
- unsigned char * p = (char *)node->data;
- unsigned char * end = (char *)(node->data + node->size);
- p = pnpbios_encode_allocated_resource_data(p,end,res);
+ unsigned char *p = (char *)node->data;
+ unsigned char *end = (char *)(node->data + node->size);
+
+ p = pnpbios_encode_allocated_resource_data(p, end, res);
if (!p)
return -EIO;
return 0;
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 7c3236690cc..90755d4cdb9 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -19,7 +19,6 @@
#include <linux/io.h>
#include "base.h"
-
static void quirk_awe32_resources(struct pnp_dev *dev)
{
struct pnp_port *port, *port2, *port3;
@@ -31,7 +30,7 @@ static void quirk_awe32_resources(struct pnp_dev *dev)
* two extra ports (at offset 0x400 and 0x800 from the one given) by
* hand.
*/
- for ( ; res ; res = res->next ) {
+ for (; res; res = res->next) {
port2 = pnp_alloc(sizeof(struct pnp_port));
if (!port2)
return;
@@ -58,18 +57,19 @@ static void quirk_cmi8330_resources(struct pnp_dev *dev)
struct pnp_option *res = dev->dependent;
unsigned long tmp;
- for ( ; res ; res = res->next ) {
+ for (; res; res = res->next) {
struct pnp_irq *irq;
struct pnp_dma *dma;
- for( irq = res->irq; irq; irq = irq->next ) { // Valid irqs are 5, 7, 10
+ for (irq = res->irq; irq; irq = irq->next) { // Valid irqs are 5, 7, 10
tmp = 0x04A0;
bitmap_copy(irq->map, &tmp, 16); // 0000 0100 1010 0000
}
- for( dma = res->dma; dma; dma = dma->next ) // Valid 8bit dma channels are 1,3
- if( ( dma->flags & IORESOURCE_DMA_TYPE_MASK ) == IORESOURCE_DMA_8BIT )
+ for (dma = res->dma; dma; dma = dma->next) // Valid 8bit dma channels are 1,3
+ if ((dma->flags & IORESOURCE_DMA_TYPE_MASK) ==
+ IORESOURCE_DMA_8BIT)
dma->map = 0x000A;
}
printk(KERN_INFO "pnp: CMI8330 quirk - fixing interrupts and dma\n");
@@ -79,7 +79,7 @@ static void quirk_sb16audio_resources(struct pnp_dev *dev)
{
struct pnp_port *port;
struct pnp_option *res = dev->dependent;
- int changed = 0;
+ int changed = 0;
/*
* The default range on the mpu port for these devices is 0x388-0x388.
@@ -87,24 +87,24 @@ static void quirk_sb16audio_resources(struct pnp_dev *dev)
* auto-configured.
*/
- for( ; res ; res = res->next ) {
+ for (; res; res = res->next) {
port = res->port;
- if(!port)
+ if (!port)
continue;
port = port->next;
- if(!port)
+ if (!port)
continue;
port = port->next;
- if(!port)
+ if (!port)
continue;
- if(port->min != port->max)
+ if (port->min != port->max)
continue;
port->max += 0x70;
changed = 1;
}
- if(changed)
- printk(KERN_INFO "pnp: SB audio device quirk - increasing port range\n");
- return;
+ if (changed)
+ printk(KERN_INFO
+ "pnp: SB audio device quirk - increasing port range\n");
}
static int quirk_smc_fir_enabled(struct pnp_dev *dev)
@@ -124,7 +124,7 @@ static int quirk_smc_fir_enabled(struct pnp_dev *dev)
outb(bank, firbase + 7);
high = inb(firbase + 0);
- low = inb(firbase + 1);
+ low = inb(firbase + 1);
chip = inb(firbase + 2);
/* This corresponds to the check in smsc_ircc_present() */
@@ -153,8 +153,8 @@ static void quirk_smc_enable(struct pnp_dev *dev)
*/
dev_err(&dev->dev, "%s not responding at SIR 0x%lx, FIR 0x%lx; "
"auto-configuring\n", dev->id->id,
- (unsigned long) pnp_port_start(dev, 0),
- (unsigned long) pnp_port_start(dev, 1));
+ (unsigned long)pnp_port_start(dev, 0),
+ (unsigned long)pnp_port_start(dev, 1));
pnp_disable_dev(dev);
pnp_init_resource_table(&dev->res);
@@ -162,8 +162,8 @@ static void quirk_smc_enable(struct pnp_dev *dev)
pnp_activate_dev(dev);
if (quirk_smc_fir_enabled(dev)) {
dev_err(&dev->dev, "responds at SIR 0x%lx, FIR 0x%lx\n",
- (unsigned long) pnp_port_start(dev, 0),
- (unsigned long) pnp_port_start(dev, 1));
+ (unsigned long)pnp_port_start(dev, 0),
+ (unsigned long)pnp_port_start(dev, 1));
return;
}
@@ -175,8 +175,8 @@ static void quirk_smc_enable(struct pnp_dev *dev)
*/
dev_err(&dev->dev, "not responding at SIR 0x%lx, FIR 0x%lx; "
"swapping SIR/FIR and reconfiguring\n",
- (unsigned long) pnp_port_start(dev, 0),
- (unsigned long) pnp_port_start(dev, 1));
+ (unsigned long)pnp_port_start(dev, 0),
+ (unsigned long)pnp_port_start(dev, 1));
/*
* Clear IORESOURCE_AUTO so pnp_activate_dev() doesn't reassign
@@ -200,8 +200,8 @@ static void quirk_smc_enable(struct pnp_dev *dev)
if (quirk_smc_fir_enabled(dev)) {
dev_err(&dev->dev, "responds at SIR 0x%lx, FIR 0x%lx\n",
- (unsigned long) pnp_port_start(dev, 0),
- (unsigned long) pnp_port_start(dev, 1));
+ (unsigned long)pnp_port_start(dev, 0),
+ (unsigned long)pnp_port_start(dev, 1));
return;
}
@@ -209,7 +209,6 @@ static void quirk_smc_enable(struct pnp_dev *dev)
"email bjorn.helgaas@hp.com\n");
}
-
/*
* PnP Quirks
* Cards or devices that need some tweaking due to incomplete resource info
@@ -217,21 +216,21 @@ static void quirk_smc_enable(struct pnp_dev *dev)
static struct pnp_fixup pnp_fixups[] = {
/* Soundblaster awe io port quirk */
- { "CTL0021", quirk_awe32_resources },
- { "CTL0022", quirk_awe32_resources },
- { "CTL0023", quirk_awe32_resources },
+ {"CTL0021", quirk_awe32_resources},
+ {"CTL0022", quirk_awe32_resources},
+ {"CTL0023", quirk_awe32_resources},
/* CMI 8330 interrupt and dma fix */
- { "@X@0001", quirk_cmi8330_resources },
+ {"@X@0001", quirk_cmi8330_resources},
/* Soundblaster audio device io port range quirk */
- { "CTL0001", quirk_sb16audio_resources },
- { "CTL0031", quirk_sb16audio_resources },
- { "CTL0041", quirk_sb16audio_resources },
- { "CTL0042", quirk_sb16audio_resources },
- { "CTL0043", quirk_sb16audio_resources },
- { "CTL0044", quirk_sb16audio_resources },
- { "CTL0045", quirk_sb16audio_resources },
- { "SMCf010", quirk_smc_enable },
- { "" }
+ {"CTL0001", quirk_sb16audio_resources},
+ {"CTL0031", quirk_sb16audio_resources},
+ {"CTL0041", quirk_sb16audio_resources},
+ {"CTL0042", quirk_sb16audio_resources},
+ {"CTL0043", quirk_sb16audio_resources},
+ {"CTL0044", quirk_sb16audio_resources},
+ {"CTL0045", quirk_sb16audio_resources},
+ {"SMCf010", quirk_smc_enable},
+ {""}
};
void pnp_fixup_device(struct pnp_dev *dev)
@@ -239,9 +238,8 @@ void pnp_fixup_device(struct pnp_dev *dev)
int i = 0;
while (*pnp_fixups[i].id) {
- if (compare_pnp_id(dev->id,pnp_fixups[i].id)) {
- pnp_dbg("Calling quirk for %s",
- dev->dev.bus_id);
+ if (compare_pnp_id(dev->id, pnp_fixups[i].id)) {
+ pnp_dbg("Calling quirk for %s", dev->dev.bus_id);
pnp_fixups[i].quirk_function(dev);
}
i++;
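
pnp_fixup_device() above is the classic table-driven dispatch: a static array of { id, handler } pairs terminated by an empty id, scanned for every matching entry. A standalone sketch of the pattern, with a simplified strcmp() match in place of compare_pnp_id() and made-up device and handler names:

#include <stdio.h>
#include <string.h>

struct fake_dev { const char *id; };

static void quirk_a(struct fake_dev *dev) { printf("quirk A for %s\n", dev->id); }
static void quirk_b(struct fake_dev *dev) { printf("quirk B for %s\n", dev->id); }

static const struct fixup {
	const char *id;
	void (*quirk)(struct fake_dev *);
} fixups[] = {
	{"CTL0021", quirk_a},
	{"CTL0001", quirk_b},
	{""}				/* empty id terminates the table */
};

static void fixup_device(struct fake_dev *dev)
{
	int i;

	for (i = 0; *fixups[i].id; i++)
		if (!strcmp(dev->id, fixups[i].id))
			fixups[i].quirk(dev);
}

int main(void)
{
	struct fake_dev dev = { "CTL0001" };

	fixup_device(&dev);
	return 0;
}
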
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index a685fbec460..ea6ec14a055 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -3,7 +3,6 @@
*
* based on isapnp.c resource management (c) Jaroslav Kysela <perex@suse.cz>
* Copyright 2003 Adam Belay <ambx1@neo.rr.com>
- *
*/
#include <linux/module.h>
@@ -20,21 +19,19 @@
#include <linux/pnp.h>
#include "base.h"
-static int pnp_reserve_irq[16] = { [0 ... 15] = -1 }; /* reserve (don't use) some IRQ */
-static int pnp_reserve_dma[8] = { [0 ... 7] = -1 }; /* reserve (don't use) some DMA */
-static int pnp_reserve_io[16] = { [0 ... 15] = -1 }; /* reserve (don't use) some I/O region */
-static int pnp_reserve_mem[16] = { [0 ... 15] = -1 }; /* reserve (don't use) some memory region */
-
+static int pnp_reserve_irq[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some IRQ */
+static int pnp_reserve_dma[8] = {[0 ... 7] = -1 }; /* reserve (don't use) some DMA */
+static int pnp_reserve_io[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some I/O region */
+static int pnp_reserve_mem[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some memory region */
/*
* option registration
*/
-static struct pnp_option * pnp_build_option(int priority)
+static struct pnp_option *pnp_build_option(int priority)
{
struct pnp_option *option = pnp_alloc(sizeof(struct pnp_option));
- /* check if pnp_alloc ran out of memory */
if (!option)
return NULL;
@@ -46,9 +43,10 @@ static struct pnp_option * pnp_build_option(int priority)
return option;
}
-struct pnp_option * pnp_register_independent_option(struct pnp_dev *dev)
+struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev)
{
struct pnp_option *option;
+
if (!dev)
return NULL;
@@ -61,9 +59,11 @@ struct pnp_option * pnp_register_independent_option(struct pnp_dev *dev)
return option;
}
-struct pnp_option * pnp_register_dependent_option(struct pnp_dev *dev, int priority)
+struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev,
+ int priority)
{
struct pnp_option *option;
+
if (!dev)
return NULL;
@@ -82,6 +82,7 @@ struct pnp_option * pnp_register_dependent_option(struct pnp_dev *dev, int prior
int pnp_register_irq_resource(struct pnp_option *option, struct pnp_irq *data)
{
struct pnp_irq *ptr;
+
if (!option)
return -EINVAL;
if (!data)
@@ -110,6 +111,7 @@ int pnp_register_irq_resource(struct pnp_option *option, struct pnp_irq *data)
int pnp_register_dma_resource(struct pnp_option *option, struct pnp_dma *data)
{
struct pnp_dma *ptr;
+
if (!option)
return -EINVAL;
if (!data)
@@ -129,6 +131,7 @@ int pnp_register_dma_resource(struct pnp_option *option, struct pnp_dma *data)
int pnp_register_port_resource(struct pnp_option *option, struct pnp_port *data)
{
struct pnp_port *ptr;
+
if (!option)
return -EINVAL;
if (!data)
@@ -148,6 +151,7 @@ int pnp_register_port_resource(struct pnp_option *option, struct pnp_port *data)
int pnp_register_mem_resource(struct pnp_option *option, struct pnp_mem *data)
{
struct pnp_mem *ptr;
+
if (!option)
return -EINVAL;
if (!data)
@@ -222,7 +226,6 @@ void pnp_free_option(struct pnp_option *option)
}
}
-
/*
* resource validity checking
*/
@@ -236,11 +239,12 @@ void pnp_free_option(struct pnp_option *option)
#define cannot_compare(flags) \
((flags) & (IORESOURCE_UNSET | IORESOURCE_DISABLED))
-int pnp_check_port(struct pnp_dev * dev, int idx)
+int pnp_check_port(struct pnp_dev *dev, int idx)
{
int tmp;
struct pnp_dev *tdev;
resource_size_t *port, *end, *tport, *tend;
+
port = &dev->res.port_resource[idx].start;
end = &dev->res.port_resource[idx].end;
@@ -250,8 +254,8 @@ int pnp_check_port(struct pnp_dev * dev, int idx)
/* check if the resource is already in use, skip if the
* device is active because it itself may be in use */
- if(!dev->active) {
- if (__check_region(&ioport_resource, *port, length(port,end)))
+ if (!dev->active) {
+ if (__check_region(&ioport_resource, *port, length(port, end)))
return 0;
}
@@ -259,7 +263,7 @@ int pnp_check_port(struct pnp_dev * dev, int idx)
for (tmp = 0; tmp < 8; tmp++) {
int rport = pnp_reserve_io[tmp << 1];
int rend = pnp_reserve_io[(tmp << 1) + 1] + rport - 1;
- if (ranged_conflict(port,end,&rport,&rend))
+ if (ranged_conflict(port, end, &rport, &rend))
return 0;
}
@@ -268,7 +272,7 @@ int pnp_check_port(struct pnp_dev * dev, int idx)
if (dev->res.port_resource[tmp].flags & IORESOURCE_IO) {
tport = &dev->res.port_resource[tmp].start;
tend = &dev->res.port_resource[tmp].end;
- if (ranged_conflict(port,end,tport,tend))
+ if (ranged_conflict(port, end, tport, tend))
return 0;
}
}
@@ -279,11 +283,12 @@ int pnp_check_port(struct pnp_dev * dev, int idx)
continue;
for (tmp = 0; tmp < PNP_MAX_PORT; tmp++) {
if (tdev->res.port_resource[tmp].flags & IORESOURCE_IO) {
- if (cannot_compare(tdev->res.port_resource[tmp].flags))
+ if (cannot_compare
+ (tdev->res.port_resource[tmp].flags))
continue;
tport = &tdev->res.port_resource[tmp].start;
tend = &tdev->res.port_resource[tmp].end;
- if (ranged_conflict(port,end,tport,tend))
+ if (ranged_conflict(port, end, tport, tend))
return 0;
}
}
@@ -292,11 +297,12 @@ int pnp_check_port(struct pnp_dev * dev, int idx)
return 1;
}
-int pnp_check_mem(struct pnp_dev * dev, int idx)
+int pnp_check_mem(struct pnp_dev *dev, int idx)
{
int tmp;
struct pnp_dev *tdev;
resource_size_t *addr, *end, *taddr, *tend;
+
addr = &dev->res.mem_resource[idx].start;
end = &dev->res.mem_resource[idx].end;
@@ -306,8 +312,8 @@ int pnp_check_mem(struct pnp_dev * dev, int idx)
/* check if the resource is already in use, skip if the
* device is active because it itself may be in use */
- if(!dev->active) {
- if (check_mem_region(*addr, length(addr,end)))
+ if (!dev->active) {
+ if (check_mem_region(*addr, length(addr, end)))
return 0;
}
@@ -315,7 +321,7 @@ int pnp_check_mem(struct pnp_dev * dev, int idx)
for (tmp = 0; tmp < 8; tmp++) {
int raddr = pnp_reserve_mem[tmp << 1];
int rend = pnp_reserve_mem[(tmp << 1) + 1] + raddr - 1;
- if (ranged_conflict(addr,end,&raddr,&rend))
+ if (ranged_conflict(addr, end, &raddr, &rend))
return 0;
}
@@ -324,7 +330,7 @@ int pnp_check_mem(struct pnp_dev * dev, int idx)
if (dev->res.mem_resource[tmp].flags & IORESOURCE_MEM) {
taddr = &dev->res.mem_resource[tmp].start;
tend = &dev->res.mem_resource[tmp].end;
- if (ranged_conflict(addr,end,taddr,tend))
+ if (ranged_conflict(addr, end, taddr, tend))
return 0;
}
}
@@ -335,11 +341,12 @@ int pnp_check_mem(struct pnp_dev * dev, int idx)
continue;
for (tmp = 0; tmp < PNP_MAX_MEM; tmp++) {
if (tdev->res.mem_resource[tmp].flags & IORESOURCE_MEM) {
- if (cannot_compare(tdev->res.mem_resource[tmp].flags))
+ if (cannot_compare
+ (tdev->res.mem_resource[tmp].flags))
continue;
taddr = &tdev->res.mem_resource[tmp].start;
tend = &tdev->res.mem_resource[tmp].end;
- if (ranged_conflict(addr,end,taddr,tend))
+ if (ranged_conflict(addr, end, taddr, tend))
return 0;
}
}
@@ -353,11 +360,11 @@ static irqreturn_t pnp_test_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int pnp_check_irq(struct pnp_dev * dev, int idx)
+int pnp_check_irq(struct pnp_dev *dev, int idx)
{
int tmp;
struct pnp_dev *tdev;
- resource_size_t * irq = &dev->res.irq_resource[idx].start;
+ resource_size_t *irq = &dev->res.irq_resource[idx].start;
/* if the resource doesn't exist, don't complain about it */
if (cannot_compare(dev->res.irq_resource[idx].flags))
@@ -394,9 +401,9 @@ int pnp_check_irq(struct pnp_dev * dev, int idx)
/* check if the resource is already in use, skip if the
* device is active because it itself may be in use */
- if(!dev->active) {
+ if (!dev->active) {
if (request_irq(*irq, pnp_test_handler,
- IRQF_DISABLED|IRQF_PROBE_SHARED, "pnp", NULL))
+ IRQF_DISABLED | IRQF_PROBE_SHARED, "pnp", NULL))
return 0;
free_irq(*irq, NULL);
}
@@ -407,7 +414,8 @@ int pnp_check_irq(struct pnp_dev * dev, int idx)
continue;
for (tmp = 0; tmp < PNP_MAX_IRQ; tmp++) {
if (tdev->res.irq_resource[tmp].flags & IORESOURCE_IRQ) {
- if (cannot_compare(tdev->res.irq_resource[tmp].flags))
+ if (cannot_compare
+ (tdev->res.irq_resource[tmp].flags))
continue;
if ((tdev->res.irq_resource[tmp].start == *irq))
return 0;
@@ -418,12 +426,12 @@ int pnp_check_irq(struct pnp_dev * dev, int idx)
return 1;
}
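
pnp_check_port() and pnp_check_mem() above reduce every reserved or already-claimed range to the same question: do two inclusive ranges intersect? The ranged_conflict() macro itself is not visible in this hunk, so treating it as a closed-interval overlap test is an assumption; a standalone sketch:

#include <stdio.h>

/* two closed ranges [s1,e1] and [s2,e2] conflict iff they intersect */
static int ranges_conflict(unsigned long s1, unsigned long e1,
			   unsigned long s2, unsigned long e2)
{
	return s1 <= e2 && s2 <= e1;
}

int main(void)
{
	/* a device wanting 0x220-0x22f vs. a reservation at 0x228-0x22b */
	printf("%d\n", ranges_conflict(0x220, 0x22f, 0x228, 0x22b)); /* 1 */
	printf("%d\n", ranges_conflict(0x220, 0x22f, 0x330, 0x333)); /* 0 */
	return 0;
}
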
-int pnp_check_dma(struct pnp_dev * dev, int idx)
+int pnp_check_dma(struct pnp_dev *dev, int idx)
{
#ifndef CONFIG_IA64
int tmp;
struct pnp_dev *tdev;
- resource_size_t * dma = &dev->res.dma_resource[idx].start;
+ resource_size_t *dma = &dev->res.dma_resource[idx].start;
/* if the resource doesn't exist, don't complain about it */
if (cannot_compare(dev->res.dma_resource[idx].flags))
@@ -449,7 +457,7 @@ int pnp_check_dma(struct pnp_dev * dev, int idx)
/* check if the resource is already in use, skip if the
* device is active because it itself may be in use */
- if(!dev->active) {
+ if (!dev->active) {
if (request_dma(*dma, "pnp"))
return 0;
free_dma(*dma);
@@ -461,7 +469,8 @@ int pnp_check_dma(struct pnp_dev * dev, int idx)
continue;
for (tmp = 0; tmp < PNP_MAX_DMA; tmp++) {
if (tdev->res.dma_resource[tmp].flags & IORESOURCE_DMA) {
- if (cannot_compare(tdev->res.dma_resource[tmp].flags))
+ if (cannot_compare
+ (tdev->res.dma_resource[tmp].flags))
continue;
if ((tdev->res.dma_resource[tmp].start == *dma))
return 0;
@@ -471,30 +480,18 @@ int pnp_check_dma(struct pnp_dev * dev, int idx)
return 1;
#else
- /* IA64 hasn't legacy DMA */
+ /* IA64 does not have legacy DMA */
return 0;
#endif
}
-
-#if 0
-EXPORT_SYMBOL(pnp_register_dependent_option);
-EXPORT_SYMBOL(pnp_register_independent_option);
-EXPORT_SYMBOL(pnp_register_irq_resource);
-EXPORT_SYMBOL(pnp_register_dma_resource);
-EXPORT_SYMBOL(pnp_register_port_resource);
-EXPORT_SYMBOL(pnp_register_mem_resource);
-#endif /* 0 */
-
-
/* format is: pnp_reserve_irq=irq1[,irq2] .... */
-
static int __init pnp_setup_reserve_irq(char *str)
{
int i;
for (i = 0; i < 16; i++)
- if (get_option(&str,&pnp_reserve_irq[i]) != 2)
+ if (get_option(&str, &pnp_reserve_irq[i]) != 2)
break;
return 1;
}
@@ -502,13 +499,12 @@ static int __init pnp_setup_reserve_irq(char *str)
__setup("pnp_reserve_irq=", pnp_setup_reserve_irq);
/* format is: pnp_reserve_dma=dma1[,dma2] .... */
-
static int __init pnp_setup_reserve_dma(char *str)
{
int i;
for (i = 0; i < 8; i++)
- if (get_option(&str,&pnp_reserve_dma[i]) != 2)
+ if (get_option(&str, &pnp_reserve_dma[i]) != 2)
break;
return 1;
}
@@ -516,13 +512,12 @@ static int __init pnp_setup_reserve_dma(char *str)
__setup("pnp_reserve_dma=", pnp_setup_reserve_dma);
/* format is: pnp_reserve_io=io1,size1[,io2,size2] .... */
-
static int __init pnp_setup_reserve_io(char *str)
{
int i;
for (i = 0; i < 16; i++)
- if (get_option(&str,&pnp_reserve_io[i]) != 2)
+ if (get_option(&str, &pnp_reserve_io[i]) != 2)
break;
return 1;
}
@@ -530,13 +525,12 @@ static int __init pnp_setup_reserve_io(char *str)
__setup("pnp_reserve_io=", pnp_setup_reserve_io);
/* format is: pnp_reserve_mem=mem1,size1[,mem2,size2] .... */
-
static int __init pnp_setup_reserve_mem(char *str)
{
int i;
for (i = 0; i < 16; i++)
- if (get_option(&str,&pnp_reserve_mem[i]) != 2)
+ if (get_option(&str, &pnp_reserve_mem[i]) != 2)
break;
return 1;
}
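
The four __setup() handlers above all do the same job: pull a comma-separated list of integers out of a pnp_reserve_* boot parameter and stop after the last value. A standalone sketch of that parsing with strtol() standing in for the kernel's get_option() (stopping on the last value mirrors the != 2 check above):

#include <stdio.h>
#include <stdlib.h>

/* parse up to n comma-separated integers from s into vals[], return count */
static int parse_int_list(const char *s, long *vals, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		char *end;

		vals[i] = strtol(s, &end, 0);
		if (end == s)
			break;			/* no number found */
		if (*end != ',')
			return i + 1;		/* last value in the list */
		s = end + 1;
	}
	return i;
}

int main(void)
{
	long io[16];
	int n = parse_int_list("0x2e8,8,0x3e8,8", io, 16);
	int i;

	for (i = 0; i < n; i++)
		printf("0x%lx\n", io[i]);
	return 0;
}
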
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index 946a0dcd627..13c608f5fb3 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -1,8 +1,7 @@
/*
- * support.c - provides standard pnp functions for the use of pnp protocol drivers,
+ * support.c - standard functions for the use of pnp protocol drivers
*
* Copyright 2003 Adam Belay <ambx1@neo.rr.com>
- *
*/
#include <linux/module.h>
@@ -11,22 +10,18 @@
#include "base.h"
/**
- * pnp_is_active - Determines if a device is active based on its current resources
+ * pnp_is_active - Determines if a device is active based on its current
+ * resources
* @dev: pointer to the desired PnP device
- *
*/
-
-int pnp_is_active(struct pnp_dev * dev)
+int pnp_is_active(struct pnp_dev *dev)
{
if (!pnp_port_start(dev, 0) && pnp_port_len(dev, 0) <= 1 &&
!pnp_mem_start(dev, 0) && pnp_mem_len(dev, 0) <= 1 &&
- pnp_irq(dev, 0) == -1 &&
- pnp_dma(dev, 0) == -1)
- return 0;
+ pnp_irq(dev, 0) == -1 && pnp_dma(dev, 0) == -1)
+ return 0;
else
return 1;
}
-
-
EXPORT_SYMBOL(pnp_is_active);
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index a8a95540b1e..a06f980b3ac 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -16,13 +16,14 @@
static const struct pnp_device_id pnp_dev_table[] = {
/* General ID for reserving resources */
- { "PNP0c02", 0 },
+ {"PNP0c02", 0},
/* memory controller */
- { "PNP0c01", 0 },
- { "", 0 }
+ {"PNP0c01", 0},
+ {"", 0}
};
-static void reserve_range(const char *pnpid, resource_size_t start, resource_size_t end, int port)
+static void reserve_range(const char *pnpid, resource_size_t start,
+ resource_size_t end, int port)
{
struct resource *res;
char *regionid;
@@ -32,9 +33,9 @@ static void reserve_range(const char *pnpid, resource_size_t start, resource_siz
return;
snprintf(regionid, 16, "pnp %s", pnpid);
if (port)
- res = request_region(start, end-start+1, regionid);
+ res = request_region(start, end - start + 1, regionid);
else
- res = request_mem_region(start, end-start+1, regionid);
+ res = request_mem_region(start, end - start + 1, regionid);
if (res == NULL)
kfree(regionid);
else
@@ -44,11 +45,10 @@ static void reserve_range(const char *pnpid, resource_size_t start, resource_siz
* example do reserve stuff they know about too, so we may well
* have double reservations.
*/
- printk(KERN_INFO
- "pnp: %s: %s range 0x%llx-0x%llx %s reserved\n",
- pnpid, port ? "ioport" : "iomem",
- (unsigned long long)start, (unsigned long long)end,
- NULL != res ? "has been" : "could not be");
+ printk(KERN_INFO "pnp: %s: %s range 0x%llx-0x%llx %s reserved\n",
+ pnpid, port ? "ioport" : "iomem",
+ (unsigned long long)start, (unsigned long long)end,
+ NULL != res ? "has been" : "could not be");
}
static void reserve_resources_of_dev(const struct pnp_dev *dev)
@@ -74,7 +74,7 @@ static void reserve_resources_of_dev(const struct pnp_dev *dev)
continue; /* invalid */
reserve_range(dev->dev.bus_id, pnp_port_start(dev, i),
- pnp_port_end(dev, i), 1);
+ pnp_port_end(dev, i), 1);
}
for (i = 0; i < PNP_MAX_MEM; i++) {
@@ -82,24 +82,22 @@ static void reserve_resources_of_dev(const struct pnp_dev *dev)
continue;
reserve_range(dev->dev.bus_id, pnp_mem_start(dev, i),
- pnp_mem_end(dev, i), 0);
+ pnp_mem_end(dev, i), 0);
}
-
- return;
}
-static int system_pnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id)
+static int system_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id)
{
reserve_resources_of_dev(dev);
return 0;
}
static struct pnp_driver system_pnp_driver = {
- .name = "system",
- .id_table = pnp_dev_table,
- .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
- .probe = system_pnp_probe,
- .remove = NULL,
+ .name = "system",
+ .id_table = pnp_dev_table,
+ .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
+ .probe = system_pnp_probe,
};
static int __init pnp_system_init(void)
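
reserve_range() above claims an inclusive port or memory window, so the length handed to request_region()/request_mem_region() is end - start + 1, and the outcome of the request only changes the wording of the log line. A standalone sketch with a stub in place of the two kernel request calls:

#include <stdio.h>

/* stand-in for request_region()/request_mem_region(): NULL means busy */
static void *stub_request(unsigned long start, unsigned long len)
{
	(void)start;
	return len ? (void *)1 : NULL;
}

static void reserve_range(const char *pnpid, unsigned long start,
			  unsigned long end, int port)
{
	/* the range is inclusive, hence the +1 */
	void *res = stub_request(start, end - start + 1);

	printf("pnp: %s: %s range 0x%lx-0x%lx %s reserved\n",
	       pnpid, port ? "ioport" : "iomem", start, end,
	       res ? "has been" : "could not be");
}

int main(void)
{
	reserve_range("PNP0c02", 0x4d0, 0x4d1, 1);
	return 0;
}
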
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 7ede9e72536..d3a33aa2696 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -15,34 +15,36 @@ rtc-core-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o
rtc-core-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o
rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
+# Keep the list ordered.
+
+obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
+obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
+obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
-obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
-obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
-obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
-obj-$(CONFIG_RTC_DRV_AT32AP700X) += rtc-at32ap700x.o
+obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
+obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
+obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
+obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
+obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
+obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
+obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
+obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
+obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
+obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
+obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
+obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
-obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
-obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
-obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
-obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
-obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
-obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
-obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
-obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
-obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
-obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
-obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
-obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
-obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
-obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
-obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
-obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
+obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
+obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
+obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
+obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
+obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 8b3cd31d6a6..10ab3b71ffc 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -46,6 +46,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
{
struct rtc_device *rtc = to_rtc_device(dev);
struct rtc_time tm;
+ struct timespec ts = current_kernel_time();
if (strncmp(rtc->dev.bus_id,
CONFIG_RTC_HCTOSYS_DEVICE,
@@ -57,8 +58,8 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
/* RTC precision is 1 second; adjust delta for avg 1/2 sec err */
set_normalized_timespec(&delta,
- xtime.tv_sec - oldtime,
- xtime.tv_nsec - (NSEC_PER_SEC >> 1));
+ ts.tv_sec - oldtime,
+ ts.tv_nsec - (NSEC_PER_SEC >> 1));
return 0;
}
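
The suspend hook above saves the difference between kernel time and the RTC as a normalized timespec, knocking half a second off the nanoseconds because a 1-second-resolution RTC is, on average, half a second behind the moment it was read. A standalone sketch of that normalization, assuming set_normalized_timespec() simply carries out-of-range nanoseconds into the seconds field:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct ts { long long sec; long nsec; };

/* keep nsec within [0, NSEC_PER_SEC) by carrying into sec */
static struct ts normalized(long long sec, long nsec)
{
	struct ts t = { sec, nsec };

	while (t.nsec < 0) {
		t.nsec += NSEC_PER_SEC;
		t.sec--;
	}
	while (t.nsec >= NSEC_PER_SEC) {
		t.nsec -= NSEC_PER_SEC;
		t.sec++;
	}
	return t;
}

int main(void)
{
	/* system time 100.25s, RTC read 98s at suspend; subtract the average
	 * half-second RTC rounding error before saving the delta */
	struct ts delta = normalized(100 - 98, 250000000L - NSEC_PER_SEC / 2);

	printf("%lld.%09ld\n", delta.sec, delta.nsec);	/* 1.750000000 */
	return 0;
}
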
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 260ead95991..1aa709dda0d 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -1,6 +1,6 @@
/*
* Blackfin On-Chip Real Time Clock Driver
- * Supports BF531/BF532/BF533/BF534/BF536/BF537
+ * Supports BF53[123]/BF53[467]/BF54[2489]
*
* Copyright 2004-2007 Analog Devices Inc.
*
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 5158a625671..db6f3f0d898 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -352,7 +352,7 @@ read_rtc:
/* oscillator fault? clear flag, and warn */
if (ds1307->regs[DS1307_REG_CONTROL] & DS1338_BIT_OSF) {
i2c_smbus_write_byte_data(client, DS1307_REG_CONTROL,
- ds1307->regs[DS1337_REG_CONTROL]
+ ds1307->regs[DS1307_REG_CONTROL]
& ~DS1338_BIT_OSF);
dev_warn(&client->dev, "SET TIME!\n");
goto read_rtc;
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index f10d3facecb..8288b6b2bf2 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -258,7 +258,8 @@ static const struct rtc_class_ops stk17ta8_rtc_ops = {
.ioctl = stk17ta8_rtc_ioctl,
};
-static ssize_t stk17ta8_nvram_read(struct kobject *kobj, char *buf,
+static ssize_t stk17ta8_nvram_read(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
loff_t pos, size_t size)
{
struct platform_device *pdev =
@@ -272,7 +273,8 @@ static ssize_t stk17ta8_nvram_read(struct kobject *kobj, char *buf,
return count;
}
-static ssize_t stk17ta8_nvram_write(struct kobject *kobj, char *buf,
+static ssize_t stk17ta8_nvram_write(struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
loff_t pos, size_t size)
{
struct platform_device *pdev =
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index bfeca57098f..e6bfce690ca 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1187,7 +1187,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
static void
__dasd_process_blk_queue(struct dasd_device * device)
{
- request_queue_t *queue;
+ struct request_queue *queue;
struct request *req;
struct dasd_ccw_req *cqr;
int nr_queued;
@@ -1740,7 +1740,7 @@ dasd_cancel_req(struct dasd_ccw_req *cqr)
* Dasd request queue function. Called from ll_rw_blk.c
*/
static void
-do_dasd_request(request_queue_t * queue)
+do_dasd_request(struct request_queue * queue)
{
struct dasd_device *device;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 241294cba41..aeda5268244 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -293,7 +293,7 @@ struct dasd_uid {
struct dasd_device {
/* Block device stuff. */
struct gendisk *gdp;
- request_queue_t *request_queue;
+ struct request_queue *request_queue;
spinlock_t request_queue_lock;
struct block_device *bdev;
unsigned int devindex;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 35765f6a86e..4d8798bacf9 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -621,7 +621,7 @@ out:
}
static int
-dcssblk_make_request(request_queue_t *q, struct bio *bio)
+dcssblk_make_request(struct request_queue *q, struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
struct bio_vec *bvec;
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index a04d9120cef..354a060e5be 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -191,7 +191,7 @@ static unsigned long __init xpram_highest_page_index(void)
/*
* Block device make request function.
*/
-static int xpram_make_request(request_queue_t *q, struct bio *bio)
+static int xpram_make_request(struct request_queue *q, struct bio *bio)
{
xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec *bvec;
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 3f36cb3910e..643033890e3 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -44,15 +44,9 @@ config CCW_CONSOLE
depends on TN3215_CONSOLE || TN3270_CONSOLE
default y
-config SCLP
- bool "Support for SCLP"
- depends on S390
- help
- Include support for the SCLP interface to the service element.
-
config SCLP_TTY
bool "Support for SCLP line mode terminal"
- depends on SCLP
+ depends on S390
help
Include support for IBM SCLP line-mode terminals.
@@ -65,7 +59,7 @@ config SCLP_CONSOLE
config SCLP_VT220_TTY
bool "Support for SCLP VT220-compatible terminal"
- depends on SCLP
+ depends on S390
help
Include support for an IBM SCLP VT220-compatible terminal.
@@ -78,7 +72,7 @@ config SCLP_VT220_CONSOLE
config SCLP_CPI
tristate "Control-Program Identification"
- depends on SCLP
+ depends on S390
help
This option enables the hardware console interface for system
identification. This is commonly used for workload management and
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 743944ad61e..4f2f81b16cf 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -147,8 +147,7 @@ raw3270_request_alloc(size_t size)
* Allocate a new 3270 ccw request from bootmem. Only works very
* early in the boot process. Only con3270.c should be using this.
*/
-struct raw3270_request *
-raw3270_request_alloc_bootmem(size_t size)
+struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
{
struct raw3270_request *rq;
@@ -848,8 +847,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
/*
* Setup 3270 device configured as console.
*/
-struct raw3270 *
-raw3270_setup_console(struct ccw_device *cdev)
+struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
{
struct raw3270 *rp;
char *ascebc;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 726334757bb..40cd21bc5cc 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -621,11 +621,24 @@ sclp_vt220_flush_buffer(struct tty_struct *tty)
/*
* Initialize all relevant components and register driver with system.
*/
-static int
-__sclp_vt220_init(int early)
+static void __init __sclp_vt220_cleanup(void)
+{
+ struct list_head *page, *p;
+
+ list_for_each_safe(page, p, &sclp_vt220_empty) {
+ list_del(page);
+ if (slab_is_available())
+ free_page((unsigned long) page);
+ else
+ free_bootmem((unsigned long) page, PAGE_SIZE);
+ }
+}
+
+static int __init __sclp_vt220_init(void)
{
void *page;
int i;
+ int num_pages;
if (sclp_vt220_initialized)
return 0;
@@ -642,13 +655,16 @@ __sclp_vt220_init(int early)
sclp_vt220_flush_later = 0;
/* Allocate pages for output buffering */
- for (i = 0; i < (early ? MAX_CONSOLE_PAGES : MAX_KMEM_PAGES); i++) {
- if (early)
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- else
+ num_pages = slab_is_available() ? MAX_KMEM_PAGES : MAX_CONSOLE_PAGES;
+ for (i = 0; i < num_pages; i++) {
+ if (slab_is_available())
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!page)
+ else
+ page = alloc_bootmem_low_pages(PAGE_SIZE);
+ if (!page) {
+ __sclp_vt220_cleanup();
return -ENOMEM;
+ }
list_add_tail((struct list_head *) page, &sclp_vt220_empty);
}
return 0;
@@ -662,14 +678,13 @@ static const struct tty_operations sclp_vt220_ops = {
.flush_chars = sclp_vt220_flush_chars,
.write_room = sclp_vt220_write_room,
.chars_in_buffer = sclp_vt220_chars_in_buffer,
- .flush_buffer = sclp_vt220_flush_buffer
+ .flush_buffer = sclp_vt220_flush_buffer,
};
/*
* Register driver with SCLP and Linux and initialize internal tty structures.
*/
-static int __init
-sclp_vt220_tty_init(void)
+static int __init sclp_vt220_tty_init(void)
{
struct tty_driver *driver;
int rc;
@@ -679,18 +694,15 @@ sclp_vt220_tty_init(void)
driver = alloc_tty_driver(1);
if (!driver)
return -ENOMEM;
- rc = __sclp_vt220_init(0);
- if (rc) {
- put_tty_driver(driver);
- return rc;
- }
+ rc = __sclp_vt220_init();
+ if (rc)
+ goto out_driver;
rc = sclp_register(&sclp_vt220_register);
if (rc) {
printk(KERN_ERR SCLP_VT220_PRINT_HEADER
"could not register tty - "
"sclp_register returned %d\n", rc);
- put_tty_driver(driver);
- return rc;
+ goto out_init;
}
driver->owner = THIS_MODULE;
@@ -709,14 +721,20 @@ sclp_vt220_tty_init(void)
printk(KERN_ERR SCLP_VT220_PRINT_HEADER
"could not register tty - "
"tty_register_driver returned %d\n", rc);
- put_tty_driver(driver);
- return rc;
+ goto out_sclp;
}
sclp_vt220_driver = driver;
return 0;
-}
-module_init(sclp_vt220_tty_init);
+out_sclp:
+ sclp_unregister(&sclp_vt220_register);
+out_init:
+ __sclp_vt220_cleanup();
+out_driver:
+ put_tty_driver(driver);
+ return rc;
+}
+__initcall(sclp_vt220_tty_init);
#ifdef CONFIG_SCLP_VT220_CONSOLE
@@ -762,7 +780,7 @@ sclp_vt220_con_init(void)
if (!CONSOLE_IS_SCLP)
return 0;
- rc = __sclp_vt220_init(1);
+ rc = __sclp_vt220_init();
if (rc)
return rc;
/* Attach linux console */
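
The reworked sclp_vt220_tty_init() above uses the usual staged-goto unwind: each failure jumps to a label that undoes only what was already set up (sclp_unregister, the page-list cleanup, put_tty_driver), and success returns before reaching any label. A standalone sketch of the shape of that pattern, with malloc/free standing in for the real allocation and registration calls:

#include <stdio.h>
#include <stdlib.h>

static int init_three_stages(void)
{
	void *driver, *pages, *reg;
	int rc = -1;

	driver = malloc(16);		/* stands in for alloc_tty_driver() */
	if (!driver)
		goto out;
	pages = malloc(16);		/* stands in for __sclp_vt220_init() */
	if (!pages)
		goto out_driver;
	reg = malloc(16);		/* stands in for sclp_register() */
	if (!reg)
		goto out_pages;

	printf("all stages up\n");
	return 0;			/* success: everything stays live */

out_pages:
	free(pages);
out_driver:
	free(driver);
out:
	return rc;
}

int main(void)
{
	return init_three_stages() ? 1 : 0;
}
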
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 3b52f5c1dbe..dddf8d62c15 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -188,7 +188,7 @@ struct tape_blk_data
{
struct tape_device * device;
/* Block device request queue. */
- request_queue_t * request_queue;
+ struct request_queue * request_queue;
spinlock_t request_queue_lock;
/* Task to move entries from block request to CCS request queue. */
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index dd0ecaed592..eeb92e2ed0c 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -147,7 +147,7 @@ static void
tapeblock_requeue(struct work_struct *work) {
struct tape_blk_data * blkdat;
struct tape_device * device;
- request_queue_t * queue;
+ struct request_queue * queue;
int nr_queued;
struct request * req;
struct list_head * l;
@@ -194,7 +194,7 @@ tapeblock_requeue(struct work_struct *work) {
* Tape request queue function. Called from ll_rw_blk.c
*/
static void
-tapeblock_request_fn(request_queue_t *queue)
+tapeblock_request_fn(struct request_queue *queue)
{
struct tape_device *device;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index e90b0f84619..161867cebd8 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -486,7 +486,7 @@ static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
}
if (rc)
goto fail;
- if (reclen)
+ if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
len = min(count - copied, PAGE_SIZE - res);
if (copy_to_user(ubuf + copied, buf + res, len)) {
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index ec0404874fa..bd5f16f80bf 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -51,7 +51,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to,
to = from;
if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) {
- printk (KERN_WARNING "Invalid blacklist range "
+ printk (KERN_WARNING "cio: Invalid blacklist range "
"0.%x.%04x to 0.%x.%04x, skipping\n",
ssid, from, ssid, to);
return;
@@ -119,7 +119,7 @@ blacklist_busid(char **str, int *id0, int *ssid, int *devno)
return 0;
confused:
strsep(str, ",\n");
- printk(KERN_WARNING "Invalid cio_ignore parameter '%s'\n", sav);
+ printk(KERN_WARNING "cio: Invalid cio_ignore parameter '%s'\n", sav);
return 1;
}
@@ -166,22 +166,19 @@ blacklist_parse_parameters (char *str, range_action action)
continue;
}
if (*str == '-') {
- printk(KERN_WARNING "invalid cio_ignore "
+ printk(KERN_WARNING "cio: invalid cio_ignore "
"parameter '%s'\n",
strsep(&str, ",\n"));
continue;
}
if ((from_id0 != to_id0) ||
(from_ssid != to_ssid)) {
- printk(KERN_WARNING "invalid cio_ignore range "
- "%x.%x.%04x-%x.%x.%04x\n",
- from_id0, from_ssid, from,
- to_id0, to_ssid, to);
+ printk(KERN_WARNING "cio: invalid cio_ignore "
+ "range %x.%x.%04x-%x.%x.%04x\n",
+ from_id0, from_ssid, from,
+ to_id0, to_ssid, to);
continue;
}
- pr_debug("blacklist_setup: adding range "
- "from %x.%x.%04x to %x.%x.%04x\n",
- from_id0, from_ssid, from, to_id0, to_ssid, to);
blacklist_range (ra, from, to, to_ssid);
}
}
@@ -239,7 +236,7 @@ blacklist_parse_proc_parameters (char *buf)
*/
blacklist_parse_parameters (buf + 4, add);
} else {
- printk (KERN_WARNING "cio_ignore: Parse error; \n"
+ printk (KERN_WARNING "cio: cio_ignore: Parse error; \n"
KERN_WARNING "try using 'free all|<devno-range>,"
"<devno-range>,...'\n"
KERN_WARNING "or 'add <devno-range>,"
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index e5ccda63e88..b0a18f5176a 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -359,7 +359,6 @@ ccwgroup_probe (struct device *dev)
if ((ret = device_create_file(dev, &dev_attr_online)))
return ret;
- pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id);
ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
if (ret)
device_remove_file(dev, &dev_attr_online);
@@ -376,8 +375,6 @@ ccwgroup_remove (struct device *dev)
gdev = to_ccwgroupdev(dev);
gdrv = to_ccwgroupdrv(dev->driver);
- pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id);
-
device_remove_file(dev, &dev_attr_online);
if (gdrv && gdrv->remove)
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index b57d93d986c..920dd71e643 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -121,14 +121,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
CIO_TRACE_EVENT( 2, dbf_text);
status = chp_get_status(chpid);
- if (status < 0) {
- printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n",
- chpid.cssid, chpid.id);
- return -EINVAL;
- }
-
if (!on && !status) {
- printk(KERN_ERR "chpid %x.%02x is already offline\n",
+ printk(KERN_ERR "cio: chpid %x.%02x is already offline\n",
chpid.cssid, chpid.id);
return -EINVAL;
}
@@ -421,21 +415,14 @@ int chp_new(struct chp_id chpid)
if (ret)
goto out_free;
} else {
- static int msg_done;
-
- if (!msg_done) {
- printk(KERN_WARNING "cio: Channel measurements not "
- "available, continuing.\n");
- msg_done = 1;
- }
chp->cmg = -1;
}
/* make it known to the system */
ret = device_register(&chp->dev);
if (ret) {
- printk(KERN_WARNING "%s: could not register %x.%02x\n",
- __func__, chpid.cssid, chpid.id);
+ CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
+ chpid.cssid, chpid.id, ret);
goto out_free;
}
ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index ea92ac4d657..597c0c76a2a 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -990,16 +990,20 @@ out:
return ret;
}
-static int __init
-chsc_alloc_sei_area(void)
+int __init chsc_alloc_sei_area(void)
{
sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sei_page)
- printk(KERN_WARNING"Can't allocate page for processing of " \
- "chsc machine checks!\n");
+ CIO_MSG_EVENT(0, "Can't allocate page for processing of "
+ "chsc machine checks!\n");
return (sei_page ? 0 : -ENOMEM);
}
+void __init chsc_free_sei_area(void)
+{
+ kfree(sei_page);
+}
+
int __init
chsc_enable_facility(int operation_code)
{
@@ -1051,8 +1055,6 @@ chsc_enable_facility(int operation_code)
return ret;
}
-subsys_initcall(chsc_alloc_sei_area);
-
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
@@ -1073,8 +1075,8 @@ chsc_determine_css_characteristics(void)
scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scsc_area) {
- printk(KERN_WARNING"cio: Was not able to determine available" \
- "CHSCs due to no memory.\n");
+ CIO_MSG_EVENT(0, "Was not able to determine available"
+ "CHSCs due to no memory.\n");
return -ENOMEM;
}
@@ -1083,15 +1085,15 @@ chsc_determine_css_characteristics(void)
result = chsc(scsc_area);
if (result) {
- printk(KERN_WARNING"cio: Was not able to determine " \
- "available CHSCs, cc=%i.\n", result);
+ CIO_MSG_EVENT(0, "Was not able to determine available CHSCs, "
+ "cc=%i.\n", result);
result = -EIO;
goto exit;
}
if (scsc_area->response.code != 1) {
- printk(KERN_WARNING"cio: Was not able to determine " \
- "available CHSCs.\n");
+ CIO_MSG_EVENT(0, "Was not able to determine "
+ "available CHSCs.\n");
result = -EIO;
goto exit;
}
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 2ad81d11cf7..d1f5db1e69b 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -79,6 +79,8 @@ extern int chsc_get_ssd_info(struct subchannel_id schid,
struct chsc_ssd_info *ssd);
extern int chsc_determine_css_characteristics(void);
extern int css_characteristics_avail;
+extern int chsc_alloc_sei_area(void);
+extern void chsc_free_sei_area(void);
extern int chsc_enable_facility(int);
struct channel_subsystem;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index ea1defba569..f2708d65be5 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -47,8 +47,8 @@ cio_setup (char *parm)
else if (!strcmp (parm, "no"))
cio_show_msg = 0;
else
- printk (KERN_ERR "cio_setup : invalid cio_msg parameter '%s'",
- parm);
+ printk(KERN_ERR "cio: cio_setup: "
+ "invalid cio_msg parameter '%s'", parm);
return 1;
}
@@ -80,7 +80,6 @@ cio_debug_init (void)
goto out_unregister;
debug_register_view (cio_debug_crw_id, &debug_sprintf_view);
debug_set_level (cio_debug_crw_id, 2);
- pr_debug("debugging initialized\n");
return 0;
out_unregister:
@@ -90,7 +89,7 @@ out_unregister:
debug_unregister (cio_debug_trace_id);
if (cio_debug_crw_id)
debug_unregister (cio_debug_crw_id);
- pr_debug("could not initialize debugging\n");
+ printk(KERN_WARNING"cio: could not initialize debugging\n");
return -1;
}
@@ -568,7 +567,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
*/
if (sch->st != 0) {
CIO_DEBUG(KERN_INFO, 0,
- "Subchannel 0.%x.%04x reports "
+ "cio: Subchannel 0.%x.%04x reports "
"non-I/O subchannel type %04X\n",
sch->schid.ssid, sch->schid.sch_no, sch->st);
/* We stop here for non-io subchannels. */
@@ -601,7 +600,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
sch->lpm = sch->schib.pmcw.pam & sch->opm;
CIO_DEBUG(KERN_INFO, 0,
- "Detected device %04x on subchannel 0.%x.%04X"
+ "cio: Detected device %04x on subchannel 0.%x.%04X"
" - PIM = %02X, PAM = %02X, POM = %02X\n",
sch->schib.pmcw.dev, sch->schid.ssid,
sch->schid.sch_no, sch->schib.pmcw.pim,
@@ -766,7 +765,7 @@ cio_get_console_sch_no(void)
/* unlike in 2.4, we cannot autoprobe here, since
* the channel subsystem is not fully initialized.
* With some luck, the HWC console can take over */
- printk(KERN_WARNING "No ccw console found!\n");
+ printk(KERN_WARNING "cio: No ccw console found!\n");
return -1;
}
return console_irq;
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
index f88844adae1..c9bf8989930 100644
--- a/drivers/s390/cio/cio_debug.h
+++ b/drivers/s390/cio/cio_debug.h
@@ -23,6 +23,8 @@ extern debug_info_t *cio_debug_crw_id;
static inline void
CIO_HEX_EVENT(int level, void *data, int length)
{
+ if (unlikely(!cio_debug_trace_id))
+ return;
while (length > 0) {
debug_event(cio_debug_trace_id, level, data, length);
length -= cio_debug_trace_id->buf_size;
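
CIO_HEX_EVENT() above now bails out early when tracing is not yet initialized and then feeds the buffer to debug_event() in buf_size-sized pieces. The chunking is the interesting part; a standalone sketch that hex-dumps a buffer in fixed-size chunks (the real macro advances by buf_size unconditionally and lets the backend clamp the final short chunk, which the sketch does explicitly):

#include <stdio.h>

/* emit a buffer in fixed-size chunks, as a trace backend with a bounded
 * per-event size would require */
static void hex_event(const unsigned char *data, int length, int chunk)
{
	while (length > 0) {
		int n = length < chunk ? length : chunk;
		int i;

		for (i = 0; i < n; i++)
			printf("%02x", data[i]);
		printf("\n");
		length -= chunk;
		data += chunk;
	}
}

int main(void)
{
	const unsigned char buf[] = "abcdefghij";

	hex_event(buf, sizeof(buf) - 1, 4);	/* three chunks: 4+4+2 bytes */
	return 0;
}
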
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 28abd697be1..02fd00b55e1 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1185,12 +1185,12 @@ static ssize_t cmb_enable_store(struct device *dev, struct device_attribute *att
case '0':
ret = disable_cmf(cdev);
if (ret)
- printk(KERN_INFO "disable_cmf failed (%d)\n", ret);
+ dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret);
break;
case '1':
ret = enable_cmf(cdev);
if (ret && ret != -EBUSY)
- printk(KERN_INFO "enable_cmf failed (%d)\n", ret);
+ dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret);
break;
}
@@ -1280,10 +1280,10 @@ init_cmf(void)
format_string = "basic";
cmbops = &cmbops_basic;
if (cmb_area.num_channels > 4096 || cmb_area.num_channels < 1) {
- printk(KERN_ERR "Basic channel measurement facility"
- " can only use 1 to 4096 devices\n"
+ printk(KERN_ERR "cio: Basic channel measurement "
+ "facility can only use 1 to 4096 devices\n"
KERN_ERR "when the cmf driver is built"
- " as a loadable module\n");
+ " as a loadable module\n");
return 1;
}
break;
@@ -1292,13 +1292,13 @@ init_cmf(void)
cmbops = &cmbops_extended;
break;
default:
- printk(KERN_ERR "Invalid format %d for channel "
+ printk(KERN_ERR "cio: Invalid format %d for channel "
"measurement facility\n", format);
return 1;
}
- printk(KERN_INFO "Channel measurement facility using %s format (%s)\n",
- format_string, detect_string);
+ printk(KERN_INFO "cio: Channel measurement facility using %s "
+ "format (%s)\n", format_string, detect_string);
return 0;
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index dfca0ef139f..1c27a5a06b4 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -109,7 +109,7 @@ css_subchannel_release(struct device *dev)
}
}
-int css_sch_device_register(struct subchannel *sch)
+static int css_sch_device_register(struct subchannel *sch)
{
int ret;
@@ -184,8 +184,8 @@ static int css_register_subchannel(struct subchannel *sch)
/* make it known to the system */
ret = css_sch_device_register(sch);
if (ret) {
- printk (KERN_WARNING "%s: could not register %s\n",
- __func__, sch->dev.bus_id);
+ CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
+ sch->schid.ssid, sch->schid.sch_no, ret);
return ret;
}
return ret;
@@ -371,15 +371,12 @@ static int __init slow_subchannel_init(void)
spin_lock_init(&slow_subchannel_lock);
slow_subchannel_set = idset_sch_new();
if (!slow_subchannel_set) {
- printk(KERN_WARNING "cio: could not allocate slow subchannel "
- "set\n");
+ CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
return -ENOMEM;
}
return 0;
}
-subsys_initcall(slow_subchannel_init);
-
static void css_slow_path_func(struct work_struct *unused)
{
struct subchannel_id schid;
@@ -425,8 +422,8 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
struct subchannel *sch;
int ret;
- CIO_DEBUG(KERN_INFO, 6, "cio: reprobe 0.%x.%04x\n",
- schid.ssid, schid.sch_no);
+ CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
+ schid.ssid, schid.sch_no);
if (need_reprobe)
return -EAGAIN;
@@ -642,9 +639,20 @@ init_channel_subsystem (void)
{
int ret, i;
- if (chsc_determine_css_characteristics() == 0)
+ ret = chsc_determine_css_characteristics();
+ if (ret == -ENOMEM)
+ goto out; /* No need to continue. */
+ if (ret == 0)
css_characteristics_avail = 1;
+ ret = chsc_alloc_sei_area();
+ if (ret)
+ goto out;
+
+ ret = slow_subchannel_init();
+ if (ret)
+ goto out;
+
if ((ret = bus_register(&css_bus_type)))
goto out;
@@ -710,6 +718,10 @@ out_unregister:
out_bus:
bus_unregister(&css_bus_type);
out:
+ chsc_free_sei_area();
+ kfree(slow_subchannel_set);
+ printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
+ ret);
return ret;
}
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index ed7977531c3..5d65e83ca66 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -139,7 +139,6 @@ struct css_driver {
*/
extern struct bus_type css_bus_type;
-extern int css_sch_device_register(struct subchannel *);
extern void css_sch_device_unregister(struct subchannel *);
extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 001682e70f6..297659fa0e2 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -338,15 +338,20 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
rc = device_schedule_callback(&cdev->dev,
ccw_device_remove_orphan_cb);
if (rc)
- dev_info(&cdev->dev, "Couldn't unregister orphan\n");
+ CIO_MSG_EVENT(2, "Couldn't unregister orphan "
+ "0.%x.%04x\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
return;
}
/* Deregister subchannel, which will kill the ccw device. */
rc = device_schedule_callback(cdev->dev.parent,
ccw_device_remove_sch_cb);
if (rc)
- dev_info(&cdev->dev,
- "Couldn't unregister disconnected device\n");
+ CIO_MSG_EVENT(2, "Couldn't unregister disconnected device "
+ "0.%x.%04x\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
}
int
@@ -379,8 +384,10 @@ ccw_device_set_offline(struct ccw_device *cdev)
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else {
- pr_debug("ccw_device_offline returned %d, device %s\n",
- ret, cdev->dev.bus_id);
+ CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
cdev->online = 1;
}
return ret;
@@ -402,8 +409,10 @@ ccw_device_set_online(struct ccw_device *cdev)
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else {
- pr_debug("ccw_device_online returned %d, device %s\n",
- ret, cdev->dev.bus_id);
+ CIO_MSG_EVENT(2, "ccw_device_online returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
return ret;
}
if (cdev->private->state != DEV_STATE_ONLINE)
@@ -417,9 +426,11 @@ ccw_device_set_online(struct ccw_device *cdev)
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- else
- pr_debug("ccw_device_offline returned %d, device %s\n",
- ret, cdev->dev.bus_id);
+ else
+ CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
return (ret == 0) ? -ENODEV : ret;
}
@@ -439,9 +450,10 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
if (cdev->id.cu_type == 0) {
ret = ccw_device_recognition(cdev);
if (ret) {
- printk(KERN_WARNING"Couldn't start recognition "
- "for device %s (ret=%d)\n",
- cdev->dev.bus_id, ret);
+ CIO_MSG_EVENT(0, "Couldn't start recognition "
+ "for device 0.%x.%04x (ret=%d)\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
return ret;
}
wait_event(cdev->private->wait_q,
@@ -461,8 +473,8 @@ static void online_store_handle_online(struct ccw_device *cdev, int force)
if (force && cdev->private->state == DEV_STATE_BOXED) {
ret = ccw_device_stlck(cdev);
if (ret) {
- printk(KERN_WARNING"ccw_device_stlck for device %s "
- "returned %d!\n", cdev->dev.bus_id, ret);
+ dev_warn(&cdev->dev,
+ "ccw_device_stlck returned %d!\n", ret);
return;
}
if (cdev->id.cu_type == 0)
@@ -893,8 +905,10 @@ io_subchannel_register(struct work_struct *work)
ret = device_reprobe(&cdev->dev);
if (ret)
/* We can't do much here. */
- dev_info(&cdev->dev, "device_reprobe() returned"
- " %d\n", ret);
+ CIO_MSG_EVENT(2, "device_reprobe() returned"
+ " %d for 0.%x.%04x\n", ret,
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
}
goto out;
}
@@ -907,8 +921,9 @@ io_subchannel_register(struct work_struct *work)
/* make it known to the system */
ret = ccw_device_register(cdev);
if (ret) {
- printk (KERN_WARNING "%s: could not register %s\n",
- __func__, cdev->dev.bus_id);
+ CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
put_device(&cdev->dev);
spin_lock_irqsave(sch->lock, flags);
sch->dev.driver_data = NULL;
@@ -1361,7 +1376,6 @@ ccw_device_remove (struct device *dev)
struct ccw_driver *cdrv = cdev->drv;
int ret;
- pr_debug("removing device %s\n", cdev->dev.bus_id);
if (cdrv->remove)
cdrv->remove(cdev);
if (cdev->online) {
@@ -1374,8 +1388,10 @@ ccw_device_remove (struct device *dev)
dev_fsm_final_state(cdev));
else
//FIXME: we can't fail!
- pr_debug("ccw_device_offline returned %d, device %s\n",
- ret, cdev->dev.bus_id);
+ CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
}
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 6bba8092957..8633dc53769 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -268,7 +268,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
switch (state) {
case DEV_STATE_NOT_OPER:
CIO_DEBUG(KERN_WARNING, 2,
- "SenseID : unknown device %04x on subchannel "
+ "cio: SenseID : unknown device %04x on subchannel "
"0.%x.%04x\n", cdev->private->dev_id.devno,
sch->schid.ssid, sch->schid.sch_no);
break;
@@ -293,7 +293,8 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
return;
}
/* Issue device info message. */
- CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
+ CIO_DEBUG(KERN_INFO, 2,
+ "cio: SenseID : device 0.%x.%04x reports: "
"CU Type/Mod = %04X/%02X, Dev Type/Mod = "
"%04X/%02X\n",
cdev->private->dev_id.ssid,
@@ -303,7 +304,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
break;
case DEV_STATE_BOXED:
CIO_DEBUG(KERN_WARNING, 2,
- "SenseID : boxed device %04x on subchannel "
+ "cio: SenseID : boxed device %04x on subchannel "
"0.%x.%04x\n", cdev->private->dev_id.devno,
sch->schid.ssid, sch->schid.sch_no);
break;
@@ -388,7 +389,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
if (state == DEV_STATE_BOXED)
CIO_DEBUG(KERN_WARNING, 2,
- "Boxed device %04x on subchannel %04x\n",
+ "cio: Boxed device %04x on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
if (cdev->private->flags.donotify) {
@@ -946,9 +947,10 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
/* Basic sense hasn't started. Try again. */
ccw_device_do_sense(cdev, irb);
else {
- printk(KERN_INFO "Huh? %s(%s): unsolicited "
- "interrupt...\n",
- __FUNCTION__, cdev->dev.bus_id);
+ CIO_MSG_EVENT(2, "Huh? 0.%x.%04x: unsolicited "
+ "interrupt during w4sense...\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
if (cdev->handler)
cdev->handler (cdev, 0, irb);
}
@@ -1215,8 +1217,8 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
- printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
- cdev->private->state, dev_event);
+ CIO_MSG_EVENT(0, "dev_jumptable[%i][%i] == NULL\n",
+ cdev->private->state, dev_event);
BUG();
}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index a5d263fb55a..14eba854b15 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -288,253 +288,6 @@ ccw_device_get_path_mask(struct ccw_device *cdev)
return sch->lpm;
}
-static void
-ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
-{
- if (!ip)
- /* unsolicited interrupt */
- return;
-
- /* Abuse intparm for error reporting. */
- if (IS_ERR(irb))
- cdev->private->intparm = -EIO;
- else if (irb->scsw.cc == 1)
- /* Retry for deferred condition code. */
- cdev->private->intparm = -EAGAIN;
- else if ((irb->scsw.dstat !=
- (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
- (irb->scsw.cstat != 0)) {
- /*
- * We didn't get channel end / device end. Check if path
- * verification has been started; we can retry after it has
- * finished. We also retry unit checks except for command reject
- * or intervention required. Also check for long busy
- * conditions.
- */
- if (cdev->private->flags.doverify ||
- cdev->private->state == DEV_STATE_VERIFY)
- cdev->private->intparm = -EAGAIN;
- else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
- !(irb->ecw[0] &
- (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
- cdev->private->intparm = -EAGAIN;
- else if ((irb->scsw.dstat & DEV_STAT_ATTENTION) &&
- (irb->scsw.dstat & DEV_STAT_DEV_END) &&
- (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP))
- cdev->private->intparm = -EAGAIN;
- else
- cdev->private->intparm = -EIO;
-
- } else
- cdev->private->intparm = 0;
- wake_up(&cdev->private->wait_q);
-}
-
-static int
-__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
-{
- int ret;
- struct subchannel *sch;
-
- sch = to_subchannel(cdev->dev.parent);
- do {
- ccw_device_set_timeout(cdev, 60 * HZ);
- ret = cio_start (sch, ccw, lpm);
- if (ret != 0)
- ccw_device_set_timeout(cdev, 0);
- if (ret == -EBUSY) {
- /* Try again later. */
- spin_unlock_irq(sch->lock);
- msleep(10);
- spin_lock_irq(sch->lock);
- continue;
- }
- if (ret != 0)
- /* Non-retryable error. */
- break;
- /* Wait for end of request. */
- cdev->private->intparm = magic;
- spin_unlock_irq(sch->lock);
- wait_event(cdev->private->wait_q,
- (cdev->private->intparm == -EIO) ||
- (cdev->private->intparm == -EAGAIN) ||
- (cdev->private->intparm == 0));
- spin_lock_irq(sch->lock);
- /* Check at least for channel end / device end */
- if (cdev->private->intparm == -EIO) {
- /* Non-retryable error. */
- ret = -EIO;
- break;
- }
- if (cdev->private->intparm == 0)
- /* Success. */
- break;
- /* Try again later. */
- spin_unlock_irq(sch->lock);
- msleep(10);
- spin_lock_irq(sch->lock);
- } while (1);
-
- return ret;
-}
-
-/**
- * read_dev_chars() - read device characteristics
- * @param cdev target ccw device
- * @param buffer pointer to buffer for rdc data
- * @param length size of rdc data
- * @returns 0 for success, negative error value on failure
- *
- * Context:
- * called for online device, lock not held
- **/
-int
-read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
-{
- void (*handler)(struct ccw_device *, unsigned long, struct irb *);
- struct subchannel *sch;
- int ret;
- struct ccw1 *rdc_ccw;
-
- if (!cdev)
- return -ENODEV;
- if (!buffer || !length)
- return -EINVAL;
- sch = to_subchannel(cdev->dev.parent);
-
- CIO_TRACE_EVENT (4, "rddevch");
- CIO_TRACE_EVENT (4, sch->dev.bus_id);
-
- rdc_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
- if (!rdc_ccw)
- return -ENOMEM;
- rdc_ccw->cmd_code = CCW_CMD_RDC;
- rdc_ccw->count = length;
- rdc_ccw->flags = CCW_FLAG_SLI;
- ret = set_normalized_cda (rdc_ccw, (*buffer));
- if (ret != 0) {
- kfree(rdc_ccw);
- return ret;
- }
-
- spin_lock_irq(sch->lock);
- /* Save interrupt handler. */
- handler = cdev->handler;
- /* Temporarily install own handler. */
- cdev->handler = ccw_device_wake_up;
- if (cdev->private->state != DEV_STATE_ONLINE)
- ret = -ENODEV;
- else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
- !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
- cdev->private->flags.doverify)
- ret = -EBUSY;
- else
- /* 0x00D9C4C3 == ebcdic "RDC" */
- ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);
-
- /* Restore interrupt handler. */
- cdev->handler = handler;
- spin_unlock_irq(sch->lock);
-
- clear_normalized_cda (rdc_ccw);
- kfree(rdc_ccw);
-
- return ret;
-}
-
-/*
- * Read Configuration data using path mask
- */
-int
-read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lpm)
-{
- void (*handler)(struct ccw_device *, unsigned long, struct irb *);
- struct subchannel *sch;
- struct ciw *ciw;
- char *rcd_buf;
- int ret;
- struct ccw1 *rcd_ccw;
-
- if (!cdev)
- return -ENODEV;
- if (!buffer || !length)
- return -EINVAL;
- sch = to_subchannel(cdev->dev.parent);
-
- CIO_TRACE_EVENT (4, "rdconf");
- CIO_TRACE_EVENT (4, sch->dev.bus_id);
-
- /*
- * scan for RCD command in extended SenseID data
- */
- ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
- if (!ciw || ciw->cmd == 0)
- return -EOPNOTSUPP;
-
- /* Adjust requested path mask to excluded varied off paths. */
- if (lpm) {
- lpm &= sch->opm;
- if (lpm == 0)
- return -EACCES;
- }
-
- rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
- if (!rcd_ccw)
- return -ENOMEM;
- rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
- if (!rcd_buf) {
- kfree(rcd_ccw);
- return -ENOMEM;
- }
- rcd_ccw->cmd_code = ciw->cmd;
- rcd_ccw->cda = (__u32) __pa (rcd_buf);
- rcd_ccw->count = ciw->count;
- rcd_ccw->flags = CCW_FLAG_SLI;
-
- spin_lock_irq(sch->lock);
- /* Save interrupt handler. */
- handler = cdev->handler;
- /* Temporarily install own handler. */
- cdev->handler = ccw_device_wake_up;
- if (cdev->private->state != DEV_STATE_ONLINE)
- ret = -ENODEV;
- else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
- !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
- cdev->private->flags.doverify)
- ret = -EBUSY;
- else
- /* 0x00D9C3C4 == ebcdic "RCD" */
- ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);
-
- /* Restore interrupt handler. */
- cdev->handler = handler;
- spin_unlock_irq(sch->lock);
-
- /*
- * on success we update the user input parms
- */
- if (ret) {
- kfree (rcd_buf);
- *buffer = NULL;
- *length = 0;
- } else {
- *length = ciw->count;
- *buffer = rcd_buf;
- }
- kfree(rcd_ccw);
-
- return ret;
-}
-
-/*
- * Read Configuration data
- */
-int
-read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
-{
- return read_conf_data_lpm (cdev, buffer, length, 0);
-}
-
/*
* Try to break the lock on a boxed device.
*/
@@ -635,12 +388,6 @@ _ccw_device_get_subchannel_number(struct ccw_device *cdev)
return cdev->private->schid.sch_no;
}
-int
-_ccw_device_get_device_number(struct ccw_device *cdev)
-{
- return cdev->private->dev_id.devno;
-}
-
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
@@ -655,9 +402,5 @@ EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
-EXPORT_SYMBOL(read_conf_data);
-EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
-EXPORT_SYMBOL(_ccw_device_get_device_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
-EXPORT_SYMBOL_GPL(read_conf_data_lpm);
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index b20fd068173..92e8a37b502 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -674,7 +674,7 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg)
int first = 1;
int i;
unsigned long duration;
- struct timespec done_stamp = xtime;
+ struct timespec done_stamp = current_kernel_time();
DBF_TEXT(trace, 4, __FUNCTION__);
@@ -730,7 +730,7 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg)
spin_unlock(&ch->collect_lock);
ch->ccw[1].count = ch->trans_skb->len;
fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
- ch->prof.send_stamp = xtime;
+ ch->prof.send_stamp = current_kernel_time();
rc = ccw_device_start(ch->cdev, &ch->ccw[0],
(unsigned long) ch, 0xff, 0);
ch->prof.doios_multi++;
@@ -2281,7 +2281,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
fsm_newstate(ch->fsm, CH_STATE_TX);
fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
- ch->prof.send_stamp = xtime;
+ ch->prof.send_stamp = current_kernel_time();
rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
(unsigned long) ch, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 3d28e1a5bf7..26888947433 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -753,7 +753,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
header.next = 0;
memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
- conn->prof.send_stamp = xtime;
+ conn->prof.send_stamp = current_kernel_time();
txmsg.class = 0;
txmsg.tag = 0;
rc = iucv_message_send(conn->path, &txmsg, 0, 0,
@@ -1185,7 +1185,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
fsm_newstate(conn->fsm, CONN_STATE_TX);
- conn->prof.send_stamp = xtime;
+ conn->prof.send_stamp = current_kernel_time();
msg.tag = 1;
msg.class = 0;
diff --git a/drivers/sbus/char/Kconfig b/drivers/sbus/char/Kconfig
index 35a73168333..400c65bfb8c 100644
--- a/drivers/sbus/char/Kconfig
+++ b/drivers/sbus/char/Kconfig
@@ -15,6 +15,7 @@ config SUN_OPENPROMIO
config SUN_MOSTEK_RTC
tristate "Mostek real time clock support"
+ depends on SPARC32
help
The Mostek RTC chip is used on all known Sun computers except
some JavaStations. For a JavaStation you need to say Y both here
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 5157a2abc58..4b7079fdc10 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -185,7 +185,7 @@ static void jsfd_read(char *buf, unsigned long p, size_t togo) {
}
}
-static void jsfd_do_request(request_queue_t *q)
+static void jsfd_do_request(struct request_queue *q)
{
struct request *req;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index da63c544919..21c075d44db 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -654,7 +654,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
int bytes, int requeue)
{
- request_queue_t *q = cmd->device->request_queue;
+ struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
unsigned long flags;
@@ -818,7 +818,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
int this_count = cmd->request_bufflen;
- request_queue_t *q = cmd->device->request_queue;
+ struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
int clear_errors = 1;
struct scsi_sense_hdr sshdr;
@@ -1038,7 +1038,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
return BLKPREP_KILL;
}
-static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+static int scsi_issue_flush_fn(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
struct scsi_device *sdev = q->queuedata;
@@ -1340,7 +1340,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
/*
* Kill a request for a dead device
*/
-static void scsi_kill_request(struct request *req, request_queue_t *q)
+static void scsi_kill_request(struct request *req, struct request_queue *q)
{
struct scsi_cmnd *cmd = req->special;
struct scsi_device *sdev = cmd->device;
@@ -2119,7 +2119,7 @@ EXPORT_SYMBOL(scsi_target_resume);
int
scsi_internal_device_block(struct scsi_device *sdev)
{
- request_queue_t *q = sdev->request_queue;
+ struct request_queue *q = sdev->request_queue;
unsigned long flags;
int err = 0;
@@ -2159,7 +2159,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
- request_queue_t *q = sdev->request_queue;
+ struct request_queue *q = sdev->request_queue;
int err;
unsigned long flags;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 424d557284a..e21c7142a3e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -814,7 +814,7 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector)
return ret;
}
-static void sd_prepare_flush(request_queue_t *q, struct request *rq)
+static void sd_prepare_flush(struct request_queue *q, struct request *rq)
{
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1285,7 +1285,7 @@ got_data:
*/
int hard_sector = sector_size;
sector_t sz = (sdkp->capacity/2) * (hard_sector/256);
- request_queue_t *queue = sdp->request_queue;
+ struct request_queue *queue = sdp->request_queue;
sector_t mb = sz;
blk_queue_hardsect_size(queue, hard_sector);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index e7b6a7fde1c..902eb11ffe8 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -624,7 +624,7 @@ static void get_sectorsize(struct scsi_cd *cd)
unsigned char *buffer;
int the_result, retries = 3;
int sector_size;
- request_queue_t *queue;
+ struct request_queue *queue;
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer)
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index cad426c9711..aad4012bbb3 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -33,7 +33,6 @@
#include <linux/keyboard.h>
#include <linux/init.h>
#include <linux/pm.h>
-#include <linux/pm_legacy.h>
#include <linux/bitops.h>
#include <linux/delay.h>
@@ -401,9 +400,9 @@ irqreturn_t rs_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void do_softint(void *private)
+static void do_softint(struct work_struct *work)
{
- struct m68k_serial *info = (struct m68k_serial *) private;
+ struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue);
struct tty_struct *tty;
tty = info->tty;
@@ -425,9 +424,9 @@ static void do_softint(void *private)
* do_serial_hangup() -> tty->hangup() -> rs_hangup()
*
*/
-static void do_serial_hangup(void *private)
+static void do_serial_hangup(struct work_struct *work)
{
- struct m68k_serial *info = (struct m68k_serial *) private;
+ struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue_hangup);
struct tty_struct *tty;
tty = info->tty;
@@ -1324,59 +1323,6 @@ static void show_serial_version(void)
printk("MC68328 serial driver version 1.00\n");
}
-#ifdef CONFIG_PM_LEGACY
-/* Serial Power management
- * The console (currently fixed at line 0) is a special case for power
- * management because the kernel is so chatty. The console will be
- * explicitly disabled my our power manager as the last minute, so we won't
- * mess with it here.
- */
-static struct pm_dev *serial_pm[NR_PORTS];
-
-static int serial_pm_callback(struct pm_dev *dev, pm_request_t request, void *data)
-{
- struct m68k_serial *info = (struct m68k_serial *)dev->data;
-
- if(info == NULL)
- return -1;
-
- /* special case for line 0 - pm restores it */
- if(info->line == 0)
- return 0;
-
- switch (request) {
- case PM_SUSPEND:
- shutdown(info);
- break;
-
- case PM_RESUME:
- startup(info);
- break;
- }
- return 0;
-}
-
-void shutdown_console(void)
-{
- struct m68k_serial *info = &m68k_soft[0];
-
- /* HACK: wait a bit for any pending printk's to be dumped */
- {
- int i = 10000;
- while(i--);
- }
-
- shutdown(info);
-}
-
-void startup_console(void)
-{
- struct m68k_serial *info = &m68k_soft[0];
- startup(info);
-}
-#endif /* CONFIG_PM_LEGACY */
-
-
static const struct tty_operations rs_ops = {
.open = rs_open,
.close = rs_close,
@@ -1444,8 +1390,8 @@ rs68328_init(void)
info->event = 0;
info->count = 0;
info->blocked_open = 0;
- INIT_WORK(&info->tqueue, do_softint, info);
- INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info);
+ INIT_WORK(&info->tqueue, do_softint);
+ INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
info->line = i;
@@ -1467,11 +1413,6 @@ rs68328_init(void)
IRQ_FLG_STD,
"M68328_UART", NULL))
panic("Unable to attach 68328 serial interrupt\n");
-#ifdef CONFIG_PM_LEGACY
- serial_pm[i] = pm_register(PM_SYS_DEV, PM_SYS_COM, serial_pm_callback);
- if (serial_pm[i])
- serial_pm[i]->data = info;
-#endif
}
local_irq_restore(flags);
return 0;
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 0b3ec38ae61..2f5a5ac1b27 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2650,8 +2650,9 @@ static int __devinit serial8250_probe(struct platform_device *dev)
ret = serial8250_register_port(&port);
if (ret < 0) {
dev_err(&dev->dev, "unable to register port at index %d "
- "(IO%lx MEM%lx IRQ%d): %d\n", i,
- p->iobase, p->mapbase, p->irq, ret);
+ "(IO%lx MEM%llx IRQ%d): %d\n", i,
+ p->iobase, (unsigned long long)p->mapbase,
+ p->irq, ret);
}
}
return 0;
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c
index 947c20507e1..150cad5c2eb 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/serial/8250_early.c
@@ -151,8 +151,9 @@ static int __init parse_options(struct early_serial8250_device *device, char *op
#else
port->membase = ioremap(port->mapbase, 64);
if (!port->membase) {
- printk(KERN_ERR "%s: Couldn't ioremap 0x%lx\n",
- __FUNCTION__, port->mapbase);
+ printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n",
+ __FUNCTION__,
+ (unsigned long long)port->mapbase);
return -ENOMEM;
}
#endif
@@ -175,9 +176,10 @@ static int __init parse_options(struct early_serial8250_device *device, char *op
device->baud);
}
- printk(KERN_INFO "Early serial console at %s 0x%lx (options '%s')\n",
+ printk(KERN_INFO "Early serial console at %s 0x%llx (options '%s')\n",
mmio ? "MMIO" : "I/O port",
- mmio ? port->mapbase : (unsigned long) port->iobase,
+ mmio ? (unsigned long long) port->mapbase
+ : (unsigned long long) port->iobase,
device->options);
return 0;
}
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 9c57486c2e7..030a6063541 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -626,7 +626,7 @@ static int uart_get_info(struct uart_state *state,
tmp.hub6 = port->hub6;
tmp.io_type = port->iotype;
tmp.iomem_reg_shift = port->regshift;
- tmp.iomem_base = (void *)port->mapbase;
+ tmp.iomem_base = (void *)(unsigned long)port->mapbase;
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
@@ -1666,10 +1666,11 @@ static int uart_line_info(char *buf, struct uart_driver *drv, int i)
return 0;
mmio = port->iotype >= UPIO_MEM;
- ret = sprintf(buf, "%d: uart:%s %s%08lX irq:%d",
+ ret = sprintf(buf, "%d: uart:%s %s%08llX irq:%d",
port->line, uart_type(port),
mmio ? "mmio:0x" : "port:",
- mmio ? port->mapbase : (unsigned long) port->iobase,
+ mmio ? (unsigned long long)port->mapbase
+ : (unsigned long long) port->iobase,
port->irq);
if (port->type == PORT_UNKNOWN) {
@@ -2069,7 +2070,7 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
case UPIO_TSI:
case UPIO_DWAPB:
snprintf(address, sizeof(address),
- "MMIO 0x%lx", port->mapbase);
+ "MMIO 0x%llx", (unsigned long long)port->mapbase);
break;
default:
strlcpy(address, "*unknown*", sizeof(address));
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 7071ff8da63..5cf48123e0e 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -28,7 +28,7 @@
#include <asm/hardware.h>
#include <asm/arch/regs-gpio.h>
-#include <asm/arch/regs-spi.h>
+#include <asm/plat-s3c24xx/regs-spi.h>
#include <asm/arch/spi.h>
struct s3c24xx_spi {
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 564cc9b5182..a7231d171bd 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1571,7 +1571,14 @@ config FB_PM3
config FB_AU1100
bool "Au1100 LCD Driver"
- depends on (FB = y) && EXPERIMENTAL && PCI && MIPS && MIPS_PB1100=y
+ depends on (FB = y) && MIPS && SOC_AU1100
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This is the framebuffer driver for the AMD Au1100 SOC. It can drive
+ various panels and CRTs by passing in kernel cmd line option
+ au1100fb:panel=<name>.
config FB_AU1200
bool "Au1200 LCD Driver"
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index f48e8c534c8..6796ba62c3c 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
+#include <linux/pm.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/console.h>
@@ -458,7 +459,7 @@ static int chipsfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (state.event == pdev->dev.power.power_state.event)
return 0;
- if (state.event != PM_SUSPEND_MEM)
+ if (state.event != PM_EVENT_SUSPEND)
goto done;
acquire_console_sem();
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index 89facb73edf..d292a37ec7d 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -849,7 +849,7 @@ tgafb_clut_imageblit(struct fb_info *info, const struct fb_image *image)
u32 *palette = ((u32 *)info->pseudo_palette);
unsigned long pos, line_length, i, j;
const unsigned char *data;
- void *regs_base, *fb_base;
+ void __iomem *regs_base, *fb_base;
dx = image->dx;
dy = image->dy;
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 763bc73e507..4b696641ce3 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -85,7 +85,7 @@ static struct {
};
struct ds1wm_data {
- void *map;
+ void __iomem *map;
int bus_shift; /* # of shifts to calc register offsets */
struct platform_device *pdev;
struct ds1wm_platform_data *pdata;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 9e943fbce81..227d53b12a5 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -782,8 +782,8 @@ static int process_msg(void)
msg->u.watch.vec = split(body, msg->hdr.len,
&msg->u.watch.vec_size);
if (IS_ERR(msg->u.watch.vec)) {
- kfree(msg);
err = PTR_ERR(msg->u.watch.vec);
+ kfree(msg);
goto out;
}
diff --git a/fs/bio.c b/fs/bio.c
index 0d2c2d38b7b..29a44c1b64c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -230,7 +230,7 @@ void bio_put(struct bio *bio)
}
}
-inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
@@ -238,7 +238,7 @@ inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
return bio->bi_phys_segments;
}
-inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
@@ -257,7 +257,7 @@ inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
*/
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
- request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
+ struct request_queue *q = bdev_get_queue(bio_src->bi_bdev);
memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
bio_src->bi_max_vecs * sizeof(struct bio_vec));
@@ -303,7 +303,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
*/
int bio_get_nr_vecs(struct block_device *bdev)
{
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
int nr_pages;
nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -315,7 +315,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
return nr_pages;
}
-static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
+static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
*page, unsigned int len, unsigned int offset,
unsigned short max_sectors)
{
@@ -425,7 +425,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
* smaller than PAGE_SIZE, so it is always possible to add a single
* page to an empty bio. This should only be used by REQ_PC bios.
*/
-int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
@@ -523,7 +523,7 @@ int bio_uncopy_user(struct bio *bio)
* to/from kernel pages as necessary. Must be paired with
* call bio_uncopy_user() on io completion.
*/
-struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
+struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
unsigned int len, int write_to_vm)
{
unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -600,7 +600,7 @@ out_bmd:
return ERR_PTR(ret);
}
-static struct bio *__bio_map_user_iov(request_queue_t *q,
+static struct bio *__bio_map_user_iov(struct request_queue *q,
struct block_device *bdev,
struct sg_iovec *iov, int iov_count,
int write_to_vm)
@@ -712,7 +712,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
/**
* bio_map_user - map user address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @bdev: destination block device
* @uaddr: start of user address
* @len: length in bytes
@@ -721,7 +721,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len, int write_to_vm)
{
struct sg_iovec iov;
@@ -734,7 +734,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
/**
* bio_map_user_iov - map user sg_iovec table into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @bdev: destination block device
* @iov: the iovec.
* @iov_count: number of elements in the iovec
@@ -743,7 +743,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
struct sg_iovec *iov, int iov_count,
int write_to_vm)
{
@@ -808,7 +808,7 @@ static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
}
-static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+static struct bio *__bio_map_kern(struct request_queue *q, void *data,
unsigned int len, gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
@@ -847,7 +847,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
/**
* bio_map_kern - map kernel address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @data: pointer to buffer to map
* @len: length in bytes
* @gfp_mask: allocation flags for bio allocation
@@ -855,7 +855,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
* Map the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
gfp_t gfp_mask)
{
struct bio *bio;
diff --git a/fs/dcookies.c b/fs/dcookies.c
index c1208f53bd7..792cbf55fa9 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -20,6 +20,7 @@
#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/dcookies.h>
#include <linux/mutex.h>
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 68579a0ed3f..639a32c3c9c 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -580,7 +580,7 @@ static int ext2_check_descriptors (struct super_block * sb)
return 0;
}
if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
- le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >
+ le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
last_block)
{
ext2_error (sb, "ext2_check_descriptors",
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index f0614e3f1fe..22cfdd61c06 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1221,7 +1221,7 @@ static int ext3_check_descriptors (struct super_block * sb)
return 0;
}
if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
- le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >
+ le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
last_block)
{
ext3_error (sb, "ext3_check_descriptors",
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 75adbb64e02..4550b83ab1c 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1283,7 +1283,7 @@ static int ext4_check_descriptors (struct super_block * sb)
}
inode_table = ext4_inode_table(sb, gdp);
if (inode_table < first_block ||
- inode_table + sbi->s_itb_per_group > last_block)
+ inode_table + sbi->s_itb_per_group - 1 > last_block)
{
ext4_error (sb, "ext4_check_descriptors",
"Inode table for group %d"
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index b3efa4536cc..a21e4bc5444 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -335,10 +335,10 @@ static void nlmsvc_freegrantargs(struct nlm_rqst *call)
/*
* Deferred lock request handling for non-blocking lock
*/
-static u32
+static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
- u32 status = nlm_lck_denied_nolocks;
+ __be32 status = nlm_lck_denied_nolocks;
block->b_flags |= B_QUEUED;
@@ -352,7 +352,7 @@ nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
status = nlm_drop_reply;
}
dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
- block, block->b_flags, status);
+ block, block->b_flags, ntohl(status));
return status;
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b3d55c6747f..8ef0964179b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2450,7 +2450,7 @@ nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
}
static void
-nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_secinfo *secinfo)
{
int i = 0;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 5727cd18302..c4034f693e7 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2153,7 +2153,7 @@ static int ocfs2_splice_write_actor(struct pipe_inode_info *pipe,
src = buf->ops->map(pipe, buf, 1);
dst = kmap_atomic(page, KM_USER1);
memcpy(dst + offset, src + buf->offset, count);
- kunmap_atomic(page, KM_USER1);
+ kunmap_atomic(dst, KM_USER1);
buf->ops->unmap(pipe, buf, src);
copied = ocfs2_write_end(file, file->f_mapping, sd->pos, count, count,
diff --git a/fs/open.c b/fs/open.c
index a6b054edacb..e27c205364d 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -403,7 +403,7 @@ asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len)
if (inode->i_op && inode->i_op->fallocate)
ret = inode->i_op->fallocate(inode, mode, offset, len);
else
- ret = -ENOSYS;
+ ret = -EOPNOTSUPP;
out_fput:
fput(file);
diff --git a/fs/pipe.c b/fs/pipe.c
index d007830d9c8..6b3d91a691b 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -255,7 +255,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
/**
* generic_pipe_buf_confirm - verify contents of the pipe buffer
- * @pipe: the pipe that the buffer belongs to
+ * @info: the pipe that the buffer belongs to
* @buf: the buffer to confirm
*
* Description:
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 94e2c1adf18..a5b0dfd89a1 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -386,6 +386,19 @@ static const struct file_operations proc_reg_file_ops = {
.release = proc_reg_release,
};
+#ifdef CONFIG_COMPAT
+static const struct file_operations proc_reg_file_ops_no_compat = {
+ .llseek = proc_reg_llseek,
+ .read = proc_reg_read,
+ .write = proc_reg_write,
+ .poll = proc_reg_poll,
+ .unlocked_ioctl = proc_reg_unlocked_ioctl,
+ .mmap = proc_reg_mmap,
+ .open = proc_reg_open,
+ .release = proc_reg_release,
+};
+#endif
+
struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
struct proc_dir_entry *de)
{
@@ -413,8 +426,15 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
if (de->proc_iops)
inode->i_op = de->proc_iops;
if (de->proc_fops) {
- if (S_ISREG(inode->i_mode))
- inode->i_fop = &proc_reg_file_ops;
+ if (S_ISREG(inode->i_mode)) {
+#ifdef CONFIG_COMPAT
+ if (!de->proc_fops->compat_ioctl)
+ inode->i_fop =
+ &proc_reg_file_ops_no_compat;
+ else
+#endif
+ inode->i_fop = &proc_reg_file_ops;
+ }
else
inode->i_fop = de->proc_fops;
}
diff --git a/fs/quota.c b/fs/quota.c
index e6577ac15a6..99b24b52bfc 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -387,7 +387,7 @@ asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, qid_t
return ret;
}
-#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
+#if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT)
/*
* This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64)
* and is necessary due to alignment problems.
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index b6f12593c39..981027d1187 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1042,7 +1042,8 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
pos = I_UNFM_NUM(&s_ih);
while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > n_new_file_length) {
- __u32 *unfm, block;
+ __le32 *unfm;
+ __u32 block;
/* Each unformatted block deletion may involve one additional
* bitmap block into the transaction, thereby the initial
@@ -1052,7 +1053,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
break;
}
- unfm = (__u32 *)B_I_PITEM(p_s_bh, &s_ih) + pos - 1;
+ unfm = (__le32 *)B_I_PITEM(p_s_bh, &s_ih) + pos - 1;
block = get_block_num(unfm, 0);
if (block != 0) {
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 3b07f26d984..7b941abbcde 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -320,7 +320,7 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas
if (sizemask != sizeof(sigset_t) ||
copy_from_user(&sigmask, user_mask, sizeof(sigmask)))
- return error = -EINVAL;
+ return -EINVAL;
sigdelsetmask(&sigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
signotset(&sigmask);
diff --git a/fs/splice.c b/fs/splice.c
index 0a097321808..c010a72ca2d 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -164,7 +164,7 @@ static const struct pipe_buf_operations user_page_pipe_buf_ops = {
* @spd: data to fill
*
* Description:
- * @spd contains a map of pages and len/offset tupples, a long with
+ * @spd contains a map of pages and len/offset tuples, along with
* the struct pipe_buf_operations associated with these pages. This
* function will link that data to the pipe.
*
@@ -1000,7 +1000,7 @@ static long do_splice_to(struct file *in, loff_t *ppos,
* Description:
* This is a special case helper to splice directly between two
* points, without requiring an explicit pipe. Internally an allocated
- * pipe is cached in the process, and reused during the life time of
+ * pipe is cached in the process, and reused during the lifetime of
* that process.
*
*/
diff --git a/fs/timerfd.c b/fs/timerfd.c
index af9eca5c023..61983f3b107 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -95,7 +95,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
{
struct timerfd_ctx *ctx = file->private_data;
ssize_t res;
- u32 ticks = 0;
+ u64 ticks = 0;
DECLARE_WAITQUEUE(wait, current);
if (count < sizeof(ticks))
@@ -130,7 +130,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
* callback to avoid DoS attacks specifying a very
* short timer period.
*/
- ticks = (u32)
+ ticks = (u64)
hrtimer_forward(&ctx->tmr,
hrtimer_cb_get_time(&ctx->tmr),
ctx->tintv);
@@ -140,7 +140,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
}
spin_unlock_irq(&ctx->wqh.lock);
if (ticks)
- res = put_user(ticks, buf) ? -EFAULT: sizeof(ticks);
+ res = put_user(ticks, (u64 __user *) buf) ? -EFAULT: sizeof(ticks);
return res;
}
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 141cf15067c..42319d75aaa 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -139,7 +139,7 @@ STATIC int xfs_inumbers_fmt_compat(
long count,
long *written)
{
- compat_xfs_inogrp_t *p32 = ubuffer;
+ compat_xfs_inogrp_t __user *p32 = ubuffer;
long i;
for (i = 0; i < count; i++) {
@@ -444,7 +444,7 @@ xfs_compat_ioctl(
case XFS_IOC_FSINUMBERS_32:
cmd = _NATIVE_IOC(cmd, struct xfs_fsop_bulkreq);
return xfs_ioc_bulkstat_compat(XFS_BHVTOI(VNHEAD(vp))->i_mount,
- cmd, (void*)arg);
+ cmd, (void __user*)arg);
case XFS_IOC_FD_TO_HANDLE_32:
case XFS_IOC_PATH_TO_HANDLE_32:
case XFS_IOC_PATH_TO_FSHANDLE_32:
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 5e3dcf3299b..3d0fea235bf 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -131,7 +131,7 @@ struct acpi_device_ops {
struct acpi_driver {
char name[80];
char class[80];
- char *ids; /* Supported Hardware IDs */
+ const struct acpi_device_id *ids; /* Supported Hardware IDs */
struct acpi_device_ops ops;
struct device_driver drv;
struct module *owner;
@@ -341,7 +341,8 @@ int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent,
int acpi_bus_trim(struct acpi_device *start, int rmdevice);
int acpi_bus_start(struct acpi_device *device);
acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd);
-int acpi_match_ids(struct acpi_device *device, char *ids);
+int acpi_match_device_ids(struct acpi_device *device,
+ const struct acpi_device_id *ids);
int acpi_create_dir(struct acpi_device *);
void acpi_remove_dir(struct acpi_device *);
@@ -365,6 +366,17 @@ acpi_handle acpi_get_child(acpi_handle, acpi_integer);
acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int);
#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle))
+#ifdef CONFIG_PM_SLEEP
+int acpi_pm_device_sleep_state(struct device *, int, int *);
+#else /* !CONFIG_PM_SLEEP */
+static inline int acpi_pm_device_sleep_state(struct device *d, int w, int *p)
+{
+ if (p)
+ *p = ACPI_STATE_D0;
+ return ACPI_STATE_D3;
+}
+#endif /* !CONFIG_PM_SLEEP */
+
#endif /* CONFIG_ACPI */
#endif /*__ACPI_BUS_H__*/
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 553515912c0..202acb9ff4d 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -34,16 +34,21 @@
#define ACPI_BUS_COMPONENT 0x00010000
#define ACPI_SYSTEM_COMPONENT 0x02000000
-/* _HID definitions */
+/*
+ * _HID definitions
+ * HIDs must conform to ACPI spec(6.1.4)
+ * Linux specific HIDs do not apply to this and begin with LNX:
+ */
-#define ACPI_POWER_HID "power_resource"
+#define ACPI_POWER_HID "LNXPOWER"
#define ACPI_PROCESSOR_HID "ACPI0007"
-#define ACPI_SYSTEM_HID "acpi_system"
-#define ACPI_THERMAL_HID "thermal"
-#define ACPI_BUTTON_HID_POWERF "button_power"
-#define ACPI_BUTTON_HID_SLEEPF "button_sleep"
-#define ACPI_VIDEO_HID "video"
-#define ACPI_BAY_HID "bay"
+#define ACPI_SYSTEM_HID "LNXSYSTM"
+#define ACPI_THERMAL_HID "LNXTHERM"
+#define ACPI_BUTTON_HID_POWERF "LNXPWRBN"
+#define ACPI_BUTTON_HID_SLEEPF "LNXSLPBN"
+#define ACPI_VIDEO_HID "LNXVIDEO"
+#define ACPI_BAY_HID "LNXIOBAY"
+
/* --------------------------------------------------------------------------
PCI
-------------------------------------------------------------------------- */
@@ -145,7 +150,7 @@ static inline void unregister_hotplug_dock_device(acpi_handle handle)
#ifdef CONFIG_ACPI_SLEEP
extern int acpi_sleep_init(void);
#else
-#define acpi_sleep_init() do {} while (0)
+static inline int acpi_sleep_init(void) { return 0; }
#endif
#endif /*__ACPI_DRIVERS_H__*/
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index fe8abc27643..e73a3893912 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -809,7 +809,7 @@ acpi_status(*acpi_walk_callback) (acpi_handle obj_handle,
/* Common string version of device HIDs and UIDs */
-struct acpi_device_id {
+struct acpica_device_id {
char value[ACPI_DEVICE_ID_LENGTH];
};
@@ -859,8 +859,8 @@ struct acpi_device_info {
u32 valid; /* Indicates which fields below are valid */
u32 current_status; /* _STA value */
acpi_integer address; /* _ADR value if any */
- struct acpi_device_id hardware_id; /* _HID value if any */
- struct acpi_device_id unique_id; /* _UID value if any */
+ struct acpica_device_id hardware_id; /* _HID value if any */
+ struct acpica_device_id unique_id; /* _UID value if any */
u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */
struct acpi_compatible_id_list compatibility_id; /* List of _CIDs if any */
};
diff --git a/include/acpi/acutils.h b/include/acpi/acutils.h
index a87ef1c8d46..a2918547c73 100644
--- a/include/acpi/acutils.h
+++ b/include/acpi/acutils.h
@@ -354,7 +354,7 @@ acpi_ut_evaluate_numeric_object(char *object_name,
acpi_status
acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
- struct acpi_device_id *hid);
+ struct acpica_device_id *hid);
acpi_status
acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
@@ -366,7 +366,7 @@ acpi_ut_execute_STA(struct acpi_namespace_node *device_node,
acpi_status
acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
- struct acpi_device_id *uid);
+ struct acpica_device_id *uid);
acpi_status
acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest);
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 3a0cbeb03fa..9e71201000d 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -324,7 +324,7 @@ static inline int fls64(unsigned long x)
{
unsigned long t, a, r;
- t = __kernel_cmpbge (x, 0x0101010101010101);
+ t = __kernel_cmpbge (x, 0x0101010101010101UL);
a = __flsm1_tab[t];
t = __kernel_extbl (x, a);
r = a*8 + __flsm1_tab[t] + (x != 0);
diff --git a/include/asm-arm/arch-mxc/uncompress.h b/include/asm-arm/arch-mxc/uncompress.h
index ec5787d0e78..42cc0cb3fef 100644
--- a/include/asm-arm/arch-mxc/uncompress.h
+++ b/include/asm-arm/arch-mxc/uncompress.h
@@ -26,7 +26,6 @@
#define __MXC_BOOT_UNCOMPRESS
#include <asm/hardware.h>
-#include <asm/processor.h>
#define UART(x) (*(volatile unsigned long *)(serial_port + (x)))
@@ -62,7 +61,7 @@ static void putc(int ch)
}
while (!(UART(USR2) & USR2_TXFE))
- cpu_relax();
+ barrier();
UART(TXR) = ch;
}
diff --git a/include/asm-arm/arch-omap/mailbox.h b/include/asm-arm/arch-omap/mailbox.h
index 4bf0909461f..7cbed9332e1 100644
--- a/include/asm-arm/arch-omap/mailbox.h
+++ b/include/asm-arm/arch-omap/mailbox.h
@@ -37,7 +37,7 @@ struct omap_mbox_ops {
struct omap_mbox_queue {
spinlock_t lock;
- request_queue_t *queue;
+ struct request_queue *queue;
struct work_struct work;
int (*callback)(void *);
struct omap_mbox *mbox;
diff --git a/include/asm-arm/unaligned.h b/include/asm-arm/unaligned.h
index 795b9e5b9e6..8431f6eed5c 100644
--- a/include/asm-arm/unaligned.h
+++ b/include/asm-arm/unaligned.h
@@ -60,24 +60,24 @@ extern int __bug_unaligned_x(const void *ptr);
__get_unaligned_4_be((__p+4)))
#define __get_unaligned_le(ptr) \
- ({ \
+ ((__force typeof(*(ptr)))({ \
const __u8 *__p = (const __u8 *)(ptr); \
__builtin_choose_expr(sizeof(*(ptr)) == 1, *__p, \
__builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_le(__p), \
__builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_le(__p), \
__builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_le(__p), \
(void)__bug_unaligned_x(__p))))); \
- })
+ }))
#define __get_unaligned_be(ptr) \
- ({ \
+ ((__force typeof(*(ptr)))({ \
const __u8 *__p = (const __u8 *)(ptr); \
__builtin_choose_expr(sizeof(*(ptr)) == 1, *__p, \
__builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_be(__p), \
__builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_be(__p), \
__builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_be(__p), \
(void)__bug_unaligned_x(__p))))); \
- })
+ }))
static inline void __put_unaligned_2_le(__u32 __v, register __u8 *__p)
@@ -131,15 +131,16 @@ static inline void __put_unaligned_8_be(const unsigned long long __v, register _
*/
#define __put_unaligned_le(val,ptr) \
({ \
+ (void)sizeof(*(ptr) = (val)); \
switch (sizeof(*(ptr))) { \
case 1: \
*(ptr) = (val); \
break; \
- case 2: __put_unaligned_2_le((val),(__u8 *)(ptr)); \
+ case 2: __put_unaligned_2_le((__force u16)(val),(__u8 *)(ptr)); \
break; \
- case 4: __put_unaligned_4_le((val),(__u8 *)(ptr)); \
+ case 4: __put_unaligned_4_le((__force u32)(val),(__u8 *)(ptr)); \
break; \
- case 8: __put_unaligned_8_le((val),(__u8 *)(ptr)); \
+ case 8: __put_unaligned_8_le((__force u64)(val),(__u8 *)(ptr)); \
break; \
default: __bug_unaligned_x(ptr); \
break; \
@@ -149,15 +150,16 @@ static inline void __put_unaligned_8_be(const unsigned long long __v, register _
#define __put_unaligned_be(val,ptr) \
({ \
+ (void)sizeof(*(ptr) = (val)); \
switch (sizeof(*(ptr))) { \
case 1: \
*(ptr) = (val); \
break; \
- case 2: __put_unaligned_2_be((val),(__u8 *)(ptr)); \
+ case 2: __put_unaligned_2_be((__force u16)(val),(__u8 *)(ptr)); \
break; \
- case 4: __put_unaligned_4_be((val),(__u8 *)(ptr)); \
+ case 4: __put_unaligned_4_be((__force u32)(val),(__u8 *)(ptr)); \
break; \
- case 8: __put_unaligned_8_be((val),(__u8 *)(ptr)); \
+ case 8: __put_unaligned_8_be((__force u64)(val),(__u8 *)(ptr)); \
break; \
default: __bug_unaligned_x(ptr); \
break; \
diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h
index c4d6cbbf96d..a970781a0f9 100644
--- a/include/asm-blackfin/bfin-global.h
+++ b/include/asm-blackfin/bfin-global.h
@@ -61,6 +61,7 @@ extern void bfin_dcache_init(void);
extern int read_iloc(void);
extern int bfin_console_init(void);
extern asmlinkage void lower_to_irq14(void);
+extern void init_exception_vectors(void);
extern void init_dma(void);
extern void program_IAR(void);
extern void evt14_softirq(void);
diff --git a/include/asm-blackfin/mach-bf548/cdefBF54x_base.h b/include/asm-blackfin/mach-bf548/cdefBF54x_base.h
index 98d35a92911..cdf29e75ea5 100644
--- a/include/asm-blackfin/mach-bf548/cdefBF54x_base.h
+++ b/include/asm-blackfin/mach-bf548/cdefBF54x_base.h
@@ -242,6 +242,39 @@ static __inline__ void bfin_write_VR_CTL(unsigned int val)
#define bfin_read_TWI0_RCV_DATA16() bfin_read16(TWI0_RCV_DATA16)
#define bfin_write_TWI0_RCV_DATA16(val) bfin_write16(TWI0_RCV_DATA16, val)
+#define bfin_read_TWI_CLKDIV() bfin_read16(TWI0_CLKDIV)
+#define bfin_write_TWI_CLKDIV(val) bfin_write16(TWI0_CLKDIV, val)
+#define bfin_read_TWI_CONTROL() bfin_read16(TWI0_CONTROL)
+#define bfin_write_TWI_CONTROL(val) bfin_write16(TWI0_CONTROL, val)
+#define bfin_read_TWI_SLAVE_CTRL() bfin_read16(TWI0_SLAVE_CTRL)
+#define bfin_write_TWI_SLAVE_CTRL(val) bfin_write16(TWI0_SLAVE_CTRL, val)
+#define bfin_read_TWI_SLAVE_STAT() bfin_read16(TWI0_SLAVE_STAT)
+#define bfin_write_TWI_SLAVE_STAT(val) bfin_write16(TWI0_SLAVE_STAT, val)
+#define bfin_read_TWI_SLAVE_ADDR() bfin_read16(TWI0_SLAVE_ADDR)
+#define bfin_write_TWI_SLAVE_ADDR(val) bfin_write16(TWI0_SLAVE_ADDR, val)
+#define bfin_read_TWI_MASTER_CTL() bfin_read16(TWI0_MASTER_CTRL)
+#define bfin_write_TWI_MASTER_CTL(val) bfin_write16(TWI0_MASTER_CTRL, val)
+#define bfin_read_TWI_MASTER_STAT() bfin_read16(TWI0_MASTER_STAT)
+#define bfin_write_TWI_MASTER_STAT(val) bfin_write16(TWI0_MASTER_STAT, val)
+#define bfin_read_TWI_MASTER_ADDR() bfin_read16(TWI0_MASTER_ADDR)
+#define bfin_write_TWI_MASTER_ADDR(val) bfin_write16(TWI0_MASTER_ADDR, val)
+#define bfin_read_TWI_INT_STAT() bfin_read16(TWI0_INT_STAT)
+#define bfin_write_TWI_INT_STAT(val) bfin_write16(TWI0_INT_STAT, val)
+#define bfin_read_TWI_INT_MASK() bfin_read16(TWI0_INT_MASK)
+#define bfin_write_TWI_INT_MASK(val) bfin_write16(TWI0_INT_MASK, val)
+#define bfin_read_TWI_FIFO_CTL() bfin_read16(TWI0_FIFO_CTRL)
+#define bfin_write_TWI_FIFO_CTL(val) bfin_write16(TWI0_FIFO_CTRL, val)
+#define bfin_read_TWI_FIFO_STAT() bfin_read16(TWI0_FIFO_STAT)
+#define bfin_write_TWI_FIFO_STAT(val) bfin_write16(TWI0_FIFO_STAT, val)
+#define bfin_read_TWI_XMT_DATA8() bfin_read16(TWI0_XMT_DATA8)
+#define bfin_write_TWI_XMT_DATA8(val) bfin_write16(TWI0_XMT_DATA8, val)
+#define bfin_read_TWI_XMT_DATA16() bfin_read16(TWI0_XMT_DATA16)
+#define bfin_write_TWI_XMT_DATA16(val) bfin_write16(TWI0_XMT_DATA16, val)
+#define bfin_read_TWI_RCV_DATA8() bfin_read16(TWI0_RCV_DATA8)
+#define bfin_write_TWI_RCV_DATA8(val) bfin_write16(TWI0_RCV_DATA8, val)
+#define bfin_read_TWI_RCV_DATA16() bfin_read16(TWI0_RCV_DATA16)
+#define bfin_write_TWI_RCV_DATA16(val) bfin_write16(TWI0_RCV_DATA16, val)
+
 /* SPORT0 is not defined in the shared file because it is not available on the ADSP-BF542 and ADSP-BF544 processors */
/* SPORT1 Registers */
diff --git a/include/asm-blackfin/mach-bf548/irq.h b/include/asm-blackfin/mach-bf548/irq.h
index 0b3325bb1ff..e548d3cd81e 100644
--- a/include/asm-blackfin/mach-bf548/irq.h
+++ b/include/asm-blackfin/mach-bf548/irq.h
@@ -112,6 +112,7 @@ Events (highest priority) EMU 0
#define IRQ_ATAPI_TX BFIN_IRQ(44) /* ATAPI TX (DMA11) Interrupt */
#define IRQ_TWI0 BFIN_IRQ(45) /* TWI0 Interrupt */
#define IRQ_TWI1 BFIN_IRQ(46) /* TWI1 Interrupt */
+#define IRQ_TWI IRQ_TWI0 /* TWI Interrupt */
#define IRQ_CAN0_RX BFIN_IRQ(47) /* CAN0 Receive Interrupt */
#define IRQ_CAN0_TX BFIN_IRQ(48) /* CAN0 Transmit Interrupt */
#define IRQ_MDMAS2 BFIN_IRQ(49) /* MDMA Stream 2 Interrupt */
diff --git a/include/asm-blackfin/mach-bf561/cdefBF561.h b/include/asm-blackfin/mach-bf561/cdefBF561.h
index 1a8ec9e4692..6e87ab269ff 100644
--- a/include/asm-blackfin/mach-bf561/cdefBF561.h
+++ b/include/asm-blackfin/mach-bf561/cdefBF561.h
@@ -81,6 +81,12 @@ static __inline__ void bfin_write_VR_CTL(unsigned int val)
#define bfin_write_PLL_LOCKCNT(val) bfin_write16(PLL_LOCKCNT,val)
#define bfin_read_CHIPID() bfin_read32(CHIPID)
+/* For MMR's that are reserved on Core B, set up defines to better integrate with other ports */
+#define bfin_read_SWRST() bfin_read_SICA_SWRST()
+#define bfin_write_SWRST() bfin_write_SICA_SWRST()
+#define bfin_read_SYSCR() bfin_read_SICA_SYSCR()
+#define bfin_write_SYSCR() bfin_write_SICA_SYSCR()
+
/* System Reset and Interrupt Controller registers for core A (0xFFC0 0100-0xFFC0 01FF) */
#define bfin_read_SICA_SWRST() bfin_read16(SICA_SWRST)
#define bfin_write_SICA_SWRST(val) bfin_write16(SICA_SWRST,val)
diff --git a/include/asm-blackfin/mach-bf561/defBF561.h b/include/asm-blackfin/mach-bf561/defBF561.h
index 89150ecb909..0f2dc6e6335 100644
--- a/include/asm-blackfin/mach-bf561/defBF561.h
+++ b/include/asm-blackfin/mach-bf561/defBF561.h
@@ -52,6 +52,10 @@
#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count register (16-bit) */
#define CHIPID 0xFFC00014 /* Chip ID Register */
+/* For MMR's that are reserved on Core B, set up defines to better integrate with other ports */
+#define SWRST SICA_SWRST
+#define SYSCR SICA_SYSCR
+
/* System Reset and Interrupt Controller registers for core A (0xFFC0 0100-0xFFC0 01FF) */
#define SICA_SWRST 0xFFC00100 /* Software Reset register */
#define SICA_SYSCR 0xFFC00104 /* System Reset Configuration register */
diff --git a/include/asm-blackfin/thread_info.h b/include/asm-blackfin/thread_info.h
index fa8f08cf283..34d3c2eec94 100644
--- a/include/asm-blackfin/thread_info.h
+++ b/include/asm-blackfin/thread_info.h
@@ -39,6 +39,11 @@
*/
#define ALIGN_PAGE_MASK 0xffffe000
+/*
+ * Size of kernel stack for each process. This must be a power of 2...
+ */
+#define THREAD_SIZE 8192 /* 2 pages */
+
#ifndef __ASSEMBLY__
typedef unsigned long mm_segment_t;
@@ -76,11 +81,6 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-/*
- * Size of kernel stack for each process. This must be a power of 2...
- */
-#define THREAD_SIZE 8192 /* 2 pages */
-
/* How to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
@@ -94,7 +94,7 @@ static inline struct thread_info *current_thread_info(void)
struct thread_info *ti;
__asm__("%0 = sp;": "=&d"(ti):
);
- return (struct thread_info *)((long)ti & ~8191UL);
+ return (struct thread_info *)((long)ti & ~((long)THREAD_SIZE-1));
}
/* thread information allocation */
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 449f3f272e0..125179adf04 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -121,19 +121,6 @@ static inline void acpi_disable_pci(void)
}
extern int acpi_irq_balance_set(char *str);
-#else /* !CONFIG_ACPI */
-
-#define acpi_lapic 0
-#define acpi_ioapic 0
-static inline void acpi_noirq_set(void) { }
-static inline void acpi_disable_pci(void) { }
-static inline void disable_acpi(void) { }
-
-#endif /* !CONFIG_ACPI */
-
-
-#ifdef CONFIG_ACPI_SLEEP
-
/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
extern void acpi_restore_state_mem(void);
@@ -143,7 +130,15 @@ extern unsigned long acpi_wakeup_address;
/* early initialization routine */
extern void acpi_reserve_bootmem(void);
-#endif /*CONFIG_ACPI_SLEEP*/
+#else /* !CONFIG_ACPI */
+
+#define acpi_lapic 0
+#define acpi_ioapic 0
+static inline void acpi_noirq_set(void) { }
+static inline void acpi_disable_pci(void) { }
+static inline void disable_acpi(void) { }
+
+#endif /* !CONFIG_ACPI */
#define ARCH_HAS_POWER_INIT 1
diff --git a/include/asm-i386/bootparam.h b/include/asm-i386/bootparam.h
index 427d8652bfd..b91b01783e4 100644
--- a/include/asm-i386/bootparam.h
+++ b/include/asm-i386/bootparam.h
@@ -4,8 +4,9 @@
#include <linux/types.h>
#include <linux/screen_info.h>
#include <linux/apm_bios.h>
-#include <asm/e820.h>
#include <linux/edd.h>
+#include <asm/e820.h>
+#include <asm/ist.h>
#include <video/edid.h>
struct setup_header {
@@ -48,9 +49,9 @@ struct efi_info {
u32 _pad1;
u32 efi_systab;
u32 efi_memdesc_size;
- u32 efi_memdec_version;
+ u32 efi_memdesc_version;
u32 efi_memmap;
- u32 fi_memmap_size;
+ u32 efi_memmap_size;
u32 _pad2[2];
};
@@ -59,7 +60,7 @@ struct boot_params {
struct screen_info screen_info; /* 0x000 */
struct apm_bios_info apm_bios_info; /* 0x040 */
u8 _pad2[12]; /* 0x054 */
- u32 speedstep_info[4]; /* 0x060 */
+ struct ist_info ist_info; /* 0x060 */
u8 _pad3[16]; /* 0x070 */
u8 hd0_info[16]; /* obsolete! */ /* 0x080 */
u8 hd1_info[16]; /* obsolete! */ /* 0x090 */
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index 43114c82460..cf67dbb1db7 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -47,7 +47,7 @@ extern void e820_register_memory(void);
extern void limit_regions(unsigned long long size);
extern void print_memory_map(char *who);
-#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
+#if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
extern void e820_mark_nosave_regions(void);
#else
static inline void e820_mark_nosave_regions(void)
diff --git a/include/asm-i386/ist.h b/include/asm-i386/ist.h
index d13d1e68afa..ef2003ebc6f 100644
--- a/include/asm-i386/ist.h
+++ b/include/asm-i386/ist.h
@@ -19,11 +19,13 @@
#ifdef __KERNEL__
+#include <linux/types.h>
+
struct ist_info {
- unsigned long signature;
- unsigned long command;
- unsigned long event;
- unsigned long perf_level;
+ u32 signature;
+ u32 command;
+ u32 event;
+ u32 perf_level;
};
extern struct ist_info ist_info;
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h
index 8dbaafe611f..a2520732ffd 100644
--- a/include/asm-i386/suspend.h
+++ b/include/asm-i386/suspend.h
@@ -21,7 +21,7 @@ struct saved_context {
unsigned long return_address;
} __attribute__((packed));
-#ifdef CONFIG_ACPI_SLEEP
+#ifdef CONFIG_ACPI
extern unsigned long saved_eip;
extern unsigned long saved_esp;
extern unsigned long saved_ebp;
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index 5b526357d17..49730ffbbae 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -100,6 +100,11 @@ const char *acpi_get_sysname (void);
int acpi_request_vector (u32 int_type);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+/* routines for saving/restoring kernel state */
+extern int acpi_save_state_mem(void);
+extern void acpi_restore_state_mem(void);
+extern unsigned long acpi_wakeup_address;
+
/*
* Record the cpei override flag and current logical cpu. This is
* useful for CPU removal.
diff --git a/include/asm-ia64/ia32.h b/include/asm-ia64/ia32.h
index 5ff8d74c3e0..2390ee145aa 100644
--- a/include/asm-ia64/ia32.h
+++ b/include/asm-ia64/ia32.h
@@ -27,11 +27,12 @@ extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs);
#if PAGE_SHIFT > IA32_PAGE_SHIFT
-extern int ia32_copy_partial_page_list (struct task_struct *, unsigned long);
-extern void ia32_drop_partial_page_list (struct task_struct *);
+extern int ia32_copy_ia64_partial_page_list(struct task_struct *,
+ unsigned long);
+extern void ia32_drop_ia64_partial_page_list(struct task_struct *);
#else
-# define ia32_copy_partial_page_list(a1, a2) 0
-# define ia32_drop_partial_page_list(a1) do { ; } while (0)
+# define ia32_copy_ia64_partial_page_list(a1, a2) 0
+# define ia32_drop_ia64_partial_page_list(a1) do { ; } while (0)
#endif
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index ca33eb181ff..5cf8bf1e805 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -275,6 +275,7 @@ struct ia64_machine_vector {
extern struct ia64_machine_vector ia64_mv;
extern void machvec_init (const char *name);
+extern void machvec_init_from_cmdline(const char *cmdline);
# else
# error Unknown configuration. Update asm-ia64/machvec.h.
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 6251c76437d..be3b0ae4327 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -220,7 +220,7 @@ struct desc_struct {
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
-struct partial_page_list;
+struct ia64_partial_page_list;
#endif
struct thread_struct {
@@ -242,7 +242,7 @@ struct thread_struct {
__u64 fdr; /* IA32 fp except. data reg */
__u64 old_k1; /* old value of ar.k1 */
__u64 old_iob; /* old IOBase value */
- struct partial_page_list *ppl; /* partial page list for 4K page size issue */
+ struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */
/* cached TLS descriptors. */
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index c60024989eb..6314b29e8c4 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -116,7 +116,6 @@ max_xtp (void)
extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
extern void cpu_die (void) __attribute__ ((noreturn));
-extern int __cpu_up (unsigned int cpu);
extern void __init smp_build_cpu_map(void);
extern void __init init_smp_config (void);
diff --git a/include/asm-m68k/raw_io.h b/include/asm-m68k/raw_io.h
index 91c623f0994..d9eb9834ccc 100644
--- a/include/asm-m68k/raw_io.h
+++ b/include/asm-m68k/raw_io.h
@@ -36,15 +36,15 @@ extern void __iounmap(void *addr, unsigned long size);
#define in_be32(addr) \
({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
#define in_le16(addr) \
- ({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; })
+ ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
#define in_le32(addr) \
- ({ u32 __v = le32_to_cpu(*(__force volatile u32 *) (addr)); __v; })
+ ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
-#define out_le16(addr,w) (void)((*(__force volatile u16 *) (addr)) = cpu_to_le16(w))
-#define out_le32(addr,l) (void)((*(__force volatile u32 *) (addr)) = cpu_to_le32(l))
+#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
+#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
#define raw_inb in_8
#define raw_inw in_be16
diff --git a/include/asm-m68k/system.h b/include/asm-m68k/system.h
index 198878b53a6..caa9b1663e4 100644
--- a/include/asm-m68k/system.h
+++ b/include/asm-m68k/system.h
@@ -46,6 +46,22 @@ asmlinkage void resume(void);
} while (0)
+/*
+ * Force strict CPU ordering.
+ * Not really required on m68k...
+ */
+#define nop() do { asm volatile ("nop"); barrier(); } while (0)
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define read_barrier_depends() ((void)0)
+#define set_mb(var, value) ({ (var) = (value); wmb(); })
+
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() ((void)0)
+
/* interrupt control.. */
#if 0
#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
@@ -70,23 +86,6 @@ static inline int irqs_disabled(void)
/* For spinlocks etc */
#define local_irq_save(x) ({ local_save_flags(x); local_irq_disable(); })
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
-#define nop() do { asm volatile ("nop"); barrier(); } while (0)
-#define mb() barrier()
-#define rmb() barrier()
-#define wmb() barrier()
-#define read_barrier_depends() ((void)0)
-#define set_mb(var, value) ({ (var) = (value); wmb(); })
-
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() ((void)0)
-
-
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
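
The hunk above only moves the ordering macros earlier in the header; on m68k they all reduce to a compiler barrier(). A minimal sketch of the pattern these macros exist for, using a hypothetical producer/consumer pair — none of this code is part of the patch:

/* Hypothetical producer/consumer sketch; not part of the patch. */
static int example_data;
static volatile int example_ready;

static void example_publish(int value)
{
	example_data = value;
	wmb();			/* order the data store before the flag store */
	example_ready = 1;
}

static int example_consume(void)
{
	while (!example_ready)
		cpu_relax();	/* spin until the flag is observed */
	rmb();			/* order the flag load before the data load */
	return example_data;
}
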
diff --git a/include/asm-m68knommu/hardirq.h b/include/asm-m68knommu/hardirq.h
index 980075bab79..bfad28149a4 100644
--- a/include/asm-m68knommu/hardirq.h
+++ b/include/asm-m68knommu/hardirq.h
@@ -22,4 +22,6 @@ typedef struct {
# error HARDIRQ_BITS is too low!
#endif
+void ack_bad_irq(unsigned int irq);
+
#endif /* __M68K_HARDIRQ_H */
diff --git a/include/asm-m68knommu/hw_irq.h b/include/asm-m68knommu/hw_irq.h
new file mode 100644
index 00000000000..f3ec9e5ae04
--- /dev/null
+++ b/include/asm-m68knommu/hw_irq.h
@@ -0,0 +1,4 @@
+#ifndef __M68KNOMMU_HW_IRQ_H__
+#define __M68KNOMMU_HW_IRQ_H__
+
+#endif /* __M68KNOMMU_HW_IRQ_H__ */
diff --git a/include/asm-m68knommu/machdep.h b/include/asm-m68knommu/machdep.h
index 6ce28f8e0ea..708d7863ba6 100644
--- a/include/asm-m68knommu/machdep.h
+++ b/include/asm-m68knommu/machdep.h
@@ -1,53 +1,21 @@
#ifndef _M68KNOMMU_MACHDEP_H
#define _M68KNOMMU_MACHDEP_H
-#include <linux/seq_file.h>
#include <linux/interrupt.h>
-struct pt_regs;
-struct kbd_repeat;
-struct mktime;
-struct hwclk_time;
-struct gendisk;
-struct buffer_head;
-
-extern void (*mach_sched_init) (irqreturn_t (*handler)(int, void *, struct pt_regs *));
-/* machine dependent keyboard functions */
-extern int (*mach_keyb_init) (void);
-extern int (*mach_kbdrate) (struct kbd_repeat *);
-extern void (*mach_kbd_leds) (unsigned int);
-/* machine dependent irq functions */
-extern void (*mach_init_IRQ) (void);
-extern irq_handler_t mach_default_handler;
-extern int (*mach_request_irq) (unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
- unsigned long flags, const char *devname, void *dev_id);
-extern void (*mach_free_irq) (unsigned int irq, void *dev_id);
-extern void (*mach_get_model) (char *model);
-extern int (*mach_get_hardware_list) (char *buffer);
-extern int (*mach_get_irq_list) (struct seq_file *p, void *v);
-extern void (*mach_process_int) (int irq, struct pt_regs *fp);
+extern void (*mach_sched_init) (irq_handler_t handler);
/* machine dependent timer functions */
extern unsigned long (*mach_gettimeoffset)(void);
extern void (*mach_gettod)(int *year, int *mon, int *day, int *hour,
int *min, int *sec);
-extern int (*mach_hwclk)(int, struct hwclk_time*);
extern int (*mach_set_clock_mmss)(unsigned long);
+
+/* machine dependent power off functions */
extern void (*mach_reset)( void );
extern void (*mach_halt)( void );
extern void (*mach_power_off)( void );
-extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
-extern void (*mach_hd_setup)(char *, int *);
-extern long mach_max_dma_address;
-extern void (*mach_floppy_eject)(void);
-extern void (*mach_heartbeat) (int);
-extern void (*mach_l2_flush) (int);
-extern int mach_sysrq_key;
-extern int mach_sysrq_shift_state;
-extern int mach_sysrq_shift_mask;
-extern char *mach_sysrq_xlate;
extern void config_BSP(char *command, int len);
extern void (*mach_tick)(void);
-extern void (*mach_trap_init)(void);
#endif /* _M68KNOMMU_MACHDEP_H */
diff --git a/include/asm-m68knommu/mcfdma.h b/include/asm-m68knommu/mcfdma.h
index ea729e81a6b..705c52c79cd 100644
--- a/include/asm-m68knommu/mcfdma.h
+++ b/include/asm-m68knommu/mcfdma.h
@@ -133,7 +133,7 @@
#define MCFDMA_DIR_ASCEN 0x0800 /* Address Sequence Complete (Completion) interrupt enable */
#define MCFDMA_DIR_TEEN 0x0200 /* Transfer Error interrupt enable */
#define MCFDMA_DIR_TCEN 0x0100 /* Transfer Complete (a bus transfer, that is) interrupt enable */
-#define MCFDMA_DIR_INV 0x1000 /* Invalid Combination */
+#define MCFDMA_DIR_INV 0x0010 /* Invalid Combination */
#define MCFDMA_DIR_ASC 0x0008 /* Address Sequence Complete (DMA Completion) */
#define MCFDMA_DIR_TE 0x0002 /* Transfer Error */
#define MCFDMA_DIR_TC 0x0001 /* Transfer Complete */
diff --git a/include/asm-m68knommu/system.h b/include/asm-m68knommu/system.h
index 5e5ed18bb78..5da43a5d12a 100644
--- a/include/asm-m68knommu/system.h
+++ b/include/asm-m68knommu/system.h
@@ -296,7 +296,7 @@ cmpxchg(volatile int *p, int old, int new)
({ \
unsigned char volatile *reset; \
asm("move.w #0x2700, %sr"); \
- reset = ((volatile unsigned short *)(MCF_IPSBAR + 0x110000)); \
+ reset = ((volatile unsigned char *)(MCF_IPSBAR + 0x110000)); \
while(1) \
*reset |= (0x01 << 7);\
})
@@ -318,7 +318,7 @@ cmpxchg(volatile int *p, int old, int new)
({ \
unsigned char volatile *reset; \
asm("move.w #0x2700, %sr"); \
- reset = ((volatile unsigned short *)(MCF_IPSBAR + 0xA0000)); \
+ reset = ((volatile unsigned char *)(MCF_IPSBAR + 0xA0000)); \
while(1) \
*reset |= 0x80; \
})
diff --git a/include/asm-m68knommu/timex.h b/include/asm-m68knommu/timex.h
index 85069998db5..109050f3fe9 100644
--- a/include/asm-m68knommu/timex.h
+++ b/include/asm-m68knommu/timex.h
@@ -1 +1,23 @@
-#include <asm-m68k/timex.h>
+/*
+ * linux/include/asm-m68knommu/timex.h
+ *
+ * m68knommu architecture timex specifications
+ */
+#ifndef _ASM_M68KNOMMU_TIMEX_H
+#define _ASM_M68KNOMMU_TIMEX_H
+
+#ifdef CONFIG_COLDFIRE
+#include <asm/coldfire.h>
+#define CLOCK_TICK_RATE MCF_CLK
+#else
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#endif
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+ return 0;
+}
+
+#endif
diff --git a/include/asm-mips/edac.h b/include/asm-mips/edac.h
new file mode 100644
index 00000000000..83719eee2d1
--- /dev/null
+++ b/include/asm-mips/edac.h
@@ -0,0 +1,35 @@
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static inline void atomic_scrub(void *va, u32 size)
+{
+ unsigned long *virt_addr = va;
+ unsigned long temp;
+ u32 i;
+
+ for (i = 0; i < size / sizeof(unsigned long); i++, virt_addr++) {
+
+ /*
+ * Very carefully read and write to memory atomically
+ * so we are interrupt, DMA and SMP safe.
+ *
+ * Intel: asm("lock; addl $0, %0"::"m"(*virt_addr));
+ */
+
+ __asm__ __volatile__ (
+ " .set mips3 \n"
+ "1: ll %0, %1 # atomic_add \n"
+ " ll %0, %1 # atomic_add \n"
+ " addu %0, $0 \n"
+ " sc %0, %1 \n"
+ " beqz %0, 1b \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (*virt_addr)
+ : "m" (*virt_addr));
+
+ }
+}
+
+#endif
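
atomic_scrub() above walks the region one long at a time and rewrites each word through an LL/SC add of zero, so the memory controller regenerates ECC without racing interrupts, other CPUs or DMA. A hedged sketch of a caller — the wrapper function and its arguments are hypothetical, not taken from this patch:

/* Hypothetical caller: re-write a region that reported a corrected error. */
static void example_scrub(struct page *page, unsigned int offset,
			  unsigned int len)
{
	void *va = page_address(page) + offset;	/* assumes a lowmem page */

	atomic_scrub(va, len);		/* rewrites 'len' bytes word by word */
}
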
diff --git a/include/asm-mips/war.h b/include/asm-mips/war.h
index 9de52a5b0f3..2883ccc69ed 100644
--- a/include/asm-mips/war.h
+++ b/include/asm-mips/war.h
@@ -188,7 +188,6 @@
#define ICACHE_REFILLS_WORKAROUND_WAR 1
#endif
-
/*
* On the R10000 upto version 2.6 (not sure about 2.7) there is a bug that
* may cause ll / sc and lld / scd sequences to execute non-atomically.
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h
index f6fa3947484..a248b8bd4d7 100644
--- a/include/asm-powerpc/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -79,7 +79,7 @@
_EMIT_BUG_ENTRY \
: : "i" (__FILE__), "i" (__LINE__), "i" (0), \
"i" (sizeof(struct bug_entry)), \
- "r" ((long)(x))); \
+ "r" ((__force long)(x))); \
} \
} while (0)
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index 10c51f457d4..236a9210e5f 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -190,7 +190,6 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr,
extern int page_is_ram(unsigned long pfn);
struct vm_area_struct;
-extern const char *arch_vma_name(struct vm_area_struct *vma);
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-powerpc/pci-bridge.h b/include/asm-powerpc/pci-bridge.h
index e72c2a60853..e909769b641 100644
--- a/include/asm-powerpc/pci-bridge.h
+++ b/include/asm-powerpc/pci-bridge.h
@@ -45,10 +45,17 @@ struct pci_controller {
* on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS
* to determine which bus number to match on when generating type0
* config cycles
+ * NO_PCIE_LINK - the Freescale PCI-e controllers have issues with
+ * hanging if we don't have link and try to do config cycles to
+ * anything but the PHB. Only allow talking to the PHB if this is
+ * set.
+ * BIG_ENDIAN - cfg_addr is a big endian register
*/
#define PPC_INDIRECT_TYPE_SET_CFG_TYPE (0x00000001)
#define PPC_INDIRECT_TYPE_EXT_REG (0x00000002)
#define PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS (0x00000004)
+#define PPC_INDIRECT_TYPE_NO_PCIE_LINK (0x00000008)
+#define PPC_INDIRECT_TYPE_BIG_ENDIAN (0x00000010)
u32 indirect_type;
/* Currently, we limit ourselves to 1 IO range and 3 mem
@@ -64,6 +71,14 @@ static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
return bus->sysdata;
}
+static inline int isa_vaddr_is_ioport(void __iomem *address)
+{
+ /* No specific ISA handling on ppc32 at this stage, it
+ * all goes through PCI
+ */
+ return 0;
+}
+
/* These are used for config access before all the PCI probing
has been done. */
int early_read_config_byte(struct pci_controller *hose, int bus, int dev_fn,
@@ -79,11 +94,14 @@ int early_write_config_word(struct pci_controller *hose, int bus, int dev_fn,
int early_write_config_dword(struct pci_controller *hose, int bus, int dev_fn,
int where, u32 val);
-extern void setup_indirect_pci_nomap(struct pci_controller* hose,
- void __iomem *cfg_addr, void __iomem *cfg_data);
+extern int early_find_capability(struct pci_controller *hose, int bus,
+ int dev_fn, int cap);
+
extern void setup_indirect_pci(struct pci_controller* hose,
- u32 cfg_addr, u32 cfg_data);
+ u32 cfg_addr, u32 cfg_data, u32 flags);
extern void setup_grackle(struct pci_controller *hose);
+extern void __init update_bridge_resource(struct pci_dev *dev,
+ struct resource *res);
#else
@@ -231,6 +249,13 @@ extern void pcibios_free_controller(struct pci_controller *phb);
extern void isa_bridge_find_early(struct pci_controller *hose);
+static inline int isa_vaddr_is_ioport(void __iomem *address)
+{
+ /* Check if address hits the reserved legacy IO range */
+ unsigned long ea = (unsigned long)address;
+ return ea >= ISA_IO_BASE && ea < ISA_IO_END;
+}
+
extern int pcibios_unmap_io_space(struct pci_bus *bus);
extern int pcibios_map_io_space(struct pci_bus *bus);
@@ -261,11 +286,16 @@ extern struct pci_controller *
pcibios_alloc_controller(struct device_node *dev);
#ifdef CONFIG_PCI
extern unsigned long pci_address_to_pio(phys_addr_t address);
+extern int pcibios_vaddr_is_ioport(void __iomem *address);
#else
static inline unsigned long pci_address_to_pio(phys_addr_t address)
{
return (unsigned long)-1;
}
+static inline int pcibios_vaddr_is_ioport(void __iomem *address)
+{
+ return 0;
+}
#endif
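
The new isa_vaddr_is_ioport()/pcibios_vaddr_is_ioport() helpers report whether an __iomem cookie falls inside the reserved legacy ISA I/O window (always false on ppc32 here). A hedged illustration of the kind of check a caller might make; the wrapper below is invented for this sketch:

/* Hypothetical wrapper: skip iounmap() for the legacy ISA I/O window. */
static void example_unmap(void __iomem *addr)
{
	if (pcibios_vaddr_is_ioport(addr))
		return;			/* PIO cookie, nothing was ioremap()ed */

	iounmap(addr);
}
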
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index fa083d8e466..65325721446 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -181,6 +181,18 @@ name: \
.type GLUE(.,name),@function; \
GLUE(.,name):
+#define _INIT_STATIC(name) \
+ .section ".text.init.refok"; \
+ .align 2 ; \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
#else /* 32-bit */
#define _GLOBAL(n) \
diff --git a/include/asm-powerpc/vio.h b/include/asm-powerpc/vio.h
index 0117b544ecb..3a0975e2ada 100644
--- a/include/asm-powerpc/vio.h
+++ b/include/asm-powerpc/vio.h
@@ -80,6 +80,11 @@ extern const void *vio_get_attribute(struct vio_dev *vdev, char *which,
extern struct vio_dev *vio_find_node(struct device_node *vnode);
extern int vio_enable_interrupts(struct vio_dev *dev);
extern int vio_disable_interrupts(struct vio_dev *dev);
+#else
+static inline int vio_enable_interrupts(struct vio_dev *dev)
+{
+ return 0;
+}
#endif
static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
diff --git a/include/asm-s390/ccwdev.h b/include/asm-s390/ccwdev.h
index 4c2e1710f15..1aeda27d5a8 100644
--- a/include/asm-s390/ccwdev.h
+++ b/include/asm-s390/ccwdev.h
@@ -165,11 +165,6 @@ extern int ccw_device_resume(struct ccw_device *);
extern int ccw_device_halt(struct ccw_device *, unsigned long);
extern int ccw_device_clear(struct ccw_device *, unsigned long);
-extern int __deprecated read_dev_chars(struct ccw_device *cdev, void **buffer, int length);
-extern int __deprecated read_conf_data(struct ccw_device *cdev, void **buffer, int *length);
-extern int __deprecated read_conf_data_lpm(struct ccw_device *cdev, void **buffer,
- int *length, __u8 lpm);
-
extern int ccw_device_set_online(struct ccw_device *cdev);
extern int ccw_device_set_offline(struct ccw_device *cdev);
diff --git a/include/asm-s390/s390_ext.h b/include/asm-s390/s390_ext.h
index df9b1017b70..1e72362cad7 100644
--- a/include/asm-s390/s390_ext.h
+++ b/include/asm-s390/s390_ext.h
@@ -10,6 +10,8 @@
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
+#include <linux/types.h>
+
typedef void (*ext_int_handler_t)(__u16 code);
/*
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 76e424f718c..07708c07701 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -36,8 +36,7 @@ extern void machine_halt_smp(void);
extern void machine_power_off_smp(void);
extern void smp_setup_cpu_possible_map(void);
-extern int smp_call_function_on(void (*func) (void *info), void *info,
- int nonatomic, int wait, int cpu);
+
#define NO_PROC_ID 0xFF /* No processor magic marker */
/*
@@ -96,14 +95,6 @@ extern int __cpu_up (unsigned int cpu);
#endif
#ifndef CONFIG_SMP
-static inline int
-smp_call_function_on(void (*func) (void *info), void *info,
- int nonatomic, int wait, int cpu)
-{
- func(info);
- return 0;
-}
-
static inline void smp_send_stop(void)
{
/* Disable all interrupts/machine checks */
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index 790c1c55741..f04acb2670a 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -251,7 +251,7 @@
#define __NR_getcpu 311
#define __NR_epoll_pwait 312
#define __NR_utimes 313
-/* Number 314 is reserved for new sys_fallocate */
+#define __NR_fallocate 314
#define __NR_utimensat 315
#define __NR_signalfd 316
#define __NR_timerfd 317
diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h
index 1da8f49c0fe..98173357dd8 100644
--- a/include/asm-x86_64/acpi.h
+++ b/include/asm-x86_64/acpi.h
@@ -108,6 +108,15 @@ static inline void acpi_disable_pci(void)
}
extern int acpi_irq_balance_set(char *str);
+/* routines for saving/restoring kernel state */
+extern int acpi_save_state_mem(void);
+extern void acpi_restore_state_mem(void);
+
+extern unsigned long acpi_wakeup_address;
+
+/* early initialization routine */
+extern void acpi_reserve_bootmem(void);
+
#else /* !CONFIG_ACPI */
#define acpi_lapic 0
@@ -121,19 +130,6 @@ extern int acpi_numa;
extern int acpi_scan_nodes(unsigned long start, unsigned long end);
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-#ifdef CONFIG_ACPI_SLEEP
-
-/* routines for saving/restoring kernel state */
-extern int acpi_save_state_mem(void);
-extern void acpi_restore_state_mem(void);
-
-extern unsigned long acpi_wakeup_address;
-
-/* early initialization routine */
-extern void acpi_reserve_bootmem(void);
-
-#endif /*CONFIG_ACPI_SLEEP*/
-
extern int acpi_disabled;
extern int acpi_pci_disabled;
diff --git a/include/asm-x86_64/ist.h b/include/asm-x86_64/ist.h
new file mode 100644
index 00000000000..338857ecbc6
--- /dev/null
+++ b/include/asm-x86_64/ist.h
@@ -0,0 +1 @@
+#include <asm-i386/ist.h>
diff --git a/include/asm-x86_64/suspend.h b/include/asm-x86_64/suspend.h
index 9c3f8de90d2..b897e8cb55f 100644
--- a/include/asm-x86_64/suspend.h
+++ b/include/asm-x86_64/suspend.h
@@ -44,7 +44,6 @@ extern unsigned long saved_context_eflags;
extern void fix_processor_context(void);
-#ifdef CONFIG_ACPI_SLEEP
extern unsigned long saved_rip;
extern unsigned long saved_rsp;
extern unsigned long saved_rbp;
@@ -54,4 +53,3 @@ extern unsigned long saved_rdi;
/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
-#endif
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index 9df30b939c4..f4ce8768ad4 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -100,7 +100,7 @@ struct exception_table_entry
case 8: __get_user_x(8,__ret_gu,__val_gu,ptr); break; \
default: __get_user_bad(); break; \
} \
- (x) = (typeof(*(ptr)))__val_gu; \
+ (x) = (__force typeof(*(ptr)))__val_gu; \
__ret_gu; \
})
@@ -192,7 +192,7 @@ struct __large_struct { unsigned long buf[100]; };
int __gu_err; \
unsigned long __gu_val; \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
- (x) = (typeof(*(ptr)))__gu_val; \
+ (x) = (__force typeof(*(ptr)))__gu_val; \
__gu_err; \
})
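
The added __force casts silence sparse when get_user() assigns the fetched value to a plain kernel-space variable; callers are unchanged. A minimal, hypothetical usage sketch:

/* Hypothetical ioctl-style helper: fetch one int from user space. */
static long example_fetch(unsigned long arg)
{
	int __user *uptr = (int __user *)arg;
	int val;

	if (get_user(val, uptr))	/* expands to the macros patched above */
		return -EFAULT;

	return val;
}
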
diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h
index 31ffc3f119c..0faa614d969 100644
--- a/include/asm-xtensa/io.h
+++ b/include/asm-xtensa/io.h
@@ -13,6 +13,7 @@
#ifdef __KERNEL__
#include <asm/byteorder.h>
+#include <asm/page.h>
#include <linux/types.h>
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d5680cd7746..bf5e0009de7 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -33,6 +33,7 @@
#endif
#include <linux/list.h>
+#include <linux/mod_devicetable.h>
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
diff --git a/include/linux/apm_bios.h b/include/linux/apm_bios.h
index 290aef32681..5f921c84827 100644
--- a/include/linux/apm_bios.h
+++ b/include/linux/apm_bios.h
@@ -21,20 +21,22 @@ typedef unsigned short apm_eventinfo_t;
#ifdef __KERNEL__
+#include <linux/types.h>
+
#define APM_CS (GDT_ENTRY_APMBIOS_BASE * 8)
#define APM_CS_16 (APM_CS + 8)
#define APM_DS (APM_CS_16 + 8)
struct apm_bios_info {
- unsigned short version;
- unsigned short cseg;
- unsigned long offset;
- unsigned short cseg_16;
- unsigned short dseg;
- unsigned short flags;
- unsigned short cseg_len;
- unsigned short cseg_16_len;
- unsigned short dseg_len;
+ u16 version;
+ u16 cseg;
+ u32 offset;
+ u16 cseg_16;
+ u16 dseg;
+ u16 flags;
+ u16 cseg_len;
+ u16 cseg_16_len;
+ u16 dseg_len;
};
/* Results of APM Installation Check */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 695e34964cb..b126c6f68e2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -37,7 +37,7 @@
struct scsi_ioctl_command;
struct request_queue;
-typedef struct request_queue request_queue_t;
+typedef struct request_queue request_queue_t __deprecated;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
@@ -233,7 +233,7 @@ struct request {
struct list_head queuelist;
struct list_head donelist;
- request_queue_t *q;
+ struct request_queue *q;
unsigned int cmd_flags;
enum rq_cmd_type_bits cmd_type;
@@ -337,15 +337,15 @@ struct request_pm_state
#include <linux/elevator.h>
-typedef void (request_fn_proc) (request_queue_t *q);
-typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
-typedef int (prep_rq_fn) (request_queue_t *, struct request *);
-typedef void (unplug_fn) (request_queue_t *);
+typedef void (request_fn_proc) (struct request_queue *q);
+typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unplug_fn) (struct request_queue *);
struct bio_vec;
-typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
+typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
+typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
enum blk_queue_state {
@@ -483,8 +483,8 @@ struct request_queue
#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
-#define QUEUE_FLAG_READFULL 3 /* write queue has been filled */
-#define QUEUE_FLAG_WRITEFULL 4 /* read queue has been filled */
+#define QUEUE_FLAG_READFULL 3 /* read queue has been filled */
+#define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
@@ -626,13 +626,13 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
-extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
return 0;
}
-static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_MMU */
@@ -646,14 +646,14 @@ extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
-extern void __blk_put_request(request_queue_t *, struct request *);
+extern void __blk_put_request(struct request_queue *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
-extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
-extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
-extern void blk_requeue_request(request_queue_t *, struct request *);
-extern void blk_plug_device(request_queue_t *);
-extern int blk_remove_plug(request_queue_t *);
-extern void blk_recount_segments(request_queue_t *, struct bio *);
+extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
+extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_plug_device(struct request_queue *);
+extern int blk_remove_plug(struct request_queue *);
+extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
@@ -662,14 +662,15 @@ extern int sg_scsi_ioctl(struct file *, struct request_queue *,
/*
* Temporary export, until SCSI gets fixed up.
*/
-extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);
+extern int ll_back_merge_fn(struct request_queue *, struct request *,
+ struct bio *);
/*
* A queue has just exited congestion. Note this in the global counter of
* congested queues, and wake up anyone who was waiting for requests to be
* put back.
*/
-static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
{
clear_bdi_congested(&q->backing_dev_info, rw);
}
@@ -678,29 +679,29 @@ static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
* A queue has just entered congestion. Flag that in the queue's VM-visible
* state flags and increment the global counter of congested queues.
*/
-static inline void blk_set_queue_congested(request_queue_t *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int rw)
{
set_bdi_congested(&q->backing_dev_info, rw);
}
-extern void blk_start_queue(request_queue_t *q);
-extern void blk_stop_queue(request_queue_t *q);
+extern void blk_start_queue(struct request_queue *q);
+extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(request_queue_t *q);
-extern void blk_run_queue(request_queue_t *);
-extern void blk_start_queueing(request_queue_t *);
-extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
+extern void __blk_stop_queue(struct request_queue *q);
+extern void blk_run_queue(struct request_queue *);
+extern void blk_start_queueing(struct request_queue *);
+extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
+extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
+extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct sg_iovec *, int, unsigned int);
-extern int blk_execute_rq(request_queue_t *, struct gendisk *,
+extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
-extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
+extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
extern int blk_verify_command(unsigned char *, int);
-static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
return bdev->bd_disk->queue;
}
@@ -749,41 +750,41 @@ static inline void blkdev_dequeue_request(struct request *req)
/*
* Access functions for manipulating queue properties
*/
-extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
+extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
-extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern void blk_cleanup_queue(request_queue_t *);
-extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
-extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
-extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
-extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
-extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
-extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
-extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
-extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
-extern void blk_queue_dma_alignment(request_queue_t *, int);
-extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
+extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(struct request_queue *);
+extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
+extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blk_do_ordered(request_queue_t *, struct request **);
-extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
+extern int blk_do_ordered(struct request_queue *, struct request **);
+extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
+extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
-extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(request_queue_t *);
-extern void __generic_unplug_device(request_queue_t *);
+extern void generic_unplug_device(struct request_queue *);
+extern void __generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);
-int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(gfp_t);
-request_queue_t *blk_alloc_queue_node(gfp_t, int);
-extern void blk_put_queue(request_queue_t *);
+int blk_get_queue(struct request_queue *);
+struct request_queue *blk_alloc_queue(gfp_t);
+struct request_queue *blk_alloc_queue_node(gfp_t, int);
+extern void blk_put_queue(struct request_queue *);
/*
* tag stuff
@@ -791,13 +792,13 @@ extern void blk_put_queue(request_queue_t *);
#define blk_queue_tag_depth(q) ((q)->queue_tags->busy)
#define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
-extern int blk_queue_start_tag(request_queue_t *, struct request *);
-extern struct request *blk_queue_find_tag(request_queue_t *, int);
-extern void blk_queue_end_tag(request_queue_t *, struct request *);
-extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
-extern void blk_queue_free_tags(request_queue_t *);
-extern int blk_queue_resize_tags(request_queue_t *, int);
-extern void blk_queue_invalidate_tags(request_queue_t *);
+extern int blk_queue_start_tag(struct request_queue *, struct request *);
+extern struct request *blk_queue_find_tag(struct request_queue *, int);
+extern void blk_queue_end_tag(struct request_queue *, struct request *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern void blk_queue_free_tags(struct request_queue *);
+extern int blk_queue_resize_tags(struct request_queue *, int);
+extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);
@@ -809,7 +810,7 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
return bqt->tag_index[tag];
}
-extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
+extern void blk_rq_bio_prep(struct request_queue *, struct request *, struct bio *);
extern int blkdev_issue_flush(struct block_device *, sector_t *);
#define MAX_PHYS_SEGMENTS 128
@@ -821,7 +822,7 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-static inline int queue_hardsect_size(request_queue_t *q)
+static inline int queue_hardsect_size(struct request_queue *q)
{
int retval = 512;
@@ -836,7 +837,7 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
return queue_hardsect_size(bdev_get_queue(bdev));
}
-static inline int queue_dma_alignment(request_queue_t *q)
+static inline int queue_dma_alignment(struct request_queue *q)
{
int retval = 511;
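
Every prototype in this header now spells out struct request_queue; the request_queue_t typedef is kept only as a __deprecated alias. A hedged sketch of the spelled-out style in a driver (the names and the trivial completion are invented; a real driver would keep the queue on its gendisk and perform actual I/O):

/* Hypothetical driver fragment written against the spelled-out type. */
static DEFINE_SPINLOCK(example_lock);

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL)
		end_request(rq, 1);	/* complete at once; a real driver does I/O */
}

static struct request_queue *example_alloc_queue(void)
{
	struct request_queue *q;

	q = blk_init_queue(example_request_fn, &example_lock);
	if (q)
		blk_queue_hardsect_size(q, 512);
	return q;			/* caller attaches this to its gendisk */
}
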
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3680ff9a30e..90874a5d7d7 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,7 +144,7 @@ struct blk_user_trace_setup {
#if defined(CONFIG_BLK_DEV_IO_TRACE)
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
-extern void blk_trace_shutdown(request_queue_t *);
+extern void blk_trace_shutdown(struct request_queue *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
/**
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 12a1291855e..86f9a3a6137 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -15,8 +15,8 @@
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
-extern void __chk_user_ptr(const void __user *);
-extern void __chk_io_ptr(const void __iomem *);
+extern void __chk_user_ptr(const volatile void __user *);
+extern void __chk_io_ptr(const volatile void __iomem *);
#else
# define __user
# define __kernel
diff --git a/include/linux/device.h b/include/linux/device.h
index d9f0a57f5a2..3a38d1f70cb 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -551,6 +551,9 @@ extern void put_device(struct device * dev);
/* drivers/base/power/shutdown.c */
extern void device_shutdown(void);
+/* drivers/base/sys.c */
+extern void sysdev_shutdown(void);
+
/* drivers/base/firmware.c */
extern int __must_check firmware_register(struct kset *);
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e88fcbc77f8..e8f42133a61 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -5,29 +5,29 @@
#ifdef CONFIG_BLOCK
-typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
+typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
struct bio *);
-typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
+typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
-typedef void (elevator_merged_fn) (request_queue_t *, struct request *, int);
+typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int);
-typedef int (elevator_allow_merge_fn) (request_queue_t *, struct request *, struct bio *);
+typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
-typedef int (elevator_dispatch_fn) (request_queue_t *, int);
+typedef int (elevator_dispatch_fn) (struct request_queue *, int);
-typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_queue_empty_fn) (request_queue_t *);
-typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
-typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_may_queue_fn) (request_queue_t *, int);
+typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
+typedef int (elevator_queue_empty_fn) (struct request_queue *);
+typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
+typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
+typedef int (elevator_may_queue_fn) (struct request_queue *, int);
-typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, gfp_t);
+typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
typedef void (elevator_put_req_fn) (struct request *);
-typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
-typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
+typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
+typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
-typedef void *(elevator_init_fn) (request_queue_t *);
+typedef void *(elevator_init_fn) (struct request_queue *);
typedef void (elevator_exit_fn) (elevator_t *);
struct elevator_ops
@@ -94,27 +94,27 @@ struct elevator_queue
/*
* block elevator interface
*/
-extern void elv_dispatch_sort(request_queue_t *, struct request *);
-extern void elv_dispatch_add_tail(request_queue_t *, struct request *);
-extern void elv_add_request(request_queue_t *, struct request *, int, int);
-extern void __elv_add_request(request_queue_t *, struct request *, int, int);
-extern void elv_insert(request_queue_t *, struct request *, int);
-extern int elv_merge(request_queue_t *, struct request **, struct bio *);
-extern void elv_merge_requests(request_queue_t *, struct request *,
+extern void elv_dispatch_sort(struct request_queue *, struct request *);
+extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
+extern void elv_add_request(struct request_queue *, struct request *, int, int);
+extern void __elv_add_request(struct request_queue *, struct request *, int, int);
+extern void elv_insert(struct request_queue *, struct request *, int);
+extern int elv_merge(struct request_queue *, struct request **, struct bio *);
+extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
-extern void elv_merged_request(request_queue_t *, struct request *, int);
-extern void elv_dequeue_request(request_queue_t *, struct request *);
-extern void elv_requeue_request(request_queue_t *, struct request *);
-extern int elv_queue_empty(request_queue_t *);
+extern void elv_merged_request(struct request_queue *, struct request *, int);
+extern void elv_dequeue_request(struct request_queue *, struct request *);
+extern void elv_requeue_request(struct request_queue *, struct request *);
+extern int elv_queue_empty(struct request_queue *);
extern struct request *elv_next_request(struct request_queue *q);
-extern struct request *elv_former_request(request_queue_t *, struct request *);
-extern struct request *elv_latter_request(request_queue_t *, struct request *);
-extern int elv_register_queue(request_queue_t *q);
-extern void elv_unregister_queue(request_queue_t *q);
-extern int elv_may_queue(request_queue_t *, int);
-extern void elv_completed_request(request_queue_t *, struct request *);
-extern int elv_set_request(request_queue_t *, struct request *, gfp_t);
-extern void elv_put_request(request_queue_t *, struct request *);
+extern struct request *elv_former_request(struct request_queue *, struct request *);
+extern struct request *elv_latter_request(struct request_queue *, struct request *);
+extern int elv_register_queue(struct request_queue *q);
+extern void elv_unregister_queue(struct request_queue *q);
+extern int elv_may_queue(struct request_queue *, int);
+extern void elv_completed_request(struct request_queue *, struct request *);
+extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
+extern void elv_put_request(struct request_queue *, struct request *);
/*
* io scheduler registration
@@ -125,18 +125,18 @@ extern void elv_unregister(struct elevator_type *);
/*
* io scheduler sysfs switching
*/
-extern ssize_t elv_iosched_show(request_queue_t *, char *);
-extern ssize_t elv_iosched_store(request_queue_t *, const char *, size_t);
+extern ssize_t elv_iosched_show(struct request_queue *, char *);
+extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
-extern int elevator_init(request_queue_t *, char *);
+extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(elevator_t *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
/*
* Helper functions.
*/
-extern struct request *elv_rb_former_request(request_queue_t *, struct request *);
-extern struct request *elv_rb_latter_request(request_queue_t *, struct request *);
+extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
+extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);
/*
* rb support functions.
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index c8e02de737f..efded00ad08 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -5,7 +5,7 @@
#include <linux/sched.h>
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/*
* Check if a process has been frozen
*/
@@ -126,7 +126,7 @@ static inline void set_freezable(void)
current->flags &= ~PF_NOFREEZE;
}
-#else
+#else /* !CONFIG_PM_SLEEP */
static inline int frozen(struct task_struct *p) { return 0; }
static inline int freezing(struct task_struct *p) { return 0; }
static inline void set_freeze_flag(struct task_struct *p) {}
@@ -143,6 +143,6 @@ static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
-#endif
+#endif /* !CONFIG_PM_SLEEP */
#endif /* FREEZER_H_INCLUDED */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 49b7053043a..e6a71c82d20 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H
+#include <linux/fs.h>
+
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 5f5daad8bc5..d71d0121b7f 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -555,7 +555,7 @@ typedef struct ide_drive_s {
char name[4]; /* drive name, such as "hda" */
char driver_req[10]; /* requests specific driver */
- request_queue_t *queue; /* request queue */
+ struct request_queue *queue; /* request queue */
struct request *rq; /* current request */
struct ide_drive_s *next; /* circular list of hwgroup drives */
@@ -1206,7 +1206,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
extern int ide_spin_wait_hwgroup(ide_drive_t *);
extern void ide_timer_expiry(unsigned long);
extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern void do_ide_request(request_queue_t *);
+extern void do_ide_request(struct request_queue *);
void ide_init_disk(struct gendisk *, ide_drive_t *);
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0a3c2ebf200..5523f19d88d 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,8 +11,6 @@
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
-#include <linux/bottom_half.h>
-#include <linux/device.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
@@ -97,6 +95,8 @@ extern int __must_check request_irq(unsigned int, irq_handler_t handler,
unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
+struct device;
+
extern int __must_check devm_request_irq(struct device *dev, unsigned int irq,
irq_handler_t handler, unsigned long irqflags,
const char *devname, void *dev_id);
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 500aace21ca..157ad64aa7c 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -17,7 +17,6 @@
#define LHCALL_TS 8
#define LHCALL_SET_CLOCKEVENT 9
#define LHCALL_HALT 10
-#define LHCALL_GET_WALLCLOCK 11
#define LHCALL_BIND_DMA 12
#define LHCALL_SEND_DMA 13
#define LHCALL_SET_PTE 14
@@ -27,18 +26,38 @@
#define LG_CLOCK_MIN_DELTA 100UL
#define LG_CLOCK_MAX_DELTA ULONG_MAX
+/*G:031 First, how does our Guest contact the Host to ask for privileged
+ * operations? There are two ways: the direct way is to make a "hypercall",
+ * to make requests of the Host Itself.
+ *
+ * Our hypercall mechanism uses the highest unused trap code (traps 32 and
+ * above are used by real hardware interrupts). Seventeen hypercalls are
+ * available: the hypercall number is put in the %eax register, and the
+ * arguments (when required) are placed in %edx, %ebx and %ecx. If a return
+ * value makes sense, it's returned in %eax.
+ *
+ * Grossly invalid calls result in Sudden Death at the hands of the vengeful
+ * Host, rather than returning failure. This reflects Winston Churchill's
+ * definition of a gentleman: "someone who is only rude intentionally". */
#define LGUEST_TRAP_ENTRY 0x1F
static inline unsigned long
hcall(unsigned long call,
unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
+ /* "int" is the Intel instruction to trigger a trap. */
asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
+ /* The call is in %eax (aka "a"), and can be replaced */
: "=a"(call)
+ /* The other arguments are in %eax, %edx, %ebx & %ecx */
: "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
+ /* "memory" means this might write somewhere in memory.
+ * This isn't true for all calls, but it's safe to tell
+ * gcc that it might happen so it doesn't get clever. */
: "memory");
return call;
}
+/*:*/
void async_hcall(unsigned long call,
unsigned long arg1, unsigned long arg2, unsigned long arg3);
@@ -52,31 +71,43 @@ struct hcall_ring
u32 eax, edx, ebx, ecx;
};
-/* All the good stuff happens here: guest registers it with LGUEST_INIT */
+/*G:032 The second method of communicating with the Host is via "struct
+ * lguest_data". The Guest's very first hypercall is to tell the Host where
+ * this is, and then the Guest and Host both publish information in it. :*/
struct lguest_data
{
-/* Fields which change during running: */
- /* 512 == enabled (same as eflags) */
+ /* 512 == enabled (same as eflags in normal hardware). The Guest
+ * changes interrupts so often that a hypercall is too slow. */
unsigned int irq_enabled;
- /* Interrupts blocked by guest. */
+ /* Fine-grained interrupt disabling by the Guest */
DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS);
- /* Virtual address of page fault. */
+ /* The Host writes the virtual address of the last page fault here,
+ * which saves the Guest a hypercall. CR2 is the native register where
+ * this address would normally be found. */
unsigned long cr2;
- /* Async hypercall ring. 0xFF == done, 0 == pending. */
+ /* Wallclock time set by the Host. */
+ struct timespec time;
+
+ /* Async hypercall ring. Instead of directly making hypercalls, we can
+ * place them in here for processing the next time the Host wants.
+ * This batching can be quite efficient. */
+
+ /* 0xFF == done (set by Host), 0 == pending (set by Guest). */
u8 hcall_status[LHCALL_RING_SIZE];
+ /* The actual registers for the hypercalls. */
struct hcall_ring hcalls[LHCALL_RING_SIZE];
-/* Fields initialized by the hypervisor at boot: */
+/* Fields initialized by the Host at boot: */
/* Memory not to try to access */
unsigned long reserve_mem;
- /* ID of this guest (used by network driver to set ethernet address) */
+ /* ID of this Guest (used by network driver to set ethernet address) */
u16 guestid;
/* KHz for the TSC clock. */
u32 tsc_khz;
-/* Fields initialized by the guest at boot: */
+/* Fields initialized by the Guest at boot: */
/* Instruction range to suppress interrupts even if enabled */
unsigned long noirq_start, noirq_end;
};
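
The G:031 comment documents the hypercall ABI: call number in %eax, up to three arguments in %edx, %ebx and %ecx, delivered through trap LGUEST_TRAP_ENTRY. A hedged sketch of Guest-side wrappers built on hcall() and the LHCALL_* numbers defined earlier in this header (the wrapper names are invented):

/* Hypothetical Guest-side wrappers around hcall(); names are invented. */
static void example_halt(void)
{
	hcall(LHCALL_HALT, 0, 0, 0);		/* no arguments, no result */
}

static void example_set_clockevent(unsigned long delta)
{
	hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0); /* one argument in %edx */
}
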
diff --git a/include/linux/lguest_bus.h b/include/linux/lguest_bus.h
index c9b4e05fee4..d27853ddc64 100644
--- a/include/linux/lguest_bus.h
+++ b/include/linux/lguest_bus.h
@@ -15,11 +15,14 @@ struct lguest_device {
void *private;
};
-/* By convention, each device can use irq index+1 if it wants to. */
+/*D:380 Since interrupt numbers are arbitrary, we use a convention: each device
+ * can use the interrupt number corresponding to its index. The +1 is because
+ * interrupt 0 is not usable (it's actually the timer interrupt). */
static inline int lgdev_irq(const struct lguest_device *dev)
{
return dev->index + 1;
}
+/*:*/
/* dma args must not be vmalloced! */
void lguest_send_dma(unsigned long key, struct lguest_dma *dma);
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index 0ba414a40c8..64167057944 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -9,14 +9,45 @@
/* How many devices? Assume each one wants up to two dma arrays per device. */
#define LGUEST_MAX_DEVICES (LGUEST_MAX_DMA/2)
+/*D:200
+ * Lguest I/O
+ *
+ * The lguest I/O mechanism is the only way Guests can talk to devices. There
+ * are two hypercalls involved: SEND_DMA for output and BIND_DMA for input. In
+ * each case, "struct lguest_dma" describes the buffer: this contains 16
+ * addr/len pairs, and if there are fewer buffer elements the len array is
+ * terminated with a 0.
+ *
+ * I/O is organized by keys: BIND_DMA attaches buffers to a particular key, and
+ * SEND_DMA transfers to buffers bound to a particular key. By convention, keys
+ * correspond to a physical address within the device's page. This means that
+ * devices will never accidentally end up with the same keys, and allows the
+ * Host to use The Futex Trick (as we'll see later in our journey).
+ *
+ * SEND_DMA simply indicates a key to send to, and the physical address of the
+ * "struct lguest_dma" to send. The Host will write the number of bytes
+ * transferred into the "struct lguest_dma"'s used_len member.
+ *
+ * BIND_DMA indicates a key to bind to, a pointer to an array of "struct
+ * lguest_dma"s ready for receiving, the size of that array, and an interrupt
+ * to trigger when data is received. The Host will only allow transfers into
+ * buffers with a used_len of zero: it then sets used_len to the number of
+ * bytes transferred and triggers the interrupt for the Guest to process the
+ * new input. */
struct lguest_dma
{
- /* 0 if free to be used, filled by hypervisor. */
+ /* 0 if free to be used, filled by the Host. */
u32 used_len;
unsigned long addr[LGUEST_MAX_DMA_SECTIONS];
u16 len[LGUEST_MAX_DMA_SECTIONS];
};
+/*:*/
+/*D:460 This is the layout of a block device memory page. The Launcher sets up
+ * the num_sectors initially to tell the Guest the size of the disk. The Guest
+ * puts the type, sector and length of the request in the first three fields,
+ * then DMAs to the Host. The Host processes the request, sets up the result,
+ * then DMAs back to the Guest. */
struct lguest_block_page
{
/* 0 is a read, 1 is a write. */
@@ -28,27 +59,47 @@ struct lguest_block_page
u32 num_sectors; /* Disk length = num_sectors * 512 */
};
-/* There is a shared page of these. */
+/*D:520 The network device is basically a memory page where all the Guests on
+ * the network publish their MAC (ethernet) addresses: it's an array of "struct
+ * lguest_net": */
struct lguest_net
{
/* Simply the mac address (with multicast bit meaning promisc). */
unsigned char mac[6];
};
+/*:*/
/* Where the Host expects the Guest to SEND_DMA console output to. */
#define LGUEST_CONSOLE_DMA_KEY 0
-/* We have a page of these descriptors in the lguest_device page. */
+/*D:010
+ * Drivers
+ *
+ * The Guest needs devices to do anything useful. Since we don't let it touch
+ * real devices (think of the damage it could do!) we provide virtual devices.
+ * We could emulate a PCI bus with various devices on it, but that is a fairly
+ * complex burden for the Host and suboptimal for the Guest, so we have our own
+ * "lguest" bus and simple drivers.
+ *
+ * Devices are described by an array of LGUEST_MAX_DEVICES of these structs,
+ * placed by the Launcher just above the top of physical memory:
+ */
struct lguest_device_desc {
+ /* The device type: console, network, disk etc. */
u16 type;
#define LGUEST_DEVICE_T_CONSOLE 1
#define LGUEST_DEVICE_T_NET 2
#define LGUEST_DEVICE_T_BLOCK 3
+ /* The specific features of this device: these depend on device type
+ * except for LGUEST_DEVICE_F_RANDOMNESS. */
u16 features;
#define LGUEST_NET_F_NOCSUM 0x4000 /* Don't bother checksumming */
#define LGUEST_DEVICE_F_RANDOMNESS 0x8000 /* IRQ is fairly random */
+ /* This is how the Guest reports status of the device: the Host can set
+ * LGUEST_DEVICE_S_REMOVED to indicate removal, but the rest are only
+ * ever manipulated by the Guest, and only ever set. */
u16 status;
/* 256 and above are device specific. */
#define LGUEST_DEVICE_S_ACKNOWLEDGE 1 /* We have seen device. */
@@ -58,9 +109,12 @@ struct lguest_device_desc {
#define LGUEST_DEVICE_S_REMOVED_ACK 16 /* Driver has been told. */
#define LGUEST_DEVICE_S_FAILED 128 /* Something actually failed */
+ /* Each device exists somewhere in Guest physical memory, over some
+ * number of pages. */
u16 num_pages;
u32 pfn;
};
+/*:*/
/* Write command first word is a request. */
enum lguest_req
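
The D:200 comment describes the I/O model: a struct lguest_dma carries up to LGUEST_MAX_DMA_SECTIONS addr/len pairs, a zero length terminates the list, and the Host reports progress in used_len. A hedged sketch of a Guest sending one buffer to a key via lguest_send_dma() (declared in lguest_bus.h earlier in this diff); the buffer, key and function names are invented, and per the bus header the buffer must not be vmalloc()ed:

/* Hypothetical single-buffer send; the buffer must not be vmalloc()ed. */
static void example_send_one(unsigned long key, void *buf, unsigned int len)
{
	struct lguest_dma dma;

	dma.used_len = 0;		/* the Host fills in the bytes consumed */
	dma.addr[0] = __pa(buf);	/* physical address of the data */
	dma.len[0] = len;
	dma.len[1] = 0;			/* a zero length terminates the array */

	lguest_send_dma(key, &dma);	/* issues the SEND_DMA hypercall */
}
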
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 9aa6c10f7bb..41978a55731 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -216,6 +216,8 @@ enum {
ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */
ATA_HOST_STARTED = (1 << 1), /* Host started */
+ /* bits 24:31 of host->flags are reserved for LLD specific flags */
+
/* various lengths of time */
ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 0b99b31f017..26a0a103898 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -63,7 +63,7 @@ struct loop_device {
struct task_struct *lo_thread;
wait_queue_head_t lo_event;
- request_queue_t *lo_queue;
+ struct request_queue *lo_queue;
struct gendisk *lo_disk;
struct list_head lo_list;
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c456c3a1c28..655094dc944 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,7 +10,6 @@
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
-#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/debug_locks.h>
#include <linux/backing-dev.h>
@@ -18,7 +17,9 @@
struct mempolicy;
struct anon_vma;
+struct file_ra_state;
struct user_struct;
+struct writeback_control;
#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
@@ -861,38 +862,7 @@ struct shrinker {
extern void register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
-/*
- * Some shared mappigns will want the pages marked read-only
- * to track write events. If so, we'll downgrade vm_page_prot
- * to the private version (using protection_map[] without the
- * VM_SHARED bit).
- */
-static inline int vma_wants_writenotify(struct vm_area_struct *vma)
-{
- unsigned int vm_flags = vma->vm_flags;
-
- /* If it was private or non-writable, the write bit is already clear */
- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
- return 0;
-
- /* The backer wishes to know when pages are first written to? */
- if (vma->vm_ops && vma->vm_ops->page_mkwrite)
- return 1;
-
- /* The open routine did something to the protections already? */
- if (pgprot_val(vma->vm_page_prot) !=
- pgprot_val(protection_map[vm_flags &
- (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
- return 0;
-
- /* Specialty mapping? */
- if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
- return 0;
-
- /* Can the mapping track the dirty pages? */
- return vma->vm_file && vma->vm_file->f_mapping &&
- mapping_cap_account_dirty(vma->vm_file->f_mapping);
-}
+int vma_wants_writenotify(struct vm_area_struct *vma);
extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
@@ -1246,7 +1216,7 @@ void drop_slab(void);
extern int randomize_va_space;
#endif
-__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma);
+const char * arch_vma_name(struct vm_area_struct *vma);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 04bbe12fae8..63a80ea6112 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -99,7 +99,7 @@ struct mmc_request {
struct mmc_host;
struct mmc_card;
-extern int mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
+extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
struct mmc_command *, int);
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index af04a555b52..2ada8ee316b 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -159,6 +159,12 @@ struct ap_device_id {
#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01
+#define ACPI_ID_LEN 9
+
+struct acpi_device_id {
+ __u8 id[ACPI_ID_LEN];
+ kernel_ulong_t driver_data;
+};
#define PNP_ID_LEN 8
#define PNP_MAX_DEVICES 8
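The new acpi_device_id gives ACPI drivers a table type of their own in the module device table, alongside the PNP definitions that follow. A hypothetical match table built on it (the hardware ID and table name are invented for illustration; ACPI_ID_LEN leaves room for an 8-character ID plus the terminating NUL):

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* Illustrative only: a made-up ACPI driver's ID table. */
static const struct acpi_device_id example_acpi_ids[] = {
        { "ABC12345", 0 },      /* hypothetical hardware ID, no driver_data */
        { "", 0 }               /* terminating entry */
};
MODULE_DEVICE_TABLE(acpi, example_acpi_ids);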
diff --git a/include/linux/netfilter/xt_connlimit.h b/include/linux/netfilter/xt_connlimit.h
index 90ae8b474cb..37e933c9987 100644
--- a/include/linux/netfilter/xt_connlimit.h
+++ b/include/linux/netfilter/xt_connlimit.h
@@ -5,8 +5,8 @@ struct xt_connlimit_data;
struct xt_connlimit_info {
union {
- u_int32_t v4_mask;
- u_int32_t v6_mask[4];
+ __be32 v4_mask;
+ __be32 v6_mask[4];
};
unsigned int limit, inverse;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 5e84f2e8d54..d8f8a3a9664 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -534,6 +534,7 @@ static inline int pci_write_config_dword(struct pci_dev *dev, int where, u32 val
int __must_check pci_enable_device(struct pci_dev *dev);
int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask);
+int __must_check __pci_reenable_device(struct pci_dev *);
int __must_check pcim_enable_device(struct pci_dev *pdev);
void pcim_pin_device(struct pci_dev *pdev);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cbabb9c675c..69d68117bda 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1225,6 +1225,10 @@
#define PCI_DEVICE_ID_NVIDIA_NVENET_25 0x054D
#define PCI_DEVICE_ID_NVIDIA_NVENET_26 0x054E
#define PCI_DEVICE_ID_NVIDIA_NVENET_27 0x054F
+#define PCI_DEVICE_ID_NVIDIA_NVENET_28 0x07DC
+#define PCI_DEVICE_ID_NVIDIA_NVENET_29 0x07DD
+#define PCI_DEVICE_ID_NVIDIA_NVENET_30 0x07DE
+#define PCI_DEVICE_ID_NVIDIA_NVENET_31 0x07DF
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759
@@ -1972,6 +1976,8 @@
#define PCI_VENDOR_ID_ENE 0x1524
#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550
#define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551
+#define PCI_DEVICE_ID_ENE_CB714_SD 0x0750
+#define PCI_DEVICE_ID_ENE_CB714_SD_2 0x0751
#define PCI_DEVICE_ID_ENE_1211 0x1211
#define PCI_DEVICE_ID_ENE_1225 0x1225
#define PCI_DEVICE_ID_ENE_1410 0x1410
@@ -2075,6 +2081,23 @@
#define PCI_VENDOR_ID_TDI 0x192E
#define PCI_DEVICE_ID_TDI_EHCI 0x0101
+#define PCI_VENDOR_ID_FREESCALE 0x1957
+#define PCI_DEVICE_ID_MPC8548E 0x0012
+#define PCI_DEVICE_ID_MPC8548 0x0013
+#define PCI_DEVICE_ID_MPC8543E 0x0014
+#define PCI_DEVICE_ID_MPC8543 0x0015
+#define PCI_DEVICE_ID_MPC8547E 0x0018
+#define PCI_DEVICE_ID_MPC8545E 0x0019
+#define PCI_DEVICE_ID_MPC8545 0x001a
+#define PCI_DEVICE_ID_MPC8568E 0x0020
+#define PCI_DEVICE_ID_MPC8568 0x0021
+#define PCI_DEVICE_ID_MPC8567E 0x0022
+#define PCI_DEVICE_ID_MPC8567 0x0023
+#define PCI_DEVICE_ID_MPC8544E 0x0030
+#define PCI_DEVICE_ID_MPC8544 0x0031
+#define PCI_DEVICE_ID_MPC8641 0x7010
+#define PCI_DEVICE_ID_MPC8641D 0x7011
+
#define PCI_VENDOR_ID_PASEMI 0x1959
#define PCI_VENDOR_ID_ATTANSIC 0x1969
diff --git a/include/linux/pm.h b/include/linux/pm.h
index ad3cc2eb0d3..e52f6f83c06 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -165,6 +165,7 @@ struct pm_ops {
int (*finish)(suspend_state_t state);
};
+#ifdef CONFIG_SUSPEND
extern struct pm_ops *pm_ops;
/**
@@ -193,6 +194,12 @@ extern void arch_suspend_disable_irqs(void);
extern void arch_suspend_enable_irqs(void);
extern int pm_suspend(suspend_state_t state);
+#else /* !CONFIG_SUSPEND */
+#define suspend_valid_only_mem NULL
+
+static inline void pm_set_ops(struct pm_ops *pm_ops) {}
+static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+#endif /* !CONFIG_SUSPEND */
/*
* Device power management
@@ -266,7 +273,7 @@ typedef struct pm_message {
struct dev_pm_info {
pm_message_t power_state;
unsigned can_wakeup:1;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
unsigned should_wakeup:1;
struct list_head entry;
#endif
@@ -276,7 +283,7 @@ extern int device_power_down(pm_message_t state);
extern void device_power_up(void);
extern void device_resume(void);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
extern int device_suspend(pm_message_t state);
extern int device_prepare_suspend(pm_message_t state);
@@ -306,7 +313,7 @@ static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
return 0;
}
-#else /* !CONFIG_PM */
+#else /* !CONFIG_PM_SLEEP */
static inline int device_suspend(pm_message_t state)
{
@@ -323,7 +330,7 @@ static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
return 0;
}
-#endif
+#endif /* !CONFIG_PM_SLEEP */
/* changes to device_may_wakeup take effect on the next pm state change.
* by default, devices should wakeup if they can.
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 2a1897e6f93..16b46aace34 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -1,7 +1,6 @@
/*
* Linux Plug and Play Support
* Copyright by Adam Belay <ambx1@neo.rr.com>
- *
*/
#ifndef _LINUX_PNP_H
@@ -23,7 +22,6 @@
struct pnp_protocol;
struct pnp_dev;
-
/*
* Resource Management
*/
@@ -73,37 +71,37 @@ struct pnp_dev;
#define PNP_PORT_FLAG_FIXED (1<<1)
struct pnp_port {
- unsigned short min; /* min base number */
- unsigned short max; /* max base number */
- unsigned char align; /* align boundary */
- unsigned char size; /* size of range */
- unsigned char flags; /* port flags */
- unsigned char pad; /* pad */
- struct pnp_port *next; /* next port */
+ unsigned short min; /* min base number */
+ unsigned short max; /* max base number */
+ unsigned char align; /* align boundary */
+ unsigned char size; /* size of range */
+ unsigned char flags; /* port flags */
+ unsigned char pad; /* pad */
+ struct pnp_port *next; /* next port */
};
#define PNP_IRQ_NR 256
struct pnp_irq {
- DECLARE_BITMAP(map, PNP_IRQ_NR); /* bitmaks for IRQ lines */
- unsigned char flags; /* IRQ flags */
- unsigned char pad; /* pad */
- struct pnp_irq *next; /* next IRQ */
+ DECLARE_BITMAP(map, PNP_IRQ_NR); /* bitmask for IRQ lines */
+ unsigned char flags; /* IRQ flags */
+ unsigned char pad; /* pad */
+ struct pnp_irq *next; /* next IRQ */
};
struct pnp_dma {
- unsigned char map; /* bitmask for DMA channels */
- unsigned char flags; /* DMA flags */
- struct pnp_dma *next; /* next port */
+ unsigned char map; /* bitmask for DMA channels */
+ unsigned char flags; /* DMA flags */
+ struct pnp_dma *next; /* next port */
};
struct pnp_mem {
- unsigned int min; /* min base number */
- unsigned int max; /* max base number */
- unsigned int align; /* align boundary */
- unsigned int size; /* size of range */
- unsigned char flags; /* memory flags */
- unsigned char pad; /* pad */
- struct pnp_mem *next; /* next memory resource */
+ unsigned int min; /* min base number */
+ unsigned int max; /* max base number */
+ unsigned int align; /* align boundary */
+ unsigned int size; /* size of range */
+ unsigned char flags; /* memory flags */
+ unsigned char pad; /* pad */
+ struct pnp_mem *next; /* next memory resource */
};
#define PNP_RES_PRIORITY_PREFERRED 0
@@ -127,7 +125,6 @@ struct pnp_resource_table {
struct resource irq_resource[PNP_MAX_IRQ];
};
-
/*
* Device Management
*/
@@ -139,14 +136,14 @@ struct pnp_card {
struct list_head protocol_list; /* node in protocol's list of cards */
struct list_head devices; /* devices attached to the card */
- struct pnp_protocol * protocol;
- struct pnp_id * id; /* contains supported EISA IDs*/
+ struct pnp_protocol *protocol;
+ struct pnp_id *id; /* contains supported EISA IDs */
char name[PNP_NAME_LEN]; /* contains a human-readable name */
- unsigned char pnpver; /* Plug & Play version */
- unsigned char productver; /* product version */
- unsigned int serial; /* serial number */
- unsigned char checksum; /* if zero - checksum passed */
+ unsigned char pnpver; /* Plug & Play version */
+ unsigned char productver; /* product version */
+ unsigned int serial; /* serial number */
+ unsigned char checksum; /* if zero - checksum passed */
struct proc_dir_entry *procdir; /* directory entry in /proc/bus/isapnp */
};
@@ -159,18 +156,18 @@ struct pnp_card {
(card) = global_to_pnp_card((card)->global_list.next))
struct pnp_card_link {
- struct pnp_card * card;
- struct pnp_card_driver * driver;
- void * driver_data;
+ struct pnp_card *card;
+ struct pnp_card_driver *driver;
+ void *driver_data;
pm_message_t pm_state;
};
-static inline void *pnp_get_card_drvdata (struct pnp_card_link *pcard)
+static inline void *pnp_get_card_drvdata(struct pnp_card_link *pcard)
{
return pcard->driver_data;
}
-static inline void pnp_set_card_drvdata (struct pnp_card_link *pcard, void *data)
+static inline void pnp_set_card_drvdata(struct pnp_card_link *pcard, void *data)
{
pcard->driver_data = data;
}
@@ -186,22 +183,22 @@ struct pnp_dev {
struct list_head card_list; /* node in card's list of devices */
struct list_head rdev_list; /* node in cards list of requested devices */
- struct pnp_protocol * protocol;
- struct pnp_card * card; /* card the device is attached to, none if NULL */
- struct pnp_driver * driver;
- struct pnp_card_link * card_link;
+ struct pnp_protocol *protocol;
+ struct pnp_card *card; /* card the device is attached to, none if NULL */
+ struct pnp_driver *driver;
+ struct pnp_card_link *card_link;
- struct pnp_id * id; /* supported EISA IDs*/
+ struct pnp_id *id; /* supported EISA IDs */
int active;
int capabilities;
- struct pnp_option * independent;
- struct pnp_option * dependent;
+ struct pnp_option *independent;
+ struct pnp_option *dependent;
struct pnp_resource_table res;
char name[PNP_NAME_LEN]; /* contains a human-readable name */
- unsigned short regs; /* ISAPnP: supported registers */
- int flags; /* used by protocols */
+ unsigned short regs; /* ISAPnP: supported registers */
+ int flags; /* used by protocols */
struct proc_dir_entry *procent; /* device entry in /proc/bus/isapnp */
void *data;
};
@@ -220,19 +217,19 @@ struct pnp_dev {
(dev) = card_to_pnp_dev((dev)->card_list.next))
#define pnp_dev_name(dev) (dev)->name
-static inline void *pnp_get_drvdata (struct pnp_dev *pdev)
+static inline void *pnp_get_drvdata(struct pnp_dev *pdev)
{
return dev_get_drvdata(&pdev->dev);
}
-static inline void pnp_set_drvdata (struct pnp_dev *pdev, void *data)
+static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
{
dev_set_drvdata(&pdev->dev, data);
}
struct pnp_fixup {
char id[7];
- void (*quirk_function)(struct pnp_dev *dev); /* fixup function */
+ void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
};
/* config parameters */
@@ -269,7 +266,6 @@ extern struct pnp_protocol pnpbios_protocol;
#define pnp_device_is_pnpbios(dev) 0
#endif
-
/* status */
#define PNP_READY 0x0000
#define PNP_ATTACHED 0x0001
@@ -287,17 +283,17 @@ extern struct pnp_protocol pnpbios_protocol;
struct pnp_id {
char id[PNP_ID_LEN];
- struct pnp_id * next;
+ struct pnp_id *next;
};
struct pnp_driver {
- char * name;
+ char *name;
const struct pnp_device_id *id_table;
unsigned int flags;
- int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id);
+ int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id);
void (*remove) (struct pnp_dev *dev);
- int (*suspend) (struct pnp_dev *dev, pm_message_t state);
- int (*resume) (struct pnp_dev *dev);
+ int (*suspend) (struct pnp_dev *dev, pm_message_t state);
+ int (*resume) (struct pnp_dev *dev);
struct device_driver driver;
};
@@ -305,13 +301,14 @@ struct pnp_driver {
struct pnp_card_driver {
struct list_head global_list;
- char * name;
+ char *name;
const struct pnp_card_device_id *id_table;
unsigned int flags;
- int (*probe) (struct pnp_card_link *card, const struct pnp_card_device_id *card_id);
+ int (*probe) (struct pnp_card_link *card,
+ const struct pnp_card_device_id *card_id);
void (*remove) (struct pnp_card_link *card);
- int (*suspend) (struct pnp_card_link *card, pm_message_t state);
- int (*resume) (struct pnp_card_link *card);
+ int (*suspend) (struct pnp_card_link *card, pm_message_t state);
+ int (*resume) (struct pnp_card_link *card);
struct pnp_driver link;
};
@@ -321,25 +318,28 @@ struct pnp_card_driver {
#define PNP_DRIVER_RES_DO_NOT_CHANGE 0x0001 /* do not change the state of the device */
#define PNP_DRIVER_RES_DISABLE 0x0003 /* ensure the device is disabled */
-
/*
* Protocol Management
*/
struct pnp_protocol {
- struct list_head protocol_list;
- char * name;
+ struct list_head protocol_list;
+ char *name;
/* resource control functions */
- int (*get)(struct pnp_dev *dev, struct pnp_resource_table *res);
- int (*set)(struct pnp_dev *dev, struct pnp_resource_table *res);
- int (*disable)(struct pnp_dev *dev);
+ int (*get) (struct pnp_dev *dev, struct pnp_resource_table *res);
+ int (*set) (struct pnp_dev *dev, struct pnp_resource_table *res);
+ int (*disable) (struct pnp_dev *dev);
+
+ /* protocol specific suspend/resume */
+ int (*suspend) (struct pnp_dev * dev, pm_message_t state);
+ int (*resume) (struct pnp_dev * dev);
/* used by pnp layer only (look but don't touch) */
- unsigned char number; /* protocol number*/
- struct device dev; /* link to driver model */
- struct list_head cards;
- struct list_head devices;
+ unsigned char number; /* protocol number */
+ struct device dev; /* link to driver model */
+ struct list_head cards;
+ struct list_head devices;
};
#define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list)
@@ -352,7 +352,6 @@ struct pnp_protocol {
(dev) != protocol_to_pnp_dev(&(protocol)->devices); \
(dev) = protocol_to_pnp_dev((dev)->protocol_list.next))
-
extern struct bus_type pnp_bus_type;
#if defined(CONFIG_PNP)
@@ -372,21 +371,25 @@ void pnp_remove_card(struct pnp_card *card);
int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev);
void pnp_remove_card_device(struct pnp_dev *dev);
int pnp_add_card_id(struct pnp_id *id, struct pnp_card *card);
-struct pnp_dev * pnp_request_card_device(struct pnp_card_link *clink, const char * id, struct pnp_dev * from);
-void pnp_release_card_device(struct pnp_dev * dev);
-int pnp_register_card_driver(struct pnp_card_driver * drv);
-void pnp_unregister_card_driver(struct pnp_card_driver * drv);
+struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink,
+ const char *id, struct pnp_dev *from);
+void pnp_release_card_device(struct pnp_dev *dev);
+int pnp_register_card_driver(struct pnp_card_driver *drv);
+void pnp_unregister_card_driver(struct pnp_card_driver *drv);
extern struct list_head pnp_cards;
/* resource management */
-struct pnp_option * pnp_register_independent_option(struct pnp_dev *dev);
-struct pnp_option * pnp_register_dependent_option(struct pnp_dev *dev, int priority);
+struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev);
+struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev,
+ int priority);
int pnp_register_irq_resource(struct pnp_option *option, struct pnp_irq *data);
int pnp_register_dma_resource(struct pnp_option *option, struct pnp_dma *data);
-int pnp_register_port_resource(struct pnp_option *option, struct pnp_port *data);
+int pnp_register_port_resource(struct pnp_option *option,
+ struct pnp_port *data);
int pnp_register_mem_resource(struct pnp_option *option, struct pnp_mem *data);
void pnp_init_resource_table(struct pnp_resource_table *table);
-int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res, int mode);
+int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res,
+ int mode);
int pnp_auto_config_dev(struct pnp_dev *dev);
int pnp_validate_config(struct pnp_dev *dev);
int pnp_start_dev(struct pnp_dev *dev);
@@ -394,11 +397,11 @@ int pnp_stop_dev(struct pnp_dev *dev);
int pnp_activate_dev(struct pnp_dev *dev);
int pnp_disable_dev(struct pnp_dev *dev);
void pnp_resource_change(struct resource *resource, resource_size_t start,
- resource_size_t size);
+ resource_size_t size);
/* protocol helpers */
-int pnp_is_active(struct pnp_dev * dev);
-int compare_pnp_id(struct pnp_id * pos, const char * id);
+int pnp_is_active(struct pnp_dev *dev);
+int compare_pnp_id(struct pnp_id *pos, const char *id);
int pnp_add_id(struct pnp_id *id, struct pnp_dev *dev);
int pnp_register_driver(struct pnp_driver *drv);
void pnp_unregister_driver(struct pnp_driver *drv);
@@ -411,23 +414,24 @@ static inline void pnp_unregister_protocol(struct pnp_protocol *protocol) { }
static inline int pnp_init_device(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_add_device(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_device_attach(struct pnp_dev *pnp_dev) { return -ENODEV; }
-static inline void pnp_device_detach(struct pnp_dev *pnp_dev) { ; }
+static inline void pnp_device_detach(struct pnp_dev *pnp_dev) { }
+
#define pnp_platform_devices 0
/* multidevice card support */
static inline int pnp_add_card(struct pnp_card *card) { return -ENODEV; }
-static inline void pnp_remove_card(struct pnp_card *card) { ; }
+static inline void pnp_remove_card(struct pnp_card *card) { }
static inline int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev) { return -ENODEV; }
-static inline void pnp_remove_card_device(struct pnp_dev *dev) { ; }
+static inline void pnp_remove_card_device(struct pnp_dev *dev) { }
static inline int pnp_add_card_id(struct pnp_id *id, struct pnp_card *card) { return -ENODEV; }
-static inline struct pnp_dev * pnp_request_card_device(struct pnp_card_link *clink, const char * id, struct pnp_dev * from) { return NULL; }
-static inline void pnp_release_card_device(struct pnp_dev * dev) { ; }
-static inline int pnp_register_card_driver(struct pnp_card_driver * drv) { return -ENODEV; }
-static inline void pnp_unregister_card_driver(struct pnp_card_driver * drv) { ; }
+static inline struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink, const char *id, struct pnp_dev *from) { return NULL; }
+static inline void pnp_release_card_device(struct pnp_dev *dev) { }
+static inline int pnp_register_card_driver(struct pnp_card_driver *drv) { return -ENODEV; }
+static inline void pnp_unregister_card_driver(struct pnp_card_driver *drv) { }
/* resource management */
-static inline struct pnp_option * pnp_register_independent_option(struct pnp_dev *dev) { return NULL; }
-static inline struct pnp_option * pnp_register_dependent_option(struct pnp_dev *dev, int priority) { return NULL; }
+static inline struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev) { return NULL; }
+static inline struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev, int priority) { return NULL; }
static inline int pnp_register_irq_resource(struct pnp_option *option, struct pnp_irq *data) { return -ENODEV; }
static inline int pnp_register_dma_resource(struct pnp_option *option, struct pnp_dma *data) { return -ENODEV; }
static inline int pnp_register_port_resource(struct pnp_option *option, struct pnp_port *data) { return -ENODEV; }
@@ -440,20 +444,17 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; }
-static inline void pnp_resource_change(struct resource *resource,
- resource_size_t start,
- resource_size_t size) { }
+static inline void pnp_resource_change(struct resource *resource, resource_size_t start, resource_size_t size) { }
/* protocol helpers */
-static inline int pnp_is_active(struct pnp_dev * dev) { return 0; }
-static inline int compare_pnp_id(struct pnp_id * pos, const char * id) { return -ENODEV; }
+static inline int pnp_is_active(struct pnp_dev *dev) { return 0; }
+static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -ENODEV; }
static inline int pnp_add_id(struct pnp_id *id, struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; }
-static inline void pnp_unregister_driver(struct pnp_driver *drv) { ; }
+static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
#endif /* CONFIG_PNP */
-
#define pnp_err(format, arg...) printk(KERN_ERR "pnp: " format "\n" , ## arg)
#define pnp_info(format, arg...) printk(KERN_INFO "pnp: " format "\n" , ## arg)
#define pnp_warn(format, arg...) printk(KERN_WARNING "pnp: " format "\n" , ## arg)
diff --git a/include/linux/pnpbios.h b/include/linux/pnpbios.h
index 0a282ac1f6b..329192adc9d 100644
--- a/include/linux/pnpbios.h
+++ b/include/linux/pnpbios.h
@@ -99,32 +99,32 @@
#pragma pack(1)
struct pnp_dev_node_info {
- __u16 no_nodes;
- __u16 max_node_size;
+ __u16 no_nodes;
+ __u16 max_node_size;
};
struct pnp_docking_station_info {
- __u32 location_id;
- __u32 serial;
- __u16 capabilities;
+ __u32 location_id;
+ __u32 serial;
+ __u16 capabilities;
};
struct pnp_isa_config_struc {
- __u8 revision;
- __u8 no_csns;
- __u16 isa_rd_data_port;
- __u16 reserved;
+ __u8 revision;
+ __u8 no_csns;
+ __u16 isa_rd_data_port;
+ __u16 reserved;
};
struct escd_info_struc {
- __u16 min_escd_write_size;
- __u16 escd_size;
- __u32 nv_storage_base;
+ __u16 min_escd_write_size;
+ __u16 escd_size;
+ __u32 nv_storage_base;
};
struct pnp_bios_node {
- __u16 size;
- __u8 handle;
- __u32 eisa_id;
- __u8 type_code[3];
- __u16 flags;
- __u8 data[0];
+ __u16 size;
+ __u8 handle;
+ __u32 eisa_id;
+ __u8 type_code[3];
+ __u16 flags;
+ __u8 data[0];
};
#pragma pack()
@@ -133,22 +133,16 @@ struct pnp_bios_node {
/* non-exported */
extern struct pnp_dev_node_info node_info;
-extern int pnp_bios_dev_node_info (struct pnp_dev_node_info *data);
-extern int pnp_bios_get_dev_node (u8 *nodenum, char config, struct pnp_bios_node *data);
-extern int pnp_bios_set_dev_node (u8 nodenum, char config, struct pnp_bios_node *data);
-extern int pnp_bios_get_stat_res (char *info);
-extern int pnp_bios_isapnp_config (struct pnp_isa_config_struc *data);
-extern int pnp_bios_escd_info (struct escd_info_struc *data);
-extern int pnp_bios_read_escd (char *data, u32 nvram_base);
+extern int pnp_bios_dev_node_info(struct pnp_dev_node_info *data);
+extern int pnp_bios_get_dev_node(u8 *nodenum, char config,
+ struct pnp_bios_node *data);
+extern int pnp_bios_set_dev_node(u8 nodenum, char config,
+ struct pnp_bios_node *data);
+extern int pnp_bios_get_stat_res(char *info);
+extern int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data);
+extern int pnp_bios_escd_info(struct escd_info_struc *data);
+extern int pnp_bios_read_escd(char *data, u32 nvram_base);
extern int pnp_bios_dock_station_info(struct pnp_docking_station_info *data);
-#define needed 0
-#if needed
-extern int pnp_bios_get_event (u16 *message);
-extern int pnp_bios_send_message (u16 message);
-extern int pnp_bios_set_stat_res (char *info);
-extern int pnp_bios_apm_id_table (char *table, u16 *size);
-extern int pnp_bios_write_escd (char *data, u32 nvram_base);
-#endif
#endif /* CONFIG_PNPBIOS */
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index d0926d63406..484988ed301 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -8,6 +8,7 @@
#include <linux/thread_info.h>
#include <linux/linkage.h>
+#include <linux/list.h>
#ifdef CONFIG_DEBUG_PREEMPT
extern void fastcall add_preempt_count(int val);
@@ -60,4 +61,47 @@ do { \
#endif
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+
+struct preempt_notifier;
+
+/**
+ * preempt_ops - notifiers called when a task is preempted and rescheduled
+ * @sched_in: we're about to be rescheduled:
+ * notifier: struct preempt_notifier for the task being scheduled
+ * cpu: cpu we're scheduled on
+ * @sched_out: we've just been preempted
+ * notifier: struct preempt_notifier for the task being preempted
+ * next: the task that's kicking us out
+ */
+struct preempt_ops {
+ void (*sched_in)(struct preempt_notifier *notifier, int cpu);
+ void (*sched_out)(struct preempt_notifier *notifier,
+ struct task_struct *next);
+};
+
+/**
+ * preempt_notifier - key for installing preemption notifiers
+ * @link: internal use
+ * @ops: defines the notifier functions to be called
+ *
+ * Usually used in conjunction with container_of().
+ */
+struct preempt_notifier {
+ struct hlist_node link;
+ struct preempt_ops *ops;
+};
+
+void preempt_notifier_register(struct preempt_notifier *notifier);
+void preempt_notifier_unregister(struct preempt_notifier *notifier);
+
+static inline void preempt_notifier_init(struct preempt_notifier *notifier,
+ struct preempt_ops *ops)
+{
+ INIT_HLIST_NODE(&notifier->link);
+ notifier->ops = ops;
+}
+
+#endif
+
#endif /* __LINUX_PREEMPT_H */
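The kerneldoc above hints at the intended usage pattern: embed the preempt_notifier in a larger per-task structure and recover that structure with container_of() from the callbacks. A sketch of such a user, with all my_* names invented for the example:

#include <linux/preempt.h>
#include <linux/sched.h>

struct my_vcpu {
        struct preempt_notifier pn;
        int last_cpu;
        bool preempted;
};

static void my_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct my_vcpu *vcpu = container_of(pn, struct my_vcpu, pn);

        /* current has just been rescheduled on 'cpu': reload state. */
        vcpu->last_cpu = cpu;
        vcpu->preempted = false;
}

static void my_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
        struct my_vcpu *vcpu = container_of(pn, struct my_vcpu, pn);

        /* current is being preempted in favour of 'next': save state. */
        vcpu->preempted = true;
}

static struct preempt_ops my_preempt_ops = {
        .sched_in       = my_sched_in,
        .sched_out      = my_sched_out,
};

static void my_vcpu_attach(struct my_vcpu *vcpu)
{
        /* Registers against 'current', so call this from the watched task. */
        preempt_notifier_init(&vcpu->pn, &my_preempt_ops);
        preempt_notifier_register(&vcpu->pn);
}

Since the new kernel/Kconfig.preempt symbol has no prompt, a user such as this would be expected to select CONFIG_PREEMPT_NOTIFIERS from its own Kconfig entry.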
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 28ac632b42d..dcb729244f4 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -227,7 +227,7 @@ struct mddev_s
unsigned int safemode_delay;
struct timer_list safemode_timer;
atomic_t writes_pending;
- request_queue_t *queue; /* for plugging ... */
+ struct request_queue *queue; /* for plugging ... */
atomic_t write_behind; /* outstanding async IO */
unsigned int max_write_behind; /* 0 = sync */
@@ -265,7 +265,7 @@ struct mdk_personality
int level;
struct list_head list;
struct module *owner;
- int (*make_request)(request_queue_t *q, struct bio *bio);
+ int (*make_request)(struct request_queue *q, struct bio *bio);
int (*run)(mddev_t *mddev);
int (*stop)(mddev_t *mddev);
void (*status)(struct seq_file *seq, mddev_t *mddev);
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 965d5b3ea9e..180a9d832dd 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -81,14 +81,16 @@ void reiserfs_warning(struct super_block *s, const char *fmt, ...);
/* assertions handling */
/** always check a condition and panic if it's false. */
-#define RASSERT( cond, format, args... ) \
+#define __RASSERT( cond, scond, format, args... ) \
if( !( cond ) ) \
- reiserfs_panic( NULL, "reiserfs[%i]: assertion " #cond " failed at " \
+ reiserfs_panic( NULL, "reiserfs[%i]: assertion " scond " failed at " \
__FILE__ ":%i:%s: " format "\n", \
in_interrupt() ? -1 : current -> pid, __LINE__ , __FUNCTION__ , ##args )
+#define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args)
+
#if defined( CONFIG_REISERFS_CHECK )
-#define RFALSE( cond, format, args... ) RASSERT( !( cond ), format, ##args )
+#define RFALSE(cond, format, args...) __RASSERT(!(cond), "!(" #cond ")", format, ##args)
#else
#define RFALSE( cond, format, args... ) do {;} while( 0 )
#endif
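With the new helpers the panic message carries the condition text explicitly: RASSERT() passes #cond through unchanged, while RFALSE() reports "!(<condition>)". For instance (file, line and values invented):

/*
 * RFALSE(bh == NULL, "block %lu has no buffer", blocknr);
 *
 * panics, when bh really is NULL and CONFIG_REISERFS_CHECK is set, with
 * a message of the form:
 *
 *   reiserfs[1234]: assertion !(bh == NULL) failed at
 *   fs/reiserfs/example.c:42:example_func: block 17 has no buffer
 */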
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 33b9b4841ee..2e490271acf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -681,7 +681,7 @@ enum cpu_idle_type {
#define SCHED_LOAD_SHIFT 10
#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
-#define SCHED_LOAD_SCALE_FUZZ (SCHED_LOAD_SCALE >> 5)
+#define SCHED_LOAD_SCALE_FUZZ (SCHED_LOAD_SCALE >> 1)
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
@@ -786,6 +786,22 @@ extern int partition_sched_domains(cpumask_t *partition1,
#endif /* CONFIG_SMP */
+/*
+ * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
+ * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
+ * task of nice 0 or enough lower priority tasks to bring up the
+ * weighted_cpuload to that level.
+ */
+static inline int above_background_load(void)
+{
+ unsigned long cpu;
+
+ for_each_online_cpu(cpu) {
+ if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
+ return 1;
+ }
+ return 0;
+}
struct io_context; /* See blkdev.h */
struct cpuset;
@@ -935,6 +951,11 @@ struct task_struct {
struct sched_class *sched_class;
struct sched_entity se;
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+ /* list of struct preempt_notifier: */
+ struct hlist_head preempt_notifiers;
+#endif
+
unsigned short ioprio;
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 8518fa2a6f8..afe0f6d9b9b 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -20,7 +20,7 @@
struct plat_serial8250_port {
unsigned long iobase; /* io base address */
void __iomem *membase; /* ioremap cookie or NULL */
- unsigned long mapbase; /* resource base */
+ resource_size_t mapbase; /* resource base */
unsigned int irq; /* interrupt number */
unsigned int uartclk; /* UART clock rate */
unsigned char regshift; /* register shift */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 773d8d8828a..09d17b06bf0 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -288,7 +288,7 @@ struct uart_port {
const struct uart_ops *ops;
unsigned int custom_divisor;
unsigned int line; /* port index */
- unsigned long mapbase; /* for ioremap */
+ resource_size_t mapbase; /* for ioremap */
struct device *dev; /* parent device */
unsigned char hub6; /* this should be in the 8250 driver */
unsigned char unused[3];
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index e8e6da394c9..388cace9751 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -24,7 +24,7 @@ struct pbe {
extern void drain_local_pages(void);
extern void mark_free_pages(struct zone *zone);
-#if defined(CONFIG_PM) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
extern int pm_prepare_console(void);
extern void pm_restore_console(void);
#else
@@ -54,8 +54,7 @@ struct hibernation_ops {
void (*restore_cleanup)(void);
};
-#ifdef CONFIG_PM
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
static inline void register_nosave_region(unsigned long b, unsigned long e)
@@ -73,15 +72,16 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
extern void hibernation_set_ops(struct hibernation_ops *ops);
extern int hibernate(void);
-#else /* CONFIG_SOFTWARE_SUSPEND */
+#else /* CONFIG_HIBERNATION */
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}
static inline void hibernation_set_ops(struct hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
-#endif /* CONFIG_SOFTWARE_SUSPEND */
+#endif /* CONFIG_HIBERNATION */
+#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);
struct saved_context;
@@ -106,7 +106,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
{ .notifier_call = fn, .priority = pri }; \
register_pm_notifier(&fn##_nb); \
}
-#else /* CONFIG_PM */
+#else /* !CONFIG_PM_SLEEP */
static inline int register_pm_notifier(struct notifier_block *nb)
{
@@ -119,12 +119,15 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
}
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
-#endif /* CONFIG_PM */
+#endif /* !CONFIG_PM_SLEEP */
-#if !defined CONFIG_SOFTWARE_SUSPEND || !defined(CONFIG_PM)
+#ifndef CONFIG_HIBERNATION
static inline void register_nosave_region(unsigned long b, unsigned long e)
{
}
+static inline void register_nosave_region_late(unsigned long b, unsigned long e)
+{
+}
#endif
#endif /* _LINUX_SWSUSP_H */
diff --git a/include/linux/time.h b/include/linux/time.h
index e6aea5146e5..6a5f503b4f1 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -99,15 +99,11 @@ extern int update_persistent_clock(struct timespec now);
extern int no_sync_cmos_clock __read_mostly;
void timekeeping_init(void);
-static inline unsigned long get_seconds(void)
-{
- return xtime.tv_sec;
-}
-
+unsigned long get_seconds(void);
struct timespec current_kernel_time(void);
#define CURRENT_TIME (current_kernel_time())
-#define CURRENT_TIME_SEC ((struct timespec) { xtime.tv_sec, 0 })
+#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday(struct timespec *tv);
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 040dae5f0c9..c48e390f4b0 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -35,7 +35,7 @@ union nf_conntrack_address {
union nf_conntrack_man_proto
{
/* Add other protocols here. */
- u_int16_t all;
+ __be16 all;
struct {
__be16 port;
@@ -73,7 +73,7 @@ struct nf_conntrack_tuple
union nf_conntrack_address u3;
union {
/* Add other protocols here. */
- u_int16_t all;
+ __be16 all;
struct {
__be16 port;
diff --git a/include/scsi/sd.h b/include/scsi/sd.h
index 5261488e110..78583fee0ab 100644
--- a/include/scsi/sd.h
+++ b/include/scsi/sd.h
@@ -57,7 +57,7 @@ static int sd_resume(struct device *dev);
static void sd_rescan(struct device *);
static int sd_init_command(struct scsi_cmnd *);
static int sd_issue_flush(struct device *, sector_t *);
-static void sd_prepare_flush(request_queue_t *, struct request *);
+static void sd_prepare_flush(struct request_queue *, struct request *);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct class_device *cdev);
static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
diff --git a/include/xen/page.h b/include/xen/page.h
index 1df6c193057..c0c8fcb2789 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -4,6 +4,7 @@
#include <linux/pfn.h>
#include <asm/uaccess.h>
+#include <asm/pgtable.h>
#include <xen/features.h>
diff --git a/init/initramfs.c b/init/initramfs.c
index 00eff7a1108..1db02a0025d 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -133,7 +133,7 @@ static __initdata loff_t this_header, next_header;
static __initdata int dry_run;
-static inline void eat(unsigned n)
+static inline void __init eat(unsigned n)
{
victim += n;
this_header += n;
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index c64ce9c1420..6b066632e40 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -63,3 +63,6 @@ config PREEMPT_BKL
Say Y here if you are building a kernel for a desktop system.
Say N if you are unsure.
+config PREEMPT_NOTIFIERS
+ bool
+
diff --git a/kernel/acct.c b/kernel/acct.c
index 70d0d88e555..24f0f8b2ba7 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -468,7 +468,7 @@ static void do_acct_process(struct file *file)
}
#endif
do_div(elapsed, AHZ);
- ac.ac_btime = xtime.tv_sec - elapsed;
+ ac.ac_btime = get_seconds() - elapsed;
/* we really need to bite the bullet and change layout */
ac.ac_uid = current->uid;
ac.ac_gid = current->gid;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index bde1124d590..a777d376141 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -824,12 +824,14 @@ static void audit_log_execve_info(struct audit_buffer *ab,
{
int i;
long len, ret;
- const char __user *p = (const char __user *)axi->mm->arg_start;
+ const char __user *p;
char *buf;
if (axi->mm != current->mm)
return; /* execve failed, no additional info */
+ p = (const char __user *)axi->mm->arg_start;
+
for (i = 0; i < axi->argc; i++, p += len) {
len = strnlen_user(p, MAX_ARG_STRLEN);
/*
@@ -855,7 +857,7 @@ static void audit_log_execve_info(struct audit_buffer *ab,
* copied them here, and the mm hasn't been exposed to user-
* space yet.
*/
- if (!ret) {
+ if (ret) {
WARN_ON(1);
send_sig(SIGKILL, current, 0);
}
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index eb1ddebd2c0..c21ca6bfaa6 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -141,11 +141,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
do {
seq = read_seqbegin(&xtime_lock);
-#ifdef CONFIG_NO_HZ
- getnstimeofday(&xts);
-#else
- xts = xtime;
-#endif
+ xts = current_kernel_time();
tom = wall_to_monotonic;
} while (read_seqretry(&xtime_lock, seq));
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index d8ee241115f..6d9204f3a37 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -1,5 +1,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/device.h>
/*
* Device resource management aware IRQ request/free implementation.
diff --git a/kernel/kmod.c b/kernel/kmod.c
index beedbdc6460..9809cc1f33d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -351,11 +351,11 @@ static inline void register_pm_notifier_callback(void) {}
/**
* call_usermodehelper_setup - prepare to call a usermode helper
- * @path - path to usermode executable
- * @argv - arg vector for process
- * @envp - environment for process
+ * @path: path to usermode executable
+ * @argv: arg vector for process
+ * @envp: environment for process
*
- * Returns either NULL on allocation failure, or a subprocess_info
+ * Returns either %NULL on allocation failure, or a subprocess_info
* structure. This should be passed to call_usermodehelper_exec to
* exec the process and free the structure.
*/
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index c1a106d87d9..412859f8d94 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -46,7 +46,7 @@ config PM_VERBOSE
config DISABLE_CONSOLE_SUSPEND
bool "Keep console(s) enabled during suspend/resume (DANGEROUS)"
- depends on PM_DEBUG
+ depends on PM_DEBUG && PM_SLEEP
default n
---help---
This option turns off the console suspend mechanism that prevents
@@ -57,7 +57,7 @@ config DISABLE_CONSOLE_SUSPEND
config PM_TRACE
bool "Suspend/resume event tracing"
- depends on PM_DEBUG && X86 && EXPERIMENTAL
+ depends on PM_DEBUG && X86 && PM_SLEEP && EXPERIMENTAL
default n
---help---
This enables some cheesy code to save the last PM event point in the
@@ -72,9 +72,37 @@ config PM_TRACE
CAUTION: this option will cause your machine's real-time clock to be
set to an invalid time after a resume.
-config SOFTWARE_SUSPEND
- bool "Software Suspend (Hibernation)"
- depends on PM && SWAP && (((X86 || PPC64_SWSUSP) && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP))
+config SUSPEND_SMP_POSSIBLE
+ bool
+ depends on (X86 && !X86_VOYAGER) || (PPC64 && (PPC_PSERIES || PPC_PMAC))
+ depends on SMP
+ default y
+
+config SUSPEND_SMP
+ bool
+ depends on SUSPEND_SMP_POSSIBLE && PM_SLEEP
+ select HOTPLUG_CPU
+ default y
+
+config PM_SLEEP
+ bool
+ depends on SUSPEND || HIBERNATION
+ default y
+
+config SUSPEND
+ bool "Suspend to RAM and standby"
+ depends on PM
+ depends on !SMP || SUSPEND_SMP_POSSIBLE
+ default y
+ ---help---
+ Allow the system to enter sleep states in which main memory is
+ powered and thus its contents are preserved, such as the
+ suspend-to-RAM state (i.e. the ACPI S3 state).
+
+config HIBERNATION
+ bool "Hibernation (aka 'suspend to disk')"
+ depends on PM && SWAP
+ depends on ((X86 || PPC64_SWSUSP || FRV || PPC32) && !SMP) || SUSPEND_SMP_POSSIBLE
---help---
Enable the suspend to disk (STD) functionality, which is usually
called "hibernation" in user interfaces. STD checkpoints the
@@ -112,7 +140,7 @@ config SOFTWARE_SUSPEND
config PM_STD_PARTITION
string "Default resume partition"
- depends on SOFTWARE_SUSPEND
+ depends on HIBERNATION
default ""
---help---
The default resume partition is the partition that the suspend-
@@ -132,11 +160,6 @@ config PM_STD_PARTITION
suspended image to. It will simply pick the first available swap
device.
-config SUSPEND_SMP
- bool
- depends on HOTPLUG_CPU && (X86 || PPC64) && PM
- default y
-
config APM_EMULATION
tristate "Advanced Power Management Emulation"
depends on PM && SYS_SUPPORTS_APM_EMULATION
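For driver and architecture code, the practical consequence of this restructuring is visible in the pm.h and suspend.h hunks above: sleep-related code is now guarded by CONFIG_PM_SLEEP (set whenever SUSPEND or HIBERNATION is enabled) rather than bare CONFIG_PM, and hibernation-only code moves from CONFIG_SOFTWARE_SUSPEND to CONFIG_HIBERNATION. A sketch of the guard a driver would now use (the driver and function names are invented):

#include <linux/platform_device.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct platform_device *pdev, pm_message_t state)
{
        /* quiesce the (hypothetical) hardware */
        return 0;
}

static int example_resume(struct platform_device *pdev)
{
        /* bring the (hypothetical) hardware back up */
        return 0;
}
#else
#define example_suspend NULL
#define example_resume  NULL
#endif /* CONFIG_PM_SLEEP */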
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 38725f526af..f7dfff28ecd 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -3,8 +3,9 @@ ifeq ($(CONFIG_PM_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif
-obj-y := main.o process.o console.o
+obj-y := main.o
obj-$(CONFIG_PM_LEGACY) += pm.o
-obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o snapshot.o swap.o user.o
+obj-$(CONFIG_PM_SLEEP) += process.o console.o
+obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 324ac0188ce..eb72255b5c8 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -216,6 +216,7 @@ int hibernation_platform_enter(void)
* sleep state after all
*/
error = hibernation_ops->prepare();
+ sysdev_shutdown();
if (!error)
error = hibernation_ops->enter();
} else {
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 32147b57c3b..350b485b3b6 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -25,11 +25,13 @@
BLOCKING_NOTIFIER_HEAD(pm_chain_head);
-/*This is just an arbitrary number */
-#define FREE_PAGE_NUMBER (100)
-
DEFINE_MUTEX(pm_mutex);
+#ifdef CONFIG_SUSPEND
+
+/* This is just an arbitrary number */
+#define FREE_PAGE_NUMBER (100)
+
struct pm_ops *pm_ops;
/**
@@ -269,6 +271,8 @@ int pm_suspend(suspend_state_t state)
EXPORT_SYMBOL(pm_suspend);
+#endif /* CONFIG_SUSPEND */
+
decl_subsys(power,NULL,NULL);
@@ -285,14 +289,16 @@ decl_subsys(power,NULL,NULL);
static ssize_t state_show(struct kset *kset, char *buf)
{
+ char *s = buf;
+#ifdef CONFIG_SUSPEND
int i;
- char * s = buf;
for (i = 0; i < PM_SUSPEND_MAX; i++) {
if (pm_states[i] && valid_state(i))
s += sprintf(s,"%s ", pm_states[i]);
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#endif
+#ifdef CONFIG_HIBERNATION
s += sprintf(s, "%s\n", "disk");
#else
if (s != buf)
@@ -304,11 +310,13 @@ static ssize_t state_show(struct kset *kset, char *buf)
static ssize_t state_store(struct kset *kset, const char *buf, size_t n)
{
+#ifdef CONFIG_SUSPEND
suspend_state_t state = PM_SUSPEND_STANDBY;
const char * const *s;
+#endif
char *p;
- int error;
int len;
+ int error = -EINVAL;
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
@@ -316,17 +324,19 @@ static ssize_t state_store(struct kset *kset, const char *buf, size_t n)
/* First, check if we are requested to hibernate */
if (len == 4 && !strncmp(buf, "disk", len)) {
error = hibernate();
- return error ? error : n;
+ goto Exit;
}
+#ifdef CONFIG_SUSPEND
for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
break;
}
if (state < PM_SUSPEND_MAX && *s)
error = enter_state(state);
- else
- error = -EINVAL;
+#endif
+
+ Exit:
return error ? error : n;
}
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 5f24c786f8e..95fbf2dd3fe 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -13,7 +13,7 @@ struct swsusp_info {
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_HIBERNATION
/*
* Keep some memory free so that I/O operations can succeed without paging
* [Might this be more than 4 MB?]
@@ -176,9 +176,17 @@ struct timeval;
extern void swsusp_show_speed(struct timeval *, struct timeval *,
unsigned int, char *);
+#ifdef CONFIG_SUSPEND
/* kernel/power/main.c */
-extern int suspend_enter(suspend_state_t state);
extern int suspend_devices_and_enter(suspend_state_t state);
+#else /* !CONFIG_SUSPEND */
+static inline int suspend_devices_and_enter(suspend_state_t state)
+{
+ return -ENOSYS;
+}
+#endif /* !CONFIG_SUSPEND */
+
+/* kernel/power/common.c */
extern struct blocking_notifier_head pm_chain_head;
static inline int pm_notifier_call_chain(unsigned long val)
diff --git a/kernel/sched.c b/kernel/sched.c
index 93cf241cfbe..5c51d7e5dcc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -53,6 +53,7 @@
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
+#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
@@ -263,8 +264,6 @@ struct rq {
unsigned int clock_warps, clock_overflows;
unsigned int clock_unstable_events;
- struct sched_class *load_balance_class;
-
atomic_t nr_iowait;
#ifdef CONFIG_SMP
@@ -385,13 +384,12 @@ static inline unsigned long long rq_clock(struct rq *rq)
*/
unsigned long long cpu_clock(int cpu)
{
- struct rq *rq = cpu_rq(cpu);
unsigned long long now;
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
- now = rq_clock(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ local_irq_save(flags);
+ now = rq_clock(cpu_rq(cpu));
+ local_irq_restore(flags);
return now;
}
@@ -1592,6 +1590,10 @@ static void __sched_fork(struct task_struct *p)
INIT_LIST_HEAD(&p->run_list);
p->se.on_rq = 0;
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+ INIT_HLIST_HEAD(&p->preempt_notifiers);
+#endif
+
/*
* We mark the process as running here, but have not actually
* inserted it onto the runqueue yet. This guarantees that
@@ -1673,6 +1675,63 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
task_rq_unlock(rq, &flags);
}
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+
+/**
+ * preempt_notifier_register - tell me when current is being preempted
+ * and rescheduled
+ */
+void preempt_notifier_register(struct preempt_notifier *notifier)
+{
+ hlist_add_head(&notifier->link, &current->preempt_notifiers);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_register);
+
+/**
+ * preempt_notifier_unregister - no longer interested in preemption notifications
+ *
+ * This is safe to call from within a preemption notifier.
+ */
+void preempt_notifier_unregister(struct preempt_notifier *notifier)
+{
+ hlist_del(&notifier->link);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
+
+static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+ struct preempt_notifier *notifier;
+ struct hlist_node *node;
+
+ hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+ notifier->ops->sched_in(notifier, raw_smp_processor_id());
+}
+
+static void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+ struct task_struct *next)
+{
+ struct preempt_notifier *notifier;
+ struct hlist_node *node;
+
+ hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+ notifier->ops->sched_out(notifier, next);
+}
+
+#else
+
+static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+}
+
+static void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+ struct task_struct *next)
+{
+}
+
+#endif
+
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
@@ -1685,8 +1744,11 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
* prepare_task_switch sets up locking and calls architecture specific
* hooks.
*/
-static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
+static inline void
+prepare_task_switch(struct rq *rq, struct task_struct *prev,
+ struct task_struct *next)
{
+ fire_sched_out_preempt_notifiers(prev, next);
prepare_lock_switch(rq, next);
prepare_arch_switch(next);
}
@@ -1728,6 +1790,7 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
prev_state = prev->state;
finish_arch_switch(prev);
finish_lock_switch(rq, prev);
+ fire_sched_in_preempt_notifiers(current);
if (mm)
mmdrop(mm);
if (unlikely(prev_state == TASK_DEAD)) {
@@ -1768,7 +1831,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
{
struct mm_struct *mm, *oldmm;
- prepare_task_switch(rq, next);
+ prepare_task_switch(rq, prev, next);
mm = next->mm;
oldmm = prev->active_mm;
/*
@@ -5140,10 +5203,129 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
if (!next)
break;
migrate_dead(dead_cpu, next);
+
}
}
#endif /* CONFIG_HOTPLUG_CPU */
+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
+
+static struct ctl_table sd_ctl_dir[] = {
+ {CTL_UNNUMBERED, "sched_domain", NULL, 0, 0755, NULL, },
+ {0,},
+};
+
+static struct ctl_table sd_ctl_root[] = {
+ {CTL_UNNUMBERED, "kernel", NULL, 0, 0755, sd_ctl_dir, },
+ {0,},
+};
+
+static struct ctl_table *sd_alloc_ctl_entry(int n)
+{
+ struct ctl_table *entry =
+ kmalloc(n * sizeof(struct ctl_table), GFP_KERNEL);
+
+ BUG_ON(!entry);
+ memset(entry, 0, n * sizeof(struct ctl_table));
+
+ return entry;
+}
+
+static void
+set_table_entry(struct ctl_table *entry, int ctl_name,
+ const char *procname, void *data, int maxlen,
+ mode_t mode, proc_handler *proc_handler)
+{
+ entry->ctl_name = ctl_name;
+ entry->procname = procname;
+ entry->data = data;
+ entry->maxlen = maxlen;
+ entry->mode = mode;
+ entry->proc_handler = proc_handler;
+}
+
+static struct ctl_table *
+sd_alloc_ctl_domain_table(struct sched_domain *sd)
+{
+ struct ctl_table *table = sd_alloc_ctl_entry(14);
+
+ set_table_entry(&table[0], 1, "min_interval", &sd->min_interval,
+ sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[1], 2, "max_interval", &sd->max_interval,
+ sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[2], 3, "busy_idx", &sd->busy_idx,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[3], 4, "idle_idx", &sd->idle_idx,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[4], 5, "newidle_idx", &sd->newidle_idx,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[5], 6, "wake_idx", &sd->wake_idx,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[6], 7, "forkexec_idx", &sd->forkexec_idx,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[7], 8, "busy_factor", &sd->busy_factor,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[8], 9, "imbalance_pct", &sd->imbalance_pct,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[9], 10, "cache_hot_time", &sd->cache_hot_time,
+ sizeof(long long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[10], 11, "cache_nice_tries",
+ &sd->cache_nice_tries,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[12], 13, "flags", &sd->flags,
+ sizeof(int), 0644, proc_dointvec_minmax);
+
+ return table;
+}
+
+static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+{
+ struct ctl_table *entry, *table;
+ struct sched_domain *sd;
+ int domain_num = 0, i;
+ char buf[32];
+
+ for_each_domain(cpu, sd)
+ domain_num++;
+ entry = table = sd_alloc_ctl_entry(domain_num + 1);
+
+ i = 0;
+ for_each_domain(cpu, sd) {
+ snprintf(buf, 32, "domain%d", i);
+ entry->ctl_name = i + 1;
+ entry->procname = kstrdup(buf, GFP_KERNEL);
+ entry->mode = 0755;
+ entry->child = sd_alloc_ctl_domain_table(sd);
+ entry++;
+ i++;
+ }
+ return table;
+}
+
+static struct ctl_table_header *sd_sysctl_header;
+static void init_sched_domain_sysctl(void)
+{
+ int i, cpu_num = num_online_cpus();
+ struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+ char buf[32];
+
+ sd_ctl_dir[0].child = entry;
+
+ for (i = 0; i < cpu_num; i++, entry++) {
+ snprintf(buf, 32, "cpu%d", i);
+ entry->ctl_name = i + 1;
+ entry->procname = kstrdup(buf, GFP_KERNEL);
+ entry->mode = 0755;
+ entry->child = sd_alloc_ctl_cpu_table(i);
+ }
+ sd_sysctl_header = register_sysctl_table(sd_ctl_root);
+}
+#else
+static void init_sched_domain_sysctl(void)
+{
+}
+#endif
+
/*
* migration_call - callback that gets triggered when a CPU is added.
* Here we can start up the necessary migration thread for the new CPU.
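The tables assembled by sd_alloc_ctl_cpu_table() and init_sched_domain_sysctl() above hang one directory per online CPU under kernel/sched_domain, each holding one subdirectory per scheduling domain with the tunables listed in sd_alloc_ctl_domain_table(). On a hypothetical two-CPU machine with a single domain level, the registered tree would look roughly like this (illustrative, entry list abbreviated):

        /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
        /proc/sys/kernel/sched_domain/cpu0/domain0/max_interval
        /proc/sys/kernel/sched_domain/cpu0/domain0/busy_idx
        ...
        /proc/sys/kernel/sched_domain/cpu1/domain0/flags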
@@ -6249,6 +6431,8 @@ void __init sched_init_smp(void)
/* XXX: Theoretical race here - CPU may be hotplugged now */
hotcpu_notifier(update_sched_domains, 0);
+ init_sched_domain_sysctl();
+
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed(current, non_isolated_cpus) < 0)
BUG();
@@ -6335,6 +6519,10 @@ void __init sched_init(void)
set_load_weight(&init_task);
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+ INIT_HLIST_HEAD(&init_task.preempt_notifiers);
+#endif
+
#ifdef CONFIG_SMP
nr_cpu_ids = highest_cpu + 1;
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 29f2c21e7da..42970f723a9 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -186,7 +186,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
return 0;
}
-void sysrq_sched_debug_show(void)
+static void sysrq_sched_debug_show(void)
{
sched_debug_show(NULL, NULL);
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 08562f41976..449b81b98b3 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -804,6 +804,7 @@ static void kernel_restart_prepare(char *cmd)
blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
system_state = SYSTEM_RESTART;
device_shutdown();
+ sysdev_shutdown();
}
/**
@@ -860,6 +861,7 @@ void kernel_shutdown_prepare(enum system_states state)
void kernel_halt(void)
{
kernel_shutdown_prepare(SYSTEM_HALT);
+ sysdev_shutdown();
printk(KERN_EMERG "System halted.\n");
machine_halt();
}
@@ -876,6 +878,7 @@ void kernel_power_off(void)
kernel_shutdown_prepare(SYSTEM_POWER_OFF);
if (pm_power_off_prepare)
pm_power_off_prepare();
+ sysdev_shutdown();
printk(KERN_EMERG "Power down.\n");
machine_power_off();
}
@@ -951,7 +954,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
unlock_kernel();
return -EINVAL;
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_HIBERNATION
case LINUX_REBOOT_CMD_SW_SUSPEND:
{
int ret = hibernate();
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ddebf3f2aff..79c891e6266 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -689,7 +689,7 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
-#ifdef CONFIG_ACPI_SLEEP
+#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86)
{
.ctl_name = KERN_ACPI_VIDEO_FLAGS,
.procname = "acpi_video_flags",
diff --git a/kernel/time.c b/kernel/time.c
index 5b81da08bbd..2289a8d6831 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -215,22 +215,6 @@ asmlinkage long sys_adjtimex(struct timex __user *txc_p)
return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}
-inline struct timespec current_kernel_time(void)
-{
- struct timespec now;
- unsigned long seq;
-
- do {
- seq = read_seqbegin(&xtime_lock);
-
- now = xtime;
- } while (read_seqretry(&xtime_lock, seq));
-
- return now;
-}
-
-EXPORT_SYMBOL(current_kernel_time);
-
/**
* current_fs_time - Return FS time
* @sb: Superblock.
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 88c81026e00..acc417b5a9b 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -47,10 +47,22 @@ EXPORT_SYMBOL(xtime_lock);
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time; /* seconds */
-
EXPORT_SYMBOL(xtime);
+#ifdef CONFIG_NO_HZ
+static struct timespec xtime_cache __attribute__ ((aligned (16)));
+static inline void update_xtime_cache(u64 nsec)
+{
+ xtime_cache = xtime;
+ timespec_add_ns(&xtime_cache, nsec);
+}
+#else
+#define xtime_cache xtime
+/* We do *not* want to evaluate the argument for this case */
+#define update_xtime_cache(n) do { } while (0)
+#endif
+
static struct clocksource *clock; /* pointer to current clocksource */
@@ -478,6 +490,8 @@ void update_wall_time(void)
xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+ update_xtime_cache(cyc2ns(clock, offset));
+
/* check to see if there is a new clocksource to use */
change_clocksource();
update_vsyscall(&xtime, clock);
@@ -509,3 +523,25 @@ void monotonic_to_bootbased(struct timespec *ts)
{
ts->tv_sec += total_sleep_time;
}
+
+unsigned long get_seconds(void)
+{
+ return xtime_cache.tv_sec;
+}
+EXPORT_SYMBOL(get_seconds);
+
+
+struct timespec current_kernel_time(void)
+{
+ struct timespec now;
+ unsigned long seq;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+
+ now = xtime_cache;
+ } while (read_seqretry(&xtime_lock, seq));
+
+ return now;
+}
+EXPORT_SYMBOL(current_kernel_time);
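The two exported helpers serve different needs from the same xtime_cache: get_seconds() is a lockless read of the cached seconds value, while current_kernel_time() takes the xtime seqlock to return a consistent timespec. A minimal caller sketch (the function name is invented):

#include <linux/kernel.h>
#include <linux/time.h>

/* Illustrative only. */
static void example_stamp(void)
{
        unsigned long secs = get_seconds();             /* cheap, 1 s resolution */
        struct timespec now = current_kernel_time();    /* consistent sec/nsec pair */

        printk(KERN_DEBUG "example: %lu vs %lu.%09lu\n",
               secs, (unsigned long)now.tv_sec,
               (unsigned long)now.tv_nsec);
}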
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 658f638c402..c122131a122 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -39,7 +39,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
ac_etime = timespec_to_ns(&ts);
do_div(ac_etime, NSEC_PER_USEC);
stats->ac_etime = ac_etime;
- stats->ac_btime = xtime.tv_sec - ts.tv_sec;
+ stats->ac_btime = get_seconds() - ts.tv_sec;
if (thread_group_leader(tsk)) {
stats->ac_exitcode = tsk->exit_code;
if (tsk->flags & PF_FORKNOEXEC)
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index b18fc2ff9ff..23985a278bb 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -139,12 +139,14 @@ static void debugfs_ul_set(void *data, u64 val)
*(unsigned long *)data = val;
}
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
static void debugfs_ul_set_MAX_STACK_TRACE_DEPTH(void *data, u64 val)
{
*(unsigned long *)data =
val < MAX_STACK_TRACE_DEPTH ?
val : MAX_STACK_TRACE_DEPTH;
}
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
static u64 debugfs_ul_get(void *data)
{
@@ -159,6 +161,7 @@ static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
return debugfs_create_file(name, mode, parent, value, &fops_ul);
}
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
DEFINE_SIMPLE_ATTRIBUTE(fops_ul_MAX_STACK_TRACE_DEPTH, debugfs_ul_get,
debugfs_ul_set_MAX_STACK_TRACE_DEPTH, "%llu\n");
@@ -169,6 +172,7 @@ static struct dentry *debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
return debugfs_create_file(name, mode, parent, value,
&fops_ul_MAX_STACK_TRACE_DEPTH);
}
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
static void debugfs_atomic_t_set(void *data, u64 val)
{
diff --git a/mm/Kconfig b/mm/Kconfig
index 86187221e78..e24d348083c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -116,11 +116,11 @@ config SPARSEMEM_EXTREME
config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
depends on SPARSEMEM || X86_64_ACPI_NUMA
- depends on HOTPLUG && !SOFTWARE_SUSPEND && ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on HOTPLUG && !HIBERNATION && ARCH_ENABLE_MEMORY_HOTPLUG
depends on (IA64 || X86 || PPC64 || SUPERH)
comment "Memory hotplug is currently incompatible with Software Suspend"
- depends on SPARSEMEM && HOTPLUG && SOFTWARE_SUSPEND
+ depends on SPARSEMEM && HOTPLUG && HIBERNATION
config MEMORY_HOTPLUG_SPARSE
def_bool y
diff --git a/mm/bounce.c b/mm/bounce.c
index ad401fc5744..179fe38a241 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -190,7 +190,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
return 0;
}
-static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
+static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
mempool_t *pool)
{
struct page *page;
@@ -275,7 +275,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
*bio_orig = bio;
}
-void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
mempool_t *pool;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f127940ec24..d7ca59d66c5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -84,6 +84,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
list_del(&page->lru);
free_huge_pages--;
free_huge_pages_node[nid]--;
+ break;
}
}
return page;
diff --git a/mm/migrate.c b/mm/migrate.c
index 34d8ada053e..37c73b90200 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -49,9 +49,8 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
struct zone *zone = page_zone(page);
spin_lock_irq(&zone->lru_lock);
- if (PageLRU(page)) {
+ if (PageLRU(page) && get_page_unless_zero(page)) {
ret = 0;
- get_page(page);
ClearPageLRU(page);
if (PageActive(page))
del_page_from_active_list(zone, page);
@@ -632,18 +631,35 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
goto unlock;
wait_on_page_writeback(page);
}
-
/*
- * Establish migration ptes or remove ptes
+ * Once try_to_unmap() has dropped page->mapcount to 0 here, we cannot
+ * notice that the anon_vma is freed while we migrate the page. This
+ * rcu_read_lock() delays freeing of the anon_vma until the end of the
+ * migration. File cache pages are no problem because of page_lock().
+ */
+ rcu_read_lock();
+ /*
+ * Corner case handling:
+ * When a new swap-cache page is read in, it is linked to the LRU
+ * and treated as swapcache, but it has no rmap yet.
+ * Calling try_to_unmap() against a page with page->mapping == NULL
+ * is a BUG, so handle that case here.
*/
+ if (!page->mapping)
+ goto rcu_unlock;
+ /* Establish migration ptes or remove ptes */
try_to_unmap(page, 1);
+
if (!page_mapped(page))
rc = move_to_new_page(newpage, page);
if (rc)
remove_migration_ptes(page, page);
+rcu_unlock:
+ rcu_read_unlock();
unlock:
+
unlock_page(page);
if (rc != -EAGAIN) {
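The isolate_lru_page() change above depends on get_page_unless_zero(): the reference is taken only if the page's count has not already dropped to zero, so a page racing with its final put cannot be picked up for migration. A userspace sketch of that idea, using C11 atomics rather than the kernel's atomic_inc_not_zero(), for illustration only:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct obj {
            atomic_int refcount;
    };

    /* take a reference only if the object is not already being freed */
    static bool get_unless_zero(struct obj *o)
    {
            int c = atomic_load(&o->refcount);

            while (c != 0) {
                    if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
                            return true;    /* reference taken */
            }
            return false;                   /* already on its way to zero */
    }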
diff --git a/mm/mmap.c b/mm/mmap.c
index 7afc7a7cec6..b6537211b9c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1029,6 +1029,40 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
}
EXPORT_SYMBOL(do_mmap_pgoff);
+/*
+ * Some shared mappings will want the pages marked read-only
+ * to track write events. If so, we'll downgrade vm_page_prot
+ * to the private version (using protection_map[] without the
+ * VM_SHARED bit).
+ */
+int vma_wants_writenotify(struct vm_area_struct *vma)
+{
+ unsigned int vm_flags = vma->vm_flags;
+
+ /* If it was private or non-writable, the write bit is already clear */
+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+ return 0;
+
+ /* The backer wishes to know when pages are first written to? */
+ if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+ return 1;
+
+ /* The open routine did something to the protections already? */
+ if (pgprot_val(vma->vm_page_prot) !=
+ pgprot_val(protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+ return 0;
+
+ /* Specialty mapping? */
+ if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+ return 0;
+
+ /* Can the mapping track the dirty pages? */
+ return vma->vm_file && vma->vm_file->f_mapping &&
+ mapping_cap_account_dirty(vma->vm_file->f_mapping);
+}
+
+
unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, unsigned long flags,
unsigned int vm_flags, unsigned long pgoff,
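vma_wants_writenotify() reports whether a shared, writable mapping must start out write-protected so that the first store faults and the backing store can be told about it (via ->page_mkwrite() or dirty accounting). The call sites are not in this hunk; a sketch of the kind of caller it enables, under that assumption:

    /* sketch only: downgrade the protection of a shared mapping that
     * wants write notification, so the first write traps */
    if (vma_wants_writenotify(vma))
            vma->vm_page_prot =
                    protection_map[vma->vm_flags &
                                   (VM_READ|VM_WRITE|VM_EXEC)];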
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a7001410ab1..10367654ae7 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -17,6 +17,7 @@
#include <linux/oom.h>
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 40954fb8159..0bd4d82ddff 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -726,7 +726,7 @@ static void __drain_pages(unsigned int cpu)
}
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_HIBERNATION
void mark_free_pages(struct zone *zone)
{
@@ -772,7 +772,7 @@ void drain_local_pages(void)
__drain_pages(smp_processor_id());
local_irq_restore(flags);
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_HIBERNATION */
/*
* Free a 0-order page
@@ -2775,11 +2775,11 @@ unsigned long __meminit __absent_pages_in_range(int nid,
if (i == -1)
return 0;
+ prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
+
/* Account for ranges before physical memory on this node */
if (early_node_map[i].start_pfn > range_start_pfn)
- hole_pages = early_node_map[i].start_pfn - range_start_pfn;
-
- prev_end_pfn = early_node_map[i].start_pfn;
+ hole_pages = prev_end_pfn - range_start_pfn;
/* Find all holes for the zone within the node */
for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
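The __absent_pages_in_range() fix clamps the first populated range's start to the end of the requested range before computing the leading hole, so memory that begins beyond the range no longer inflates the hole count. A small userspace illustration of the arithmetic:

    #include <stdio.h>

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned long range_start_pfn = 0, range_end_pfn = 100;
            unsigned long first_map_start = 150;    /* memory starts past the range */
            unsigned long prev_end_pfn, hole_pages = 0;

            prev_end_pfn = min_ul(first_map_start, range_end_pfn);
            if (first_map_start > range_start_pfn)
                    hole_pages = prev_end_pfn - range_start_pfn;

            printf("%lu hole pages\n", hole_pages); /* 100, not 150 */
            return 0;
    }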
diff --git a/mm/slab.c b/mm/slab.c
index bde271c001b..a684778b2b4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2776,7 +2776,7 @@ static int cache_grow(struct kmem_cache *cachep,
* 'nodeid'.
*/
if (!objp)
- objp = kmem_getpages(cachep, flags, nodeid);
+ objp = kmem_getpages(cachep, local_flags, nodeid);
if (!objp)
goto failed;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7ff0a81c7b0..f071648e136 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -425,7 +425,7 @@ void free_swap_and_cache(swp_entry_t entry)
}
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_HIBERNATION
/*
* Find the swap type that corresponds to given device (if any).
*
@@ -951,7 +951,7 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
}
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#ifdef CONFIG_HIBERNATION
/*
* Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
* corresponding to given index in swap_info (swap type).
@@ -966,7 +966,7 @@ sector_t swapdev_block(int swap_type, pgoff_t offset)
sis = swap_info + swap_type;
return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
}
-#endif /* CONFIG_SOFTWARE_SUSPEND */
+#endif /* CONFIG_HIBERNATION */
/*
* Free all of a swapdev's extent information
diff --git a/mm/vmstat.c b/mm/vmstat.c
index fadf791cd7e..c64d169537b 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -10,6 +10,7 @@
*/
#include <linux/mm.h>
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/sched.h>
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index cda936b77d2..1583c5ef963 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -810,6 +810,7 @@ static int vlan_ioctl_handler(void __user *arg)
err = -EINVAL;
break;
case GET_VLAN_REALDEV_NAME_CMD:
+ err = 0;
vlan_dev_get_realdev_name(dev, args.u.device2);
if (copy_to_user(arg, &args,
sizeof(struct vlan_ioctl_args))) {
@@ -818,6 +819,7 @@ static int vlan_ioctl_handler(void __user *arg)
break;
case GET_VLAN_VID_CMD:
+ err = 0;
vlan_dev_get_vid(dev, &vid);
args.u.VID = vid;
if (copy_to_user(arg, &args,
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 420bbb9955e..5c18595b761 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -112,9 +112,9 @@ static int br_handle_local_finish(struct sk_buff *skb)
*/
static inline int is_link_local(const unsigned char *dest)
{
- const u16 *a = (const u16 *) dest;
- static const u16 *const b = (const u16 *const ) br_group_address;
- static const u16 m = __constant_cpu_to_be16(0xfff0);
+ __be16 *a = (__be16 *)dest;
+ static const __be16 *b = (const __be16 *)br_group_address;
+ static const __be16 m = __constant_cpu_to_be16(0xfff0);
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
}
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 031bfa4a51f..457815fb558 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -9,7 +9,6 @@
*
*/
-#include <linux/in.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_log.h>
#include <linux/netfilter.h>
@@ -196,10 +195,8 @@ static int __init ebt_log_init(void)
ret = ebt_register_watcher(&log);
if (ret < 0)
return ret;
- ret = nf_log_register(PF_BRIDGE, &ebt_log_logger);
- if (ret < 0 && ret != -EEXIST)
- ebt_unregister_watcher(&log);
- return ret;
+ nf_log_register(PF_BRIDGE, &ebt_log_logger);
+ return 0;
}
static void __exit ebt_log_fini(void)
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 9411db62591..204c968fa86 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -36,7 +36,6 @@
#include <linux/timer.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
-#include <linux/module.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ulog.h>
#include <net/sock.h>
@@ -308,12 +307,8 @@ static int __init ebt_ulog_init(void)
else if ((ret = ebt_register_watcher(&ulog)))
sock_release(ebtulognl->sk_socket);
- if (nf_log_register(PF_BRIDGE, &ebt_ulog_logger) < 0) {
- printk(KERN_WARNING "ebt_ulog: not logging via ulog "
- "since somebody else already registered for PF_BRIDGE\n");
- /* we cannot make module load fail here, since otherwise
- * ebtables userspace would abort */
- }
+ if (ret == 0)
+ nf_log_register(PF_BRIDGE, &ebt_ulog_logger);
return ret;
}
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 5937ad150b9..127a5e89bf1 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -479,10 +479,8 @@ static int __init ipt_log_init(void)
ret = xt_register_target(&ipt_log_reg);
if (ret < 0)
return ret;
- ret = nf_log_register(PF_INET, &ipt_log_logger);
- if (ret < 0 && ret != -EEXIST)
- xt_unregister_target(&ipt_log_reg);
- return ret;
+ nf_log_register(PF_INET, &ipt_log_logger);
+ return 0;
}
static void __exit ipt_log_fini(void)
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index e848d8d6292..deab27facba 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -77,7 +77,8 @@ static inline unsigned int
hash_by_src(const struct nf_conntrack_tuple *tuple)
{
/* Original src, to ensure we map it consistently if poss. */
- return jhash_3words((__force u32)tuple->src.u3.ip, tuple->src.u.all,
+ return jhash_3words((__force u32)tuple->src.u3.ip,
+ (__force u32)tuple->src.u.all,
tuple->dst.protonum, 0) % nf_nat_htable_size;
}
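The hash_by_src() change is purely about sparse: tuple->src.u.all is a __be16, and feeding it to jhash_3words() without a cast triggers an endianness warning even though hashing the raw bit pattern is fine for bucket selection. A kernel-context sketch of the idiom, with a hypothetical bucket_of() helper:

    #include <linux/jhash.h>
    #include <linux/types.h>

    /* hypothetical helper: the __force casts say "hash the raw bits" */
    static u32 bucket_of(__be32 addr, __be16 port, u8 proto, u32 buckets)
    {
            return jhash_3words((__force u32)addr, (__force u32)port,
                                proto, 0) % buckets;
    }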
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 0f45427e5fd..76ec59ae524 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -192,7 +192,7 @@ alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum)
= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip
: ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
- u_int16_t all
+ __be16 all
= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all
: ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.all);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index df30976f6df..ca774d8e3be 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -385,7 +385,7 @@ parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
- int *type, int *code, int *msg, __be32 *info, int offset)
+ int *type, int *code, int *msg, __u32 *info, int offset)
{
struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
struct ip6_tnl *t;
@@ -435,7 +435,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
if ((*code) == ICMPV6_HDR_FIELD)
teli = parse_tlv_tnl_enc_lim(skb, skb->data);
- if (teli && teli == ntohl(*info) - 2) {
+ if (teli && teli == *info - 2) {
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
if (tel->encap_limit == 0) {
if (net_ratelimit())
@@ -452,7 +452,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
}
break;
case ICMPV6_PKT_TOOBIG:
- mtu = ntohl(*info) - offset;
+ mtu = *info - offset;
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
t->dev->mtu = mtu;
@@ -478,12 +478,12 @@ out:
static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __u32 info)
+ int type, int code, int offset, __be32 info)
{
int rel_msg = 0;
int rel_type = type;
int rel_code = code;
- __u32 rel_info = info;
+ __u32 rel_info = ntohl(info);
int err;
struct sk_buff *skb2;
struct iphdr *eiph;
@@ -564,10 +564,9 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
goto out;
skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
- rel_info = htonl(rel_info);
}
- icmp_send(skb2, rel_type, rel_code, rel_info);
+ icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
out:
kfree_skb(skb2);
@@ -576,12 +575,12 @@ out:
static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __u32 info)
+ int type, int code, int offset, __be32 info)
{
int rel_msg = 0;
int rel_type = type;
int rel_code = code;
- __u32 rel_info = info;
+ __u32 rel_info = ntohl(info);
int err;
err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
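The ip6_tunnel changes convert the ICMPv6 info field to host order once, where it enters ip4ip6_err()/ip6ip6_err(), and convert back with htonl() only where the on-wire value is needed (icmp_send()), instead of flipping rel_info back and forth in the middle of the function. A runnable userspace sketch of that convert-at-the-boundary idiom:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* take a wire-order value, work in host order, hand back wire order */
    static uint32_t adjust_mtu(uint32_t wire_info, uint32_t offset)
    {
            uint32_t mtu = ntohl(wire_info) - offset;   /* host order inside */
            if (mtu < 1280)                             /* IPV6_MIN_MTU */
                    mtu = 1280;
            return htonl(mtu);                          /* wire order out */
    }

    int main(void)
    {
            printf("%u\n", ntohl(adjust_mtu(htonl(1400), 40)));   /* 1360 */
            return 0;
    }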
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index b05327ebd33..6ab99001dcc 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -493,10 +493,8 @@ static int __init ip6t_log_init(void)
ret = xt_register_target(&ip6t_log_reg);
if (ret < 0)
return ret;
- ret = nf_log_register(PF_INET6, &ip6t_logger);
- if (ret < 0 && ret != -EEXIST)
- xt_unregister_target(&ip6t_log_reg);
- return ret;
+ nf_log_register(PF_INET6, &ip6t_logger);
+ return 0;
}
static void __exit ip6t_log_fini(void)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d67fb1ef751..f10f3689d67 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -633,6 +633,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
if (tp->md5sig_info->entries6 == 0) {
kfree(tp->md5sig_info->keys6);
tp->md5sig_info->keys6 = NULL;
+ tp->md5sig_info->alloced6 = 0;
tcp_free_md5sig_pool();
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index ad5150b8dfa..983058d432d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -479,7 +479,8 @@ static void iucv_setmask_mp(void)
/* Enable all cpus with a declared buffer. */
if (cpu_isset(cpu, iucv_buffer_cpumask) &&
!cpu_isset(cpu, iucv_irq_cpumask))
- smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
+ smp_call_function_single(cpu, iucv_allow_cpu,
+ NULL, 0, 1);
preempt_enable();
}
@@ -497,7 +498,7 @@ static void iucv_setmask_up(void)
cpumask = iucv_irq_cpumask;
cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
for_each_cpu_mask(cpu, cpumask)
- smp_call_function_on(iucv_block_cpu, NULL, 0, 1, cpu);
+ smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
}
/**
@@ -522,7 +523,7 @@ static int iucv_enable(void)
rc = -EIO;
preempt_disable();
for_each_online_cpu(cpu)
- smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+ smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
preempt_enable();
if (cpus_empty(iucv_buffer_cpumask))
/* No cpu could declare an iucv buffer. */
@@ -578,7 +579,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
case CPU_ONLINE_FROZEN:
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
- smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+ smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
@@ -587,10 +588,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
if (cpus_empty(cpumask))
/* Can't offline last IUCV enabled cpu. */
return NOTIFY_BAD;
- smp_call_function_on(iucv_retrieve_cpu, NULL, 0, 1, cpu);
+ smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1);
if (cpus_empty(iucv_irq_cpumask))
- smp_call_function_on(iucv_allow_cpu, NULL, 0, 1,
- first_cpu(iucv_buffer_cpumask));
+ smp_call_function_single(first_cpu(iucv_buffer_cpumask),
+ iucv_allow_cpu, NULL, 0, 1);
break;
}
return NOTIFY_OK;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 0f8304b0246..7b0a95abe93 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2540,7 +2540,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
sel.sport = ((struct sockaddr_in *)(sa + 1))->sin_port;
if (sel.sport)
- sel.sport_mask = ~0;
+ sel.sport_mask = htons(0xffff);
/* set destination address info of selector */
sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1],
@@ -2549,7 +2549,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
sel.dport = ((struct sockaddr_in *)(sa + 1))->sin_port;
if (sel.dport)
- sel.dport_mask = ~0;
+ sel.dport_mask = htons(0xffff);
rq = (struct sadb_x_ipsecrequest *)(pol + 1);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index aa086c83af8..0fe11889ce1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -79,7 +79,8 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
a = jhash2(tuple->src.u3.all, ARRAY_SIZE(tuple->src.u3.all),
(tuple->src.l3num << 16) | tuple->dst.protonum);
b = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
- (tuple->src.u.all << 16) | tuple->dst.u.all);
+ ((__force __u16)tuple->src.u.all << 16) |
+ (__force __u16)tuple->dst.u.all);
return jhash_2words(a, b, rnd) % size;
}
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 1aa6229ca99..eb6695dcd73 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -80,7 +80,7 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
return jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
(((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
- tuple->dst.u.all) ^ nf_ct_expect_hash_rnd) %
+ (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd) %
nf_ct_expect_hsize;
}
@@ -259,8 +259,8 @@ void nf_ct_expect_init(struct nf_conntrack_expect *exp, int family,
}
if (src) {
- exp->tuple.src.u.all = (__force u16)*src;
- exp->mask.src.u.all = 0xFFFF;
+ exp->tuple.src.u.all = *src;
+ exp->mask.src.u.all = htons(0xFFFF);
} else {
exp->tuple.src.u.all = 0;
exp->mask.src.u.all = 0;
@@ -272,7 +272,7 @@ void nf_ct_expect_init(struct nf_conntrack_expect *exp, int family,
memset((void *)&exp->tuple.dst.u3 + len, 0x00,
sizeof(exp->tuple.dst.u3) - len);
- exp->tuple.dst.u.all = (__force u16)*dst;
+ exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index ca10df40784..96aa637c093 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -39,7 +39,7 @@ static int nf_ct_helper_vmalloc;
static unsigned int helper_hash(const struct nf_conntrack_tuple *tuple)
{
return (((tuple->src.l3num << 8) | tuple->dst.protonum) ^
- tuple->src.u.all) % nf_ct_helper_hsize;
+ (__force __u16)tuple->src.u.all) % nf_ct_helper_hsize;
}
struct nf_conntrack_helper *
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 87ad3ccf8af..eb3fe740146 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -8,7 +8,6 @@
#include <linux/types.h>
#include <linux/timer.h>
-#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/tcp.h>
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 13d94a02572..2a2fd1a764e 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -9,7 +9,6 @@
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
-#include <linux/netfilter.h>
#include <linux/udp.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 93e747b5396..b906b413997 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -10,7 +10,6 @@
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
-#include <linux/netfilter.h>
#include <linux/udp.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 3335dd5be96..06cff1d1369 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -42,13 +42,13 @@ struct xt_connlimit_data {
static u_int32_t connlimit_rnd;
static bool connlimit_rnd_inited;
-static inline unsigned int connlimit_iphash(u_int32_t addr)
+static inline unsigned int connlimit_iphash(__be32 addr)
{
if (unlikely(!connlimit_rnd_inited)) {
get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
connlimit_rnd_inited = true;
}
- return jhash_1word(addr, connlimit_rnd) & 0xFF;
+ return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF;
}
static inline unsigned int
@@ -66,7 +66,7 @@ connlimit_iphash6(const union nf_conntrack_address *addr,
for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
res.ip6[i] = addr->ip6[i] & mask->ip6[i];
- return jhash2(res.ip6, ARRAY_SIZE(res.ip6), connlimit_rnd) & 0xFF;
+ return jhash2((u32 *)res.ip6, ARRAY_SIZE(res.ip6), connlimit_rnd) & 0xFF;
}
static inline bool already_closed(const struct nf_conn *conn)
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index f47cab7a696..a4bab043a6d 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -13,7 +13,6 @@
#include <linux/netfilter_bridge.h>
#include <linux/netfilter/xt_physdev.h>
#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_bridge.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c
index 04b677ae8da..74f9b14c012 100644
--- a/net/netfilter/xt_u32.c
+++ b/net/netfilter/xt_u32.c
@@ -21,6 +21,7 @@ static bool u32_match_it(const struct xt_u32 *data,
unsigned int nnums;
unsigned int nvals;
unsigned int i;
+ __be32 n;
u_int32_t pos;
u_int32_t val;
u_int32_t at;
@@ -38,9 +39,9 @@ static bool u32_match_it(const struct xt_u32 *data,
if (skb->len < 4 || pos > skb->len - 4)
return false;
- ret = skb_copy_bits(skb, pos, &val, sizeof(val));
+ ret = skb_copy_bits(skb, pos, &n, sizeof(n));
BUG_ON(ret < 0);
- val = ntohl(val);
+ val = ntohl(n);
nnums = ct->nnums;
/* Inner loop runs over "&", "<<", ">>" and "@" operands */
@@ -65,10 +66,10 @@ static bool u32_match_it(const struct xt_u32 *data,
pos > skb->len - at - 4)
return false;
- ret = skb_copy_bits(skb, at + pos, &val,
- sizeof(val));
+ ret = skb_copy_bits(skb, at + pos, &n,
+ sizeof(n));
BUG_ON(ret < 0);
- val = ntohl(val);
+ val = ntohl(n);
break;
}
}
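The xt_u32 change reads the packet word into a separate __be32 before converting, so the conversion happens exactly once and sparse can check it, rather than storing network-order bytes in a host-order variable and fixing them up afterwards. A kernel-context sketch of the resulting pattern, with a hypothetical helper:

    #include <linux/skbuff.h>
    #include <linux/types.h>

    /* hypothetical helper: fetch a big-endian 32-bit word at pos */
    static bool read_be32_at(const struct sk_buff *skb, unsigned int pos,
                             u32 *out)
    {
            __be32 n;

            if (skb->len < 4 || pos > skb->len - 4)
                    return false;
            if (skb_copy_bits(skb, pos, &n, sizeof(n)) < 0)
                    return false;
            *out = ntohl(n);
            return true;
    }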
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index e146531faf1..8c11ca4a212 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -184,7 +184,7 @@ int genl_register_mc_group(struct genl_family *family,
}
err = netlink_change_ngroups(genl_sock,
- sizeof(unsigned long) * NETLINK_GENERIC);
+ mc_groups_longs * BITS_PER_LONG);
if (err)
goto out;
@@ -196,10 +196,22 @@ int genl_register_mc_group(struct genl_family *family,
genl_ctrl_event(CTRL_CMD_NEWMCAST_GRP, grp);
out:
genl_unlock();
- return 0;
+ return err;
}
EXPORT_SYMBOL(genl_register_mc_group);
+static void __genl_unregister_mc_group(struct genl_family *family,
+ struct genl_multicast_group *grp)
+{
+ BUG_ON(grp->family != family);
+ netlink_clear_multicast_users(genl_sock, grp->id);
+ clear_bit(grp->id, mc_groups);
+ list_del(&grp->list);
+ genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp);
+ grp->id = 0;
+ grp->family = NULL;
+}
+
/**
* genl_unregister_mc_group - unregister a multicast group
*
@@ -217,14 +229,8 @@ EXPORT_SYMBOL(genl_register_mc_group);
void genl_unregister_mc_group(struct genl_family *family,
struct genl_multicast_group *grp)
{
- BUG_ON(grp->family != family);
genl_lock();
- netlink_clear_multicast_users(genl_sock, grp->id);
- clear_bit(grp->id, mc_groups);
- list_del(&grp->list);
- genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp);
- grp->id = 0;
- grp->family = NULL;
+ __genl_unregister_mc_group(family, grp);
genl_unlock();
}
@@ -232,8 +238,10 @@ static void genl_unregister_mc_groups(struct genl_family *family)
{
struct genl_multicast_group *grp, *tmp;
+ genl_lock();
list_for_each_entry_safe(grp, tmp, &family->mcast_groups, list)
- genl_unregister_mc_group(family, grp);
+ __genl_unregister_mc_group(family, grp);
+ genl_unlock();
}
/**
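Splitting __genl_unregister_mc_group() out of genl_unregister_mc_group() lets genl_unregister_mc_groups() take genl_lock() once around the whole loop instead of re-acquiring it per group. A userspace sketch of the same locked/unlocked helper pattern, using a pthread mutex purely for illustration:

    #include <pthread.h>

    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

    /* bare helper: caller must hold registry_lock */
    static void __unregister_one(int id)
    {
            /* ... drop id from the registry ... */
            (void)id;
    }

    void unregister_one(int id)
    {
            pthread_mutex_lock(&registry_lock);
            __unregister_one(id);
            pthread_mutex_unlock(&registry_lock);
    }

    void unregister_all(const int *ids, int n)
    {
            pthread_mutex_lock(&registry_lock);
            while (n--)
                    __unregister_one(ids[n]);
            pthread_mutex_unlock(&registry_lock);
    }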
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 16a68df4e36..c58fa0d1be2 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -787,7 +787,7 @@ static int __init af_rxrpc_init(void)
BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb));
- rxrpc_epoch = htonl(xtime.tv_sec);
+ rxrpc_epoch = htonl(get_seconds());
ret = -ENOMEM;
rxrpc_call_jar = kmem_cache_create(
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 482750efc23..d6667f7bc85 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -71,7 +71,7 @@ struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
struct rb_node *p, *parent, **pp;
_enter("%p{%x},%x,%hx,",
- rx, key_serial(key), trans->debug_id, ntohl(service_id));
+ rx, key_serial(key), trans->debug_id, ntohs(service_id));
if (rx->trans == trans && rx->bundle) {
atomic_inc(&rx->bundle->usage);
@@ -791,7 +791,7 @@ void rxrpc_put_connection(struct rxrpc_connection *conn)
ASSERTCMP(atomic_read(&conn->usage), >, 0);
- conn->put_time = xtime.tv_sec;
+ conn->put_time = get_seconds();
if (atomic_dec_and_test(&conn->usage)) {
_debug("zombie");
rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
@@ -835,7 +835,7 @@ void rxrpc_connection_reaper(struct work_struct *work)
_enter("");
- now = xtime.tv_sec;
+ now = get_seconds();
earliest = ULONG_MAX;
write_lock_bh(&rxrpc_connection_lock);
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
index d43d78f1930..bb282a6a19f 100644
--- a/net/rxrpc/ar-transport.c
+++ b/net/rxrpc/ar-transport.c
@@ -183,7 +183,7 @@ void rxrpc_put_transport(struct rxrpc_transport *trans)
ASSERTCMP(atomic_read(&trans->usage), >, 0);
- trans->put_time = xtime.tv_sec;
+ trans->put_time = get_seconds();
if (unlikely(atomic_dec_and_test(&trans->usage)))
_debug("zombie");
/* let the reaper determine the timeout to avoid a race with
@@ -219,7 +219,7 @@ static void rxrpc_transport_reaper(struct work_struct *work)
_enter("");
- now = xtime.tv_sec;
+ now = get_seconds();
earliest = ULONG_MAX;
/* extract all the transports that have been dead too long */
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 5ec705144e1..ac3cabdca78 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -916,7 +916,7 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
issue = be32_to_cpu(stamp);
}
p += 4;
- now = xtime.tv_sec;
+ now = get_seconds();
_debug("KIV ISSUE: %lx [%lx]", issue, now);
/* check the ticket is in date */
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 490697542fc..dc2f41e9f57 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -769,11 +769,12 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
new->h.flavour = &svcauthops_gss;
new->pseudoflavor = pseudoflavor;
+ stat = 0;
test = auth_domain_lookup(name, &new->h);
- if (test != &new->h) { /* XXX Duplicate registration? */
- auth_domain_put(&new->h);
- /* dangling ref-count... */
- goto out;
+ if (test != &new->h) { /* Duplicate registration */
+ auth_domain_put(test);
+ kfree(new->h.name);
+ goto out_free_dom;
}
return 0;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 64b9b8c743c..12ff5da8160 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -131,13 +131,13 @@ static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
case AF_INET:
snprintf(buf, len, "%u.%u.%u.%u, port=%u",
NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
- htons(((struct sockaddr_in *) addr)->sin_port));
+ ntohs(((struct sockaddr_in *) addr)->sin_port));
break;
case AF_INET6:
snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
- htons(((struct sockaddr_in6 *) addr)->sin6_port));
+ ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
break;
default:
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 35d5ba1d4f4..ce265983637 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -72,10 +72,8 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w,
u32 pos, u32 mask, u32 val)
{
val = (val & mask) << pos;
- val = htonl(val);
- mask = htonl(mask << pos);
- m->hdr[w] &= ~mask;
- m->hdr[w] |= val;
+ m->hdr[w] &= ~htonl(mask << pos);
+ m->hdr[w] |= htonl(val);
}
/*
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 3f7b451f395..7fd6055bedf 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -315,7 +315,7 @@ quiet_cmd_link_multi-y = LD $@
cmd_link_multi-y = $(LD) $(ld_flags) -r -o $@ $(link_multi_deps)
quiet_cmd_link_multi-m = LD [M] $@
-cmd_link_multi-m = $(LD) $(ld_flags) $(LDFLAGS_MODULE) -o $@ $(link_multi_deps)
+cmd_link_multi-m = $(cmd_link_multi-y)
# We would rather have a list of rules like
# foo.o: $(foo-objs)
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index c6fcc597b3b..d988f5d21e3 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -56,23 +56,24 @@ _modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules))
# Step 2), invoke modpost
# Includes step 3,4
+modpost = scripts/mod/modpost \
+ $(if $(CONFIG_MODVERSIONS),-m) \
+ $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
+ $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
+ $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
+ $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
+ $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w)
+
quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
- cmd_modpost = scripts/mod/modpost \
- $(if $(CONFIG_MODVERSIONS),-m) \
- $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
- $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
- $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
- $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
- $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w)
+ cmd_modpost = $(modpost) -s
PHONY += __modpost
__modpost: $(modules:.ko=.o) FORCE
$(call cmd,modpost) $(wildcard vmlinux) $(filter-out FORCE,$^)
quiet_cmd_kernel-mod = MODPOST $@
- cmd_kernel-mod = $(cmd_modpost) $@
+ cmd_kernel-mod = $(modpost) $@
-PHONY += vmlinux
vmlinux.o: FORCE
$(call cmd,kernel-mod)
@@ -97,7 +98,7 @@ targets += $(modules:.ko=.mod.o)
# Step 6), final link of the modules
quiet_cmd_ld_ko_o = LD [M] $@
- cmd_ld_ko_o = $(LD) $(LDFLAGS) $(LDFLAGS_MODULE) -o $@ \
+ cmd_ld_ko_o = $(LD) -r $(LDFLAGS) $(LDFLAGS_MODULE) -o $@ \
$(filter-out FORCE,$^)
$(modules): %.ko :%.o %.mod.o FORCE
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 1199baf866c..8be6a4269e6 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -37,6 +37,14 @@ static struct menu *rootEntry;
static char nohelp_text[] = N_("Sorry, no help available for this option yet.\n");
+static const char *get_help(struct menu *menu)
+{
+ if (menu_has_help(menu))
+ return menu_get_help(menu);
+ else
+ return nohelp_text;
+}
+
static void strip(char *str)
{
char *p = str;
@@ -171,7 +179,7 @@ static void conf_askvalue(struct symbol *sym, const char *def)
int conf_string(struct menu *menu)
{
struct symbol *sym = menu->sym;
- const char *def, *help;
+ const char *def;
while (1) {
printf("%*s%s ", indent - 1, "", menu->prompt->text);
@@ -186,10 +194,7 @@ int conf_string(struct menu *menu)
case '?':
/* print help */
if (line[1] == '\n') {
- help = nohelp_text;
- if (menu->sym->help)
- help = menu->sym->help;
- printf("\n%s\n", menu->sym->help);
+ printf("\n%s\n", get_help(menu));
def = NULL;
break;
}
@@ -207,7 +212,6 @@ static int conf_sym(struct menu *menu)
struct symbol *sym = menu->sym;
int type;
tristate oldval, newval;
- const char *help;
while (1) {
printf("%*s%s ", indent - 1, "", menu->prompt->text);
@@ -233,7 +237,7 @@ static int conf_sym(struct menu *menu)
printf("/m");
if (oldval != yes && sym_tristate_within_range(sym, yes))
printf("/y");
- if (sym->help)
+ if (menu_has_help(menu))
printf("/?");
printf("] ");
conf_askvalue(sym, sym_get_string_value(sym));
@@ -269,10 +273,7 @@ static int conf_sym(struct menu *menu)
if (sym_set_tristate_value(sym, newval))
return 0;
help:
- help = nohelp_text;
- if (sym->help)
- help = sym->help;
- printf("\n%s\n", help);
+ printf("\n%s\n", get_help(menu));
}
}
@@ -342,7 +343,7 @@ static int conf_choice(struct menu *menu)
goto conf_childs;
}
printf("[1-%d", cnt);
- if (sym->help)
+ if (menu_has_help(menu))
printf("?");
printf("]: ");
switch (input_mode) {
@@ -359,8 +360,7 @@ static int conf_choice(struct menu *menu)
fgets(line, 128, stdin);
strip(line);
if (line[0] == '?') {
- printf("\n%s\n", menu->sym->help ?
- menu->sym->help : nohelp_text);
+ printf("\n%s\n", get_help(menu));
continue;
}
if (!line[0])
@@ -391,8 +391,7 @@ static int conf_choice(struct menu *menu)
if (!child)
continue;
if (line[strlen(line) - 1] == '?') {
- printf("\n%s\n", child->sym->help ?
- child->sym->help : nohelp_text);
+ printf("\n%s\n", get_help(child));
continue;
}
sym_set_choice_value(sym, child->sym);
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 6084525f604..a195986eec6 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -71,14 +71,12 @@ enum {
struct symbol {
struct symbol *next;
char *name;
- char *help;
enum symbol_type type;
struct symbol_value curr;
struct symbol_value def[4];
tristate visible;
int flags;
struct property *prop;
- struct expr *dep, *dep2;
struct expr_value rev_dep;
};
@@ -139,7 +137,7 @@ struct menu {
struct property *prompt;
struct expr *dep;
unsigned int flags;
- //char *help;
+ char *help;
struct file *file;
int lineno;
void *data;
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 61d8166166e..262908cfc2a 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -38,9 +38,6 @@ static gboolean show_all = FALSE;
static gboolean show_debug = FALSE;
static gboolean resizeable = FALSE;
-static char nohelp_text[] =
- N_("Sorry, no help available for this option yet.\n");
-
GtkWidget *main_wnd = NULL;
GtkWidget *tree1_w = NULL; // left frame
GtkWidget *tree2_w = NULL; // right frame
@@ -462,12 +459,9 @@ static void text_insert_help(struct menu *menu)
GtkTextIter start, end;
const char *prompt = menu_get_prompt(menu);
gchar *name;
- const char *help = _(nohelp_text);
+ const char *help;
- if (!menu->sym)
- help = "";
- else if (menu->sym->help)
- help = _(menu->sym->help);
+ help = _(menu_get_help(menu));
if (menu->sym && menu->sym->name)
name = g_strdup_printf(_(menu->sym->name));
diff --git a/scripts/kconfig/kxgettext.c b/scripts/kconfig/kxgettext.c
index 11f7dab9471..6eb72a7f256 100644
--- a/scripts/kconfig/kxgettext.c
+++ b/scripts/kconfig/kxgettext.c
@@ -170,8 +170,8 @@ void menu_build_message_list(struct menu *menu)
menu->file == NULL ? "Root Menu" : menu->file->name,
menu->lineno);
- if (menu->sym != NULL && menu->sym->help != NULL)
- message__add(menu->sym->help, menu->sym->name,
+ if (menu->sym != NULL && menu_has_help(menu))
+ message__add(menu_get_help(menu), menu->sym->name,
menu->file == NULL ? "Root Menu" : menu->file->name,
menu->lineno);
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index 15030770d1a..4d09f6ddefe 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -15,6 +15,8 @@ P(menu_is_visible,bool,(struct menu *menu));
P(menu_get_prompt,const char *,(struct menu *menu));
P(menu_get_root_menu,struct menu *,(struct menu *menu));
P(menu_get_parent_menu,struct menu *,(struct menu *menu));
+P(menu_has_help,bool,(struct menu *menu));
+P(menu_get_help,const char *,(struct menu *menu));
/* symbol.c */
P(symbol_hash,struct symbol *,[SYMBOL_HASHSIZE]);
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index d2c2a429887..bc5854ed605 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -725,11 +725,11 @@ static void show_help(struct menu *menu)
struct gstr help = str_new();
struct symbol *sym = menu->sym;
- if (sym->help)
+ if (menu_has_help(menu))
{
if (sym->name) {
str_printf(&help, "CONFIG_%s:\n\n", sym->name);
- str_append(&help, _(sym->help));
+ str_append(&help, _(menu_get_help(menu)));
str_append(&help, "\n");
}
} else {
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index f14aeac67d4..f9d0d91a3fe 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -417,3 +417,15 @@ struct menu *menu_get_parent_menu(struct menu *menu)
return menu;
}
+bool menu_has_help(struct menu *menu)
+{
+ return menu->help != NULL;
+}
+
+const char *menu_get_help(struct menu *menu)
+{
+ if (menu->help)
+ return menu->help;
+ else
+ return "";
+}
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index f2a23a9c393..e4eeb59a8c2 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -1041,7 +1041,7 @@ void ConfigInfoView::menuInfo(void)
if (showDebug())
debug = debug_info(sym);
- help = print_filter(_(sym->help));
+ help = print_filter(_(menu_get_help(menu)));
} else if (menu->prompt) {
head += "<big><b>";
head += print_filter(_(menu->prompt->text));
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index 9a06b6771ee..ec21db77f78 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -1722,7 +1722,7 @@ yyreduce:
case 83:
{
- current_entry->sym->help = (yyvsp[0].string);
+ current_entry->help = (yyvsp[0].string);
;}
break;
@@ -2280,11 +2280,11 @@ void print_symbol(FILE *out, struct menu *menu)
break;
}
}
- if (sym->help) {
- int len = strlen(sym->help);
- while (sym->help[--len] == '\n')
- sym->help[len] = 0;
- fprintf(out, " help\n%s\n", sym->help);
+ if (menu->help) {
+ int len = strlen(menu->help);
+ while (menu->help[--len] == '\n')
+ menu->help[len] = 0;
+ fprintf(out, " help\n%s\n", menu->help);
}
fputc('\n', out);
}
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 92eb02bdf9c..79db4cf22a5 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -402,7 +402,7 @@ help_start: T_HELP T_EOL
help: help_start T_HELPTEXT
{
- current_entry->sym->help = $2;
+ current_entry->help = $2;
};
/* depends option */
@@ -649,11 +649,11 @@ void print_symbol(FILE *out, struct menu *menu)
break;
}
}
- if (sym->help) {
- int len = strlen(sym->help);
- while (sym->help[--len] == '\n')
- sym->help[len] = 0;
- fprintf(out, " help\n%s\n", sym->help);
+ if (menu->help) {
+ int len = strlen(menu->help);
+ while (menu->help[--len] == '\n')
+ menu->help[len] = 0;
+ fprintf(out, " help\n%s\n", menu->help);
}
fputc('\n', out);
}
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index f646381dc01..8a09021d8c5 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -290,6 +290,14 @@ static int do_serio_entry(const char *filename,
return 1;
}
+/* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" */
+static int do_acpi_entry(const char *filename,
+ struct acpi_device_id *id, char *alias)
+{
+ sprintf(alias, "acpi*:%s:", id->id);
+ return 1;
+}
+
/* looks like: "pnp:dD" */
static int do_pnp_entry(const char *filename,
struct pnp_device_id *id, char *alias)
@@ -551,6 +559,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
do_table(symval, sym->st_size,
sizeof(struct serio_device_id), "serio",
do_serio_entry, mod);
+ else if (sym_is(symname, "__mod_acpi_device_table"))
+ do_table(symval, sym->st_size,
+ sizeof(struct acpi_device_id), "acpi",
+ do_acpi_entry, mod);
else if (sym_is(symname, "__mod_pnp_device_table"))
do_table(symval, sym->st_size,
sizeof(struct pnp_device_id), "pnp",
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 5ab7914d30e..ee58ded021d 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -23,6 +23,8 @@ int have_vmlinux = 0;
static int all_versions = 0;
/* If we are modposting external module set to 1 */
static int external_module = 0;
+/* Warn about section mismatch in vmlinux if set to 1 */
+static int vmlinux_section_warnings = 1;
/* Only warn about unresolved symbols */
static int warn_unresolved = 0;
/* How a symbol is exported */
@@ -584,13 +586,61 @@ static int strrcmp(const char *s, const char *sub)
return memcmp(s + slen - sublen, sub, sublen);
}
+/*
+ * Functions used only during module init are marked __init and are stored
+ * in a .init.text section. Likewise, data is marked __initdata and stored
+ * in a .init.data section.
+ * Return 1 if the given section is one of these sections.
+ * See include/linux/init.h for the details.
+ */
+static int init_section(const char *name)
+{
+ if (strcmp(name, ".init") == 0)
+ return 1;
+ if (strncmp(name, ".init.", strlen(".init.")) == 0)
+ return 1;
+ return 0;
+}
+
+/*
+ * Functions used only during module exit are marked __exit and are stored
+ * in a .exit.text section. Likewise, data is marked __exitdata and stored
+ * in a .exit.data section.
+ * Return 1 if the given section is one of these sections.
+ * See include/linux/init.h for the details.
+ **/
+static int exit_section(const char *name)
+{
+ if (strcmp(name, ".exit.text") == 0)
+ return 1;
+ if (strcmp(name, ".exit.data") == 0)
+ return 1;
+ return 0;
+
+}
+
+/*
+ * Data sections are named like this:
+ * .data | .data.rel | .data.rel.*
+ * Return 1 if the specified section is a data section
+ */
+static int data_section(const char *name)
+{
+ if ((strcmp(name, ".data") == 0) ||
+ (strcmp(name, ".data.rel") == 0) ||
+ (strncmp(name, ".data.rel.", strlen(".data.rel.")) == 0))
+ return 1;
+ else
+ return 0;
+}
+
/**
* Whitelist to allow certain references to pass with no warning.
*
* Pattern 0:
 * Do not warn if function/data are marked with __init_refok/__initdata_refok.
* The pattern is identified by:
- * fromsec = .text.init.refok | .data.init.refok
+ * fromsec = .text.init.refok* | .data.init.refok*
*
* Pattern 1:
* If a module parameter is declared __initdata and permissions=0
@@ -608,8 +658,8 @@ static int strrcmp(const char *s, const char *sub)
* These functions may often be marked __init and we do not want to
* warn here.
* the pattern is identified by:
- * tosec = .init.text | .exit.text | .init.data
- * fromsec = .data | .data.rel | .data.rel.*
+ * tosec = init or exit section
+ * fromsec = data section
* atsym = *driver, *_template, *_sht, *_ops, *_probe, *probe_one, *_console, *_timer
*
* Pattern 3:
@@ -625,12 +675,18 @@ static int strrcmp(const char *s, const char *sub)
* This pattern is identified by
* refsymname = __init_begin, _sinittext, _einittext
*
+ * Pattern 5:
+ * Xtensa uses literal sections for constants that are accessed PC-relative.
+ * Literal sections may safely reference their text sections.
+ * (Note that the name for the literal section omits any trailing '.text')
+ * tosec = <section>[.text]
+ * fromsec = <section>.literal
**/
static int secref_whitelist(const char *modname, const char *tosec,
const char *fromsec, const char *atsym,
const char *refsymname)
{
- int f1 = 1, f2 = 1;
+ int len;
const char **s;
const char *pat2sym[] = {
"driver",
@@ -652,36 +708,21 @@ static int secref_whitelist(const char *modname, const char *tosec,
};
/* Check for pattern 0 */
- if ((strcmp(fromsec, ".text.init.refok") == 0) ||
- (strcmp(fromsec, ".data.init.refok") == 0))
+ if ((strncmp(fromsec, ".text.init.refok", strlen(".text.init.refok")) == 0) ||
+ (strncmp(fromsec, ".data.init.refok", strlen(".data.init.refok")) == 0))
return 1;
/* Check for pattern 1 */
- if (strcmp(tosec, ".init.data") != 0)
- f1 = 0;
- if (strncmp(fromsec, ".data", strlen(".data")) != 0)
- f1 = 0;
- if (strncmp(atsym, "__param", strlen("__param")) != 0)
- f1 = 0;
-
- if (f1)
- return f1;
+ if ((strcmp(tosec, ".init.data") == 0) &&
+ (strncmp(fromsec, ".data", strlen(".data")) == 0) &&
+ (strncmp(atsym, "__param", strlen("__param")) == 0))
+ return 1;
/* Check for pattern 2 */
- if ((strcmp(tosec, ".init.text") != 0) &&
- (strcmp(tosec, ".exit.text") != 0) &&
- (strcmp(tosec, ".init.data") != 0))
- f2 = 0;
- if ((strcmp(fromsec, ".data") != 0) &&
- (strcmp(fromsec, ".data.rel") != 0) &&
- (strncmp(fromsec, ".data.rel.", strlen(".data.rel.")) != 0))
- f2 = 0;
-
- for (s = pat2sym; *s; s++)
- if (strrcmp(atsym, *s) == 0)
- f1 = 1;
- if (f1 && f2)
- return 1;
+ if ((init_section(tosec) || exit_section(tosec)) && data_section(fromsec))
+ for (s = pat2sym; *s; s++)
+ if (strrcmp(atsym, *s) == 0)
+ return 1;
/* Check for pattern 3 */
if ((strcmp(fromsec, ".text.head") == 0) &&
@@ -694,6 +735,15 @@ static int secref_whitelist(const char *modname, const char *tosec,
if (strcmp(refsymname, *s) == 0)
return 1;
+ /* Check for pattern 5 */
+ if (strrcmp(tosec, ".text") == 0)
+ len = strlen(tosec) - strlen(".text");
+ else
+ len = strlen(tosec);
+ if ((strncmp(tosec, fromsec, len) == 0) && (strlen(fromsec) > len) &&
+ (strcmp(fromsec + len, ".literal") == 0))
+ return 1;
+
return 0;
}
@@ -822,9 +872,9 @@ static void warn_sec_mismatch(const char *modname, const char *fromsec,
refsymname = elf->strtab + refsym->st_name;
/* check whitelist - we may ignore it */
- if (before &&
- secref_whitelist(modname, secname, fromsec,
- elf->strtab + before->st_name, refsymname))
+ if (secref_whitelist(modname, secname, fromsec,
+ before ? elf->strtab + before->st_name : "",
+ refsymname))
return;
if (before && after) {
@@ -1077,6 +1127,8 @@ static int initexit_section_ref_ok(const char *name)
".smp_locks",
".stab",
".m68k_fixup",
+ ".xt.prop", /* xtensa informational section */
+ ".xt.lit", /* xtensa informational section */
NULL
};
/* Start of section names */
@@ -1106,21 +1158,6 @@ static int initexit_section_ref_ok(const char *name)
return 0;
}
-/**
- * Functions used only during module init is marked __init and is stored in
- * a .init.text section. Likewise data is marked __initdata and stored in
- * a .init.data section.
- * If this section is one of these sections return 1
- * See include/linux/init.h for the details
- **/
-static int init_section(const char *name)
-{
- if (strcmp(name, ".init") == 0)
- return 1;
- if (strncmp(name, ".init.", strlen(".init.")) == 0)
- return 1;
- return 0;
-}
/*
* Identify sections from which references to a .init section is OK.
@@ -1178,23 +1215,6 @@ static int init_section_ref_ok(const char *name)
}
/*
- * Functions used only during module exit is marked __exit and is stored in
- * a .exit.text section. Likewise data is marked __exitdata and stored in
- * a .exit.data section.
- * If this section is one of these sections return 1
- * See include/linux/init.h for the details
- **/
-static int exit_section(const char *name)
-{
- if (strcmp(name, ".exit.text") == 0)
- return 1;
- if (strcmp(name, ".exit.data") == 0)
- return 1;
- return 0;
-
-}
-
-/*
* Identify sections from which references to a .exit section is OK.
*/
static int exit_section_ref_ok(const char *name)
@@ -1257,8 +1277,10 @@ static void read_symbols(char *modname)
handle_modversions(mod, &info, sym, symname);
handle_moddevtable(mod, &info, sym, symname);
}
- check_sec_ref(mod, modname, &info, init_section, init_section_ref_ok);
- check_sec_ref(mod, modname, &info, exit_section, exit_section_ref_ok);
+ if (is_vmlinux(modname) && vmlinux_section_warnings) {
+ check_sec_ref(mod, modname, &info, init_section, init_section_ref_ok);
+ check_sec_ref(mod, modname, &info, exit_section, exit_section_ref_ok);
+ }
version = get_modinfo(info.modinfo, info.modinfo_len, "version");
if (version)
@@ -1626,7 +1648,7 @@ int main(int argc, char **argv)
int opt;
int err;
- while ((opt = getopt(argc, argv, "i:I:mo:aw")) != -1) {
+ while ((opt = getopt(argc, argv, "i:I:mso:aw")) != -1) {
switch(opt) {
case 'i':
kernel_read = optarg;
@@ -1644,6 +1666,9 @@ int main(int argc, char **argv)
case 'a':
all_versions = 1;
break;
+ case 's':
+ vmlinux_section_warnings = 0;
+ break;
case 'w':
warn_unresolved = 1;
break;
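The new modpost -s switch is a plain boolean flag: it appears in the getopt string without a trailing colon, takes no argument, and simply clears vmlinux_section_warnings, which defaults to on. A runnable userspace sketch of that kind of option handling:

    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            int opt;
            int section_warnings = 1;       /* default: warn */

            while ((opt = getopt(argc, argv, "sw")) != -1) {
                    switch (opt) {
                    case 's':
                            section_warnings = 0;
                            break;
                    case 'w':
                            /* other flags handled as before */
                            break;
                    }
            }
            printf("section warnings %s\n", section_warnings ? "on" : "off");
            return 0;
    }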
diff --git a/scripts/ver_linux b/scripts/ver_linux
index 72876dfadc8..8f8df93141a 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -66,8 +66,8 @@ showmount --version 2>&1 | grep nfs-utils | awk \
'NR==1{print "nfs-utils ", $NF}'
ls -l `ldd /bin/sh | awk '/libc/{print $3}'` | sed \
--e 's/\.so$//' | awk -F'[.-]' '{print "Linux C Library " \
-$(NF-2)"."$(NF-1)"."$NF}'
+-e 's/\.so$//' | sed -e 's/>//' | \
+awk -F'[.-]' '{print "Linux C Library "$(NF-1)"."$NF}'
ldd -v > /dev/null 2>&1 && ldd -v || ldd --version |head -n 1 | awk \
'NR==1{print "Dynamic linker (ldd) ", $NF}'
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b5f017f07a7..0ae032f3876 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2417,8 +2417,10 @@ static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
cache->type = NETLBL_CACHE_T_MLS;
if (ebitmap_cpy(&cache->data.mls_label.level[0].cat,
- &ctx->range.level[0].cat) != 0)
+ &ctx->range.level[0].cat) != 0) {
+ kfree(cache);
return;
+ }
cache->data.mls_label.level[1].cat.highbit =
cache->data.mls_label.level[0].cat.highbit;
cache->data.mls_label.level[1].cat.node =
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index bd8d1ef40a9..ba715f40b65 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -216,7 +216,7 @@ static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp,
return -ENOMEM;
*ctxp = ctx = kmalloc(sizeof(*ctx) +
- uctx->ctx_len,
+ uctx->ctx_len + 1,
GFP_KERNEL);
if (!ctx)
@@ -229,6 +229,7 @@ static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp,
memcpy(ctx->ctx_str,
uctx+1,
ctx->ctx_len);
+ ctx->ctx_str[ctx->ctx_len] = 0;
rc = security_context_to_sid(ctx->ctx_str,
ctx->ctx_len,
&ctx->ctx_sid);
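The selinux_xfrm_sec_ctx_alloc() fix allocates one byte more than ctx_len and NUL-terminates the copied context string, so later code can treat ctx_str as a C string without running off the end of the buffer. A userspace sketch of the same pattern:

    #include <stdlib.h>
    #include <string.h>

    /* copy a length-delimited buffer and make it a proper C string */
    char *copy_ctx_str(const void *src, size_t len)
    {
            char *s = malloc(len + 1);

            if (!s)
                    return NULL;
            memcpy(s, src, len);
            s[len] = '\0';
            return s;
    }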
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 129d851b315..dd14abcdf1b 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -160,9 +160,9 @@ static void pxa2xx_ac97_cold_reset(struct snd_ac97 *ac97)
gsr_bits = 0;
#ifdef CONFIG_PXA27x
/* PXA27x Developers Manual section 13.5.2.2.1 */
- pxa_set_cken(1 << 31, 1);
+ pxa_set_cken(31, 1);
udelay(5);
- pxa_set_cken(1 << 31, 0);
+ pxa_set_cken(31, 0);
GCR = GCR_COLD_RST;
udelay(50);
#else