-rw-r--r--  Documentation/kernel-doc-nano-HOWTO.txt | 7
-rw-r--r--  Documentation/x86/boot.txt | 13
-rw-r--r--  MAINTAINERS | 12
-rw-r--r--  arch/arm/kernel/machine_kexec.c | 4
-rw-r--r--  arch/arm/mach-pxa/dma.c | 18
-rw-r--r--  arch/arm/mach-pxa/include/mach/regs-ac97.h | 2
-rw-r--r--  arch/arm/mach-pxa/include/mach/regs-ssp.h | 3
-rw-r--r--  arch/arm/mach-pxa/pxa300.c | 4
-rw-r--r--  arch/arm/mach-pxa/pxa320.c | 2
-rw-r--r--  arch/frv/mm/dma-alloc.c | 2
-rw-r--r--  arch/mips/include/asm/spinlock.h | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc8313erdb.dts | 11
-rw-r--r--  arch/powerpc/configs/83xx/mpc8313_rdb_defconfig | 2
-rw-r--r--  arch/powerpc/kernel/ftrace.c | 5
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 17
-rw-r--r--  arch/powerpc/lib/sstep.c | 2
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c | 4
-rw-r--r--  arch/powerpc/mm/hash_low_32.S | 2
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 4
-rw-r--r--  arch/powerpc/oprofile/cell/spu_profiler.c | 1
-rw-r--r--  arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 1
-rw-r--r--  arch/powerpc/sysdev/cpm2_pic.c | 2
-rw-r--r--  arch/powerpc/sysdev/ipic.c | 2
-rw-r--r--  arch/s390/defconfig | 87
-rw-r--r--  arch/s390/include/asm/lowcore.h | 4
-rw-r--r--  arch/s390/kernel/irq.c | 2
-rw-r--r--  arch/sparc/kernel/head_64.S | 31
-rw-r--r--  arch/sparc/kernel/nmi.c | 1
-rw-r--r--  arch/sparc/kernel/pcr.c | 7
-rw-r--r--  arch/sparc/lib/GENbzero.S | 6
-rw-r--r--  arch/sparc/lib/GENcopy_from_user.S | 8
-rw-r--r--  arch/sparc/lib/GENcopy_to_user.S | 8
-rw-r--r--  arch/sparc/lib/NG2copy_from_user.S | 9
-rw-r--r--  arch/sparc/lib/NG2copy_to_user.S | 9
-rw-r--r--  arch/sparc/lib/NGbzero.S | 6
-rw-r--r--  arch/sparc/lib/NGcopy_from_user.S | 9
-rw-r--r--  arch/sparc/lib/NGcopy_to_user.S | 9
-rw-r--r--  arch/sparc/lib/U1copy_from_user.S | 8
-rw-r--r--  arch/sparc/lib/U1copy_to_user.S | 8
-rw-r--r--  arch/sparc/lib/U3copy_from_user.S | 6
-rw-r--r--  arch/sparc/lib/U3copy_to_user.S | 8
-rw-r--r--  arch/sparc/lib/bzero.S | 6
-rw-r--r--  arch/sparc/lib/copy_in_user.S | 61
-rw-r--r--  arch/x86/Kconfig | 602
-rw-r--r--  arch/x86/Kconfig.cpu | 70
-rw-r--r--  arch/x86/Kconfig.debug | 49
-rw-r--r--  arch/x86/Makefile | 26
-rw-r--r--  arch/x86/configs/i386_defconfig | 1
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 1
-rw-r--r--  arch/x86/include/asm/apic.h | 74
-rw-r--r--  arch/x86/include/asm/apm.h (renamed from arch/x86/include/asm/mach-default/apm.h) | 0
-rw-r--r--  arch/x86/include/asm/bigsmp/apic.h | 155
-rw-r--r--  arch/x86/include/asm/bigsmp/apicdef.h | 13
-rw-r--r--  arch/x86/include/asm/bigsmp/ipi.h | 22
-rw-r--r--  arch/x86/include/asm/cpu.h | 6
-rw-r--r--  arch/x86/include/asm/do_timer.h (renamed from arch/x86/include/asm/mach-default/do_timer.h) | 0
-rw-r--r--  arch/x86/include/asm/entry_arch.h (renamed from arch/x86/include/asm/mach-default/entry_arch.h) | 7
-rw-r--r--  arch/x86/include/asm/es7000/apic.h | 242
-rw-r--r--  arch/x86/include/asm/es7000/apicdef.h | 13
-rw-r--r--  arch/x86/include/asm/es7000/ipi.h | 22
-rw-r--r--  arch/x86/include/asm/es7000/mpparse.h | 29
-rw-r--r--  arch/x86/include/asm/es7000/wakecpu.h | 37
-rw-r--r--  arch/x86/include/asm/genapic.h | 262
-rw-r--r--  arch/x86/include/asm/genapic_32.h | 141
-rw-r--r--  arch/x86/include/asm/genapic_64.h | 60
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 24
-rw-r--r--  arch/x86/include/asm/io.h | 2
-rw-r--r--  arch/x86/include/asm/io_apic.h | 15
-rw-r--r--  arch/x86/include/asm/ipi.h | 77
-rw-r--r--  arch/x86/include/asm/irq.h | 4
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 218
-rw-r--r--  arch/x86/include/asm/mach-default/mach_apic.h | 168
-rw-r--r--  arch/x86/include/asm/mach-default/mach_apicdef.h | 24
-rw-r--r--  arch/x86/include/asm/mach-default/mach_ipi.h | 64
-rw-r--r--  arch/x86/include/asm/mach-default/mach_mpparse.h | 17
-rw-r--r--  arch/x86/include/asm/mach-default/mach_mpspec.h | 12
-rw-r--r--  arch/x86/include/asm/mach-default/mach_wakecpu.h | 41
-rw-r--r--  arch/x86/include/asm/mach-generic/gpio.h | 15
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_apic.h | 35
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_apicdef.h | 11
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_ipi.h | 10
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_mpparse.h | 9
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_mpspec.h | 12
-rw-r--r--  arch/x86/include/asm/mach-generic/mach_wakecpu.h | 12
-rw-r--r--  arch/x86/include/asm/mach-rdc321x/gpio.h | 60
-rw-r--r--  arch/x86/include/asm/mach_timer.h (renamed from arch/x86/include/asm/mach-default/mach_timer.h) | 0
-rw-r--r--  arch/x86/include/asm/mach_traps.h (renamed from arch/x86/include/asm/mach-default/mach_traps.h) | 0
-rw-r--r--  arch/x86/include/asm/mpspec.h | 35
-rw-r--r--  arch/x86/include/asm/numaq.h | 2
-rw-r--r--  arch/x86/include/asm/numaq/apic.h | 142
-rw-r--r--  arch/x86/include/asm/numaq/apicdef.h | 14
-rw-r--r--  arch/x86/include/asm/numaq/ipi.h | 22
-rw-r--r--  arch/x86/include/asm/numaq/mpparse.h | 6
-rw-r--r--  arch/x86/include/asm/numaq/wakecpu.h | 45
-rw-r--r--  arch/x86/include/asm/paravirt.h | 1
-rw-r--r--  arch/x86/include/asm/pat.h | 4
-rw-r--r--  arch/x86/include/asm/pci-functions.h (renamed from arch/x86/include/asm/mach-default/pci-functions.h) | 0
-rw-r--r--  arch/x86/include/asm/prctl.h | 4
-rw-r--r--  arch/x86/include/asm/processor.h | 2
-rw-r--r--  arch/x86/include/asm/proto.h | 4
-rw-r--r--  arch/x86/include/asm/rdc321x_defs.h (renamed from arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h) | 0
-rw-r--r--  arch/x86/include/asm/setup.h | 8
-rw-r--r--  arch/x86/include/asm/setup_arch.h (renamed from arch/x86/include/asm/mach-default/setup_arch.h) | 0
-rw-r--r--  arch/x86/include/asm/smp.h | 19
-rw-r--r--  arch/x86/include/asm/smpboot_hooks.h (renamed from arch/x86/include/asm/mach-default/smpboot_hooks.h) | 6
-rw-r--r--  arch/x86/include/asm/spinlock.h | 4
-rw-r--r--  arch/x86/include/asm/summit/apic.h | 202
-rw-r--r--  arch/x86/include/asm/summit/apicdef.h | 13
-rw-r--r--  arch/x86/include/asm/summit/ipi.h | 26
-rw-r--r--  arch/x86/include/asm/summit/mpparse.h | 109
-rw-r--r--  arch/x86/include/asm/system.h | 6
-rw-r--r--  arch/x86/include/asm/voyager.h | 42
-rw-r--r--  arch/x86/include/asm/xen/events.h | 6
-rw-r--r--  arch/x86/kernel/Makefile | 13
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 43
-rw-r--r--  arch/x86/kernel/apic.c | 120
-rw-r--r--  arch/x86/kernel/apm_32.c | 2
-rw-r--r--  arch/x86/kernel/bigsmp_32.c | 266
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c | 54
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 93
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 28
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 14
-rw-r--r--  arch/x86/kernel/crash.c | 2
-rw-r--r--  arch/x86/kernel/entry_32.S | 2
-rw-r--r--  arch/x86/kernel/entry_64.S | 2
-rw-r--r--  arch/x86/kernel/es7000_32.c | 468
-rw-r--r--  arch/x86/kernel/ftrace.c | 17
-rw-r--r--  arch/x86/kernel/genapic_64.c | 22
-rw-r--r--  arch/x86/kernel/genapic_flat_64.c | 176
-rw-r--r--  arch/x86/kernel/genx2apic_cluster.c | 133
-rw-r--r--  arch/x86/kernel/genx2apic_phys.c | 125
-rw-r--r--  arch/x86/kernel/genx2apic_uv_x.c | 111
-rw-r--r--  arch/x86/kernel/head_32.S | 13
-rw-r--r--  arch/x86/kernel/hpet.c | 12
-rw-r--r--  arch/x86/kernel/i8237.c | 17
-rw-r--r--  arch/x86/kernel/io_apic.c | 272
-rw-r--r--  arch/x86/kernel/ipi.c | 176
-rw-r--r--  arch/x86/kernel/irq.c | 38
-rw-r--r--  arch/x86/kernel/irq_32.c | 31
-rw-r--r--  arch/x86/kernel/irq_64.c | 34
-rw-r--r--  arch/x86/kernel/irqinit_32.c | 12
-rw-r--r--  arch/x86/kernel/kgdb.c | 4
-rw-r--r--  arch/x86/kernel/mpparse.c | 36
-rw-r--r--  arch/x86/kernel/nmi.c | 2
-rw-r--r--  arch/x86/kernel/numaq_32.c | 307
-rw-r--r--  arch/x86/kernel/probe_32.c | 411
-rw-r--r--  arch/x86/kernel/probe_roms_32.c | 2
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/kernel/reboot.c | 4
-rw-r--r--  arch/x86/kernel/setup.c | 30
-rw-r--r--  arch/x86/kernel/smp.c | 15
-rw-r--r--  arch/x86/kernel/smpboot.c | 43
-rw-r--r--  arch/x86/kernel/stacktrace.c | 2
-rw-r--r--  arch/x86/kernel/summit_32.c | 416
-rw-r--r--  arch/x86/kernel/time_32.c | 2
-rw-r--r--  arch/x86/kernel/tlb_uv.c | 2
-rw-r--r--  arch/x86/kernel/traps.c | 2
-rw-r--r--  arch/x86/kernel/tsc.c | 2
-rw-r--r--  arch/x86/kernel/visws_quirks.c | 6
-rw-r--r--  arch/x86/kernel/vmiclock_32.c | 2
-rw-r--r--  arch/x86/mach-default/Makefile | 5
-rw-r--r--  arch/x86/mach-default/setup.c | 174
-rw-r--r--  arch/x86/mach-generic/Makefile | 11
-rw-r--r--  arch/x86/mach-generic/bigsmp.c | 60
-rw-r--r--  arch/x86/mach-generic/default.c | 27
-rw-r--r--  arch/x86/mach-generic/es7000.c | 103
-rw-r--r--  arch/x86/mach-generic/numaq.c | 53
-rw-r--r--  arch/x86/mach-generic/probe.c | 152
-rw-r--r--  arch/x86/mach-generic/summit.c | 40
-rw-r--r--  arch/x86/mach-rdc321x/Makefile | 5
-rw-r--r--  arch/x86/mach-rdc321x/gpio.c | 194
-rw-r--r--  arch/x86/mach-rdc321x/platform.c | 69
-rw-r--r--  arch/x86/mm/Makefile | 2
-rw-r--r--  arch/x86/mm/fault.c | 1
-rw-r--r--  arch/x86/mm/ioremap.c | 2
-rw-r--r--  arch/x86/mm/mmap.c | 2
-rw-r--r--  arch/x86/mm/pat.c | 31
-rw-r--r--  arch/x86/mm/tlb.c | 4
-rw-r--r--  arch/x86/pci/numaq_32.c | 6
-rw-r--r--  arch/x86/pci/pcbios.c | 2
-rw-r--r--  arch/x86/xen/irq.c | 17
-rw-r--r--  crypto/algapi.c | 6
-rw-r--r--  crypto/api.c | 20
-rw-r--r--  crypto/scatterwalk.c | 3
-rw-r--r--  crypto/shash.c | 7
-rw-r--r--  drivers/atm/solos-pci.c | 1
-rw-r--r--  drivers/block/nbd.c | 9
-rw-r--r--  drivers/char/tpm/tpm_infineon.c | 4
-rw-r--r--  drivers/clocksource/acpi_pm.c | 2
-rw-r--r--  drivers/clocksource/cyclone.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 47
-rw-r--r--  drivers/eisa/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 1
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 47
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 83
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 91
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 870
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo_regs.h | 404
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 6
-rw-r--r--  drivers/input/keyboard/Kconfig | 4
-rw-r--r--  drivers/input/mouse/Kconfig | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcmulti.c | 2
-rw-r--r--  drivers/mtd/nand/Kconfig | 2
-rw-r--r--  drivers/net/3c509.c | 1
-rw-r--r--  drivers/net/gianfar.c | 6
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 2
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 13
-rw-r--r--  drivers/net/r8169.c | 93
-rw-r--r--  drivers/net/sungem.c | 8
-rw-r--r--  drivers/net/sunhme.c | 12
-rw-r--r--  drivers/net/tulip/de2104x.c | 3
-rw-r--r--  drivers/net/tun.c | 10
-rw-r--r--  drivers/parport/parport_serial.c | 5
-rw-r--r--  drivers/power/pcf50633-charger.c | 3
-rw-r--r--  drivers/rtc/rtc-au1xxx.c | 2
-rw-r--r--  drivers/rtc/rtc-pxa.c | 2
-rw-r--r--  drivers/s390/block/dasd.c | 46
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 2
-rw-r--r--  drivers/staging/android/Kconfig | 1
-rw-r--r--  drivers/staging/android/ram_console.c | 14
-rw-r--r--  drivers/staging/android/timed_gpio.c | 2
-rw-r--r--  drivers/staging/at76_usb/Kconfig | 2
-rw-r--r--  drivers/staging/at76_usb/at76_usb.c | 4620
-rw-r--r--  drivers/staging/at76_usb/at76_usb.h | 227
-rw-r--r--  drivers/staging/panel/panel.c | 10
-rw-r--r--  drivers/usb/Makefile | 1
-rw-r--r--  drivers/usb/class/cdc-acm.c | 3
-rw-r--r--  drivers/usb/gadget/fsl_qe_udc.c | 46
-rw-r--r--  drivers/usb/serial/aircable.c | 4
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 5
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 3
-rw-r--r--  drivers/usb/serial/option.c | 93
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 10
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.h | 2
-rw-r--r--  drivers/usb/storage/scsiglue.c | 2
-rw-r--r--  drivers/usb/storage/transport.c | 36
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 17
-rw-r--r--  drivers/video/Kconfig | 6
-rw-r--r--  drivers/video/bfin-t350mcqb-fb.c | 2
-rw-r--r--  drivers/video/geode/gx1fb_core.c | 17
-rw-r--r--  drivers/video/geode/gxfb_core.c | 17
-rw-r--r--  drivers/video/geode/lxfb_core.c | 17
-rw-r--r--  drivers/w1/slaves/w1_therm.c | 2
-rw-r--r--  drivers/watchdog/rdc321x_wdt.c | 2
-rw-r--r--  drivers/xen/events.c | 227
-rw-r--r--  fs/btrfs/ctree.c | 3
-rw-r--r--  fs/btrfs/locking.c | 22
-rw-r--r--  fs/btrfs/locking.h | 2
-rw-r--r--  fs/ext2/super.c | 9
-rw-r--r--  fs/ext3/super.c | 11
-rw-r--r--  fs/hugetlbfs/inode.c | 8
-rw-r--r--  fs/jbd/journal.c | 17
-rw-r--r--  fs/lockd/svclock.c | 6
-rw-r--r--  include/crypto/hash.h | 2
-rw-r--r--  include/drm/i915_drm.h | 2
-rw-r--r--  include/linux/cgroup.h | 1
-rw-r--r--  include/linux/crypto.h | 7
-rw-r--r--  include/linux/dmaengine.h | 12
-rw-r--r--  include/linux/hugetlb.h | 11
-rw-r--r--  include/linux/init_task.h | 11
-rw-r--r--  include/linux/mm.h | 3
-rw-r--r--  include/linux/sched.h | 64
-rw-r--r--  include/linux/smp.h | 6
-rw-r--r--  include/linux/spinlock.h | 5
-rw-r--r--  include/linux/syscalls.h | 28
-rw-r--r--  init/main.c | 12
-rw-r--r--  ipc/shm.c | 8
-rw-r--r--  kernel/cgroup.c | 3
-rw-r--r--  kernel/exit.c | 3
-rw-r--r--  kernel/fork.c | 5
-rw-r--r--  kernel/itimer.c | 4
-rw-r--r--  kernel/posix-cpu-timers.c | 117
-rw-r--r--  kernel/profile.c | 3
-rw-r--r--  kernel/sched.c | 27
-rw-r--r--  kernel/sched_fair.c | 11
-rw-r--r--  kernel/sched_stats.h | 45
-rw-r--r--  kernel/signal.c | 8
-rw-r--r--  kernel/sysctl.c | 5
-rw-r--r--  mm/fremap.c | 2
-rw-r--r--  mm/hugetlb.c | 28
-rw-r--r--  mm/migrate.c | 2
-rw-r--r--  mm/mlock.c | 5
-rw-r--r--  mm/mmap.c | 48
-rw-r--r--  mm/mprotect.c | 5
-rw-r--r--  mm/page-writeback.c | 33
-rw-r--r--  mm/page_cgroup.c | 3
-rw-r--r--  mm/rmap.c | 3
-rw-r--r--  mm/slab.c | 1
-rw-r--r--  mm/slob.c | 1
-rw-r--r--  mm/slub.c | 1
-rw-r--r--  net/9p/protocol.c | 22
-rw-r--r--  net/bridge/br_forward.c | 7
-rw-r--r--  net/core/dev.c | 4
-rw-r--r--  net/core/neighbour.c | 14
-rw-r--r--  net/ipv4/udp.c | 6
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 8
-rw-r--r--  net/ipv6/ip6_tunnel.c | 2
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 25
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 15
-rw-r--r--  net/netfilter/xt_sctp.c | 2
-rw-r--r--  net/rxrpc/af_rxrpc.c | 5
-rwxr-xr-x  scripts/kernel-doc | 40
-rw-r--r--  sound/arm/aaci.c | 6
-rw-r--r--  sound/drivers/Kconfig | 2
315 files changed, 10336 insertions, 6109 deletions
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
index d73fbd2b2b4..026ec7d5738 100644
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ b/Documentation/kernel-doc-nano-HOWTO.txt
@@ -43,7 +43,8 @@ Only comments so marked will be considered by the kernel-doc scripts,
and any comment so marked must be in kernel-doc format. Do not use
"/**" to be begin a comment block unless the comment block contains
kernel-doc formatted comments. The closing comment marker for
-kernel-doc comments can be either "*/" or "**/".
+kernel-doc comments can be either "*/" or "**/", but "*/" is
+preferred in the Linux kernel tree.
Kernel-doc comments should be placed just before the function
or data structure being described.
@@ -63,7 +64,7 @@ Example kernel-doc function comment:
* comment lines.
*
* The longer description can have multiple paragraphs.
- **/
+ */
The first line, with the short description, must be on a single line.
@@ -85,7 +86,7 @@ Example kernel-doc data structure comment.
* perhaps with more lines and words.
*
* Longer description of this structure.
- **/
+ */
The kernel-doc function comments describe each parameter to the
function, in order, with the @name lines.
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 7b4596ac412..12299697b7c 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -158,7 +158,7 @@ Offset Proto Name Meaning
0202/4 2.00+ header Magic signature "HdrS"
0206/2 2.00+ version Boot protocol version supported
0208/4 2.00+ realmode_swtch Boot loader hook (see below)
-020C/2 2.00+ start_sys The load-low segment (0x1000) (obsolete)
+020C/2 2.00+ start_sys_seg The load-low segment (0x1000) (obsolete)
020E/2 2.00+ kernel_version Pointer to kernel version string
0210/1 2.00+ type_of_loader Boot loader identifier
0211/1 2.00+ loadflags Boot protocol option flags
@@ -170,10 +170,11 @@ Offset Proto Name Meaning
0224/2 2.01+ heap_end_ptr Free memory after setup end
0226/2 N/A pad1 Unused
0228/4 2.02+ cmd_line_ptr 32-bit pointer to the kernel command line
-022C/4 2.03+ initrd_addr_max Highest legal initrd address
+022C/4 2.03+ ramdisk_max Highest legal initrd address
0230/4 2.05+ kernel_alignment Physical addr alignment required for kernel
0234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not
-0235/3 N/A pad2 Unused
+0235/1 N/A pad2 Unused
+0236/2 N/A pad3 Unused
0238/4 2.06+ cmdline_size Maximum size of the kernel command line
023C/4 2.07+ hardware_subarch Hardware subarchitecture
0240/8 2.07+ hardware_subarch_data Subarchitecture-specific data
@@ -299,14 +300,14 @@ Protocol: 2.00+
e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
10.17.
-Field name: readmode_swtch
+Field name: realmode_swtch
Type: modify (optional)
Offset/size: 0x208/4
Protocol: 2.00+
Boot loader hook (see ADVANCED BOOT LOADER HOOKS below.)
-Field name: start_sys
+Field name: start_sys_seg
Type: read
Offset/size: 0x20c/2
Protocol: 2.00+
@@ -468,7 +469,7 @@ Protocol: 2.02+
zero, the kernel will assume that your boot loader does not support
the 2.02+ protocol.
-Field name: initrd_addr_max
+Field name: ramdisk_max
Type: read
Offset/size: 0x22c/4
Protocol: 2.03+
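
Illustrative sketch (not part of the patch): a boot loader reads the fields
renamed above straight out of the real-mode kernel header, at the offsets the
table documents (magic "HdrS" at 0x202, start_sys_seg at 0x20C, ramdisk_max at
0x22C). All names below are hypothetical and assume an x86 (little-endian) host.

    #include <stdint.h>
    #include <string.h>

    /* "buf" points at the start of the loaded real-mode kernel image. */
    static int parse_setup_header(const uint8_t *buf,
                                  uint16_t *start_sys_seg, uint32_t *ramdisk_max)
    {
            if (memcmp(buf + 0x202, "HdrS", 4) != 0)
                    return -1;                       /* no 2.00+ boot protocol header */
            memcpy(start_sys_seg, buf + 0x20c, 2);   /* obsolete load-low segment     */
            memcpy(ramdisk_max,   buf + 0x22c, 4);   /* highest legal initrd address  */
            return 0;
    }
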
diff --git a/MAINTAINERS b/MAINTAINERS
index 0ea3a6d9871..db65b4e6d13 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1202,6 +1202,8 @@ S: Supported
CONTROL GROUPS (CGROUPS)
P: Paul Menage
M: menage@google.com
+P: Li Zefan
+M: lizf@cn.fujitsu.com
L: containers@lists.linux-foundation.org
S: Maintained
@@ -3537,6 +3539,12 @@ S: Maintained
PXA MMCI DRIVER
S: Orphan
+PXA RTC DRIVER
+P: Robert Jarzmik
+M: robert.jarzmik@free.fr
+L: rtc-linux@googlegroups.com
+S: Maintained
+
QLOGIC QLA2XXX FC-SCSI DRIVER
P: Andrew Vasquez
M: linux-driver@qlogic.com
@@ -4285,8 +4293,8 @@ P: Rajiv Andrade
M: srajiv@linux.vnet.ibm.com
W: http://tpmdd.sourceforge.net
P: Marcel Selhorst
-M: tpm@selhorst.net
-W: http://www.prosec.rub.de/tpm/
+M: m.selhorst@sirrix.com
+W: http://www.sirrix.com
L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
S: Maintained
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 440dc62cdc3..598ca61e7bc 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -13,8 +13,8 @@
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
-const extern unsigned char relocate_new_kernel[];
-const extern unsigned int relocate_new_kernel_size;
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned int relocate_new_kernel_size;
extern void setup_mm_for_reboot(char mode);
diff --git a/arch/arm/mach-pxa/dma.c b/arch/arm/mach-pxa/dma.c
index b1514fb20d3..7de17fc5d54 100644
--- a/arch/arm/mach-pxa/dma.c
+++ b/arch/arm/mach-pxa/dma.c
@@ -121,20 +121,22 @@ int __init pxa_init_dma(int num_ch)
if (dma_channels == NULL)
return -ENOMEM;
- ret = request_irq(IRQ_DMA, dma_irq_handler, IRQF_DISABLED, "DMA", NULL);
- if (ret) {
- printk (KERN_CRIT "Wow! Can't register IRQ for DMA\n");
- kfree(dma_channels);
- return ret;
- }
-
/* dma channel priorities on pxa2xx processors:
* ch 0 - 3, 16 - 19 <--> (0) DMA_PRIO_HIGH
* ch 4 - 7, 20 - 23 <--> (1) DMA_PRIO_MEDIUM
* ch 8 - 15, 24 - 31 <--> (2) DMA_PRIO_LOW
*/
- for (i = 0; i < num_ch; i++)
+ for (i = 0; i < num_ch; i++) {
+ DCSR(i) = 0;
dma_channels[i].prio = min((i & 0xf) >> 2, DMA_PRIO_LOW);
+ }
+
+ ret = request_irq(IRQ_DMA, dma_irq_handler, IRQF_DISABLED, "DMA", NULL);
+ if (ret) {
+ printk (KERN_CRIT "Wow! Can't register IRQ for DMA\n");
+ kfree(dma_channels);
+ return ret;
+ }
num_dma_channels = num_ch;
return 0;
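
The reordering above is an instance of a common probe-time pattern: put the
hardware into a known quiescent state (here, clearing every channel's DCSR)
before request_irq() makes the handler callable, so a stale pending interrupt
cannot run against uninitialized channel state. A minimal sketch of the
pattern with hypothetical names (mydev_*, MYDEV_IRQ, NUM_CHANNELS, STATUS_REG):

    static irqreturn_t mydev_irq_handler(int irq, void *dev_id);

    static int mydev_probe(void)
    {
            int i, ret;

            for (i = 0; i < NUM_CHANNELS; i++)
                    writel(0, STATUS_REG(i));   /* quiesce before the handler is live */

            ret = request_irq(MYDEV_IRQ, mydev_irq_handler, 0, "mydev", NULL);
            if (ret)
                    return ret;                 /* nothing armed yet, nothing to undo */
            return 0;
    }
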
diff --git a/arch/arm/mach-pxa/include/mach/regs-ac97.h b/arch/arm/mach-pxa/include/mach/regs-ac97.h
index e41b9d202b8..b8d14bd9ae5 100644
--- a/arch/arm/mach-pxa/include/mach/regs-ac97.h
+++ b/arch/arm/mach-pxa/include/mach/regs-ac97.h
@@ -1,6 +1,8 @@
#ifndef __ASM_ARCH_REGS_AC97_H
#define __ASM_ARCH_REGS_AC97_H
+#include <mach/hardware.h>
+
/*
* AC97 Controller registers
*/
diff --git a/arch/arm/mach-pxa/include/mach/regs-ssp.h b/arch/arm/mach-pxa/include/mach/regs-ssp.h
index 3c04cde2cf1..cf31986f6f0 100644
--- a/arch/arm/mach-pxa/include/mach/regs-ssp.h
+++ b/arch/arm/mach-pxa/include/mach/regs-ssp.h
@@ -41,6 +41,9 @@
#elif defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
#define SSCR0_SCR (0x000fff00) /* Serial Clock Rate (mask) */
#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
+#endif
+
+#if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
#define SSCR0_EDSS (1 << 20) /* Extended data size select */
#define SSCR0_NCS (1 << 21) /* Network clock select */
#define SSCR0_RIM (1 << 22) /* Receive FIFO overrrun interrupt mask */
diff --git a/arch/arm/mach-pxa/pxa300.c b/arch/arm/mach-pxa/pxa300.c
index f735e58e666..83fb609b6eb 100644
--- a/arch/arm/mach-pxa/pxa300.c
+++ b/arch/arm/mach-pxa/pxa300.c
@@ -88,13 +88,13 @@ static struct pxa3xx_mfp_addr_map pxa310_mfp_addr_map[] __initdata = {
static DEFINE_PXA3_CKEN(common_nand, NAND, 156000000, 0);
static struct clk_lookup common_clkregs[] = {
- INIT_CLKREG(&clk_common_nand, "pxa3xx-nand", "NANDCLK"),
+ INIT_CLKREG(&clk_common_nand, "pxa3xx-nand", NULL),
};
static DEFINE_PXA3_CKEN(pxa310_mmc3, MMC3, 19500000, 0);
static struct clk_lookup pxa310_clkregs[] = {
- INIT_CLKREG(&clk_pxa310_mmc3, "pxa2xx-mci.2", "MMCCLK"),
+ INIT_CLKREG(&clk_pxa310_mmc3, "pxa2xx-mci.2", NULL),
};
static int __init pxa300_init(void)
diff --git a/arch/arm/mach-pxa/pxa320.c b/arch/arm/mach-pxa/pxa320.c
index effe408c186..36f066196fa 100644
--- a/arch/arm/mach-pxa/pxa320.c
+++ b/arch/arm/mach-pxa/pxa320.c
@@ -83,7 +83,7 @@ static struct pxa3xx_mfp_addr_map pxa320_mfp_addr_map[] __initdata = {
static DEFINE_PXA3_CKEN(pxa320_nand, NAND, 104000000, 0);
static struct clk_lookup pxa320_clkregs[] = {
- INIT_CLKREG(&clk_pxa320_nand, "pxa3xx-nand", "NANDCLK"),
+ INIT_CLKREG(&clk_pxa320_nand, "pxa3xx-nand", NULL),
};
static int __init pxa320_init(void)
diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c
index dc6522c464d..44840e73e90 100644
--- a/arch/frv/mm/dma-alloc.c
+++ b/arch/frv/mm/dma-alloc.c
@@ -36,10 +36,10 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
-#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 1a1f320c30d..0884947ebe2 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -51,6 +51,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
return (((counters >> 14) - counters) & 0x1fff) > 1;
}
+#define __raw_spin_is_contended __raw_spin_is_contended
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
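
Defining __raw_spin_is_contended to itself lets generic code detect with the
preprocessor that the architecture provides a real implementation; a bare
static inline is invisible to #ifdef. A sketch of the consuming side (the
pattern, not necessarily the exact include/linux/spinlock.h text):

    #ifdef __raw_spin_is_contended
    #define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
    #else
    #define spin_is_contended(lock) (((void)(lock), 0))
    #endif
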
diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts
index 909a89cab9a..3ebf7ec0484 100644
--- a/arch/powerpc/boot/dts/mpc8313erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8313erdb.dts
@@ -191,7 +191,8 @@
interrupts = <37 0x8 36 0x8 35 0x8>;
interrupt-parent = <&ipic>;
tbi-handle = < &tbi0 >;
- phy-handle = < &phy1 >;
+ /* Vitesse 7385 isn't on the MDIO bus */
+ fixed-link = <1 1 1000 0 0>;
fsl,magic-packet;
mdio@24520 {
@@ -199,12 +200,6 @@
#size-cells = <0>;
compatible = "fsl,gianfar-mdio";
reg = <0x24520 0x20>;
- phy1: ethernet-phy@1 {
- interrupt-parent = <&ipic>;
- interrupts = <19 0x8>;
- reg = <0x1>;
- device_type = "ethernet-phy";
- };
phy4: ethernet-phy@4 {
interrupt-parent = <&ipic>;
interrupts = <20 0x8>;
@@ -219,6 +214,8 @@
};
enet1: ethernet@25000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
cell-index = <1>;
device_type = "network";
model = "eTSEC";
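
The fixed-link property added above describes the PHY-less Vitesse switch port
to the gianfar driver. A field-by-field reading of the value, assuming the
conventional old-style fixed-link layout (worth verifying against the binding
documentation):

    fixed-link = <1       /* emulated PHY id              */
                  1       /* duplex: 1 = full             */
                  1000    /* link speed in Mbit/s         */
                  0       /* pause frames supported?      */
                  0>;     /* asymmetric pause supported?  */

The CONFIG_FIXED_PHY=y change in the defconfig hunk that follows enables the
kernel-side emulated PHY this property relies on.
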
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index 9e47ae957e2..409d017621a 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -651,7 +651,7 @@ CONFIG_CICADA_PHY=y
# CONFIG_NATIONAL_PHY is not set
# CONFIG_STE10XP is not set
# CONFIG_LSI_ET1011C_PHY is not set
-# CONFIG_FIXED_PHY is not set
+CONFIG_FIXED_PHY=y
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 5355244c99f..60c60ccf5e3 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -195,8 +195,9 @@ __ftrace_make_nop(struct module *mod,
return -EINVAL;
}
- offset = (unsigned)((unsigned short)jmp[0]) << 16 |
- (unsigned)((unsigned short)jmp[1]);
+ /* The bottom half is signed extended */
+ offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
+ (int)((short)jmp[1]);
DEBUGP(" %x ", offset);
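
The fix above matters because the low 16-bit immediate of the instruction pair
is sign-extended when the CPU adds it, so OR-ing the two halves together is off
by 0x10000 whenever the low half has its top bit set. A worked example with
hypothetical immediates (hi = 0x1234, lo = 0xfffc, i.e. -4):

    static unsigned int reconstruct_offset(unsigned short hi, unsigned short lo)
    {
            unsigned int wrong = ((unsigned int)hi << 16) | lo;          /* 0x1234fffc */
            unsigned int right = ((unsigned int)hi << 16) + (short)lo;   /* 0x1233fffc */

            (void)wrong;    /* only the signed form matches what the CPU computed */
            return right;
    }
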
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 19b12d2cbb4..0f418127231 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -561,8 +561,21 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
(unsigned long long)(offset + size - 1));
if (mmap_state == pci_mmap_mem) {
- if ((offset + size) > hose->isa_mem_size)
- return -ENXIO;
+ /* Hack alert !
+ *
+ * Because X is lame and can fail starting if it gets an error trying
+ * to mmap legacy_mem (instead of just moving on without legacy memory
+ * access) we fake it here by giving it anonymous memory, effectively
+ * behaving just like /dev/zero
+ */
+ if ((offset + size) > hose->isa_mem_size) {
+ printk(KERN_DEBUG
+ "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
+ current->comm, current->pid, pci_domain_nr(bus), bus->number);
+ if (vma->vm_flags & VM_SHARED)
+ return shmem_zero_setup(vma);
+ return 0;
+ }
offset += hose->isa_mem_phys;
} else {
unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 4aae0c38764..13b7d54f185 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -172,6 +172,8 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
}
break;
case 0x378: /* orx */
+ if (instr & 1)
+ break;
rs = (instr >> 21) & 0x1f;
rb = (instr >> 11) & 0x1f;
if (rs == rb) { /* mr */
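
The added check declines to emulate the record form of the instruction: in this
encoding the least-significant bit is the Rc bit, and when it is set ("or."
rather than "or") the instruction must also update CR0, which this emulator
does not model. Sketch of the test being applied:

    if (instr & 1)          /* Rc = 1: condition-register side effect      */
            break;          /* bail out and let the instruction run as-is  */
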
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 1971e4ee3d6..ea6e41e39d9 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -73,7 +73,7 @@ extern unsigned int tlbcam_index;
/*
* Return PA for this VA if it is mapped by a CAM, or 0
*/
-unsigned long v_mapped_by_tlbcam(unsigned long va)
+phys_addr_t v_mapped_by_tlbcam(unsigned long va)
{
int b;
for (b = 0; b < tlbcam_index; ++b)
@@ -85,7 +85,7 @@ unsigned long v_mapped_by_tlbcam(unsigned long va)
/*
* Return VA for a given PA or 0 if not mapped
*/
-unsigned long p_mapped_by_tlbcam(unsigned long pa)
+unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
{
int b;
for (b = 0; b < tlbcam_index; ++b)
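
The new return type matters on 32-bit FSL BookE parts whose physical address
space is wider than 32 bits: there phys_addr_t is 64 bits while unsigned long
is 32, so returning the physical address as unsigned long silently truncates
it. A minimal illustration (kernel types assumed):

    static void truncation_example(void)
    {
            phys_addr_t pa = 0x200001000ULL;          /* needs more than 32 bits   */
            unsigned long lost = (unsigned long)pa;   /* 0x00001000: top bits gone */
            (void)lost;
    }
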
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 67850ec9feb..14af8cedab7 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -320,7 +320,7 @@ _GLOBAL(create_hpte)
and r8,r8,r0 /* writable if _RW & _DIRTY */
rlwimi r5,r5,32-1,30,30 /* _PAGE_USER -> PP msb */
rlwimi r5,r5,32-2,31,31 /* _PAGE_USER -> PP lsb */
- ori r8,r8,0xe14 /* clear out reserved bits and M */
+ ori r8,r8,0xe04 /* clear out reserved bits */
andc r8,r5,r8 /* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
rlwinm r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 22972cd83cc..58bcaeba728 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -61,8 +61,8 @@ void setbat(int index, unsigned long virt, phys_addr_t phys,
#ifdef HAVE_TLBCAM
extern unsigned int tlbcam_index;
-extern unsigned long v_mapped_by_tlbcam(unsigned long va);
-extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
+extern phys_addr_t v_mapped_by_tlbcam(unsigned long va);
+extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
#else /* !HAVE_TLBCAM */
#define v_mapped_by_tlbcam(x) (0UL)
#define p_mapped_by_tlbcam(x) (0UL)
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index 9305ddaac51..b129d007e7f 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -16,6 +16,7 @@
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cell-pmu.h>
+#include <asm/time.h>
#include "pr_util.h"
#define SCALE_SHIFT 14
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 9876d7e072f..ddf0bdc0fc8 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -186,7 +186,7 @@ out_unmap_regs:
iounmap(priv->regs);
out_free_bootmem:
free_bootmem((unsigned long)priv,
- sizeof(sizeof(struct pq2ads_pci_pic)));
+ sizeof(struct pq2ads_pci_pic));
of_node_put(np);
out_unmap_irq:
irq_dispose_mapping(irq);
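
The bug removed above is the doubled sizeof: sizeof(sizeof(struct ...)) is the
size of a size_t, not of the structure, so the original call freed only a few
bytes of the bootmem allocation. Illustration with a hypothetical stand-in
struct:

    struct example { char payload[200]; };   /* stands in for pq2ads_pci_pic */

    static void sizeof_example(void)
    {
            size_t right = sizeof(struct example);           /* 200                        */
            size_t wrong = sizeof(sizeof(struct example));   /* sizeof(size_t): 4 on ppc32 */
            (void)right; (void)wrong;
    }
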
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index a623ad256e9..9b21ee68ea5 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -14,6 +14,7 @@
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
+#include <asm/sparsemem.h>
static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
{
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index b16ca3ed65d..78f1f7cca0a 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -165,7 +165,7 @@ static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type)
edibit = (14 - (src - CPM2_IRQ_EXT1));
else
if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0)
- edibit = (31 - (src - CPM2_IRQ_PORTC15));
+ edibit = (31 - (CPM2_IRQ_PORTC0 - src));
else
return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 88a983ece5c..9a89cd3e80a 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -890,7 +890,7 @@ unsigned int ipic_get_irq(void)
return irq_linear_revmap(primary_ipic->irqhost, irq);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_SUSPEND
static struct {
u32 sicfr;
u32 siprr[2];
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index a0e748da990..31e809c7779 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28-rc6
-# Thu Nov 27 11:00:49 2008
+# Linux kernel version: 2.6.29-rc4
+# Wed Feb 11 10:07:16 2009
#
CONFIG_SCHED_MC=y
CONFIG_MMU=y
@@ -14,12 +14,14 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_BUG=y
CONFIG_NO_IOMEM=y
CONFIG_NO_DMA=y
CONFIG_GENERIC_LOCKBREAK=y
CONFIG_PGSTE=y
+CONFIG_VIRT_CPU_ACCOUNTING=y
CONFIG_S390=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@@ -39,20 +41,29 @@ CONFIG_POSIX_MQUEUE=y
# CONFIG_TASKSTATS is not set
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=17
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
CONFIG_CGROUPS=y
# CONFIG_CGROUP_DEBUG is not set
CONFIG_CGROUP_NS=y
# CONFIG_CGROUP_FREEZER is not set
# CONFIG_CGROUP_DEVICE is not set
# CONFIG_CPUSETS is not set
-CONFIG_GROUP_SCHED=y
-CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_USER_SCHED=y
-# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUP_CPUACCT is not set
# CONFIG_RESOURCE_COUNTERS is not set
CONFIG_SYSFS_DEPRECATED=y
@@ -63,6 +74,7 @@ CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
# CONFIG_USER_NS is not set
# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -91,17 +103,17 @@ CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
+CONFIG_HAVE_SYSCALL_WRAPPERS=y
CONFIG_KRETPROBES=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -109,7 +121,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
CONFIG_MODVERSIONS=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
+CONFIG_INIT_ALL_POSSIBLE=y
CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
# CONFIG_BLK_DEV_IO_TRACE is not set
@@ -130,7 +142,6 @@ CONFIG_DEFAULT_DEADLINE=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="deadline"
CONFIG_PREEMPT_NOTIFIERS=y
-CONFIG_CLASSIC_RCU=y
# CONFIG_FREEZER is not set
#
@@ -161,6 +172,7 @@ CONFIG_S390_EXEC_PROTECT=y
CONFIG_MARCH_Z900=y
# CONFIG_MARCH_Z990 is not set
# CONFIG_MARCH_Z9_109 is not set
+# CONFIG_MARCH_Z10 is not set
CONFIG_PACK_STACK=y
# CONFIG_SMALL_STACK is not set
CONFIG_CHECK_STACK=y
@@ -174,7 +186,6 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
# CONFIG_PREEMPT_NONE is not set
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
-# CONFIG_PREEMPT_RCU is not set
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
@@ -195,7 +206,6 @@ CONFIG_MEMORY_HOTREMOVE=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
-CONFIG_RESOURCES_64BIT=y
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
@@ -207,7 +217,6 @@ CONFIG_UNEVICTABLE_LRU=y
#
CONFIG_MACHCHK_WARNING=y
CONFIG_QDIO=y
-# CONFIG_QDIO_DEBUG is not set
CONFIG_CHSC_SCH=m
#
@@ -227,15 +236,13 @@ CONFIG_PFAULT=y
# CONFIG_SHARED_KERNEL is not set
# CONFIG_CMM is not set
# CONFIG_PAGE_STATES is not set
-CONFIG_VIRT_TIMER=y
-CONFIG_VIRT_CPU_ACCOUNTING=y
# CONFIG_APPLDATA_BASE is not set
CONFIG_HZ_100=y
# CONFIG_HZ_250 is not set
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=100
-# CONFIG_SCHED_HRTICK is not set
+CONFIG_SCHED_HRTICK=y
CONFIG_S390_HYPFS_FS=y
CONFIG_KEXEC=y
# CONFIG_ZFCPDUMP is not set
@@ -245,6 +252,7 @@ CONFIG_NET=y
#
# Networking options
#
+CONFIG_COMPAT_NET_DEV_OPS=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
@@ -383,6 +391,7 @@ CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
# CONFIG_NET_SCH_INGRESS is not set
#
@@ -400,6 +409,7 @@ CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_FLOW=m
+# CONFIG_NET_CLS_CGROUP is not set
# CONFIG_NET_EMATCH is not set
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y
@@ -411,6 +421,7 @@ CONFIG_NET_ACT_NAT=m
# CONFIG_NET_ACT_SKBEDIT is not set
# CONFIG_NET_CLS_IND is not set
CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
#
# Network testing
@@ -428,6 +439,7 @@ CONFIG_CAN_VCAN=m
# CONFIG_CAN_DEBUG_DEVICES is not set
# CONFIG_AF_RXRPC is not set
# CONFIG_PHONET is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
# CONFIG_PCMCIA is not set
@@ -475,11 +487,15 @@ CONFIG_DASD_DIAG=y
CONFIG_DASD_EER=y
CONFIG_VIRTIO_BLK=m
CONFIG_MISC_DEVICES=y
-# CONFIG_EEPROM_93CX6 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_C2PORT is not set
#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+
+#
# SCSI device support
#
# CONFIG_RAID_ATTRS is not set
@@ -520,6 +536,7 @@ CONFIG_SCSI_FC_ATTRS=y
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
+# CONFIG_LIBFC is not set
# CONFIG_SCSI_DEBUG is not set
CONFIG_ZFCP=y
CONFIG_SCSI_DH=m
@@ -566,6 +583,10 @@ CONFIG_NET_ETHERNET=y
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
# CONFIG_TR is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
# CONFIG_WAN is not set
#
@@ -593,9 +614,11 @@ CONFIG_VIRTIO_NET=m
#
CONFIG_DEVKMEM=y
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
CONFIG_HVC_DRIVER=y
+CONFIG_HVC_IUCV=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=m
CONFIG_HW_RANDOM_VIRTIO=m
@@ -645,7 +668,6 @@ CONFIG_S390_VMUR=m
# CONFIG_NEW_LEDS is not set
CONFIG_ACCESSIBILITY=y
# CONFIG_STAGING is not set
-CONFIG_STAGING_EXCLUDE_BUILD=y
#
# File systems
@@ -668,6 +690,7 @@ CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -703,10 +726,7 @@ CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_HUGETLBFS is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -715,6 +735,7 @@ CONFIG_CONFIGFS_FS=m
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
@@ -808,6 +829,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
# CONFIG_FRAME_POINTER is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
@@ -818,15 +840,19 @@ CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_HAVE_FUNCTION_TRACER=y
#
# Tracers
#
+# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_CONTEXT_SWITCH_TRACER is not set
# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
CONFIG_SAMPLES=y
# CONFIG_SAMPLE_KOBJECT is not set
@@ -847,11 +873,17 @@ CONFIG_CRYPTO=y
#
CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
-CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
CONFIG_CRYPTO_GF128MUL=m
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_CRYPTD is not set
@@ -885,7 +917,7 @@ CONFIG_CRYPTO_HMAC=m
#
# Digest
#
-# CONFIG_CRYPTO_CRC32C is not set
+CONFIG_CRYPTO_CRC32C=m
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -942,6 +974,7 @@ CONFIG_S390_PRNG=m
# Library routines
#
CONFIG_BITREVERSE=m
+CONFIG_GENERIC_FIND_LAST_BIT=y
# CONFIG_CRC_CCITT is not set
# CONFIG_CRC16 is not set
CONFIG_CRC_T10DIF=y
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index ffdef5fe858..f3720defdd1 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -384,8 +384,8 @@ struct _lowcore
__u32 panic_magic; /* 0xe00 */
/* Per cpu primary space access list */
- __u8 pad_0xe04[0xe3c-0xe04]; /* 0xe04 */
- __u32 vdso_per_cpu_data; /* 0xe3c */
+ __u8 pad_0xe04[0xe38-0xe04]; /* 0xe04 */
+ __u64 vdso_per_cpu_data; /* 0xe38 */
__u32 paste[16]; /* 0xe40 */
__u8 pad13[0x11b8-0xe80]; /* 0xe80 */
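
The padding arithmetic above keeps the layout consistent: pad_0xe04 now covers
0xe38 - 0xe04 = 0x34 bytes, the 8-byte vdso_per_cpu_data occupies 0xe38-0xe3f,
and paste[] still begins at 0xe40 as before. A compile-time check of that kind
of invariant could look like this sketch (placed in any s390 init code):

    BUILD_BUG_ON(offsetof(struct _lowcore, paste) != 0xe40);
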
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index e7c5bfb7c75..026a37a94fc 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -95,6 +95,7 @@ asmlinkage void do_softirq(void)
local_irq_restore(flags);
}
+#ifdef CONFIG_PROC_FS
void init_irq_proc(void)
{
struct proc_dir_entry *root_irq_dir;
@@ -102,3 +103,4 @@ void init_irq_proc(void)
root_irq_dir = proc_mkdir("irq", NULL);
create_prof_cpu_mask(root_irq_dir);
}
+#endif
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 8ffee714f93..a46c3a21e26 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -891,10 +891,35 @@ prom_tba: .xword 0
tlb_type: .word 0 /* Must NOT end up in BSS */
.section ".fixup",#alloc,#execinstr
- .globl __ret_efault, __retl_efault
-__ret_efault:
+ .globl __ret_efault, __retl_efault, __ret_one, __retl_one
+ENTRY(__ret_efault)
ret
restore %g0, -EFAULT, %o0
-__retl_efault:
+ENDPROC(__ret_efault)
+
+ENTRY(__retl_efault)
retl
mov -EFAULT, %o0
+ENDPROC(__retl_efault)
+
+ENTRY(__retl_one)
+ retl
+ mov 1, %o0
+ENDPROC(__retl_one)
+
+ENTRY(__ret_one_asi)
+ wr %g0, ASI_AIUS, %asi
+ ret
+ restore %g0, 1, %o0
+ENDPROC(__ret_one_asi)
+
+ENTRY(__retl_one_asi)
+ wr %g0, ASI_AIUS, %asi
+ retl
+ mov 1, %o0
+ENDPROC(__retl_one_asi)
+
+ENTRY(__retl_o1)
+ retl
+ mov %o1, %o0
+ENDPROC(__retl_o1)
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 09f088ed4a6..f3577223c86 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -70,6 +70,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
printk(" on CPU%d, ip %08lx, registers:\n",
smp_processor_id(), regs->tpc);
show_regs(regs);
+ dump_stack();
bust_spinlocks(0);
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 92e0dda141a..1ae8cdd7e70 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -133,11 +133,16 @@ int __init pcr_arch_init(void)
case cheetah:
case cheetah_plus:
- case spitfire:
pcr_ops = &direct_pcr_ops;
pcr_enable = PCR_SUN4U_ENABLE;
break;
+ case spitfire:
+ /* UltraSPARC-I/II and derivatives lack a profile
+ * counter overflow interrupt so we can't make use of
+ * their hardware currently.
+ */
+ /* fallthrough */
default:
err = -ENODEV;
goto out_unregister;
diff --git a/arch/sparc/lib/GENbzero.S b/arch/sparc/lib/GENbzero.S
index 6a4f956a2f7..8e7a843ddd8 100644
--- a/arch/sparc/lib/GENbzero.S
+++ b/arch/sparc/lib/GENbzero.S
@@ -6,13 +6,9 @@
#define EX_ST(x,y) \
98: x,y; \
- .section .fixup; \
- .align 4; \
-99: retl; \
- mov %o1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __retl_o1; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S
index 2b9df99e87f..b7d0bd6b140 100644
--- a/arch/sparc/lib/GENcopy_from_user.S
+++ b/arch/sparc/lib/GENcopy_from_user.S
@@ -5,13 +5,9 @@
#define EX_LD(x) \
98: x; \
- .section .fixup; \
- .align 4; \
-99: retl; \
- mov 1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __retl_one; \
.text; \
.align 4;
@@ -27,7 +23,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, memcpy_user_stub; \
+ bne,pn %icc, ___copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S
index bb3f7084daf..780550e1afc 100644
--- a/arch/sparc/lib/GENcopy_to_user.S
+++ b/arch/sparc/lib/GENcopy_to_user.S
@@ -5,13 +5,9 @@
#define EX_ST(x) \
98: x; \
- .section .fixup; \
- .align 4; \
-99: retl; \
- mov 1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __retl_one; \
.text; \
.align 4;
@@ -31,7 +27,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, memcpy_user_stub; \
+ bne,pn %icc, ___copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S
index c77ef5f2210..119ccb9a54f 100644
--- a/arch/sparc/lib/NG2copy_from_user.S
+++ b/arch/sparc/lib/NG2copy_from_user.S
@@ -5,14 +5,9 @@
#define EX_LD(x) \
98: x; \
- .section .fixup; \
- .align 4; \
-99: wr %g0, ASI_AIUS, %asi;\
- retl; \
- mov 1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __retl_one_asi;\
.text; \
.align 4;
@@ -33,7 +28,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, memcpy_user_stub; \
+ bne,pn %icc, ___copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S
index 4bd4093acbb..7fe1ccefd9d 100644
--- a/arch/sparc/lib/NG2copy_to_user.S
+++ b/arch/sparc/lib/NG2copy_to_user.S
@@ -5,14 +5,9 @@
#define EX_ST(x) \
98: x; \
- .section .fixup; \
- .align 4; \
-99: wr %g0, ASI_AIUS, %asi;\
- retl; \
- mov 1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __retl_one_asi;\
.text; \
.align 4;
@@ -42,7 +37,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, memcpy_user_stub; \
+ bne,pn %icc, ___copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/NGbzero.S b/arch/sparc/lib/NGbzero.S
index 814d5f7a45e..beab29bf419 100644
--- a/arch/sparc/lib/NGbzero.S
+++ b/arch/sparc/lib/NGbzero.S
@@ -6,13 +6,9 @@
#define EX_ST(x,y) \
98: x,y; \
- .section .fixup; \
- .align 4; \
-99: retl; \
- mov %o1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __retl_o1; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S
index e7f433f71b4..5d1e4d1ac21 100644
--- a/arch/sparc/lib/NGcopy_from_user.S
+++ b/arch/sparc/lib/NGcopy_from_user.S
@@ -5,14 +5,9 @@
#define EX_LD(x) \
98: x; \
- .section .fixup; \
- .align 4; \
-99: wr %g0, ASI_AIUS, %asi;\
- ret; \
- restore %g0, 1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __ret_one_asi;\
.text; \
.align 4;
@@ -30,7 +25,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, memcpy_user_stub; \
+ bne,pn %icc, ___copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S
index 6ea01c5532a..ff630dcb273 100644
--- a/arch/sparc/lib/NGcopy_to_user.S
+++ b/arch/sparc/lib/NGcopy_to_user.S
@@ -5,14 +5,9 @@
#define EX_ST(x) \
98: x; \
- .section .fixup; \
- .align 4; \
-99: wr %g0, ASI_AIUS, %asi;\
- ret; \
- restore %g0, 1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __ret_one_asi;\
.text; \
.align 4;
@@ -33,7 +28,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, memcpy_user_stub; \
+ bne,pn %icc, ___copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S
index 3192b0bf4fa..a6ae2ea04bf 100644
--- a/arch/sparc/lib/U1copy_from_user.S
+++ b/arch/sparc/lib/U1copy_from_user.S
@@ -5,13 +5,9 @@
#define EX_LD(x) \
98: x; \
- .section .fixup; \
- .align 4; \
-99: retl; \
- mov 1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __retl_one; \
.text; \
.align 4;
@@ -27,7 +23,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, memcpy_user_stub; \
+ bne,pn %icc, ___copy_in_user; \
nop; \
#include "U1memcpy.S"
diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S
index d1210ffb0b8..f4b970eeb48 100644
--- a/arch/sparc/lib/U1copy_to_user.S
+++ b/arch/sparc/lib/U1copy_to_user.S
@@ -5,13 +5,9 @@
#define EX_ST(x) \
98: x; \
- .section .fixup; \
- .align 4; \
-99: retl; \
- mov 1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __retl_one; \
.text; \
.align 4;
@@ -27,7 +23,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, memcpy_user_stub; \
+ bne,pn %icc, ___copy_in_user; \
nop; \
#include "U1memcpy.S"
diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S
index f5bfc8d9d21..b1acd1331c3 100644
--- a/arch/sparc/lib/U3copy_from_user.S
+++ b/arch/sparc/lib/U3copy_from_user.S
@@ -5,13 +5,9 @@
#define EX_LD(x) \
98: x; \
- .section .fixup; \
- .align 4; \
-99: retl; \
- mov 1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __retl_one; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S
index 2334f111bb0..ef1e493afdf 100644
--- a/arch/sparc/lib/U3copy_to_user.S
+++ b/arch/sparc/lib/U3copy_to_user.S
@@ -5,13 +5,9 @@
#define EX_ST(x) \
98: x; \
- .section .fixup; \
- .align 4; \
-99: retl; \
- mov 1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __retl_one; \
.text; \
.align 4;
@@ -27,7 +23,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, memcpy_user_stub; \
+ bne,pn %icc, ___copy_in_user; \
nop; \
#include "U3memcpy.S"
diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S
index c7bbae8c590..b6557297440 100644
--- a/arch/sparc/lib/bzero.S
+++ b/arch/sparc/lib/bzero.S
@@ -88,13 +88,9 @@ __bzero_done:
#define EX_ST(x,y) \
98: x,y; \
- .section .fixup; \
- .align 4; \
-99: retl; \
- mov %o1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __retl_o1; \
.text; \
.align 4;
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 650af3f21f7..302c0e60dc2 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -3,19 +3,16 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
+#include <linux/linkage.h>
#include <asm/asi.h>
#define XCC xcc
#define EX(x,y) \
98: x,y; \
- .section .fixup; \
- .align 4; \
-99: retl; \
- mov 1, %o0; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, 99b; \
+ .word 98b, __retl_one; \
.text; \
.align 4;
@@ -31,18 +28,7 @@
* to copy register windows around during thread cloning.
*/
- .globl ___copy_in_user
- .type ___copy_in_user,#function
-___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
- /* Writing to %asi is _expensive_ so we hardcode it.
- * Reading %asi to check for KERNEL_DS is comparatively
- * cheap.
- */
- rd %asi, %g1
- cmp %g1, ASI_AIUS
- bne,pn %icc, memcpy_user_stub
- nop
-
+ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
cmp %o2, 0
be,pn %XCC, 85f
or %o0, %o1, %o3
@@ -53,22 +39,24 @@ ___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
/* 16 < len <= 64 */
andcc %o3, 0x7, %g0
bne,pn %XCC, 90f
- sub %o0, %o1, %o3
+ nop
andn %o2, 0x7, %o4
and %o2, 0x7, %o2
1: subcc %o4, 0x8, %o4
EX(ldxa [%o1] %asi, %o5)
- EX(stxa %o5, [%o1 + %o3] ASI_AIUS)
+ EX(stxa %o5, [%o0] %asi)
+ add %o1, 0x8, %o1
bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
+ add %o0, 0x8, %o0
andcc %o2, 0x4, %g0
be,pt %XCC, 1f
nop
sub %o2, 0x4, %o2
EX(lduwa [%o1] %asi, %o5)
- EX(stwa %o5, [%o1 + %o3] ASI_AIUS)
+ EX(stwa %o5, [%o0] %asi)
add %o1, 0x4, %o1
+ add %o0, 0x4, %o0
1: cmp %o2, 0
be,pt %XCC, 85f
nop
@@ -78,14 +66,15 @@ ___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
80: /* 0 < len <= 16 */
andcc %o3, 0x3, %g0
bne,pn %XCC, 90f
- sub %o0, %o1, %o3
+ nop
82:
subcc %o2, 4, %o2
EX(lduwa [%o1] %asi, %g1)
- EX(stwa %g1, [%o1 + %o3] ASI_AIUS)
+ EX(stwa %g1, [%o0] %asi)
+ add %o1, 4, %o1
bgu,pt %XCC, 82b
- add %o1, 4, %o1
+ add %o0, 4, %o0
85: retl
clr %o0
@@ -94,26 +83,10 @@ ___copy_in_user: /* %o0=dst, %o1=src, %o2=len */
90:
subcc %o2, 1, %o2
EX(lduba [%o1] %asi, %g1)
- EX(stba %g1, [%o1 + %o3] ASI_AIUS)
+ EX(stba %g1, [%o0] %asi)
+ add %o1, 1, %o1
bgu,pt %XCC, 90b
- add %o1, 1, %o1
+ add %o0, 1, %o0
retl
clr %o0
-
- .size ___copy_in_user, .-___copy_in_user
-
- /* Act like copy_{to,in}_user(), ie. return zero instead
- * of original destination pointer. This is invoked when
- * copy_{to,in}_user() finds that %asi is kernel space.
- */
- .globl memcpy_user_stub
- .type memcpy_user_stub,#function
-memcpy_user_stub:
- save %sp, -192, %sp
- mov %i0, %o0
- mov %i1, %o1
- call memcpy
- mov %i2, %o2
- ret
- restore %g0, %g0, %o0
- .size memcpy_user_stub, .-memcpy_user_stub
+ENDPROC(___copy_in_user)
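
Across the sparc library files above, the per-call-site ".section .fixup" stubs
are replaced by shared stubs (__retl_one, __ret_one_asi, __retl_o1, ...) whose
addresses are written directly into __ex_table. Each table entry pairs a
potentially faulting user access with a fixup address; a C-level sketch of how
such a table is consumed (entry layout assumed from the ".word 98b, fixup"
pairs above, not the exact sparc implementation):

    struct exception_table_entry {
            unsigned int insn;      /* address of the faulting load/store */
            unsigned int fixup;     /* where to resume, e.g. __retl_one   */
    };

    /* Called from the fault handler with the trapping PC. */
    static unsigned long search_extable_sketch(const struct exception_table_entry *start,
                                               const struct exception_table_entry *end,
                                               unsigned long fault_pc)
    {
            const struct exception_table_entry *e;

            for (e = start; e < end; e++)
                    if (e->insn == fault_pc)
                            return e->fixup;   /* continue at the shared stub    */
            return 0;                          /* no entry: genuine kernel fault */
    }
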
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f760a22f95d..1042d69b267 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -5,7 +5,7 @@ mainmenu "Linux Kernel Configuration for x86"
config 64BIT
bool "64-bit kernel" if ARCH = "x86"
default ARCH = "x86_64"
- help
+ ---help---
Say yes to build a 64-bit kernel - formerly known as x86_64
Say no to build a 32-bit kernel - formerly known as i386
@@ -34,8 +34,8 @@ config X86
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
- select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
- select HAVE_ARCH_KGDB if !X86_VOYAGER
+ select HAVE_KVM
+ select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select HAVE_GENERIC_DMA_COHERENT if X86_32
select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -140,11 +140,9 @@ config HAVE_CPUMASK_OF_CPU_MAP
config ARCH_HIBERNATION_POSSIBLE
def_bool y
- depends on !SMP || !X86_VOYAGER
config ARCH_SUSPEND_POSSIBLE
def_bool y
- depends on !X86_VOYAGER
config ZONE_DMA32
bool
@@ -174,11 +172,6 @@ config GENERIC_PENDING_IRQ
depends on GENERIC_HARDIRQS && SMP
default y
-config X86_SMP
- bool
- depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
- default y
-
config USE_GENERIC_SMP_HELPERS
def_bool y
depends on SMP
@@ -194,17 +187,11 @@ config X86_64_SMP
config X86_HT
bool
depends on SMP
- depends on (X86_32 && !X86_VOYAGER) || X86_64
- default y
-
-config X86_BIOS_REBOOT
- bool
- depends on !X86_VOYAGER
default y
config X86_TRAMPOLINE
bool
- depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP)
+ depends on SMP || (64BIT && ACPI_SLEEP)
default y
config X86_32_LAZY_GS
@@ -248,14 +235,10 @@ config SMP
If you don't know what to do here, say N.
-config X86_HAS_BOOT_CPU_ID
- def_bool y
- depends on X86_VOYAGER
-
config SPARSE_IRQ
bool "Support sparse irq numbering"
depends on PCI_MSI || HT_IRQ
- help
+ ---help---
This enables support for sparse irqs. This is useful for distro
kernels that want to define a high CONFIG_NR_CPUS value but still
want to have low kernel memory footprint on smaller machines.
@@ -269,114 +252,114 @@ config NUMA_MIGRATE_IRQ_DESC
bool "Move irq desc when changing irq smp_affinity"
depends on SPARSE_IRQ && NUMA
default n
- help
+ ---help---
This enables moving irq_desc to cpu/node that irq will use handled.
If you don't know what to do here, say N.
-config X86_FIND_SMP_CONFIG
- def_bool y
- depends on X86_MPPARSE || X86_VOYAGER
-
config X86_MPPARSE
bool "Enable MPS table" if ACPI
default y
depends on X86_LOCAL_APIC
- help
+ ---help---
For old smp systems that do not have proper acpi support. Newer systems
(esp with 64bit cpus) with acpi support, MADT and DSDT will override it
-choice
- prompt "Subarchitecture Type"
- default X86_PC
+config X86_BIGSMP
+ bool "Support for big SMP systems with more than 8 CPUs"
+ depends on X86_32 && SMP
+ ---help---
+ This option is needed for the systems that have more than 8 CPUs
-config X86_PC
- bool "PC-compatible"
- help
- Choose this option if your computer is a standard PC or compatible.
+config X86_EXTENDED_PLATFORM
+ bool "Support for extended (non-PC) x86 platforms"
+ default y
+ ---help---
+ If you disable this option then the kernel will only support
+ standard PC platforms. (which covers the vast majority of
+ systems out there.)
+
+ If you enable this option then you'll be able to select a number
+ of non-PC x86 platforms.
+
+ If you have one of these systems, or if you want to build a
+ generic distribution kernel, say Y here - otherwise say N.
+
+# This is an alphabetically sorted list of 64 bit extended platforms
+# Please maintain the alphabetic order if and when there are additions
+
+config X86_VSMP
+ bool "ScaleMP vSMP"
+ select PARAVIRT
+ depends on X86_64 && PCI
+ depends on X86_EXTENDED_PLATFORM
+ ---help---
+ Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is
+ supposed to run on these EM64T-based machines. Only choose this option
+ if you have one of these machines.
+
+config X86_UV
+ bool "SGI Ultraviolet"
+ depends on X86_64
+ depends on X86_EXTENDED_PLATFORM
+ ---help---
+ This option is needed in order to support SGI Ultraviolet systems.
+ If you don't have one of these, you should say N here.
+
+# Following is an alphabetically sorted list of 32 bit extended platforms
+# Please maintain the alphabetic order if and when there are additions
config X86_ELAN
bool "AMD Elan"
depends on X86_32
- help
+ depends on X86_EXTENDED_PLATFORM
+ ---help---
Select this for an AMD Elan processor.
Do not use this option for K6/Athlon/Opteron processors!
If unsure, choose "PC-compatible" instead.
-config X86_VOYAGER
- bool "Voyager (NCR)"
- depends on X86_32 && (SMP || BROKEN) && !PCI
- help
- Voyager is an MCA-based 32-way capable SMP architecture proprietary
- to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based.
-
- *** WARNING ***
-
- If you do not specifically know you have a Voyager based machine,
- say N here, otherwise the kernel you build will not be bootable.
-
-config X86_GENERICARCH
- bool "Generic architecture"
+config X86_RDC321X
+ bool "RDC R-321x SoC"
depends on X86_32
- help
- This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default
+ depends on X86_EXTENDED_PLATFORM
+ select M486
+ select X86_REBOOTFIXUPS
+ ---help---
+ This option is needed for RDC R-321x system-on-chip, also known
+ as R-8610-(G).
+ If you don't have one of these chips, you should say N here.
+
+config X86_32_NON_STANDARD
+ bool "Support non-standard 32-bit SMP architectures"
+ depends on X86_32 && SMP
+ depends on X86_EXTENDED_PLATFORM
+ ---help---
+ This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default
subarchitectures. It is intended for a generic binary kernel.
if you select them all, kernel will probe it one by one. and will
fallback to default.
-if X86_GENERICARCH
+# Alphabetically sorted list of Non standard 32 bit platforms
config X86_NUMAQ
bool "NUMAQ (IBM/Sequent)"
- depends on SMP && X86_32 && PCI && X86_MPPARSE
+ depends on X86_32_NON_STANDARD
select NUMA
- help
+ select X86_MPPARSE
+ ---help---
This option is used for getting Linux to run on a NUMAQ (IBM/Sequent)
NUMA multiquad box. This changes the way that processors are
bootstrapped, and uses Clustered Logical APIC addressing mode instead
of Flat Logical. You will need a new lynxer.elf file to flash your
firmware with - send email to <Martin.Bligh@us.ibm.com>.
-config X86_SUMMIT
- bool "Summit/EXA (IBM x440)"
- depends on X86_32 && SMP
- help
- This option is needed for IBM systems that use the Summit/EXA chipset.
- In particular, it is needed for the x440.
-
-config X86_ES7000
- bool "Support for Unisys ES7000 IA32 series"
- depends on X86_32 && SMP
- help
- Support for Unisys ES7000 systems. Say 'Y' here if this kernel is
- supposed to run on an IA32-based Unisys ES7000 system.
-
-config X86_BIGSMP
- bool "Support for big SMP systems with more than 8 CPUs"
- depends on X86_32 && SMP
- help
- This option is needed for the systems that have more than 8 CPUs
- and if the system is not of any sub-arch type above.
-
-endif
-
-config X86_VSMP
- bool "Support for ScaleMP vSMP"
- select PARAVIRT
- depends on X86_64 && PCI
- help
- Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is
- supposed to run on these EM64T-based machines. Only choose this option
- if you have one of these machines.
-
-endchoice
-
config X86_VISWS
bool "SGI 320/540 (Visual Workstation)"
- depends on X86_32 && PCI && !X86_VOYAGER && X86_MPPARSE && PCI_GODIRECT
- help
+ depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT
+ depends on X86_32_NON_STANDARD
+ ---help---
The SGI Visual Workstation series is an IA32-based workstation
based on SGI systems chips with some legacy PC hardware attached.
@@ -385,28 +368,38 @@ config X86_VISWS
A kernel compiled for the Visual Workstation will run on general
PCs as well. See <file:Documentation/sgi-visws.txt> for details.
-config X86_RDC321X
- bool "RDC R-321x SoC"
- depends on X86_32
- select M486
- select X86_REBOOTFIXUPS
- help
- This option is needed for RDC R-321x system-on-chip, also known
- as R-8610-(G).
- If you don't have one of these chips, you should say N here.
+config X86_SUMMIT
+ bool "Summit/EXA (IBM x440)"
+ depends on X86_32_NON_STANDARD
+ ---help---
+ This option is needed for IBM systems that use the Summit/EXA chipset.
+ In particular, it is needed for the x440.
-config X86_UV
- bool "SGI Ultraviolet"
- depends on X86_64
- help
- This option is needed in order to support SGI Ultraviolet systems.
- If you don't have one of these, you should say N here.
+config X86_ES7000
+ bool "Unisys ES7000 IA32 series"
+ depends on X86_32_NON_STANDARD && X86_BIGSMP
+ ---help---
+ Support for Unisys ES7000 systems. Say 'Y' here if this kernel is
+ supposed to run on an IA32-based Unisys ES7000 system.
+
+config X86_VOYAGER
+ bool "Voyager (NCR)"
+ depends on SMP && !PCI && BROKEN
+ depends on X86_32_NON_STANDARD
+ ---help---
+ Voyager is an MCA-based 32-way capable SMP architecture proprietary
+ to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based.
+
+ *** WARNING ***
+
+ If you do not specifically know you have a Voyager based machine,
+ say N here, otherwise the kernel you build will not be bootable.
config SCHED_OMIT_FRAME_POINTER
def_bool y
prompt "Single-depth WCHAN output"
depends on X86
- help
+ ---help---
Calculate simpler /proc/<PID>/wchan values. If this option
is disabled then wchan values will recurse back to the
caller function. This provides more accurate wchan values,
@@ -416,7 +409,7 @@ config SCHED_OMIT_FRAME_POINTER
menuconfig PARAVIRT_GUEST
bool "Paravirtualized guest support"
- help
+ ---help---
Say Y here to get to see options related to running Linux under
various hypervisors. This option alone does not add any kernel code.
@@ -430,8 +423,7 @@ config VMI
bool "VMI Guest support"
select PARAVIRT
depends on X86_32
- depends on !X86_VOYAGER
- help
+ ---help---
VMI provides a paravirtualized interface to the VMware ESX server
(it could be used by other hypervisors in theory too, but is not
at the moment), by linking the kernel to a GPL-ed ROM module
@@ -441,8 +433,7 @@ config KVM_CLOCK
bool "KVM paravirtualized clock"
select PARAVIRT
select PARAVIRT_CLOCK
- depends on !X86_VOYAGER
- help
+ ---help---
Turning on this option will allow you to run a paravirtualized clock
when running over the KVM hypervisor. Instead of relying on a PIT
(or probably other) emulation by the underlying device model, the host
@@ -452,17 +443,15 @@ config KVM_CLOCK
config KVM_GUEST
bool "KVM Guest support"
select PARAVIRT
- depends on !X86_VOYAGER
- help
- This option enables various optimizations for running under the KVM
- hypervisor.
+ ---help---
+ This option enables various optimizations for running under the KVM
+ hypervisor.
source "arch/x86/lguest/Kconfig"
config PARAVIRT
bool "Enable paravirtualization code"
- depends on !X86_VOYAGER
- help
+ ---help---
This changes the kernel so it can modify itself when it is run
under a hypervisor, potentially improving performance significantly
over full virtualization. However, when run without a hypervisor
@@ -475,51 +464,51 @@ config PARAVIRT_CLOCK
endif
config PARAVIRT_DEBUG
- bool "paravirt-ops debugging"
- depends on PARAVIRT && DEBUG_KERNEL
- help
- Enable to debug paravirt_ops internals. Specifically, BUG if
- a paravirt_op is missing when it is called.
+ bool "paravirt-ops debugging"
+ depends on PARAVIRT && DEBUG_KERNEL
+ ---help---
+ Enable to debug paravirt_ops internals. Specifically, BUG if
+ a paravirt_op is missing when it is called.
config MEMTEST
bool "Memtest"
- help
+ ---help---
This option adds a kernel parameter 'memtest', which allows memtest
to be set.
- memtest=0, mean disabled; -- default
- memtest=1, mean do 1 test pattern;
- ...
- memtest=4, mean do 4 test patterns.
+ memtest=0, means disabled; -- default
+ memtest=1, means do 1 test pattern;
+ ...
+ memtest=4, means do 4 test patterns.
If you are unsure how to answer this question, answer N.
config X86_SUMMIT_NUMA
def_bool y
- depends on X86_32 && NUMA && X86_GENERICARCH
+ depends on X86_32 && NUMA && X86_32_NON_STANDARD
config X86_CYCLONE_TIMER
def_bool y
- depends on X86_GENERICARCH
+ depends on X86_32_NON_STANDARD
source "arch/x86/Kconfig.cpu"
config HPET_TIMER
def_bool X86_64
prompt "HPET Timer Support" if X86_32
- help
- Use the IA-PC HPET (High Precision Event Timer) to manage
- time in preference to the PIT and RTC, if a HPET is
- present.
- HPET is the next generation timer replacing legacy 8254s.
- The HPET provides a stable time base on SMP
- systems, unlike the TSC, but it is more expensive to access,
- as it is off-chip. You can find the HPET spec at
- <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
+ ---help---
+ Use the IA-PC HPET (High Precision Event Timer) to manage
+ time in preference to the PIT and RTC, if a HPET is
+ present.
+ HPET is the next generation timer replacing legacy 8254s.
+ The HPET provides a stable time base on SMP
+ systems, unlike the TSC, but it is more expensive to access,
+ as it is off-chip. You can find the HPET spec at
+ <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
- You can safely choose Y here. However, HPET will only be
- activated if the platform and the BIOS support this feature.
- Otherwise the 8254 will be used for timing services.
+ You can safely choose Y here. However, HPET will only be
+ activated if the platform and the BIOS support this feature.
+ Otherwise the 8254 will be used for timing services.
- Choose N to continue using the legacy 8254 timer.
+ Choose N to continue using the legacy 8254 timer.
config HPET_EMULATE_RTC
def_bool y
@@ -530,7 +519,7 @@ config HPET_EMULATE_RTC
config DMI
default y
bool "Enable DMI scanning" if EMBEDDED
- help
+ ---help---
Enable scanning of DMI to identify machine quirks. Say Y
here unless you have verified that your setup is not
affected by entries in the DMI blacklist. Required by PNP
@@ -542,7 +531,7 @@ config GART_IOMMU
select SWIOTLB
select AGP
depends on X86_64 && PCI
- help
+ ---help---
Support for full DMA access of devices with 32bit memory access only
on systems with more than 3GB. This is usually needed for USB,
sound, many IDE/SATA chipsets and some other devices.
@@ -557,7 +546,7 @@ config CALGARY_IOMMU
bool "IBM Calgary IOMMU support"
select SWIOTLB
depends on X86_64 && PCI && EXPERIMENTAL
- help
+ ---help---
Support for hardware IOMMUs in IBM's xSeries x366 and x460
systems. Needed to run systems with more than 3GB of memory
properly with 32-bit PCI devices that do not support DAC
@@ -575,7 +564,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
def_bool y
prompt "Should Calgary be enabled by default?"
depends on CALGARY_IOMMU
- help
+ ---help---
Should Calgary be enabled by default? If you choose 'y', Calgary
will be used (if it exists). If you choose 'n', Calgary will not be
used even if it exists. If you choose 'n' and would like to use
@@ -587,7 +576,7 @@ config AMD_IOMMU
select SWIOTLB
select PCI_MSI
depends on X86_64 && PCI && ACPI
- help
+ ---help---
With this option you can enable support for AMD IOMMU hardware in
your system. An IOMMU is a hardware component which provides
remapping of DMA memory accesses from devices. With an AMD IOMMU you
@@ -602,7 +591,7 @@ config AMD_IOMMU_STATS
bool "Export AMD IOMMU statistics to debugfs"
depends on AMD_IOMMU
select DEBUG_FS
- help
+ ---help---
This option enables code in the AMD IOMMU driver to collect various
statistics about what's happening in the driver and exports that
information to userspace via debugfs.
@@ -611,7 +600,7 @@ config AMD_IOMMU_STATS
# need this always selected by IOMMU for the VIA workaround
config SWIOTLB
def_bool y if X86_64
- help
+ ---help---
Support for software bounce buffers used on x86-64 systems
which don't have a hardware IOMMU (e.g. the current generation
of Intel's x86-64 CPUs). Using this PCI devices which can only
@@ -629,7 +618,7 @@ config MAXSMP
depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
select CPUMASK_OFFSTACK
default n
- help
+ ---help---
Configure maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
@@ -640,7 +629,7 @@ config NR_CPUS
default "4096" if MAXSMP
default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
default "8" if SMP
- help
+ ---help---
This allows you to specify the maximum number of CPUs which this
kernel will support. The maximum supported value is 512 and the
minimum value which makes sense is 2.
@@ -651,7 +640,7 @@ config NR_CPUS
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on X86_HT
- help
+ ---help---
SMT scheduler support improves the CPU scheduler's decision making
when dealing with Intel Pentium 4 chips with HyperThreading at a
cost of slightly increased overhead in some places. If unsure say
@@ -661,7 +650,7 @@ config SCHED_MC
def_bool y
prompt "Multi-core scheduler support"
depends on X86_HT
- help
+ ---help---
Multi-core scheduler support improves the CPU scheduler's decision
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
@@ -670,8 +659,8 @@ source "kernel/Kconfig.preempt"
config X86_UP_APIC
bool "Local APIC support on uniprocessors"
- depends on X86_32 && !SMP && !(X86_VOYAGER || X86_GENERICARCH)
- help
+ depends on X86_32 && !SMP && !X86_32_NON_STANDARD
+ ---help---
A local APIC (Advanced Programmable Interrupt Controller) is an
integrated interrupt controller in the CPU. If you have a single-CPU
system which has a processor with a local APIC, you can say Y here to
@@ -684,7 +673,7 @@ config X86_UP_APIC
config X86_UP_IOAPIC
bool "IO-APIC support on uniprocessors"
depends on X86_UP_APIC
- help
+ ---help---
An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an
SMP-capable replacement for PC-style interrupt controllers. Most
SMP systems and many recent uniprocessor systems have one.
@@ -695,11 +684,11 @@ config X86_UP_IOAPIC
config X86_LOCAL_APIC
def_bool y
- depends on X86_64 || (X86_32 && (X86_UP_APIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
+ depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
config X86_IO_APIC
def_bool y
- depends on X86_64 || (X86_32 && (X86_UP_IOAPIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
+ depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
config X86_VISWS_APIC
def_bool y
@@ -709,7 +698,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
bool "Reroute for broken boot IRQs"
default n
depends on X86_IO_APIC
- help
+ ---help---
This option enables a workaround that fixes a source of
spurious interrupts. This is recommended when threaded
interrupt handling is used on systems where the generation of
@@ -731,7 +720,6 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
config X86_MCE
bool "Machine Check Exception"
- depends on !X86_VOYAGER
---help---
Machine Check Exception support allows the processor to notify the
kernel if it detects a problem (e.g. overheating, component failure).
@@ -750,7 +738,7 @@ config X86_MCE_INTEL
def_bool y
prompt "Intel MCE features"
depends on X86_64 && X86_MCE && X86_LOCAL_APIC
- help
+ ---help---
Additional support for intel specific MCE features such as
the thermal monitor.
@@ -758,14 +746,14 @@ config X86_MCE_AMD
def_bool y
prompt "AMD MCE features"
depends on X86_64 && X86_MCE && X86_LOCAL_APIC
- help
+ ---help---
Additional support for AMD specific MCE features such as
the DRAM Error Threshold.
config X86_MCE_NONFATAL
tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
depends on X86_32 && X86_MCE
- help
+ ---help---
Enabling this feature starts a timer that triggers every 5 seconds which
will look at the machine check registers to see if anything happened.
Non-fatal problems automatically get corrected (but still logged).
@@ -778,7 +766,7 @@ config X86_MCE_NONFATAL
config X86_MCE_P4THERMAL
bool "check for P4 thermal throttling interrupt."
depends on X86_32 && X86_MCE && (X86_UP_APIC || SMP)
- help
+ ---help---
Enabling this feature will cause a message to be printed when the P4
enters thermal throttling.
@@ -786,11 +774,11 @@ config VM86
bool "Enable VM86 support" if EMBEDDED
default y
depends on X86_32
- help
- This option is required by programs like DOSEMU to run 16-bit legacy
+ ---help---
+ This option is required by programs like DOSEMU to run 16-bit legacy
code on X86 processors. It also may be needed by software like
- XFree86 to initialize some video cards via BIOS. Disabling this
- option saves about 6k.
+ XFree86 to initialize some video cards via BIOS. Disabling this
+ option saves about 6k.
config TOSHIBA
tristate "Toshiba Laptop support"
@@ -864,33 +852,33 @@ config MICROCODE
module will be called microcode.
config MICROCODE_INTEL
- bool "Intel microcode patch loading support"
- depends on MICROCODE
- default MICROCODE
- select FW_LOADER
- --help---
- This options enables microcode patch loading support for Intel
- processors.
-
- For latest news and information on obtaining all the required
- Intel ingredients for this driver, check:
- <http://www.urbanmyth.org/microcode/>.
+ bool "Intel microcode patch loading support"
+ depends on MICROCODE
+ default MICROCODE
+ select FW_LOADER
+ ---help---
+ This option enables microcode patch loading support for Intel
+ processors.
+
+ For latest news and information on obtaining all the required
+ Intel ingredients for this driver, check:
+ <http://www.urbanmyth.org/microcode/>.
config MICROCODE_AMD
- bool "AMD microcode patch loading support"
- depends on MICROCODE
- select FW_LOADER
- --help---
- If you select this option, microcode patch loading support for AMD
- processors will be enabled.
+ bool "AMD microcode patch loading support"
+ depends on MICROCODE
+ select FW_LOADER
+ ---help---
+ If you select this option, microcode patch loading support for AMD
+ processors will be enabled.
- config MICROCODE_OLD_INTERFACE
+config MICROCODE_OLD_INTERFACE
def_bool y
depends on MICROCODE
config X86_MSR
tristate "/dev/cpu/*/msr - Model-specific register support"
- help
+ ---help---
This device gives privileged processes access to the x86
Model-Specific Registers (MSRs). It is a character device with
major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
@@ -899,7 +887,7 @@ config X86_MSR
config X86_CPUID
tristate "/dev/cpu/*/cpuid - CPU information support"
- help
+ ---help---
This device gives processes access to the x86 CPUID instruction to
be executed on a specific processor. It is a character device
with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
@@ -951,7 +939,7 @@ config NOHIGHMEM
config HIGHMEM4G
bool "4GB"
depends on !X86_NUMAQ
- help
+ ---help---
Select this if you have a 32-bit processor and between 1 and 4
gigabytes of physical RAM.
@@ -959,7 +947,7 @@ config HIGHMEM64G
bool "64GB"
depends on !M386 && !M486
select X86_PAE
- help
+ ---help---
Select this if you have a 32-bit processor and more than 4
gigabytes of physical RAM.
@@ -970,7 +958,7 @@ choice
prompt "Memory split" if EMBEDDED
default VMSPLIT_3G
depends on X86_32
- help
+ ---help---
Select the desired split between kernel and user memory.
If the address range available to the kernel is less than the
@@ -1016,20 +1004,20 @@ config HIGHMEM
config X86_PAE
bool "PAE (Physical Address Extension) Support"
depends on X86_32 && !HIGHMEM4G
- help
+ ---help---
PAE is required for NX support, and furthermore enables
larger swapspace support for non-overcommit purposes. It
has the cost of more pagetable lookup overhead, and also
consumes more pagetable space per process.
config ARCH_PHYS_ADDR_T_64BIT
- def_bool X86_64 || X86_PAE
+ def_bool X86_64 || X86_PAE
config DIRECT_GBPAGES
bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
default y
depends on X86_64
- help
+ ---help---
Allow the kernel linear mapping to use 1GB pages on CPUs that
support it. This can improve the kernel's performance a tiny bit by
reducing TLB pressure. If in doubt, say "Y".
@@ -1039,9 +1027,8 @@ config NUMA
bool "Numa Memory Allocation and Scheduler Support"
depends on SMP
depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
- default n if X86_PC
default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
- help
+ ---help---
Enable NUMA (Non Uniform Memory Access) support.
The kernel will try to allocate memory used by a CPU on the
@@ -1064,19 +1051,19 @@ config K8_NUMA
def_bool y
prompt "Old style AMD Opteron NUMA detection"
depends on X86_64 && NUMA && PCI
- help
- Enable K8 NUMA node topology detection. You should say Y here if
- you have a multi processor AMD K8 system. This uses an old
- method to read the NUMA configuration directly from the builtin
- Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
- instead, which also takes priority if both are compiled in.
+ ---help---
+ Enable K8 NUMA node topology detection. You should say Y here if
+ you have a multiprocessor AMD K8 system. This uses an old
+ method to read the NUMA configuration directly from the built-in
+ Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
+ instead, which also takes priority if both are compiled in.
config X86_64_ACPI_NUMA
def_bool y
prompt "ACPI NUMA detection"
depends on X86_64 && NUMA && ACPI && PCI
select ACPI_NUMA
- help
+ ---help---
Enable ACPI SRAT based node topology detection.
# Some NUMA nodes have memory ranges that span
@@ -1091,7 +1078,7 @@ config NODES_SPAN_OTHER_NODES
config NUMA_EMU
bool "NUMA emulation"
depends on X86_64 && NUMA
- help
+ ---help---
Enable NUMA emulation. A flat machine will be split
into virtual nodes when booted with "numa=fake=N", where N is the
number of nodes. This is only useful for debugging.
@@ -1104,7 +1091,7 @@ config NODES_SHIFT
default "4" if X86_NUMAQ
default "3"
depends on NEED_MULTIPLE_NODES
- help
+ ---help---
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
@@ -1142,7 +1129,7 @@ config ARCH_SPARSEMEM_DEFAULT
config ARCH_SPARSEMEM_ENABLE
def_bool y
- depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_GENERICARCH
+ depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD
select SPARSEMEM_STATIC if X86_32
select SPARSEMEM_VMEMMAP_ENABLE if X86_64
@@ -1159,61 +1146,61 @@ source "mm/Kconfig"
config HIGHPTE
bool "Allocate 3rd-level pagetables from highmem"
depends on X86_32 && (HIGHMEM4G || HIGHMEM64G)
- help
+ ---help---
The VM uses one page table entry for each page of physical memory.
For systems with a lot of RAM, this can be wasteful of precious
low memory. Setting this option will put user-space page table
entries in high memory.
config X86_CHECK_BIOS_CORRUPTION
- bool "Check for low memory corruption"
- help
- Periodically check for memory corruption in low memory, which
- is suspected to be caused by BIOS. Even when enabled in the
- configuration, it is disabled at runtime. Enable it by
- setting "memory_corruption_check=1" on the kernel command
- line. By default it scans the low 64k of memory every 60
- seconds; see the memory_corruption_check_size and
- memory_corruption_check_period parameters in
- Documentation/kernel-parameters.txt to adjust this.
-
- When enabled with the default parameters, this option has
- almost no overhead, as it reserves a relatively small amount
- of memory and scans it infrequently. It both detects corruption
- and prevents it from affecting the running system.
-
- It is, however, intended as a diagnostic tool; if repeatable
- BIOS-originated corruption always affects the same memory,
- you can use memmap= to prevent the kernel from using that
- memory.
+ bool "Check for low memory corruption"
+ ---help---
+ Periodically check for memory corruption in low memory, which
+ is suspected to be caused by BIOS. Even when enabled in the
+ configuration, it is disabled at runtime. Enable it by
+ setting "memory_corruption_check=1" on the kernel command
+ line. By default it scans the low 64k of memory every 60
+ seconds; see the memory_corruption_check_size and
+ memory_corruption_check_period parameters in
+ Documentation/kernel-parameters.txt to adjust this.
+
+ When enabled with the default parameters, this option has
+ almost no overhead, as it reserves a relatively small amount
+ of memory and scans it infrequently. It both detects corruption
+ and prevents it from affecting the running system.
+
+ It is, however, intended as a diagnostic tool; if repeatable
+ BIOS-originated corruption always affects the same memory,
+ you can use memmap= to prevent the kernel from using that
+ memory.
config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
- bool "Set the default setting of memory_corruption_check"
+ bool "Set the default setting of memory_corruption_check"
depends on X86_CHECK_BIOS_CORRUPTION
default y
- help
- Set whether the default state of memory_corruption_check is
- on or off.
+ ---help---
+ Set whether the default state of memory_corruption_check is
+ on or off.
config X86_RESERVE_LOW_64K
- bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
+ bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
default y
- help
- Reserve the first 64K of physical RAM on BIOSes that are known
- to potentially corrupt that memory range. A numbers of BIOSes are
- known to utilize this area during suspend/resume, so it must not
- be used by the kernel.
+ ---help---
+ Reserve the first 64K of physical RAM on BIOSes that are known
+ to potentially corrupt that memory range. A number of BIOSes are
+ known to utilize this area during suspend/resume, so it must not
+ be used by the kernel.
- Set this to N if you are absolutely sure that you trust the BIOS
- to get all its memory reservations and usages right.
+ Set this to N if you are absolutely sure that you trust the BIOS
+ to get all its memory reservations and usages right.
- If you have doubts about the BIOS (e.g. suspend/resume does not
- work or there's kernel crashes after certain hardware hotplug
- events) and it's not AMI or Phoenix, then you might want to enable
- X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
- corruption patterns.
+ If you have doubts about the BIOS (e.g. suspend/resume does not
+ work or there's kernel crashes after certain hardware hotplug
+ events) and it's not AMI or Phoenix, then you might want to enable
+ X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
+ corruption patterns.
- Say Y if unsure.
+ Say Y if unsure.
config MATH_EMULATION
bool
@@ -1279,7 +1266,7 @@ config MTRR_SANITIZER
def_bool y
prompt "MTRR cleanup support"
depends on MTRR
- help
+ ---help---
Convert MTRR layout from continuous to discrete, so X drivers can
add writeback entries.
@@ -1294,7 +1281,7 @@ config MTRR_SANITIZER_ENABLE_DEFAULT
range 0 1
default "0"
depends on MTRR_SANITIZER
- help
+ ---help---
Enable mtrr cleanup default value
config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
@@ -1302,7 +1289,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
range 0 7
default "1"
depends on MTRR_SANITIZER
- help
+ ---help---
mtrr cleanup spare entries default, it can be changed via
mtrr_spare_reg_nr=N on the kernel command line.
@@ -1310,7 +1297,7 @@ config X86_PAT
bool
prompt "x86 PAT support"
depends on MTRR
- help
+ ---help---
Use PAT attributes to setup page level cache control.
PATs are the modern equivalents of MTRRs and are much more
@@ -1325,20 +1312,20 @@ config EFI
bool "EFI runtime service support"
depends on ACPI
---help---
- This enables the kernel to use EFI runtime services that are
- available (such as the EFI variable services).
+ This enables the kernel to use EFI runtime services that are
+ available (such as the EFI variable services).
- This option is only useful on systems that have EFI firmware.
- In addition, you should use the latest ELILO loader available
- at <http://elilo.sourceforge.net> in order to take advantage
- of EFI runtime services. However, even with this option, the
- resultant kernel should continue to boot on existing non-EFI
- platforms.
+ This option is only useful on systems that have EFI firmware.
+ In addition, you should use the latest ELILO loader available
+ at <http://elilo.sourceforge.net> in order to take advantage
+ of EFI runtime services. However, even with this option, the
+ resultant kernel should continue to boot on existing non-EFI
+ platforms.
config SECCOMP
def_bool y
prompt "Enable seccomp to safely compute untrusted bytecode"
- help
+ ---help---
This kernel feature is useful for number crunching applications
that may need to compute untrusted bytecode during their
execution. By using pipes or other transports made available to
@@ -1357,8 +1344,8 @@ config CC_STACKPROTECTOR_ALL
config CC_STACKPROTECTOR
bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
select CC_STACKPROTECTOR_ALL
- help
- This option turns on the -fstack-protector GCC feature. This
+ ---help---
+ This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of functions, a canary value on
the stack just before the return address, and validates
the value just before actually returning. Stack based buffer
@@ -1375,8 +1362,7 @@ source kernel/Kconfig.hz
config KEXEC
bool "kexec system call"
- depends on X86_BIOS_REBOOT
- help
+ ---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
but it is independent of the system firmware. And like a reboot
@@ -1393,7 +1379,7 @@ config KEXEC
config CRASH_DUMP
bool "kernel crash dumps"
depends on X86_64 || (X86_32 && HIGHMEM)
- help
+ ---help---
Generate crash dump after being started by kexec.
This should be normally only set in special crash dump kernels
which are loaded in the main kernel with kexec-tools into
@@ -1408,7 +1394,7 @@ config KEXEC_JUMP
bool "kexec jump (EXPERIMENTAL)"
depends on EXPERIMENTAL
depends on KEXEC && HIBERNATION && X86_32
- help
+ ---help---
Jump between original kernel and kexeced kernel and invoke
code in physical address mode via KEXEC
@@ -1417,7 +1403,7 @@ config PHYSICAL_START
default "0x1000000" if X86_NUMAQ
default "0x200000" if X86_64
default "0x100000"
- help
+ ---help---
This gives the physical address where the kernel is loaded.
If the kernel is not relocatable (CONFIG_RELOCATABLE=n) then
@@ -1458,7 +1444,7 @@ config PHYSICAL_START
config RELOCATABLE
bool "Build a relocatable kernel (EXPERIMENTAL)"
depends on EXPERIMENTAL
- help
+ ---help---
This builds a kernel image that retains relocation information
so it can be loaded someplace besides the default 1MB.
The relocations tend to make the kernel binary about 10% larger,
@@ -1478,7 +1464,7 @@ config PHYSICAL_ALIGN
default "0x100000" if X86_32
default "0x200000" if X86_64
range 0x2000 0x400000
- help
+ ---help---
This value puts the alignment restrictions on physical address
where kernel is loaded and run from. Kernel is compiled for an
address which meets above alignment restriction.
@@ -1499,7 +1485,7 @@ config PHYSICAL_ALIGN
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
- depends on SMP && HOTPLUG && !X86_VOYAGER
+ depends on SMP && HOTPLUG
---help---
Say Y here to allow turning CPUs off and on. CPUs can be
controlled through /sys/devices/system/cpu.
@@ -1511,7 +1497,7 @@ config COMPAT_VDSO
def_bool y
prompt "Compat VDSO support"
depends on X86_32 || IA32_EMULATION
- help
+ ---help---
Map the 32-bit VDSO to the predictable old-style address too.
---help---
Say N here if you are running a sufficiently recent glibc
@@ -1523,7 +1509,7 @@ config COMPAT_VDSO
config CMDLINE_BOOL
bool "Built-in kernel command line"
default n
- help
+ ---help---
Allow for specifying boot arguments to the kernel at
build time. On some systems (e.g. embedded ones), it is
necessary or convenient to provide some or all of the
@@ -1541,7 +1527,7 @@ config CMDLINE
string "Built-in kernel command string"
depends on CMDLINE_BOOL
default ""
- help
+ ---help---
Enter arguments here that should be compiled into the kernel
image and used at boot time. If the boot loader provides a
command line at boot time, it is appended to this string to
@@ -1558,7 +1544,7 @@ config CMDLINE_OVERRIDE
bool "Built-in command line overrides boot loader arguments"
default n
depends on CMDLINE_BOOL
- help
+ ---help---
Set this option to 'Y' to have the kernel ignore the boot loader
command line, and use ONLY the built-in command line.
@@ -1580,7 +1566,6 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
depends on NUMA
menu "Power management and ACPI options"
- depends on !X86_VOYAGER
config ARCH_HIBERNATION_HEADER
def_bool y
@@ -1658,7 +1643,7 @@ if APM
config APM_IGNORE_USER_SUSPEND
bool "Ignore USER SUSPEND"
- help
+ ---help---
This option will ignore USER SUSPEND requests. On machines with a
compliant APM BIOS, you want to say N. However, on the NEC Versa M
series notebooks, it is necessary to say Y because of a BIOS bug.
@@ -1682,7 +1667,7 @@ config APM_DO_ENABLE
config APM_CPU_IDLE
bool "Make CPU Idle calls when idle"
- help
+ ---help---
Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop.
On some machines, this can activate improved power savings, such as
a slowed CPU clock rate, when the machine is idle. These idle calls
@@ -1693,7 +1678,7 @@ config APM_CPU_IDLE
config APM_DISPLAY_BLANK
bool "Enable console blanking using APM"
- help
+ ---help---
Enable console blanking using the APM. Some laptops can use this to
turn off the LCD backlight when the screen blanker of the Linux
virtual console blanks the screen. Note that this is only used by
@@ -1706,7 +1691,7 @@ config APM_DISPLAY_BLANK
config APM_ALLOW_INTS
bool "Allow interrupts during APM BIOS calls"
- help
+ ---help---
Normally we disable external interrupts while we are making calls to
the APM BIOS as a measure to lessen the effects of a badly behaving
BIOS implementation. The BIOS should reenable interrupts if it
@@ -1731,7 +1716,7 @@ config PCI
bool "PCI support"
default y
select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
- help
+ ---help---
Find out whether you have a PCI motherboard. PCI is the name of a
bus system, i.e. the way the CPU talks to the other stuff inside
your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
@@ -1802,7 +1787,7 @@ config PCI_MMCONFIG
config DMAR
bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL
- help
+ ---help---
DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices.
These DMA remapping devices are reported via ACPI tables
@@ -1824,29 +1809,29 @@ config DMAR_GFX_WA
def_bool y
prompt "Support for Graphics workaround"
depends on DMAR
- help
- Current Graphics drivers tend to use physical address
- for DMA and avoid using DMA APIs. Setting this config
- option permits the IOMMU driver to set a unity map for
- all the OS-visible memory. Hence the driver can continue
- to use physical addresses for DMA.
+ ---help---
+ Current graphics drivers tend to use physical addresses
+ for DMA and avoid using DMA APIs. Setting this config
+ option permits the IOMMU driver to set a unity map for
+ all the OS-visible memory. Hence the driver can continue
+ to use physical addresses for DMA.
config DMAR_FLOPPY_WA
def_bool y
depends on DMAR
- help
- Floppy disk drivers are know to bypass DMA API calls
- thereby failing to work when IOMMU is enabled. This
- workaround will setup a 1:1 mapping for the first
- 16M to make floppy (an ISA device) work.
+ ---help---
+ Floppy disk drivers are known to bypass DMA API calls
+ thereby failing to work when IOMMU is enabled. This
+ workaround will setup a 1:1 mapping for the first
+ 16M to make floppy (an ISA device) work.
config INTR_REMAP
bool "Support for Interrupt Remapping (EXPERIMENTAL)"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
- help
- Supports Interrupt remapping for IO-APIC and MSI devices.
- To use x2apic mode in the CPU's which support x2APIC enhancements or
- to support platforms with CPU's having > 8 bit APIC ID, say Y.
+ ---help---
+ Supports Interrupt remapping for IO-APIC and MSI devices.
+ To use x2apic mode in CPUs which support x2APIC enhancements or
+ to support platforms with CPUs having > 8-bit APIC IDs, say Y.
source "drivers/pci/pcie/Kconfig"
@@ -1860,8 +1845,7 @@ if X86_32
config ISA
bool "ISA support"
- depends on !X86_VOYAGER
- help
+ ---help---
Find out whether you have ISA slots on your motherboard. ISA is the
name of a bus system, i.e. the way the CPU talks to the other stuff
inside your box. Other bus systems are PCI, EISA, MicroChannel
@@ -1887,9 +1871,8 @@ config EISA
source "drivers/eisa/Kconfig"
config MCA
- bool "MCA support" if !X86_VOYAGER
- default y if X86_VOYAGER
- help
+ bool "MCA support"
+ ---help---
MicroChannel Architecture is found in some IBM PS/2 machines and
laptops. It is a bus system similar to PCI or ISA. See
<file:Documentation/mca.txt> (and especially the web page given
@@ -1899,8 +1882,7 @@ source "drivers/mca/Kconfig"
config SCx200
tristate "NatSemi SCx200 support"
- depends on !X86_VOYAGER
- help
+ ---help---
This provides basic support for National Semiconductor's
(now AMD's) Geode processors. The driver probes for the
PCI-IDs of several on-chip devices, so it's a good dependency
@@ -1912,7 +1894,7 @@ config SCx200HR_TIMER
tristate "NatSemi SCx200 27MHz High-Resolution Timer Support"
depends on SCx200 && GENERIC_TIME
default y
- help
+ ---help---
This driver provides a clocksource built upon the on-chip
27MHz high-resolution timer. It's also a workaround for
NSC Geode SC-1100's buggy TSC, which loses time when the
@@ -1923,7 +1905,7 @@ config GEODE_MFGPT_TIMER
def_bool y
prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
- help
+ ---help---
This driver provides a clock event source based on the MFGPT
timer(s) in the CS5535 and CS5536 companion chip for the geode.
MFGPTs have a better resolution and max interval than the
@@ -1932,7 +1914,7 @@ config GEODE_MFGPT_TIMER
config OLPC
bool "One Laptop Per Child support"
default n
- help
+ ---help---
Add support for detecting the unique features of the OLPC
XO hardware.
@@ -1957,16 +1939,16 @@ config IA32_EMULATION
bool "IA32 Emulation"
depends on X86_64
select COMPAT_BINFMT_ELF
- help
+ ---help---
Include code to run 32-bit programs under a 64-bit kernel. You should
likely turn this on, unless you're 100% sure that you don't have any
32-bit programs left.
config IA32_AOUT
- tristate "IA32 a.out support"
- depends on IA32_EMULATION
- help
- Support old a.out binaries in the 32bit emulation.
+ tristate "IA32 a.out support"
+ depends on IA32_EMULATION
+ ---help---
+ Support old a.out binaries in the 32bit emulation.
config COMPAT
def_bool y
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 085fef4d866..a95eaf0e582 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -50,7 +50,7 @@ config M386
config M486
bool "486"
depends on X86_32
- help
+ ---help---
Select this for a 486 series processor, either Intel or one of the
compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
@@ -59,7 +59,7 @@ config M486
config M586
bool "586/K5/5x86/6x86/6x86MX"
depends on X86_32
- help
+ ---help---
Select this for an 586 or 686 series processor such as the AMD K5,
the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
assume the RDTSC (Read Time Stamp Counter) instruction.
@@ -67,21 +67,21 @@ config M586
config M586TSC
bool "Pentium-Classic"
depends on X86_32
- help
+ ---help---
Select this for a Pentium Classic processor with the RDTSC (Read
Time Stamp Counter) instruction for benchmarking.
config M586MMX
bool "Pentium-MMX"
depends on X86_32
- help
+ ---help---
Select this for a Pentium with the MMX graphics/multimedia
extended instructions.
config M686
bool "Pentium-Pro"
depends on X86_32
- help
+ ---help---
Select this for Intel Pentium Pro chips. This enables the use of
Pentium Pro extended instructions, and disables the init-time guard
against the f00f bug found in earlier Pentiums.
@@ -89,7 +89,7 @@ config M686
config MPENTIUMII
bool "Pentium-II/Celeron(pre-Coppermine)"
depends on X86_32
- help
+ ---help---
Select this for Intel chips based on the Pentium-II and
pre-Coppermine Celeron core. This option enables an unaligned
copy optimization, compiles the kernel with optimization flags
@@ -99,7 +99,7 @@ config MPENTIUMII
config MPENTIUMIII
bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
depends on X86_32
- help
+ ---help---
Select this for Intel chips based on the Pentium-III and
Celeron-Coppermine core. This option enables use of some
extended prefetch instructions in addition to the Pentium II
@@ -108,14 +108,14 @@ config MPENTIUMIII
config MPENTIUMM
bool "Pentium M"
depends on X86_32
- help
+ ---help---
Select this for Intel Pentium M (not Pentium-4 M)
notebook chips.
config MPENTIUM4
bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
depends on X86_32
- help
+ ---help---
Select this for Intel Pentium 4 chips. This includes the
Pentium 4, Pentium D, P4-based Celeron and Xeon, and
Pentium-4 M (not Pentium M) chips. This option enables compile
@@ -151,7 +151,7 @@ config MPENTIUM4
config MK6
bool "K6/K6-II/K6-III"
depends on X86_32
- help
+ ---help---
Select this for an AMD K6-family processor. Enables use of
some extended instructions, and passes appropriate optimization
flags to GCC.
@@ -159,14 +159,14 @@ config MK6
config MK7
bool "Athlon/Duron/K7"
depends on X86_32
- help
+ ---help---
Select this for an AMD Athlon K7-family processor. Enables use of
some extended instructions, and passes appropriate optimization
flags to GCC.
config MK8
bool "Opteron/Athlon64/Hammer/K8"
- help
+ ---help---
Select this for an AMD Opteron or Athlon64 Hammer-family processor.
Enables use of some extended instructions, and passes appropriate
optimization flags to GCC.
@@ -174,7 +174,7 @@ config MK8
config MCRUSOE
bool "Crusoe"
depends on X86_32
- help
+ ---help---
Select this for a Transmeta Crusoe processor. Treats the processor
like a 586 with TSC, and sets some GCC optimization flags (like a
Pentium Pro with no alignment requirements).
@@ -182,13 +182,13 @@ config MCRUSOE
config MEFFICEON
bool "Efficeon"
depends on X86_32
- help
+ ---help---
Select this for a Transmeta Efficeon processor.
config MWINCHIPC6
bool "Winchip-C6"
depends on X86_32
- help
+ ---help---
Select this for an IDT Winchip C6 chip. Linux and GCC
treat this chip as a 586TSC with some extended instructions
and alignment requirements.
@@ -196,7 +196,7 @@ config MWINCHIPC6
config MWINCHIP3D
bool "Winchip-2/Winchip-2A/Winchip-3"
depends on X86_32
- help
+ ---help---
Select this for an IDT Winchip-2, 2A or 3. Linux and GCC
treat this chip as a 586TSC with some extended instructions
and alignment requirements. Also enable out of order memory
@@ -206,19 +206,19 @@ config MWINCHIP3D
config MGEODEGX1
bool "GeodeGX1"
depends on X86_32
- help
+ ---help---
Select this for a Geode GX1 (Cyrix MediaGX) chip.
config MGEODE_LX
bool "Geode GX/LX"
depends on X86_32
- help
+ ---help---
Select this for AMD Geode GX and LX processors.
config MCYRIXIII
bool "CyrixIII/VIA-C3"
depends on X86_32
- help
+ ---help---
Select this for a Cyrix III or C3 chip. Presently Linux and GCC
treat this chip as a generic 586. Whilst the CPU is 686 class,
it lacks the cmov extension which gcc assumes is present when
@@ -230,7 +230,7 @@ config MCYRIXIII
config MVIAC3_2
bool "VIA C3-2 (Nehemiah)"
depends on X86_32
- help
+ ---help---
Select this for a VIA C3 "Nehemiah". Selecting this enables usage
of SSE and tells gcc to treat the CPU as a 686.
Note, this kernel will not boot on older (pre model 9) C3s.
@@ -238,14 +238,14 @@ config MVIAC3_2
config MVIAC7
bool "VIA C7"
depends on X86_32
- help
+ ---help---
Select this for a VIA C7. Selecting this uses the correct cache
shift and tells gcc to treat the CPU as a 686.
config MPSC
bool "Intel P4 / older Netburst based Xeon"
depends on X86_64
- help
+ ---help---
Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
Xeon CPUs with Intel 64bit which is compatible with x86-64.
Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
@@ -255,7 +255,7 @@ config MPSC
config MCORE2
bool "Core 2/newer Xeon"
- help
+ ---help---
Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
53xx) CPUs. You can distinguish newer from older Xeons by the CPU
@@ -265,7 +265,7 @@ config MCORE2
config GENERIC_CPU
bool "Generic-x86-64"
depends on X86_64
- help
+ ---help---
Generic x86-64 CPU.
Run equally well on all x86-64 CPUs.
@@ -274,7 +274,7 @@ endchoice
config X86_GENERIC
bool "Generic x86 support"
depends on X86_32
- help
+ ---help---
Instead of just including optimizations for the selected
x86 variant (e.g. PII, Crusoe or Athlon), include some more
generic optimizations as well. This will make the kernel
@@ -319,7 +319,7 @@ config X86_XADD
config X86_PPRO_FENCE
bool "PentiumPro memory ordering errata workaround"
depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
- help
+ ---help---
Old PentiumPro multiprocessor systems had errata that could cause
memory operations to violate the x86 ordering standard in rare cases.
Enabling this option will attempt to work around some (but not all)
@@ -412,14 +412,14 @@ config X86_DEBUGCTLMSR
menuconfig PROCESSOR_SELECT
bool "Supported processor vendors" if EMBEDDED
- help
+ ---help---
This lets you choose what x86 vendor support code your kernel
will include.
config CPU_SUP_INTEL
default y
bool "Support Intel processors" if PROCESSOR_SELECT
- help
+ ---help---
This enables detection, tunings and quirks for Intel processors
You need this enabled if you want your kernel to run on an
@@ -433,7 +433,7 @@ config CPU_SUP_CYRIX_32
default y
bool "Support Cyrix processors" if PROCESSOR_SELECT
depends on !64BIT
- help
+ ---help---
This enables detection, tunings and quirks for Cyrix processors
You need this enabled if you want your kernel to run on a
@@ -446,7 +446,7 @@ config CPU_SUP_CYRIX_32
config CPU_SUP_AMD
default y
bool "Support AMD processors" if PROCESSOR_SELECT
- help
+ ---help---
This enables detection, tunings and quirks for AMD processors
You need this enabled if you want your kernel to run on an
@@ -460,7 +460,7 @@ config CPU_SUP_CENTAUR_32
default y
bool "Support Centaur processors" if PROCESSOR_SELECT
depends on !64BIT
- help
+ ---help---
This enables detection, tunings and quirks for Centaur processors
You need this enabled if you want your kernel to run on a
@@ -474,7 +474,7 @@ config CPU_SUP_CENTAUR_64
default y
bool "Support Centaur processors" if PROCESSOR_SELECT
depends on 64BIT
- help
+ ---help---
This enables detection, tunings and quirks for Centaur processors
You need this enabled if you want your kernel to run on a
@@ -488,7 +488,7 @@ config CPU_SUP_TRANSMETA_32
default y
bool "Support Transmeta processors" if PROCESSOR_SELECT
depends on !64BIT
- help
+ ---help---
This enables detection, tunings and quirks for Transmeta processors
You need this enabled if you want your kernel to run on a
@@ -502,7 +502,7 @@ config CPU_SUP_UMC_32
default y
bool "Support UMC processors" if PROCESSOR_SELECT
depends on !64BIT
- help
+ ---help---
This enables detection, tunings and quirks for UMC processors
You need this enabled if you want your kernel to run on a
@@ -521,7 +521,7 @@ config X86_PTRACE_BTS
bool "Branch Trace Store"
default y
depends on X86_DEBUGCTLMSR
- help
+ ---help---
This adds a ptrace interface to the hardware's branch trace store.
Debuggers may use it to collect an execution trace of the debugged
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 28f111461ca..ba4781b9389 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -7,7 +7,7 @@ source "lib/Kconfig.debug"
config STRICT_DEVMEM
bool "Filter access to /dev/mem"
- help
+ ---help---
If this option is disabled, you allow userspace (root) access to all
of memory, including kernel and userspace memory. Accidental
access to this is obviously disastrous, but specific access can
@@ -25,7 +25,7 @@ config STRICT_DEVMEM
config X86_VERBOSE_BOOTUP
bool "Enable verbose x86 bootup info messages"
default y
- help
+ ---help---
Enables the informational output from the decompression stage
(e.g. bzImage) of the boot. If you disable this you will still
see errors. Disable this if you want silent bootup.
@@ -33,7 +33,7 @@ config X86_VERBOSE_BOOTUP
config EARLY_PRINTK
bool "Early printk" if EMBEDDED
default y
- help
+ ---help---
Write kernel log output directly into the VGA buffer or to a serial
port.
@@ -47,7 +47,7 @@ config EARLY_PRINTK_DBGP
bool "Early printk via EHCI debug port"
default n
depends on EARLY_PRINTK && PCI
- help
+ ---help---
Write kernel log output directly into the EHCI debug port.
This is useful for kernel debugging when your machine crashes very
@@ -59,14 +59,14 @@ config EARLY_PRINTK_DBGP
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
depends on DEBUG_KERNEL
- help
+ ---help---
This option will cause messages to be printed if free stack space
drops below a certain limit.
config DEBUG_STACK_USAGE
bool "Stack utilization instrumentation"
depends on DEBUG_KERNEL
- help
+ ---help---
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T and sysrq-P debug output.
@@ -75,7 +75,7 @@ config DEBUG_STACK_USAGE
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
depends on DEBUG_KERNEL
- help
+ ---help---
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
of memory corruptions.
@@ -83,9 +83,9 @@ config DEBUG_PAGEALLOC
config DEBUG_PER_CPU_MAPS
bool "Debug access to per_cpu maps"
depends on DEBUG_KERNEL
- depends on X86_SMP
+ depends on SMP
default n
- help
+ ---help---
Say Y to verify that the per_cpu map being accessed has
been setup. Adds a fair amount of code to kernel memory
and decreases performance.
@@ -96,7 +96,7 @@ config X86_PTDUMP
bool "Export kernel pagetable layout to userspace via debugfs"
depends on DEBUG_KERNEL
select DEBUG_FS
- help
+ ---help---
Say Y here if you want to show the kernel pagetable layout in a
debugfs file. This information is only useful for kernel developers
who are working in architecture specific areas of the kernel.
@@ -108,7 +108,7 @@ config DEBUG_RODATA
bool "Write protect kernel read-only data structures"
default y
depends on DEBUG_KERNEL
- help
+ ---help---
Mark the kernel read-only data as write-protected in the pagetables,
in order to catch accidental (and incorrect) writes to such const
data. This is recommended so that we can catch kernel bugs sooner.
@@ -118,7 +118,7 @@ config DEBUG_RODATA_TEST
bool "Testcase for the DEBUG_RODATA feature"
depends on DEBUG_RODATA
default y
- help
+ ---help---
This option enables a testcase for the DEBUG_RODATA
feature as well as for the change_page_attr() infrastructure.
If in doubt, say "N"
@@ -126,7 +126,7 @@ config DEBUG_RODATA_TEST
config DEBUG_NX_TEST
tristate "Testcase for the NX non-executable stack feature"
depends on DEBUG_KERNEL && m
- help
+ ---help---
This option enables a testcase for the CPU NX capability
and the software setup of this feature.
If in doubt, say "N"
@@ -134,7 +134,7 @@ config DEBUG_NX_TEST
config 4KSTACKS
bool "Use 4Kb for kernel stacks instead of 8Kb"
depends on X86_32
- help
+ ---help---
If you say Y here the kernel will use a 4Kb stacksize for the
kernel stack attached to each process/thread. This facilitates
running more threads on a system and also reduces the pressure
@@ -145,7 +145,7 @@ config DOUBLEFAULT
default y
bool "Enable doublefault exception handler" if EMBEDDED
depends on X86_32
- help
+ ---help---
This option allows trapping of rare doublefault exceptions that
would otherwise cause a system to silently reboot. Disabling this
option saves about 4k and might cause you much additional grey
@@ -155,7 +155,7 @@ config IOMMU_DEBUG
bool "Enable IOMMU debugging"
depends on GART_IOMMU && DEBUG_KERNEL
depends on X86_64
- help
+ ---help---
Force the IOMMU to on even when you have less than 4GB of
memory and add debugging code. On overflow always panic. And
allow to enable IOMMU leak tracing. Can be disabled at boot
@@ -171,7 +171,7 @@ config IOMMU_LEAK
bool "IOMMU leak tracing"
depends on DEBUG_KERNEL
depends on IOMMU_DEBUG
- help
+ ---help---
Add a simple leak tracer to the IOMMU code. This is useful when you
are debugging a buggy device driver that leaks IOMMU mappings.
@@ -224,25 +224,25 @@ choice
config IO_DELAY_0X80
bool "port 0x80 based port-IO delay [recommended]"
- help
+ ---help---
This is the traditional Linux IO delay used for in/out_p.
It is the most tested hence safest selection here.
config IO_DELAY_0XED
bool "port 0xed based port-IO delay"
- help
+ ---help---
Use port 0xed as the IO delay. This frees up port 0x80 which is
often used as a hardware-debug port.
config IO_DELAY_UDELAY
bool "udelay based port-IO delay"
- help
+ ---help---
Use udelay(2) as the IO delay method. This provides the delay
while not having any side-effect on the IO port space.
config IO_DELAY_NONE
bool "no port-IO delay"
- help
+ ---help---
No port-IO delay. Will break on old boxes that require port-IO
delay for certain operations. Should work on most new machines.
@@ -276,18 +276,18 @@ config DEBUG_BOOT_PARAMS
bool "Debug boot parameters"
depends on DEBUG_KERNEL
depends on DEBUG_FS
- help
+ ---help---
This option will cause struct boot_params to be exported via debugfs.
config CPA_DEBUG
bool "CPA self-test code"
depends on DEBUG_KERNEL
- help
+ ---help---
Do change_page_attr() self-tests every 30 seconds.
config OPTIMIZE_INLINING
bool "Allow gcc to uninline functions marked 'inline'"
- help
+ ---help---
This option determines if the kernel forces gcc to inline the functions
developers have marked 'inline'. Doing so takes away freedom from gcc to
do what it thinks is best, which is desirable for the gcc 3.x series of
@@ -300,4 +300,3 @@ config OPTIMIZE_INLINING
If unsure, say N.
endmenu
-
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index ab48ab497e5..1836191839e 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -105,29 +105,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# prevent gcc from generating any FP code by mistake
KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
-###
-# Sub architecture support
-# fcore-y is linked before mcore-y files.
-
-# Default subarch .c files
-mcore-y := arch/x86/mach-default/
-
-# Voyager subarch support
-mflags-$(CONFIG_X86_VOYAGER) := -Iarch/x86/include/asm/mach-voyager
-mcore-$(CONFIG_X86_VOYAGER) := arch/x86/mach-voyager/
-
-# generic subarchitecture
-mflags-$(CONFIG_X86_GENERICARCH):= -Iarch/x86/include/asm/mach-generic
-fcore-$(CONFIG_X86_GENERICARCH) += arch/x86/mach-generic/
-mcore-$(CONFIG_X86_GENERICARCH) := arch/x86/mach-default/
-
-# default subarch .h files
-mflags-y += -Iarch/x86/include/asm/mach-default
-
-# 64 bit does not support subarch support - clear sub arch variables
-fcore-$(CONFIG_X86_64) :=
-mcore-$(CONFIG_X86_64) :=
-
KBUILD_CFLAGS += $(mflags-y)
KBUILD_AFLAGS += $(mflags-y)
@@ -153,9 +130,6 @@ core-$(CONFIG_LGUEST_GUEST) += arch/x86/lguest/
core-y += arch/x86/kernel/
core-y += arch/x86/mm/
-# Remaining sub architecture files
-core-y += $(mcore-y)
-
core-y += arch/x86/crypto/
core-y += arch/x86/vdso/
core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index edba00d98ac..739bce993b5 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -188,7 +188,6 @@ CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
CONFIG_SMP=y
CONFIG_X86_FIND_SMP_CONFIG=y
CONFIG_X86_MPPARSE=y
-CONFIG_X86_PC=y
# CONFIG_X86_ELAN is not set
# CONFIG_X86_VOYAGER is not set
# CONFIG_X86_GENERICARCH is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 322dd2748fc..02b514e8f4c 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -187,7 +187,6 @@ CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
CONFIG_SMP=y
CONFIG_X86_FIND_SMP_CONFIG=y
CONFIG_X86_MPPARSE=y
-CONFIG_X86_PC=y
# CONFIG_X86_ELAN is not set
# CONFIG_X86_VOYAGER is not set
# CONFIG_X86_GENERICARCH is not set
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index ab1d51a8855..fba49f66228 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -33,7 +33,13 @@
} while (0)
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
extern void generic_apic_probe(void);
+#else
+static inline void generic_apic_probe(void)
+{
+}
+#endif
#ifdef CONFIG_X86_LOCAL_APIC
@@ -41,6 +47,21 @@ extern unsigned int apic_verbosity;
extern int local_apic_timer_c2_ok;
extern int disable_apic;
+
+#ifdef CONFIG_SMP
+extern void __inquire_remote_apic(int apicid);
+#else /* CONFIG_SMP */
+static inline void __inquire_remote_apic(int apicid)
+{
+}
+#endif /* CONFIG_SMP */
+
+static inline void default_inquire_remote_apic(int apicid)
+{
+ if (apic_verbosity >= APIC_DEBUG)
+ __inquire_remote_apic(apicid);
+}
+
/*
* Basic functions accessing APICs.
*/
@@ -124,12 +145,35 @@ struct apic_ops {
extern struct apic_ops *apic_ops;
-#define apic_read (apic_ops->read)
-#define apic_write (apic_ops->write)
-#define apic_icr_read (apic_ops->icr_read)
-#define apic_icr_write (apic_ops->icr_write)
-#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
-#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
+static inline u32 apic_read(u32 reg)
+{
+ return apic_ops->read(reg);
+}
+
+static inline void apic_write(u32 reg, u32 val)
+{
+ apic_ops->write(reg, val);
+}
+
+static inline u64 apic_icr_read(void)
+{
+ return apic_ops->icr_read();
+}
+
+static inline void apic_icr_write(u32 low, u32 high)
+{
+ apic_ops->icr_write(low, high);
+}
+
+static inline void apic_wait_icr_idle(void)
+{
+ apic_ops->wait_icr_idle();
+}
+
+static inline u32 safe_apic_wait_icr_idle(void)
+{
+ return apic_ops->safe_wait_icr_idle();
+}
extern int get_physical_broadcast(void);
@@ -196,4 +240,22 @@ static inline void disable_local_APIC(void) { }
#endif /* !CONFIG_X86_LOCAL_APIC */
+#ifdef CONFIG_X86_64
+#define SET_APIC_ID(x) (apic->set_apic_id(x))
+#else
+
+#ifdef CONFIG_X86_LOCAL_APIC
+static inline unsigned default_get_apic_id(unsigned long x)
+{
+ unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
+
+ if (APIC_XAPIC(ver))
+ return (x >> 24) & 0xFF;
+ else
+ return (x >> 24) & 0x0F;
+}
+#endif
+
+#endif
+
#endif /* _ASM_X86_APIC_H */
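
The apic_read()/apic_write() changes above replace the apic_ops dispatch macros with type-checked static inline wrappers. A minimal caller-side sketch (not part of this patch; the helper name below is hypothetical) of how code would use the new accessors:

	/* Illustrative sketch only -- not taken from this patch. */
	#include <linux/types.h>
	#include <asm/apic.h>		/* pulls in APIC_LVT0, APIC_LVT_MASKED via apicdef.h */

	static void example_mask_lvt0(void)	/* hypothetical helper */
	{
		u32 v = apic_read(APIC_LVT0);			/* dispatches via apic_ops->read() */

		apic_write(APIC_LVT0, v | APIC_LVT_MASKED);	/* dispatches via apic_ops->write() */
	}

Because the wrappers are ordinary inline functions rather than object-like macros, call sites get argument type checking from the compiler while the apic_ops indirection stays hidden behind the same interface.
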
diff --git a/arch/x86/include/asm/mach-default/apm.h b/arch/x86/include/asm/apm.h
index 20370c6db74..20370c6db74 100644
--- a/arch/x86/include/asm/mach-default/apm.h
+++ b/arch/x86/include/asm/apm.h
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
deleted file mode 100644
index d8dd9f53791..00000000000
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ /dev/null
@@ -1,155 +0,0 @@
-#ifndef __ASM_MACH_APIC_H
-#define __ASM_MACH_APIC_H
-
-#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
-#define esr_disable (1)
-
-static inline int apic_id_registered(void)
-{
- return (1);
-}
-
-static inline const cpumask_t *target_cpus(void)
-{
-#ifdef CONFIG_SMP
- return &cpu_online_map;
-#else
- return &cpumask_of_cpu(0);
-#endif
-}
-
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL 0
-#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-#define INT_DELIVERY_MODE (dest_Fixed)
-#define INT_DEST_MODE (0) /* phys delivery to target proc */
-#define NO_BALANCE_IRQ (0)
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
- return (0);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
- return (1);
-}
-
-static inline unsigned long calculate_ldr(int cpu)
-{
- unsigned long val, id;
- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- id = xapic_phys_to_log_apicid(cpu);
- val |= SET_APIC_LOGICAL_ID(id);
- return val;
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116). So here it goes...
- */
-static inline void init_apic_ldr(void)
-{
- unsigned long val;
- int cpu = smp_processor_id();
-
- apic_write(APIC_DFR, APIC_DFR_VALUE);
- val = calculate_ldr(cpu);
- apic_write(APIC_LDR, val);
-}
-
-static inline void setup_apic_routing(void)
-{
- printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
- "Physflat", nr_ioapics);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
- return (0);
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
- return apicid_2_node[hard_smp_processor_id()];
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
- if (mps_cpu < nr_cpu_ids)
- return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
-
- return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
- return physid_mask_of_physid(phys_apicid);
-}
-
-extern u8 cpu_2_logical_apicid[];
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
- if (cpu >= nr_cpu_ids)
- return BAD_APICID;
- return cpu_physical_id(cpu);
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
- /* For clustered we don't have a good way to do this yet - hack */
- return physids_promote(0xFFL);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
- return (1);
-}
-
-/* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
- int cpu;
- int apicid;
-
- cpu = first_cpu(*cpumask);
- apicid = cpu_to_logical_apicid(cpu);
- return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
-{
- int cpu;
-
- /*
- * We're using fixed IRQ delivery, can only return one phys APIC ID.
- * May as well be the first.
- */
- for_each_cpu_and(cpu, cpumask, andmask)
- if (cpumask_test_cpu(cpu, cpu_online_mask))
- break;
- if (cpu < nr_cpu_ids)
- return cpu_to_logical_apicid(cpu);
-
- return BAD_APICID;
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
- return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_MACH_APIC_H */
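For reference, the cpu_mask_to_apicid_and() helper deleted above picks the first online CPU present in both masks and returns its APIC ID, falling back to BAD_APICID. A minimal standalone sketch of that selection logic, with cpumasks modelled as plain bit masks and an assumed cpu-to-APIC-ID table (names are illustrative only, not kernel API):

/*
 * Standalone sketch (not kernel code): fixed IRQ delivery can target
 * only one CPU, so take the first hit in mask1 & mask2 & online.
 */
#include <stdio.h>

#define NR_CPUS     8
#define BAD_APICID  0xFFu

static unsigned int cpu_to_apicid[NR_CPUS] = { 0, 2, 4, 6, 8, 10, 12, 14 };

static unsigned int mask_to_apicid_and(unsigned long mask1, unsigned long mask2,
					unsigned long online)
{
	unsigned long both = mask1 & mask2 & online;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (both & (1UL << cpu))
			return cpu_to_apicid[cpu];

	return BAD_APICID;
}

int main(void)
{
	/* CPUs 1,2,3 requested, 2,3 allowed, 0,2 online -> CPU 2 wins. */
	printf("apicid = %u\n", mask_to_apicid_and(0x0Eul, 0x0Cul, 0x05ul));
	return 0;
}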
diff --git a/arch/x86/include/asm/bigsmp/apicdef.h b/arch/x86/include/asm/bigsmp/apicdef.h
deleted file mode 100644
index 392c3f5ef2f..00000000000
--- a/arch/x86/include/asm/bigsmp/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_MACH_APICDEF_H
-#define __ASM_MACH_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
- return (((x)>>24)&0xFF);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
deleted file mode 100644
index 27fcd01b3ae..00000000000
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_MACH_IPI_H
-#define __ASM_MACH_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
- send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
- send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
- send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_MACH_IPI_H */
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index f03b23e3286..b185091bf19 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,10 +32,6 @@ extern void arch_unregister_cpu(int);
DECLARE_PER_CPU(int, cpu_state);
-#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
-extern unsigned char boot_cpu_id;
-#else
-#define boot_cpu_id 0
-#endif
+extern unsigned int boot_cpu_id;
#endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/mach-default/do_timer.h b/arch/x86/include/asm/do_timer.h
index 23ecda0b28a..23ecda0b28a 100644
--- a/arch/x86/include/asm/mach-default/do_timer.h
+++ b/arch/x86/include/asm/do_timer.h
diff --git a/arch/x86/include/asm/mach-default/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 6fa399ad1de..854d538ae85 100644
--- a/arch/x86/include/asm/mach-default/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -9,7 +9,7 @@
* is no hardware IRQ pin equivalent for them, they are triggered
* through the ICC by us (IPIs)
*/
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
@@ -41,10 +41,15 @@ BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
* a much simpler SMP time architecture:
*/
#ifdef CONFIG_X86_LOCAL_APIC
+
BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
+#ifdef CONFIG_PERF_COUNTERS
+BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
+#endif
+
#ifdef CONFIG_X86_MCE_P4THERMAL
BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
#endif
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
deleted file mode 100644
index c58b9cc7446..00000000000
--- a/arch/x86/include/asm/es7000/apic.h
+++ /dev/null
@@ -1,242 +0,0 @@
-#ifndef __ASM_ES7000_APIC_H
-#define __ASM_ES7000_APIC_H
-
-#include <linux/gfp.h>
-
-#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
-#define esr_disable (1)
-
-static inline int apic_id_registered(void)
-{
- return (1);
-}
-
-static inline const cpumask_t *target_cpus_cluster(void)
-{
- return &CPU_MASK_ALL;
-}
-
-static inline const cpumask_t *target_cpus(void)
-{
- return &cpumask_of_cpu(smp_processor_id());
-}
-
-#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
-#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
-#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
-#define NO_BALANCE_IRQ_CLUSTER (1)
-
-#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-#define INT_DELIVERY_MODE (dest_Fixed)
-#define INT_DEST_MODE (0) /* phys delivery to target procs */
-#define NO_BALANCE_IRQ (0)
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL 0x0
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
- return 0;
-}
-static inline unsigned long check_apicid_present(int bit)
-{
- return physid_isset(bit, phys_cpu_present_map);
-}
-
-#define apicid_cluster(apicid) (apicid & 0xF0)
-
-static inline unsigned long calculate_ldr(int cpu)
-{
- unsigned long id;
- id = xapic_phys_to_log_apicid(cpu);
- return (SET_APIC_LOGICAL_ID(id));
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LdR and TPR before enabling
- * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116). So here it goes...
- */
-static inline void init_apic_ldr_cluster(void)
-{
- unsigned long val;
- int cpu = smp_processor_id();
-
- apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
- val = calculate_ldr(cpu);
- apic_write(APIC_LDR, val);
-}
-
-static inline void init_apic_ldr(void)
-{
- unsigned long val;
- int cpu = smp_processor_id();
-
- apic_write(APIC_DFR, APIC_DFR_VALUE);
- val = calculate_ldr(cpu);
- apic_write(APIC_LDR, val);
-}
-
-extern int apic_version [MAX_APICS];
-static inline void setup_apic_routing(void)
-{
- int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
- printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
- (apic_version[apic] == 0x14) ?
- "Physical Cluster" : "Logical Cluster",
- nr_ioapics, cpus_addr(*target_cpus())[0]);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
- return 0;
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
- return 0;
-}
-
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
- if (!mps_cpu)
- return boot_cpu_physical_apicid;
- else if (mps_cpu < nr_cpu_ids)
- return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
- else
- return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
- static int id = 0;
- physid_mask_t mask;
- mask = physid_mask_of_physid(id);
- ++id;
- return mask;
-}
-
-extern u8 cpu_2_logical_apicid[];
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
- if (cpu >= nr_cpu_ids)
- return BAD_APICID;
- return (int)cpu_2_logical_apicid[cpu];
-#else
- return logical_smp_processor_id();
-#endif
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
- /* For clustered we don't have a good way to do this yet - hack */
- return physids_promote(0xff);
-}
-
-
-static inline void setup_portio_remap(void)
-{
-}
-
-extern unsigned int boot_cpu_physical_apicid;
-static inline int check_phys_apicid_present(int cpu_physical_apicid)
-{
- boot_cpu_physical_apicid = read_apic_id();
- return (1);
-}
-
-static inline unsigned int
-cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
-{
- int num_bits_set;
- int cpus_found = 0;
- int cpu;
- int apicid;
-
- num_bits_set = cpumask_weight(cpumask);
- /* Return id to all */
- if (num_bits_set == nr_cpu_ids)
- return 0xFF;
- /*
- * The cpus in the mask must all be on the apic cluster. If are not
- * on the same apicid cluster return default value of TARGET_CPUS.
- */
- cpu = cpumask_first(cpumask);
- apicid = cpu_to_logical_apicid(cpu);
- while (cpus_found < num_bits_set) {
- if (cpumask_test_cpu(cpu, cpumask)) {
- int new_apicid = cpu_to_logical_apicid(cpu);
- if (apicid_cluster(apicid) !=
- apicid_cluster(new_apicid)){
- printk ("%s: Not a valid mask!\n", __func__);
- return 0xFF;
- }
- apicid = new_apicid;
- cpus_found++;
- }
- cpu++;
- }
- return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
- int num_bits_set;
- int cpus_found = 0;
- int cpu;
- int apicid;
-
- num_bits_set = cpus_weight(*cpumask);
- /* Return id to all */
- if (num_bits_set == nr_cpu_ids)
- return cpu_to_logical_apicid(0);
- /*
- * The cpus in the mask must all be on the apic cluster. If are not
- * on the same apicid cluster return default value of TARGET_CPUS.
- */
- cpu = first_cpu(*cpumask);
- apicid = cpu_to_logical_apicid(cpu);
- while (cpus_found < num_bits_set) {
- if (cpu_isset(cpu, *cpumask)) {
- int new_apicid = cpu_to_logical_apicid(cpu);
- if (apicid_cluster(apicid) !=
- apicid_cluster(new_apicid)){
- printk ("%s: Not a valid mask!\n", __func__);
- return cpu_to_logical_apicid(0);
- }
- apicid = new_apicid;
- cpus_found++;
- }
- cpu++;
- }
- return apicid;
-}
-
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
- const struct cpumask *andmask)
-{
- int apicid = cpu_to_logical_apicid(0);
- cpumask_var_t cpumask;
-
- if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
- return apicid;
-
- cpumask_and(cpumask, inmask, andmask);
- cpumask_and(cpumask, cpumask, cpu_online_mask);
- apicid = cpu_mask_to_apicid(cpumask);
-
- free_cpumask_var(cpumask);
- return apicid;
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
- return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_ES7000_APIC_H */
diff --git a/arch/x86/include/asm/es7000/apicdef.h b/arch/x86/include/asm/es7000/apicdef.h
deleted file mode 100644
index 8b234a3cb85..00000000000
--- a/arch/x86/include/asm/es7000/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_ES7000_APICDEF_H
-#define __ASM_ES7000_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
- return (((x)>>24)&0xFF);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
deleted file mode 100644
index 7e8ed24d4b8..00000000000
--- a/arch/x86/include/asm/es7000/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_ES7000_IPI_H
-#define __ASM_ES7000_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
- send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
- send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
- send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h
deleted file mode 100644
index c1629b090ec..00000000000
--- a/arch/x86/include/asm/es7000/mpparse.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_ES7000_MPPARSE_H
-#define __ASM_ES7000_MPPARSE_H
-
-#include <linux/acpi.h>
-
-extern int parse_unisys_oem (char *oemptr);
-extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
-extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
-extern void setup_unisys(void);
-
-#ifndef CONFIG_X86_GENERICARCH
-extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
-extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
-#endif
-
-#ifdef CONFIG_ACPI
-
-static inline int es7000_check_dsdt(void)
-{
- struct acpi_table_header header;
-
- if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
- !strncmp(header.oem_id, "UNISYS", 6))
- return 1;
- return 0;
-}
-#endif
-
-#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
deleted file mode 100644
index 78f0daaee43..00000000000
--- a/arch/x86/include/asm/es7000/wakecpu.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef __ASM_ES7000_WAKECPU_H
-#define __ASM_ES7000_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW 0x467
-#define TRAMPOLINE_PHYS_HIGH 0x469
-
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
-#ifndef CONFIG_ES7000_CLUSTERED_APIC
- while (!atomic_read(deassert))
- cpu_relax();
-#endif
- return;
-}
-
-/* Nothing to do for most platforms, since cleared by the INIT cycle */
-static inline void smp_callin_clear_local_apic(void)
-{
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-extern void __inquire_remote_apic(int apicid);
-
-static inline void inquire_remote_apic(int apicid)
-{
- if (apic_verbosity >= APIC_DEBUG)
- __inquire_remote_apic(apicid);
-}
-
-#endif /* __ASM_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h
index d48bee663a6..273b99452ae 100644
--- a/arch/x86/include/asm/genapic.h
+++ b/arch/x86/include/asm/genapic.h
@@ -1,5 +1,263 @@
+#ifndef _ASM_X86_GENAPIC_H
+#define _ASM_X86_GENAPIC_H
+
+#include <linux/cpumask.h>
+
+#include <asm/mpspec.h>
+#include <asm/atomic.h>
+
+/*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic APIC sub-arch data struct.
+ *
+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
+ * James Cleverdon.
+ */
+struct genapic {
+ char *name;
+
+ int (*probe)(void);
+ int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
+ int (*apic_id_registered)(void);
+
+ u32 irq_delivery_mode;
+ u32 irq_dest_mode;
+
+ const struct cpumask *(*target_cpus)(void);
+
+ int disable_esr;
+
+ int dest_logical;
+ unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
+ unsigned long (*check_apicid_present)(int apicid);
+
+ void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+ void (*init_apic_ldr)(void);
+
+ physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
+
+ void (*setup_apic_routing)(void);
+ int (*multi_timer_check)(int apic, int irq);
+ int (*apicid_to_node)(int logical_apicid);
+ int (*cpu_to_logical_apicid)(int cpu);
+ int (*cpu_present_to_apicid)(int mps_cpu);
+ physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
+ void (*setup_portio_remap)(void);
+ int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
+ void (*enable_apic_mode)(void);
+ int (*phys_pkg_id)(int cpuid_apic, int index_msb);
+
+ /*
+ * When one of the next two hooks returns 1 the genapic
+ * is switched to this. Essentially they are additional
+ * probe functions:
+ */
+ int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
+
+ unsigned int (*get_apic_id)(unsigned long x);
+ unsigned long (*set_apic_id)(unsigned int id);
+ unsigned long apic_id_mask;
+
+ unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+ unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+ const struct cpumask *andmask);
+
+ /* ipi */
+ void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+ void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+ int vector);
+ void (*send_IPI_allbutself)(int vector);
+ void (*send_IPI_all)(int vector);
+ void (*send_IPI_self)(int vector);
+
+ /* wakeup_secondary_cpu */
+ int (*wakeup_cpu)(int apicid, unsigned long start_eip);
+
+ int trampoline_phys_low;
+ int trampoline_phys_high;
+
+ void (*wait_for_init_deassert)(atomic_t *deassert);
+ void (*smp_callin_clear_local_apic)(void);
+ void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
+ void (*inquire_remote_apic)(int apicid);
+};
+
+extern struct genapic *apic;
+
+/*
+ * Warm reset vector default position:
+ */
+#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
+#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
+
#ifdef CONFIG_X86_32
-# include "genapic_32.h"
+extern void es7000_update_genapic_to_cluster(void);
#else
-# include "genapic_64.h"
+extern struct genapic apic_flat;
+extern struct genapic apic_physflat;
+extern struct genapic apic_x2apic_cluster;
+extern struct genapic apic_x2apic_phys;
+extern int default_acpi_madt_oem_check(char *, char *);
+
+extern void apic_send_IPI_self(int vector);
+
+extern struct genapic apic_x2apic_uv_x;
+DECLARE_PER_CPU(int, x2apic_extra_bits);
+
+extern void default_setup_apic_routing(void);
+
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
#endif
+
+static inline void default_wait_for_init_deassert(atomic_t *deassert)
+{
+ while (!atomic_read(deassert))
+ cpu_relax();
+ return;
+}
+
+extern void generic_bigsmp_probe(void);
+
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#include <asm/smp.h>
+
+#define APIC_DFR_VALUE (APIC_DFR_FLAT)
+
+static inline const struct cpumask *default_target_cpus(void)
+{
+#ifdef CONFIG_SMP
+ return cpu_online_mask;
+#else
+ return cpumask_of(0);
+#endif
+}
+
+DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+
+
+static inline unsigned int read_apic_id(void)
+{
+ unsigned int reg;
+
+ reg = apic_read(APIC_ID);
+
+ return apic->get_apic_id(reg);
+}
+
+#ifdef CONFIG_X86_64
+extern void default_setup_apic_routing(void);
+#else
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116). So here it goes...
+ */
+extern void default_init_apic_ldr(void);
+
+static inline int default_apic_id_registered(void)
+{
+ return physid_isset(read_apic_id(), phys_cpu_present_map);
+}
+
+static inline unsigned int
+default_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+ return cpumask_bits(cpumask)[0];
+}
+
+static inline unsigned int
+default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
+{
+ unsigned long mask1 = cpumask_bits(cpumask)[0];
+ unsigned long mask2 = cpumask_bits(andmask)[0];
+ unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+
+ return (unsigned int)(mask1 & mask2 & mask3);
+}
+
+static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+ return cpuid_apic >> index_msb;
+}
+
+static inline void default_setup_apic_routing(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+ printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
+ "Flat", nr_ioapics);
+#endif
+}
+
+extern int default_apicid_to_node(int logical_apicid);
+
+#endif
+
+static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+ return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long default_check_apicid_present(int bit)
+{
+ return physid_isset(bit, phys_cpu_present_map);
+}
+
+static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+ return phys_map;
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int default_cpu_to_logical_apicid(int cpu)
+{
+ return 1 << cpu;
+}
+
+static inline int __default_cpu_present_to_apicid(int mps_cpu)
+{
+ if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
+ return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+ else
+ return BAD_APICID;
+}
+
+static inline int
+__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+ return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
+}
+
+#ifdef CONFIG_X86_32
+static inline int default_cpu_present_to_apicid(int mps_cpu)
+{
+ return __default_cpu_present_to_apicid(mps_cpu);
+}
+
+static inline int
+default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+ return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
+}
+#else
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
+#endif
+
+static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
+{
+ return physid_mask_of_physid(phys_apicid);
+}
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#endif /* _ASM_X86_GENAPIC_64_H */
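The new struct genapic above is essentially a driver model: each platform fills in a table of function pointers, a probe step selects one instance, and generic code only ever calls through the global pointer. A toy standalone sketch of that probe-and-dispatch pattern follows; every name in it is made up for illustration and is not kernel code.

/*
 * Standalone sketch: two toy "drivers", a probe loop, and dispatch
 * through a single global pointer, modelled on the genapic layout.
 */
#include <stdio.h>

struct toy_apic {
	const char *name;
	int  (*probe)(void);
	void (*send_ipi_all)(int vector);
};

static int flat_probe(void) { return 1; }	/* always matches */
static void flat_send_ipi_all(int vector)
{
	printf("flat: broadcast vector 0x%x\n", vector);
}

static int cluster_probe(void) { return 0; }	/* pretend hardware absent */
static void cluster_send_ipi_all(int vector)
{
	printf("cluster: per-CPU unicast of vector 0x%x\n", vector);
}

static struct toy_apic apic_cluster = {
	.name = "cluster", .probe = cluster_probe, .send_ipi_all = cluster_send_ipi_all,
};
static struct toy_apic apic_flat = {
	.name = "flat", .probe = flat_probe, .send_ipi_all = flat_send_ipi_all,
};

static struct toy_apic *drivers[] = { &apic_cluster, &apic_flat };
static struct toy_apic *apic;		/* plays the role of the global 'apic' */

int main(void)
{
	for (unsigned i = 0; i < sizeof(drivers) / sizeof(drivers[0]); i++) {
		if (drivers[i]->probe()) {
			apic = drivers[i];
			break;
		}
	}
	printf("selected: %s\n", apic->name);
	apic->send_ipi_all(0xfd);	/* generic code never sees the driver */
	return 0;
}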
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
deleted file mode 100644
index 4334502d366..00000000000
--- a/arch/x86/include/asm/genapic_32.h
+++ /dev/null
@@ -1,141 +0,0 @@
-#ifndef _ASM_X86_GENAPIC_32_H
-#define _ASM_X86_GENAPIC_32_H
-
-#include <asm/mpspec.h>
-#include <asm/atomic.h>
-
-/*
- * Generic APIC driver interface.
- *
- * An straight forward mapping of the APIC related parts of the
- * x86 subarchitecture interface to a dynamic object.
- *
- * This is used by the "generic" x86 subarchitecture.
- *
- * Copyright 2003 Andi Kleen, SuSE Labs.
- */
-
-struct mpc_bus;
-struct mpc_table;
-struct mpc_cpu;
-
-struct genapic {
- char *name;
- int (*probe)(void);
-
- int (*apic_id_registered)(void);
- const struct cpumask *(*target_cpus)(void);
- int int_delivery_mode;
- int int_dest_mode;
- int ESR_DISABLE;
- int apic_destination_logical;
- unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
- unsigned long (*check_apicid_present)(int apicid);
- int no_balance_irq;
- int no_ioapic_check;
- void (*init_apic_ldr)(void);
- physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
-
- void (*setup_apic_routing)(void);
- int (*multi_timer_check)(int apic, int irq);
- int (*apicid_to_node)(int logical_apicid);
- int (*cpu_to_logical_apicid)(int cpu);
- int (*cpu_present_to_apicid)(int mps_cpu);
- physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
- void (*setup_portio_remap)(void);
- int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
- void (*enable_apic_mode)(void);
- u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
-
- /* mpparse */
- /* When one of the next two hooks returns 1 the genapic
- is switched to this. Essentially they are additional probe
- functions. */
- int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
- char *productid);
- int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
-
- unsigned (*get_apic_id)(unsigned long x);
- unsigned long apic_id_mask;
- unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
- unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
- const struct cpumask *andmask);
- void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
-
-#ifdef CONFIG_SMP
- /* ipi */
- void (*send_IPI_mask)(const struct cpumask *mask, int vector);
- void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
- int vector);
- void (*send_IPI_allbutself)(int vector);
- void (*send_IPI_all)(int vector);
-#endif
- int (*wakeup_cpu)(int apicid, unsigned long start_eip);
- int trampoline_phys_low;
- int trampoline_phys_high;
- void (*wait_for_init_deassert)(atomic_t *deassert);
- void (*smp_callin_clear_local_apic)(void);
- void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
- void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
- void (*inquire_remote_apic)(int apicid);
-};
-
-#define APICFUNC(x) .x = x,
-
-/* More functions could be probably marked IPIFUNC and save some space
- in UP GENERICARCH kernels, but I don't have the nerve right now
- to untangle this mess. -AK */
-#ifdef CONFIG_SMP
-#define IPIFUNC(x) APICFUNC(x)
-#else
-#define IPIFUNC(x)
-#endif
-
-#define APIC_INIT(aname, aprobe) \
-{ \
- .name = aname, \
- .probe = aprobe, \
- .int_delivery_mode = INT_DELIVERY_MODE, \
- .int_dest_mode = INT_DEST_MODE, \
- .no_balance_irq = NO_BALANCE_IRQ, \
- .ESR_DISABLE = esr_disable, \
- .apic_destination_logical = APIC_DEST_LOGICAL, \
- APICFUNC(apic_id_registered) \
- APICFUNC(target_cpus) \
- APICFUNC(check_apicid_used) \
- APICFUNC(check_apicid_present) \
- APICFUNC(init_apic_ldr) \
- APICFUNC(ioapic_phys_id_map) \
- APICFUNC(setup_apic_routing) \
- APICFUNC(multi_timer_check) \
- APICFUNC(apicid_to_node) \
- APICFUNC(cpu_to_logical_apicid) \
- APICFUNC(cpu_present_to_apicid) \
- APICFUNC(apicid_to_cpu_present) \
- APICFUNC(setup_portio_remap) \
- APICFUNC(check_phys_apicid_present) \
- APICFUNC(mps_oem_check) \
- APICFUNC(get_apic_id) \
- .apic_id_mask = APIC_ID_MASK, \
- APICFUNC(cpu_mask_to_apicid) \
- APICFUNC(cpu_mask_to_apicid_and) \
- APICFUNC(vector_allocation_domain) \
- APICFUNC(acpi_madt_oem_check) \
- IPIFUNC(send_IPI_mask) \
- IPIFUNC(send_IPI_allbutself) \
- IPIFUNC(send_IPI_all) \
- APICFUNC(enable_apic_mode) \
- APICFUNC(phys_pkg_id) \
- .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, \
- .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, \
- APICFUNC(wait_for_init_deassert) \
- APICFUNC(smp_callin_clear_local_apic) \
- APICFUNC(store_NMI_vector) \
- APICFUNC(restore_NMI_vector) \
- APICFUNC(inquire_remote_apic) \
-}
-
-extern struct genapic *genapic;
-extern void es7000_update_genapic_to_cluster(void);
-
-#endif /* _ASM_X86_GENAPIC_32_H */
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
deleted file mode 100644
index 7bb092c5905..00000000000
--- a/arch/x86/include/asm/genapic_64.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_X86_GENAPIC_64_H
-#define _ASM_X86_GENAPIC_64_H
-
-#include <linux/cpumask.h>
-
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Generic APIC sub-arch data struct.
- *
- * Hacked for x86-64 by James Cleverdon from i386 architecture code by
- * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
- * James Cleverdon.
- */
-
-struct genapic {
- char *name;
- int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
- u32 int_delivery_mode;
- u32 int_dest_mode;
- int (*apic_id_registered)(void);
- const struct cpumask *(*target_cpus)(void);
- void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
- void (*init_apic_ldr)(void);
- /* ipi */
- void (*send_IPI_mask)(const struct cpumask *mask, int vector);
- void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
- int vector);
- void (*send_IPI_allbutself)(int vector);
- void (*send_IPI_all)(int vector);
- void (*send_IPI_self)(int vector);
- /* */
- unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
- unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
- const struct cpumask *andmask);
- unsigned int (*phys_pkg_id)(int index_msb);
- unsigned int (*get_apic_id)(unsigned long x);
- unsigned long (*set_apic_id)(unsigned int id);
- unsigned long apic_id_mask;
- /* wakeup_secondary_cpu */
- int (*wakeup_cpu)(int apicid, unsigned long start_eip);
-};
-
-extern struct genapic *genapic;
-
-extern struct genapic apic_flat;
-extern struct genapic apic_physflat;
-extern struct genapic apic_x2apic_cluster;
-extern struct genapic apic_x2apic_phys;
-extern int acpi_madt_oem_check(char *, char *);
-
-extern void apic_send_IPI_self(int vector);
-
-extern struct genapic apic_x2apic_uv_x;
-DECLARE_PER_CPU(int, x2apic_extra_bits);
-
-extern void setup_apic_routing(void);
-
-#endif /* _ASM_X86_GENAPIC_64_H */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 8de644b6b95..370e1c83bb4 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -25,8 +25,6 @@
#include <asm/irq.h>
#include <asm/sections.h>
-#define platform_legacy_irq(irq) ((irq) < 16)
-
/* Interrupt handlers registered during init_IRQ */
extern void apic_timer_interrupt(void);
extern void error_interrupt(void);
@@ -58,7 +56,7 @@ extern void make_8259A_irq(unsigned int irq);
extern void init_8259A(int aeoi);
/* IOAPIC */
-#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
+#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
extern unsigned long io_apic_irqs;
extern void init_VISWS_APIC_irqs(void);
@@ -67,15 +65,7 @@ extern void disable_IO_APIC(void);
extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
extern void setup_ioapic_dest(void);
-#ifdef CONFIG_X86_64
extern void enable_IO_APIC(void);
-#endif
-
-/* IPI functions */
-#ifdef CONFIG_X86_32
-extern void send_IPI_self(int vector);
-#endif
-extern void send_IPI(int dest, int vector);
/* Statistics */
extern atomic_t irq_err_count;
@@ -84,21 +74,11 @@ extern atomic_t irq_mis_count;
/* EISA */
extern void eisa_set_level_irq(unsigned int irq);
-/* Voyager functions */
-extern asmlinkage void vic_cpi_interrupt(void);
-extern asmlinkage void vic_sys_interrupt(void);
-extern asmlinkage void vic_cmn_interrupt(void);
-extern asmlinkage void qic_timer_interrupt(void);
-extern asmlinkage void qic_invalidate_interrupt(void);
-extern asmlinkage void qic_reschedule_interrupt(void);
-extern asmlinkage void qic_enable_irq_interrupt(void);
-extern asmlinkage void qic_call_function_interrupt(void);
-
/* SMP */
extern void smp_apic_timer_interrupt(struct pt_regs *);
extern void smp_spurious_interrupt(struct pt_regs *);
extern void smp_error_interrupt(struct pt_regs *);
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(struct pt_regs *);
extern void smp_call_function_interrupt(struct pt_regs *);
extern void smp_call_function_single_interrupt(struct pt_regs *);
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 1dbbdf4be9b..bcf7ea4e136 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -91,7 +91,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
unsigned long prot_val);
-extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
/*
* early_ioremap() and early_iounmap() are for temporary early boot-time
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 08ec793aa04..59cb4a1317b 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -143,15 +143,6 @@ extern int noioapicreroute;
/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
extern int timer_through_8259;
-static inline void disable_ioapic_setup(void)
-{
-#ifdef CONFIG_PCI
- noioapicquirk = 1;
- noioapicreroute = -1;
-#endif
- skip_ioapic_setup = 1;
-}
-
/*
* If we use the IO-APIC for IRQ routing, disable automatic
* assignment of PCI IRQ's.
@@ -178,6 +169,12 @@ extern void reinit_intr_remapped_IO_APIC(int);
extern void probe_nr_irqs_gsi(void);
+extern int setup_ioapic_entry(int apic, int irq,
+ struct IO_APIC_route_entry *entry,
+ unsigned int destination, int trigger,
+ int polarity, int vector);
+extern void ioapic_write_entry(int apic, int pin,
+ struct IO_APIC_route_entry e);
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
static const int timer_through_8259 = 0;
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index c745a306f7d..5f2efc5d992 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_IPI_H
#define _ASM_X86_IPI_H
+#ifdef CONFIG_X86_LOCAL_APIC
+
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
@@ -55,8 +57,8 @@ static inline void __xapic_wait_icr_idle(void)
cpu_relax();
}
-static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
- unsigned int dest)
+static inline void
+__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
{
/*
* Subtle. In the case of the 'never do double writes' workaround
@@ -87,8 +89,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
* This is used to send an IPI with no shorthand notation (the destination is
* specified in bits 56 to 63 of the ICR).
*/
-static inline void __send_IPI_dest_field(unsigned int mask, int vector,
- unsigned int dest)
+static inline void
+ __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
unsigned long cfg;
@@ -117,41 +119,46 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
native_apic_mem_write(APIC_ICR, cfg);
}
-static inline void send_IPI_mask_sequence(const struct cpumask *mask,
- int vector)
-{
- unsigned long flags;
- unsigned long query_cpu;
+extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
+ int vector);
+extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+ int vector);
+#include <asm/genapic.h>
- /*
- * Hack. The clustered APIC addressing mode doesn't allow us to send
- * to an arbitrary mask, so I do a unicast to each CPU instead.
- * - mbligh
- */
- local_irq_save(flags);
- for_each_cpu(query_cpu, mask) {
- __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
- vector, APIC_DEST_PHYSICAL);
- }
- local_irq_restore(flags);
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+ int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+ int vector);
+
+/* Avoid include hell */
+#define NMI_VECTOR 0x02
+
+extern int no_broadcast;
+
+static inline void __default_local_send_IPI_allbutself(int vector)
+{
+ if (no_broadcast || vector == NMI_VECTOR)
+ apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
+ else
+ __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
}
-static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
- int vector)
+static inline void __default_local_send_IPI_all(int vector)
{
- unsigned long flags;
- unsigned int query_cpu;
- unsigned int this_cpu = smp_processor_id();
-
- /* See Hack comment above */
-
- local_irq_save(flags);
- for_each_cpu(query_cpu, mask)
- if (query_cpu != this_cpu)
- __send_IPI_dest_field(
- per_cpu(x86_cpu_to_apicid, query_cpu),
- vector, APIC_DEST_PHYSICAL);
- local_irq_restore(flags);
+ if (no_broadcast || vector == NMI_VECTOR)
+ apic->send_IPI_mask(cpu_online_mask, vector);
+ else
+ __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
}
+#ifdef CONFIG_X86_32
+extern void default_send_IPI_mask_logical(const struct cpumask *mask,
+ int vector);
+extern void default_send_IPI_allbutself(int vector);
+extern void default_send_IPI_all(int vector);
+extern void default_send_IPI_self(int vector);
+#endif
+
+#endif
+
#endif /* _ASM_X86_IPI_H */
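The __default_local_send_IPI_* helpers above choose between the ICR "shortcut" broadcast and an explicit per-CPU mask send; NMIs and the no_broadcast mode must always take the mask path. A small standalone sketch of that decision, with stub functions standing in for the real ICR writes (assumed names, not kernel code):

/*
 * Standalone sketch: broadcast only when it is safe; otherwise fall
 * back to a per-CPU mask send that excludes the sender.
 */
#include <stdio.h>
#include <stdbool.h>

#define NMI_VECTOR 0x02

static bool no_broadcast;

static void send_ipi_mask_allbutself(unsigned long online_mask, int vector)
{
	printf("unicast vector 0x%x to online mask 0x%lx minus self\n",
	       vector, online_mask);
}

static void send_ipi_shortcut_allbutself(int vector)
{
	printf("ICR shortcut ALLBUT, vector 0x%x\n", vector);
}

static void local_send_ipi_allbutself(unsigned long online_mask, int vector)
{
	if (no_broadcast || vector == NMI_VECTOR)
		send_ipi_mask_allbutself(online_mask, vector);
	else
		send_ipi_shortcut_allbutself(vector);
}

int main(void)
{
	local_send_ipi_allbutself(0x0f, 0xfd);		/* fast broadcast path */
	local_send_ipi_allbutself(0x0f, NMI_VECTOR);	/* must unicast */
	no_broadcast = true;
	local_send_ipi_allbutself(0x0f, 0xfd);		/* forced unicast */
	return 0;
}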
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 592688ed04d..107eb219669 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -36,9 +36,11 @@ static inline int irq_canonicalize(int irq)
extern void fixup_irqs(void);
#endif
-extern unsigned int do_IRQ(struct pt_regs *regs);
extern void init_IRQ(void);
extern void native_init_IRQ(void);
+extern bool handle_irq(unsigned irq, struct pt_regs *regs);
+
+extern unsigned int do_IRQ(struct pt_regs *regs);
/* Interrupt vector management */
extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 9a83a10a5d5..b07278c55e9 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -1,47 +1,69 @@
#ifndef _ASM_X86_IRQ_VECTORS_H
#define _ASM_X86_IRQ_VECTORS_H
-#include <linux/threads.h>
+/*
+ * Linux IRQ vector layout.
+ *
+ * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
+ * be defined by Linux. They are used as a jump table by the CPU when a
+ * given vector is triggered - by a CPU-external, CPU-internal or
+ * software-triggered event.
+ *
+ * Linux sets the kernel code address each entry jumps to early during
+ * bootup, and never changes them. This is the general layout of the
+ * IDT entries:
+ *
+ * Vectors 0 ... 31 : system traps and exceptions - hardcoded events
+ * Vectors 32 ... 127 : device interrupts
+ * Vector 128 : legacy int80 syscall interface
+ * Vectors 129 ... 237 : device interrupts
+ * Vectors 238 ... 255 : special interrupts
+ *
+ * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
+ *
+ * This file enumerates the exact layout of them:
+ */
-#define NMI_VECTOR 0x02
+#define NMI_VECTOR 0x02
/*
* IDT vectors usable for external interrupt sources start
* at 0x20:
*/
-#define FIRST_EXTERNAL_VECTOR 0x20
+#define FIRST_EXTERNAL_VECTOR 0x20
#ifdef CONFIG_X86_32
-# define SYSCALL_VECTOR 0x80
+# define SYSCALL_VECTOR 0x80
#else
-# define IA32_SYSCALL_VECTOR 0x80
+# define IA32_SYSCALL_VECTOR 0x80
#endif
/*
* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
* cleanup after irq migration.
*/
-#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
+#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
/*
* Vectors 0x30-0x3f are used for ISA interrupts.
*/
-#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
-#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
-#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
-#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
-#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
-#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
-#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
-#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
-#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
-#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
-#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
-#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
-#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
-#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
-#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
-#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
+#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
+
+#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
+#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
+#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
+#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
+#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
+#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
+#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
+#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
+#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
+#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
+#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
+#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
+#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
+#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
+#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
/*
* Special IRQ vectors used by the SMP architecture, 0xf0-0xff
@@ -50,123 +72,97 @@
* into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
* TLB, reschedule and local APIC vectors are performance-critical.
*/
-#ifdef CONFIG_X86_32
-# define SPURIOUS_APIC_VECTOR 0xff
-# define ERROR_APIC_VECTOR 0xfe
-# define RESCHEDULE_VECTOR 0xfd
-# define CALL_FUNCTION_VECTOR 0xfc
-# define CALL_FUNCTION_SINGLE_VECTOR 0xfb
-# define THERMAL_APIC_VECTOR 0xfa
-/* 0xf8 - 0xf9 : free */
-# define INVALIDATE_TLB_VECTOR_END 0xf7
-# define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
+#define SPURIOUS_APIC_VECTOR 0xff
+/*
+ * Sanity check
+ */
+#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
+# error SPURIOUS_APIC_VECTOR definition error
+#endif
-# define NUM_INVALIDATE_TLB_VECTORS 8
+#define ERROR_APIC_VECTOR 0xfe
+#define RESCHEDULE_VECTOR 0xfd
+#define CALL_FUNCTION_VECTOR 0xfc
+#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
+#define THERMAL_APIC_VECTOR 0xfa
+#ifdef CONFIG_X86_32
+/* 0xf8 - 0xf9 : free */
#else
-
-# define SPURIOUS_APIC_VECTOR 0xff
-# define ERROR_APIC_VECTOR 0xfe
-# define RESCHEDULE_VECTOR 0xfd
-# define CALL_FUNCTION_VECTOR 0xfc
-# define CALL_FUNCTION_SINGLE_VECTOR 0xfb
-# define THERMAL_APIC_VECTOR 0xfa
# define THRESHOLD_APIC_VECTOR 0xf9
# define UV_BAU_MESSAGE 0xf8
-# define INVALIDATE_TLB_VECTOR_END 0xf7
-# define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
-
-#define NUM_INVALIDATE_TLB_VECTORS 8
-
#endif
+/* f0-f7 used for spreading out TLB flushes: */
+#define INVALIDATE_TLB_VECTOR_END 0xf7
+#define INVALIDATE_TLB_VECTOR_START 0xf0
+#define NUM_INVALIDATE_TLB_VECTORS 8
+
/*
* Local APIC timer IRQ vector is on a different priority level,
* to work around the 'lost local interrupt if more than 2 IRQ
* sources per level' errata.
*/
-#define LOCAL_TIMER_VECTOR 0xef
+#define LOCAL_TIMER_VECTOR 0xef
+
+/*
+ * Performance monitoring interrupt vector:
+ */
+#define LOCAL_PERF_VECTOR 0xee
/*
* First APIC vector available to drivers: (vectors 0x30-0xee) we
* start at 0x31(0x41) to spread out vectors evenly between priority
* levels. (0x80 is the syscall vector)
*/
-#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
-
-#define NR_VECTORS 256
-
-#define FPU_IRQ 13
-
-#define FIRST_VM86_IRQ 3
-#define LAST_VM86_IRQ 15
-#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
+#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
-#define NR_IRQS_LEGACY 16
+#define NR_VECTORS 256
-#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
-
-#include <asm/apicnum.h> /* need MAX_IO_APICS */
-
-#ifndef CONFIG_SPARSE_IRQ
-# if NR_CPUS < MAX_IO_APICS
-# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
-# else
-# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
-# endif
-#else
+#define FPU_IRQ 13
-# define NR_IRQS \
- ((8 * NR_CPUS) > (32 * MAX_IO_APICS) ? \
- (NR_VECTORS + (8 * NR_CPUS)) : \
- (NR_VECTORS + (32 * MAX_IO_APICS))) \
+#define FIRST_VM86_IRQ 3
+#define LAST_VM86_IRQ 15
+#ifndef __ASSEMBLY__
+static inline int invalid_vm86_irq(int irq)
+{
+ return irq < 3 || irq > 15;
+}
#endif
-#elif defined(CONFIG_X86_VOYAGER)
-
-# define NR_IRQS 224
+/*
+ * Size the maximum number of interrupts.
+ *
+ * If the irq_desc[] array has a sparse layout, we can size things
+ * generously - it scales up linearly with the maximum number of CPUs,
+ * and the maximum number of IO-APICs, whichever is higher.
+ *
+ * In other cases we size more conservatively, to not create too large
+ * static arrays.
+ */
-#else /* IO_APIC || VOYAGER */
+#define NR_IRQS_LEGACY 16
-# define NR_IRQS 16
+#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
+#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
+#ifdef CONFIG_X86_IO_APIC
+# ifdef CONFIG_SPARSE_IRQ
+# define NR_IRQS \
+ (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
+ (NR_VECTORS + CPU_VECTOR_LIMIT) : \
+ (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
+# else
+# if NR_CPUS < MAX_IO_APICS
+# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
+# else
+# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
+# endif
+# endif
+#else /* !CONFIG_X86_IO_APIC: */
+# define NR_IRQS NR_IRQS_LEGACY
#endif
-/* Voyager specific defines */
-/* These define the CPIs we use in linux */
-#define VIC_CPI_LEVEL0 0
-#define VIC_CPI_LEVEL1 1
-/* now the fake CPIs */
-#define VIC_TIMER_CPI 2
-#define VIC_INVALIDATE_CPI 3
-#define VIC_RESCHEDULE_CPI 4
-#define VIC_ENABLE_IRQ_CPI 5
-#define VIC_CALL_FUNCTION_CPI 6
-#define VIC_CALL_FUNCTION_SINGLE_CPI 7
-
-/* Now the QIC CPIs: Since we don't need the two initial levels,
- * these are 2 less than the VIC CPIs */
-#define QIC_CPI_OFFSET 1
-#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
-#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
-#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
-#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
-#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
-#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
-
-#define VIC_START_FAKE_CPI VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
-
-/* this is the SYS_INT CPI. */
-#define VIC_SYS_INT 8
-#define VIC_CMN_INT 15
-
-/* This is the boot CPI for alternate processors. It gets overwritten
- * by the above once the system has activated all available processors */
-#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
-#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
-
-
#endif /* _ASM_X86_IRQ_VECTORS_H */
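To make the NR_IRQS sizing above concrete, the standalone sketch below evaluates the sparse-IRQ formula for assumed values of NR_CPUS and MAX_IO_APICS; the numbers are examples only and are not taken from any particular configuration.

/*
 * Standalone sketch: NR_IRQS under CONFIG_SPARSE_IRQ is the larger of
 * the CPU-scaled and IO-APIC-scaled limits, on top of NR_VECTORS.
 */
#include <stdio.h>

#define NR_VECTORS		256
#define NR_CPUS			64		/* assumed */
#define MAX_IO_APICS		128		/* assumed */

#define CPU_VECTOR_LIMIT	(8 * NR_CPUS)		/* 512 */
#define IO_APIC_VECTOR_LIMIT	(32 * MAX_IO_APICS)	/* 4096 */

int main(void)
{
	int sparse = CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?
			NR_VECTORS + CPU_VECTOR_LIMIT :
			NR_VECTORS + IO_APIC_VECTOR_LIMIT;

	printf("sparse irq_desc[]: NR_IRQS = %d\n", sparse);	/* 256 + 4096 */
	return 0;
}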
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
deleted file mode 100644
index cc09cbbee27..00000000000
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ /dev/null
@@ -1,168 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_APIC_H
-#define _ASM_X86_MACH_DEFAULT_MACH_APIC_H
-
-#ifdef CONFIG_X86_LOCAL_APIC
-
-#include <mach_apicdef.h>
-#include <asm/smp.h>
-
-#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-
-static inline const struct cpumask *target_cpus(void)
-{
-#ifdef CONFIG_SMP
- return cpu_online_mask;
-#else
- return cpumask_of(0);
-#endif
-}
-
-#define NO_BALANCE_IRQ (0)
-#define esr_disable (0)
-
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
-#define INT_DEST_MODE (genapic->int_dest_mode)
-#define TARGET_CPUS (genapic->target_cpus())
-#define apic_id_registered (genapic->apic_id_registered)
-#define init_apic_ldr (genapic->init_apic_ldr)
-#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
-#define phys_pkg_id (genapic->phys_pkg_id)
-#define vector_allocation_domain (genapic->vector_allocation_domain)
-#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
-#define send_IPI_self (genapic->send_IPI_self)
-#define wakeup_secondary_cpu (genapic->wakeup_cpu)
-extern void setup_apic_routing(void);
-#else
-#define INT_DELIVERY_MODE dest_LowestPrio
-#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
-#define TARGET_CPUS (target_cpus())
-#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116). So here it goes...
- */
-static inline void init_apic_ldr(void)
-{
- unsigned long val;
-
- apic_write(APIC_DFR, APIC_DFR_VALUE);
- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
- apic_write(APIC_LDR, val);
-}
-
-static inline int apic_id_registered(void)
-{
- return physid_isset(read_apic_id(), phys_cpu_present_map);
-}
-
-static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
- return cpumask_bits(cpumask)[0];
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
-{
- unsigned long mask1 = cpumask_bits(cpumask)[0];
- unsigned long mask2 = cpumask_bits(andmask)[0];
- unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
-
- return (unsigned int)(mask1 & mask2 & mask3);
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
- return cpuid_apic >> index_msb;
-}
-
-static inline void setup_apic_routing(void)
-{
-#ifdef CONFIG_X86_IO_APIC
- printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
- "Flat", nr_ioapics);
-#endif
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-#ifdef CONFIG_SMP
- return apicid_2_node[hard_smp_processor_id()];
-#else
- return 0;
-#endif
-}
-
-static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- /* Careful. Some cpus do not strictly honor the set of cpus
- * specified in the interrupt destination when using lowest
- * priority interrupt delivery mode.
- *
- * In particular there was a hyperthreading cpu observed to
- * deliver interrupts to the wrong hyperthread when only one
- * hyperthread was specified in the interrupt desitination.
- */
- *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
-}
-#endif
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
- return physid_isset(apicid, bitmap);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
- return physid_isset(bit, phys_cpu_present_map);
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
- return phys_map;
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
- return 0;
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
- return 1 << cpu;
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
- if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
- return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
- else
- return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
- return physid_mask_of_physid(phys_apicid);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
- return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-#endif /* CONFIG_X86_LOCAL_APIC */
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h
deleted file mode 100644
index 53179936d6c..00000000000
--- a/arch/x86/include/asm/mach-default/mach_apicdef.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
-#define _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
-
-#include <asm/apic.h>
-
-#ifdef CONFIG_X86_64
-#define APIC_ID_MASK (genapic->apic_id_mask)
-#define GET_APIC_ID(x) (genapic->get_apic_id(x))
-#define SET_APIC_ID(x) (genapic->set_apic_id(x))
-#else
-#define APIC_ID_MASK (0xF<<24)
-static inline unsigned get_apic_id(unsigned long x)
-{
- unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
- if (APIC_XAPIC(ver))
- return (((x)>>24)&0xFF);
- else
- return (((x)>>24)&0xF);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-#endif
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
deleted file mode 100644
index 191312d155d..00000000000
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_IPI_H
-#define _ASM_X86_MACH_DEFAULT_MACH_IPI_H
-
-/* Avoid include hell */
-#define NMI_VECTOR 0x02
-
-void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-void __send_IPI_shortcut(unsigned int shortcut, int vector);
-
-extern int no_broadcast;
-
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#define send_IPI_mask (genapic->send_IPI_mask)
-#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
-#else
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
- send_IPI_mask_bitmask(mask, vector);
-}
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-#endif
-
-static inline void __local_send_IPI_allbutself(int vector)
-{
- if (no_broadcast || vector == NMI_VECTOR)
- send_IPI_mask_allbutself(cpu_online_mask, vector);
- else
- __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
-}
-
-static inline void __local_send_IPI_all(int vector)
-{
- if (no_broadcast || vector == NMI_VECTOR)
- send_IPI_mask(cpu_online_mask, vector);
- else
- __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
-}
-
-#ifdef CONFIG_X86_64
-#define send_IPI_allbutself (genapic->send_IPI_allbutself)
-#define send_IPI_all (genapic->send_IPI_all)
-#else
-static inline void send_IPI_allbutself(int vector)
-{
- /*
- * if there are no other CPUs in the system then we get an APIC send
- * error if we try to broadcast, thus avoid sending IPIs in this case.
- */
- if (!(num_online_cpus() > 1))
- return;
-
- __local_send_IPI_allbutself(vector);
- return;
-}
-
-static inline void send_IPI_all(int vector)
-{
- __local_send_IPI_all(vector);
-}
-#endif
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h
deleted file mode 100644
index c70a263d68c..00000000000
--- a/arch/x86/include/asm/mach-default/mach_mpparse.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
-#define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
-
-static inline int
-mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
- return 0;
-}
-
-/* Hook from generic ACPI tables.c */
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
- return 0;
-}
-
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpspec.h b/arch/x86/include/asm/mach-default/mach_mpspec.h
deleted file mode 100644
index e85ede686be..00000000000
--- a/arch/x86/include/asm/mach-default/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
-#define _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
-
-#define MAX_IRQ_SOURCES 256
-
-#if CONFIG_BASE_SMALL == 0
-#define MAX_MP_BUSSES 256
-#else
-#define MAX_MP_BUSSES 32
-#endif
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
deleted file mode 100644
index 89897a6a65b..00000000000
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
-#define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW (0x467)
-#define TRAMPOLINE_PHYS_HIGH (0x469)
-
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
- while (!atomic_read(deassert))
- cpu_relax();
- return;
-}
-
-/* Nothing to do for most platforms, since cleared by the INIT cycle */
-static inline void smp_callin_clear_local_apic(void)
-{
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-#ifdef CONFIG_SMP
-extern void __inquire_remote_apic(int apicid);
-#else /* CONFIG_SMP */
-static inline void __inquire_remote_apic(int apicid)
-{
-}
-#endif /* CONFIG_SMP */
-
-static inline void inquire_remote_apic(int apicid)
-{
- if (apic_verbosity >= APIC_DEBUG)
- __inquire_remote_apic(apicid);
-}
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/mach-generic/gpio.h b/arch/x86/include/asm/mach-generic/gpio.h
deleted file mode 100644
index 995c45efdb3..00000000000
--- a/arch/x86/include/asm/mach-generic/gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_GPIO_H
-#define _ASM_X86_MACH_GENERIC_GPIO_H
-
-int gpio_request(unsigned gpio, const char *label);
-void gpio_free(unsigned gpio);
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-int gpio_to_irq(unsigned gpio);
-int irq_to_gpio(unsigned irq);
-
-#include <asm-generic/gpio.h> /* cansleep wrappers */
-
-#endif /* _ASM_X86_MACH_GENERIC_GPIO_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
deleted file mode 100644
index 48553e958ad..00000000000
--- a/arch/x86/include/asm/mach-generic/mach_apic.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_APIC_H
-#define _ASM_X86_MACH_GENERIC_MACH_APIC_H
-
-#include <asm/genapic.h>
-
-#define esr_disable (genapic->ESR_DISABLE)
-#define NO_BALANCE_IRQ (genapic->no_balance_irq)
-#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
-#define INT_DEST_MODE (genapic->int_dest_mode)
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
-#define TARGET_CPUS (genapic->target_cpus())
-#define apic_id_registered (genapic->apic_id_registered)
-#define init_apic_ldr (genapic->init_apic_ldr)
-#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
-#define setup_apic_routing (genapic->setup_apic_routing)
-#define multi_timer_check (genapic->multi_timer_check)
-#define apicid_to_node (genapic->apicid_to_node)
-#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid)
-#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
-#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
-#define setup_portio_remap (genapic->setup_portio_remap)
-#define check_apicid_present (genapic->check_apicid_present)
-#define check_phys_apicid_present (genapic->check_phys_apicid_present)
-#define check_apicid_used (genapic->check_apicid_used)
-#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
-#define vector_allocation_domain (genapic->vector_allocation_domain)
-#define enable_apic_mode (genapic->enable_apic_mode)
-#define phys_pkg_id (genapic->phys_pkg_id)
-#define wakeup_secondary_cpu (genapic->wakeup_cpu)
-
-extern void generic_bigsmp_probe(void);
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apicdef.h b/arch/x86/include/asm/mach-generic/mach_apicdef.h
deleted file mode 100644
index 68041f3802f..00000000000
--- a/arch/x86/include/asm/mach-generic/mach_apicdef.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
-#define _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
-
-#ifndef APIC_DEFINITION
-#include <asm/genapic.h>
-
-#define GET_APIC_ID (genapic->get_apic_id)
-#define APIC_ID_MASK (genapic->apic_id_mask)
-#endif
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_ipi.h b/arch/x86/include/asm/mach-generic/mach_ipi.h
deleted file mode 100644
index ffd637e3c3d..00000000000
--- a/arch/x86/include/asm/mach-generic/mach_ipi.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_IPI_H
-#define _ASM_X86_MACH_GENERIC_MACH_IPI_H
-
-#include <asm/genapic.h>
-
-#define send_IPI_mask (genapic->send_IPI_mask)
-#define send_IPI_allbutself (genapic->send_IPI_allbutself)
-#define send_IPI_all (genapic->send_IPI_all)
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h
deleted file mode 100644
index 9444ab8dca9..00000000000
--- a/arch/x86/include/asm/mach-generic/mach_mpparse.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
-#define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
-
-
-extern int mps_oem_check(struct mpc_table *, char *, char *);
-
-extern int acpi_madt_oem_check(char *, char *);
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpspec.h b/arch/x86/include/asm/mach-generic/mach_mpspec.h
deleted file mode 100644
index 3bc40722657..00000000000
--- a/arch/x86/include/asm/mach-generic/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
-#define _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
-
-#define MAX_IRQ_SOURCES 256
-
-/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
-/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
-#define MAX_MP_BUSSES 260
-
-extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h
deleted file mode 100644
index 1ab16b168c8..00000000000
--- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
-#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low)
-#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high)
-#define wait_for_init_deassert (genapic->wait_for_init_deassert)
-#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic)
-#define store_NMI_vector (genapic->store_NMI_vector)
-#define restore_NMI_vector (genapic->restore_NMI_vector)
-#define inquire_remote_apic (genapic->inquire_remote_apic)
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-rdc321x/gpio.h b/arch/x86/include/asm/mach-rdc321x/gpio.h
deleted file mode 100644
index c210ab5788b..00000000000
--- a/arch/x86/include/asm/mach-rdc321x/gpio.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_X86_MACH_RDC321X_GPIO_H
-#define _ASM_X86_MACH_RDC321X_GPIO_H
-
-#include <linux/kernel.h>
-
-extern int rdc_gpio_get_value(unsigned gpio);
-extern void rdc_gpio_set_value(unsigned gpio, int value);
-extern int rdc_gpio_direction_input(unsigned gpio);
-extern int rdc_gpio_direction_output(unsigned gpio, int value);
-extern int rdc_gpio_request(unsigned gpio, const char *label);
-extern void rdc_gpio_free(unsigned gpio);
-extern void __init rdc321x_gpio_setup(void);
-
-/* Wrappers for the arch-neutral GPIO API */
-
-static inline int gpio_request(unsigned gpio, const char *label)
-{
- return rdc_gpio_request(gpio, label);
-}
-
-static inline void gpio_free(unsigned gpio)
-{
- might_sleep();
- rdc_gpio_free(gpio);
-}
-
-static inline int gpio_direction_input(unsigned gpio)
-{
- return rdc_gpio_direction_input(gpio);
-}
-
-static inline int gpio_direction_output(unsigned gpio, int value)
-{
- return rdc_gpio_direction_output(gpio, value);
-}
-
-static inline int gpio_get_value(unsigned gpio)
-{
- return rdc_gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
- rdc_gpio_set_value(gpio, value);
-}
-
-static inline int gpio_to_irq(unsigned gpio)
-{
- return gpio;
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
- return irq;
-}
-
-/* For cansleep */
-#include <asm-generic/gpio.h>
-
-#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */
diff --git a/arch/x86/include/asm/mach-default/mach_timer.h b/arch/x86/include/asm/mach_timer.h
index 853728519ae..853728519ae 100644
--- a/arch/x86/include/asm/mach-default/mach_timer.h
+++ b/arch/x86/include/asm/mach_timer.h
diff --git a/arch/x86/include/asm/mach-default/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f7920601e47..f7920601e47 100644
--- a/arch/x86/include/asm/mach-default/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index bd22f2a3713..5916c8df09d 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -9,7 +9,18 @@ extern int apic_version[MAX_APICS];
extern int pic_mode;
#ifdef CONFIG_X86_32
-#include <mach_mpspec.h>
+
+/*
+ * Summit or generic (i.e. installer) kernels need lots of bus entries.
+ * Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets.
+ */
+#if CONFIG_BASE_SMALL == 0
+# define MAX_MP_BUSSES 260
+#else
+# define MAX_MP_BUSSES 32
+#endif
+
+#define MAX_IRQ_SOURCES 256
extern unsigned int def_to_bigsmp;
extern u8 apicid_2_node[];
@@ -20,15 +31,15 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
#endif
-#define MAX_APICID 256
+#define MAX_APICID 256
-#else
+#else /* CONFIG_X86_64: */
-#define MAX_MP_BUSSES 256
+#define MAX_MP_BUSSES 256
/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
-#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
+#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
-#endif
+#endif /* CONFIG_X86_64 */
extern void early_find_smp_config(void);
extern void early_get_smp_config(void);
@@ -45,11 +56,13 @@ extern int smp_found_config;
extern int mpc_default_type;
extern unsigned long mp_lapic_addr;
-extern void find_smp_config(void);
extern void get_smp_config(void);
+
#ifdef CONFIG_X86_MPPARSE
+extern void find_smp_config(void);
extern void early_reserve_e820_mpc_new(void);
#else
+static inline void find_smp_config(void) { }
static inline void early_reserve_e820_mpc_new(void) { }
#endif
@@ -64,6 +77,8 @@ extern int acpi_probe_gsi(void);
#ifdef CONFIG_X86_IO_APIC
extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
u32 gsi, int triggering, int polarity);
+extern int mp_find_ioapic(int gsi);
+extern int mp_find_ioapic_pin(int ioapic, int gsi);
#else
static inline int
mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
@@ -148,4 +163,10 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
extern physid_mask_t phys_cpu_present_map;
+extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
+
+extern int default_acpi_madt_oem_check(char *, char *);
+
+extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
+
#endif /* _ASM_X86_MPSPEC_H */
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h
index 1e8bd30b4c1..9f0a5f5d29e 100644
--- a/arch/x86/include/asm/numaq.h
+++ b/arch/x86/include/asm/numaq.h
@@ -31,6 +31,8 @@
extern int found_numaq;
extern int get_memcfg_numaq(void);
+extern void *xquad_portio;
+
/*
* SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
*/
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
deleted file mode 100644
index bf37bc49bd8..00000000000
--- a/arch/x86/include/asm/numaq/apic.h
+++ /dev/null
@@ -1,142 +0,0 @@
-#ifndef __ASM_NUMAQ_APIC_H
-#define __ASM_NUMAQ_APIC_H
-
-#include <asm/io.h>
-#include <linux/mmzone.h>
-#include <linux/nodemask.h>
-
-#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
-
-static inline const cpumask_t *target_cpus(void)
-{
- return &CPU_MASK_ALL;
-}
-
-#define NO_BALANCE_IRQ (1)
-#define esr_disable (1)
-
-#define INT_DELIVERY_MODE dest_LowestPrio
-#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
- return physid_isset(apicid, bitmap);
-}
-static inline unsigned long check_apicid_present(int bit)
-{
- return physid_isset(bit, phys_cpu_present_map);
-}
-#define apicid_cluster(apicid) (apicid & 0xF0)
-
-static inline int apic_id_registered(void)
-{
- return 1;
-}
-
-static inline void init_apic_ldr(void)
-{
- /* Already done in NUMA-Q firmware */
-}
-
-static inline void setup_apic_routing(void)
-{
- printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
- "NUMA-Q", nr_ioapics);
-}
-
-/*
- * Skip adding the timer int on secondary nodes, which causes
- * a small but painful rift in the time-space continuum.
- */
-static inline int multi_timer_check(int apic, int irq)
-{
- return apic != 0 && irq == 0;
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
- /* We don't have a good way to do this yet - hack */
- return physids_promote(0xFUL);
-}
-
-/* Mapping from cpu number to logical apicid */
-extern u8 cpu_2_logical_apicid[];
-static inline int cpu_to_logical_apicid(int cpu)
-{
- if (cpu >= nr_cpu_ids)
- return BAD_APICID;
- return (int)cpu_2_logical_apicid[cpu];
-}
-
-/*
- * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
- * cpu to APIC ID relation to properly interact with the intelligent
- * mode of the cluster controller.
- */
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
- if (mps_cpu < 60)
- return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
- else
- return BAD_APICID;
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
- return logical_apicid >> 4;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
-{
- int node = apicid_to_node(logical_apicid);
- int cpu = __ffs(logical_apicid & 0xf);
-
- return physid_mask_of_physid(cpu + 4*node);
-}
-
-extern void *xquad_portio;
-
-static inline void setup_portio_remap(void)
-{
- int num_quads = num_online_nodes();
-
- if (num_quads <= 1)
- return;
-
- printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
- xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
- printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
- (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
- return (1);
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-/*
- * We use physical apicids here, not logical, so just return the default
- * physical broadcast to stop people from breaking us
- */
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
- return (int) 0xF;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
-{
- return (int) 0xF;
-}
-
-/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
- return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_NUMAQ_APIC_H */
diff --git a/arch/x86/include/asm/numaq/apicdef.h b/arch/x86/include/asm/numaq/apicdef.h
deleted file mode 100644
index e012a46cc22..00000000000
--- a/arch/x86/include/asm/numaq/apicdef.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __ASM_NUMAQ_APICDEF_H
-#define __ASM_NUMAQ_APICDEF_H
-
-
-#define APIC_ID_MASK (0xF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
- return (((x)>>24)&0x0F);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
deleted file mode 100644
index a8374c65277..00000000000
--- a/arch/x86/include/asm/numaq/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_NUMAQ_IPI_H
-#define __ASM_NUMAQ_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
- send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
- send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
- send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/numaq/mpparse.h b/arch/x86/include/asm/numaq/mpparse.h
deleted file mode 100644
index a2eeefcd1cc..00000000000
--- a/arch/x86/include/asm/numaq/mpparse.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_NUMAQ_MPPARSE_H
-#define __ASM_NUMAQ_MPPARSE_H
-
-extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
-
-#endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h
deleted file mode 100644
index 6f499df8edd..00000000000
--- a/arch/x86/include/asm/numaq/wakecpu.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef __ASM_NUMAQ_WAKECPU_H
-#define __ASM_NUMAQ_WAKECPU_H
-
-/* This file copes with machines that wakeup secondary CPUs by NMIs */
-
-#define TRAMPOLINE_PHYS_LOW (0x8)
-#define TRAMPOLINE_PHYS_HIGH (0xa)
-
-/* We don't do anything here because we use NMI's to boot instead */
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
-}
-
-/*
- * Because we use NMIs rather than the INIT-STARTUP sequence to
- * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
- */
-static inline void smp_callin_clear_local_apic(void)
-{
- clear_local_APIC();
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
- printk("Storing NMI vector\n");
- *high =
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH));
- *low =
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW));
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
- printk("Restoring NMI vector\n");
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
- *high;
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
- *low;
-}
-
-static inline void inquire_remote_apic(int apicid)
-{
-}
-
-#endif /* __ASM_NUMAQ_WAKECPU_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ff691736f5e..1c244b64573 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1482,6 +1482,7 @@ static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
+#define __raw_spin_is_contended __raw_spin_is_contended
static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index b8493b3b989..9709fdff661 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -5,10 +5,8 @@
#ifdef CONFIG_X86_PAT
extern int pat_enabled;
-extern void validate_pat_support(struct cpuinfo_x86 *c);
#else
static const int pat_enabled;
-static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
#endif
extern void pat_init(void);
@@ -17,6 +15,4 @@ extern int reserve_memtype(u64 start, u64 end,
unsigned long req_type, unsigned long *ret_type);
extern int free_memtype(u64 start, u64 end);
-extern void pat_disable(char *reason);
-
#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/mach-default/pci-functions.h b/arch/x86/include/asm/pci-functions.h
index ed0bab42735..ed0bab42735 100644
--- a/arch/x86/include/asm/mach-default/pci-functions.h
+++ b/arch/x86/include/asm/pci-functions.h
diff --git a/arch/x86/include/asm/prctl.h b/arch/x86/include/asm/prctl.h
index a8894647dd9..3ac5032fae0 100644
--- a/arch/x86/include/asm/prctl.h
+++ b/arch/x86/include/asm/prctl.h
@@ -6,8 +6,4 @@
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
-#ifdef CONFIG_X86_64
-extern long sys_arch_prctl(int, unsigned long);
-#endif /* CONFIG_X86_64 */
-
#endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 5a947210425..a0133838b67 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -73,7 +73,7 @@ struct cpuinfo_x86 {
char pad0;
#else
/* Number of 4K pages in DTLB/ITLB combined(in pages): */
- int x86_tlbsize;
+ int x86_tlbsize;
__u8 x86_virt_bits;
__u8 x86_phys_bits;
#endif
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index d6a22f92ba7..49fb3ecf3bb 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -18,11 +18,7 @@ extern void syscall32_cpu_init(void);
extern void check_efer(void);
-#ifdef CONFIG_X86_BIOS_REBOOT
extern int reboot_force;
-#else
-static const int reboot_force = 0;
-#endif
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
diff --git a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h
index c8e9c8bed3d..c8e9c8bed3d 100644
--- a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h
+++ b/arch/x86/include/asm/rdc321x_defs.h
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 536949749bc..45b40278b58 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_SETUP_H
#define _ASM_X86_SETUP_H
+#ifdef __KERNEL__
+
#define COMMAND_LINE_SIZE 2048
#ifndef __ASSEMBLY__
@@ -8,10 +10,8 @@
/* Interrupt control for vSMPowered x86_64 systems */
void vsmp_init(void);
-
void setup_bios_corruption_check(void);
-
#ifdef CONFIG_X86_VISWS
extern void visws_early_detect(void);
extern int is_visws_box(void);
@@ -43,7 +43,7 @@ struct x86_quirks {
void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
void (*mpc_oem_pci_bus)(struct mpc_bus *m);
void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
- unsigned short oemsize);
+ unsigned short oemsize);
int (*setup_ioapic_ids)(void);
int (*update_genapic)(void);
};
@@ -56,8 +56,6 @@ extern unsigned long saved_video_mode;
#endif
#endif /* __ASSEMBLY__ */
-#ifdef __KERNEL__
-
#ifdef __i386__
#include <linux/pfn.h>
diff --git a/arch/x86/include/asm/mach-default/setup_arch.h b/arch/x86/include/asm/setup_arch.h
index 38846208b54..38846208b54 100644
--- a/arch/x86/include/asm/mach-default/setup_arch.h
+++ b/arch/x86/include/asm/setup_arch.h
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 45ef8a1b9d7..47d0e21f2b9 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -182,28 +182,9 @@ static inline int logical_smp_processor_id(void)
return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
}
-#include <mach_apicdef.h>
-static inline unsigned int read_apic_id(void)
-{
- unsigned int reg;
-
- reg = *(u32 *)(APIC_BASE + APIC_ID);
-
- return GET_APIC_ID(reg);
-}
#endif
-
-# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
extern int hard_smp_processor_id(void);
-# else
-#include <mach_apicdef.h>
-static inline int hard_smp_processor_id(void)
-{
- /* we don't want to mark this access volatile - bad code generation */
- return read_apic_id();
-}
-# endif /* APIC_DEFINITION */
#else /* CONFIG_X86_LOCAL_APIC */
diff --git a/arch/x86/include/asm/mach-default/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 23bf52103b8..1def6011490 100644
--- a/arch/x86/include/asm/mach-default/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -13,10 +13,10 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
CMOS_WRITE(0xa, 0xf);
local_flush_tlb();
pr_debug("1.\n");
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
+ *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
start_eip >> 4;
pr_debug("2.\n");
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
+ *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) =
start_eip & 0xf;
pr_debug("3.\n");
}
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
*/
CMOS_WRITE(0, 0xf);
- *((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
+ *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
}
static inline void __init smpboot_setup_io_apic(void)
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 2bd6b111a41..3a569665668 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -183,6 +183,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
+#define __raw_spin_is_contended __raw_spin_is_contended
static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
@@ -267,8 +268,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
- atomic_dec(count);
- if (atomic_read(count) >= 0)
+ if (atomic_dec_return(count) >= 0)
return 1;
atomic_inc(count);
return 0;
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
deleted file mode 100644
index 93d2c8667cf..00000000000
--- a/arch/x86/include/asm/summit/apic.h
+++ /dev/null
@@ -1,202 +0,0 @@
-#ifndef __ASM_SUMMIT_APIC_H
-#define __ASM_SUMMIT_APIC_H
-
-#include <asm/smp.h>
-#include <linux/gfp.h>
-
-#define esr_disable (1)
-#define NO_BALANCE_IRQ (0)
-
-/* In clustered mode, the high nibble of APIC ID is a cluster number.
- * The low nibble is a 4-bit bitmap. */
-#define XAPIC_DEST_CPUS_SHIFT 4
-#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
-#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
-
-#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
-
-static inline const cpumask_t *target_cpus(void)
-{
- /* CPU_MASK_ALL (0xff) has undefined behaviour with
- * dest_LowestPrio mode logical clustered apic interrupt routing
- * Just start on cpu 0. IRQ balancing will spread load
- */
- return &cpumask_of_cpu(0);
-}
-
-#define INT_DELIVERY_MODE (dest_LowestPrio)
-#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
- return 0;
-}
-
-/* we don't use the phys_cpu_present_map to indicate apicid presence */
-static inline unsigned long check_apicid_present(int bit)
-{
- return 1;
-}
-
-#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
-
-extern u8 cpu_2_logical_apicid[];
-
-static inline void init_apic_ldr(void)
-{
- unsigned long val, id;
- int count = 0;
- u8 my_id = (u8)hard_smp_processor_id();
- u8 my_cluster = (u8)apicid_cluster(my_id);
-#ifdef CONFIG_SMP
- u8 lid;
- int i;
-
- /* Create logical APIC IDs by counting CPUs already in cluster. */
- for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
- lid = cpu_2_logical_apicid[i];
- if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
- ++count;
- }
-#endif
- /* We only have a 4 wide bitmap in cluster mode. If a deranged
- * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
- BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
- id = my_cluster | (1UL << count);
- apic_write(APIC_DFR, APIC_DFR_VALUE);
- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- val |= SET_APIC_LOGICAL_ID(id);
- apic_write(APIC_LDR, val);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
- return 0;
-}
-
-static inline int apic_id_registered(void)
-{
- return 1;
-}
-
-static inline void setup_apic_routing(void)
-{
- printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
- nr_ioapics);
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-#ifdef CONFIG_SMP
- return apicid_2_node[hard_smp_processor_id()];
-#else
- return 0;
-#endif
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
- if (cpu >= nr_cpu_ids)
- return BAD_APICID;
- return (int)cpu_2_logical_apicid[cpu];
-#else
- return logical_smp_processor_id();
-#endif
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
- if (mps_cpu < nr_cpu_ids)
- return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
- else
- return BAD_APICID;
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
-{
- /* For clustered we don't have a good way to do this yet - hack */
- return physids_promote(0x0F);
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int apicid)
-{
- return physid_mask_of_physid(0);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
- return 1;
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
- int num_bits_set;
- int cpus_found = 0;
- int cpu;
- int apicid;
-
- num_bits_set = cpus_weight(*cpumask);
- /* Return id to all */
- if (num_bits_set >= nr_cpu_ids)
- return (int) 0xFF;
- /*
- * The cpus in the mask must all be on the apic cluster. If are not
- * on the same apicid cluster return default value of TARGET_CPUS.
- */
- cpu = first_cpu(*cpumask);
- apicid = cpu_to_logical_apicid(cpu);
- while (cpus_found < num_bits_set) {
- if (cpu_isset(cpu, *cpumask)) {
- int new_apicid = cpu_to_logical_apicid(cpu);
- if (apicid_cluster(apicid) !=
- apicid_cluster(new_apicid)){
- printk ("%s: Not a valid mask!\n", __func__);
- return 0xFF;
- }
- apicid = apicid | new_apicid;
- cpus_found++;
- }
- cpu++;
- }
- return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
- const struct cpumask *andmask)
-{
- int apicid = cpu_to_logical_apicid(0);
- cpumask_var_t cpumask;
-
- if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
- return apicid;
-
- cpumask_and(cpumask, inmask, andmask);
- cpumask_and(cpumask, cpumask, cpu_online_mask);
- apicid = cpu_mask_to_apicid(cpumask);
-
- free_cpumask_var(cpumask);
- return apicid;
-}
-
-/* cpuid returns the value latched in the HW at reset, not the APIC ID
- * register's value. For any box whose BIOS changes APIC IDs, like
- * clustered APIC systems, we must use hard_smp_processor_id.
- *
- * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
- */
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
- return hard_smp_processor_id() >> index_msb;
-}
-
-#endif /* __ASM_SUMMIT_APIC_H */
diff --git a/arch/x86/include/asm/summit/apicdef.h b/arch/x86/include/asm/summit/apicdef.h
deleted file mode 100644
index f3fbca1f61c..00000000000
--- a/arch/x86/include/asm/summit/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_SUMMIT_APICDEF_H
-#define __ASM_SUMMIT_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
- return (x>>24)&0xFF;
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
deleted file mode 100644
index a8a2c24f50c..00000000000
--- a/arch/x86/include/asm/summit/ipi.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __ASM_SUMMIT_IPI_H
-#define __ASM_SUMMIT_IPI_H
-
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
-
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
-{
- send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
-
- if (!cpus_empty(mask))
- send_IPI_mask(&mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
- send_IPI_mask(&cpu_online_map, vector);
-}
-
-#endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h
deleted file mode 100644
index 380e86c0236..00000000000
--- a/arch/x86/include/asm/summit/mpparse.h
+++ /dev/null
@@ -1,109 +0,0 @@
-#ifndef __ASM_SUMMIT_MPPARSE_H
-#define __ASM_SUMMIT_MPPARSE_H
-
-#include <asm/tsc.h>
-
-extern int use_cyclone;
-
-#ifdef CONFIG_X86_SUMMIT_NUMA
-extern void setup_summit(void);
-#else
-#define setup_summit() {}
-#endif
-
-static inline int mps_oem_check(struct mpc_table *mpc, char *oem,
- char *productid)
-{
- if (!strncmp(oem, "IBM ENSW", 8) &&
- (!strncmp(productid, "VIGIL SMP", 9)
- || !strncmp(productid, "EXA", 3)
- || !strncmp(productid, "RUTHLESS SMP", 12))){
- mark_tsc_unstable("Summit based system");
- use_cyclone = 1; /*enable cyclone-timer*/
- setup_summit();
- return 1;
- }
- return 0;
-}
-
-/* Hook from generic ACPI tables.c */
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
- if (!strncmp(oem_id, "IBM", 3) &&
- (!strncmp(oem_table_id, "SERVIGIL", 8)
- || !strncmp(oem_table_id, "EXA", 3))){
- mark_tsc_unstable("Summit based system");
- use_cyclone = 1; /*enable cyclone-timer*/
- setup_summit();
- return 1;
- }
- return 0;
-}
-
-struct rio_table_hdr {
- unsigned char version; /* Version number of this data structure */
- /* Version 3 adds chassis_num & WP_index */
- unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
- unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */
-} __attribute__((packed));
-
-struct scal_detail {
- unsigned char node_id; /* Scalability Node ID */
- unsigned long CBAR; /* Address of 1MB register space */
- unsigned char port0node; /* Node ID port connected to: 0xFF=None */
- unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
- unsigned char port1node; /* Node ID port connected to: 0xFF = None */
- unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
- unsigned char port2node; /* Node ID port connected to: 0xFF = None */
- unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */
- unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */
-} __attribute__((packed));
-
-struct rio_detail {
- unsigned char node_id; /* RIO Node ID */
- unsigned long BBAR; /* Address of 1MB register space */
- unsigned char type; /* Type of device */
- unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/
- /* For CYC: Node ID of Twister that owns this CYC */
- unsigned char port0node; /* Node ID port connected to: 0xFF=None */
- unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
- unsigned char port1node; /* Node ID port connected to: 0xFF=None */
- unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
- unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */
- /* For CYC: 0 */
- unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */
- /* = 0 : the XAPIC is not used, ie:*/
- /* ints fwded to another XAPIC */
- /* Bits1:7 Reserved */
- /* For CYC: Bits0:7 Reserved */
- unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */
- /* lower slot numbers/PCI bus numbers */
- /* For CYC: No meaning */
- unsigned char chassis_num; /* 1 based Chassis number */
- /* For LookOut WPEGs this field indicates the */
- /* Expansion Chassis #, enumerated from Boot */
- /* Node WPEG external port, then Boot Node CYC */
- /* external port, then Next Vigil chassis WPEG */
- /* external port, etc. */
- /* Shared Lookouts have only 1 chassis number (the */
- /* first one assigned) */
-} __attribute__((packed));
-
-
-typedef enum {
- CompatTwister = 0, /* Compatibility Twister */
- AltTwister = 1, /* Alternate Twister of internal 8-way */
- CompatCyclone = 2, /* Compatibility Cyclone */
- AltCyclone = 3, /* Alternate Cyclone of internal 8-way */
- CompatWPEG = 4, /* Compatibility WPEG */
- AltWPEG = 5, /* Second Planar WPEG */
- LookOutAWPEG = 6, /* LookOut WPEG */
- LookOutBWPEG = 7, /* LookOut WPEG */
-} node_type;
-
-static inline int is_WPEG(struct rio_detail *rio){
- return (rio->type == CompatWPEG || rio->type == AltWPEG ||
- rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
-}
-
-#endif /* __ASM_SUMMIT_MPPARSE_H */
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 7a80f72bec4..c00bfdbdd45 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -130,16 +130,16 @@ do { \
"movq "__percpu_arg([current_task])",%%rsi\n\t" \
__switch_canary \
"movq %P[thread_info](%%rsi),%%r8\n\t" \
- LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
"movq %%rax,%%rdi\n\t" \
- "jc ret_from_fork\n\t" \
+ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "jnz ret_from_fork\n\t" \
RESTORE_CONTEXT \
: "=a" (last) \
__switch_canary_oparam \
: [next] "S" (next), [prev] "D" (prev), \
[threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
[ti_flags] "i" (offsetof(struct thread_info, flags)), \
- [tif_fork] "i" (TIF_FORK), \
+ [_tif_fork] "i" (_TIF_FORK), \
[thread_info] "i" (offsetof(struct task_struct, stack)), \
[current_task] "m" (per_cpu_var(current_task)) \
__switch_canary_iparam \
diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
index b3e64730762..c1635d43616 100644
--- a/arch/x86/include/asm/voyager.h
+++ b/arch/x86/include/asm/voyager.h
@@ -527,3 +527,45 @@ extern void voyager_smp_intr_init(void);
#define VOYAGER_PSI_SUBREAD 2
#define VOYAGER_PSI_SUBWRITE 3
extern void voyager_cat_psi(__u8, __u16, __u8 *);
+
+/* These define the CPIs we use in linux */
+#define VIC_CPI_LEVEL0 0
+#define VIC_CPI_LEVEL1 1
+/* now the fake CPIs */
+#define VIC_TIMER_CPI 2
+#define VIC_INVALIDATE_CPI 3
+#define VIC_RESCHEDULE_CPI 4
+#define VIC_ENABLE_IRQ_CPI 5
+#define VIC_CALL_FUNCTION_CPI 6
+#define VIC_CALL_FUNCTION_SINGLE_CPI 7
+
+/* Now the QIC CPIs: Since we don't need the two initial levels,
+ * these are 2 less than the VIC CPIs */
+#define QIC_CPI_OFFSET 1
+#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
+#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
+#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
+#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
+
+#define VIC_START_FAKE_CPI VIC_TIMER_CPI
+#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
+
+/* this is the SYS_INT CPI. */
+#define VIC_SYS_INT 8
+#define VIC_CMN_INT 15
+
+/* This is the boot CPI for alternate processors. It gets overwritten
+ * by the above once the system has activated all available processors */
+#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
+#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
+
+extern asmlinkage void vic_cpi_interrupt(void);
+extern asmlinkage void vic_sys_interrupt(void);
+extern asmlinkage void vic_cmn_interrupt(void);
+extern asmlinkage void qic_timer_interrupt(void);
+extern asmlinkage void qic_invalidate_interrupt(void);
+extern asmlinkage void qic_reschedule_interrupt(void);
+extern asmlinkage void qic_enable_irq_interrupt(void);
+extern asmlinkage void qic_call_function_interrupt(void);
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 19144184983..1df35417c41 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -15,10 +15,4 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->flags);
}
-static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
-{
- regs->orig_ax = ~irq;
- do_IRQ(regs);
-}
-
#endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 37fa30bada1..24f357e7557 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -30,7 +30,7 @@ obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o
obj-y += setup.o i8259.o irqinit_$(BITS).o
obj-$(CONFIG_X86_VISWS) += visws_quirks.o
-obj-$(CONFIG_X86_32) += probe_roms_32.o
+obj-$(CONFIG_X86_32) += probe_32.o probe_roms_32.o
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
@@ -50,20 +50,20 @@ obj-y += step.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
obj-y += acpi/
-obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
+obj-y += reboot.o
obj-$(CONFIG_MCA) += mca_32.o
obj-$(CONFIG_X86_MSR) += msr.o
obj-$(CONFIG_X86_CPUID) += cpuid.o
obj-$(CONFIG_PCI) += early-quirks.o
apm-y := apm_32.o
obj-$(CONFIG_APM) += apm.o
-obj-$(CONFIG_X86_SMP) += smp.o
-obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o ipi.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP) += smpboot.o tsc_sync.o ipi.o
obj-$(CONFIG_SMP) += setup_percpu.o
obj-$(CONFIG_X86_64_SMP) += tsc_sync.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
+obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o ipi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
@@ -71,9 +71,10 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
+obj-$(CONFIG_X86_BIGSMP) += bigsmp_32.o
obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
obj-$(CONFIG_X86_ES7000) += es7000_32.o
-obj-$(CONFIG_X86_SUMMIT_NUMA) += summit_32.o
+obj-$(CONFIG_X86_SUMMIT) += summit_32.o
obj-y += vsmp_64.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_MODULES) += module_$(BITS).o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index c193ec3c695..bba162c81d5 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -42,10 +42,6 @@
#include <asm/mpspec.h>
#include <asm/smp.h>
-#ifdef CONFIG_X86_LOCAL_APIC
-# include <mach_apic.h>
-#endif
-
static int __initdata acpi_force = 0;
u32 acpi_rsdt_forced;
#ifdef CONFIG_ACPI
@@ -56,16 +52,7 @@ int acpi_disabled = 1;
EXPORT_SYMBOL(acpi_disabled);
#ifdef CONFIG_X86_64
-
-#include <asm/proto.h>
-
-#else /* X86 */
-
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <mach_apic.h>
-#include <mach_mpparse.h>
-#endif /* CONFIG_X86_LOCAL_APIC */
-
+# include <asm/proto.h>
#endif /* X86 */
#define BAD_MADT_ENTRY(entry, end) ( \
@@ -239,7 +226,8 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
madt->address);
}
- acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
+ default_acpi_madt_oem_check(madt->header.oem_id,
+ madt->header.oem_table_id);
return 0;
}
@@ -884,7 +872,7 @@ static struct {
DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];
-static int mp_find_ioapic(int gsi)
+int mp_find_ioapic(int gsi)
{
int i = 0;
@@ -899,6 +887,16 @@ static int mp_find_ioapic(int gsi)
return -1;
}
+int mp_find_ioapic_pin(int ioapic, int gsi)
+{
+ if (WARN_ON(ioapic == -1))
+ return -1;
+ if (WARN_ON(gsi > mp_ioapic_routing[ioapic].gsi_end))
+ return -1;
+
+ return gsi - mp_ioapic_routing[ioapic].gsi_base;
+}
+
static u8 __init uniq_ioapic_id(u8 id)
{
#ifdef CONFIG_X86_32
@@ -1034,7 +1032,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
ioapic = mp_find_ioapic(gsi);
if (ioapic < 0)
return;
- pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+ pin = mp_find_ioapic_pin(ioapic, gsi);
/*
* TBD: This check is for faulty timer entries, where the override
@@ -1154,7 +1152,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
return gsi;
}
- ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+ ioapic_pin = mp_find_ioapic_pin(ioapic, gsi);
#ifdef CONFIG_X86_32
if (ioapic_renumber_irq)
@@ -1243,7 +1241,7 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
ioapic = mp_find_ioapic(gsi);
mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
- mp_irq.dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
+ mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
save_mp_irq(&mp_irq);
#endif
@@ -1370,7 +1368,7 @@ static void __init acpi_process_madt(void)
if (!error) {
acpi_lapic = 1;
-#ifdef CONFIG_X86_GENERICARCH
+#ifdef CONFIG_X86_BIGSMP
generic_bigsmp_probe();
#endif
/*
@@ -1382,9 +1380,8 @@ static void __init acpi_process_madt(void)
acpi_ioapic = 1;
smp_found_config = 1;
-#ifdef CONFIG_X86_32
- setup_apic_routing();
-#endif
+ if (apic->setup_apic_routing)
+ apic->setup_apic_routing();
}
}
if (error == -EINVAL) {
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 383d827eef8..8bd801db24d 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -1,7 +1,7 @@
/*
* Local APIC handling, local APIC timers
*
- * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ * (c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
*
* Fixes
* Maciej W. Rozycki : Bits for genuine 82489DX APICs;
@@ -14,60 +14,62 @@
* Mikael Pettersson : PM converted to driver model.
*/
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/interrupt.h>
-#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>
-#include <linux/sysdev.h>
-#include <linux/ioport.h>
-#include <linux/cpu.h>
-#include <linux/clockchips.h>
+#include <linux/mc146818rtc.h>
#include <linux/acpi_pmtmr.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/bootmem.h>
+#include <linux/ftrace.h>
+#include <linux/ioport.h>
#include <linux/module.h>
-#include <linux/dmi.h>
+#include <linux/sysdev.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
#include <linux/dmar.h>
-#include <linux/ftrace.h>
-#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/dmi.h>
#include <linux/nmi.h>
-#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
-#include <asm/atomic.h>
-#include <asm/mtrr.h>
-#include <asm/mpspec.h>
-#include <asm/desc.h>
#include <asm/arch_hooks.h>
-#include <asm/hpet.h>
#include <asm/pgalloc.h>
+#include <asm/genapic.h>
+#include <asm/atomic.h>
+#include <asm/mpspec.h>
#include <asm/i8253.h>
-#include <asm/idle.h>
+#include <asm/i8259.h>
#include <asm/proto.h>
#include <asm/apic.h>
-#include <asm/i8259.h>
+#include <asm/desc.h>
+#include <asm/hpet.h>
+#include <asm/idle.h>
+#include <asm/mtrr.h>
#include <asm/smp.h>
-#include <mach_apic.h>
-#include <mach_apicdef.h>
-#include <mach_ipi.h>
-
-/*
- * Sanity check
- */
-#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
-# error SPURIOUS_APIC_VECTOR definition error
-#endif
-
unsigned int num_processors;
+
unsigned disabled_cpus __cpuinitdata;
+
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
-EXPORT_SYMBOL(boot_cpu_physical_apicid);
+
+/*
+ * The highest APIC ID seen during enumeration.
+ *
+ * This determines the messaging protocol we can use: if all APIC IDs
+ * are in the 0 ... 7 range, then we can use logical addressing which
+ * has some performance advantages (better broadcasting).
+ *
+ * If there's an APIC ID of 8 or above, we use physical addressing.
+ */
unsigned int max_physical_apicid;
-/* Bitmask of physically existing CPUs */
+/*
+ * Bitmask of physically existing CPUs:
+ */
physid_mask_t phys_cpu_present_map;
/*
@@ -475,7 +477,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
static void lapic_timer_broadcast(const struct cpumask *mask)
{
#ifdef CONFIG_SMP
- send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+ apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}
@@ -1009,11 +1011,11 @@ int __init verify_local_APIC(void)
*/
reg0 = apic_read(APIC_ID);
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
- apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
+ apic_write(APIC_ID, reg0 ^ apic->apic_id_mask);
reg1 = apic_read(APIC_ID);
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
apic_write(APIC_ID, reg0);
- if (reg1 != (reg0 ^ APIC_ID_MASK))
+ if (reg1 != (reg0 ^ apic->apic_id_mask))
return 0;
/*
@@ -1107,7 +1109,7 @@ static void __cpuinit lapic_setup_esr(void)
return;
}
- if (esr_disable) {
+ if (apic->disable_esr) {
/*
* Something untraceable is creating bad interrupts on
* secondary quads ... for the moment, just leave the
@@ -1149,15 +1151,13 @@ void __cpuinit setup_local_APIC(void)
int i, j;
if (disable_apic) {
-#ifdef CONFIG_X86_IO_APIC
- disable_ioapic_setup();
-#endif
+ arch_disable_smp_support();
return;
}
#ifdef CONFIG_X86_32
/* Pound the ESR really hard over the head with a big hammer - mbligh */
- if (lapic_is_integrated() && esr_disable) {
+ if (lapic_is_integrated() && apic->disable_esr) {
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
@@ -1171,7 +1171,7 @@ void __cpuinit setup_local_APIC(void)
* Double-check whether this APIC is really registered.
* This is meaningless in clustered apic mode, so we skip it.
*/
- if (!apic_id_registered())
+ if (!apic->apic_id_registered())
BUG();
/*
@@ -1179,7 +1179,7 @@ void __cpuinit setup_local_APIC(void)
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
- init_apic_ldr();
+ apic->init_apic_ldr();
/*
* Set Task Priority to 'accept all'. We never change this
@@ -1625,7 +1625,7 @@ int __init APIC_init_uniprocessor(void)
enable_IR_x2apic();
#endif
#ifdef CONFIG_X86_64
- setup_apic_routing();
+ default_setup_apic_routing();
#endif
verify_local_APIC();
@@ -1763,7 +1763,8 @@ void __init connect_bsp_APIC(void)
outb(0x01, 0x23);
}
#endif
- enable_apic_mode();
+ if (apic->enable_apic_mode)
+ apic->enable_apic_mode();
}
/**
@@ -1901,7 +1902,7 @@ void __cpuinit generic_processor_info(int apicid, int version)
}
#endif
-#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
+#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
#endif
@@ -1910,11 +1911,30 @@ void __cpuinit generic_processor_info(int apicid, int version)
set_cpu_present(cpu, true);
}
-#ifdef CONFIG_X86_64
int hard_smp_processor_id(void)
{
return read_apic_id();
}
+
+void default_init_apic_ldr(void)
+{
+ unsigned long val;
+
+ apic_write(APIC_DFR, APIC_DFR_VALUE);
+ val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+ val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
+ apic_write(APIC_LDR, val);
+}
+
+#ifdef CONFIG_X86_32
+int default_apicid_to_node(int logical_apicid)
+{
+#ifdef CONFIG_SMP
+ return apicid_2_node[hard_smp_processor_id()];
+#else
+ return 0;
+#endif
+}
#endif
/*
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 98807bb095a..37ba5f85b71 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -301,7 +301,7 @@ extern int (*console_blank_hook)(int);
*/
#define APM_ZERO_SEGS
-#include "apm.h"
+#include <asm/apm.h>
/*
* Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend.
diff --git a/arch/x86/kernel/bigsmp_32.c b/arch/x86/kernel/bigsmp_32.c
new file mode 100644
index 00000000000..47a62f46afd
--- /dev/null
+++ b/arch/x86/kernel/bigsmp_32.c
@@ -0,0 +1,266 @@
+/*
+ * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
+ * Drives the local APIC in "clustered mode".
+ */
+#define APIC_DEFINITION 1
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/genapic.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <asm/ipi.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/smp.h>
+
+
+static inline unsigned bigsmp_get_apic_id(unsigned long x)
+{
+ return (x >> 24) & 0xFF;
+}
+
+#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
+
+static inline int bigsmp_apic_id_registered(void)
+{
+ return 1;
+}
+
+static inline const cpumask_t *bigsmp_target_cpus(void)
+{
+#ifdef CONFIG_SMP
+ return &cpu_online_map;
+#else
+ return &cpumask_of_cpu(0);
+#endif
+}
+
+#define APIC_DFR_VALUE (APIC_DFR_FLAT)
+
+static inline unsigned long
+bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+ return 0;
+}
+
+static inline unsigned long bigsmp_check_apicid_present(int bit)
+{
+ return 1;
+}
+
+static inline unsigned long calculate_ldr(int cpu)
+{
+ unsigned long val, id;
+ val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+ id = xapic_phys_to_log_apicid(cpu);
+ val |= SET_APIC_LOGICAL_ID(id);
+ return val;
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116). So here it goes...
+ */
+static inline void bigsmp_init_apic_ldr(void)
+{
+ unsigned long val;
+ int cpu = smp_processor_id();
+
+ apic_write(APIC_DFR, APIC_DFR_VALUE);
+ val = calculate_ldr(cpu);
+ apic_write(APIC_LDR, val);
+}
+
+static inline void bigsmp_setup_apic_routing(void)
+{
+ printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
+ "Physflat", nr_ioapics);
+}
+
+static inline int bigsmp_apicid_to_node(int logical_apicid)
+{
+ return apicid_2_node[hard_smp_processor_id()];
+}
+
+static inline int bigsmp_cpu_present_to_apicid(int mps_cpu)
+{
+ if (mps_cpu < nr_cpu_ids)
+ return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
+
+ return BAD_APICID;
+}
+
+static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
+{
+ return physid_mask_of_physid(phys_apicid);
+}
+
+extern u8 cpu_2_logical_apicid[];
+/* Mapping from cpu number to logical apicid */
+static inline int bigsmp_cpu_to_logical_apicid(int cpu)
+{
+ if (cpu >= nr_cpu_ids)
+ return BAD_APICID;
+ return cpu_physical_id(cpu);
+}
+
+static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+ /* For clustered we don't have a good way to do this yet - hack */
+ return physids_promote(0xFFL);
+}
+
+static inline void bigsmp_setup_portio_remap(void)
+{
+}
+
+static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+ return 1;
+}
+
+/* As we are using a single CPU as the destination, pick only one CPU here */
+static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+ return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
+}
+
+static inline unsigned int
+bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
+{
+ int cpu;
+
+ /*
+ * We're using fixed IRQ delivery, can only return one phys APIC ID.
+ * May as well be the first.
+ */
+ for_each_cpu_and(cpu, cpumask, andmask) {
+ if (cpumask_test_cpu(cpu, cpu_online_mask))
+ break;
+ }
+ if (cpu < nr_cpu_ids)
+ return bigsmp_cpu_to_logical_apicid(cpu);
+
+ return BAD_APICID;
+}
+
+static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+ return cpuid_apic >> index_msb;
+}
+
+static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+ default_send_IPI_mask_sequence_phys(mask, vector);
+}
+
+static inline void bigsmp_send_IPI_allbutself(int vector)
+{
+ default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
+}
+
+static inline void bigsmp_send_IPI_all(int vector)
+{
+ bigsmp_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static int dmi_bigsmp; /* can be set by dmi scanners */
+
+static int hp_ht_bigsmp(const struct dmi_system_id *d)
+{
+ printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
+ dmi_bigsmp = 1;
+ return 0;
+}
+
+
+static const struct dmi_system_id bigsmp_dmi_table[] = {
+ { hp_ht_bigsmp, "HP ProLiant DL760 G2",
+ { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
+ DMI_MATCH(DMI_BIOS_VERSION, "P44-"),}
+ },
+
+ { hp_ht_bigsmp, "HP ProLiant DL740",
+ { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
+ DMI_MATCH(DMI_BIOS_VERSION, "P47-"),}
+ },
+ { }
+};
+
+static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+ cpus_clear(*retmask);
+ cpu_set(cpu, *retmask);
+}
+
+static int probe_bigsmp(void)
+{
+ if (def_to_bigsmp)
+ dmi_bigsmp = 1;
+ else
+ dmi_check_system(bigsmp_dmi_table);
+ return dmi_bigsmp;
+}
+
+struct genapic apic_bigsmp = {
+
+ .name = "bigsmp",
+ .probe = probe_bigsmp,
+ .acpi_madt_oem_check = NULL,
+ .apic_id_registered = bigsmp_apic_id_registered,
+
+ .irq_delivery_mode = dest_Fixed,
+ /* phys delivery to target CPU: */
+ .irq_dest_mode = 0,
+
+ .target_cpus = bigsmp_target_cpus,
+ .disable_esr = 1,
+ .dest_logical = 0,
+ .check_apicid_used = bigsmp_check_apicid_used,
+ .check_apicid_present = bigsmp_check_apicid_present,
+
+ .vector_allocation_domain = bigsmp_vector_allocation_domain,
+ .init_apic_ldr = bigsmp_init_apic_ldr,
+
+ .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
+ .setup_apic_routing = bigsmp_setup_apic_routing,
+ .multi_timer_check = NULL,
+ .apicid_to_node = bigsmp_apicid_to_node,
+ .cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid,
+ .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
+ .apicid_to_cpu_present = bigsmp_apicid_to_cpu_present,
+ .setup_portio_remap = NULL,
+ .check_phys_apicid_present = bigsmp_check_phys_apicid_present,
+ .enable_apic_mode = NULL,
+ .phys_pkg_id = bigsmp_phys_pkg_id,
+ .mps_oem_check = NULL,
+
+ .get_apic_id = bigsmp_get_apic_id,
+ .set_apic_id = NULL,
+ .apic_id_mask = 0xFF << 24,
+
+ .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and,
+
+ .send_IPI_mask = bigsmp_send_IPI_mask,
+ .send_IPI_mask_allbutself = NULL,
+ .send_IPI_allbutself = bigsmp_send_IPI_allbutself,
+ .send_IPI_all = bigsmp_send_IPI_all,
+ .send_IPI_self = default_send_IPI_self,
+
+ .wakeup_cpu = NULL,
+ .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+ .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+
+ .wait_for_init_deassert = default_wait_for_init_deassert,
+
+ .smp_callin_clear_local_apic = NULL,
+ .store_NMI_vector = NULL,
+ .inquire_remote_apic = default_inquire_remote_apic,
+};
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 2cf23634b6d..e48640cfac0 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -7,7 +7,7 @@
#include <asm/pat.h>
#include <asm/processor.h>
-#include <mach_apic.h>
+#include <asm/genapic.h>
struct cpuid_bit {
u16 feature;
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
*/
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
unsigned int eax, ebx, ecx, edx, sub_index;
unsigned int ht_mask_width, core_plus_mask_width;
unsigned int core_select_mask, core_level_siblings;
@@ -116,22 +116,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
-#ifdef CONFIG_X86_32
- c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
+ c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
& core_select_mask;
- c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+ c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
/*
* Reinit the apicid, now that we have extended initial_apicid.
*/
- c->apicid = phys_pkg_id(c->initial_apicid, 0);
-#else
- c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
- c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
- /*
- * Reinit the apicid, now that we have extended initial_apicid.
- */
- c->apicid = phys_pkg_id(0);
-#endif
+ c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+
c->x86_max_cores = (core_level_siblings / smp_num_siblings);
@@ -143,37 +135,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
return;
#endif
}
-
-#ifdef CONFIG_X86_PAT
-void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
-{
- if (!cpu_has_pat)
- pat_disable("PAT not supported by CPU.");
-
- switch (c->x86_vendor) {
- case X86_VENDOR_INTEL:
- /*
- * There is a known erratum on Pentium III and Core Solo
- * and Core Duo CPUs.
- * " Page with PAT set to WC while associated MTRR is UC
- * may consolidate to UC "
- * Because of this erratum, it is better to stick with
- * setting WC in MTRR rather than using PAT on these CPUs.
- *
- * Enable PAT WC only on P4, Core 2 or later CPUs.
- */
- if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
- return;
-
- pat_disable("PAT WC disabled due to known CPU erratum.");
- return;
-
- case X86_VENDOR_AMD:
- case X86_VENDOR_CENTAUR:
- case X86_VENDOR_TRANSMETA:
- return;
- }
-
- pat_disable("PAT disabled. Not yet verified on this CPU type.");
-}
-#endif
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7c878f6aa91..ff4d7b9e32e 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -12,7 +12,7 @@
# include <asm/cacheflush.h>
#endif
-#include <mach_apic.h>
+#include <asm/genapic.h>
#include "cpu.h"
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 260fe4cb2c8..e8f4a386bd9 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -26,7 +26,7 @@
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
-#include <mach_apic.h>
+#include <asm/genapic.h>
#include <asm/genapic.h>
#include <asm/uv/uv.h>
#endif
@@ -226,6 +226,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
#endif
/*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software. Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+ u32 feature;
+ u32 level;
+};
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+ { X86_FEATURE_MWAIT, 0x00000005 },
+ { X86_FEATURE_DCA, 0x00000009 },
+ { X86_FEATURE_XSAVE, 0x0000000d },
+ { 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+ const struct cpuid_dependent_feature *df;
+ for (df = cpuid_dependent_features; df->feature; df++) {
+ /*
+ * Note: cpuid_level is set to -1 if unavailable, but
+ * extended_cpuid_level is set to 0 if unavailable
+ * and the legitimate extended levels are all negative
+ * when signed; hence the weird messing around with
+ * signs here...
+ */
+ if (cpu_has(c, df->feature) &&
+ ((s32)df->level < 0 ?
+ (u32)df->level > (u32)c->extended_cpuid_level :
+ (s32)df->level > (s32)c->cpuid_level)) {
+ clear_cpu_cap(c, df->feature);
+ if (warn)
+ printk(KERN_WARNING
+ "CPU: CPU feature %s disabled "
+ "due to lack of CPUID level 0x%x\n",
+ x86_cap_flags[df->feature],
+ df->level);
+ }
+ }
+}
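The sign trick in the comparison above works because extended CPUID levels start at 0x80000000, which is negative when viewed as an s32, while standard levels are small positive values. A standalone illustration of the same test (level_available() is an invented helper, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Returns 1 if the CPU's reported CPUID levels cover the required leaf. */
static int level_available(uint32_t required, int32_t cpuid_level,
			   uint32_t extended_cpuid_level)
{
	if ((int32_t)required < 0)	/* 0x8000xxxx: extended range */
		return required <= extended_cpuid_level;
	return (int32_t)required <= cpuid_level;
}

int main(void)
{
	/* XSAVE needs leaf 0x0000000d; a CPU capped at 0x0000000a lacks it: */
	printf("%d\n", level_available(0x0000000d, 0x0000000a, 0x80000008));
	/* An extended leaf that is covered by extended_cpuid_level: */
	printf("%d\n", level_available(0x80000008, 0x0000000a, 0x80000008));
	return 0;
}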
+
+/*
* Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set it;
* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
@@ -407,11 +450,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
}
index_msb = get_count_order(smp_num_siblings);
-#ifdef CONFIG_X86_64
- c->phys_proc_id = phys_pkg_id(index_msb);
-#else
- c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-#endif
+ c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
smp_num_siblings = smp_num_siblings / c->x86_max_cores;
@@ -419,13 +458,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
core_bits = get_count_order(c->x86_max_cores);
-#ifdef CONFIG_X86_64
- c->cpu_core_id = phys_pkg_id(index_msb) &
- ((1 << core_bits) - 1);
-#else
- c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+ c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
((1 << core_bits) - 1);
-#endif
}
out:
@@ -594,11 +628,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
if (this_cpu->c_early_init)
this_cpu->c_early_init(c);
- validate_pat_support(c);
-
#ifdef CONFIG_SMP
c->cpu_index = boot_cpu_id;
#endif
+ filter_cpuid_features(c, false);
}
void __init early_cpu_init(void)
@@ -661,7 +694,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
- c->apicid = phys_pkg_id(c->initial_apicid, 0);
+ c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
c->apicid = c->initial_apicid;
# endif
@@ -708,7 +741,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
this_cpu->c_identify(c);
#ifdef CONFIG_X86_64
- c->apicid = phys_pkg_id(0);
+ c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif
/*
@@ -732,6 +765,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
* we do "generic changes."
*/
+ /* Filter out anything that depends on CPUID levels we don't have */
+ filter_cpuid_features(c, true);
+
/* If the model name is still unset, do table lookup. */
if (!c->x86_model_id[0]) {
char *p;
@@ -1062,22 +1098,19 @@ void __cpuinit cpu_init(void)
*/
if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
arch_kgdb_ops.correct_hw_break();
- else {
+ else
#endif
- /*
- * Clear all 6 debug registers:
- */
-
- set_debugreg(0UL, 0);
- set_debugreg(0UL, 1);
- set_debugreg(0UL, 2);
- set_debugreg(0UL, 3);
- set_debugreg(0UL, 6);
- set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
- /* If the kgdb is connected no debug regs should be altered. */
+ {
+ /*
+ * Clear all 6 debug registers:
+ */
+ set_debugreg(0UL, 0);
+ set_debugreg(0UL, 1);
+ set_debugreg(0UL, 2);
+ set_debugreg(0UL, 3);
+ set_debugreg(0UL, 6);
+ set_debugreg(0UL, 7);
}
-#endif
fpu_init();
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 5c28b37dea1..fb039cd345d 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -939,10 +939,25 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
free_cpumask_var(data->acpi_data.shared_cpu_map);
}
+static int get_transition_latency(struct powernow_k8_data *data)
+{
+ int max_latency = 0;
+ int i;
+ for (i = 0; i < data->acpi_data.state_count; i++) {
+ int cur_latency = data->acpi_data.states[i].transition_latency
+ + data->acpi_data.states[i].bus_master_latency;
+ if (cur_latency > max_latency)
+ max_latency = cur_latency;
+ }
+ /* value in usecs, needs to be in nanoseconds */
+ return 1000 * max_latency;
+}
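In other words, get_transition_latency() takes the worst-case transition-plus-bus-master latency over all ACPI _PSS states and converts microseconds to nanoseconds. A standalone sketch with invented state values:

/* Illustration only; example_state and the numbers below are made up. */
struct example_state { int transition_latency; int bus_master_latency; };

static int example_max_latency_ns(const struct example_state *s, int count)
{
	int max_us = 0;
	int i;

	for (i = 0; i < count; i++) {
		int us = s[i].transition_latency + s[i].bus_master_latency;
		if (us > max_us)
			max_us = us;
	}
	return max_us * 1000;	/* usecs -> nanoseconds */
}

/* e.g. states of (10+3), (25+5) and (15+2) us give max 30 us -> 30000 ns. */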
+
#else
static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
+static int get_transition_latency(struct powernow_k8_data *data) { return 0; }
#endif /* CONFIG_X86_POWERNOW_K8_ACPI */
/* Take a frequency, and issue the fid/vid transition command */
@@ -1173,7 +1188,13 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
if (rc) {
goto err_out;
}
- }
+ /* Take a crude guess here.
+ * That guess was in microseconds, so multiply by 1000 */
+ pol->cpuinfo.transition_latency = (
+ ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
+ ((1 << data->irt) * 30)) * 1000;
+ } else /* ACPI _PSS objects available */
+ pol->cpuinfo.transition_latency = get_transition_latency(data);
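/*
 * Worked example for the non-ACPI guess above (numbers invented for
 * illustration, and VST_UNITS_20US assumed to be 20 as its name suggests):
 * with rvo = 1, vstable = 5 and irt = 4 the estimate is
 *   ((1 + 8) * 5 * 20 + (1 << 4) * 30) * 1000
 *   = (900 + 480) * 1000 = 1380000 ns, i.e. roughly 1.4 ms.
 */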
/* only run on specific CPU from here on */
oldmask = current->cpus_allowed;
@@ -1204,11 +1225,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
data->available_cores = pol->cpus;
- /* Take a crude guess here.
- * That guess was in microseconds, so multiply with 1000 */
- pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
- + (3 * (1 << data->irt) * 10)) * 1000;
-
if (cpu_family == CPU_HW_PSTATE)
pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
else
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 24ff26a38ad..1f137a87d4b 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -24,7 +24,7 @@
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
-#include <mach_apic.h>
+#include <asm/genapic.h>
#endif
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
@@ -63,6 +63,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
}
+ /*
+ * There is a known erratum on Pentium III and Core Solo
+ * and Core Duo CPUs.
+ * " Page with PAT set to WC while associated MTRR is UC
+ * may consolidate to UC "
+ * Because of this erratum, it is better to stick with
+ * setting WC in MTRR rather than using PAT on these CPUs.
+ *
+ * Enable PAT WC only on P4, Core 2 or later CPUs.
+ */
+ if (c->x86 == 6 && c->x86_model < 15)
+ clear_cpu_cap(c, X86_FEATURE_PAT);
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 11b93cabdf7..ad7f2a696f4 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -28,7 +28,7 @@
#include <asm/reboot.h>
#include <asm/virtext.h>
-#include <mach_ipi.h>
+#include <asm/genapic.h>
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 3de7b5710dc..e9920683145 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -812,7 +812,7 @@ ENDPROC(name)
#define BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(name, nr, smp_##name)
/* The include is where all of the SMP etc. interrupts come from */
-#include "entry_arch.h"
+#include <asm/entry_arch.h>
ENTRY(coprocessor_error)
RING0_INT_FRAME
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 586bed67755..fbcf96b295f 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -410,6 +410,8 @@ END(save_paranoid)
ENTRY(ret_from_fork)
DEFAULT_FRAME
+ LOCK ; btr $TIF_FORK,TI_flags(%r8)
+
push kernel_eflags(%rip)
CFI_ADJUST_CFA_OFFSET 8
popf # reset kernel eflags
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c
index 53699c931ad..d6184c12a18 100644
--- a/arch/x86/kernel/es7000_32.c
+++ b/arch/x86/kernel/es7000_32.c
@@ -40,7 +40,6 @@
#include <asm/smp.h>
#include <asm/atomic.h>
#include <asm/apicdef.h>
-#include <mach_mpparse.h>
#include <asm/genapic.h>
#include <asm/setup.h>
@@ -182,20 +181,16 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
return 0;
}
-static void noop_wait_for_deassert(atomic_t *deassert_not_used)
-{
-}
-
static int __init es7000_update_genapic(void)
{
- genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
+ apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
/* MPENTIUMIII */
if (boot_cpu_data.x86 == 6 &&
(boot_cpu_data.x86_model >= 7 && boot_cpu_data.x86_model <= 11)) {
es7000_update_genapic_to_cluster();
- genapic->wait_for_init_deassert = noop_wait_for_deassert;
- genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
+ apic->wait_for_init_deassert = NULL;
+ apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
}
return 0;
@@ -359,20 +354,449 @@ es7000_mip_write(struct mip_reg *mip_reg)
return status;
}
-void __init
-es7000_sw_apic(void)
-{
- if (es7000_plat) {
- int mip_status;
- struct mip_reg es7000_mip_reg;
-
- printk("ES7000: Enabling APIC mode.\n");
- memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
- es7000_mip_reg.off_0 = MIP_SW_APIC;
- es7000_mip_reg.off_38 = (MIP_VALID);
- while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0)
- printk("es7000_sw_apic: command failed, status = %x\n",
- mip_status);
+void __init es7000_enable_apic_mode(void)
+{
+ struct mip_reg es7000_mip_reg;
+ int mip_status;
+
+ if (!es7000_plat)
return;
+
+ printk("ES7000: Enabling APIC mode.\n");
+ memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
+ es7000_mip_reg.off_0 = MIP_SW_APIC;
+ es7000_mip_reg.off_38 = MIP_VALID;
+
+ while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0) {
+ printk("es7000_enable_apic_mode: command failed, status = %x\n",
+ mip_status);
+ }
+}
+
+/*
+ * APIC driver for the Unisys ES7000 chipset.
+ */
+#define APIC_DEFINITION 1
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/genapic.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/smp.h>
+#include <asm/ipi.h>
+
+#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
+#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
+#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
+
+#define APIC_DFR_VALUE (APIC_DFR_FLAT)
+
+extern void es7000_enable_apic_mode(void);
+extern int apic_version [MAX_APICS];
+extern u8 cpu_2_logical_apicid[];
+extern unsigned int boot_cpu_physical_apicid;
+
+extern int parse_unisys_oem (char *oemptr);
+extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
+extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
+extern void setup_unisys(void);
+
+#define apicid_cluster(apicid) (apicid & 0xF0)
+#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
+
+static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+ /* Careful. Some cpus do not strictly honor the set of cpus
+ * specified in the interrupt destination when using lowest
+ * priority interrupt delivery mode.
+ *
+ * In particular there was a hyperthreading cpu observed to
+ * deliver interrupts to the wrong hyperthread when only one
+ * hyperthread was specified in the interrupt destination.
+ */
+ *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+}
+
+
+static void es7000_wait_for_init_deassert(atomic_t *deassert)
+{
+#ifndef CONFIG_ES7000_CLUSTERED_APIC
+ while (!atomic_read(deassert))
+ cpu_relax();
+#endif
+ return;
+}
+
+static unsigned int es7000_get_apic_id(unsigned long x)
+{
+ return (x >> 24) & 0xFF;
+}
+
+#ifdef CONFIG_ACPI
+static int es7000_check_dsdt(void)
+{
+ struct acpi_table_header header;
+
+ if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
+ !strncmp(header.oem_id, "UNISYS", 6))
+ return 1;
+ return 0;
+}
+#endif
+
+static void es7000_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+ default_send_IPI_mask_sequence_phys(mask, vector);
+}
+
+static void es7000_send_IPI_allbutself(int vector)
+{
+ default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
+}
+
+static void es7000_send_IPI_all(int vector)
+{
+ es7000_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static int es7000_apic_id_registered(void)
+{
+ return 1;
+}
+
+static const cpumask_t *target_cpus_cluster(void)
+{
+ return &CPU_MASK_ALL;
+}
+
+static const cpumask_t *es7000_target_cpus(void)
+{
+ return &cpumask_of_cpu(smp_processor_id());
+}
+
+static unsigned long
+es7000_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+ return 0;
+}
+static unsigned long es7000_check_apicid_present(int bit)
+{
+ return physid_isset(bit, phys_cpu_present_map);
+}
+
+static unsigned long calculate_ldr(int cpu)
+{
+ unsigned long id = xapic_phys_to_log_apicid(cpu);
+
+ return (SET_APIC_LOGICAL_ID(id));
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LdR and TPR before enabling
+ * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116). So here it goes...
+ */
+static void es7000_init_apic_ldr_cluster(void)
+{
+ unsigned long val;
+ int cpu = smp_processor_id();
+
+ apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
+ val = calculate_ldr(cpu);
+ apic_write(APIC_LDR, val);
+}
+
+static void es7000_init_apic_ldr(void)
+{
+ unsigned long val;
+ int cpu = smp_processor_id();
+
+ apic_write(APIC_DFR, APIC_DFR_VALUE);
+ val = calculate_ldr(cpu);
+ apic_write(APIC_LDR, val);
+}
+
+static void es7000_setup_apic_routing(void)
+{
+ int apicid = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
+ printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
+ (apic_version[apicid] == 0x14) ?
+ "Physical Cluster" : "Logical Cluster",
+ nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
+}
+
+static int es7000_apicid_to_node(int logical_apicid)
+{
+ return 0;
+}
+
+
+static int es7000_cpu_present_to_apicid(int mps_cpu)
+{
+ if (!mps_cpu)
+ return boot_cpu_physical_apicid;
+ else if (mps_cpu < nr_cpu_ids)
+ return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
+ else
+ return BAD_APICID;
+}
+
+static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid)
+{
+ static int id = 0;
+ physid_mask_t mask;
+
+ mask = physid_mask_of_physid(id);
+ ++id;
+
+ return mask;
+}
+
+/* Mapping from cpu number to logical apicid */
+static int es7000_cpu_to_logical_apicid(int cpu)
+{
+#ifdef CONFIG_SMP
+ if (cpu >= nr_cpu_ids)
+ return BAD_APICID;
+ return (int)cpu_2_logical_apicid[cpu];
+#else
+ return logical_smp_processor_id();
+#endif
+}
+
+static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+ /* For clustered we don't have a good way to do this yet - hack */
+ return physids_promote(0xff);
+}
+
+static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
+{
+ boot_cpu_physical_apicid = read_apic_id();
+ return (1);
+}
+
+static unsigned int
+es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
+{
+ int cpus_found = 0;
+ int num_bits_set;
+ int apicid;
+ int cpu;
+
+ num_bits_set = cpumask_weight(cpumask);
+ /* Return id to all */
+ if (num_bits_set == nr_cpu_ids)
+ return 0xFF;
+ /*
+ * The cpus in the mask must all be in the same APIC cluster. If they are
+ * not on the same apicid cluster, return the default value of target_cpus():
+ */
+ cpu = cpumask_first(cpumask);
+ apicid = es7000_cpu_to_logical_apicid(cpu);
+
+ while (cpus_found < num_bits_set) {
+ if (cpumask_test_cpu(cpu, cpumask)) {
+ int new_apicid = es7000_cpu_to_logical_apicid(cpu);
+
+ if (apicid_cluster(apicid) !=
+ apicid_cluster(new_apicid)) {
+ printk("%s: Not a valid mask!\n", __func__);
+
+ return 0xFF;
+ }
+ apicid = new_apicid;
+ cpus_found++;
+ }
+ cpu++;
}
+ return apicid;
}
+
+static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+ int cpus_found = 0;
+ int num_bits_set;
+ int apicid;
+ int cpu;
+
+ num_bits_set = cpus_weight(*cpumask);
+ /* Return id to all */
+ if (num_bits_set == nr_cpu_ids)
+ return es7000_cpu_to_logical_apicid(0);
+ /*
+ * The cpus in the mask must all be in the same APIC cluster. If they are
+ * not on the same apicid cluster, return the default value of target_cpus():
+ */
+ cpu = first_cpu(*cpumask);
+ apicid = es7000_cpu_to_logical_apicid(cpu);
+ while (cpus_found < num_bits_set) {
+ if (cpu_isset(cpu, *cpumask)) {
+ int new_apicid = es7000_cpu_to_logical_apicid(cpu);
+
+ if (apicid_cluster(apicid) !=
+ apicid_cluster(new_apicid)) {
+ printk("%s: Not a valid mask!\n", __func__);
+
+ return es7000_cpu_to_logical_apicid(0);
+ }
+ apicid = new_apicid;
+ cpus_found++;
+ }
+ cpu++;
+ }
+ return apicid;
+}
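The cluster test above works because apicid_cluster() masks off the low nibble of the logical APIC ID, so IDs that share the high nibble are in the same cluster. For instance (same_cluster() is an invented helper):

static int same_cluster(int apicid_a, int apicid_b)
{
	return apicid_cluster(apicid_a) == apicid_cluster(apicid_b);
}

/* same_cluster(0x23, 0x2e) -> 1 (both cluster 0x20),
   same_cluster(0x23, 0x31) -> 0 (clusters 0x20 vs 0x30). */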
+
+static unsigned int
+es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+ const struct cpumask *andmask)
+{
+ int apicid = es7000_cpu_to_logical_apicid(0);
+ cpumask_var_t cpumask;
+
+ if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+ return apicid;
+
+ cpumask_and(cpumask, inmask, andmask);
+ cpumask_and(cpumask, cpumask, cpu_online_mask);
+ apicid = es7000_cpu_mask_to_apicid(cpumask);
+
+ free_cpumask_var(cpumask);
+
+ return apicid;
+}
+
+static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+ return cpuid_apic >> index_msb;
+}
+
+void __init es7000_update_genapic_to_cluster(void)
+{
+ apic->target_cpus = target_cpus_cluster;
+ apic->irq_delivery_mode = INT_DELIVERY_MODE_CLUSTER;
+ apic->irq_dest_mode = INT_DEST_MODE_CLUSTER;
+
+ apic->init_apic_ldr = es7000_init_apic_ldr_cluster;
+
+ apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster;
+}
+
+static int probe_es7000(void)
+{
+ /* probed later in mptable/ACPI hooks */
+ return 0;
+}
+
+static __init int
+es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
+{
+ if (mpc->oemptr) {
+ struct mpc_oemtable *oem_table =
+ (struct mpc_oemtable *)mpc->oemptr;
+
+ if (!strncmp(oem, "UNISYS", 6))
+ return parse_unisys_oem((char *)oem_table);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+/* Hook from generic ACPI tables.c */
+static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+ unsigned long oem_addr = 0;
+ int check_dsdt;
+ int ret = 0;
+
+ /* check dsdt at first to avoid clear fix_map for oem_addr */
+ check_dsdt = es7000_check_dsdt();
+
+ if (!find_unisys_acpi_oem_table(&oem_addr)) {
+ if (check_dsdt)
+ ret = parse_unisys_oem((char *)oem_addr);
+ else {
+ setup_unisys();
+ ret = 1;
+ }
+ /*
+ * we need to unmap it
+ */
+ unmap_unisys_acpi_oem_table(oem_addr);
+ }
+ return ret;
+}
+#else
+static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+ return 0;
+}
+#endif
+
+
+struct genapic apic_es7000 = {
+
+ .name = "es7000",
+ .probe = probe_es7000,
+ .acpi_madt_oem_check = es7000_acpi_madt_oem_check,
+ .apic_id_registered = es7000_apic_id_registered,
+
+ .irq_delivery_mode = dest_Fixed,
+ /* phys delivery to target CPUs: */
+ .irq_dest_mode = 0,
+
+ .target_cpus = es7000_target_cpus,
+ .disable_esr = 1,
+ .dest_logical = 0,
+ .check_apicid_used = es7000_check_apicid_used,
+ .check_apicid_present = es7000_check_apicid_present,
+
+ .vector_allocation_domain = es7000_vector_allocation_domain,
+ .init_apic_ldr = es7000_init_apic_ldr,
+
+ .ioapic_phys_id_map = es7000_ioapic_phys_id_map,
+ .setup_apic_routing = es7000_setup_apic_routing,
+ .multi_timer_check = NULL,
+ .apicid_to_node = es7000_apicid_to_node,
+ .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
+ .cpu_present_to_apicid = es7000_cpu_present_to_apicid,
+ .apicid_to_cpu_present = es7000_apicid_to_cpu_present,
+ .setup_portio_remap = NULL,
+ .check_phys_apicid_present = es7000_check_phys_apicid_present,
+ .enable_apic_mode = es7000_enable_apic_mode,
+ .phys_pkg_id = es7000_phys_pkg_id,
+ .mps_oem_check = es7000_mps_oem_check,
+
+ .get_apic_id = es7000_get_apic_id,
+ .set_apic_id = NULL,
+ .apic_id_mask = 0xFF << 24,
+
+ .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
+
+ .send_IPI_mask = es7000_send_IPI_mask,
+ .send_IPI_mask_allbutself = NULL,
+ .send_IPI_allbutself = es7000_send_IPI_allbutself,
+ .send_IPI_all = es7000_send_IPI_all,
+ .send_IPI_self = default_send_IPI_self,
+
+ .wakeup_cpu = NULL,
+
+ .trampoline_phys_low = 0x467,
+ .trampoline_phys_high = 0x469,
+
+ .wait_for_init_deassert = es7000_wait_for_init_deassert,
+
+ /* Nothing to do for most platforms, since cleared by the INIT cycle: */
+ .smp_callin_clear_local_apic = NULL,
+ .store_NMI_vector = NULL,
+ .inquire_remote_apic = default_inquire_remote_apic,
+};
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 1b43086b097..231bdd3c5b1 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -488,20 +488,21 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
* ignore such a protection.
*/
asm volatile(
- "1: " _ASM_MOV " (%[parent_old]), %[old]\n"
- "2: " _ASM_MOV " %[return_hooker], (%[parent_replaced])\n"
+ "1: " _ASM_MOV " (%[parent]), %[old]\n"
+ "2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
" movl $0, %[faulted]\n"
+ "3:\n"
".section .fixup, \"ax\"\n"
- "3: movl $1, %[faulted]\n"
+ "4: movl $1, %[faulted]\n"
+ " jmp 3b\n"
".previous\n"
- _ASM_EXTABLE(1b, 3b)
- _ASM_EXTABLE(2b, 3b)
+ _ASM_EXTABLE(1b, 4b)
+ _ASM_EXTABLE(2b, 4b)
- : [parent_replaced] "=r" (parent), [old] "=r" (old),
- [faulted] "=r" (faulted)
- : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+ : [old] "=r" (old), [faulted] "=r" (faulted)
+ : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
: "memory"
);
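For readers unfamiliar with the pattern: labels 1 and 2 are the instructions that may fault, label 3 is the normal exit, and the .fixup entry at label 4 flags the fault and jumps back to 3; the _ASM_EXTABLE entries tell the fault handler to resume there. A simplified sketch of the same exception-table idiom, assuming only <asm/asm.h> (probe_long() is invented, not the kernel's API):

#include <asm/asm.h>

/* Read a long from a possibly-unmapped address; returns non-zero on fault. */
static int probe_long(unsigned long *addr, unsigned long *val)
{
	int faulted;

	asm volatile(
		"1:	mov (%[addr]), %[val]\n"
		"	movl $0, %[faulted]\n"
		"2:\n"
		".section .fixup, \"ax\"\n"
		"3:	movl $1, %[faulted]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(1b, 3b)
		: [val] "=r" (*val), [faulted] "=r" (faulted)
		: [addr] "r" (addr)
		: "memory");

	return faulted;
}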
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index e656c272115..820dea5d0eb 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -29,7 +29,7 @@ extern struct genapic apic_x2xpic_uv_x;
extern struct genapic apic_x2apic_phys;
extern struct genapic apic_x2apic_cluster;
-struct genapic __read_mostly *genapic = &apic_flat;
+struct genapic __read_mostly *apic = &apic_flat;
static struct genapic *apic_probe[] __initdata = {
#ifdef CONFIG_X86_UV
@@ -44,17 +44,17 @@ static struct genapic *apic_probe[] __initdata = {
/*
* Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
*/
-void __init setup_apic_routing(void)
+void __init default_setup_apic_routing(void)
{
- if (genapic == &apic_x2apic_phys || genapic == &apic_x2apic_cluster) {
+ if (apic == &apic_x2apic_phys || apic == &apic_x2apic_cluster) {
if (!intr_remapping_enabled)
- genapic = &apic_flat;
+ apic = &apic_flat;
}
- if (genapic == &apic_flat) {
+ if (apic == &apic_flat) {
if (max_physical_apicid >= 8)
- genapic = &apic_physflat;
- printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
+ apic = &apic_physflat;
+ printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
}
if (x86_quirks->update_genapic)
@@ -65,18 +65,18 @@ void __init setup_apic_routing(void)
void apic_send_IPI_self(int vector)
{
- __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
+ __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
}
-int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
int i;
for (i = 0; apic_probe[i]; ++i) {
if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
- genapic = apic_probe[i];
+ apic = apic_probe[i];
printk(KERN_INFO "Setting APIC routing to %s.\n",
- genapic->name);
+ apic->name);
return 1;
}
}
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 34185488e4f..249d2d3c034 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -19,7 +19,6 @@
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
-#include <mach_apicdef.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
@@ -74,7 +73,7 @@ static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
unsigned long flags;
local_irq_save(flags);
- __send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
+ __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
local_irq_restore(flags);
}
@@ -85,14 +84,15 @@ static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
_flat_send_IPI_mask(mask, vector);
}
-static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
- int vector)
+static void
+ flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
{
unsigned long mask = cpumask_bits(cpumask)[0];
int cpu = smp_processor_id();
if (cpu < BITS_PER_LONG)
clear_bit(cpu, &mask);
+
_flat_send_IPI_mask(mask, vector);
}
@@ -114,23 +114,27 @@ static void flat_send_IPI_allbutself(int vector)
_flat_send_IPI_mask(mask, vector);
}
} else if (num_online_cpus() > 1) {
- __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
+ __default_send_IPI_shortcut(APIC_DEST_ALLBUT,
+ vector, apic->dest_logical);
}
}
static void flat_send_IPI_all(int vector)
{
- if (vector == NMI_VECTOR)
+ if (vector == NMI_VECTOR) {
flat_send_IPI_mask(cpu_online_mask, vector);
- else
- __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
+ } else {
+ __default_send_IPI_shortcut(APIC_DEST_ALLINC,
+ vector, apic->dest_logical);
+ }
}
-static unsigned int get_apic_id(unsigned long x)
+static unsigned int flat_get_apic_id(unsigned long x)
{
unsigned int id;
id = (((x)>>24) & 0xFFu);
+
return id;
}
@@ -146,7 +150,7 @@ static unsigned int read_xapic_id(void)
{
unsigned int id;
- id = get_apic_id(apic_read(APIC_ID));
+ id = flat_get_apic_id(apic_read(APIC_ID));
return id;
}
@@ -169,31 +173,62 @@ static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
return mask1 & mask2;
}
-static unsigned int phys_pkg_id(int index_msb)
+static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
{
return hard_smp_processor_id() >> index_msb;
}
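flat_phys_pkg_id() simply shifts away the SMT/core bits of the APIC ID: index_msb is the number of low bits used to enumerate threads within a package, so what remains is the package number. For example:

/*
 * With two hyperthreads per package, index_msb == 1:
 *   APIC ID 0x06 (0b0110) >> 1 == 0x03   -> package 3, thread 0
 *   APIC ID 0x07 (0b0111) >> 1 == 0x03   -> package 3, thread 1
 */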
struct genapic apic_flat = {
- .name = "flat",
- .acpi_madt_oem_check = flat_acpi_madt_oem_check,
- .int_delivery_mode = dest_LowestPrio,
- .int_dest_mode = (APIC_DEST_LOGICAL != 0),
- .target_cpus = flat_target_cpus,
- .vector_allocation_domain = flat_vector_allocation_domain,
- .apic_id_registered = flat_apic_id_registered,
- .init_apic_ldr = flat_init_apic_ldr,
- .send_IPI_all = flat_send_IPI_all,
- .send_IPI_allbutself = flat_send_IPI_allbutself,
- .send_IPI_mask = flat_send_IPI_mask,
- .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
- .send_IPI_self = apic_send_IPI_self,
- .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
- .phys_pkg_id = phys_pkg_id,
- .get_apic_id = get_apic_id,
- .set_apic_id = set_apic_id,
- .apic_id_mask = (0xFFu<<24),
+ .name = "flat",
+ .probe = NULL,
+ .acpi_madt_oem_check = flat_acpi_madt_oem_check,
+ .apic_id_registered = flat_apic_id_registered,
+
+ .irq_delivery_mode = dest_LowestPrio,
+ .irq_dest_mode = 1, /* logical */
+
+ .target_cpus = flat_target_cpus,
+ .disable_esr = 0,
+ .dest_logical = APIC_DEST_LOGICAL,
+ .check_apicid_used = NULL,
+ .check_apicid_present = NULL,
+
+ .vector_allocation_domain = flat_vector_allocation_domain,
+ .init_apic_ldr = flat_init_apic_ldr,
+
+ .ioapic_phys_id_map = NULL,
+ .setup_apic_routing = NULL,
+ .multi_timer_check = NULL,
+ .apicid_to_node = NULL,
+ .cpu_to_logical_apicid = NULL,
+ .cpu_present_to_apicid = default_cpu_present_to_apicid,
+ .apicid_to_cpu_present = NULL,
+ .setup_portio_remap = NULL,
+ .check_phys_apicid_present = default_check_phys_apicid_present,
+ .enable_apic_mode = NULL,
+ .phys_pkg_id = flat_phys_pkg_id,
+ .mps_oem_check = NULL,
+
+ .get_apic_id = flat_get_apic_id,
+ .set_apic_id = set_apic_id,
+ .apic_id_mask = 0xFFu << 24,
+
+ .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
+
+ .send_IPI_mask = flat_send_IPI_mask,
+ .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
+ .send_IPI_allbutself = flat_send_IPI_allbutself,
+ .send_IPI_all = flat_send_IPI_all,
+ .send_IPI_self = apic_send_IPI_self,
+
+ .wakeup_cpu = NULL,
+ .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+ .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+ .wait_for_init_deassert = NULL,
+ .smp_callin_clear_local_apic = NULL,
+ .store_NMI_vector = NULL,
+ .inquire_remote_apic = NULL,
};
/*
@@ -232,18 +267,18 @@ static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
- send_IPI_mask_sequence(cpumask, vector);
+ default_send_IPI_mask_sequence_phys(cpumask, vector);
}
static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
int vector)
{
- send_IPI_mask_allbutself(cpumask, vector);
+ default_send_IPI_mask_allbutself_phys(cpumask, vector);
}
static void physflat_send_IPI_allbutself(int vector)
{
- send_IPI_mask_allbutself(cpu_online_mask, vector);
+ default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}
static void physflat_send_IPI_all(int vector)
@@ -276,32 +311,67 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- for_each_cpu_and(cpu, cpumask, andmask)
+ for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
+ }
if (cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
+
return BAD_APICID;
}
struct genapic apic_physflat = {
- .name = "physical flat",
- .acpi_madt_oem_check = physflat_acpi_madt_oem_check,
- .int_delivery_mode = dest_Fixed,
- .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
- .target_cpus = physflat_target_cpus,
- .vector_allocation_domain = physflat_vector_allocation_domain,
- .apic_id_registered = flat_apic_id_registered,
- .init_apic_ldr = flat_init_apic_ldr,/*not needed, but shouldn't hurt*/
- .send_IPI_all = physflat_send_IPI_all,
- .send_IPI_allbutself = physflat_send_IPI_allbutself,
- .send_IPI_mask = physflat_send_IPI_mask,
- .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
- .send_IPI_self = apic_send_IPI_self,
- .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
- .phys_pkg_id = phys_pkg_id,
- .get_apic_id = get_apic_id,
- .set_apic_id = set_apic_id,
- .apic_id_mask = (0xFFu<<24),
+
+ .name = "physical flat",
+ .probe = NULL,
+ .acpi_madt_oem_check = physflat_acpi_madt_oem_check,
+ .apic_id_registered = flat_apic_id_registered,
+
+ .irq_delivery_mode = dest_Fixed,
+ .irq_dest_mode = 0, /* physical */
+
+ .target_cpus = physflat_target_cpus,
+ .disable_esr = 0,
+ .dest_logical = 0,
+ .check_apicid_used = NULL,
+ .check_apicid_present = NULL,
+
+ .vector_allocation_domain = physflat_vector_allocation_domain,
+ /* not needed, but shouldn't hurt: */
+ .init_apic_ldr = flat_init_apic_ldr,
+
+ .ioapic_phys_id_map = NULL,
+ .setup_apic_routing = NULL,
+ .multi_timer_check = NULL,
+ .apicid_to_node = NULL,
+ .cpu_to_logical_apicid = NULL,
+ .cpu_present_to_apicid = default_cpu_present_to_apicid,
+ .apicid_to_cpu_present = NULL,
+ .setup_portio_remap = NULL,
+ .check_phys_apicid_present = default_check_phys_apicid_present,
+ .enable_apic_mode = NULL,
+ .phys_pkg_id = flat_phys_pkg_id,
+ .mps_oem_check = NULL,
+
+ .get_apic_id = flat_get_apic_id,
+ .set_apic_id = set_apic_id,
+ .apic_id_mask = 0xFFu << 24,
+
+ .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
+
+ .send_IPI_mask = physflat_send_IPI_mask,
+ .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
+ .send_IPI_allbutself = physflat_send_IPI_allbutself,
+ .send_IPI_all = physflat_send_IPI_all,
+ .send_IPI_self = apic_send_IPI_self,
+
+ .wakeup_cpu = NULL,
+ .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+ .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+ .wait_for_init_deassert = NULL,
+ .smp_callin_clear_local_apic = NULL,
+ .store_NMI_vector = NULL,
+ .inquire_remote_apic = NULL,
};
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index 6ce497cc372..7c87156b641 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -36,8 +36,8 @@ static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
cpumask_set_cpu(cpu, retmask);
}
-static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
- unsigned int dest)
+static void
+ __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{
unsigned long cfg;
@@ -57,45 +57,50 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
*/
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
- unsigned long flags;
unsigned long query_cpu;
+ unsigned long flags;
local_irq_save(flags);
- for_each_cpu(query_cpu, mask)
+ for_each_cpu(query_cpu, mask) {
__x2apic_send_IPI_dest(
per_cpu(x86_cpu_to_logical_apicid, query_cpu),
- vector, APIC_DEST_LOGICAL);
+ vector, apic->dest_logical);
+ }
local_irq_restore(flags);
}
-static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
- int vector)
+static void
+ x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
- unsigned long flags;
- unsigned long query_cpu;
unsigned long this_cpu = smp_processor_id();
+ unsigned long query_cpu;
+ unsigned long flags;
local_irq_save(flags);
- for_each_cpu(query_cpu, mask)
- if (query_cpu != this_cpu)
- __x2apic_send_IPI_dest(
+ for_each_cpu(query_cpu, mask) {
+ if (query_cpu == this_cpu)
+ continue;
+ __x2apic_send_IPI_dest(
per_cpu(x86_cpu_to_logical_apicid, query_cpu),
- vector, APIC_DEST_LOGICAL);
+ vector, apic->dest_logical);
+ }
local_irq_restore(flags);
}
static void x2apic_send_IPI_allbutself(int vector)
{
- unsigned long flags;
- unsigned long query_cpu;
unsigned long this_cpu = smp_processor_id();
+ unsigned long query_cpu;
+ unsigned long flags;
local_irq_save(flags);
- for_each_online_cpu(query_cpu)
- if (query_cpu != this_cpu)
- __x2apic_send_IPI_dest(
+ for_each_online_cpu(query_cpu) {
+ if (query_cpu == this_cpu)
+ continue;
+ __x2apic_send_IPI_dest(
per_cpu(x86_cpu_to_logical_apicid, query_cpu),
- vector, APIC_DEST_LOGICAL);
+ vector, apic->dest_logical);
+ }
local_irq_restore(flags);
}
@@ -111,21 +116,21 @@ static int x2apic_apic_id_registered(void)
static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
- int cpu;
-
/*
* We're using fixed IRQ delivery, can only return one logical APIC ID.
* May as well be the first.
*/
- cpu = cpumask_first(cpumask);
+ int cpu = cpumask_first(cpumask);
+
if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_logical_apicid, cpu);
else
return BAD_APICID;
}
-static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+static unsigned int
+x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int cpu;
@@ -133,15 +138,18 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
* We're using fixed IRQ delivery, can only return one logical APIC ID.
* May as well be the first.
*/
- for_each_cpu_and(cpu, cpumask, andmask)
+ for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
+ }
+
if (cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_logical_apicid, cpu);
+
return BAD_APICID;
}
-static unsigned int get_apic_id(unsigned long x)
+static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
{
unsigned int id;
@@ -157,7 +165,7 @@ static unsigned long set_apic_id(unsigned int id)
return x;
}
-static unsigned int phys_pkg_id(int index_msb)
+static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
{
return current_cpu_data.initial_apicid >> index_msb;
}
@@ -172,27 +180,58 @@ static void init_x2apic_ldr(void)
int cpu = smp_processor_id();
per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
- return;
}
struct genapic apic_x2apic_cluster = {
- .name = "cluster x2apic",
- .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
- .int_delivery_mode = dest_LowestPrio,
- .int_dest_mode = (APIC_DEST_LOGICAL != 0),
- .target_cpus = x2apic_target_cpus,
- .vector_allocation_domain = x2apic_vector_allocation_domain,
- .apic_id_registered = x2apic_apic_id_registered,
- .init_apic_ldr = init_x2apic_ldr,
- .send_IPI_all = x2apic_send_IPI_all,
- .send_IPI_allbutself = x2apic_send_IPI_allbutself,
- .send_IPI_mask = x2apic_send_IPI_mask,
- .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
- .send_IPI_self = x2apic_send_IPI_self,
- .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
- .phys_pkg_id = phys_pkg_id,
- .get_apic_id = get_apic_id,
- .set_apic_id = set_apic_id,
- .apic_id_mask = (0xFFFFFFFFu),
+
+ .name = "cluster x2apic",
+ .probe = NULL,
+ .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
+ .apic_id_registered = x2apic_apic_id_registered,
+
+ .irq_delivery_mode = dest_LowestPrio,
+ .irq_dest_mode = 1, /* logical */
+
+ .target_cpus = x2apic_target_cpus,
+ .disable_esr = 0,
+ .dest_logical = APIC_DEST_LOGICAL,
+ .check_apicid_used = NULL,
+ .check_apicid_present = NULL,
+
+ .vector_allocation_domain = x2apic_vector_allocation_domain,
+ .init_apic_ldr = init_x2apic_ldr,
+
+ .ioapic_phys_id_map = NULL,
+ .setup_apic_routing = NULL,
+ .multi_timer_check = NULL,
+ .apicid_to_node = NULL,
+ .cpu_to_logical_apicid = NULL,
+ .cpu_present_to_apicid = default_cpu_present_to_apicid,
+ .apicid_to_cpu_present = NULL,
+ .setup_portio_remap = NULL,
+ .check_phys_apicid_present = default_check_phys_apicid_present,
+ .enable_apic_mode = NULL,
+ .phys_pkg_id = x2apic_cluster_phys_pkg_id,
+ .mps_oem_check = NULL,
+
+ .get_apic_id = x2apic_cluster_phys_get_apic_id,
+ .set_apic_id = set_apic_id,
+ .apic_id_mask = 0xFFFFFFFFu,
+
+ .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
+
+ .send_IPI_mask = x2apic_send_IPI_mask,
+ .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
+ .send_IPI_allbutself = x2apic_send_IPI_allbutself,
+ .send_IPI_all = x2apic_send_IPI_all,
+ .send_IPI_self = x2apic_send_IPI_self,
+
+ .wakeup_cpu = NULL,
+ .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+ .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+ .wait_for_init_deassert = NULL,
+ .smp_callin_clear_local_apic = NULL,
+ .store_NMI_vector = NULL,
+ .inquire_remote_apic = NULL,
};
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
index 21bcc0e098b..5cbae8aa040 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/genx2apic_phys.c
@@ -55,8 +55,8 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
- unsigned long flags;
unsigned long query_cpu;
+ unsigned long flags;
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
@@ -66,12 +66,12 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
local_irq_restore(flags);
}
-static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
- int vector)
+static void
+ x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
- unsigned long flags;
- unsigned long query_cpu;
unsigned long this_cpu = smp_processor_id();
+ unsigned long query_cpu;
+ unsigned long flags;
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
@@ -85,16 +85,17 @@ static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
static void x2apic_send_IPI_allbutself(int vector)
{
- unsigned long flags;
- unsigned long query_cpu;
unsigned long this_cpu = smp_processor_id();
+ unsigned long query_cpu;
+ unsigned long flags;
local_irq_save(flags);
- for_each_online_cpu(query_cpu)
- if (query_cpu != this_cpu)
- __x2apic_send_IPI_dest(
- per_cpu(x86_cpu_to_apicid, query_cpu),
- vector, APIC_DEST_PHYSICAL);
+ for_each_online_cpu(query_cpu) {
+ if (query_cpu == this_cpu)
+ continue;
+ __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
+ vector, APIC_DEST_PHYSICAL);
+ }
local_irq_restore(flags);
}
@@ -110,21 +111,21 @@ static int x2apic_apic_id_registered(void)
static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
- int cpu;
-
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- cpu = cpumask_first(cpumask);
+ int cpu = cpumask_first(cpumask);
+
if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
else
return BAD_APICID;
}
-static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+static unsigned int
+x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int cpu;
@@ -132,31 +133,28 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- for_each_cpu_and(cpu, cpumask, andmask)
+ for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
+ }
+
if (cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
+
return BAD_APICID;
}
-static unsigned int get_apic_id(unsigned long x)
+static unsigned int x2apic_phys_get_apic_id(unsigned long x)
{
- unsigned int id;
-
- id = x;
- return id;
+ return x;
}
static unsigned long set_apic_id(unsigned int id)
{
- unsigned long x;
-
- x = id;
- return x;
+ return id;
}
-static unsigned int phys_pkg_id(int index_msb)
+static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
{
return current_cpu_data.initial_apicid >> index_msb;
}
@@ -168,27 +166,58 @@ static void x2apic_send_IPI_self(int vector)
static void init_x2apic_ldr(void)
{
- return;
}
struct genapic apic_x2apic_phys = {
- .name = "physical x2apic",
- .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
- .int_delivery_mode = dest_Fixed,
- .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
- .target_cpus = x2apic_target_cpus,
- .vector_allocation_domain = x2apic_vector_allocation_domain,
- .apic_id_registered = x2apic_apic_id_registered,
- .init_apic_ldr = init_x2apic_ldr,
- .send_IPI_all = x2apic_send_IPI_all,
- .send_IPI_allbutself = x2apic_send_IPI_allbutself,
- .send_IPI_mask = x2apic_send_IPI_mask,
- .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
- .send_IPI_self = x2apic_send_IPI_self,
- .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
- .phys_pkg_id = phys_pkg_id,
- .get_apic_id = get_apic_id,
- .set_apic_id = set_apic_id,
- .apic_id_mask = (0xFFFFFFFFu),
+
+ .name = "physical x2apic",
+ .probe = NULL,
+ .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
+ .apic_id_registered = x2apic_apic_id_registered,
+
+ .irq_delivery_mode = dest_Fixed,
+ .irq_dest_mode = 0, /* physical */
+
+ .target_cpus = x2apic_target_cpus,
+ .disable_esr = 0,
+ .dest_logical = 0,
+ .check_apicid_used = NULL,
+ .check_apicid_present = NULL,
+
+ .vector_allocation_domain = x2apic_vector_allocation_domain,
+ .init_apic_ldr = init_x2apic_ldr,
+
+ .ioapic_phys_id_map = NULL,
+ .setup_apic_routing = NULL,
+ .multi_timer_check = NULL,
+ .apicid_to_node = NULL,
+ .cpu_to_logical_apicid = NULL,
+ .cpu_present_to_apicid = default_cpu_present_to_apicid,
+ .apicid_to_cpu_present = NULL,
+ .setup_portio_remap = NULL,
+ .check_phys_apicid_present = default_check_phys_apicid_present,
+ .enable_apic_mode = NULL,
+ .phys_pkg_id = x2apic_phys_pkg_id,
+ .mps_oem_check = NULL,
+
+ .get_apic_id = x2apic_phys_get_apic_id,
+ .set_apic_id = set_apic_id,
+ .apic_id_mask = 0xFFFFFFFFu,
+
+ .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
+
+ .send_IPI_mask = x2apic_send_IPI_mask,
+ .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
+ .send_IPI_allbutself = x2apic_send_IPI_allbutself,
+ .send_IPI_all = x2apic_send_IPI_all,
+ .send_IPI_self = x2apic_send_IPI_self,
+
+ .wakeup_cpu = NULL,
+ .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+ .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+ .wait_for_init_deassert = NULL,
+ .smp_callin_clear_local_apic = NULL,
+ .store_NMI_vector = NULL,
+ .inquire_remote_apic = NULL,
};
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index bfe36249145..6adb5e6f4d9 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -118,12 +118,13 @@ static void uv_send_IPI_one(int cpu, int vector)
int pnode;
apicid = per_cpu(x86_cpu_to_apicid, cpu);
- lapicid = apicid & 0x3f; /* ZZZ macro needed */
+ lapicid = apicid & 0x3f; /* ZZZ macro needed */
pnode = uv_apicid_to_pnode(apicid);
- val =
- (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid <<
- UVH_IPI_INT_APIC_ID_SHFT) |
- (vector << UVH_IPI_INT_VECTOR_SHFT);
+
+ val = ( 1UL << UVH_IPI_INT_SEND_SHFT ) |
+ ( lapicid << UVH_IPI_INT_APIC_ID_SHFT ) |
+ ( vector << UVH_IPI_INT_VECTOR_SHFT );
+
uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}
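The word written to UVH_IPI_INT packs a send bit, the local APIC ID and the vector into fixed bit fields. A sketch of that composition with hypothetical shift values (the real constants are the UVH_IPI_INT_*_SHFT definitions used above):

/* Hypothetical shift positions, for illustration only. */
#define EX_SEND_SHFT	63
#define EX_APIC_ID_SHFT	16
#define EX_VECTOR_SHFT	 0

static unsigned long example_ipi_word(unsigned long lapicid, unsigned long vector)
{
	return (1UL << EX_SEND_SHFT) |
	       (lapicid << EX_APIC_ID_SHFT) |
	       (vector  << EX_VECTOR_SHFT);
}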
@@ -137,22 +138,24 @@ static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
- unsigned int cpu;
unsigned int this_cpu = smp_processor_id();
+ unsigned int cpu;
- for_each_cpu(cpu, mask)
+ for_each_cpu(cpu, mask) {
if (cpu != this_cpu)
uv_send_IPI_one(cpu, vector);
+ }
}
static void uv_send_IPI_allbutself(int vector)
{
- unsigned int cpu;
unsigned int this_cpu = smp_processor_id();
+ unsigned int cpu;
- for_each_online_cpu(cpu)
+ for_each_online_cpu(cpu) {
if (cpu != this_cpu)
uv_send_IPI_one(cpu, vector);
+ }
}
static void uv_send_IPI_all(int vector)
@@ -171,21 +174,21 @@ static void uv_init_apic_ldr(void)
static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
- int cpu;
-
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- cpu = cpumask_first(cpumask);
+ int cpu = cpumask_first(cpumask);
+
if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
else
return BAD_APICID;
}
-static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+static unsigned int
+uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
{
int cpu;
@@ -193,15 +196,17 @@ static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
- for_each_cpu_and(cpu, cpumask, andmask)
+ for_each_cpu_and(cpu, cpumask, andmask) {
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
+ }
if (cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
+
return BAD_APICID;
}
-static unsigned int get_apic_id(unsigned long x)
+static unsigned int x2apic_get_apic_id(unsigned long x)
{
unsigned int id;
@@ -223,10 +228,10 @@ static unsigned long set_apic_id(unsigned int id)
static unsigned int uv_read_apic_id(void)
{
- return get_apic_id(apic_read(APIC_ID));
+ return x2apic_get_apic_id(apic_read(APIC_ID));
}
-static unsigned int phys_pkg_id(int index_msb)
+static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
return uv_read_apic_id() >> index_msb;
}
@@ -237,25 +242,57 @@ static void uv_send_IPI_self(int vector)
}
struct genapic apic_x2apic_uv_x = {
- .name = "UV large system",
- .acpi_madt_oem_check = uv_acpi_madt_oem_check,
- .int_delivery_mode = dest_Fixed,
- .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
- .target_cpus = uv_target_cpus,
- .vector_allocation_domain = uv_vector_allocation_domain,
- .apic_id_registered = uv_apic_id_registered,
- .init_apic_ldr = uv_init_apic_ldr,
- .send_IPI_all = uv_send_IPI_all,
- .send_IPI_allbutself = uv_send_IPI_allbutself,
- .send_IPI_mask = uv_send_IPI_mask,
- .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
- .send_IPI_self = uv_send_IPI_self,
- .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
- .phys_pkg_id = phys_pkg_id,
- .get_apic_id = get_apic_id,
- .set_apic_id = set_apic_id,
- .apic_id_mask = (0xFFFFFFFFu),
+
+ .name = "UV large system",
+ .probe = NULL,
+ .acpi_madt_oem_check = uv_acpi_madt_oem_check,
+ .apic_id_registered = uv_apic_id_registered,
+
+ .irq_delivery_mode = dest_Fixed,
+ .irq_dest_mode = 1, /* logical */
+
+ .target_cpus = uv_target_cpus,
+ .disable_esr = 0,
+ .dest_logical = APIC_DEST_LOGICAL,
+ .check_apicid_used = NULL,
+ .check_apicid_present = NULL,
+
+ .vector_allocation_domain = uv_vector_allocation_domain,
+ .init_apic_ldr = uv_init_apic_ldr,
+
+ .ioapic_phys_id_map = NULL,
+ .setup_apic_routing = NULL,
+ .multi_timer_check = NULL,
+ .apicid_to_node = NULL,
+ .cpu_to_logical_apicid = NULL,
+ .cpu_present_to_apicid = default_cpu_present_to_apicid,
+ .apicid_to_cpu_present = NULL,
+ .setup_portio_remap = NULL,
+ .check_phys_apicid_present = default_check_phys_apicid_present,
+ .enable_apic_mode = NULL,
+ .phys_pkg_id = uv_phys_pkg_id,
+ .mps_oem_check = NULL,
+
+ .get_apic_id = x2apic_get_apic_id,
+ .set_apic_id = set_apic_id,
+ .apic_id_mask = 0xFFFFFFFFu,
+
+ .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
+
+ .send_IPI_mask = uv_send_IPI_mask,
+ .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
+ .send_IPI_allbutself = uv_send_IPI_allbutself,
+ .send_IPI_all = uv_send_IPI_all,
+ .send_IPI_self = uv_send_IPI_self,
+
+ .wakeup_cpu = NULL,
+ .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+ .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+ .wait_for_init_deassert = NULL,
+ .smp_callin_clear_local_apic = NULL,
+ .store_NMI_vector = NULL,
+ .inquire_remote_apic = NULL,
};
static __cpuinit void set_x2apic_extra_bits(int pnode)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index cf21fd0cf6a..2a0aad7718d 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -567,12 +567,8 @@ early_fault:
pushl %eax
pushl %edx /* trapno */
pushl $fault_msg
-#ifdef CONFIG_EARLY_PRINTK
- call early_printk
-#else
call printk
#endif
-#endif
call dump_stack
hlt_loop:
hlt
@@ -599,11 +595,10 @@ ignore_int:
pushl 32(%esp)
pushl 40(%esp)
pushl $int_msg
-#ifdef CONFIG_EARLY_PRINTK
- call early_printk
-#else
call printk
-#endif
+
+ call dump_stack
+
addl $(5*4),%esp
popl %ds
popl %es
@@ -679,7 +674,7 @@ early_recursion_flag:
.long 0
int_msg:
- .asciz "Unknown interrupt or fault at EIP %p %p %p\n"
+ .asciz "Unknown interrupt or fault at: %p %p %p\n"
fault_msg:
/* fault info: */
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 64d5ad0b8ad..388254f69a2 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -897,7 +897,7 @@ static unsigned long hpet_rtc_flags;
static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
-static unsigned long hpet_t1_cmp;
+static u32 hpet_t1_cmp;
static unsigned long hpet_default_delta;
static unsigned long hpet_pie_delta;
static unsigned long hpet_pie_limit;
@@ -905,6 +905,14 @@ static unsigned long hpet_pie_limit;
static rtc_irq_handler irq_handler;
/*
+ * Check whether the hpet counter value c1 is ahead of c2
+ */
+static inline int hpet_cnt_ahead(u32 c1, u32 c2)
+{
+ return (s32)(c2 - c1) < 0;
+}
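The signed subtraction makes the comparison safe across 32-bit counter wrap-around, which a plain unsigned compare is not. A worked example:

/*
 * Suppose the compare value has wrapped but the counter has not yet:
 *   c1 (hpet_t1_cmp)  = 0x00000010
 *   c2 (HPET_COUNTER) = 0xFFFFFFF0
 *   c2 - c1           = 0xFFFFFFE0, negative as an s32,
 * so hpet_cnt_ahead(c1, c2) correctly reports the compare value as still
 * ahead of the counter, whereas an unsigned "c1 > c2" would claim the
 * counter had already passed it.
 */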
+
+/*
 * Registers an IRQ handler.
*/
int hpet_register_irq_handler(rtc_irq_handler handler)
@@ -1075,7 +1083,7 @@ static void hpet_rtc_timer_reinit(void)
hpet_t1_cmp += delta;
hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
lost_ints++;
- } while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);
+ } while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));
if (lost_ints) {
if (hpet_rtc_flags & RTC_PIE)
diff --git a/arch/x86/kernel/i8237.c b/arch/x86/kernel/i8237.c
index dbd6c1d1b63..b42ca694dc6 100644
--- a/arch/x86/kernel/i8237.c
+++ b/arch/x86/kernel/i8237.c
@@ -28,10 +28,10 @@ static int i8237A_resume(struct sys_device *dev)
flags = claim_dma_lock();
- dma_outb(DMA1_RESET_REG, 0);
- dma_outb(DMA2_RESET_REG, 0);
+ dma_outb(0, DMA1_RESET_REG);
+ dma_outb(0, DMA2_RESET_REG);
- for (i = 0;i < 8;i++) {
+ for (i = 0; i < 8; i++) {
set_dma_addr(i, 0x000000);
/* DMA count is a bit weird so this is not 0 */
set_dma_count(i, 1);
@@ -51,14 +51,14 @@ static int i8237A_suspend(struct sys_device *dev, pm_message_t state)
}
static struct sysdev_class i8237_sysdev_class = {
- .name = "i8237",
- .suspend = i8237A_suspend,
- .resume = i8237A_resume,
+ .name = "i8237",
+ .suspend = i8237A_suspend,
+ .resume = i8237A_resume,
};
static struct sys_device device_i8237A = {
- .id = 0,
- .cls = &i8237_sysdev_class,
+ .id = 0,
+ .cls = &i8237_sysdev_class,
};
static int __init i8237A_init_sysfs(void)
@@ -68,5 +68,4 @@ static int __init i8237A_init_sysfs(void)
error = sysdev_register(&device_i8237A);
return error;
}
-
device_initcall(i8237A_init_sysfs);
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 0480d06a12a..7248ca11bdc 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -1,7 +1,7 @@
/*
* Intel IO-APIC support for multi-Pentium hosts.
*
- * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
+ * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
*
* Many thanks to Stig Venaas for trying out countless experimental
* patches and reporting/debugging problems patiently!
@@ -62,9 +62,7 @@
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_irq.h>
-#include <mach_ipi.h>
-#include <mach_apic.h>
-#include <mach_apicdef.h>
+#include <asm/genapic.h>
#define __apicdebuginit(type) static type __init
@@ -100,10 +98,19 @@ DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
int skip_ioapic_setup;
+void arch_disable_smp_support(void)
+{
+#ifdef CONFIG_PCI
+ noioapicquirk = 1;
+ noioapicreroute = -1;
+#endif
+ skip_ioapic_setup = 1;
+}
+
static int __init parse_noapic(char *str)
{
/* disable IO-APIC */
- disable_ioapic_setup();
+ arch_disable_smp_support();
return 0;
}
early_param("noapic", parse_noapic);
@@ -479,7 +486,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}
-static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
unsigned long flags;
spin_lock_irqsave(&ioapic_lock, flags);
@@ -514,11 +521,11 @@ static void send_cleanup_vector(struct irq_cfg *cfg)
for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
cfg->move_cleanup_count++;
for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
- send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
+ apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
} else {
cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
- send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+ apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
free_cpumask_var(cleanup_mask);
}
cfg->move_in_progress = 0;
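
The recurring pattern in these io_apic.c hunks is the switch from compile-time selected helpers (send_IPI_mask(), cpu_mask_to_apicid_and(), TARGET_CPUS, ...) pulled in via the mach_*.h headers to calls through a runtime-selected driver structure, the global apic pointer. A minimal sketch of that indirection; the names here (ipi_driver, drv, flat_send_IPI_mask) are invented for the sketch, not the kernel's:

#include <stdio.h>

/*
 * "Ops structure" indirection: call sites no longer invoke a
 * compile-time selected helper, they go through a function pointer in a
 * driver struct chosen at boot.  The kernel's equivalent is 'apic'.
 */
struct ipi_driver {
        const char *name;
        void (*send_IPI_mask)(unsigned long mask, int vector);
};

static void flat_send_IPI_mask(unsigned long mask, int vector)
{
        printf("flat: mask=0x%lx vector=0x%x\n", mask, vector);
}

static struct ipi_driver flat_driver = {
        .name          = "flat",
        .send_IPI_mask = flat_send_IPI_mask,
};

static struct ipi_driver *drv = &flat_driver;   /* selected at "boot" */

int main(void)
{
        /* Post-conversion call-site style: drv->send_IPI_mask(...) */
        drv->send_IPI_mask(0x3, 0xfd);
        return 0;
}
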
@@ -563,8 +570,9 @@ static int
assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
/*
- * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
- * of that, or returns BAD_APICID and leaves desc->affinity untouched.
+ * Either sets desc->affinity to a valid value, and returns
+ * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
+ * leaves desc->affinity untouched.
*/
static unsigned int
set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
@@ -582,7 +590,8 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
cpumask_and(desc->affinity, cfg->domain, mask);
set_extra_move_desc(desc, mask);
- return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
+
+ return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
}
static void
@@ -797,23 +806,6 @@ static void clear_IO_APIC (void)
clear_IO_APIC_pin(apic, pin);
}
-#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
-void send_IPI_self(int vector)
-{
- unsigned int cfg;
-
- /*
- * Wait for idle.
- */
- apic_wait_icr_idle();
- cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
- /*
- * Send the IPI. The write to APIC_ICR fires this off.
- */
- apic_write(APIC_ICR, cfg);
-}
-#endif /* !CONFIG_SMP && CONFIG_X86_32*/
-
#ifdef CONFIG_X86_32
/*
* support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
@@ -1316,7 +1308,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
int new_cpu;
int vector, offset;
- vector_allocation_domain(cpu, tmp_mask);
+ apic->vector_allocation_domain(cpu, tmp_mask);
vector = current_vector;
offset = current_offset;
@@ -1486,10 +1478,10 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t
handle_edge_irq, "edge");
}
-static int setup_ioapic_entry(int apic, int irq,
- struct IO_APIC_route_entry *entry,
- unsigned int destination, int trigger,
- int polarity, int vector)
+int setup_ioapic_entry(int apic_id, int irq,
+ struct IO_APIC_route_entry *entry,
+ unsigned int destination, int trigger,
+ int polarity, int vector)
{
/*
* add it to the IO-APIC irq-routing table:
@@ -1498,25 +1490,25 @@ static int setup_ioapic_entry(int apic, int irq,
#ifdef CONFIG_INTR_REMAP
if (intr_remapping_enabled) {
- struct intel_iommu *iommu = map_ioapic_to_ir(apic);
+ struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
struct irte irte;
struct IR_IO_APIC_route_entry *ir_entry =
(struct IR_IO_APIC_route_entry *) entry;
int index;
if (!iommu)
- panic("No mapping iommu for ioapic %d\n", apic);
+ panic("No mapping iommu for ioapic %d\n", apic_id);
index = alloc_irte(iommu, irq, 1);
if (index < 0)
- panic("Failed to allocate IRTE for ioapic %d\n", apic);
+ panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
memset(&irte, 0, sizeof(irte));
irte.present = 1;
- irte.dst_mode = INT_DEST_MODE;
+ irte.dst_mode = apic->irq_dest_mode;
irte.trigger_mode = trigger;
- irte.dlvry_mode = INT_DELIVERY_MODE;
+ irte.dlvry_mode = apic->irq_delivery_mode;
irte.vector = vector;
irte.dest_id = IRTE_DEST(destination);
@@ -1529,8 +1521,8 @@ static int setup_ioapic_entry(int apic, int irq,
} else
#endif
{
- entry->delivery_mode = INT_DELIVERY_MODE;
- entry->dest_mode = INT_DEST_MODE;
+ entry->delivery_mode = apic->irq_delivery_mode;
+ entry->dest_mode = apic->irq_dest_mode;
entry->dest = destination;
}
@@ -1547,7 +1539,7 @@ static int setup_ioapic_entry(int apic, int irq,
return 0;
}
-static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
+static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
int trigger, int polarity)
{
struct irq_cfg *cfg;
@@ -1559,22 +1551,22 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
cfg = desc->chip_data;
- if (assign_irq_vector(irq, cfg, TARGET_CPUS))
+ if (assign_irq_vector(irq, cfg, apic->target_cpus()))
return;
- dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
+ dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
apic_printk(APIC_VERBOSE,KERN_DEBUG
"IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
"IRQ %d Mode:%i Active:%i)\n",
- apic, mp_ioapics[apic].apicid, pin, cfg->vector,
+ apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
irq, trigger, polarity);
- if (setup_ioapic_entry(mp_ioapics[apic].apicid, irq, &entry,
+ if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
dest, trigger, polarity, cfg->vector)) {
printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
- mp_ioapics[apic].apicid, pin);
+ mp_ioapics[apic_id].apicid, pin);
__clear_irq_vector(irq, cfg);
return;
}
@@ -1583,12 +1575,12 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
if (irq < NR_IRQS_LEGACY)
disable_8259A_irq(irq);
- ioapic_write_entry(apic, pin, entry);
+ ioapic_write_entry(apic_id, pin, entry);
}
static void __init setup_IO_APIC_irqs(void)
{
- int apic, pin, idx, irq;
+ int apic_id, pin, idx, irq;
int notcon = 0;
struct irq_desc *desc;
struct irq_cfg *cfg;
@@ -1596,19 +1588,19 @@ static void __init setup_IO_APIC_irqs(void)
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
- for (apic = 0; apic < nr_ioapics; apic++) {
- for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+ for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
+ for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
- idx = find_irq_entry(apic, pin, mp_INT);
+ idx = find_irq_entry(apic_id, pin, mp_INT);
if (idx == -1) {
if (!notcon) {
notcon = 1;
apic_printk(APIC_VERBOSE,
KERN_DEBUG " %d-%d",
- mp_ioapics[apic].apicid, pin);
+ mp_ioapics[apic_id].apicid, pin);
} else
apic_printk(APIC_VERBOSE, " %d-%d",
- mp_ioapics[apic].apicid, pin);
+ mp_ioapics[apic_id].apicid, pin);
continue;
}
if (notcon) {
@@ -1617,20 +1609,25 @@ static void __init setup_IO_APIC_irqs(void)
notcon = 0;
}
- irq = pin_2_irq(idx, apic, pin);
-#ifdef CONFIG_X86_32
- if (multi_timer_check(apic, irq))
+ irq = pin_2_irq(idx, apic_id, pin);
+
+ /*
+ * Skip the timer IRQ if there's a quirk handler
+ * installed and if it returns 1:
+ */
+ if (apic->multi_timer_check &&
+ apic->multi_timer_check(apic_id, irq))
continue;
-#endif
+
desc = irq_to_desc_alloc_cpu(irq, cpu);
if (!desc) {
printk(KERN_INFO "can not get irq_desc for %d\n", irq);
continue;
}
cfg = desc->chip_data;
- add_pin_to_irq_cpu(cfg, cpu, apic, pin);
+ add_pin_to_irq_cpu(cfg, cpu, apic_id, pin);
- setup_IO_APIC_irq(apic, pin, irq, desc,
+ setup_IO_APIC_irq(apic_id, pin, irq, desc,
irq_trigger(idx), irq_polarity(idx));
}
}
@@ -1643,7 +1640,7 @@ static void __init setup_IO_APIC_irqs(void)
/*
* Set up the timer pin, possibly with the 8259A-master behind.
*/
-static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
+static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
int vector)
{
struct IO_APIC_route_entry entry;
@@ -1659,10 +1656,10 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
* We use logical delivery to get the timer IRQ
* to the first CPU.
*/
- entry.dest_mode = INT_DEST_MODE;
- entry.mask = 1; /* mask IRQ now */
- entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
- entry.delivery_mode = INT_DELIVERY_MODE;
+ entry.dest_mode = apic->irq_dest_mode;
+ entry.mask = 0; /* don't mask IRQ for edge */
+ entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
+ entry.delivery_mode = apic->irq_delivery_mode;
entry.polarity = 0;
entry.trigger = 0;
entry.vector = vector;
@@ -1676,7 +1673,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
/*
* Add it to the IO-APIC irq-routing table:
*/
- ioapic_write_entry(apic, pin, entry);
+ ioapic_write_entry(apic_id, pin, entry);
}
@@ -2089,7 +2086,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
{
union IO_APIC_reg_00 reg_00;
physid_mask_t phys_id_present_map;
- int apic;
+ int apic_id;
int i;
unsigned char old_id;
unsigned long flags;
@@ -2108,26 +2105,26 @@ static void __init setup_ioapic_ids_from_mpc(void)
* This is broken; anything with a real cpu count has to
* circumvent this idiocy regardless.
*/
- phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
+ phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
/*
* Set the IOAPIC ID to the value stored in the MPC table.
*/
- for (apic = 0; apic < nr_ioapics; apic++) {
+ for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
/* Read the register 0 value */
spin_lock_irqsave(&ioapic_lock, flags);
- reg_00.raw = io_apic_read(apic, 0);
+ reg_00.raw = io_apic_read(apic_id, 0);
spin_unlock_irqrestore(&ioapic_lock, flags);
- old_id = mp_ioapics[apic].apicid;
+ old_id = mp_ioapics[apic_id].apicid;
- if (mp_ioapics[apic].apicid >= get_physical_broadcast()) {
+ if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
- apic, mp_ioapics[apic].apicid);
+ apic_id, mp_ioapics[apic_id].apicid);
printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
reg_00.bits.ID);
- mp_ioapics[apic].apicid = reg_00.bits.ID;
+ mp_ioapics[apic_id].apicid = reg_00.bits.ID;
}
/*
@@ -2135,10 +2132,10 @@ static void __init setup_ioapic_ids_from_mpc(void)
* system must have a unique ID or we get lots of nice
* 'stuck on smp_invalidate_needed IPI wait' messages.
*/
- if (check_apicid_used(phys_id_present_map,
- mp_ioapics[apic].apicid)) {
+ if (apic->check_apicid_used(phys_id_present_map,
+ mp_ioapics[apic_id].apicid)) {
printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
- apic, mp_ioapics[apic].apicid);
+ apic_id, mp_ioapics[apic_id].apicid);
for (i = 0; i < get_physical_broadcast(); i++)
if (!physid_isset(i, phys_id_present_map))
break;
@@ -2147,13 +2144,13 @@ static void __init setup_ioapic_ids_from_mpc(void)
printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
i);
physid_set(i, phys_id_present_map);
- mp_ioapics[apic].apicid = i;
+ mp_ioapics[apic_id].apicid = i;
} else {
physid_mask_t tmp;
- tmp = apicid_to_cpu_present(mp_ioapics[apic].apicid);
+ tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid);
apic_printk(APIC_VERBOSE, "Setting %d in the "
"phys_id_present_map\n",
- mp_ioapics[apic].apicid);
+ mp_ioapics[apic_id].apicid);
physids_or(phys_id_present_map, phys_id_present_map, tmp);
}
@@ -2162,11 +2159,11 @@ static void __init setup_ioapic_ids_from_mpc(void)
* We need to adjust the IRQ routing table
* if the ID changed.
*/
- if (old_id != mp_ioapics[apic].apicid)
+ if (old_id != mp_ioapics[apic_id].apicid)
for (i = 0; i < mp_irq_entries; i++)
if (mp_irqs[i].dstapic == old_id)
mp_irqs[i].dstapic
- = mp_ioapics[apic].apicid;
+ = mp_ioapics[apic_id].apicid;
/*
* Read the right value from the MPC table and
@@ -2174,20 +2171,20 @@ static void __init setup_ioapic_ids_from_mpc(void)
*/
apic_printk(APIC_VERBOSE, KERN_INFO
"...changing IO-APIC physical APIC ID to %d ...",
- mp_ioapics[apic].apicid);
+ mp_ioapics[apic_id].apicid);
- reg_00.bits.ID = mp_ioapics[apic].apicid;
+ reg_00.bits.ID = mp_ioapics[apic_id].apicid;
spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(apic, 0, reg_00.raw);
+ io_apic_write(apic_id, 0, reg_00.raw);
spin_unlock_irqrestore(&ioapic_lock, flags);
/*
* Sanity check
*/
spin_lock_irqsave(&ioapic_lock, flags);
- reg_00.raw = io_apic_read(apic, 0);
+ reg_00.raw = io_apic_read(apic_id, 0);
spin_unlock_irqrestore(&ioapic_lock, flags);
- if (reg_00.bits.ID != mp_ioapics[apic].apicid)
+ if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
printk("could not set ID!\n");
else
apic_printk(APIC_VERBOSE, " ok.\n");
@@ -2290,7 +2287,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
unsigned long flags;
spin_lock_irqsave(&vector_lock, flags);
- send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
+ apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
spin_unlock_irqrestore(&vector_lock, flags);
return 1;
@@ -2298,7 +2295,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
#else
static int ioapic_retrigger_irq(unsigned int irq)
{
- send_IPI_self(irq_cfg(irq)->vector);
+ apic->send_IPI_self(irq_cfg(irq)->vector);
return 1;
}
@@ -2362,7 +2359,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
set_extra_move_desc(desc, mask);
- dest = cpu_mask_to_apicid_and(cfg->domain, mask);
+ dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
modify_ioapic_rte = desc->status & IRQ_LEVEL;
if (modify_ioapic_rte) {
@@ -2866,19 +2863,15 @@ static inline void __init check_timer(void)
int cpu = boot_cpu_id;
int apic1, pin1, apic2, pin2;
unsigned long flags;
- unsigned int ver;
int no_pin1 = 0;
local_irq_save(flags);
- ver = apic_read(APIC_LVR);
- ver = GET_APIC_VERSION(ver);
-
/*
* get/set the timer IRQ vector:
*/
disable_8259A_irq(0);
- assign_irq_vector(0, cfg, TARGET_CPUS);
+ assign_irq_vector(0, cfg, apic->target_cpus());
/*
* As IRQ0 is to be enabled in the 8259A, the virtual
@@ -2892,7 +2885,13 @@ static inline void __init check_timer(void)
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
#ifdef CONFIG_X86_32
- timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
+ {
+ unsigned int ver;
+
+ ver = apic_read(APIC_LVR);
+ ver = GET_APIC_VERSION(ver);
+ timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
+ }
#endif
pin1 = find_isa_irq_pin(0, mp_INT);
@@ -2931,8 +2930,17 @@ static inline void __init check_timer(void)
if (no_pin1) {
add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
+ } else {
+ /* For an edge-triggered timer, setup_IO_APIC_irq already
+ * leaves it unmasked, so we only need to unmask it here if
+ * it is level-triggered. Do we really have a level-triggered
+ * timer?
+ */
+ int idx;
+ idx = find_irq_entry(apic1, pin1, mp_INT);
+ if (idx != -1 && irq_trigger(idx))
+ unmask_IO_APIC_irq_desc(desc);
}
- unmask_IO_APIC_irq_desc(desc);
if (timer_irq_works()) {
if (nmi_watchdog == NMI_IO_APIC) {
setup_nmi();
@@ -2946,6 +2954,7 @@ static inline void __init check_timer(void)
if (intr_remapping_enabled)
panic("timer doesn't work through Interrupt-remapped IO-APIC");
#endif
+ local_irq_disable();
clear_IO_APIC_pin(apic1, pin1);
if (!no_pin1)
apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
@@ -2960,7 +2969,6 @@ static inline void __init check_timer(void)
*/
replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
- unmask_IO_APIC_irq_desc(desc);
enable_8259A_irq(0);
if (timer_irq_works()) {
apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2975,6 +2983,7 @@ static inline void __init check_timer(void)
/*
* Cleanup, just in case ...
*/
+ local_irq_disable();
disable_8259A_irq(0);
clear_IO_APIC_pin(apic2, pin2);
apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
@@ -3000,6 +3009,7 @@ static inline void __init check_timer(void)
apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
goto out;
}
+ local_irq_disable();
disable_8259A_irq(0);
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
@@ -3017,6 +3027,7 @@ static inline void __init check_timer(void)
apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
goto out;
}
+ local_irq_disable();
apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
"report. Then try booting with the 'noapic' option.\n");
@@ -3168,6 +3179,7 @@ static int __init ioapic_init_sysfs(void)
device_initcall(ioapic_init_sysfs);
+static int nr_irqs_gsi = NR_IRQS_LEGACY;
/*
* Dynamic irq allocate and deallocation
*/
@@ -3182,11 +3194,11 @@ unsigned int create_irq_nr(unsigned int irq_want)
struct irq_desc *desc_new = NULL;
irq = 0;
+ if (irq_want < nr_irqs_gsi)
+ irq_want = nr_irqs_gsi;
+
spin_lock_irqsave(&vector_lock, flags);
for (new = irq_want; new < nr_irqs; new++) {
- if (platform_legacy_irq(new))
- continue;
-
desc_new = irq_to_desc_alloc_cpu(new, cpu);
if (!desc_new) {
printk(KERN_INFO "can not get irq_desc for %d\n", new);
@@ -3196,7 +3208,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
if (cfg_new->vector != 0)
continue;
- if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0)
+ if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
irq = new;
break;
}
@@ -3211,7 +3223,6 @@ unsigned int create_irq_nr(unsigned int irq_want)
return irq;
}
-static int nr_irqs_gsi = NR_IRQS_LEGACY;
int create_irq(void)
{
unsigned int irq_want;
@@ -3262,11 +3273,11 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
return -ENXIO;
cfg = irq_cfg(irq);
- err = assign_irq_vector(irq, cfg, TARGET_CPUS);
+ err = assign_irq_vector(irq, cfg, apic->target_cpus());
if (err)
return err;
- dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
+ dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
#ifdef CONFIG_INTR_REMAP
if (irq_remapped(irq)) {
@@ -3280,9 +3291,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
memset (&irte, 0, sizeof(irte));
irte.present = 1;
- irte.dst_mode = INT_DEST_MODE;
+ irte.dst_mode = apic->irq_dest_mode;
irte.trigger_mode = 0; /* edge */
- irte.dlvry_mode = INT_DELIVERY_MODE;
+ irte.dlvry_mode = apic->irq_delivery_mode;
irte.vector = cfg->vector;
irte.dest_id = IRTE_DEST(dest);
@@ -3300,10 +3311,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
msg->address_hi = MSI_ADDR_BASE_HI;
msg->address_lo =
MSI_ADDR_BASE_LO |
- ((INT_DEST_MODE == 0) ?
+ ((apic->irq_dest_mode == 0) ?
MSI_ADDR_DEST_MODE_PHYSICAL:
MSI_ADDR_DEST_MODE_LOGICAL) |
- ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+ ((apic->irq_delivery_mode != dest_LowestPrio) ?
MSI_ADDR_REDIRECTION_CPU:
MSI_ADDR_REDIRECTION_LOWPRI) |
MSI_ADDR_DEST_ID(dest);
@@ -3311,7 +3322,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
msg->data =
MSI_DATA_TRIGGER_EDGE |
MSI_DATA_LEVEL_ASSERT |
- ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+ ((apic->irq_delivery_mode != dest_LowestPrio) ?
MSI_DATA_DELIVERY_FIXED:
MSI_DATA_DELIVERY_LOWPRI) |
MSI_DATA_VECTOR(cfg->vector);
@@ -3482,9 +3493,9 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
sub_handle = 0;
list_for_each_entry(msidesc, &dev->msi_list, list) {
irq = create_irq_nr(irq_want);
- irq_want++;
if (irq == 0)
return -1;
+ irq_want = irq + 1;
#ifdef CONFIG_INTR_REMAP
if (!intr_remapping_enabled)
goto no_ir;
@@ -3699,12 +3710,13 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
return -ENXIO;
cfg = irq_cfg(irq);
- err = assign_irq_vector(irq, cfg, TARGET_CPUS);
+ err = assign_irq_vector(irq, cfg, apic->target_cpus());
if (!err) {
struct ht_irq_msg msg;
unsigned dest;
- dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
+ dest = apic->cpu_mask_to_apicid_and(cfg->domain,
+ apic->target_cpus());
msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
@@ -3712,11 +3724,11 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
HT_IRQ_LOW_BASE |
HT_IRQ_LOW_DEST_ID(dest) |
HT_IRQ_LOW_VECTOR(cfg->vector) |
- ((INT_DEST_MODE == 0) ?
+ ((apic->irq_dest_mode == 0) ?
HT_IRQ_LOW_DM_PHYSICAL :
HT_IRQ_LOW_DM_LOGICAL) |
HT_IRQ_LOW_RQEOI_EDGE |
- ((INT_DELIVERY_MODE != dest_LowestPrio) ?
+ ((apic->irq_delivery_mode != dest_LowestPrio) ?
HT_IRQ_LOW_MT_FIXED :
HT_IRQ_LOW_MT_ARBITRATED) |
HT_IRQ_LOW_IRQ_MASKED;
@@ -3764,12 +3776,12 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
entry->vector = cfg->vector;
- entry->delivery_mode = INT_DELIVERY_MODE;
- entry->dest_mode = INT_DEST_MODE;
+ entry->delivery_mode = apic->irq_delivery_mode;
+ entry->dest_mode = apic->irq_dest_mode;
entry->polarity = 0;
entry->trigger = 0;
entry->mask = 0;
- entry->dest = cpu_mask_to_apicid(eligible_cpu);
+ entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
mmr_pnode = uv_blade_to_pnode(mmr_blade);
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -3837,11 +3849,17 @@ int __init arch_probe_nr_irqs(void)
{
int nr;
- nr = ((8 * nr_cpu_ids) > (32 * nr_ioapics) ?
- (NR_VECTORS + (8 * nr_cpu_ids)) :
- (NR_VECTORS + (32 * nr_ioapics)));
+ if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
+ nr_irqs = NR_VECTORS * nr_cpu_ids;
- if (nr < nr_irqs && nr > nr_irqs_gsi)
+ nr = nr_irqs_gsi + 8 * nr_cpu_ids;
+#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
+ /*
+ * for MSI and HT dyn irq
+ */
+ nr += nr_irqs_gsi * 16;
+#endif
+ if (nr < nr_irqs)
nr_irqs = nr;
return 0;
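
The new arch_probe_nr_irqs() estimate scales with the number of GSIs rather than the number of IO-APICs: the base is nr_irqs_gsi plus 8 per possible CPU, with another 16 per GSI reserved for MSI/HT dynamic IRQs, all capped at NR_VECTORS * nr_cpu_ids. A worked example with assumed sample values; the numbers below are placeholders, not taken from the patch:

#include <stdio.h>

int main(void)
{
        int nr_irqs_gsi = 48;                  /* e.g. two IO-APICs of GSIs */
        int nr_cpu_ids  = 8;
        int nr_vectors  = 256;                 /* NR_VECTORS on x86 */
        int nr_irqs     = nr_vectors * nr_cpu_ids;   /* cap: 2048 */
        int nr;

        nr  = nr_irqs_gsi + 8 * nr_cpu_ids;    /* 48 + 64 = 112 */
        nr += nr_irqs_gsi * 16;                /* + 768   = 880 (MSI/HT) */

        if (nr < nr_irqs)
                nr_irqs = nr;

        printf("nr_irqs = %d\n", nr_irqs);     /* prints 880 */
        return 0;
}
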
@@ -3873,7 +3891,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
*/
if (physids_empty(apic_id_map))
- apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
+ apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(ioapic, 0);
@@ -3889,10 +3907,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
* Every APIC in a system must have a unique ID or we get lots of nice
* 'stuck on smp_invalidate_needed IPI wait' messages.
*/
- if (check_apicid_used(apic_id_map, apic_id)) {
+ if (apic->check_apicid_used(apic_id_map, apic_id)) {
for (i = 0; i < get_physical_broadcast(); i++) {
- if (!check_apicid_used(apic_id_map, i))
+ if (!apic->check_apicid_used(apic_id_map, i))
break;
}
@@ -3905,7 +3923,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
apic_id = i;
}
- tmp = apicid_to_cpu_present(apic_id);
+ tmp = apic->apicid_to_cpu_present(apic_id);
physids_or(apic_id_map, apic_id_map, tmp);
if (reg_00.bits.ID != apic_id) {
@@ -3998,7 +4016,7 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
/*
* This function currently is only a helper for the i386 smp boot process where
* we need to reprogram the ioredtbls to cater for the cpus which have come online
- * so mask in all cases should simply be TARGET_CPUS
+ * so mask in all cases should simply be apic->target_cpus()
*/
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
@@ -4039,7 +4057,7 @@ void __init setup_ioapic_dest(void)
(IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
mask = desc->affinity;
else
- mask = TARGET_CPUS;
+ mask = apic->target_cpus();
#ifdef CONFIG_INTR_REMAP
if (intr_remapping_enabled)
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index 285bbf8831f..dbf5445727a 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -17,147 +17,121 @@
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
+#include <asm/ipi.h>
-#ifdef CONFIG_X86_32
-#include <mach_apic.h>
-#include <mach_ipi.h>
-
-/*
- * the following functions deal with sending IPIs between CPUs.
- *
- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
- */
-
-static inline int __prepare_ICR(unsigned int shortcut, int vector)
+void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
- unsigned int icr = shortcut | APIC_DEST_LOGICAL;
-
- switch (vector) {
- default:
- icr |= APIC_DM_FIXED | vector;
- break;
- case NMI_VECTOR:
- icr |= APIC_DM_NMI;
- break;
+ unsigned long query_cpu;
+ unsigned long flags;
+
+ /*
+ * Hack. The clustered APIC addressing mode doesn't allow us to send
+ * to an arbitrary mask, so I do a unicast to each CPU instead.
+ * - mbligh
+ */
+ local_irq_save(flags);
+ for_each_cpu(query_cpu, mask) {
+ __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
+ query_cpu), vector, APIC_DEST_PHYSICAL);
}
- return icr;
+ local_irq_restore(flags);
}
-static inline int __prepare_ICR2(unsigned int mask)
+void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+ int vector)
{
- return SET_APIC_DEST_FIELD(mask);
-}
+ unsigned int this_cpu = smp_processor_id();
+ unsigned int query_cpu;
+ unsigned long flags;
-void __send_IPI_shortcut(unsigned int shortcut, int vector)
-{
- /*
- * Subtle. In the case of the 'never do double writes' workaround
- * we have to lock out interrupts to be safe. As we don't care
- * of the value read we use an atomic rmw access to avoid costly
- * cli/sti. Otherwise we use an even cheaper single atomic write
- * to the APIC.
- */
- unsigned int cfg;
+ /* See Hack comment above */
- /*
- * Wait for idle.
- */
- apic_wait_icr_idle();
+ local_irq_save(flags);
+ for_each_cpu(query_cpu, mask) {
+ if (query_cpu == this_cpu)
+ continue;
+ __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
+ query_cpu), vector, APIC_DEST_PHYSICAL);
+ }
+ local_irq_restore(flags);
+}
- /*
- * No need to touch the target chip field
- */
- cfg = __prepare_ICR(shortcut, vector);
+void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+ int vector)
+{
+ unsigned long flags;
+ unsigned int query_cpu;
/*
- * Send the IPI. The write to APIC_ICR fires this off.
+ * Hack. The clustered APIC addressing mode doesn't allow us to send
+ * to an arbitrary mask, so I do a unicast to each CPU instead. This
+ * should be modified to do 1 message per cluster ID - mbligh
*/
- apic_write(APIC_ICR, cfg);
-}
-void send_IPI_self(int vector)
-{
- __send_IPI_shortcut(APIC_DEST_SELF, vector);
+ local_irq_save(flags);
+ for_each_cpu(query_cpu, mask)
+ __default_send_IPI_dest_field(
+ apic->cpu_to_logical_apicid(query_cpu), vector,
+ apic->dest_logical);
+ local_irq_restore(flags);
}
-/*
- * This is used to send an IPI with no shorthand notation (the destination is
- * specified in bits 56 to 63 of the ICR).
- */
-static inline void __send_IPI_dest_field(unsigned long mask, int vector)
+void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+ int vector)
{
- unsigned long cfg;
-
- /*
- * Wait for idle.
- */
- if (unlikely(vector == NMI_VECTOR))
- safe_apic_wait_icr_idle();
- else
- apic_wait_icr_idle();
-
- /*
- * prepare target chip field
- */
- cfg = __prepare_ICR2(mask);
- apic_write(APIC_ICR2, cfg);
+ unsigned long flags;
+ unsigned int query_cpu;
+ unsigned int this_cpu = smp_processor_id();
- /*
- * program the ICR
- */
- cfg = __prepare_ICR(0, vector);
+ /* See Hack comment above */
- /*
- * Send the IPI. The write to APIC_ICR fires this off.
- */
- apic_write(APIC_ICR, cfg);
+ local_irq_save(flags);
+ for_each_cpu(query_cpu, mask) {
+ if (query_cpu == this_cpu)
+ continue;
+ __default_send_IPI_dest_field(
+ apic->cpu_to_logical_apicid(query_cpu), vector,
+ apic->dest_logical);
+ }
+ local_irq_restore(flags);
}
+#ifdef CONFIG_X86_32
+
/*
* This is only used on smaller machines.
*/
-void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
+void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
unsigned long mask = cpumask_bits(cpumask)[0];
unsigned long flags;
local_irq_save(flags);
WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
- __send_IPI_dest_field(mask, vector);
+ __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
local_irq_restore(flags);
}
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
+void default_send_IPI_allbutself(int vector)
{
- unsigned long flags;
- unsigned int query_cpu;
-
/*
- * Hack. The clustered APIC addressing mode doesn't allow us to send
- * to an arbitrary mask, so I do a unicasts to each CPU instead. This
- * should be modified to do 1 message per cluster ID - mbligh
+ * If there are no other CPUs in the system then we get an APIC send
+ * error if we try to broadcast, so avoid sending IPIs in this case.
*/
+ if (!(num_online_cpus() > 1))
+ return;
- local_irq_save(flags);
- for_each_cpu(query_cpu, mask)
- __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
- local_irq_restore(flags);
+ __default_local_send_IPI_allbutself(vector);
}
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+void default_send_IPI_all(int vector)
{
- unsigned long flags;
- unsigned int query_cpu;
- unsigned int this_cpu = smp_processor_id();
-
- /* See Hack comment above */
+ __default_local_send_IPI_all(vector);
+}
- local_irq_save(flags);
- for_each_cpu(query_cpu, mask)
- if (query_cpu != this_cpu)
- __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
- vector);
- local_irq_restore(flags);
+void default_send_IPI_self(int vector)
+{
+ __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}
/* must come after the send_IPI functions above for inlining */
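
default_send_IPI_mask_sequence_phys() and its logical variant implement the fallback described in the hack comment: when the addressing mode cannot target an arbitrary mask in one ICR write, the mask is walked and one IPI is sent per CPU. A self-contained sketch of that loop; send_one() and the mask/vector values are illustrative only:

#include <stdio.h>

static void send_one(int cpu, int vector)
{
        printf("IPI vector 0x%x -> cpu %d\n", vector, cpu);
}

int main(void)
{
        unsigned long mask = 0x15;             /* cpus 0, 2 and 4 */
        int vector = 0xfb;
        unsigned int cpu;

        /* One unicast per set bit in the destination mask */
        for (cpu = 0; cpu < 8 * sizeof(mask); cpu++)
                if (mask & (1UL << cpu))
                        send_one(cpu, vector);
        return 0;
}
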
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 8b30d0c2512..f13ca1650aa 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -6,10 +6,12 @@
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
+#include <linux/ftrace.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
+#include <asm/idle.h>
atomic_t irq_err_count;
@@ -188,4 +190,40 @@ u64 arch_irq_stat(void)
return sum;
}
+
+/*
+ * do_IRQ handles all normal device IRQs (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ /* high bit used in ret_from_ code */
+ unsigned vector = ~regs->orig_ax;
+ unsigned irq;
+
+ exit_idle();
+ irq_enter();
+
+ irq = __get_cpu_var(vector_irq)[vector];
+
+ if (!handle_irq(irq, regs)) {
+#ifdef CONFIG_X86_64
+ if (!disable_apic)
+ ack_APIC_irq();
+#endif
+
+ if (printk_ratelimit())
+ printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n",
+ __func__, smp_processor_id(), vector, irq);
+ }
+
+ irq_exit();
+
+ set_irq_regs(old_regs);
+ return 1;
+}
+
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
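
The unified do_IRQ() moved here recovers the vector with ~regs->orig_ax: the value saved in orig_ax is the bitwise complement of the vector, so its high bit is set and it cannot be mistaken for a syscall number. A tiny sketch of the encode/decode round trip; the vector value is just an example:

#include <stdio.h>

int main(void)
{
        unsigned long vector  = 0x31;          /* example vector number  */
        unsigned long orig_ax = ~vector;       /* as saved in orig_ax    */
        unsigned int  decoded = ~orig_ax;      /* what do_IRQ() computes */

        printf("orig_ax = 0x%lx, decoded vector = 0x%x\n", orig_ax, decoded);
        return 0;
}
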
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index e0f29be8ab0..4beb9a13873 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -191,33 +191,16 @@ static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-unsigned int do_IRQ(struct pt_regs *regs)
+bool handle_irq(unsigned irq, struct pt_regs *regs)
{
- struct pt_regs *old_regs;
- /* high bit used in ret_from_ code */
- int overflow;
- unsigned vector = ~regs->orig_ax;
struct irq_desc *desc;
- unsigned irq;
-
-
- old_regs = set_irq_regs(regs);
- irq_enter();
- irq = __get_cpu_var(vector_irq)[vector];
+ int overflow;
overflow = check_stack_overflow();
desc = irq_to_desc(irq);
- if (unlikely(!desc)) {
- printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n",
- __func__, irq, vector, smp_processor_id());
- BUG();
- }
+ if (unlikely(!desc))
+ return false;
if (!execute_on_irq_stack(overflow, desc, irq)) {
if (unlikely(overflow))
@@ -225,13 +208,11 @@ unsigned int do_IRQ(struct pt_regs *regs)
desc->handle_irq(irq, desc);
}
- irq_exit();
- set_irq_regs(old_regs);
- return 1;
+ return true;
}
#ifdef CONFIG_HOTPLUG_CPU
-#include <mach_apic.h>
+#include <asm/genapic.h>
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 018963aa6ee..977d8b43a0d 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -48,42 +48,18 @@ static inline void stack_overflow_check(struct pt_regs *regs)
#endif
}
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
+bool handle_irq(unsigned irq, struct pt_regs *regs)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
struct irq_desc *desc;
- /* high bit used in ret_from_ code */
- unsigned vector = ~regs->orig_ax;
- unsigned irq;
-
- exit_idle();
- irq_enter();
- irq = __get_cpu_var(vector_irq)[vector];
-
stack_overflow_check(regs);
desc = irq_to_desc(irq);
- if (likely(desc))
- generic_handle_irq_desc(irq, desc);
- else {
- if (!disable_apic)
- ack_APIC_irq();
-
- if (printk_ratelimit())
- printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
- __func__, smp_processor_id(), vector);
- }
-
- irq_exit();
+ if (unlikely(!desc))
+ return false;
- set_irq_regs(old_regs);
- return 1;
+ generic_handle_irq_desc(irq, desc);
+ return true;
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c
index 22608ebf831..bf629cadec1 100644
--- a/arch/x86/kernel/irqinit_32.c
+++ b/arch/x86/kernel/irqinit_32.c
@@ -78,6 +78,15 @@ void __init init_ISA_irqs(void)
}
}
+/*
+ * IRQ2 is the cascade interrupt to the second interrupt controller
+ */
+static struct irqaction irq2 = {
+ .handler = no_action,
+ .mask = CPU_MASK_NONE,
+ .name = "cascade",
+};
+
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
[0 ... IRQ0_VECTOR - 1] = -1,
[IRQ0_VECTOR] = 0,
@@ -176,6 +185,9 @@ void __init native_init_IRQ(void)
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
+ if (!acpi_ioapic)
+ setup_irq(2, &irq2);
+
/* setup after call gates are initialised (usually add in
* the architecture specific gates)
*/
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 10435a120d2..5c4f5548384 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -46,7 +46,7 @@
#include <asm/apicdef.h>
#include <asm/system.h>
-#include <mach_ipi.h>
+#include <asm/genapic.h>
/*
* Put the error code here just in case the user cares:
@@ -347,7 +347,7 @@ void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
*/
void kgdb_roundup_cpus(unsigned long flags)
{
- send_IPI_allbutself(APIC_DM_NMI);
+ apic->send_IPI_allbutself(APIC_DM_NMI);
}
#endif
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index fa6bb263892..20076445319 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -3,7 +3,7 @@
* compliant MP-table parsing routines.
*
* (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
- * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
* (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
*/
@@ -29,12 +29,7 @@
#include <asm/setup.h>
#include <asm/smp.h>
-#include <mach_apic.h>
-#ifdef CONFIG_X86_32
-#include <mach_apicdef.h>
-#include <mach_mpparse.h>
-#endif
-
+#include <asm/genapic.h>
/*
* Checksum an MP configuration block.
*/
@@ -292,16 +287,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
return 0;
#ifdef CONFIG_X86_32
- /*
- * need to make sure summit and es7000's mps_oem_check is safe to be
- * called early via genericarch 's mps_oem_check
- */
- if (early) {
-#ifdef CONFIG_X86_NUMAQ
- numaq_mps_oem_check(mpc, oem, str);
-#endif
- } else
- mps_oem_check(mpc, oem, str);
+ generic_mps_oem_check(mpc, oem, str);
#endif
/* save the local APIC address, it might be non-default */
if (!acpi_lapic)
@@ -386,13 +372,13 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
(*x86_quirks->mpc_record)++;
}
-#ifdef CONFIG_X86_GENERICARCH
- generic_bigsmp_probe();
+#ifdef CONFIG_X86_BIGSMP
+ generic_bigsmp_probe();
#endif
-#ifdef CONFIG_X86_32
- setup_apic_routing();
-#endif
+ if (apic->setup_apic_routing)
+ apic->setup_apic_routing();
+
if (!num_processors)
printk(KERN_ERR "MPTABLE: no processors registered!\n");
return num_processors;
@@ -706,8 +692,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
#endif
mpf_found = mpf;
- printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
- mpf, virt_to_phys(mpf));
+ printk(KERN_INFO "found SMP MP-table at [%p] %llx\n",
+ mpf, (u64)virt_to_phys(mpf));
if (!reserve)
return 1;
@@ -1025,7 +1011,7 @@ static int __init update_mp_table(void)
if (!smp_check_mpc(mpc, oem, str))
return 0;
- printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf));
+ printk(KERN_INFO "mpf: %llx\n", (u64)virt_to_phys(mpf));
printk(KERN_INFO "physptr: %x\n", mpf->physptr);
if (mpc_new_phys && mpc->length > mpc_new_length) {
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 23b6d9e6e4f..bdfad80c3cf 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -34,7 +34,7 @@
#include <asm/mce.h>
-#include <mach_traps.h>
+#include <asm/mach_traps.h>
int unknown_nmi_panic;
int nmi_watchdog_enabled;
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
index f2191d4f271..0cc41a1d255 100644
--- a/arch/x86/kernel/numaq_32.c
+++ b/arch/x86/kernel/numaq_32.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2002, IBM Corp.
*
- * All rights reserved.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,17 +23,18 @@
* Send feedback to <gone@us.ibm.com>
*/
-#include <linux/mm.h>
+#include <linux/nodemask.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/module.h>
-#include <linux/nodemask.h>
-#include <asm/numaq.h>
-#include <asm/topology.h>
+#include <linux/mm.h>
+
#include <asm/processor.h>
+#include <asm/topology.h>
#include <asm/genapic.h>
-#include <asm/e820.h>
+#include <asm/numaq.h>
#include <asm/setup.h>
+#include <asm/e820.h>
#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT))
@@ -91,19 +92,20 @@ static int __init numaq_pre_time_init(void)
}
int found_numaq;
+
/*
* Have to match translation table entries to main table entries by counter
* hence the mpc_record variable .... can't see a less disgusting way of
* doing this ....
*/
struct mpc_config_translation {
- unsigned char mpc_type;
- unsigned char trans_len;
- unsigned char trans_type;
- unsigned char trans_quad;
- unsigned char trans_global;
- unsigned char trans_local;
- unsigned short trans_reserved;
+ unsigned char mpc_type;
+ unsigned char trans_len;
+ unsigned char trans_type;
+ unsigned char trans_quad;
+ unsigned char trans_global;
+ unsigned char trans_local;
+ unsigned short trans_reserved;
};
/* x86_quirks member */
@@ -236,7 +238,7 @@ static int __init numaq_setup_ioapic_ids(void)
static int __init numaq_update_genapic(void)
{
- genapic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
+ apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
return 0;
}
@@ -291,3 +293,280 @@ int __init get_memcfg_numaq(void)
smp_dump_qct();
return 1;
}
+
+/*
+ * APIC driver for the IBM NUMAQ chipset.
+ */
+#define APIC_DEFINITION 1
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/genapic.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <asm/ipi.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/numa.h>
+#include <linux/smp.h>
+#include <asm/numaq.h>
+#include <asm/io.h>
+#include <linux/mmzone.h>
+#include <linux/nodemask.h>
+
+#define NUMAQ_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
+
+static inline unsigned int numaq_get_apic_id(unsigned long x)
+{
+ return (x >> 24) & 0x0F;
+}
+
+static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+ default_send_IPI_mask_sequence_logical(mask, vector);
+}
+
+static inline void numaq_send_IPI_allbutself(int vector)
+{
+ default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
+}
+
+static inline void numaq_send_IPI_all(int vector)
+{
+ numaq_send_IPI_mask(cpu_online_mask, vector);
+}
+
+extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
+
+#define NUMAQ_TRAMPOLINE_PHYS_LOW (0x8)
+#define NUMAQ_TRAMPOLINE_PHYS_HIGH (0xa)
+
+/*
+ * Because we use NMIs rather than the INIT-STARTUP sequence to
+ * bootstrap the CPUs, the APIC may be in a weird state. Kick it:
+ */
+static inline void numaq_smp_callin_clear_local_apic(void)
+{
+ clear_local_APIC();
+}
+
+static inline void
+numaq_store_NMI_vector(unsigned short *high, unsigned short *low)
+{
+ printk("Storing NMI vector\n");
+ *high =
+ *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_HIGH));
+ *low =
+ *((volatile unsigned short *)phys_to_virt(NUMAQ_TRAMPOLINE_PHYS_LOW));
+}
+
+static inline const cpumask_t *numaq_target_cpus(void)
+{
+ return &CPU_MASK_ALL;
+}
+
+static inline unsigned long
+numaq_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+ return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long numaq_check_apicid_present(int bit)
+{
+ return physid_isset(bit, phys_cpu_present_map);
+}
+
+#define apicid_cluster(apicid) (apicid & 0xF0)
+
+static inline int numaq_apic_id_registered(void)
+{
+ return 1;
+}
+
+static inline void numaq_init_apic_ldr(void)
+{
+ /* Already done in NUMA-Q firmware */
+}
+
+static inline void numaq_setup_apic_routing(void)
+{
+ printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
+ "NUMA-Q", nr_ioapics);
+}
+
+/*
+ * Skip adding the timer int on secondary nodes, which causes
+ * a small but painful rift in the time-space continuum.
+ */
+static inline int numaq_multi_timer_check(int apic, int irq)
+{
+ return apic != 0 && irq == 0;
+}
+
+static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+ /* We don't have a good way to do this yet - hack */
+ return physids_promote(0xFUL);
+}
+
+/* Mapping from cpu number to logical apicid */
+extern u8 cpu_2_logical_apicid[];
+
+static inline int numaq_cpu_to_logical_apicid(int cpu)
+{
+ if (cpu >= nr_cpu_ids)
+ return BAD_APICID;
+ return (int)cpu_2_logical_apicid[cpu];
+}
+
+/*
+ * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
+ * cpu to APIC ID relation to properly interact with the intelligent
+ * mode of the cluster controller.
+ */
+static inline int numaq_cpu_present_to_apicid(int mps_cpu)
+{
+ if (mps_cpu < 60)
+ return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
+ else
+ return BAD_APICID;
+}
+
+static inline int numaq_apicid_to_node(int logical_apicid)
+{
+ return logical_apicid >> 4;
+}
+
+static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid)
+{
+ int node = numaq_apicid_to_node(logical_apicid);
+ int cpu = __ffs(logical_apicid & 0xf);
+
+ return physid_mask_of_physid(cpu + 4*node);
+}
+
+/* Where the IO area was mapped on multiquad, always 0 otherwise */
+void *xquad_portio;
+
+static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+ return 1;
+}
+
+/*
+ * We use physical apicids here, not logical, so just return the default
+ * physical broadcast to stop people from breaking us
+ */
+static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+ return 0x0F;
+}
+
+static inline unsigned int
+numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
+{
+ return 0x0F;
+}
+
+/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
+static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+ return cpuid_apic >> index_msb;
+}
+
+static int __numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
+{
+ numaq_mps_oem_check(mpc, oem, productid);
+ return found_numaq;
+}
+
+static int probe_numaq(void)
+{
+ /* already know from get_memcfg_numaq() */
+ return found_numaq;
+}
+
+static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+ /* Careful. Some cpus do not strictly honor the set of cpus
+ * specified in the interrupt destination when using lowest
+ * priority interrupt delivery mode.
+ *
+ * In particular there was a hyperthreading cpu observed to
+ * deliver interrupts to the wrong hyperthread when only one
+ * hyperthread was specified in the interrupt destination.
+ */
+ *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+}
+
+static void numaq_setup_portio_remap(void)
+{
+ int num_quads = num_online_nodes();
+
+ if (num_quads <= 1)
+ return;
+
+ printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
+ xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
+ printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
+ (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
+}
+
+struct genapic apic_numaq = {
+
+ .name = "NUMAQ",
+ .probe = probe_numaq,
+ .acpi_madt_oem_check = NULL,
+ .apic_id_registered = numaq_apic_id_registered,
+
+ .irq_delivery_mode = dest_LowestPrio,
+ /* physical delivery on LOCAL quad: */
+ .irq_dest_mode = 0,
+
+ .target_cpus = numaq_target_cpus,
+ .disable_esr = 1,
+ .dest_logical = APIC_DEST_LOGICAL,
+ .check_apicid_used = numaq_check_apicid_used,
+ .check_apicid_present = numaq_check_apicid_present,
+
+ .vector_allocation_domain = numaq_vector_allocation_domain,
+ .init_apic_ldr = numaq_init_apic_ldr,
+
+ .ioapic_phys_id_map = numaq_ioapic_phys_id_map,
+ .setup_apic_routing = numaq_setup_apic_routing,
+ .multi_timer_check = numaq_multi_timer_check,
+ .apicid_to_node = numaq_apicid_to_node,
+ .cpu_to_logical_apicid = numaq_cpu_to_logical_apicid,
+ .cpu_present_to_apicid = numaq_cpu_present_to_apicid,
+ .apicid_to_cpu_present = numaq_apicid_to_cpu_present,
+ .setup_portio_remap = numaq_setup_portio_remap,
+ .check_phys_apicid_present = numaq_check_phys_apicid_present,
+ .enable_apic_mode = NULL,
+ .phys_pkg_id = numaq_phys_pkg_id,
+ .mps_oem_check = __numaq_mps_oem_check,
+
+ .get_apic_id = numaq_get_apic_id,
+ .set_apic_id = NULL,
+ .apic_id_mask = 0x0F << 24,
+
+ .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and,
+
+ .send_IPI_mask = numaq_send_IPI_mask,
+ .send_IPI_mask_allbutself = NULL,
+ .send_IPI_allbutself = numaq_send_IPI_allbutself,
+ .send_IPI_all = numaq_send_IPI_all,
+ .send_IPI_self = default_send_IPI_self,
+
+ .wakeup_cpu = NULL,
+ .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW,
+ .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH,
+
+ /* We don't do anything here because we use NMI's to boot instead */
+ .wait_for_init_deassert = NULL,
+
+ .smp_callin_clear_local_apic = numaq_smp_callin_clear_local_apic,
+ .store_NMI_vector = numaq_store_NMI_vector,
+ .inquire_remote_apic = NULL,
+};
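
numaq_cpu_present_to_apicid() above packs the quad number into the high nibble of the logical APIC ID and a one-hot bit for the CPU's slot within the quad into the low nibble; numaq_apicid_to_cpu_present() inverts that mapping. A short worked sketch of the packing; the helper name and sample CPU numbers are illustrative:

#include <stdio.h>

/*
 * NUMA-Q logical APIC ID packing: high nibble = quad (mps_cpu >> 2),
 * low nibble = one-hot slot bit (1 << (mps_cpu & 0x3)).
 */
static int numaq_pack(int mps_cpu)
{
        return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
}

int main(void)
{
        printf("cpu  0 -> 0x%02x\n", numaq_pack(0));   /* 0x01: quad 0, slot 0 */
        printf("cpu  5 -> 0x%02x\n", numaq_pack(5));   /* 0x12: quad 1, slot 1 */
        printf("cpu 11 -> 0x%02x\n", numaq_pack(11));  /* 0x28: quad 2, slot 3 */
        return 0;
}
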
diff --git a/arch/x86/kernel/probe_32.c b/arch/x86/kernel/probe_32.c
new file mode 100644
index 00000000000..22337b75de6
--- /dev/null
+++ b/arch/x86/kernel/probe_32.c
@@ -0,0 +1,411 @@
+/*
+ * Default generic APIC driver. This handles up to 8 CPUs.
+ *
+ * Copyright 2003 Andi Kleen, SuSE Labs.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic x86 APIC driver probe layer.
+ */
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
+#include <asm/apicdef.h>
+#include <asm/genapic.h>
+#include <asm/setup.h>
+
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/genapic.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <asm/genapic.h>
+#include <asm/ipi.h>
+
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/acpi.h>
+#include <asm/arch_hooks.h>
+#include <asm/e820.h>
+#include <asm/setup.h>
+
+#include <asm/genapic.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define DEFAULT_SEND_IPI (1)
+#else
+#define DEFAULT_SEND_IPI (0)
+#endif
+
+int no_broadcast = DEFAULT_SEND_IPI;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+ /*
+ * Careful. Some cpus do not strictly honor the set of cpus
+ * specified in the interrupt destination when using lowest
+ * priority interrupt delivery mode.
+ *
+ * In particular there was a hyperthreading cpu observed to
+ * deliver interrupts to the wrong hyperthread when only one
+ * hyperthread was specified in the interrupt destination.
+ */
+ *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
+}
+
+/* should be called last. */
+static int probe_default(void)
+{
+ return 1;
+}
+
+struct genapic apic_default = {
+
+ .name = "default",
+ .probe = probe_default,
+ .acpi_madt_oem_check = NULL,
+ .apic_id_registered = default_apic_id_registered,
+
+ .irq_delivery_mode = dest_LowestPrio,
+ /* logical delivery broadcast to all CPUs: */
+ .irq_dest_mode = 1,
+
+ .target_cpus = default_target_cpus,
+ .disable_esr = 0,
+ .dest_logical = APIC_DEST_LOGICAL,
+ .check_apicid_used = default_check_apicid_used,
+ .check_apicid_present = default_check_apicid_present,
+
+ .vector_allocation_domain = default_vector_allocation_domain,
+ .init_apic_ldr = default_init_apic_ldr,
+
+ .ioapic_phys_id_map = default_ioapic_phys_id_map,
+ .setup_apic_routing = default_setup_apic_routing,
+ .multi_timer_check = NULL,
+ .apicid_to_node = default_apicid_to_node,
+ .cpu_to_logical_apicid = default_cpu_to_logical_apicid,
+ .cpu_present_to_apicid = default_cpu_present_to_apicid,
+ .apicid_to_cpu_present = default_apicid_to_cpu_present,
+ .setup_portio_remap = NULL,
+ .check_phys_apicid_present = default_check_phys_apicid_present,
+ .enable_apic_mode = NULL,
+ .phys_pkg_id = default_phys_pkg_id,
+ .mps_oem_check = NULL,
+
+ .get_apic_id = default_get_apic_id,
+ .set_apic_id = NULL,
+ .apic_id_mask = 0x0F << 24,
+
+ .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
+
+ .send_IPI_mask = default_send_IPI_mask_logical,
+ .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical,
+ .send_IPI_allbutself = default_send_IPI_allbutself,
+ .send_IPI_all = default_send_IPI_all,
+ .send_IPI_self = default_send_IPI_self,
+
+ .wakeup_cpu = NULL,
+ .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+ .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+
+ .wait_for_init_deassert = default_wait_for_init_deassert,
+
+ .smp_callin_clear_local_apic = NULL,
+ .store_NMI_vector = NULL,
+ .inquire_remote_apic = default_inquire_remote_apic,
+};
+
+extern struct genapic apic_numaq;
+extern struct genapic apic_summit;
+extern struct genapic apic_bigsmp;
+extern struct genapic apic_es7000;
+extern struct genapic apic_default;
+
+struct genapic *apic = &apic_default;
+
+static struct genapic *apic_probe[] __initdata = {
+#ifdef CONFIG_X86_NUMAQ
+ &apic_numaq,
+#endif
+#ifdef CONFIG_X86_SUMMIT
+ &apic_summit,
+#endif
+#ifdef CONFIG_X86_BIGSMP
+ &apic_bigsmp,
+#endif
+#ifdef CONFIG_X86_ES7000
+ &apic_es7000,
+#endif
+ &apic_default, /* must be last */
+ NULL,
+};
+
+static int cmdline_apic __initdata;
+static int __init parse_apic(char *arg)
+{
+ int i;
+
+ if (!arg)
+ return -EINVAL;
+
+ for (i = 0; apic_probe[i]; i++) {
+ if (!strcmp(apic_probe[i]->name, arg)) {
+ apic = apic_probe[i];
+ cmdline_apic = 1;
+ return 0;
+ }
+ }
+
+ if (x86_quirks->update_genapic)
+ x86_quirks->update_genapic();
+
+ /* Parsed again by __setup for debug/verbose */
+ return 0;
+}
+early_param("apic", parse_apic);
+
+void __init generic_bigsmp_probe(void)
+{
+#ifdef CONFIG_X86_BIGSMP
+ /*
+ * This routine is used to switch to bigsmp mode when
+ * - There is no apic= option specified by the user
+ * - generic_apic_probe() has chosen apic_default as the sub_arch
+ * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
+ */
+
+ if (!cmdline_apic && apic == &apic_default) {
+ if (apic_bigsmp.probe()) {
+ apic = &apic_bigsmp;
+ if (x86_quirks->update_genapic)
+ x86_quirks->update_genapic();
+ printk(KERN_INFO "Overriding APIC driver with %s\n",
+ apic->name);
+ }
+ }
+#endif
+}
+
+void __init generic_apic_probe(void)
+{
+ if (!cmdline_apic) {
+ int i;
+ for (i = 0; apic_probe[i]; i++) {
+ if (apic_probe[i]->probe()) {
+ apic = apic_probe[i];
+ break;
+ }
+ }
+ /* Not visible without early console */
+ if (!apic_probe[i])
+ panic("Didn't find an APIC driver");
+
+ if (x86_quirks->update_genapic)
+ x86_quirks->update_genapic();
+ }
+ printk(KERN_INFO "Using APIC driver %s\n", apic->name);
+}
+
+/* These functions can switch the APIC even after the initial ->probe() */
+
+int __init
+generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
+{
+ int i;
+
+ for (i = 0; apic_probe[i]; ++i) {
+ if (!apic_probe[i]->mps_oem_check)
+ continue;
+ if (!apic_probe[i]->mps_oem_check(mpc, oem, productid))
+ continue;
+
+ if (!cmdline_apic) {
+ apic = apic_probe[i];
+ if (x86_quirks->update_genapic)
+ x86_quirks->update_genapic();
+ printk(KERN_INFO "Switched to APIC driver `%s'.\n",
+ apic->name);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+ int i;
+
+ for (i = 0; apic_probe[i]; ++i) {
+ if (!apic_probe[i]->acpi_madt_oem_check)
+ continue;
+ if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id))
+ continue;
+
+ if (!cmdline_apic) {
+ apic = apic_probe[i];
+ if (x86_quirks->update_genapic)
+ x86_quirks->update_genapic();
+ printk(KERN_INFO "Switched to APIC driver `%s'.\n",
+ apic->name);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+/**
+ * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
+ *
+ * Description:
+ * Perform any necessary interrupt initialisation prior to setting up
+ * the "ordinary" interrupt call gates. For legacy reasons, the ISA
+ * interrupts should be initialised here if the machine emulates a PC
+ * in any way.
+ **/
+void __init pre_intr_init_hook(void)
+{
+ if (x86_quirks->arch_pre_intr_init) {
+ if (x86_quirks->arch_pre_intr_init())
+ return;
+ }
+ init_ISA_irqs();
+}
+
+/**
+ * intr_init_hook - post gate setup interrupt initialisation
+ *
+ * Description:
+ * Fill in any interrupts that may have been left out by the general
+ * init_IRQ() routine. Interrupts having to do with the machine rather
+ * than the devices on the I/O bus (like APIC interrupts in Intel MP
+ * systems) are started here.
+ **/
+void __init intr_init_hook(void)
+{
+ if (x86_quirks->arch_intr_init) {
+ if (x86_quirks->arch_intr_init())
+ return;
+ }
+}
+
+/**
+ * pre_setup_arch_hook - hook called prior to any setup_arch() execution
+ *
+ * Description:
+ * generally used to activate any machine specific identification
+ * routines that may be needed before setup_arch() runs. On Voyager
+ * this is used to get the board revision and type.
+ **/
+void __init pre_setup_arch_hook(void)
+{
+}
+
+/**
+ * trap_init_hook - initialise system specific traps
+ *
+ * Description:
+ * Called as the final act of trap_init(). Used in VISWS to initialise
+ * the various board specific APIC traps.
+ **/
+void __init trap_init_hook(void)
+{
+ if (x86_quirks->arch_trap_init) {
+ if (x86_quirks->arch_trap_init())
+ return;
+ }
+}
+
+static struct irqaction irq0 = {
+ .handler = timer_interrupt,
+ .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
+ .mask = CPU_MASK_NONE,
+ .name = "timer"
+};
+
+/**
+ * pre_time_init_hook - do any specific initialisations before time init.
+ *
+ **/
+void __init pre_time_init_hook(void)
+{
+ if (x86_quirks->arch_pre_time_init)
+ x86_quirks->arch_pre_time_init();
+}
+
+/**
+ * time_init_hook - do any specific initialisations for the system timer.
+ *
+ * Description:
+ * Must plug the system timer interrupt source at HZ into the IRQ listed
+ * in irq_vectors.h:TIMER_IRQ
+ **/
+void __init time_init_hook(void)
+{
+ if (x86_quirks->arch_time_init) {
+ /*
+ * A nonzero return code does not mean failure, it means
+ * that the architecture quirk does not want any
+ * generic (timer) setup to be performed after this:
+ */
+ if (x86_quirks->arch_time_init())
+ return;
+ }
+
+ irq0.mask = cpumask_of_cpu(0);
+ setup_irq(0, &irq0);
+}
+
+#ifdef CONFIG_MCA
+/**
+ * mca_nmi_hook - hook into MCA specific NMI chain
+ *
+ * Description:
+ * The MCA (Microchannel Architecture) has an NMI chain for NMI sources
+ * along the MCA bus. Use this to hook into that chain if you will need
+ * it.
+ **/
+void mca_nmi_hook(void)
+{
+ /*
+ * If I recall correctly, there's a whole bunch of other things that
+ * we can do to check for NMI problems, but that's all I know about
+ * at the moment.
+ */
+ pr_warning("NMI generated from unknown source!\n");
+}
+#endif
+
+static __init int no_ipi_broadcast(char *str)
+{
+ get_option(&str, &no_broadcast);
+ pr_info("Using %s mode\n",
+ no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
+ return 1;
+}
+__setup("no_ipi_broadcast=", no_ipi_broadcast);
+
+static int __init print_ipi_mode(void)
+{
+ pr_info("Using IPI %s mode\n",
+ no_broadcast ? "No-Shortcut" : "Shortcut");
+ return 0;
+}
+
+late_initcall(print_ipi_mode);
+
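
generic_apic_probe() above walks a NULL-terminated array of drivers and keeps the first whose probe() hook succeeds, with apic_default as the catch-all last entry; the MPS/MADT OEM checks reuse the same array. A minimal sketch of that probe-array pattern with invented driver names:

#include <stdio.h>

/*
 * Probe-array pattern: walk a NULL-terminated array of drivers, keep the
 * first whose probe() succeeds, with a catch-all "default" entry last.
 */
struct drv {
        const char *name;
        int (*probe)(void);
};

static int probe_fancy(void)   { return 0; }   /* pretend hardware absent   */
static int probe_default(void) { return 1; }   /* always matches, keep last */

static struct drv fancy = { "fancy",   probe_fancy   };
static struct drv def   = { "default", probe_default };

static struct drv *probe_list[] = { &fancy, &def, NULL };

int main(void)
{
        struct drv *selected = NULL;
        int i;

        for (i = 0; probe_list[i]; i++) {
                if (probe_list[i]->probe()) {
                        selected = probe_list[i];
                        break;
                }
        }
        printf("Using driver %s\n", selected ? selected->name : "(none)");
        return 0;
}
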
diff --git a/arch/x86/kernel/probe_roms_32.c b/arch/x86/kernel/probe_roms_32.c
index 675a48c404a..071e7fea42e 100644
--- a/arch/x86/kernel/probe_roms_32.c
+++ b/arch/x86/kernel/probe_roms_32.c
@@ -18,7 +18,7 @@
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <setup_arch.h>
+#include <asm/setup_arch.h>
static struct resource system_rom_resource = {
.name = "System ROM",
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6d12f7e37f8..87b69d4fac1 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -350,7 +350,7 @@ static void c1e_idle(void)
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
if (pm_idle == poll_idle && smp_num_siblings > 1) {
printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
" performance may degrade.\n");
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f8536fee5c1..32e8f0af292 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -24,7 +24,7 @@
# include <asm/iommu.h>
#endif
-#include <mach_ipi.h>
+#include <asm/genapic.h>
/*
* Power off function, if any
@@ -651,7 +651,7 @@ static int crash_nmi_callback(struct notifier_block *self,
static void smp_send_nmi_allbutself(void)
{
- send_IPI_allbutself(NMI_VECTOR);
+ apic->send_IPI_allbutself(NMI_VECTOR);
}
static struct notifier_block crash_nmi_nb = {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d5d6693b706..8fce6c71451 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -81,7 +81,7 @@
#include <asm/io_apic.h>
#include <asm/ist.h>
#include <asm/vmi.h>
-#include <setup_arch.h>
+#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
@@ -97,7 +97,7 @@
#include <asm/mmu_context.h>
#include <asm/proto.h>
-#include <mach_apic.h>
+#include <asm/genapic.h>
#include <asm/paravirt.h>
#include <asm/hypervisor.h>
@@ -112,6 +112,20 @@
#define ARCH_SETUP
#endif
+unsigned int boot_cpu_id __read_mostly;
+
+#ifdef CONFIG_X86_64
+int default_cpu_present_to_apicid(int mps_cpu)
+{
+ return __default_cpu_present_to_apicid(mps_cpu);
+}
+
+int default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+ return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
+}
+#endif
+
#ifndef CONFIG_DEBUG_BOOT_PARAMS
struct boot_params __initdata boot_params;
#else
@@ -588,10 +602,9 @@ early_param("elfcorehdr", setup_elfcorehdr);
static int __init default_update_genapic(void)
{
-#ifdef CONFIG_X86_SMP
-# if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64)
- genapic->wakeup_cpu = wakeup_secondary_cpu_via_init;
-# endif
+#ifdef CONFIG_SMP
+ if (!apic->wakeup_cpu)
+ apic->wakeup_cpu = wakeup_secondary_cpu_via_init;
#endif
return 0;
@@ -892,12 +905,11 @@ void __init setup_arch(char **cmdline_p)
*/
acpi_reserve_bootmem();
#endif
-#ifdef CONFIG_X86_FIND_SMP_CONFIG
/*
* Find and reserve possible boot-time SMP configuration:
*/
find_smp_config();
-#endif
+
reserve_crashkernel();
#ifdef CONFIG_X86_64
@@ -924,9 +936,7 @@ void __init setup_arch(char **cmdline_p)
map_vsyscall();
#endif
-#ifdef CONFIG_X86_GENERICARCH
generic_apic_probe();
-#endif
early_quirks();
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index e6faa3316bd..eaaffae31cc 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -2,7 +2,7 @@
* Intel SMP support routines.
*
* (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
- * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
+ * (c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
* (c) 2002,2003 Andi Kleen, SuSE Labs.
*
* i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
@@ -26,8 +26,7 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
-#include <mach_ipi.h>
-#include <mach_apic.h>
+#include <asm/genapic.h>
/*
* Some notes on x86 processor bugs affecting SMP operation:
*
@@ -118,12 +117,12 @@ static void native_smp_send_reschedule(int cpu)
WARN_ON(1);
return;
}
- send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
+ apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
}
void native_send_call_func_single_ipi(int cpu)
{
- send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
+ apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}
void native_send_call_func_ipi(const struct cpumask *mask)
@@ -131,7 +130,7 @@ void native_send_call_func_ipi(const struct cpumask *mask)
cpumask_var_t allbutself;
if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
- send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+ apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
return;
}
@@ -140,9 +139,9 @@ void native_send_call_func_ipi(const struct cpumask *mask)
if (cpumask_equal(mask, allbutself) &&
cpumask_equal(cpu_online_mask, cpu_callout_mask))
- send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+ apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
else
- send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+ apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
free_cpumask_var(allbutself);
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 612d3c74f6a..af57f88186e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -2,7 +2,7 @@
* x86 SMP booting functions
*
* (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
- * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
* Copyright 2001 Andi Kleen, SuSE Labs.
*
* Much of the core SMP work is based on previous work by Thomas Radke, to
@@ -65,9 +65,8 @@
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
-#include <mach_apic.h>
-#include <mach_wakecpu.h>
-#include <smpboot_hooks.h>
+#include <asm/genapic.h>
+#include <asm/smpboot_hooks.h>
#ifdef CONFIG_X86_32
u8 apicid_2_node[MAX_APICID];
@@ -163,7 +162,7 @@ static void map_cpu_to_logical_apicid(void)
{
int cpu = smp_processor_id();
int apicid = logical_smp_processor_id();
- int node = apicid_to_node(apicid);
+ int node = apic->apicid_to_node(apicid);
if (!node_online(node))
node = first_online_node;
@@ -196,7 +195,8 @@ static void __cpuinit smp_callin(void)
* our local APIC. We have to wait for the IPI or we'll
* lock up on an APIC access.
*/
- wait_for_init_deassert(&init_deasserted);
+ if (apic->wait_for_init_deassert)
+ apic->wait_for_init_deassert(&init_deasserted);
/*
* (This works even if the APIC is not enabled.)
@@ -243,7 +243,8 @@ static void __cpuinit smp_callin(void)
*/
pr_debug("CALLIN, before setup_local_APIC().\n");
- smp_callin_clear_local_apic();
+ if (apic->smp_callin_clear_local_apic)
+ apic->smp_callin_clear_local_apic();
setup_local_APIC();
end_local_APIC_setup();
map_cpu_to_logical_apicid();
@@ -583,7 +584,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
/* Target chip */
/* Boot on the stack */
/* Kick the second */
- apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);
+ apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid);
pr_debug("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
@@ -749,7 +750,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
* (ie clustered apic addressing mode), this is a LOGICAL apic ID.
- * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
+ * Returns zero if CPU booted OK, else error code from ->wakeup_cpu.
*/
{
unsigned long boot_error = 0;
@@ -824,7 +825,8 @@ do_rest:
pr_debug("Setting warm reset code and vector.\n");
- store_NMI_vector(&nmi_high, &nmi_low);
+ if (apic->store_NMI_vector)
+ apic->store_NMI_vector(&nmi_high, &nmi_low);
smpboot_setup_warm_reset_vector(start_ip);
/*
@@ -839,7 +841,7 @@ do_rest:
/*
* Starting actual IPI sequence...
*/
- boot_error = wakeup_secondary_cpu(apicid, start_ip);
+ boot_error = apic->wakeup_cpu(apicid, start_ip);
if (!boot_error) {
/*
@@ -873,8 +875,8 @@ do_rest:
else
/* trampoline code not run */
printk(KERN_ERR "Not responding.\n");
- if (get_uv_system_type() != UV_NON_UNIQUE_APIC)
- inquire_remote_apic(apicid);
+ if (apic->inquire_remote_apic)
+ apic->inquire_remote_apic(apicid);
}
}
@@ -905,7 +907,7 @@ do_rest:
int __cpuinit native_cpu_up(unsigned int cpu)
{
- int apicid = cpu_present_to_apicid(cpu);
+ int apicid = apic->cpu_present_to_apicid(cpu);
unsigned long flags;
int err;
@@ -998,14 +1000,14 @@ static int __init smp_sanity_check(unsigned max_cpus)
{
preempt_disable();
-#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
+#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
if (def_to_bigsmp && nr_cpu_ids > 8) {
unsigned int cpu;
unsigned nr;
printk(KERN_WARNING
"More than 8 CPUs detected - skipping them.\n"
- "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
+ "Use CONFIG_X86_BIGSMP.\n");
nr = 0;
for_each_present_cpu(cpu) {
@@ -1051,7 +1053,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
* Should not be necessary because the MP table should list the boot
* CPU too, but we do it for the sake of robustness anyway.
*/
- if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
+ if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
printk(KERN_NOTICE
"weird, boot CPU (#%d) not listed by the BIOS.\n",
boot_cpu_physical_apicid);
@@ -1069,7 +1071,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
printk(KERN_ERR "... forcing use of dummy APIC emulation."
"(tell your hw vendor)\n");
smpboot_clear_io_apic();
- disable_ioapic_setup();
+ arch_disable_smp_support();
return -1;
}
@@ -1128,7 +1130,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
#ifdef CONFIG_X86_64
enable_IR_x2apic();
- setup_apic_routing();
+ default_setup_apic_routing();
#endif
if (smp_sanity_check(max_cpus) < 0) {
@@ -1163,7 +1165,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
map_cpu_to_logical_apicid();
- setup_portio_remap();
+ if (apic->setup_portio_remap)
+ apic->setup_portio_remap();
smpboot_setup_io_apic();
/*
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 10786af9554..f7bddc2e37d 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -1,7 +1,7 @@
/*
* Stack trace management functions
*
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
#include <linux/sched.h>
#include <linux/stacktrace.h>
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c
index 7b987852e87..1e733eff9b3 100644
--- a/arch/x86/kernel/summit_32.c
+++ b/arch/x86/kernel/summit_32.c
@@ -30,8 +30,364 @@
#include <linux/init.h>
#include <asm/io.h>
#include <asm/bios_ebda.h>
-#include <asm/summit/mpparse.h>
+/*
+ * APIC driver for the IBM "Summit" chipset.
+ */
+#define APIC_DEFINITION 1
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/apic.h>
+#include <asm/smp.h>
+#include <asm/genapic.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <asm/ipi.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/smp.h>
+
+static inline unsigned summit_get_apic_id(unsigned long x)
+{
+ return (x >> 24) & 0xFF;
+}
+
+static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
+{
+ default_send_IPI_mask_sequence_logical(mask, vector);
+}
+
+static inline void summit_send_IPI_allbutself(int vector)
+{
+ cpumask_t mask = cpu_online_map;
+ cpu_clear(smp_processor_id(), mask);
+
+ if (!cpus_empty(mask))
+ summit_send_IPI_mask(&mask, vector);
+}
+
+static inline void summit_send_IPI_all(int vector)
+{
+ summit_send_IPI_mask(&cpu_online_map, vector);
+}
+
+#include <asm/tsc.h>
+
+extern int use_cyclone;
+
+#ifdef CONFIG_X86_SUMMIT_NUMA
+extern void setup_summit(void);
+#else
+#define setup_summit() {}
+#endif
+
+static inline int
+summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
+{
+ if (!strncmp(oem, "IBM ENSW", 8) &&
+ (!strncmp(productid, "VIGIL SMP", 9)
+ || !strncmp(productid, "EXA", 3)
+ || !strncmp(productid, "RUTHLESS SMP", 12))){
+ mark_tsc_unstable("Summit based system");
+ use_cyclone = 1; /*enable cyclone-timer*/
+ setup_summit();
+ return 1;
+ }
+ return 0;
+}
+
+/* Hook from generic ACPI tables.c */
+static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+ if (!strncmp(oem_id, "IBM", 3) &&
+ (!strncmp(oem_table_id, "SERVIGIL", 8)
+ || !strncmp(oem_table_id, "EXA", 3))){
+ mark_tsc_unstable("Summit based system");
+ use_cyclone = 1; /*enable cyclone-timer*/
+ setup_summit();
+ return 1;
+ }
+ return 0;
+}
+
+struct rio_table_hdr {
+ unsigned char version; /* Version number of this data structure */
+ /* Version 3 adds chassis_num & WP_index */
+ unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
+ unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */
+} __attribute__((packed));
+
+struct scal_detail {
+ unsigned char node_id; /* Scalability Node ID */
+ unsigned long CBAR; /* Address of 1MB register space */
+ unsigned char port0node; /* Node ID port connected to: 0xFF=None */
+ unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
+ unsigned char port1node; /* Node ID port connected to: 0xFF = None */
+ unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
+ unsigned char port2node; /* Node ID port connected to: 0xFF = None */
+ unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */
+ unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */
+} __attribute__((packed));
+
+struct rio_detail {
+ unsigned char node_id; /* RIO Node ID */
+ unsigned long BBAR; /* Address of 1MB register space */
+ unsigned char type; /* Type of device */
+ unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/
+ /* For CYC: Node ID of Twister that owns this CYC */
+ unsigned char port0node; /* Node ID port connected to: 0xFF=None */
+ unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
+ unsigned char port1node; /* Node ID port connected to: 0xFF=None */
+ unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
+ unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */
+ /* For CYC: 0 */
+ unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */
+ /* = 0 : the XAPIC is not used, ie:*/
+ /* ints fwded to another XAPIC */
+ /* Bits1:7 Reserved */
+ /* For CYC: Bits0:7 Reserved */
+ unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */
+ /* lower slot numbers/PCI bus numbers */
+ /* For CYC: No meaning */
+ unsigned char chassis_num; /* 1 based Chassis number */
+ /* For LookOut WPEGs this field indicates the */
+ /* Expansion Chassis #, enumerated from Boot */
+ /* Node WPEG external port, then Boot Node CYC */
+ /* external port, then Next Vigil chassis WPEG */
+ /* external port, etc. */
+ /* Shared Lookouts have only 1 chassis number (the */
+ /* first one assigned) */
+} __attribute__((packed));
+
+
+typedef enum {
+ CompatTwister = 0, /* Compatibility Twister */
+ AltTwister = 1, /* Alternate Twister of internal 8-way */
+ CompatCyclone = 2, /* Compatibility Cyclone */
+ AltCyclone = 3, /* Alternate Cyclone of internal 8-way */
+ CompatWPEG = 4, /* Compatibility WPEG */
+ AltWPEG = 5, /* Second Planar WPEG */
+ LookOutAWPEG = 6, /* LookOut WPEG */
+ LookOutBWPEG = 7, /* LookOut WPEG */
+} node_type;
+
+static inline int is_WPEG(struct rio_detail *rio){
+ return (rio->type == CompatWPEG || rio->type == AltWPEG ||
+ rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
+}
+
+
+/* In clustered mode, the high nibble of APIC ID is a cluster number.
+ * The low nibble is a 4-bit bitmap. */
+#define XAPIC_DEST_CPUS_SHIFT 4
+#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
+#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
+
+#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
+
+static inline const cpumask_t *summit_target_cpus(void)
+{
+ /* CPU_MASK_ALL (0xff) has undefined behaviour with
+ * dest_LowestPrio mode logical clustered apic interrupt routing.
+ * Just start on cpu 0. IRQ balancing will spread the load.
+ */
+ return &cpumask_of_cpu(0);
+}
+
+static inline unsigned long
+summit_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+ return 0;
+}
+
+/* we don't use the phys_cpu_present_map to indicate apicid presence */
+static inline unsigned long summit_check_apicid_present(int bit)
+{
+ return 1;
+}
+
+#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
+
+extern u8 cpu_2_logical_apicid[];
+
+static inline void summit_init_apic_ldr(void)
+{
+ unsigned long val, id;
+ int count = 0;
+ u8 my_id = (u8)hard_smp_processor_id();
+ u8 my_cluster = (u8)apicid_cluster(my_id);
+#ifdef CONFIG_SMP
+ u8 lid;
+ int i;
+
+ /* Create logical APIC IDs by counting CPUs already in cluster. */
+ for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
+ lid = cpu_2_logical_apicid[i];
+ if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
+ ++count;
+ }
+#endif
+ /* We only have a 4 wide bitmap in cluster mode. If a deranged
+ * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
+ BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
+ id = my_cluster | (1UL << count);
+ apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
+ val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+ val |= SET_APIC_LOGICAL_ID(id);
+ apic_write(APIC_LDR, val);
+}
+
+static inline int summit_apic_id_registered(void)
+{
+ return 1;
+}
+
+static inline void summit_setup_apic_routing(void)
+{
+ printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
+ nr_ioapics);
+}
+
+static inline int summit_apicid_to_node(int logical_apicid)
+{
+#ifdef CONFIG_SMP
+ return apicid_2_node[hard_smp_processor_id()];
+#else
+ return 0;
+#endif
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int summit_cpu_to_logical_apicid(int cpu)
+{
+#ifdef CONFIG_SMP
+ if (cpu >= nr_cpu_ids)
+ return BAD_APICID;
+ return (int)cpu_2_logical_apicid[cpu];
+#else
+ return logical_smp_processor_id();
+#endif
+}
+
+static inline int summit_cpu_present_to_apicid(int mps_cpu)
+{
+ if (mps_cpu < nr_cpu_ids)
+ return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+ else
+ return BAD_APICID;
+}
+
+static inline physid_mask_t
+summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
+{
+ /* For clustered we don't have a good way to do this yet - hack */
+ return physids_promote(0x0F);
+}
+
+static inline physid_mask_t summit_apicid_to_cpu_present(int apicid)
+{
+ return physid_mask_of_physid(0);
+}
+
+static inline void summit_setup_portio_remap(void)
+{
+}
+
+static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+ return 1;
+}
+
+static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+ int cpus_found = 0;
+ int num_bits_set;
+ int apicid;
+ int cpu;
+
+ num_bits_set = cpus_weight(*cpumask);
+ /* Return id to all */
+ if (num_bits_set >= nr_cpu_ids)
+ return 0xFF;
+ /*
+ * The cpus in the mask must all be in the same apicid cluster. If they
+ * are not, return the default value of target_cpus():
+ */
+ cpu = first_cpu(*cpumask);
+ apicid = summit_cpu_to_logical_apicid(cpu);
+
+ while (cpus_found < num_bits_set) {
+ if (cpu_isset(cpu, *cpumask)) {
+ int new_apicid = summit_cpu_to_logical_apicid(cpu);
+
+ if (apicid_cluster(apicid) !=
+ apicid_cluster(new_apicid)) {
+ printk ("%s: Not a valid mask!\n", __func__);
+
+ return 0xFF;
+ }
+ apicid = apicid | new_apicid;
+ cpus_found++;
+ }
+ cpu++;
+ }
+ return apicid;
+}
+
+static inline unsigned int
+summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+ const struct cpumask *andmask)
+{
+ int apicid = summit_cpu_to_logical_apicid(0);
+ cpumask_var_t cpumask;
+
+ if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+ return apicid;
+
+ cpumask_and(cpumask, inmask, andmask);
+ cpumask_and(cpumask, cpumask, cpu_online_mask);
+ apicid = summit_cpu_mask_to_apicid(cpumask);
+
+ free_cpumask_var(cpumask);
+
+ return apicid;
+}
+
+/*
+ * cpuid returns the value latched in the HW at reset, not the APIC ID
+ * register's value. For any box whose BIOS changes APIC IDs, like
+ * clustered APIC systems, we must use hard_smp_processor_id.
+ *
+ * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
+ */
+static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+ return hard_smp_processor_id() >> index_msb;
+}
+
+static int probe_summit(void)
+{
+ /* probed later in mptable/ACPI hooks */
+ return 0;
+}
+
+static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+ /* Careful. Some cpus do not strictly honor the set of cpus
+ * specified in the interrupt destination when using lowest
+ * priority interrupt delivery mode.
+ *
+ * In particular there was a hyperthreading cpu observed to
+ * deliver interrupts to the wrong hyperthread when only one
+ * hyperthread was specified in the interrupt destination.
+ */
+ *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+}
+
+#ifdef CONFIG_X86_SUMMIT_NUMA
static struct rio_table_hdr *rio_table_hdr __initdata;
static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata;
@@ -186,3 +542,61 @@ void __init setup_summit(void)
next_wpeg = 0;
} while (next_wpeg != 0);
}
+#endif
+
+struct genapic apic_summit = {
+
+ .name = "summit",
+ .probe = probe_summit,
+ .acpi_madt_oem_check = summit_acpi_madt_oem_check,
+ .apic_id_registered = summit_apic_id_registered,
+
+ .irq_delivery_mode = dest_LowestPrio,
+ /* logical delivery broadcast to all CPUs: */
+ .irq_dest_mode = 1,
+
+ .target_cpus = summit_target_cpus,
+ .disable_esr = 1,
+ .dest_logical = APIC_DEST_LOGICAL,
+ .check_apicid_used = summit_check_apicid_used,
+ .check_apicid_present = summit_check_apicid_present,
+
+ .vector_allocation_domain = summit_vector_allocation_domain,
+ .init_apic_ldr = summit_init_apic_ldr,
+
+ .ioapic_phys_id_map = summit_ioapic_phys_id_map,
+ .setup_apic_routing = summit_setup_apic_routing,
+ .multi_timer_check = NULL,
+ .apicid_to_node = summit_apicid_to_node,
+ .cpu_to_logical_apicid = summit_cpu_to_logical_apicid,
+ .cpu_present_to_apicid = summit_cpu_present_to_apicid,
+ .apicid_to_cpu_present = summit_apicid_to_cpu_present,
+ .setup_portio_remap = NULL,
+ .check_phys_apicid_present = summit_check_phys_apicid_present,
+ .enable_apic_mode = NULL,
+ .phys_pkg_id = summit_phys_pkg_id,
+ .mps_oem_check = summit_mps_oem_check,
+
+ .get_apic_id = summit_get_apic_id,
+ .set_apic_id = NULL,
+ .apic_id_mask = 0xFF << 24,
+
+ .cpu_mask_to_apicid = summit_cpu_mask_to_apicid,
+ .cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and,
+
+ .send_IPI_mask = summit_send_IPI_mask,
+ .send_IPI_mask_allbutself = NULL,
+ .send_IPI_allbutself = summit_send_IPI_allbutself,
+ .send_IPI_all = summit_send_IPI_all,
+ .send_IPI_self = default_send_IPI_self,
+
+ .wakeup_cpu = NULL,
+ .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
+ .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+
+ .wait_for_init_deassert = default_wait_for_init_deassert,
+
+ .smp_callin_clear_local_apic = NULL,
+ .store_NMI_vector = NULL,
+ .inquire_remote_apic = default_inquire_remote_apic,
+};
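A note on the ID layout the Summit driver above relies on: in clustered mode the high nibble of a logical APIC ID is the cluster number and the low nibble a per-cluster CPU bitmap, so an ID of 0x23 means cluster 2 with CPUs 0 and 1 set. A hypothetical helper, not part of this patch, that decodes an ID using the XAPIC_DEST_* constants defined earlier in summit_32.c:

	/* Hypothetical decoder for a Summit-style clustered logical APIC ID,
	 * built on XAPIC_DEST_CPUS_SHIFT/MASK and XAPIC_DEST_CLUSTER_MASK above. */
	static inline void summit_decode_logical_apicid(unsigned int id,
							unsigned int *cluster,
							unsigned int *cpu_bits)
	{
		*cluster  = (id & XAPIC_DEST_CLUSTER_MASK) >> XAPIC_DEST_CPUS_SHIFT;
		*cpu_bits = id & XAPIC_DEST_CPUS_MASK;
	}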
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 3985cac0ed4..764c74e871f 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -38,7 +38,7 @@
#include <asm/time.h>
#include <asm/timer.h>
-#include "do_timer.h"
+#include <asm/do_timer.h>
int timer_ack;
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f4b2f27d19b..f396e61bcb3 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -20,7 +20,7 @@
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
-#include <mach_apic.h>
+#include <asm/genapic.h>
static struct bau_control **uv_bau_table_bases __read_mostly;
static int uv_bau_retry_limit __read_mostly;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 71a8f871331..bde57f0f161 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -54,7 +54,7 @@
#include <asm/desc.h>
#include <asm/i387.h>
-#include <mach_traps.h>
+#include <asm/mach_traps.h>
#ifdef CONFIG_X86_64
#include <asm/pgalloc.h>
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 599e5816863..83d53ce5d4c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -773,7 +773,7 @@ __cpuinit int unsynchronized_tsc(void)
if (!cpu_has_tsc || tsc_unstable)
return 1;
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
if (apic_is_clustered_box())
return 1;
#endif
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index d801d06af06..4fd646e6dd4 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -32,9 +32,9 @@
#include <asm/e820.h>
#include <asm/io.h>
-#include <mach_ipi.h>
+#include <asm/genapic.h>
-#include "mach_apic.h"
+#include <asm/genapic.h>
#include <linux/kernel_stat.h>
@@ -200,7 +200,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
return;
}
- apic_cpus = apicid_to_cpu_present(m->apicid);
+ apic_cpus = apic->apicid_to_cpu_present(m->apicid);
physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus);
/*
* Validate version
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index c4c1f9e0940..a4791ef412d 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -256,7 +256,7 @@ void __devinit vmi_time_bsp_init(void)
*/
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
local_irq_disable();
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
/*
* XXX handle_percpu_irq only defined for SMP; we need to switch over
* to using it, since this is a local interrupt, which each CPU must
diff --git a/arch/x86/mach-default/Makefile b/arch/x86/mach-default/Makefile
deleted file mode 100644
index 012fe34459e..00000000000
--- a/arch/x86/mach-default/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-obj-y := setup.o
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
deleted file mode 100644
index a265a7c6319..00000000000
--- a/arch/x86/mach-default/setup.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Machine specific setup for generic
- */
-
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <asm/acpi.h>
-#include <asm/arch_hooks.h>
-#include <asm/e820.h>
-#include <asm/setup.h>
-
-#include <mach_ipi.h>
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define DEFAULT_SEND_IPI (1)
-#else
-#define DEFAULT_SEND_IPI (0)
-#endif
-
-int no_broadcast = DEFAULT_SEND_IPI;
-
-/**
- * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
- *
- * Description:
- * Perform any necessary interrupt initialisation prior to setting up
- * the "ordinary" interrupt call gates. For legacy reasons, the ISA
- * interrupts should be initialised here if the machine emulates a PC
- * in any way.
- **/
-void __init pre_intr_init_hook(void)
-{
- if (x86_quirks->arch_pre_intr_init) {
- if (x86_quirks->arch_pre_intr_init())
- return;
- }
- init_ISA_irqs();
-}
-
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
- .handler = no_action,
- .mask = CPU_MASK_NONE,
- .name = "cascade",
-};
-
-/**
- * intr_init_hook - post gate setup interrupt initialisation
- *
- * Description:
- * Fill in any interrupts that may have been left out by the general
- * init_IRQ() routine. interrupts having to do with the machine rather
- * than the devices on the I/O bus (like APIC interrupts in intel MP
- * systems) are started here.
- **/
-void __init intr_init_hook(void)
-{
- if (x86_quirks->arch_intr_init) {
- if (x86_quirks->arch_intr_init())
- return;
- }
- if (!acpi_ioapic)
- setup_irq(2, &irq2);
-
-}
-
-/**
- * pre_setup_arch_hook - hook called prior to any setup_arch() execution
- *
- * Description:
- * generally used to activate any machine specific identification
- * routines that may be needed before setup_arch() runs. On Voyager
- * this is used to get the board revision and type.
- **/
-void __init pre_setup_arch_hook(void)
-{
-}
-
-/**
- * trap_init_hook - initialise system specific traps
- *
- * Description:
- * Called as the final act of trap_init(). Used in VISWS to initialise
- * the various board specific APIC traps.
- **/
-void __init trap_init_hook(void)
-{
- if (x86_quirks->arch_trap_init) {
- if (x86_quirks->arch_trap_init())
- return;
- }
-}
-
-static struct irqaction irq0 = {
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
- .mask = CPU_MASK_NONE,
- .name = "timer"
-};
-
-/**
- * pre_time_init_hook - do any specific initialisations before.
- *
- **/
-void __init pre_time_init_hook(void)
-{
- if (x86_quirks->arch_pre_time_init)
- x86_quirks->arch_pre_time_init();
-}
-
-/**
- * time_init_hook - do any specific initialisations for the system timer.
- *
- * Description:
- * Must plug the system timer interrupt source at HZ into the IRQ listed
- * in irq_vectors.h:TIMER_IRQ
- **/
-void __init time_init_hook(void)
-{
- if (x86_quirks->arch_time_init) {
- /*
- * A nonzero return code does not mean failure, it means
- * that the architecture quirk does not want any
- * generic (timer) setup to be performed after this:
- */
- if (x86_quirks->arch_time_init())
- return;
- }
-
- irq0.mask = cpumask_of_cpu(0);
- setup_irq(0, &irq0);
-}
-
-#ifdef CONFIG_MCA
-/**
- * mca_nmi_hook - hook into MCA specific NMI chain
- *
- * Description:
- * The MCA (Microchannel Architecture) has an NMI chain for NMI sources
- * along the MCA bus. Use this to hook into that chain if you will need
- * it.
- **/
-void mca_nmi_hook(void)
-{
- /*
- * If I recall correctly, there's a whole bunch of other things that
- * we can do to check for NMI problems, but that's all I know about
- * at the moment.
- */
- pr_warning("NMI generated from unknown source!\n");
-}
-#endif
-
-static __init int no_ipi_broadcast(char *str)
-{
- get_option(&str, &no_broadcast);
- pr_info("Using %s mode\n",
- no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
- return 1;
-}
-__setup("no_ipi_broadcast=", no_ipi_broadcast);
-
-static int __init print_ipi_mode(void)
-{
- pr_info("Using IPI %s mode\n",
- no_broadcast ? "No-Shortcut" : "Shortcut");
- return 0;
-}
-
-late_initcall(print_ipi_mode);
-
diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile
deleted file mode 100644
index 6730f4e7c74..00000000000
--- a/arch/x86/mach-generic/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the generic architecture
-#
-
-EXTRA_CFLAGS := -Iarch/x86/kernel
-
-obj-y := probe.o default.o
-obj-$(CONFIG_X86_NUMAQ) += numaq.o
-obj-$(CONFIG_X86_SUMMIT) += summit.o
-obj-$(CONFIG_X86_BIGSMP) += bigsmp.o
-obj-$(CONFIG_X86_ES7000) += es7000.o
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
deleted file mode 100644
index bc4c7840b2a..00000000000
--- a/arch/x86/mach-generic/bigsmp.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
- * Drives the local APIC in "clustered mode".
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/dmi.h>
-#include <asm/bigsmp/apicdef.h>
-#include <linux/smp.h>
-#include <asm/bigsmp/apic.h>
-#include <asm/bigsmp/ipi.h>
-#include <asm/mach-default/mach_mpparse.h>
-#include <asm/mach-default/mach_wakecpu.h>
-
-static int dmi_bigsmp; /* can be set by dmi scanners */
-
-static int hp_ht_bigsmp(const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
- dmi_bigsmp = 1;
- return 0;
-}
-
-
-static const struct dmi_system_id bigsmp_dmi_table[] = {
- { hp_ht_bigsmp, "HP ProLiant DL760 G2",
- { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
- DMI_MATCH(DMI_BIOS_VERSION, "P44-"),}
- },
-
- { hp_ht_bigsmp, "HP ProLiant DL740",
- { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
- DMI_MATCH(DMI_BIOS_VERSION, "P47-"),}
- },
- { }
-};
-
-static void vector_allocation_domain(int cpu, cpumask_t *retmask)
-{
- cpus_clear(*retmask);
- cpu_set(cpu, *retmask);
-}
-
-static int probe_bigsmp(void)
-{
- if (def_to_bigsmp)
- dmi_bigsmp = 1;
- else
- dmi_check_system(bigsmp_dmi_table);
- return dmi_bigsmp;
-}
-
-struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp);
diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c
deleted file mode 100644
index e63a4a76d8c..00000000000
--- a/arch/x86/mach-generic/default.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Default generic APIC driver. This handles up to 8 CPUs.
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/mach-default/mach_apicdef.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <asm/mach-default/mach_apic.h>
-#include <asm/mach-default/mach_ipi.h>
-#include <asm/mach-default/mach_mpparse.h>
-#include <asm/mach-default/mach_wakecpu.h>
-
-/* should be called last. */
-static int probe_default(void)
-{
- return 1;
-}
-
-struct genapic apic_default = APIC_INIT("default", probe_default);
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
deleted file mode 100644
index c2ded144802..00000000000
--- a/arch/x86/mach-generic/es7000.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * APIC driver for the Unisys ES7000 chipset.
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <asm/es7000/apicdef.h>
-#include <linux/smp.h>
-#include <asm/es7000/apic.h>
-#include <asm/es7000/ipi.h>
-#include <asm/es7000/mpparse.h>
-#include <asm/mach-default/mach_wakecpu.h>
-
-void __init es7000_update_genapic_to_cluster(void)
-{
- genapic->target_cpus = target_cpus_cluster;
- genapic->int_delivery_mode = INT_DELIVERY_MODE_CLUSTER;
- genapic->int_dest_mode = INT_DEST_MODE_CLUSTER;
- genapic->no_balance_irq = NO_BALANCE_IRQ_CLUSTER;
-
- genapic->init_apic_ldr = init_apic_ldr_cluster;
-
- genapic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster;
-}
-
-static int probe_es7000(void)
-{
- /* probed later in mptable/ACPI hooks */
- return 0;
-}
-
-extern void es7000_sw_apic(void);
-static void __init enable_apic_mode(void)
-{
- es7000_sw_apic();
- return;
-}
-
-static __init int
-mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
- if (mpc->oemptr) {
- struct mpc_oemtable *oem_table =
- (struct mpc_oemtable *)mpc->oemptr;
- if (!strncmp(oem, "UNISYS", 6))
- return parse_unisys_oem((char *)oem_table);
- }
- return 0;
-}
-
-#ifdef CONFIG_ACPI
-/* Hook from generic ACPI tables.c */
-static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
- unsigned long oem_addr = 0;
- int check_dsdt;
- int ret = 0;
-
- /* check dsdt at first to avoid clear fix_map for oem_addr */
- check_dsdt = es7000_check_dsdt();
-
- if (!find_unisys_acpi_oem_table(&oem_addr)) {
- if (check_dsdt)
- ret = parse_unisys_oem((char *)oem_addr);
- else {
- setup_unisys();
- ret = 1;
- }
- /*
- * we need to unmap it
- */
- unmap_unisys_acpi_oem_table(oem_addr);
- }
- return ret;
-}
-#else
-static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
- return 0;
-}
-#endif
-
-static void vector_allocation_domain(int cpu, cpumask_t *retmask)
-{
- /* Careful. Some cpus do not strictly honor the set of cpus
- * specified in the interrupt destination when using lowest
- * priority interrupt delivery mode.
- *
- * In particular there was a hyperthreading cpu observed to
- * deliver interrupts to the wrong hyperthread when only one
- * hyperthread was specified in the interrupt desitination.
- */
- *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
-}
-
-struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
deleted file mode 100644
index 3679e225564..00000000000
--- a/arch/x86/mach-generic/numaq.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * APIC driver for the IBM NUMAQ chipset.
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <asm/numaq/apicdef.h>
-#include <linux/smp.h>
-#include <asm/numaq/apic.h>
-#include <asm/numaq/ipi.h>
-#include <asm/numaq/mpparse.h>
-#include <asm/numaq/wakecpu.h>
-#include <asm/numaq.h>
-
-static int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
- numaq_mps_oem_check(mpc, oem, productid);
- return found_numaq;
-}
-
-static int probe_numaq(void)
-{
- /* already know from get_memcfg_numaq() */
- return found_numaq;
-}
-
-/* Hook from generic ACPI tables.c */
-static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
- return 0;
-}
-
-static void vector_allocation_domain(int cpu, cpumask_t *retmask)
-{
- /* Careful. Some cpus do not strictly honor the set of cpus
- * specified in the interrupt destination when using lowest
- * priority interrupt delivery mode.
- *
- * In particular there was a hyperthreading cpu observed to
- * deliver interrupts to the wrong hyperthread when only one
- * hyperthread was specified in the interrupt desitination.
- */
- *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
-}
-
-struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
deleted file mode 100644
index 15a38daef1a..00000000000
--- a/arch/x86/mach-generic/probe.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright 2003 Andi Kleen, SuSE Labs.
- * Subject to the GNU Public License, v.2
- *
- * Generic x86 APIC driver probe layer.
- */
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <asm/fixmap.h>
-#include <asm/mpspec.h>
-#include <asm/apicdef.h>
-#include <asm/genapic.h>
-#include <asm/setup.h>
-
-extern struct genapic apic_numaq;
-extern struct genapic apic_summit;
-extern struct genapic apic_bigsmp;
-extern struct genapic apic_es7000;
-extern struct genapic apic_default;
-
-struct genapic *genapic = &apic_default;
-
-static struct genapic *apic_probe[] __initdata = {
-#ifdef CONFIG_X86_NUMAQ
- &apic_numaq,
-#endif
-#ifdef CONFIG_X86_SUMMIT
- &apic_summit,
-#endif
-#ifdef CONFIG_X86_BIGSMP
- &apic_bigsmp,
-#endif
-#ifdef CONFIG_X86_ES7000
- &apic_es7000,
-#endif
- &apic_default, /* must be last */
- NULL,
-};
-
-static int cmdline_apic __initdata;
-static int __init parse_apic(char *arg)
-{
- int i;
-
- if (!arg)
- return -EINVAL;
-
- for (i = 0; apic_probe[i]; i++) {
- if (!strcmp(apic_probe[i]->name, arg)) {
- genapic = apic_probe[i];
- cmdline_apic = 1;
- return 0;
- }
- }
-
- if (x86_quirks->update_genapic)
- x86_quirks->update_genapic();
-
- /* Parsed again by __setup for debug/verbose */
- return 0;
-}
-early_param("apic", parse_apic);
-
-void __init generic_bigsmp_probe(void)
-{
-#ifdef CONFIG_X86_BIGSMP
- /*
- * This routine is used to switch to bigsmp mode when
- * - There is no apic= option specified by the user
- * - generic_apic_probe() has chosen apic_default as the sub_arch
- * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
- */
-
- if (!cmdline_apic && genapic == &apic_default) {
- if (apic_bigsmp.probe()) {
- genapic = &apic_bigsmp;
- if (x86_quirks->update_genapic)
- x86_quirks->update_genapic();
- printk(KERN_INFO "Overriding APIC driver with %s\n",
- genapic->name);
- }
- }
-#endif
-}
-
-void __init generic_apic_probe(void)
-{
- if (!cmdline_apic) {
- int i;
- for (i = 0; apic_probe[i]; i++) {
- if (apic_probe[i]->probe()) {
- genapic = apic_probe[i];
- break;
- }
- }
- /* Not visible without early console */
- if (!apic_probe[i])
- panic("Didn't find an APIC driver");
-
- if (x86_quirks->update_genapic)
- x86_quirks->update_genapic();
- }
- printk(KERN_INFO "Using APIC driver %s\n", genapic->name);
-}
-
-/* These functions can switch the APIC even after the initial ->probe() */
-
-int __init mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
- int i;
- for (i = 0; apic_probe[i]; ++i) {
- if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) {
- if (!cmdline_apic) {
- genapic = apic_probe[i];
- if (x86_quirks->update_genapic)
- x86_quirks->update_genapic();
- printk(KERN_INFO "Switched to APIC driver `%s'.\n",
- genapic->name);
- }
- return 1;
- }
- }
- return 0;
-}
-
-int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
- int i;
- for (i = 0; apic_probe[i]; ++i) {
- if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
- if (!cmdline_apic) {
- genapic = apic_probe[i];
- if (x86_quirks->update_genapic)
- x86_quirks->update_genapic();
- printk(KERN_INFO "Switched to APIC driver `%s'.\n",
- genapic->name);
- }
- return 1;
- }
- }
- return 0;
-}
-
-int hard_smp_processor_id(void)
-{
- return genapic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID));
-}
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
deleted file mode 100644
index 2821ffc188b..00000000000
--- a/arch/x86/mach-generic/summit.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * APIC driver for the IBM "Summit" chipset.
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <asm/summit/apicdef.h>
-#include <linux/smp.h>
-#include <asm/summit/apic.h>
-#include <asm/summit/ipi.h>
-#include <asm/summit/mpparse.h>
-#include <asm/mach-default/mach_wakecpu.h>
-
-static int probe_summit(void)
-{
- /* probed later in mptable/ACPI hooks */
- return 0;
-}
-
-static void vector_allocation_domain(int cpu, cpumask_t *retmask)
-{
- /* Careful. Some cpus do not strictly honor the set of cpus
- * specified in the interrupt destination when using lowest
- * priority interrupt delivery mode.
- *
- * In particular there was a hyperthreading cpu observed to
- * deliver interrupts to the wrong hyperthread when only one
- * hyperthread was specified in the interrupt desitination.
- */
- *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
-}
-
-struct genapic apic_summit = APIC_INIT("summit", probe_summit);
diff --git a/arch/x86/mach-rdc321x/Makefile b/arch/x86/mach-rdc321x/Makefile
deleted file mode 100644
index 8325b4ca431..00000000000
--- a/arch/x86/mach-rdc321x/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the RDC321x specific parts of the kernel
-#
-obj-$(CONFIG_X86_RDC321X) := gpio.o platform.o
-
diff --git a/arch/x86/mach-rdc321x/gpio.c b/arch/x86/mach-rdc321x/gpio.c
deleted file mode 100644
index 247f33d3a40..00000000000
--- a/arch/x86/mach-rdc321x/gpio.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * GPIO support for RDC SoC R3210/R8610
- *
- * Copyright (C) 2007, Florian Fainelli <florian@openwrt.org>
- * Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <linux/module.h>
-
-#include <asm/gpio.h>
-#include <asm/mach-rdc321x/rdc321x_defs.h>
-
-
-/* spin lock to protect our private copy of GPIO data register plus
- the access to PCI conf registers. */
-static DEFINE_SPINLOCK(gpio_lock);
-
-/* copy of GPIO data registers */
-static u32 gpio_data_reg1;
-static u32 gpio_data_reg2;
-
-static u32 gpio_request_data[2];
-
-
-static inline void rdc321x_conf_write(unsigned addr, u32 value)
-{
- outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
- outl(value, RDC3210_CFGREG_DATA);
-}
-
-static inline void rdc321x_conf_or(unsigned addr, u32 value)
-{
- outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
- value |= inl(RDC3210_CFGREG_DATA);
- outl(value, RDC3210_CFGREG_DATA);
-}
-
-static inline u32 rdc321x_conf_read(unsigned addr)
-{
- outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
-
- return inl(RDC3210_CFGREG_DATA);
-}
-
-/* configure pin as GPIO */
-static void rdc321x_configure_gpio(unsigned gpio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
- rdc321x_conf_or(gpio < 32
- ? RDC321X_GPIO_CTRL_REG1 : RDC321X_GPIO_CTRL_REG2,
- 1 << (gpio & 0x1f));
- spin_unlock_irqrestore(&gpio_lock, flags);
-}
-
-/* initially setup the 2 copies of the gpio data registers.
- This function must be called by the platform setup code. */
-void __init rdc321x_gpio_setup()
-{
- /* this might not be, what others (BIOS, bootloader, etc.)
- wrote to these registers before, but it's a good guess. Still
- better than just using 0xffffffff. */
-
- gpio_data_reg1 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG1);
- gpio_data_reg2 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG2);
-}
-
-/* determine, if gpio number is valid */
-static inline int rdc321x_is_gpio(unsigned gpio)
-{
- return gpio <= RDC321X_MAX_GPIO;
-}
-
-/* request GPIO */
-int rdc_gpio_request(unsigned gpio, const char *label)
-{
- unsigned long flags;
-
- if (!rdc321x_is_gpio(gpio))
- return -EINVAL;
-
- spin_lock_irqsave(&gpio_lock, flags);
- if (gpio_request_data[(gpio & 0x20) ? 1 : 0] & (1 << (gpio & 0x1f)))
- goto inuse;
- gpio_request_data[(gpio & 0x20) ? 1 : 0] |= (1 << (gpio & 0x1f));
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- return 0;
-inuse:
- spin_unlock_irqrestore(&gpio_lock, flags);
- return -EINVAL;
-}
-EXPORT_SYMBOL(rdc_gpio_request);
-
-/* release previously-claimed GPIO */
-void rdc_gpio_free(unsigned gpio)
-{
- unsigned long flags;
-
- if (!rdc321x_is_gpio(gpio))
- return;
-
- spin_lock_irqsave(&gpio_lock, flags);
- gpio_request_data[(gpio & 0x20) ? 1 : 0] &= ~(1 << (gpio & 0x1f));
- spin_unlock_irqrestore(&gpio_lock, flags);
-}
-EXPORT_SYMBOL(rdc_gpio_free);
-
-/* read GPIO pin */
-int rdc_gpio_get_value(unsigned gpio)
-{
- u32 reg;
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
- reg = rdc321x_conf_read(gpio < 32
- ? RDC321X_GPIO_DATA_REG1 : RDC321X_GPIO_DATA_REG2);
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- return (1 << (gpio & 0x1f)) & reg ? 1 : 0;
-}
-EXPORT_SYMBOL(rdc_gpio_get_value);
-
-/* set GPIO pin to value */
-void rdc_gpio_set_value(unsigned gpio, int value)
-{
- unsigned long flags;
- u32 reg;
-
- reg = 1 << (gpio & 0x1f);
- if (gpio < 32) {
- spin_lock_irqsave(&gpio_lock, flags);
- if (value)
- gpio_data_reg1 |= reg;
- else
- gpio_data_reg1 &= ~reg;
- rdc321x_conf_write(RDC321X_GPIO_DATA_REG1, gpio_data_reg1);
- spin_unlock_irqrestore(&gpio_lock, flags);
- } else {
- spin_lock_irqsave(&gpio_lock, flags);
- if (value)
- gpio_data_reg2 |= reg;
- else
- gpio_data_reg2 &= ~reg;
- rdc321x_conf_write(RDC321X_GPIO_DATA_REG2, gpio_data_reg2);
- spin_unlock_irqrestore(&gpio_lock, flags);
- }
-}
-EXPORT_SYMBOL(rdc_gpio_set_value);
-
-/* configure GPIO pin as input */
-int rdc_gpio_direction_input(unsigned gpio)
-{
- if (!rdc321x_is_gpio(gpio))
- return -EINVAL;
-
- rdc321x_configure_gpio(gpio);
-
- return 0;
-}
-EXPORT_SYMBOL(rdc_gpio_direction_input);
-
-/* configure GPIO pin as output and set value */
-int rdc_gpio_direction_output(unsigned gpio, int value)
-{
- if (!rdc321x_is_gpio(gpio))
- return -EINVAL;
-
- gpio_set_value(gpio, value);
- rdc321x_configure_gpio(gpio);
-
- return 0;
-}
-EXPORT_SYMBOL(rdc_gpio_direction_output);
diff --git a/arch/x86/mach-rdc321x/platform.c b/arch/x86/mach-rdc321x/platform.c
deleted file mode 100644
index 4f4e50c3ad3..00000000000
--- a/arch/x86/mach-rdc321x/platform.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Generic RDC321x platform devices
- *
- * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- *
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-
-#include <asm/gpio.h>
-
-/* LEDS */
-static struct gpio_led default_leds[] = {
- { .name = "rdc:dmz", .gpio = 1, },
-};
-
-static struct gpio_led_platform_data rdc321x_led_data = {
- .num_leds = ARRAY_SIZE(default_leds),
- .leds = default_leds,
-};
-
-static struct platform_device rdc321x_leds = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &rdc321x_led_data,
- }
-};
-
-/* Watchdog */
-static struct platform_device rdc321x_wdt = {
- .name = "rdc321x-wdt",
- .id = -1,
- .num_resources = 0,
-};
-
-static struct platform_device *rdc321x_devs[] = {
- &rdc321x_leds,
- &rdc321x_wdt
-};
-
-static int __init rdc_board_setup(void)
-{
- rdc321x_gpio_setup();
-
- return platform_add_devices(rdc321x_devs, ARRAY_SIZE(rdc321x_devs));
-}
-
-arch_initcall(rdc_board_setup);
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 9f05157220f..2b938a38491 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,7 +1,7 @@
obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
pat.o pgtable.o gup.o
-obj-$(CONFIG_X86_SMP) += tlb.o
+obj-$(CONFIG_SMP) += tlb.o
obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8c3f3113a6e..2a9ea3aee49 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -420,7 +420,6 @@ static noinline void pgtable_bad(struct pt_regs *regs,
printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
tsk->comm, address);
dump_pagetable(address);
- tsk = current;
tsk->thread.cr2 = address;
tsk->thread.trap_no = 14;
tsk->thread.error_code = error_code;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index af750ab973b..1448bcb7f22 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -367,7 +367,7 @@ EXPORT_SYMBOL(ioremap_nocache);
*
* Must be freed with iounmap.
*/
-void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
if (pat_enabled)
return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 56fe7124fbe..16582960056 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -4,7 +4,7 @@
* Based on code by Ingo Molnar and Andi Kleen, copyrighted
* as follows:
*
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+ * Copyright 2003-2009 Red Hat Inc.
* All Rights Reserved.
* Copyright 2005 Andi Kleen, SUSE Labs.
* Copyright 2007 Jiri Kosina, SUSE Labs.
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 7b61036427d..9127e31c726 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -30,7 +30,7 @@
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;
-void __cpuinit pat_disable(char *reason)
+void __cpuinit pat_disable(const char *reason)
{
pat_enabled = 0;
printk(KERN_INFO "%s\n", reason);
@@ -42,6 +42,11 @@ static int __init nopat(char *str)
return 0;
}
early_param("nopat", nopat);
+#else
+static inline void pat_disable(const char *reason)
+{
+ (void)reason;
+}
#endif
@@ -78,16 +83,20 @@ void pat_init(void)
if (!pat_enabled)
return;
- /* Paranoia check. */
- if (!cpu_has_pat && boot_pat_state) {
- /*
- * If this happens we are on a secondary CPU, but
- * switched to PAT on the boot CPU. We have no way to
- * undo PAT.
- */
- printk(KERN_ERR "PAT enabled, "
- "but not supported by secondary CPU\n");
- BUG();
+ if (!cpu_has_pat) {
+ if (!boot_pat_state) {
+ pat_disable("PAT not supported by CPU.");
+ return;
+ } else {
+ /*
+ * If this happens we are on a secondary CPU, but
+ * switched to PAT on the boot CPU. We have no way to
+ * undo PAT.
+ */
+ printk(KERN_ERR "PAT enabled, "
+ "but not supported by secondary CPU\n");
+ BUG();
+ }
}
/* Set PWT to Write-Combining. All other bits stay the same */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 72a6d4ebe34..14c5af4d11e 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -14,7 +14,7 @@
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
= { &init_mm, 0, };
-#include <mach_ipi.h>
+#include <asm/genapic.h>
/*
* Smarter SMP flushing macros.
* c/o Linus Torvalds.
@@ -196,7 +196,7 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
* We have to send the IPI only to
* CPUs affected.
*/
- send_IPI_mask(to_cpumask(f->flush_cpumask),
+ apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
INVALIDATE_TLB_VECTOR_START + sender);
while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
index 2089354968a..5601e829c38 100644
--- a/arch/x86/pci/numaq_32.c
+++ b/arch/x86/pci/numaq_32.c
@@ -5,7 +5,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/nodemask.h>
-#include <mach_apic.h>
+#include <asm/genapic.h>
#include <asm/mpspec.h>
#include <asm/pci_x86.h>
@@ -18,10 +18,6 @@
#define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local])
-/* Where the IO area was mapped on multiquad, always 0 otherwise */
-void *xquad_portio;
-EXPORT_SYMBOL(xquad_portio);
-
#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
#define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index b82cae970df..1c975cc9839 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -7,7 +7,7 @@
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/pci_x86.h>
-#include <asm/mach-default/pci-functions.h>
+#include <asm/pci-functions.h>
/* BIOS32 signature: "_32_" */
#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 5a070900ad3..cfd17799bd6 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -19,21 +19,6 @@ void xen_force_evtchn_callback(void)
(void)HYPERVISOR_xen_version(0, NULL);
}
-static void __init __xen_init_IRQ(void)
-{
- int i;
-
- /* Create identity vector->irq map */
- for(i = 0; i < NR_VECTORS; i++) {
- int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(vector_irq, cpu)[i] = i;
- }
-
- xen_init_IRQ();
-}
-
static unsigned long xen_save_fl(void)
{
struct vcpu_info *vcpu;
@@ -127,7 +112,7 @@ static void xen_halt(void)
}
static const struct pv_irq_ops xen_irq_ops __initdata = {
- .init_IRQ = __xen_init_IRQ,
+ .init_IRQ = xen_init_IRQ,
.save_fl = PV_CALLEE_SAVE(xen_save_fl),
.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 7c41e7405c4..56c62e2858d 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -149,6 +149,9 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
if (q == alg)
goto err;
+ if (crypto_is_moribund(q))
+ continue;
+
if (crypto_is_larval(q)) {
if (!strcmp(alg->cra_driver_name, q->cra_driver_name))
goto err;
@@ -197,7 +200,7 @@ void crypto_alg_tested(const char *name, int err)
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (!crypto_is_larval(q))
+ if (crypto_is_moribund(q) || !crypto_is_larval(q))
continue;
test = (struct crypto_larval *)q;
@@ -210,6 +213,7 @@ void crypto_alg_tested(const char *name, int err)
goto unlock;
found:
+ q->cra_flags |= CRYPTO_ALG_DEAD;
alg = test->adult;
if (err || list_empty(&alg->cra_list))
goto complete;
diff --git a/crypto/api.c b/crypto/api.c
index 9975a7bd246..efe77df6863 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -557,34 +557,34 @@ err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
-
+
/*
- * crypto_free_tfm - Free crypto transform
+ * crypto_destroy_tfm - Free crypto transform
+ * @mem: Start of tfm slab
* @tfm: Transform to free
*
- * crypto_free_tfm() frees up the transform and any associated resources,
+ * This function frees up the transform and any associated resources,
* then drops the refcount on the associated algorithm.
*/
-void crypto_free_tfm(struct crypto_tfm *tfm)
+void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
struct crypto_alg *alg;
int size;
- if (unlikely(!tfm))
+ if (unlikely(!mem))
return;
alg = tfm->__crt_alg;
- size = sizeof(*tfm) + alg->cra_ctxsize;
+ size = ksize(mem);
if (!tfm->exit && alg->cra_exit)
alg->cra_exit(tfm);
crypto_exit_ops(tfm);
crypto_mod_put(alg);
- memset(tfm, 0, size);
- kfree(tfm);
+ memset(mem, 0, size);
+ kfree(mem);
}
-
-EXPORT_SYMBOL_GPL(crypto_free_tfm);
+EXPORT_SYMBOL_GPL(crypto_destroy_tfm);
int crypto_has_alg(const char *name, u32 type, u32 mask)
{
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 9aeeb52004a..3de89a42440 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -54,7 +54,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
struct page *page;
page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
- flush_dcache_page(page);
+ if (!PageSlab(page))
+ flush_dcache_page(page);
}
if (more) {
diff --git a/crypto/shash.c b/crypto/shash.c
index c9df367332f..d5a2b619c55 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -388,10 +388,15 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
struct shash_desc *desc = crypto_tfm_ctx(tfm);
struct crypto_shash *shash;
+ if (!crypto_mod_get(calg))
+ return -EAGAIN;
+
shash = __crypto_shash_cast(crypto_create_tfm(
calg, &crypto_shash_type));
- if (IS_ERR(shash))
+ if (IS_ERR(shash)) {
+ crypto_mod_put(calg);
return PTR_ERR(shash);
+ }
desc->tfm = shash;
tfm->exit = crypto_exit_shash_ops_compat;
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 72fc0f799a6..89d7a6e94c9 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -685,6 +685,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_release_regions:
pci_release_regions(dev);
out:
+ kfree(card);
return err;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 34f80fa6fed..8299e2d3b61 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -549,6 +549,15 @@ static void do_nbd_request(struct request_queue * q)
BUG_ON(lo->magic != LO_MAGIC);
+ if (unlikely(!lo->sock)) {
+ printk(KERN_ERR "%s: Attempted send on closed socket\n",
+ lo->disk->disk_name);
+ req->errors++;
+ nbd_end_request(req);
+ spin_lock_irq(q->queue_lock);
+ continue;
+ }
+
spin_lock_irq(&lo->queue_lock);
list_add_tail(&req->queuelist, &lo->waiting_queue);
spin_unlock_irq(&lo->queue_lock);
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 726ee8a0277..ecba4942fc8 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -4,7 +4,7 @@
* SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module
* Specifications at www.trustedcomputinggroup.org
*
- * Copyright (C) 2005, Marcel Selhorst <selhorst@crypto.rub.de>
+ * Copyright (C) 2005, Marcel Selhorst <m.selhorst@sirrix.com>
* Sirrix AG - security technologies, http://www.sirrix.com and
* Applied Data Security Group, Ruhr-University Bochum, Germany
* Project-Homepage: http://www.prosec.rub.de/tpm
@@ -636,7 +636,7 @@ static void __exit cleanup_inf(void)
module_init(init_inf);
module_exit(cleanup_inf);
-MODULE_AUTHOR("Marcel Selhorst <selhorst@crypto.rub.de>");
+MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
MODULE_VERSION("1.9");
MODULE_LICENSE("GPL");
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index e1129fad96d..ee19b6e8fcb 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -143,7 +143,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
#endif
#ifndef CONFIG_X86_64
-#include "mach_timer.h"
+#include <asm/mach_timer.h>
#define PMTMR_EXPECTED_RATE \
((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10))
/*
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c
index 1bde303b970..8615059a872 100644
--- a/drivers/clocksource/cyclone.c
+++ b/drivers/clocksource/cyclone.c
@@ -7,7 +7,7 @@
#include <asm/pgtable.h>
#include <asm/io.h>
-#include "mach_timer.h"
+#include <asm/mach_timer.h>
#define CYCLONE_CBAR_ADDR 0xFEB00CD0 /* base address ptr */
#define CYCLONE_PMCC_OFFSET 0x51A0 /* offset to control register */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6a2b036c938..6f45b1658a6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -117,11 +117,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-
- if (!dbs_tuners_ins.ignore_nice) {
- busy_time = cputime64_add(busy_time,
- kstat_cpu(cpu).cpustat.nice);
- }
+ busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
idle_time = cputime64_sub(cur_wall_time, busy_time);
if (wall)
@@ -137,23 +133,6 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
if (idle_time == -1ULL)
return get_cpu_idle_time_jiffy(cpu, wall);
- if (dbs_tuners_ins.ignore_nice) {
- cputime64_t cur_nice;
- unsigned long cur_nice_jiffies;
- struct cpu_dbs_info_s *dbs_info;
-
- dbs_info = &per_cpu(cpu_dbs_info, cpu);
- cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice,
- dbs_info->prev_cpu_nice);
- /*
- * Assumption: nice time between sampling periods will be
- * less than 2^32 jiffies for 32 bit sys
- */
- cur_nice_jiffies = (unsigned long)
- cputime64_to_jiffies64(cur_nice);
- dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice;
- return idle_time + jiffies_to_usecs(cur_nice_jiffies);
- }
return idle_time;
}
@@ -319,6 +298,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
dbs_info = &per_cpu(cpu_dbs_info, j);
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
+ if (dbs_tuners_ins.ignore_nice)
+ dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+
}
mutex_unlock(&dbs_mutex);
@@ -419,6 +401,23 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time;
+ if (dbs_tuners_ins.ignore_nice) {
+ cputime64_t cur_nice;
+ unsigned long cur_nice_jiffies;
+
+ cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
+ j_dbs_info->prev_cpu_nice);
+ /*
+ * Assumption: nice time between sampling periods will
+ * be less than 2^32 jiffies for 32 bit sys
+ */
+ cur_nice_jiffies = (unsigned long)
+ cputime64_to_jiffies64(cur_nice);
+
+ j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ idle_time += jiffies_to_usecs(cur_nice_jiffies);
+ }
+
if (unlikely(!wall_time || wall_time < idle_time))
continue;
@@ -575,6 +574,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&j_dbs_info->prev_cpu_wall);
+ if (dbs_tuners_ins.ignore_nice) {
+ j_dbs_info->prev_cpu_nice =
+ kstat_cpu(j).cpustat.nice;
+ }
}
this_dbs_info->cpu = cpu;
/*
diff --git a/drivers/eisa/Kconfig b/drivers/eisa/Kconfig
index c0646576cf4..2705284f622 100644
--- a/drivers/eisa/Kconfig
+++ b/drivers/eisa/Kconfig
@@ -3,7 +3,7 @@
#
config EISA_VLB_PRIMING
bool "Vesa Local Bus priming"
- depends on X86_PC && EISA
+ depends on X86 && EISA
default n
---help---
Activate this option if your system contains a Vesa Local
@@ -24,11 +24,11 @@ config EISA_PCI_EISA
When in doubt, say Y.
# Using EISA_VIRTUAL_ROOT on something other than an Alpha or
-# an X86_PC may lead to crashes...
+# an X86 may lead to crashes...
config EISA_VIRTUAL_ROOT
bool "EISA virtual root device"
- depends on EISA && (ALPHA || X86_PC)
+ depends on EISA && (ALPHA || X86)
default y
---help---
Activate this option if your system only have EISA bus
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 5130b72d593..4be3acbaaf9 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -70,7 +70,7 @@ config DRM_I915
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- depends on FB
+ select FB
tristate "i915 driver"
help
Choose this option if you have a system that has Intel 830M, 845G,
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 69aa0ab2840..3795dbc0f50 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -276,6 +276,7 @@ int drm_irq_uninstall(struct drm_device * dev)
for (i = 0; i < dev->num_crtcs; i++) {
DRM_WAKEUP(&dev->vbl_queue[i]);
dev->vblank_enabled[i] = 0;
+ dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 803bc9e7ce3..bcc869bc409 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -171,9 +171,14 @@ EXPORT_SYMBOL(drm_core_ioremap);
void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev)
{
- map->handle = ioremap_wc(map->offset, map->size);
+ if (drm_core_has_AGP(dev) &&
+ dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+ map->handle = agp_remap(map->offset, map->size, dev);
+ else
+ map->handle = ioremap_wc(map->offset, map->size);
}
EXPORT_SYMBOL(drm_core_ioremap_wc);
+
void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
{
if (!map->handle || !map->size)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ee64b7301f6..81f1cff56fd 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -731,8 +731,11 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_GEM:
value = dev_priv->has_gem;
break;
+ case I915_PARAM_NUM_FENCES_AVAIL:
+ value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
+ break;
default:
- DRM_ERROR("Unknown parameter %d\n", param->param);
+ DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
}
@@ -764,8 +767,15 @@ static int i915_setparam(struct drm_device *dev, void *data,
case I915_SETPARAM_ALLOW_BATCHBUFFER:
dev_priv->allow_batchbuffer = param->value;
break;
+ case I915_SETPARAM_NUM_USED_FENCES:
+ if (param->value > dev_priv->num_fence_regs ||
+ param->value < 0)
+ return -EINVAL;
+ /* Userspace can use first N regs */
+ dev_priv->fence_reg_start = param->value;
+ break;
default:
- DRM_ERROR("unknown parameter %d\n", param->param);
+ DRM_DEBUG("unknown parameter %d\n", param->param);
return -EINVAL;
}
@@ -966,10 +976,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto kfree_devname;
- dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev->agp->base,
- dev->agp->agp_info.aper_size * 1024*1024);
-
/* Allow hardware batchbuffers unless told otherwise.
*/
dev_priv->allow_batchbuffer = 1;
@@ -1081,6 +1087,23 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
+ dev_priv->mm.gtt_mapping =
+ io_mapping_create_wc(dev->agp->base,
+ dev->agp->agp_info.aper_size * 1024*1024);
+ /* Set up a WC MTRR for non-PAT systems. This is more common than
+ * one would think, because the kernel disables PAT on first
+ * generation Core chips because WC PAT gets overridden by a UC
+ * MTRR if present. Even if a UC MTRR isn't present.
+ */
+ dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024,
+ MTRR_TYPE_WRCOMB, 1);
+ if (dev_priv->mm.gtt_mtrr < 0) {
+ DRM_INFO("MTRR allocation failed\n. Graphics "
+ "performance may suffer.\n");
+ }
+
#ifdef CONFIG_HIGHMEM64G
/* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
dev_priv->has_gem = 0;
@@ -1089,6 +1112,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->has_gem = 1;
#endif
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ if (IS_GM45(dev))
+ dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+
i915_gem_load(dev);
/* Init HWS */
@@ -1145,8 +1172,14 @@ int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ io_mapping_free(dev_priv->mm.gtt_mapping);
+ if (dev_priv->mm.gtt_mtrr >= 0) {
+ mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
+ dev->agp->agp_info.aper_size * 1024 * 1024);
+ dev_priv->mm.gtt_mtrr = -1;
+ }
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- io_mapping_free(dev_priv->mm.gtt_mapping);
drm_irq_uninstall(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f8b3df0926c..aac12ee31a4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -112,7 +112,6 @@ static struct drm_driver driver = {
.suspend = i915_suspend,
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
- .get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
.irq_preinstall = i915_driver_irq_preinstall,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e1351825200..7325363164f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -284,6 +284,7 @@ typedef struct drm_i915_private {
struct drm_mm gtt_space;
struct io_mapping *gtt_mapping;
+ int gtt_mtrr;
/**
* List of objects currently involved in rendering from the
@@ -534,6 +535,7 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
extern int i915_enable_vblank(struct drm_device *dev, int crtc);
extern void i915_disable_vblank(struct drm_device *dev, int crtc);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
+extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
@@ -601,6 +603,7 @@ int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
+int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
void i915_gem_retire_requests(struct drm_device *dev);
@@ -784,6 +787,11 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
+ IS_I915GM(dev)))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index debad5c04cc..81857665409 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -52,7 +52,7 @@ static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
-static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
@@ -567,6 +567,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
+ bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
@@ -585,8 +586,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Need a new fence register? */
if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- obj_priv->tiling_mode != I915_TILING_NONE)
- i915_gem_object_get_fence_reg(obj);
+ obj_priv->tiling_mode != I915_TILING_NONE) {
+ ret = i915_gem_object_get_fence_reg(obj, write);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return VM_FAULT_SIGBUS;
+ }
+ }
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
page_offset;
@@ -1211,7 +1217,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
/**
* Unbinds an object from the GTT aperture.
*/
-static int
+int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
@@ -1445,21 +1451,26 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int regnum = obj_priv->fence_reg;
+ int tile_width;
uint32_t val;
uint32_t pitch_val;
if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
(obj_priv->gtt_offset & (obj->size - 1))) {
- WARN(1, "%s: object not 1M or size aligned\n", __func__);
+ WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
+ __func__, obj_priv->gtt_offset, obj->size);
return;
}
- if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
- IS_I945GM(dev) ||
- IS_G33(dev)))
- pitch_val = (obj_priv->stride / 128) - 1;
+ if (obj_priv->tiling_mode == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
else
- pitch_val = (obj_priv->stride / 512) - 1;
+ tile_width = 512;
+
+ /* Note: pitch better be a power of two tile widths */
+ pitch_val = obj_priv->stride / tile_width;
+ pitch_val = ffs(pitch_val) - 1;
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
@@ -1483,7 +1494,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
(obj_priv->gtt_offset & (obj->size - 1))) {
- WARN(1, "%s: object not 1M or size aligned\n", __func__);
+ WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
+ __func__, obj_priv->gtt_offset);
return;
}
@@ -1503,6 +1515,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
/**
* i915_gem_object_get_fence_reg - set up a fence reg for an object
* @obj: object to map through a fence reg
+ * @write: object is about to be written
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
@@ -1513,8 +1526,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
* It then sets up the reg based on the object's properties: address, pitch
* and tiling format.
*/
-static void
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+static int
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
{
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1527,12 +1540,18 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
WARN(1, "allocating a fence for non-tiled object?\n");
break;
case I915_TILING_X:
- WARN(obj_priv->stride & (512 - 1),
- "object is X tiled but has non-512B pitch\n");
+ if (!obj_priv->stride)
+ return -EINVAL;
+ WARN((obj_priv->stride & (512 - 1)),
+ "object 0x%08x is X tiled but has non-512B pitch\n",
+ obj_priv->gtt_offset);
break;
case I915_TILING_Y:
- WARN(obj_priv->stride & (128 - 1),
- "object is Y tiled but has non-128B pitch\n");
+ if (!obj_priv->stride)
+ return -EINVAL;
+ WARN((obj_priv->stride & (128 - 1)),
+ "object 0x%08x is Y tiled but has non-128B pitch\n",
+ obj_priv->gtt_offset);
break;
}
@@ -1563,10 +1582,11 @@ try_again:
* objects to finish before trying again.
*/
if (i == dev_priv->num_fence_regs) {
- ret = i915_gem_object_wait_rendering(reg->obj);
+ ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0);
if (ret) {
- WARN(ret, "wait_rendering failed: %d\n", ret);
- return;
+ WARN(ret != -ERESTARTSYS,
+ "switch to GTT domain failed: %d\n", ret);
+ return ret;
}
goto try_again;
}
@@ -1591,6 +1611,8 @@ try_again:
i915_write_fence_reg(reg);
else
i830_write_fence_reg(reg);
+
+ return 0;
}
/**
@@ -1631,7 +1653,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
if (dev_priv->mm.suspended)
return -EBUSY;
if (alignment == 0)
- alignment = PAGE_SIZE;
+ alignment = i915_gem_get_gtt_alignment(obj);
if (alignment & (PAGE_SIZE - 1)) {
DRM_ERROR("Invalid object alignment requested %u\n", alignment);
return -EINVAL;
@@ -2652,6 +2674,14 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
DRM_ERROR("Failure to bind: %d", ret);
return ret;
}
+ /*
+ * Pre-965 chips need a fence register set up in order to
+ * properly handle tiled surfaces.
+ */
+ if (!IS_I965G(dev) &&
+ obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+ obj_priv->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_get_fence_reg(obj, true);
}
obj_priv->pin_count++;
@@ -3229,10 +3259,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
dev_priv->mm.wedged = 0;
}
- dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
- dev->agp->agp_info.aper_size
- * 1024 * 1024);
-
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
@@ -3255,7 +3281,6 @@ int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -3264,7 +3289,6 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_idle(dev);
drm_irq_uninstall(dev);
- io_mapping_free(dev_priv->mm.gtt_mapping);
return ret;
}
@@ -3273,6 +3297,9 @@ i915_gem_lastclose(struct drm_device *dev)
{
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
ret = i915_gem_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
@@ -3294,7 +3321,7 @@ i915_gem_load(struct drm_device *dev)
/* Old X drivers will take 0-2 for front, back, depth buffers */
dev_priv->fence_reg_start = 3;
- if (IS_I965G(dev))
+ if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16;
else
dev_priv->num_fence_regs = 8;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 241f39b7f46..fa1685cba84 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -173,6 +173,73 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
+
+/**
+ * Returns the size of the fence for a tiled object of the given size.
+ */
+static int
+i915_get_fence_size(struct drm_device *dev, int size)
+{
+ int i;
+ int start;
+
+ if (IS_I965G(dev)) {
+ /* The 965 can have fences at any page boundary. */
+ return ALIGN(size, 4096);
+ } else {
+ /* Align the size to a power of two greater than the smallest
+ * fence size.
+ */
+ if (IS_I9XX(dev))
+ start = 1024 * 1024;
+ else
+ start = 512 * 1024;
+
+ for (i = start; i < size; i <<= 1)
+ ;
+
+ return i;
+ }
+}
+
+/* Check pitch constraints for all chips & tiling formats */
+static bool
+i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+{
+ int tile_width;
+
+ /* Linear is always fine */
+ if (tiling_mode == I915_TILING_NONE)
+ return true;
+
+ if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* 965+ just needs multiples of tile width */
+ if (IS_I965G(dev)) {
+ if (stride & (tile_width - 1))
+ return false;
+ return true;
+ }
+
+ /* Pre-965 needs power of two tile widths */
+ if (stride < tile_width)
+ return false;
+
+ if (stride & (stride - 1))
+ return false;
+
+ /* We don't handle the aperture area covered by the fence being bigger
+ * than the object size.
+ */
+ if (i915_get_fence_size(dev, size) != size)
+ return false;
+
+ return true;
+}
+
/**
* Sets the tiling mode of an object, returning the required swizzling of
* bit 6 of addresses in the object.
@@ -191,6 +258,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
obj_priv = obj->driver_private;
+ if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
+ drm_gem_object_unreference(obj);
+ return -EINVAL;
+ }
+
mutex_lock(&dev->struct_mutex);
if (args->tiling_mode == I915_TILING_NONE) {
@@ -207,7 +279,24 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
}
}
- obj_priv->tiling_mode = args->tiling_mode;
+ if (args->tiling_mode != obj_priv->tiling_mode) {
+ int ret;
+
+ /* Unbind the object, as switching tiling means we're
+ * switching the cache organization due to fencing, probably.
+ */
+ ret = i915_gem_object_unbind(obj);
+ if (ret != 0) {
+ WARN(ret != -ERESTARTSYS,
+ "failed to unbind object for tiling switch");
+ args->tiling_mode = obj_priv->tiling_mode;
+ mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+
+ return ret;
+ }
+ obj_priv->tiling_mode = args->tiling_mode;
+ }
obj_priv->stride = args->stride;
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6290219de6c..548ff2c6643 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -174,6 +174,19 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
return count;
}
+u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
+ return 0;
+ }
+
+ return I915_READ(reg);
+}
+
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 273162579e1..9d6539a868b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -186,12 +186,12 @@
#define FENCE_REG_830_0 0x2000
#define I830_FENCE_START_MASK 0x07f80000
#define I830_FENCE_TILING_Y_SHIFT 12
-#define I830_FENCE_SIZE_BITS(size) ((get_order(size >> 19) - 1) << 8)
+#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
#define I830_FENCE_REG_VALID (1<<0)
#define I915_FENCE_START_MASK 0x0ff00000
-#define I915_FENCE_SIZE_BITS(size) ((get_order(size >> 20) - 1) << 8)
+#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
#define FENCE_REG_965_0 0x03000
#define I965_FENCE_PITCH_SHIFT 2
@@ -1371,6 +1371,9 @@
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
+/* GM45+ just has to be different */
+#define PIPEA_FRMCOUNT_GM45 0x70040
+#define PIPEA_FLIPCOUNT_GM45 0x70044
/* Cursor A & B regs */
#define CURACNTR 0x70080
@@ -1439,6 +1442,9 @@
#define PIPEBSTAT 0x71024
#define PIPEBFRAMEHIGH 0x71040
#define PIPEBFRAMEPIXEL 0x71044
+#define PIPEB_FRMCOUNT_GM45 0x71040
+#define PIPEB_FLIPCOUNT_GM45 0x71044
+
/* Display B control */
#define DSPBCNTR 0x71180
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 31c3732b7a6..bbdd72909a1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -755,6 +755,8 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
+ if (intel_output->needs_tv_clock)
+ is_tv = true;
break;
case INTEL_OUTPUT_DVO:
is_dvo = true;
@@ -1452,6 +1454,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
static void intel_setup_outputs(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
intel_crt_init(dev);
@@ -1463,13 +1466,16 @@ static void intel_setup_outputs(struct drm_device *dev)
if (IS_I9XX(dev)) {
int found;
- found = intel_sdvo_init(dev, SDVOB);
- if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
- intel_hdmi_init(dev, SDVOB);
-
- found = intel_sdvo_init(dev, SDVOC);
- if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
- intel_hdmi_init(dev, SDVOC);
+ if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ found = intel_sdvo_init(dev, SDVOB);
+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+ intel_hdmi_init(dev, SDVOB);
+ }
+ if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) {
+ found = intel_sdvo_init(dev, SDVOC);
+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+ intel_hdmi_init(dev, SDVOC);
+ }
} else
intel_dvo_init(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8a4cc50c5b4..957daef8edf 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -82,6 +82,7 @@ struct intel_output {
struct intel_i2c_chan *i2c_bus; /* for control functions */
struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
bool load_detect_temp;
+ bool needs_tv_clock;
void *dev_priv;
};
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b36a5214d8d..6d4f9126535 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -27,6 +27,7 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#include <linux/dmi.h>
#include <linux/i2c.h>
#include "drmP.h"
#include "drm.h"
@@ -311,10 +312,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
- mutex_lock(&dev->mode_config.mutex);
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
drm_mode_probed_add(connector, mode);
- mutex_unlock(&dev->mode_config.mutex);
return 1;
}
@@ -405,6 +404,16 @@ void intel_lvds_init(struct drm_device *dev)
u32 lvds;
int pipe;
+ /* Blacklist machines that we know falsely report LVDS. */
+ /* FIXME: add a check for the Aopen Mini PC */
+
+ /* Apple Mac Mini Core Duo and Mac Mini Core 2 Duo */
+ if (dmi_match(DMI_PRODUCT_NAME, "Macmini1,1") ||
+ dmi_match(DMI_PRODUCT_NAME, "Macmini2,1")) {
+ DRM_DEBUG("Skipping LVDS initialization for Apple Mac Mini\n");
+ return;
+ }
+
intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
if (!intel_output) {
return;
@@ -458,7 +467,7 @@ void intel_lvds_init(struct drm_device *dev)
dev_priv->panel_fixed_mode =
drm_mode_duplicate(dev, scan);
mutex_unlock(&dev->mode_config.mutex);
- goto out; /* FIXME: check for quirks */
+ goto out;
}
mutex_unlock(&dev->mode_config.mutex);
}
@@ -492,7 +501,7 @@ void intel_lvds_init(struct drm_device *dev)
if (dev_priv->panel_fixed_mode) {
dev_priv->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
- goto out; /* FIXME: check for quirks */
+ goto out;
}
}
@@ -500,38 +509,6 @@ void intel_lvds_init(struct drm_device *dev)
if (!dev_priv->panel_fixed_mode)
goto failed;
- /* FIXME: detect aopen & mac mini type stuff automatically? */
- /*
- * Blacklist machines with BIOSes that list an LVDS panel without
- * actually having one.
- */
- if (IS_I945GM(dev)) {
- /* aopen mini pc */
- if (dev->pdev->subsystem_vendor == 0xa0a0)
- goto failed;
-
- if ((dev->pdev->subsystem_vendor == 0x8086) &&
- (dev->pdev->subsystem_device == 0x7270)) {
- /* It's a Mac Mini or Macbook Pro.
- *
- * Apple hardware is out to get us. The macbook pro
- * has a real LVDS panel, but the mac mini does not,
- * and they have the same device IDs. We'll
- * distinguish by panel size, on the assumption
- * that Apple isn't about to make any machines with an
- * 800x600 display.
- */
-
- if (dev_priv->panel_fixed_mode != NULL &&
- dev_priv->panel_fixed_mode->hdisplay == 800 &&
- dev_priv->panel_fixed_mode->vdisplay == 600) {
- DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n");
- goto failed;
- }
- }
- }
-
-
out:
drm_sysfs_connector_add(connector);
return;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 40721546910..a30508b639b 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -40,13 +40,59 @@
struct intel_sdvo_priv {
struct intel_i2c_chan *i2c_bus;
int slaveaddr;
+
+ /* Register for the SDVO device: SDVOB or SDVOC */
int output_device;
- u16 active_outputs;
+ /* Active outputs controlled by this SDVO output */
+ uint16_t controlled_output;
+ /*
+ * Capabilities of the SDVO device returned by
+ * i830_sdvo_get_capabilities()
+ */
struct intel_sdvo_caps caps;
+
+ /* Pixel clock limitations reported by the SDVO device, in kHz */
int pixel_clock_min, pixel_clock_max;
+ /**
+ * This is set if we're going to treat the device as TV-out.
+ *
+ * While we have these nice friendly flags for output types that ought
+ * to decide this for us, the S-Video output on our HDMI+S-Video card
+ * shows up as RGB1 (VGA).
+ */
+ bool is_tv;
+
+ /**
+ * This is set if we treat the device as HDMI, instead of DVI.
+ */
+ bool is_hdmi;
+
+ /**
+ * Returned SDTV resolutions allowed for the current format, if the
+ * device reported it.
+ */
+ struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
+
+ /**
+ * Current selected TV format.
+ *
+ * This is stored in the same structure that's passed to the device, for
+ * convenience.
+ */
+ struct intel_sdvo_tv_format tv_format;
+
+ /*
+ * supported encoding mode, used to determine whether HDMI is
+ * supported
+ */
+ struct intel_sdvo_encode encode;
+
+ /* DDC bus used by this SDVO output */
+ uint8_t ddc_bus;
+
int save_sdvo_mult;
u16 save_active_outputs;
struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
@@ -148,8 +194,8 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
/** Mapping of command numbers to names, for debug output */
const static struct _sdvo_cmd_name {
- u8 cmd;
- char *name;
+ u8 cmd;
+ char *name;
} sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
@@ -186,8 +232,35 @@ const static struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+ /* HDMI op code */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
@@ -506,6 +579,50 @@ static bool intel_sdvo_set_output_timing(struct intel_output *intel_output,
SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}
+static bool
+intel_sdvo_create_preferred_input_timing(struct intel_output *output,
+ uint16_t clock,
+ uint16_t width,
+ uint16_t height)
+{
+ struct intel_sdvo_preferred_input_timing_args args;
+ uint8_t status;
+
+ args.clock = clock;
+ args.width = width;
+ args.height = height;
+ intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
+ status = intel_sdvo_read_response(output, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
+}
+
+static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
+ struct intel_sdvo_dtd *dtd)
+{
+ uint8_t status;
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ NULL, 0);
+
+ status = intel_sdvo_read_response(output, &dtd->part1,
+ sizeof(dtd->part1));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ NULL, 0);
+
+ status = intel_sdvo_read_response(output, &dtd->part2,
+ sizeof(dtd->part2));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
+}
static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
{
@@ -536,36 +653,12 @@ static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8
return true;
}
-static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
- * device will be told of the multiplier during mode_set.
- */
- adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
- return true;
-}
-
-static void intel_sdvo_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
+ struct drm_display_mode *mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- u16 width, height;
- u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
- u16 h_sync_offset, v_sync_offset;
- u32 sdvox;
- struct intel_sdvo_dtd output_dtd;
- int sdvo_pixel_multiply;
-
- if (!mode)
- return;
+ uint16_t width, height;
+ uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ uint16_t h_sync_offset, v_sync_offset;
width = mode->crtc_hdisplay;
height = mode->crtc_vdisplay;
@@ -580,93 +673,423 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
- output_dtd.part1.clock = mode->clock / 10;
- output_dtd.part1.h_active = width & 0xff;
- output_dtd.part1.h_blank = h_blank_len & 0xff;
- output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
+ dtd->part1.clock = mode->clock / 10;
+ dtd->part1.h_active = width & 0xff;
+ dtd->part1.h_blank = h_blank_len & 0xff;
+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
((h_blank_len >> 8) & 0xf);
- output_dtd.part1.v_active = height & 0xff;
- output_dtd.part1.v_blank = v_blank_len & 0xff;
- output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
+ dtd->part1.v_active = height & 0xff;
+ dtd->part1.v_blank = v_blank_len & 0xff;
+ dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
((v_blank_len >> 8) & 0xf);
- output_dtd.part2.h_sync_off = h_sync_offset;
- output_dtd.part2.h_sync_width = h_sync_len & 0xff;
- output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+ dtd->part2.h_sync_off = h_sync_offset;
+ dtd->part2.h_sync_width = h_sync_len & 0xff;
+ dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
(v_sync_len & 0xf);
- output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+ dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
((v_sync_len & 0x30) >> 4);
- output_dtd.part2.dtd_flags = 0x18;
+ dtd->part2.dtd_flags = 0x18;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- output_dtd.part2.dtd_flags |= 0x2;
+ dtd->part2.dtd_flags |= 0x2;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- output_dtd.part2.dtd_flags |= 0x4;
+ dtd->part2.dtd_flags |= 0x4;
+
+ dtd->part2.sdvo_flags = 0;
+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+ dtd->part2.reserved = 0;
+}
+
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+ struct intel_sdvo_dtd *dtd)
+{
+ uint16_t width, height;
+ uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ uint16_t h_sync_offset, v_sync_offset;
+
+ width = mode->crtc_hdisplay;
+ height = mode->crtc_vdisplay;
+
+ /* do some mode translations */
+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
+ mode->hdisplay = dtd->part1.h_active;
+ mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+ mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
+ mode->hsync_start += (dtd->part2.sync_off_width_high & 0xa0) << 2;
+ mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
+ mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+ mode->htotal = mode->hdisplay + dtd->part1.h_blank;
+ mode->htotal += (dtd->part1.h_high & 0xf) << 8;
+
+ mode->vdisplay = dtd->part1.v_active;
+ mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+ mode->vsync_start = mode->vdisplay;
+ mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+ mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0a) << 2;
+ mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+ mode->vsync_end = mode->vsync_start +
+ (dtd->part2.v_sync_off_width & 0xf);
+ mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+ mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
+ mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+ mode->clock = dtd->part1.clock * 10;
+
+ mode->flags &= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ if (dtd->part2.dtd_flags & 0x2)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (dtd->part2.dtd_flags & 0x4)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+}
+
+static bool intel_sdvo_get_supp_encode(struct intel_output *output,
+ struct intel_sdvo_encode *encode)
+{
+ uint8_t status;
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
+ status = intel_sdvo_read_response(output, encode, sizeof(*encode));
+ if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
+ memset(encode, 0, sizeof(*encode));
+ return false;
+ }
+
+ return true;
+}
+
+static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode)
+{
+ uint8_t status;
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1);
+ status = intel_sdvo_read_response(output, NULL, 0);
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_set_colorimetry(struct intel_output *output,
+ uint8_t mode)
+{
+ uint8_t status;
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+ status = intel_sdvo_read_response(output, NULL, 0);
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+#if 0
+static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
+{
+ int i, j;
+ uint8_t set_buf_index[2];
+ uint8_t av_split;
+ uint8_t buf_size;
+ uint8_t buf[48];
+ uint8_t *pos;
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
+ intel_sdvo_read_response(output, &av_split, 1);
+
+ for (i = 0; i <= av_split; i++) {
+ set_buf_index[0] = i; set_buf_index[1] = 0;
+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2);
+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+ intel_sdvo_read_response(output, &buf_size, 1);
+
+ pos = buf;
+ for (j = 0; j <= buf_size; j += 8) {
+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA,
+ NULL, 0);
+ intel_sdvo_read_response(output, pos, 8);
+ pos += 8;
+ }
+ }
+}
+#endif
+
+static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index,
+ uint8_t *data, int8_t size, uint8_t tx_rate)
+{
+ uint8_t set_buf_index[2];
+
+ set_buf_index[0] = index;
+ set_buf_index[1] = 0;
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2);
+
+ for (; size > 0; size -= 8) {
+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8);
+ data += 8;
+ }
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
+}
+
+static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
+{
+ uint8_t csum = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ csum += data[i];
+
+ return 0x100 - csum;
+}
+
+#define DIP_TYPE_AVI 0x82
+#define DIP_VERSION_AVI 0x2
+#define DIP_LEN_AVI 13
+
+struct dip_infoframe {
+ uint8_t type;
+ uint8_t version;
+ uint8_t len;
+ uint8_t checksum;
+ union {
+ struct {
+ /* Packet Byte #1 */
+ uint8_t S:2;
+ uint8_t B:2;
+ uint8_t A:1;
+ uint8_t Y:2;
+ uint8_t rsvd1:1;
+ /* Packet Byte #2 */
+ uint8_t R:4;
+ uint8_t M:2;
+ uint8_t C:2;
+ /* Packet Byte #3 */
+ uint8_t SC:2;
+ uint8_t Q:2;
+ uint8_t EC:3;
+ uint8_t ITC:1;
+ /* Packet Byte #4 */
+ uint8_t VIC:7;
+ uint8_t rsvd2:1;
+ /* Packet Byte #5 */
+ uint8_t PR:4;
+ uint8_t rsvd3:4;
+ /* Packet Byte #6~13 */
+ uint16_t top_bar_end;
+ uint16_t bottom_bar_start;
+ uint16_t left_bar_end;
+ uint16_t right_bar_start;
+ } avi;
+ struct {
+ /* Packet Byte #1 */
+ uint8_t channel_count:3;
+ uint8_t rsvd1:1;
+ uint8_t coding_type:4;
+ /* Packet Byte #2 */
+ uint8_t sample_size:2; /* SS0, SS1 */
+ uint8_t sample_frequency:3;
+ uint8_t rsvd2:3;
+ /* Packet Byte #3 */
+ uint8_t coding_type_private:5;
+ uint8_t rsvd3:3;
+ /* Packet Byte #4 */
+ uint8_t channel_allocation;
+ /* Packet Byte #5 */
+ uint8_t rsvd4:3;
+ uint8_t level_shift:4;
+ uint8_t downmix_inhibit:1;
+ } audio;
+ uint8_t payload[28];
+ } __attribute__ ((packed)) u;
+} __attribute__((packed));
+
+static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
+ struct drm_display_mode * mode)
+{
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .version = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+
+ avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
+ 4 + avi_if.len);
+ intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len,
+ SDVO_HBUF_TX_VSYNC);
+}
+
+static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_output *output = enc_to_intel_output(encoder);
+ struct intel_sdvo_priv *dev_priv = output->dev_priv;
- output_dtd.part2.sdvo_flags = 0;
- output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
- output_dtd.part2.reserved = 0;
+ if (!dev_priv->is_tv) {
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will be told of the multiplier during mode_set.
+ */
+ adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+ } else {
+ struct intel_sdvo_dtd output_dtd;
+ bool success;
+
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
+
+
+ /* Set output timings */
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ intel_sdvo_set_target_output(output,
+ dev_priv->controlled_output);
+ intel_sdvo_set_output_timing(output, &output_dtd);
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ intel_sdvo_set_target_input(output, true, false);
+
+
+ success = intel_sdvo_create_preferred_input_timing(output,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay);
+ if (success) {
+ struct intel_sdvo_dtd input_dtd;
- /* Set the output timing to the screen */
- intel_sdvo_set_target_output(intel_output, sdvo_priv->active_outputs);
- intel_sdvo_set_output_timing(intel_output, &output_dtd);
+ intel_sdvo_get_preferred_input_timing(output,
+ &input_dtd);
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
+
+static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_output *output = enc_to_intel_output(encoder);
+ struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ u32 sdvox = 0;
+ int sdvo_pixel_multiply;
+ struct intel_sdvo_in_out_map in_out;
+ struct intel_sdvo_dtd input_dtd;
+ u8 status;
+
+ if (!mode)
+ return;
+
+ /* First, set the input mapping for the first input to our controlled
+ * output. This is only correct if we're a single-input device, in
+ * which case the first input is the output from the appropriate SDVO
+ * channel on the motherboard. In a two-input device, the first input
+ * will be SDVOB and the second SDVOC.
+ */
+ in_out.in0 = sdvo_priv->controlled_output;
+ in_out.in1 = 0;
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP,
+ &in_out, sizeof(in_out));
+ status = intel_sdvo_read_response(output, NULL, 0);
+
+ if (sdvo_priv->is_hdmi) {
+ intel_sdvo_set_avi_infoframe(output, mode);
+ sdvox |= SDVO_AUDIO_ENABLE;
+ }
+
+ intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
+
+ /* If it's a TV, we already set the output timing in mode_fixup.
+ * Otherwise, the output timing is equal to the input timing.
+ */
+ if (!sdvo_priv->is_tv) {
+ /* Set the output timing to the screen */
+ intel_sdvo_set_target_output(output,
+ sdvo_priv->controlled_output);
+ intel_sdvo_set_output_timing(output, &input_dtd);
+ }
/* Set the input timing to the screen. Assume always input 0. */
- intel_sdvo_set_target_input(intel_output, true, false);
+ intel_sdvo_set_target_input(output, true, false);
- /* We would like to use i830_sdvo_create_preferred_input_timing() to
+ /* We would like to use intel_sdvo_create_preferred_input_timing() to
* provide the device with a timing it can support, if it supports that
* feature. However, presumably we would need to adjust the CRTC to
* output the preferred timing, and we don't support that currently.
*/
- intel_sdvo_set_input_timing(intel_output, &output_dtd);
+#if 0
+ success = intel_sdvo_create_preferred_input_timing(output, clock,
+ width, height);
+ if (success) {
+ struct intel_sdvo_dtd *input_dtd;
+
+ intel_sdvo_get_preferred_input_timing(output, &input_dtd);
+ intel_sdvo_set_input_timing(output, &input_dtd);
+ }
+#else
+ intel_sdvo_set_input_timing(output, &input_dtd);
+#endif
switch (intel_sdvo_get_pixel_multiplier(mode)) {
case 1:
- intel_sdvo_set_clock_rate_mult(intel_output,
+ intel_sdvo_set_clock_rate_mult(output,
SDVO_CLOCK_RATE_MULT_1X);
break;
case 2:
- intel_sdvo_set_clock_rate_mult(intel_output,
+ intel_sdvo_set_clock_rate_mult(output,
SDVO_CLOCK_RATE_MULT_2X);
break;
case 4:
- intel_sdvo_set_clock_rate_mult(intel_output,
+ intel_sdvo_set_clock_rate_mult(output,
SDVO_CLOCK_RATE_MULT_4X);
break;
}
/* Set the SDVO control regs. */
- if (0/*IS_I965GM(dev)*/) {
- sdvox = SDVO_BORDER_ENABLE;
- } else {
- sdvox = I915_READ(sdvo_priv->output_device);
- switch (sdvo_priv->output_device) {
- case SDVOB:
- sdvox &= SDVOB_PRESERVE_MASK;
- break;
- case SDVOC:
- sdvox &= SDVOC_PRESERVE_MASK;
- break;
- }
- sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
- }
+ if (IS_I965G(dev)) {
+ sdvox |= SDVO_BORDER_ENABLE |
+ SDVO_VSYNC_ACTIVE_HIGH |
+ SDVO_HSYNC_ACTIVE_HIGH;
+ } else {
+ sdvox |= I915_READ(sdvo_priv->output_device);
+ switch (sdvo_priv->output_device) {
+ case SDVOB:
+ sdvox &= SDVOB_PRESERVE_MASK;
+ break;
+ case SDVOC:
+ sdvox &= SDVOC_PRESERVE_MASK;
+ break;
+ }
+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+ }
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
if (IS_I965G(dev)) {
- /* done in crtc_mode_set as the dpll_md reg must be written
- early */
- } else if (IS_I945G(dev) || IS_I945GM(dev)) {
- /* done in crtc_mode_set as it lives inside the
- dpll register */
+ /* done in crtc_mode_set as the dpll_md reg must be written early */
+ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+ /* done in crtc_mode_set as it lives inside the dpll register */
} else {
sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
- intel_sdvo_write_sdvox(intel_output, sdvox);
+ intel_sdvo_write_sdvox(output, sdvox);
}
static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
@@ -714,7 +1137,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
if (0)
intel_sdvo_set_encoder_power_state(intel_output, mode);
- intel_sdvo_set_active_outputs(intel_output, sdvo_priv->active_outputs);
+ intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output);
}
return;
}
@@ -752,6 +1175,9 @@ static void intel_sdvo_save(struct drm_connector *connector)
&sdvo_priv->save_output_dtd[o]);
}
}
+ if (sdvo_priv->is_tv) {
+ /* XXX: Save TV format/enhancements. */
+ }
sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
}
@@ -759,7 +1185,6 @@ static void intel_sdvo_save(struct drm_connector *connector)
static void intel_sdvo_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_output *intel_output = to_intel_output(connector);
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
int o;
@@ -790,7 +1215,11 @@ static void intel_sdvo_restore(struct drm_connector *connector)
intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult);
- I915_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
+ if (sdvo_priv->is_tv) {
+ /* XXX: Restore TV format/enhancements. */
+ }
+
+ intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX);
if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
{
@@ -916,20 +1345,173 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
status = intel_sdvo_read_response(intel_output, &response, 2);
DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
+
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return connector_status_unknown;
+
if ((response[0] != 0) || (response[1] != 0))
return connector_status_connected;
else
return connector_status_disconnected;
}
-static int intel_sdvo_get_modes(struct drm_connector *connector)
+static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
/* set the bus switch and get the modes */
- intel_sdvo_set_control_bus_switch(intel_output, SDVO_CONTROL_BUS_DDC2);
+ intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
intel_ddc_get_modes(intel_output);
+#if 0
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ /* Mac mini hack. On this device, I get DDC through the analog, which
+ * load-detects as disconnected. I fail to DDC through the SDVO DDC,
+ * but it does load-detect as connected. So, just steal the DDC bits
+ * from analog when we fail at finding it the right way.
+ */
+ crt = xf86_config->output[0];
+ intel_output = crt->driver_private;
+ if (intel_output->type == I830_OUTPUT_ANALOG &&
+ crt->funcs->detect(crt) == XF86OutputStatusDisconnected) {
+ I830I2CInit(pScrn, &intel_output->pDDCBus, GPIOA, "CRTDDC_A");
+ edid_mon = xf86OutputGetEDID(crt, intel_output->pDDCBus);
+ xf86DestroyI2CBusRec(intel_output->pDDCBus, true, true);
+ }
+ if (edid_mon) {
+ xf86OutputSetEDID(output, edid_mon);
+ modes = xf86OutputGetEDIDModes(output);
+ }
+#endif
+}
+
+/**
+ * This function checks the current TV format, and chooses a default if
+ * it hasn't been set.
+ */
+static void
+intel_sdvo_check_tv_format(struct intel_output *output)
+{
+ struct intel_sdvo_priv *dev_priv = output->dev_priv;
+ struct intel_sdvo_tv_format format, unset;
+ uint8_t status;
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_TV_FORMAT, NULL, 0);
+ status = intel_sdvo_read_response(output, &format, sizeof(format));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return;
+
+ memset(&unset, 0, sizeof(unset));
+ if (memcmp(&format, &unset, sizeof(format))) {
+ DRM_DEBUG("%s: Choosing default TV format of NTSC-M\n",
+ SDVO_NAME(dev_priv));
+
+ format.ntsc_m = true;
+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, NULL, 0);
+ status = intel_sdvo_read_response(output, NULL, 0);
+ }
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note! This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+struct drm_display_mode sdvo_tv_modes[] = {
+ { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815680, 321, 384, 416,
+ 200, 0, 232, 201, 233, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814080, 321, 384, 416,
+ 240, 0, 272, 241, 273, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910080, 401, 464, 496,
+ 300, 0, 332, 301, 333, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913280, 641, 704, 736,
+ 350, 0, 382, 351, 383, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121280, 641, 704, 736,
+ 400, 0, 432, 401, 433, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121280, 641, 704, 736,
+ 400, 0, 432, 401, 433, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624000, 705, 768, 800,
+ 480, 0, 512, 481, 513, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232000, 705, 768, 800,
+ 576, 0, 608, 577, 609, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751680, 721, 784, 816,
+ 350, 0, 382, 351, 383, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199680, 721, 784, 816,
+ 400, 0, 432, 401, 433, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116480, 721, 784, 816,
+ 480, 0, 512, 481, 513, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054080, 721, 784, 816,
+ 540, 0, 572, 541, 573, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816640, 721, 784, 816,
+ 576, 0, 608, 577, 609, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570560, 769, 832, 864,
+ 576, 0, 608, 577, 609, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030080, 801, 864, 896,
+ 600, 0, 632, 601, 633, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581760, 833, 896, 928,
+ 624, 0, 656, 625, 657, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707040, 921, 984, 1016,
+ 766, 0, 798, 767, 799, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827200, 1025, 1088, 1120,
+ 768, 0, 800, 769, 801, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265920, 1281, 1344, 1376,
+ 1024, 0, 1056, 1025, 1057, 4196112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+ struct intel_output *output = to_intel_output(connector);
+ uint32_t reply = 0;
+ uint8_t status;
+ int i = 0;
+
+ intel_sdvo_check_tv_format(output);
+
+ /* Read the list of supported input resolutions for the selected TV
+ * format.
+ */
+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ NULL, 0);
+ status = intel_sdvo_read_response(output, &reply, 3);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+ if (reply & (1 << i))
+ drm_mode_probed_add(connector, &sdvo_tv_modes[i]);
+}
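A hedged illustration of the bit-to-mode mapping the loop above relies on; the reply value below is invented and not taken from hardware:

	/* Hypothetical 3-byte SDTV resolution-support reply with bits 2 and 3
	 * set: following the "reply order" note on sdvo_tv_modes[], the loop
	 * above would add sdvo_tv_modes[2] ("400x300") and sdvo_tv_modes[3]
	 * ("640x350") to the probed modes and skip everything else.
	 */
	uint32_t example_reply = 0x0000000c;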
+
+static int intel_sdvo_get_modes(struct drm_connector *connector)
+{
+ struct intel_output *output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+
+ if (sdvo_priv->is_tv)
+ intel_sdvo_get_tv_modes(connector);
+ else
+ intel_sdvo_get_ddc_modes(connector);
+
if (list_empty(&connector->probed_modes))
return 0;
return 1;
@@ -978,6 +1560,65 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
};
+/**
+ * Choose the appropriate DDC bus for control bus switch command for this
+ * SDVO output based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
+{
+ uint16_t mask = 0;
+ unsigned int num_bits;
+
+ /* Make a mask of outputs less than or equal to our own priority in the
+ * list.
+ */
+ switch (dev_priv->controlled_output) {
+ case SDVO_OUTPUT_LVDS1:
+ mask |= SDVO_OUTPUT_LVDS1;
+ case SDVO_OUTPUT_LVDS0:
+ mask |= SDVO_OUTPUT_LVDS0;
+ case SDVO_OUTPUT_TMDS1:
+ mask |= SDVO_OUTPUT_TMDS1;
+ case SDVO_OUTPUT_TMDS0:
+ mask |= SDVO_OUTPUT_TMDS0;
+ case SDVO_OUTPUT_RGB1:
+ mask |= SDVO_OUTPUT_RGB1;
+ case SDVO_OUTPUT_RGB0:
+ mask |= SDVO_OUTPUT_RGB0;
+ break;
+ }
+
+ /* Count bits to find what number we are in the priority list. */
+ mask &= dev_priv->caps.output_flags;
+ num_bits = hweight16(mask);
+ if (num_bits > 3) {
+ /* if more than 3 outputs, default to DDC bus 3 for now */
+ num_bits = 3;
+ }
+
+ /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+ dev_priv->ddc_bus = 1 << num_bits;
+}
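A rough sketch of how the priority mask above maps onto a DDC bus; the capability flags chosen here are hypothetical and only serve to walk through the code in this hunk:

	/* Hypothetical encoder: TMDS0 is the controlled output and the
	 * capabilities also report an RGB0 output.
	 */
	struct intel_sdvo_priv example = {
		.controlled_output = SDVO_OUTPUT_TMDS0,
	};

	example.caps.output_flags = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_TMDS0;
	intel_sdvo_select_ddc_bus(&example);
	/* The switch falls through and builds mask = TMDS0 | RGB1 | RGB0;
	 * ANDed with the capabilities this leaves two bits set, so
	 * example.ddc_bus == (1 << 2), i.e. SDVO_CONTROL_BUS_DDC2.
	 */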
+
+static bool
+intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
+{
+ struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ uint8_t status;
+
+ intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
+ status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+ return true;
+}
+
bool intel_sdvo_init(struct drm_device *dev, int output_device)
{
struct drm_connector *connector;
@@ -1040,45 +1681,76 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
- memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs));
+ if (sdvo_priv->caps.output_flags &
+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
+ sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
+ else
+ sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
+
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector_type = DRM_MODE_CONNECTOR_DVID;
- /* TODO, CVBS, SVID, YPRPB & SCART outputs. */
- if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0)
+ if (intel_sdvo_get_supp_encode(intel_output,
+ &sdvo_priv->encode) &&
+ intel_sdvo_get_digital_encoding_mode(intel_output) &&
+ sdvo_priv->is_hdmi) {
+ /* enable hdmi encoding mode if supported */
+ intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_output,
+ SDVO_COLORIMETRY_RGB256);
+ connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ }
+ }
+ else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0)
{
- sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
+ sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ sdvo_priv->is_tv = true;
+ intel_output->needs_tv_clock = true;
+ }
+ else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0)
+ {
+ sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
encoder_type = DRM_MODE_ENCODER_DAC;
connector_type = DRM_MODE_CONNECTOR_VGA;
}
else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1)
{
- sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
+ sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
encoder_type = DRM_MODE_ENCODER_DAC;
connector_type = DRM_MODE_CONNECTOR_VGA;
}
- else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
+ else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0)
{
- sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
+ sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
- encoder_type = DRM_MODE_ENCODER_TMDS;
- connector_type = DRM_MODE_CONNECTOR_DVID;
+ encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector_type = DRM_MODE_CONNECTOR_LVDS;
}
- else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1)
+ else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1)
{
- sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
+ sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
- encoder_type = DRM_MODE_ENCODER_TMDS;
- connector_type = DRM_MODE_CONNECTOR_DVID;
+ encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector_type = DRM_MODE_CONNECTOR_LVDS;
}
else
{
unsigned char bytes[2];
+ sdvo_priv->controlled_output = 0;
memcpy (bytes, &sdvo_priv->caps.output_flags, 2);
- DRM_DEBUG("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
+ DRM_DEBUG("%s: Unknown SDVO output type (0x%02x%02x)\n",
SDVO_NAME(sdvo_priv),
bytes[0], bytes[1]);
+ encoder_type = DRM_MODE_ENCODER_NONE;
+ connector_type = DRM_MODE_CONNECTOR_Unknown;
goto err_i2c;
}
@@ -1089,6 +1761,8 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
drm_sysfs_connector_add(connector);
+ intel_sdvo_select_ddc_bus(sdvo_priv);
+
/* Set the input timing to the screen. Assume always input 0. */
intel_sdvo_set_target_input(intel_output, true, false);
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 861a43f8693..1117b9c151a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -173,6 +173,9 @@ struct intel_sdvo_get_trained_inputs_response {
* Returns two struct intel_sdvo_output_flags structures.
*/
#define SDVO_CMD_GET_IN_OUT_MAP 0x06
+struct intel_sdvo_in_out_map {
+ u16 in0, in1;
+};
/**
* Sets the current mapping of SDVO inputs to outputs on the device.
@@ -206,7 +209,8 @@ struct intel_sdvo_get_trained_inputs_response {
struct intel_sdvo_get_interrupt_event_source_response {
u16 interrupt_status;
unsigned int ambient_light_interrupt:1;
- unsigned int pad:7;
+ unsigned int hdmi_audio_encrypt_change:1;
+ unsigned int pad:6;
} __attribute__((packed));
/**
@@ -305,23 +309,411 @@ struct intel_sdvo_set_target_input_args {
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct intel_sdvo_tv_format {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:3;
+} __attribute__((packed));
#define SDVO_CMD_GET_TV_FORMAT 0x28
#define SDVO_CMD_SET_TV_FORMAT 0x29
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
+struct intel_sdvo_sdtv_resolution_request {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+struct intel_sdvo_sdtv_resolution_reply {
+ unsigned int res_320x200:1;
+ unsigned int res_320x240:1;
+ unsigned int res_400x300:1;
+ unsigned int res_640x350:1;
+ unsigned int res_640x400:1;
+ unsigned int res_640x480:1;
+ unsigned int res_704x480:1;
+ unsigned int res_704x576:1;
+
+ unsigned int res_720x350:1;
+ unsigned int res_720x400:1;
+ unsigned int res_720x480:1;
+ unsigned int res_720x540:1;
+ unsigned int res_720x576:1;
+ unsigned int res_768x576:1;
+ unsigned int res_800x600:1;
+ unsigned int res_832x624:1;
+
+ unsigned int res_920x766:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x1024:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+/* Get supported resolutions with square pixel aspect ratio that can be
+   scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
+
+struct intel_sdvo_hdtv_resolution_request {
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+struct intel_sdvo_hdtv_resolution_reply {
+ unsigned int res_640x480:1;
+ unsigned int res_800x600:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x960:1;
+ unsigned int res_1400x1050:1;
+ unsigned int res_1600x1200:1;
+ unsigned int res_1920x1440:1;
+ unsigned int res_2048x1536:1;
+
+ unsigned int res_2560x1920:1;
+ unsigned int res_3200x2400:1;
+ unsigned int res_3840x2880:1;
+ unsigned int pad1:5;
+
+ unsigned int res_848x480:1;
+ unsigned int res_1064x600:1;
+ unsigned int res_1280x720:1;
+ unsigned int res_1360x768:1;
+ unsigned int res_1704x960:1;
+ unsigned int res_1864x1050:1;
+ unsigned int res_1920x1080:1;
+ unsigned int res_2128x1200:1;
+
+ unsigned int res_2560x1400:1;
+ unsigned int res_2728x1536:1;
+ unsigned int res_3408x1920:1;
+ unsigned int res_4264x2400:1;
+ unsigned int res_5120x2880:1;
+ unsigned int pad2:3;
+
+ unsigned int res_768x480:1;
+ unsigned int res_960x600:1;
+ unsigned int res_1152x720:1;
+ unsigned int res_1124x768:1;
+ unsigned int res_1536x960:1;
+ unsigned int res_1680x1050:1;
+ unsigned int res_1728x1080:1;
+ unsigned int res_1920x1200:1;
+
+ unsigned int res_2304x1440:1;
+ unsigned int res_2456x1536:1;
+ unsigned int res_3072x1920:1;
+ unsigned int res_3840x2400:1;
+ unsigned int res_4608x2880:1;
+ unsigned int pad3:3;
+
+ unsigned int res_1280x1024:1;
+ unsigned int pad4:7;
+
+ unsigned int res_1280x768:1;
+ unsigned int pad5:7;
+} __attribute__((packed));
+
+/* Get supported power states; returns info for the encoder and monitor and
+   relies on the last SetTargetInput and SetTargetOutput calls */
#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
+/* Get power state; returns info for the encoder and monitor and relies on
+   the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE 0x2b
#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
# define SDVO_ENCODER_STATE_ON (1 << 0)
# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
# define SDVO_ENCODER_STATE_OFF (1 << 3)
+# define SDVO_MONITOR_STATE_ON (1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
+# define SDVO_MONITOR_STATE_OFF (1 << 7)
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+struct sdvo_panel_power_sequencing {
+ u8 t0;
+ u8 t1;
+ u8 t2;
+ u8 t3;
+ u8 t4;
+
+ unsigned int t0_high:2;
+ unsigned int t1_high:2;
+ unsigned int t2_high:2;
+ unsigned int t3_high:2;
+
+ unsigned int t4_high:2;
+ unsigned int pad:6;
+} __attribute__((packed));
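A minimal sketch of reassembling one of the split 10-bit values described above; the helper name is invented and is not part of the patch:

static inline u16 sdvo_pps_t0_ms(const struct sdvo_panel_power_sequencing *pps)
{
	/* Low byte plus the 2-bit high field gives 0..1023 milliseconds. */
	return pps->t0 | (pps->t0_high << 8);
}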
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
+struct sdvo_max_backlight_reply {
+ u8 max_value;
+ u8 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
+struct sdvo_get_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ u16 value;
+} __attribute__((packed));
+#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
+struct sdvo_set_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ unsigned int enable:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
+# define SDVO_DISPLAY_STATE_ON (1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
+# define SDVO_DISPLAY_STATE_OFF (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
+struct intel_sdvo_enhancements_reply {
+ unsigned int flicker_filter:1;
+ unsigned int flicker_filter_adaptive:1;
+ unsigned int flicker_filter_2d:1;
+ unsigned int saturation:1;
+ unsigned int hue:1;
+ unsigned int brightness:1;
+ unsigned int contrast:1;
+ unsigned int overscan_h:1;
+
+ unsigned int overscan_v:1;
+ unsigned int position_h:1;
+ unsigned int position_v:1;
+ unsigned int sharpness:1;
+ unsigned int dot_crawl:1;
+ unsigned int dither:1;
+ unsigned int max_tv_chroma_filter:1;
+ unsigned int max_tv_luma_filter:1;
+} __attribute__((packed));
+
+/* Picture enhancement limits below are dependent on the current TV format,
+ * and thus need to be queried and set after it.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d
+#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b
+#define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52
+#define SDVO_CMD_GET_MAX_SATURATION 0x55
+#define SDVO_CMD_GET_MAX_HUE 0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
+#define SDVO_CMD_GET_MAX_POSITION_H 0x67
+#define SDVO_CMD_GET_MAX_POSITION_V 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA 0x77
+struct intel_sdvo_enhancement_limits_reply {
+ u16 max_value;
+ u16 default_value;
+} __attribute__((packed));
-#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
+# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
+#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50
+#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51
+#define SDVO_CMD_GET_2D_FLICKER_FITER 0x53
+#define SDVO_CMD_SET_2D_FLICKER_FITER 0x54
+#define SDVO_CMD_GET_SATURATION 0x56
+#define SDVO_CMD_SET_SATURATION 0x57
+#define SDVO_CMD_GET_HUE 0x59
+#define SDVO_CMD_SET_HUE 0x5a
+#define SDVO_CMD_GET_BRIGHTNESS 0x5c
+#define SDVO_CMD_SET_BRIGHTNESS 0x5d
+#define SDVO_CMD_GET_CONTRAST 0x5f
+#define SDVO_CMD_SET_CONTRAST 0x60
+#define SDVO_CMD_GET_OVERSCAN_H 0x62
+#define SDVO_CMD_SET_OVERSCAN_H 0x63
+#define SDVO_CMD_GET_OVERSCAN_V 0x65
+#define SDVO_CMD_SET_OVERSCAN_V 0x66
+#define SDVO_CMD_GET_POSITION_H 0x68
+#define SDVO_CMD_SET_POSITION_H 0x69
+#define SDVO_CMD_GET_POSITION_V 0x6b
+#define SDVO_CMD_SET_POSITION_V 0x6c
+#define SDVO_CMD_GET_SHARPNESS 0x6e
+#define SDVO_CMD_SET_SHARPNESS 0x6f
+#define SDVO_CMD_GET_TV_CHROMA 0x75
+#define SDVO_CMD_SET_TV_CHROMA 0x76
+#define SDVO_CMD_GET_TV_LUMA 0x78
+#define SDVO_CMD_SET_TV_LUMA 0x79
+struct intel_sdvo_enhancements_arg {
+ u16 value;
+}__attribute__((packed));
+
+#define SDVO_CMD_GET_DOT_CRAWL 0x70
+#define SDVO_CMD_SET_DOT_CRAWL 0x71
+# define SDVO_DOT_CRAWL_ON (1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_GET_DITHER 0x72
+#define SDVO_CMD_SET_DITHER 0x73
+# define SDVO_DITHER_ON (1 << 0)
+# define SDVO_DITHER_DEFAULT_ON (1 << 1)
#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
-# define SDVO_CONTROL_BUS_PROM 0x0
-# define SDVO_CONTROL_BUS_DDC1 0x1
-# define SDVO_CONTROL_BUS_DDC2 0x2
-# define SDVO_CONTROL_BUS_DDC3 0x3
+# define SDVO_CONTROL_BUS_PROM (1 << 0)
+# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
+# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
+# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
+#define SDVO_CMD_GET_ENCODE 0x9e
+#define SDVO_CMD_SET_ENCODE 0x9f
+ #define SDVO_ENCODE_DVI 0x0
+ #define SDVO_ENCODE_HDMI 0x1
+#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
+#define SDVO_CMD_SET_COLORIMETRY 0x8e
+ #define SDVO_COLORIMETRY_RGB256 0x0
+ #define SDVO_COLORIMETRY_RGB220 0x1
+ #define SDVO_COLORIMETRY_YCrCb422 0x3
+ #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY 0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT 0x91
+#define SDVO_CMD_GET_AUDIO_STAT 0x92
+#define SDVO_CMD_SET_HBUF_INDEX 0x93
+#define SDVO_CMD_GET_HBUF_INDEX 0x94
+#define SDVO_CMD_GET_HBUF_INFO 0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
+#define SDVO_CMD_SET_HBUF_DATA 0x98
+#define SDVO_CMD_GET_HBUF_DATA 0x99
+#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
+ #define SDVO_HBUF_TX_DISABLED (0 << 6)
+ #define SDVO_HBUF_TX_ONCE (2 << 6)
+ #define SDVO_HBUF_TX_VSYNC (3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
+
+struct intel_sdvo_encode{
+ u8 dvi_rev;
+ u8 hdmi_rev;
+} __attribute__ ((packed));
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 63212d7bbc2..df4cf97e5d9 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1039,9 +1039,9 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
- drm_core_ioremap(dev_priv->cp_ring, dev);
- drm_core_ioremap(dev_priv->ring_rptr, dev);
- drm_core_ioremap(dev->agp_buffer_map, dev);
+ drm_core_ioremap_wc(dev_priv->cp_ring, dev);
+ drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
+ drm_core_ioremap_wc(dev->agp_buffer_map, dev);
if (!dev_priv->cp_ring->handle ||
!dev_priv->ring_rptr->handle ||
!dev->agp_buffer_map->handle) {
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 35561689ff3..ea2638b4198 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -13,11 +13,11 @@ menuconfig INPUT_KEYBOARD
if INPUT_KEYBOARD
config KEYBOARD_ATKBD
- tristate "AT keyboard" if EMBEDDED || !X86_PC
+ tristate "AT keyboard" if EMBEDDED || !X86
default y
select SERIO
select SERIO_LIBPS2
- select SERIO_I8042 if X86_PC
+ select SERIO_I8042 if X86
select SERIO_GSCPS2 if GSC
help
Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 093c8c1bca7..9bef935ef19 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2
default y
select SERIO
select SERIO_LIBPS2
- select SERIO_I8042 if X86_PC
+ select SERIO_I8042 if X86
select SERIO_GSCPS2 if GSC
help
Say Y here if you have a PS/2 mouse connected to your system. This
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 595ba8eb4a0..0b28141e43b 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -4599,6 +4599,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
printk(KERN_ERR "%s: no memory for coeffs\n",
__func__);
ret = -ENOMEM;
+ kfree(bch);
goto free_chan;
}
bch->nr = ch;
@@ -4767,6 +4768,7 @@ init_multi_port(struct hfc_multi *hc, int pt)
printk(KERN_ERR "%s: no memory for coeffs\n",
__func__);
ret = -ENOMEM;
+ kfree(bch);
goto free_chan;
}
bch->nr = ch + 1;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8b12e6e109d..2ff88791ceb 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -273,7 +273,7 @@ config MTD_NAND_CAFE
config MTD_NAND_CS553X
tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
- depends on X86_32 && (X86_PC || X86_GENERICARCH)
+ depends on X86_32
help
The CS553x companion chips for the AMD Geode processor
include NAND flash controllers with built-in hardware ECC
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 535c234286e..8c694213035 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1475,6 +1475,7 @@ el3_resume(struct device *pdev)
spin_lock_irqsave(&lp->lock, flags);
outw(PowerUp, ioaddr + EL3_CMD);
+ EL3WINDOW(0);
el3_up(dev);
if (netif_running(dev))
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index acae2d8cd68..9b12a13a640 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1629,6 +1629,12 @@ static void gfar_schedule_cleanup(struct net_device *dev)
if (netif_rx_schedule_prep(&priv->napi)) {
gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
__netif_rx_schedule(&priv->napi);
+ } else {
+ /*
+ * Clear IEVENT, so the interrupt isn't raised again
+ * for packets that have already arrived.
+ */
+ gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
}
spin_unlock(&priv->rxlock);
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 9c78c963b72..f4dd9acb687 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1203,7 +1203,7 @@ typedef struct {
#define NETXEN_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
-#define MSIX_ENTRIES_PER_ADAPTER 8
+#define MSIX_ENTRIES_PER_ADAPTER 1
#define NETXEN_MSIX_TBL_SPACE 8192
#define NETXEN_PCI_REG_MSIX_TBL 0x44
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 645d384fe87..3b17a793614 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -76,6 +76,7 @@ static void netxen_nic_poll_controller(struct net_device *netdev);
#endif
static irqreturn_t netxen_intr(int irq, void *data);
static irqreturn_t netxen_msi_intr(int irq, void *data);
+static irqreturn_t netxen_msix_intr(int irq, void *data);
/* PCI Device ID Table */
#define ENTRY(device) \
@@ -1084,7 +1085,9 @@ static int netxen_nic_open(struct net_device *netdev)
for (ring = 0; ring < adapter->max_rds_rings; ring++)
netxen_post_rx_buffers(adapter, ctx, ring);
}
- if (NETXEN_IS_MSI_FAMILY(adapter))
+ if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+ handler = netxen_msix_intr;
+ else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
handler = netxen_msi_intr;
else {
flags |= IRQF_SHARED;
@@ -1612,6 +1615,14 @@ static irqreturn_t netxen_msi_intr(int irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t netxen_msix_intr(int irq, void *data)
+{
+ struct netxen_adapter *adapter = data;
+
+ napi_schedule(&adapter->napi);
+ return IRQ_HANDLED;
+}
+
static int netxen_nic_poll(struct napi_struct *napi, int budget)
{
struct netxen_adapter *adapter = container_of(napi, struct netxen_adapter, napi);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 2c73ca606b3..0771eb6fc6e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -437,6 +437,22 @@ enum features {
RTL_FEATURE_GMII = (1 << 2),
};
+struct rtl8169_counters {
+ __le64 tx_packets;
+ __le64 rx_packets;
+ __le64 tx_errors;
+ __le32 rx_errors;
+ __le16 rx_missed;
+ __le16 align_errors;
+ __le32 tx_one_collision;
+ __le32 tx_multi_collision;
+ __le64 rx_unicast;
+ __le64 rx_broadcast;
+ __le32 rx_multicast;
+ __le16 tx_aborted;
+ __le16 tx_underun;
+};
+
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev; /* Index of PCI device */
@@ -480,6 +496,7 @@ struct rtl8169_private {
unsigned features;
struct mii_if_info mii;
+ struct rtl8169_counters counters;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -1100,22 +1117,6 @@ static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
"tx_underrun",
};
-struct rtl8169_counters {
- __le64 tx_packets;
- __le64 rx_packets;
- __le64 tx_errors;
- __le32 rx_errors;
- __le16 rx_missed;
- __le16 align_errors;
- __le32 tx_one_collision;
- __le32 tx_multi_collision;
- __le64 rx_unicast;
- __le64 rx_broadcast;
- __le32 rx_multicast;
- __le16 tx_aborted;
- __le16 tx_underun;
-};
-
static int rtl8169_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
@@ -1126,16 +1127,21 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
}
}
-static void rtl8169_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+static void rtl8169_update_counters(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
struct rtl8169_counters *counters;
dma_addr_t paddr;
u32 cmd;
+ int wait = 1000;
- ASSERT_RTNL();
+ /*
+ * Some chips are unable to dump tally counters when the receiver
+ * is disabled.
+ */
+ if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
+ return;
counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
if (!counters)
@@ -1146,31 +1152,45 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
RTL_W32(CounterAddrLow, cmd);
RTL_W32(CounterAddrLow, cmd | CounterDump);
- while (RTL_R32(CounterAddrLow) & CounterDump) {
- if (msleep_interruptible(1))
+ while (wait--) {
+ if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
+ /* copy updated counters */
+ memcpy(&tp->counters, counters, sizeof(*counters));
break;
+ }
+ udelay(10);
}
RTL_W32(CounterAddrLow, 0);
RTL_W32(CounterAddrHigh, 0);
- data[0] = le64_to_cpu(counters->tx_packets);
- data[1] = le64_to_cpu(counters->rx_packets);
- data[2] = le64_to_cpu(counters->tx_errors);
- data[3] = le32_to_cpu(counters->rx_errors);
- data[4] = le16_to_cpu(counters->rx_missed);
- data[5] = le16_to_cpu(counters->align_errors);
- data[6] = le32_to_cpu(counters->tx_one_collision);
- data[7] = le32_to_cpu(counters->tx_multi_collision);
- data[8] = le64_to_cpu(counters->rx_unicast);
- data[9] = le64_to_cpu(counters->rx_broadcast);
- data[10] = le32_to_cpu(counters->rx_multicast);
- data[11] = le16_to_cpu(counters->tx_aborted);
- data[12] = le16_to_cpu(counters->tx_underun);
-
pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
}
+static void rtl8169_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ ASSERT_RTNL();
+
+ rtl8169_update_counters(dev);
+
+ data[0] = le64_to_cpu(tp->counters.tx_packets);
+ data[1] = le64_to_cpu(tp->counters.rx_packets);
+ data[2] = le64_to_cpu(tp->counters.tx_errors);
+ data[3] = le32_to_cpu(tp->counters.rx_errors);
+ data[4] = le16_to_cpu(tp->counters.rx_missed);
+ data[5] = le16_to_cpu(tp->counters.align_errors);
+ data[6] = le32_to_cpu(tp->counters.tx_one_collision);
+ data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
+ data[8] = le64_to_cpu(tp->counters.rx_unicast);
+ data[9] = le64_to_cpu(tp->counters.rx_broadcast);
+ data[10] = le32_to_cpu(tp->counters.rx_multicast);
+ data[11] = le16_to_cpu(tp->counters.tx_aborted);
+ data[12] = le16_to_cpu(tp->counters.tx_underun);
+}
+
static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch(stringset) {
@@ -3682,6 +3702,9 @@ static int rtl8169_close(struct net_device *dev)
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
+ /* update counters before going down */
+ rtl8169_update_counters(dev);
+
rtl8169_down(dev);
free_irq(dev->irq, dev);
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b17efa9cc53..49187634106 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2221,6 +2221,8 @@ static int gem_do_start(struct net_device *dev)
gp->running = 1;
+ napi_enable(&gp->napi);
+
if (gp->lstate == link_up) {
netif_carrier_on(gp->dev);
gem_set_link_modes(gp);
@@ -2238,6 +2240,8 @@ static int gem_do_start(struct net_device *dev)
spin_lock_irqsave(&gp->lock, flags);
spin_lock(&gp->tx_lock);
+ napi_disable(&gp->napi);
+
gp->running = 0;
gem_reset(gp);
gem_clean_rings(gp);
@@ -2338,8 +2342,6 @@ static int gem_open(struct net_device *dev)
if (!gp->asleep)
rc = gem_do_start(dev);
gp->opened = (rc == 0);
- if (gp->opened)
- napi_enable(&gp->napi);
mutex_unlock(&gp->pm_mutex);
@@ -2476,8 +2478,6 @@ static int gem_resume(struct pci_dev *pdev)
/* Re-attach net device */
netif_device_attach(dev);
-
- napi_enable(&gp->napi);
}
spin_lock_irqsave(&gp->lock, flags);
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 7a72a3112f0..cc4013be5e1 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2629,6 +2629,14 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
int i, qfe_slot = -1;
int err = -ENODEV;
+ sbus_dp = to_of_device(op->dev.parent)->node;
+ if (is_qfe)
+ sbus_dp = to_of_device(op->dev.parent->parent)->node;
+
+ /* We can match PCI devices too, do not accept those here. */
+ if (strcmp(sbus_dp->name, "sbus"))
+ return err;
+
if (is_qfe) {
qp = quattro_sbus_find(op);
if (qp == NULL)
@@ -2734,10 +2742,6 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
if (qp != NULL)
hp->happy_flags |= HFLAG_QUATTRO;
- sbus_dp = to_of_device(op->dev.parent)->node;
- if (is_qfe)
- sbus_dp = to_of_device(op->dev.parent->parent)->node;
-
/* Get the supported DVMA burst sizes from our Happy SBUS. */
hp->happy_bursts = of_getintprop_default(sbus_dp,
"burst-sizes", 0x00);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 0bf2114738b..d4c5ecc51f7 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -464,13 +464,14 @@ static void de_rx (struct de_private *de)
drop = 1;
rx_next:
- de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
if (rx_tail == (DE_RX_RING_SIZE - 1))
de->rx_ring[rx_tail].opts2 =
cpu_to_le32(RingEnd | de->rx_buf_sz);
else
de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
+ wmb();
+ de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
rx_tail = NEXT_RX(rx_tail);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d7b81e4fdd5..09fea31d3e3 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -157,10 +157,16 @@ static int update_filter(struct tap_filter *filter, void __user *arg)
nexact = n;
- /* The rest is hashed */
+ /* Remaining multicast addresses are hashed; a unicast
+ * address will leave the filter disabled. */
memset(filter->mask, 0, sizeof(filter->mask));
- for (; n < uf.count; n++)
+ for (; n < uf.count; n++) {
+ if (!is_multicast_ether_addr(addr[n].u)) {
+ err = 0; /* no filter */
+ goto done;
+ }
addr_hash_set(filter->mask, addr[n].u);
+ }
/* For ALLMULTI just set the mask to all ones.
* This overrides the mask populated above. */
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 101ed49a2d1..032db815b0f 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -64,6 +64,11 @@ struct parport_pc_pci {
static int __devinit netmos_parallel_init(struct pci_dev *dev, struct parport_pc_pci *card, int autoirq, int autodma)
{
+ /* the rule described below doesn't hold for this device */
+ if (dev->device == PCI_DEVICE_ID_NETMOS_9835 &&
+ dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
+ dev->subsystem_device == 0x0299)
+ return -ENODEV;
/*
* Netmos uses the subdevice ID to indicate the number of parallel
* and serial ports. The form is 0x00PS, where <P> is the number of
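A small sketch of the 0x00PS decoding described in that comment, where <P> counts the parallel ports and <S> the serial ports; the subdevice value is invented, and the new check above exists precisely because the IBM 0x0299 card does not follow this rule:

	u16 subdev = 0x0012;			/* hypothetical 9835 variant */
	int parallel = (subdev & 0x00f0) >> 4;	/* 1 parallel port */
	int serial   = subdev & 0x000f;		/* 2 serial ports */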
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c
index e988ec130fc..41aec2acbb9 100644
--- a/drivers/power/pcf50633-charger.c
+++ b/drivers/power/pcf50633-charger.c
@@ -199,7 +199,8 @@ static int adapter_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, usb);
+ struct pcf50633_mbc *mbc = container_of(psy,
+ struct pcf50633_mbc, adapter);
int ret = 0;
switch (psp) {
diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c
index 8906a688e6a..979ed0406ce 100644
--- a/drivers/rtc/rtc-au1xxx.c
+++ b/drivers/rtc/rtc-au1xxx.c
@@ -81,7 +81,7 @@ static int __devinit au1xtoy_rtc_probe(struct platform_device *pdev)
if (au_readl(SYS_TOYTRIM) != 32767) {
/* wait until hardware gives access to TRIM register */
t = 0x00100000;
- while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T0S) && t--)
+ while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T0S) && --t)
msleep(1);
if (!t) {
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index bd56a033bfd..bb8cc05605a 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -485,7 +485,7 @@ static void __exit pxa_rtc_exit(void)
module_init(pxa_rtc_init);
module_exit(pxa_rtc_exit);
-MODULE_AUTHOR("Robert Jarzmik");
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_DESCRIPTION("PXA27x/PXA3xx Realtime Clock Driver (RTC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa-rtc");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index bd591499414..08c23a92101 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -57,6 +57,8 @@ static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
+static void dasd_device_timeout(unsigned long);
+static void dasd_block_timeout(unsigned long);
/*
* SECTION: Operations on the device structure.
@@ -99,6 +101,8 @@ struct dasd_device *dasd_alloc_device(void)
(unsigned long) device);
INIT_LIST_HEAD(&device->ccw_queue);
init_timer(&device->timer);
+ device->timer.function = dasd_device_timeout;
+ device->timer.data = (unsigned long) device;
INIT_WORK(&device->kick_work, do_kick_device);
device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW;
@@ -138,6 +142,8 @@ struct dasd_block *dasd_alloc_block(void)
INIT_LIST_HEAD(&block->ccw_queue);
spin_lock_init(&block->queue_lock);
init_timer(&block->timer);
+ block->timer.function = dasd_block_timeout;
+ block->timer.data = (unsigned long) block;
return block;
}
@@ -915,19 +921,10 @@ static void dasd_device_timeout(unsigned long ptr)
*/
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
- if (expires == 0) {
- if (timer_pending(&device->timer))
- del_timer(&device->timer);
- return;
- }
- if (timer_pending(&device->timer)) {
- if (mod_timer(&device->timer, jiffies + expires))
- return;
- }
- device->timer.function = dasd_device_timeout;
- device->timer.data = (unsigned long) device;
- device->timer.expires = jiffies + expires;
- add_timer(&device->timer);
+ if (expires == 0)
+ del_timer(&device->timer);
+ else
+ mod_timer(&device->timer, jiffies + expires);
}
/*
@@ -935,8 +932,7 @@ void dasd_device_set_timer(struct dasd_device *device, int expires)
*/
void dasd_device_clear_timer(struct dasd_device *device)
{
- if (timer_pending(&device->timer))
- del_timer(&device->timer);
+ del_timer(&device->timer);
}
static void dasd_handle_killed_request(struct ccw_device *cdev,
@@ -1586,19 +1582,10 @@ static void dasd_block_timeout(unsigned long ptr)
*/
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
- if (expires == 0) {
- if (timer_pending(&block->timer))
- del_timer(&block->timer);
- return;
- }
- if (timer_pending(&block->timer)) {
- if (mod_timer(&block->timer, jiffies + expires))
- return;
- }
- block->timer.function = dasd_block_timeout;
- block->timer.data = (unsigned long) block;
- block->timer.expires = jiffies + expires;
- add_timer(&block->timer);
+ if (expires == 0)
+ del_timer(&block->timer);
+ else
+ mod_timer(&block->timer, jiffies + expires);
}
/*
@@ -1606,8 +1593,7 @@ void dasd_block_set_timer(struct dasd_block *block, int expires)
*/
void dasd_block_clear_timer(struct dasd_block *block)
{
- if (timer_pending(&block->timer))
- del_timer(&block->timer);
+ del_timer(&block->timer);
}
/*
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 300e28a531f..34339902efb 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -677,7 +677,7 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
struct dasd_devmap *devmap;
int ff_flag;
- devmap = dasd_find_busid(dev->bus_id);
+ devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0;
else
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 6b996db0dd6..604bd1e0d54 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -27,6 +27,7 @@ menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
bool "Android RAM Console Enable error correction"
default n
depends on ANDROID_RAM_CONSOLE
+ depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
select REED_SOLOMON
select REED_SOLOMON_ENC8
select REED_SOLOMON_DEC8
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
index bf006857a87..643ac5ce381 100644
--- a/drivers/staging/android/ram_console.c
+++ b/drivers/staging/android/ram_console.c
@@ -224,9 +224,23 @@ static int __init ram_console_init(struct ram_console_buffer *buffer,
ram_console_buffer_size =
buffer_size - sizeof(struct ram_console_buffer);
+ if (ram_console_buffer_size > buffer_size) {
+ pr_err("ram_console: buffer %p, invalid size %d, datasize %d\n",
+ buffer, buffer_size, ram_console_buffer_size);
+ return 0;
+ }
+
#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
+
+ if (ram_console_buffer_size > buffer_size) {
+ pr_err("ram_console: buffer %p, invalid size %d, "
+ "non-ecc datasize %d\n",
+ buffer, buffer_size, ram_console_buffer_size);
+ return 0;
+ }
+
ram_console_par_buffer = buffer->data + ram_console_buffer_size;
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index 903270cbbe0..33daff0481d 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -50,7 +50,7 @@ static ssize_t gpio_enable_show(struct device *dev, struct device_attribute *att
if (hrtimer_active(&gpio_data->timer)) {
ktime_t r = hrtimer_get_remaining(&gpio_data->timer);
struct timeval t = ktime_to_timeval(r);
- remaining = t.tv_sec * 1000 + t.tv_usec;
+ remaining = t.tv_sec * 1000 + t.tv_usec / 1000;
} else
remaining = 0;
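Worked numbers for the conversion fixed above, using an invented remaining time:

	/* For a remaining time of 1.5 s, ktime_to_timeval() gives
	 * tv_sec == 1 and tv_usec == 500000:
	 *   new: 1 * 1000 + 500000 / 1000 == 1500   (milliseconds, correct)
	 *   old: 1 * 1000 + 500000        == 501000 (wrong units)
	 */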
diff --git a/drivers/staging/at76_usb/Kconfig b/drivers/staging/at76_usb/Kconfig
index 4c0e55e1544..8606f962162 100644
--- a/drivers/staging/at76_usb/Kconfig
+++ b/drivers/staging/at76_usb/Kconfig
@@ -1,6 +1,6 @@
config USB_ATMEL
tristate "Atmel at76c503/at76c505/at76c505a USB cards"
- depends on MAC80211 && WLAN_80211 && USB
+ depends on WLAN_80211 && USB
default N
select FW_LOADER
---help---
diff --git a/drivers/staging/at76_usb/at76_usb.c b/drivers/staging/at76_usb/at76_usb.c
index 185533e5476..c8e4d31c7df 100644
--- a/drivers/staging/at76_usb/at76_usb.c
+++ b/drivers/staging/at76_usb/at76_usb.c
@@ -6,7 +6,6 @@
* Copyright (c) 2004 Nick Jones
* Copyright (c) 2004 Balint Seeber <n0_5p4m_p13453@hotmail.com>
* Copyright (c) 2007 Guido Guenther <agx@sigxcpu.org>
- * Copyright (c) 2007 Kalle Valo <kalle.valo@iki.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -17,13 +16,6 @@
* Atmel AT76C503A/505/505A.
*
* Some iw_handler code was taken from airo.c, (C) 1999 Benjamin Reed
- *
- * TODO for the mac80211 port:
- * o adhoc support
- * o RTS/CTS support
- * o Power Save Mode support
- * o support for short/long preambles
- * o export variables through debugfs/sysfs
*/
#include <linux/init.h>
@@ -44,7 +36,7 @@
#include <net/ieee80211_radiotap.h>
#include <linux/firmware.h>
#include <linux/leds.h>
-#include <net/mac80211.h>
+#include <net/ieee80211.h>
#include "at76_usb.h"
@@ -84,43 +76,31 @@
#define DBG_WE_EVENTS 0x08000000 /* dump wireless events */
#define DBG_FW 0x10000000 /* firmware download */
#define DBG_DFU 0x20000000 /* device firmware upgrade */
-#define DBG_CMD 0x40000000
-#define DBG_MAC80211 0x80000000
#define DBG_DEFAULTS 0
/* Use our own dbg macro */
#define at76_dbg(bits, format, arg...) \
-do { \
- if (at76_debug & (bits)) \
- printk(KERN_DEBUG DRIVER_NAME ": " format "\n" , ## arg); \
-} while (0)
-
-#define at76_dbg_dump(bits, buf, len, format, arg...) \
-do { \
- if (at76_debug & (bits)) { \
+ do { \
+ if (at76_debug & (bits)) \
printk(KERN_DEBUG DRIVER_NAME ": " format "\n" , ## arg); \
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); \
- } \
-} while (0)
+ } while (0)
static int at76_debug = DBG_DEFAULTS;
-#define FIRMWARE_IS_WPA(ver) ((ver.major == 1) && (ver.minor == 103))
-
/* Protect against concurrent firmware loading and parsing */
static struct mutex fw_mutex;
static struct fwentry firmwares[] = {
- [0] = { "" },
- [BOARD_503_ISL3861] = { "atmel_at76c503-i3861.bin" },
- [BOARD_503_ISL3863] = { "atmel_at76c503-i3863.bin" },
- [BOARD_503] = { "atmel_at76c503-rfmd.bin" },
- [BOARD_503_ACC] = { "atmel_at76c503-rfmd-acc.bin" },
- [BOARD_505] = { "atmel_at76c505-rfmd.bin" },
- [BOARD_505_2958] = { "atmel_at76c505-rfmd2958.bin" },
- [BOARD_505A] = { "atmel_at76c505a-rfmd2958.bin" },
- [BOARD_505AMX] = { "atmel_at76c505amx-rfmd.bin" },
+ [0] = {""},
+ [BOARD_503_ISL3861] = {"atmel_at76c503-i3861.bin"},
+ [BOARD_503_ISL3863] = {"atmel_at76c503-i3863.bin"},
+ [BOARD_503] = {"atmel_at76c503-rfmd.bin"},
+ [BOARD_503_ACC] = {"atmel_at76c503-rfmd-acc.bin"},
+ [BOARD_505] = {"atmel_at76c505-rfmd.bin"},
+ [BOARD_505_2958] = {"atmel_at76c505-rfmd2958.bin"},
+ [BOARD_505A] = {"atmel_at76c505a-rfmd2958.bin"},
+ [BOARD_505AMX] = {"atmel_at76c505amx-rfmd.bin"},
};
#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops)
@@ -130,133 +110,135 @@ static struct usb_device_id dev_table[] = {
* at76c503-i3861
*/
/* Generic AT76C503/3861 device */
- { USB_DEVICE(0x03eb, 0x7603), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x03eb, 0x7603), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* Linksys WUSB11 v2.1/v2.6 */
- { USB_DEVICE(0x066b, 0x2211), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x066b, 0x2211), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* Netgear MA101 rev. A */
- { USB_DEVICE(0x0864, 0x4100), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x0864, 0x4100), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* Tekram U300C / Allnet ALL0193 */
- { USB_DEVICE(0x0b3b, 0x1612), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x0b3b, 0x1612), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* HP HN210W J7801A */
- { USB_DEVICE(0x03f0, 0x011c), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x03f0, 0x011c), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* Sitecom/Z-Com/Zyxel M4Y-750 */
- { USB_DEVICE(0x0cde, 0x0001), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x0cde, 0x0001), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* Dynalink/Askey WLL013 (intersil) */
- { USB_DEVICE(0x069a, 0x0320), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x069a, 0x0320), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* EZ connect 11Mpbs Wireless USB Adapter SMC2662W v1 */
- { USB_DEVICE(0x0d5c, 0xa001), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x0d5c, 0xa001), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* BenQ AWL300 */
- { USB_DEVICE(0x04a5, 0x9000), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x04a5, 0x9000), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* Addtron AWU-120, Compex WLU11 */
- { USB_DEVICE(0x05dd, 0xff31), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x05dd, 0xff31), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* Intel AP310 AnyPoint II USB */
- { USB_DEVICE(0x8086, 0x0200), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x8086, 0x0200), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* Dynalink L11U */
- { USB_DEVICE(0x0d8e, 0x7100), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x0d8e, 0x7100), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* Arescom WL-210, FCC id 07J-GL2411USB */
- { USB_DEVICE(0x0d8e, 0x7110), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x0d8e, 0x7110), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* I-O DATA WN-B11/USB */
- { USB_DEVICE(0x04bb, 0x0919), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x04bb, 0x0919), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/* BT Voyager 1010 */
- { USB_DEVICE(0x069a, 0x0821), USB_DEVICE_DATA(BOARD_503_ISL3861) },
+ {USB_DEVICE(0x069a, 0x0821), USB_DEVICE_DATA(BOARD_503_ISL3861)},
/*
* at76c503-i3863
*/
/* Generic AT76C503/3863 device */
- { USB_DEVICE(0x03eb, 0x7604), USB_DEVICE_DATA(BOARD_503_ISL3863) },
+ {USB_DEVICE(0x03eb, 0x7604), USB_DEVICE_DATA(BOARD_503_ISL3863)},
/* Samsung SWL-2100U */
- { USB_DEVICE(0x055d, 0xa000), USB_DEVICE_DATA(BOARD_503_ISL3863) },
+ {USB_DEVICE(0x055d, 0xa000), USB_DEVICE_DATA(BOARD_503_ISL3863)},
/*
* at76c503-rfmd
*/
/* Generic AT76C503/RFMD device */
- { USB_DEVICE(0x03eb, 0x7605), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x03eb, 0x7605), USB_DEVICE_DATA(BOARD_503)},
/* Dynalink/Askey WLL013 (rfmd) */
- { USB_DEVICE(0x069a, 0x0321), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x069a, 0x0321), USB_DEVICE_DATA(BOARD_503)},
/* Linksys WUSB11 v2.6 */
- { USB_DEVICE(0x077b, 0x2219), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x077b, 0x2219), USB_DEVICE_DATA(BOARD_503)},
/* Network Everywhere NWU11B */
- { USB_DEVICE(0x077b, 0x2227), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x077b, 0x2227), USB_DEVICE_DATA(BOARD_503)},
/* Netgear MA101 rev. B */
- { USB_DEVICE(0x0864, 0x4102), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x0864, 0x4102), USB_DEVICE_DATA(BOARD_503)},
/* D-Link DWL-120 rev. E */
- { USB_DEVICE(0x2001, 0x3200), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x2001, 0x3200), USB_DEVICE_DATA(BOARD_503)},
/* Actiontec 802UAT1, HWU01150-01UK */
- { USB_DEVICE(0x1668, 0x7605), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x1668, 0x7605), USB_DEVICE_DATA(BOARD_503)},
/* AirVast W-Buddie WN210 */
- { USB_DEVICE(0x03eb, 0x4102), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x03eb, 0x4102), USB_DEVICE_DATA(BOARD_503)},
/* Dick Smith Electronics XH1153 802.11b USB adapter */
- { USB_DEVICE(0x1371, 0x5743), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x1371, 0x5743), USB_DEVICE_DATA(BOARD_503)},
/* CNet CNUSB611 */
- { USB_DEVICE(0x1371, 0x0001), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x1371, 0x0001), USB_DEVICE_DATA(BOARD_503)},
/* FiberLine FL-WL200U */
- { USB_DEVICE(0x1371, 0x0002), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x1371, 0x0002), USB_DEVICE_DATA(BOARD_503)},
/* BenQ AWL400 USB stick */
- { USB_DEVICE(0x04a5, 0x9001), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x04a5, 0x9001), USB_DEVICE_DATA(BOARD_503)},
/* 3Com 3CRSHEW696 */
- { USB_DEVICE(0x0506, 0x0a01), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x0506, 0x0a01), USB_DEVICE_DATA(BOARD_503)},
/* Siemens Santis ADSL WLAN USB adapter WLL 013 */
- { USB_DEVICE(0x0681, 0x001b), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x0681, 0x001b), USB_DEVICE_DATA(BOARD_503)},
/* Belkin F5D6050, version 2 */
- { USB_DEVICE(0x050d, 0x0050), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x050d, 0x0050), USB_DEVICE_DATA(BOARD_503)},
/* iBlitzz, BWU613 (not *B or *SB) */
- { USB_DEVICE(0x07b8, 0xb000), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x07b8, 0xb000), USB_DEVICE_DATA(BOARD_503)},
/* Gigabyte GN-WLBM101 */
- { USB_DEVICE(0x1044, 0x8003), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x1044, 0x8003), USB_DEVICE_DATA(BOARD_503)},
/* Planex GW-US11S */
- { USB_DEVICE(0x2019, 0x3220), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x2019, 0x3220), USB_DEVICE_DATA(BOARD_503)},
/* Internal WLAN adapter in h5[4,5]xx series iPAQs */
- { USB_DEVICE(0x049f, 0x0032), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x049f, 0x0032), USB_DEVICE_DATA(BOARD_503)},
/* Corega Wireless LAN USB-11 mini */
- { USB_DEVICE(0x07aa, 0x0011), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x07aa, 0x0011), USB_DEVICE_DATA(BOARD_503)},
/* Corega Wireless LAN USB-11 mini2 */
- { USB_DEVICE(0x07aa, 0x0018), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x07aa, 0x0018), USB_DEVICE_DATA(BOARD_503)},
/* Uniden PCW100 */
- { USB_DEVICE(0x05dd, 0xff35), USB_DEVICE_DATA(BOARD_503) },
+ {USB_DEVICE(0x05dd, 0xff35), USB_DEVICE_DATA(BOARD_503)},
/*
* at76c503-rfmd-acc
*/
/* SMC2664W */
- { USB_DEVICE(0x083a, 0x3501), USB_DEVICE_DATA(BOARD_503_ACC) },
+ {USB_DEVICE(0x083a, 0x3501), USB_DEVICE_DATA(BOARD_503_ACC)},
/* Belkin F5D6050, SMC2662W v2, SMC2662W-AR */
- { USB_DEVICE(0x0d5c, 0xa002), USB_DEVICE_DATA(BOARD_503_ACC) },
+ {USB_DEVICE(0x0d5c, 0xa002), USB_DEVICE_DATA(BOARD_503_ACC)},
/*
* at76c505-rfmd
*/
/* Generic AT76C505/RFMD */
- { USB_DEVICE(0x03eb, 0x7606), USB_DEVICE_DATA(BOARD_505) },
+ {USB_DEVICE(0x03eb, 0x7606), USB_DEVICE_DATA(BOARD_505)},
/*
* at76c505-rfmd2958
*/
/* Generic AT76C505/RFMD, OvisLink WL-1130USB */
- { USB_DEVICE(0x03eb, 0x7613), USB_DEVICE_DATA(BOARD_505_2958) },
+ {USB_DEVICE(0x03eb, 0x7613), USB_DEVICE_DATA(BOARD_505_2958)},
/* Fiberline FL-WL240U */
- { USB_DEVICE(0x1371, 0x0014), USB_DEVICE_DATA(BOARD_505_2958) },
+ {USB_DEVICE(0x1371, 0x0014), USB_DEVICE_DATA(BOARD_505_2958)},
/* CNet CNUSB-611G */
- { USB_DEVICE(0x1371, 0x0013), USB_DEVICE_DATA(BOARD_505_2958) },
+ {USB_DEVICE(0x1371, 0x0013), USB_DEVICE_DATA(BOARD_505_2958)},
/* Linksys WUSB11 v2.8 */
- { USB_DEVICE(0x1915, 0x2233), USB_DEVICE_DATA(BOARD_505_2958) },
+ {USB_DEVICE(0x1915, 0x2233), USB_DEVICE_DATA(BOARD_505_2958)},
/* Xterasys XN-2122B, IBlitzz BWU613B/BWU613SB */
- { USB_DEVICE(0x12fd, 0x1001), USB_DEVICE_DATA(BOARD_505_2958) },
+ {USB_DEVICE(0x12fd, 0x1001), USB_DEVICE_DATA(BOARD_505_2958)},
/* Corega WLAN USB Stick 11 */
- { USB_DEVICE(0x07aa, 0x7613), USB_DEVICE_DATA(BOARD_505_2958) },
+ {USB_DEVICE(0x07aa, 0x7613), USB_DEVICE_DATA(BOARD_505_2958)},
/* Microstar MSI Box MS6978 */
- { USB_DEVICE(0x0db0, 0x1020), USB_DEVICE_DATA(BOARD_505_2958) },
+ {USB_DEVICE(0x0db0, 0x1020), USB_DEVICE_DATA(BOARD_505_2958)},
/*
* at76c505a-rfmd2958
*/
/* Generic AT76C505A device */
- { USB_DEVICE(0x03eb, 0x7614), USB_DEVICE_DATA(BOARD_505A) },
+ {USB_DEVICE(0x03eb, 0x7614), USB_DEVICE_DATA(BOARD_505A)},
/* Generic AT76C505AS device */
- { USB_DEVICE(0x03eb, 0x7617), USB_DEVICE_DATA(BOARD_505A) },
+ {USB_DEVICE(0x03eb, 0x7617), USB_DEVICE_DATA(BOARD_505A)},
/* Siemens Gigaset USB WLAN Adapter 11 */
- { USB_DEVICE(0x1690, 0x0701), USB_DEVICE_DATA(BOARD_505A) },
+ {USB_DEVICE(0x1690, 0x0701), USB_DEVICE_DATA(BOARD_505A)},
+ /* OQO Model 01+ Internal Wi-Fi */
+ {USB_DEVICE(0x1557, 0x0002), USB_DEVICE_DATA(BOARD_505A)},
/*
* at76c505amx-rfmd
*/
/* Generic AT76C505AMX device */
- { USB_DEVICE(0x03eb, 0x7615), USB_DEVICE_DATA(BOARD_505AMX) },
- { }
+ {USB_DEVICE(0x03eb, 0x7615), USB_DEVICE_DATA(BOARD_505AMX)},
+ {}
};
MODULE_DEVICE_TABLE(usb, dev_table);
@@ -264,8 +246,26 @@ MODULE_DEVICE_TABLE(usb, dev_table);
/* Supported rates of this hardware, bit 7 marks basic rates */
static const u8 hw_rates[] = { 0x82, 0x84, 0x0b, 0x16 };
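The hw_rates bytes use the 802.11 supported-rates encoding: the low seven bits give the rate in units of 500 kb/s and bit 7 marks a basic rate, which is the same decoding the scan handler later in this patch applies with (rates[i] & 0x7f) * 500000. A standalone decoding sketch, illustrative only and not part of the patch:

#include <stdio.h>

/* Illustrative only: decode a supported-rates byte as stored in hw_rates[] */
static void decode_rate_byte(unsigned char r)
{
	unsigned int kbps = (r & 0x7f) * 500;	/* low 7 bits: rate in 500 kb/s units */

	printf("%u.%u Mbit/s%s\n", kbps / 1000, (kbps % 1000) / 100,
	       (r & 0x80) ? " (basic)" : "");
}

int main(void)
{
	/* 0x82 -> 1.0 (basic), 0x84 -> 2.0 (basic), 0x0b -> 5.5, 0x16 -> 11.0 */
	unsigned char rates[] = { 0x82, 0x84, 0x0b, 0x16 };
	unsigned int i;

	for (i = 0; i < sizeof(rates); i++)
		decode_rate_byte(rates[i]);
	return 0;
}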
+/* Frequency of each channel in MHz */
+static const long channel_frequency[] = {
+ 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+ 2447, 2452, 2457, 2462, 2467, 2472, 2484
+};
+
+#define NUM_CHANNELS ARRAY_SIZE(channel_frequency)
+
static const char *const preambles[] = { "long", "short", "auto" };
+static const char *const mac_states[] = {
+ [MAC_INIT] = "INIT",
+ [MAC_SCANNING] = "SCANNING",
+ [MAC_AUTH] = "AUTH",
+ [MAC_ASSOC] = "ASSOC",
+ [MAC_JOINING] = "JOINING",
+ [MAC_CONNECTED] = "CONNECTED",
+ [MAC_OWN_IBSS] = "OWN_IBSS"
+};
+
/* Firmware download */
/* DFU states */
#define STATE_IDLE 0x00
@@ -300,30 +300,17 @@ struct dfu_status {
static inline int at76_is_intersil(enum board_type board)
{
- if (board == BOARD_503_ISL3861 || board == BOARD_503_ISL3863)
- return 1;
- return 0;
+ return (board == BOARD_503_ISL3861 || board == BOARD_503_ISL3863);
}
static inline int at76_is_503rfmd(enum board_type board)
{
- if (board == BOARD_503 || board == BOARD_503_ACC)
- return 1;
- return 0;
-}
-
-static inline int at76_is_505(enum board_type board)
-{
- if (board == BOARD_505 || board == BOARD_505_2958)
- return 1;
- return 0;
+ return (board == BOARD_503 || board == BOARD_503_ACC);
}
static inline int at76_is_505a(enum board_type board)
{
- if (board == BOARD_505A || board == BOARD_505AMX)
- return 1;
- return 0;
+ return (board == BOARD_505A || board == BOARD_505AMX);
}
/* Load a block of the first (internal) part of the firmware */
@@ -504,6 +491,41 @@ exit:
return ret;
}
+/* Report that the scan results are ready */
+static inline void at76_iwevent_scan_complete(struct net_device *netdev)
+{
+ union iwreq_data wrqu;
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ wireless_send_event(netdev, SIOCGIWSCAN, &wrqu, NULL);
+ at76_dbg(DBG_WE_EVENTS, "%s: SIOCGIWSCAN sent", netdev->name);
+}
+
+static inline void at76_iwevent_bss_connect(struct net_device *netdev,
+ u8 *bssid)
+{
+ union iwreq_data wrqu;
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
+ at76_dbg(DBG_WE_EVENTS, "%s: %s: SIOCGIWAP sent", netdev->name,
+ __func__);
+}
+
+static inline void at76_iwevent_bss_disconnect(struct net_device *netdev)
+{
+ union iwreq_data wrqu;
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
+ at76_dbg(DBG_WE_EVENTS, "%s: %s: SIOCGIWAP sent", netdev->name,
+ __func__);
+}
+
#define HEX2STR_BUFFERS 4
#define HEX2STR_MAX_LEN 64
#define BIN2HEX(x) ((x) < 10 ? '0' + (x) : (x) + 'A' - 10)
@@ -575,6 +597,37 @@ static void at76_ledtrig_tx_activity(void)
mod_timer(&ledtrig_tx_timer, jiffies + HZ / 4);
}
+/* Check if the given ssid is hidden */
+static inline int at76_is_hidden_ssid(u8 *ssid, int length)
+{
+ static const u8 zeros[32];
+
+ if (length == 0)
+ return 1;
+
+ if (length == 1 && ssid[0] == ' ')
+ return 1;
+
+ return (memcmp(ssid, zeros, length) == 0);
+}
+
+static inline void at76_free_bss_list(struct at76_priv *priv)
+{
+ struct list_head *next, *ptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->bss_list_spinlock, flags);
+
+ priv->curr_bss = NULL;
+
+ list_for_each_safe(ptr, next, &priv->bss_list) {
+ list_del(ptr);
+ kfree(list_entry(ptr, struct bss_info, list));
+ }
+
+ spin_unlock_irqrestore(&priv->bss_list_spinlock, flags);
+}
+
static int at76_remap(struct usb_device *udev)
{
int ret;
@@ -598,7 +651,7 @@ static int at76_get_op_mode(struct usb_device *udev)
return -ENOMEM;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x33,
USB_TYPE_VENDOR | USB_DIR_IN |
- USB_RECIP_INTERFACE, 0x01, 0, &op_mode, 1,
+ USB_RECIP_INTERFACE, 0x01, 0, op_mode, 1,
USB_CTRL_GET_TIMEOUT);
saved = *op_mode;
kfree(op_mode);
@@ -676,7 +729,7 @@ exit:
kfree(hwcfg);
if (ret < 0)
printk(KERN_ERR "%s: cannot get HW Config (error %d)\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
return ret;
}
@@ -685,15 +738,15 @@ static struct reg_domain const *at76_get_reg_domain(u16 code)
{
int i;
static struct reg_domain const fd_tab[] = {
- { 0x10, "FCC (USA)", 0x7ff }, /* ch 1-11 */
- { 0x20, "IC (Canada)", 0x7ff }, /* ch 1-11 */
- { 0x30, "ETSI (most of Europe)", 0x1fff }, /* ch 1-13 */
- { 0x31, "Spain", 0x600 }, /* ch 10-11 */
- { 0x32, "France", 0x1e00 }, /* ch 10-13 */
- { 0x40, "MKK (Japan)", 0x2000 }, /* ch 14 */
- { 0x41, "MKK1 (Japan)", 0x3fff }, /* ch 1-14 */
- { 0x50, "Israel", 0x3fc }, /* ch 3-9 */
- { 0x00, "<unknown>", 0xffffffff } /* ch 1-32 */
+ {0x10, "FCC (USA)", 0x7ff}, /* ch 1-11 */
+ {0x20, "IC (Canada)", 0x7ff}, /* ch 1-11 */
+ {0x30, "ETSI (most of Europe)", 0x1fff}, /* ch 1-13 */
+ {0x31, "Spain", 0x600}, /* ch 10-11 */
+ {0x32, "France", 0x1e00}, /* ch 10-13 */
+ {0x40, "MKK (Japan)", 0x2000}, /* ch 14 */
+ {0x41, "MKK1 (Japan)", 0x3fff}, /* ch 1-14 */
+ {0x50, "Israel", 0x3fc}, /* ch 3-9 */
+ {0x00, "<unknown>", 0xffffffff} /* ch 1-32 */
};
/* Last entry is fallback for unknown domain code */
@@ -731,7 +784,7 @@ static inline int at76_get_cmd_status(struct usb_device *udev, u8 cmd)
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x22,
USB_TYPE_VENDOR | USB_DIR_IN |
USB_RECIP_INTERFACE, cmd, 0, stat_buf,
- sizeof(stat_buf), USB_CTRL_GET_TIMEOUT);
+ 40, USB_CTRL_GET_TIMEOUT);
if (ret >= 0)
ret = stat_buf[5];
kfree(stat_buf);
@@ -739,24 +792,6 @@ static inline int at76_get_cmd_status(struct usb_device *udev, u8 cmd)
return ret;
}
-#define MAKE_CMD_CASE(c) case (c): return #c
-
-static const char *at76_get_cmd_string(u8 cmd_status)
-{
- switch (cmd_status) {
- MAKE_CMD_CASE(CMD_SET_MIB);
- MAKE_CMD_CASE(CMD_GET_MIB);
- MAKE_CMD_CASE(CMD_SCAN);
- MAKE_CMD_CASE(CMD_JOIN);
- MAKE_CMD_CASE(CMD_START_IBSS);
- MAKE_CMD_CASE(CMD_RADIO_ON);
- MAKE_CMD_CASE(CMD_RADIO_OFF);
- MAKE_CMD_CASE(CMD_STARTUP);
- }
-
- return "UNKNOWN";
-}
-
static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf,
int buf_size)
{
@@ -772,10 +807,6 @@ static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf,
cmd_buf->size = cpu_to_le16(buf_size);
memcpy(cmd_buf->data, buf, buf_size);
- at76_dbg_dump(DBG_CMD, cmd_buf, sizeof(struct at76_command) + buf_size,
- "issuing command %s (0x%02x)",
- at76_get_cmd_string(cmd), cmd);
-
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0e,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
0, 0, cmd_buf,
@@ -813,13 +844,13 @@ static int at76_wait_completion(struct at76_priv *priv, int cmd)
status = at76_get_cmd_status(priv->udev, cmd);
if (status < 0) {
printk(KERN_ERR "%s: at76_get_cmd_status failed: %d\n",
- wiphy_name(priv->hw->wiphy), status);
+ priv->netdev->name, status);
break;
}
at76_dbg(DBG_WAIT_COMPLETE,
"%s: Waiting on cmd %d, status = %d (%s)",
- wiphy_name(priv->hw->wiphy), cmd, status,
+ priv->netdev->name, cmd, status,
at76_get_cmd_status_string(status));
if (status != CMD_STATUS_IN_PROGRESS
@@ -830,7 +861,7 @@ static int at76_wait_completion(struct at76_priv *priv, int cmd)
if (time_after(jiffies, timeout)) {
printk(KERN_ERR
"%s: completion timeout for command %d\n",
- wiphy_name(priv->hw->wiphy), cmd);
+ priv->netdev->name, cmd);
status = -ETIMEDOUT;
break;
}
@@ -853,7 +884,7 @@ static int at76_set_mib(struct at76_priv *priv, struct set_mib_buffer *buf)
if (ret != CMD_STATUS_COMPLETE) {
printk(KERN_INFO
"%s: set_mib: at76_wait_completion failed "
- "with %d\n", wiphy_name(priv->hw->wiphy), ret);
+ "with %d\n", priv->netdev->name, ret);
ret = -EIO;
}
@@ -874,7 +905,7 @@ static int at76_set_radio(struct at76_priv *priv, int enable)
ret = at76_set_card_command(priv->udev, cmd, NULL, 0);
if (ret < 0)
printk(KERN_ERR "%s: at76_set_card_command(%d) failed: %d\n",
- wiphy_name(priv->hw->wiphy), cmd, ret);
+ priv->netdev->name, cmd, ret);
else
ret = 1;
@@ -895,7 +926,44 @@ static int at76_set_pm_mode(struct at76_priv *priv)
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
printk(KERN_ERR "%s: set_mib (pm_mode) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
+
+ return ret;
+}
+
+/* Set the association id for power save mode */
+static int at76_set_associd(struct at76_priv *priv, u16 id)
+{
+ int ret = 0;
+
+ priv->mib_buf.type = MIB_MAC_MGMT;
+ priv->mib_buf.size = 2;
+ priv->mib_buf.index = offsetof(struct mib_mac_mgmt, station_id);
+ priv->mib_buf.data.word = cpu_to_le16(id);
+
+ ret = at76_set_mib(priv, &priv->mib_buf);
+ if (ret < 0)
+ printk(KERN_ERR "%s: set_mib (associd) failed: %d\n",
+ priv->netdev->name, ret);
+
+ return ret;
+}
+
+/* Set the listen interval for power save mode */
+static int at76_set_listen_interval(struct at76_priv *priv, u16 interval)
+{
+ int ret = 0;
+
+ priv->mib_buf.type = MIB_MAC;
+ priv->mib_buf.size = 2;
+ priv->mib_buf.index = offsetof(struct mib_mac, listen_interval);
+ priv->mib_buf.data.word = cpu_to_le16(interval);
+
+ ret = at76_set_mib(priv, &priv->mib_buf);
+ if (ret < 0)
+ printk(KERN_ERR
+ "%s: set_mib (listen_interval) failed: %d\n",
+ priv->netdev->name, ret);
return ret;
}
@@ -912,7 +980,7 @@ static int at76_set_preamble(struct at76_priv *priv, u8 type)
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
printk(KERN_ERR "%s: set_mib (preamble) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
return ret;
}
@@ -929,7 +997,7 @@ static int at76_set_frag(struct at76_priv *priv, u16 size)
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
printk(KERN_ERR "%s: set_mib (frag threshold) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
return ret;
}
@@ -946,7 +1014,7 @@ static int at76_set_rts(struct at76_priv *priv, u16 size)
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
printk(KERN_ERR "%s: set_mib (rts) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
return ret;
}
@@ -963,41 +1031,24 @@ static int at76_set_autorate_fallback(struct at76_priv *priv, int onoff)
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
printk(KERN_ERR "%s: set_mib (autorate fallback) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
return ret;
}
-static int at76_set_tkip_bssid(struct at76_priv *priv, const void *addr)
+static int at76_add_mac_address(struct at76_priv *priv, void *addr)
{
int ret = 0;
- priv->mib_buf.type = MIB_MAC_ENCRYPTION;
+ priv->mib_buf.type = MIB_MAC_ADDR;
priv->mib_buf.size = ETH_ALEN;
- priv->mib_buf.index = offsetof(struct mib_mac_encryption, tkip_bssid);
+ priv->mib_buf.index = offsetof(struct mib_mac_addr, mac_addr);
memcpy(priv->mib_buf.data.addr, addr, ETH_ALEN);
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
- printk(KERN_ERR "%s: set_mib (MAC_ENCRYPTION, tkip_bssid) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
-
- return ret;
-}
-
-static int at76_reset_rsc(struct at76_priv *priv)
-{
- int ret = 0;
-
- priv->mib_buf.type = MIB_MAC_ENCRYPTION;
- priv->mib_buf.size = 4 * 8;
- priv->mib_buf.index = offsetof(struct mib_mac_encryption, key_rsc);
- memset(priv->mib_buf.data.data, 0 , priv->mib_buf.size);
-
- ret = at76_set_mib(priv, &priv->mib_buf);
- if (ret < 0)
- printk(KERN_ERR "%s: set_mib (MAC_ENCRYPTION, key_rsc) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ printk(KERN_ERR "%s: set_mib (MAC_ADDR, mac_addr) failed: %d\n",
+ priv->netdev->name, ret);
return ret;
}
@@ -1016,16 +1067,16 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv)
sizeof(struct mib_mac_addr));
if (ret < 0) {
printk(KERN_ERR "%s: at76_get_mib (MAC_ADDR) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
goto exit;
}
at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: mac_addr %s res 0x%x 0x%x",
- wiphy_name(priv->hw->wiphy),
+ priv->netdev->name,
mac2str(m->mac_addr), m->res[0], m->res[1]);
for (i = 0; i < ARRAY_SIZE(m->group_addr); i++)
at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: group addr %d: %s, "
- "status %d", wiphy_name(priv->hw->wiphy), i,
+ "status %d", priv->netdev->name, i,
mac2str(m->group_addr[i]), m->group_addr_status[i]);
exit:
kfree(m);
@@ -1045,13 +1096,13 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
sizeof(struct mib_mac_wep));
if (ret < 0) {
printk(KERN_ERR "%s: at76_get_mib (MAC_WEP) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
goto exit;
}
at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: priv_invoked %u def_key_id %u "
"key_len %u excl_unencr %u wep_icv_err %u wep_excluded %u "
- "encr_level %u key %d", wiphy_name(priv->hw->wiphy),
+ "encr_level %u key %d", priv->netdev->name,
m->privacy_invoked, m->wep_default_key_id,
m->wep_key_mapping_len, m->exclude_unencrypted,
le32_to_cpu(m->wep_icv_error_count),
@@ -1063,55 +1114,12 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
for (i = 0; i < WEP_KEYS; i++)
at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %s",
- wiphy_name(priv->hw->wiphy), i,
+ priv->netdev->name, i,
hex2str(m->wep_default_keyvalue[i], key_len));
exit:
kfree(m);
}
-static void at76_dump_mib_mac_encryption(struct at76_priv *priv)
-{
- int i;
- int ret;
- /*int key_len;*/
- struct mib_mac_encryption *m;
-
- m = kmalloc(sizeof(struct mib_mac_encryption), GFP_KERNEL);
- if (!m)
- return;
-
- ret = at76_get_mib(priv->udev, MIB_MAC_ENCRYPTION, m,
- sizeof(struct mib_mac_encryption));
- if (ret < 0) {
- dev_err(&priv->udev->dev,
- "%s: at76_get_mib (MAC_ENCRYPTION) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
- goto exit;
- }
-
- at76_dbg(DBG_MIB,
- "%s: MIB MAC_ENCRYPTION: tkip_bssid %s priv_invoked %u "
- "ciph_key_id %u grp_key_id %u excl_unencr %u "
- "ckip_key_perm %u wep_icv_err %u wep_excluded %u",
- wiphy_name(priv->hw->wiphy), mac2str(m->tkip_bssid),
- m->privacy_invoked, m->cipher_default_key_id,
- m->cipher_default_group_key_id, m->exclude_unencrypted,
- m->ckip_key_permutation,
- le32_to_cpu(m->wep_icv_error_count),
- le32_to_cpu(m->wep_excluded_count));
-
- /*key_len = (m->encryption_level == 1) ?
- WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN;*/
-
- for (i = 0; i < CIPHER_KEYS; i++)
- at76_dbg(DBG_MIB, "%s: MIB MAC_ENCRYPTION: key %d: %s",
- wiphy_name(priv->hw->wiphy), i,
- hex2str(m->cipher_default_keyvalue[i],
- CIPHER_KEY_LEN));
-exit:
- kfree(m);
-}
-
static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
{
int ret;
@@ -1125,7 +1133,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
sizeof(struct mib_mac_mgmt));
if (ret < 0) {
printk(KERN_ERR "%s: at76_get_mib (MAC_MGMT) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
goto exit;
}
@@ -1136,7 +1144,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
"pm_mode %d ibss_change %d res %d "
"multi_domain_capability_implemented %d "
"international_roaming %d country_string %.3s",
- wiphy_name(priv->hw->wiphy), le16_to_cpu(m->beacon_period),
+ priv->netdev->name, le16_to_cpu(m->beacon_period),
le16_to_cpu(m->CFP_max_duration),
le16_to_cpu(m->medium_occupancy_limit),
le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window),
@@ -1161,7 +1169,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac));
if (ret < 0) {
printk(KERN_ERR "%s: at76_get_mib (MAC) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
goto exit;
}
@@ -1171,8 +1179,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
"scan_type %d scan_channel %d probe_delay %u "
"min_channel_time %d max_channel_time %d listen_int %d "
"desired_ssid %s desired_bssid %s desired_bsstype %d",
- wiphy_name(priv->hw->wiphy),
- le32_to_cpu(m->max_tx_msdu_lifetime),
+ priv->netdev->name, le32_to_cpu(m->max_tx_msdu_lifetime),
le32_to_cpu(m->max_rx_lifetime),
le16_to_cpu(m->frag_threshold), le16_to_cpu(m->rts_threshold),
le16_to_cpu(m->cwmin), le16_to_cpu(m->cwmax),
@@ -1198,7 +1205,7 @@ static void at76_dump_mib_phy(struct at76_priv *priv)
ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy));
if (ret < 0) {
printk(KERN_ERR "%s: at76_get_mib (PHY) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
goto exit;
}
@@ -1207,7 +1214,7 @@ static void at76_dump_mib_phy(struct at76_priv *priv)
"mpdu_max_length %d cca_mode_supported %d operation_rate_set "
"0x%x 0x%x 0x%x 0x%x channel_id %d current_cca_mode %d "
"phy_type %d current_reg_domain %d",
- wiphy_name(priv->hw->wiphy), le32_to_cpu(m->ed_threshold),
+ priv->netdev->name, le32_to_cpu(m->ed_threshold),
le16_to_cpu(m->slot_time), le16_to_cpu(m->sifs_time),
le16_to_cpu(m->preamble_length),
le16_to_cpu(m->plcp_header_length),
@@ -1231,14 +1238,13 @@ static void at76_dump_mib_local(struct at76_priv *priv)
ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local));
if (ret < 0) {
printk(KERN_ERR "%s: at76_get_mib (LOCAL) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
goto exit;
}
at76_dbg(DBG_MIB, "%s: MIB LOCAL: beacon_enable %d "
"txautorate_fallback %d ssid_size %d promiscuous_mode %d "
- "preamble_type %d", wiphy_name(priv->hw->wiphy),
- m->beacon_enable,
+ "preamble_type %d", priv->netdev->name, m->beacon_enable,
m->txautorate_fallback, m->ssid_size, m->promiscuous_mode,
m->preamble_type);
exit:
@@ -1257,21 +1263,118 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv)
sizeof(struct mib_mdomain));
if (ret < 0) {
printk(KERN_ERR "%s: at76_get_mib (MDOMAIN) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
goto exit;
}
at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %s",
- wiphy_name(priv->hw->wiphy),
+ priv->netdev->name,
hex2str(m->channel_list, sizeof(m->channel_list)));
at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %s",
- wiphy_name(priv->hw->wiphy),
+ priv->netdev->name,
hex2str(m->tx_powerlevel, sizeof(m->tx_powerlevel)));
exit:
kfree(m);
}
+static int at76_get_current_bssid(struct at76_priv *priv)
+{
+ int ret = 0;
+ struct mib_mac_mgmt *mac_mgmt =
+ kmalloc(sizeof(struct mib_mac_mgmt), GFP_KERNEL);
+
+ if (!mac_mgmt) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, mac_mgmt,
+ sizeof(struct mib_mac_mgmt));
+ if (ret < 0) {
+ printk(KERN_ERR "%s: at76_get_mib failed: %d\n",
+ priv->netdev->name, ret);
+ goto error;
+ }
+ memcpy(priv->bssid, mac_mgmt->current_bssid, ETH_ALEN);
+ printk(KERN_INFO "%s: using BSSID %s\n", priv->netdev->name,
+ mac2str(priv->bssid));
+error:
+ kfree(mac_mgmt);
+exit:
+ return ret;
+}
+
+static int at76_get_current_channel(struct at76_priv *priv)
+{
+ int ret = 0;
+ struct mib_phy *phy = kmalloc(sizeof(struct mib_phy), GFP_KERNEL);
+
+ if (!phy) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ret = at76_get_mib(priv->udev, MIB_PHY, phy, sizeof(struct mib_phy));
+ if (ret < 0) {
+ printk(KERN_ERR "%s: at76_get_mib(MIB_PHY) failed: %d\n",
+ priv->netdev->name, ret);
+ goto error;
+ }
+ priv->channel = phy->channel_id;
+error:
+ kfree(phy);
+exit:
+ return ret;
+}
+
+/**
+ * at76_start_scan - start a scan
+ *
+ * @use_essid - use the configured ESSID in non passive mode
+ */
+static int at76_start_scan(struct at76_priv *priv, int use_essid)
+{
+ struct at76_req_scan scan;
+
+ memset(&scan, 0, sizeof(struct at76_req_scan));
+ memset(scan.bssid, 0xff, ETH_ALEN);
+
+ if (use_essid) {
+ memcpy(scan.essid, priv->essid, IW_ESSID_MAX_SIZE);
+ scan.essid_size = priv->essid_size;
+ } else
+ scan.essid_size = 0;
+
+ /* jal: why should we start at a certain channel? we do scan the whole
+ range allowed by reg domain. */
+ scan.channel = priv->channel;
+
+ /* atmelwlandriver distinguishes between scan type 0 and 1 (active/passive).
+ For ad-hoc mode, it uses type 0 only. */
+ scan.scan_type = priv->scan_mode;
+
+ /* INFO: for probe_delay we multiply by 1000 rather than 1024, so it
+ stays slightly below min_channel_time
+ (per spec: probe delay < min. channel time) */
+ scan.min_channel_time = cpu_to_le16(priv->scan_min_time);
+ scan.max_channel_time = cpu_to_le16(priv->scan_max_time);
+ scan.probe_delay = cpu_to_le16(priv->scan_min_time * 1000);
+ scan.international_scan = 0;
+
+ /* other values are set to 0 for type 0 */
+
+ at76_dbg(DBG_PROGRESS, "%s: start_scan (use_essid = %d, intl = %d, "
+ "channel = %d, probe_delay = %d, scan_min_time = %d, "
+ "scan_max_time = %d)",
+ priv->netdev->name, use_essid,
+ scan.international_scan, scan.channel,
+ le16_to_cpu(scan.probe_delay),
+ le16_to_cpu(scan.min_channel_time),
+ le16_to_cpu(scan.max_channel_time));
+
+ return at76_set_card_command(priv->udev, CMD_SCAN, &scan, sizeof(scan));
+}
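As a sanity check on the probe_delay comment in at76_start_scan() above: the channel times are (per 802.11) expressed in TU, where 1 TU = 1024 us, while probe_delay is given in microseconds, so scaling scan_min_time by 1000 instead of 1024 keeps the probe delay just below the minimum channel time. A standalone arithmetic sketch with an invented example value, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned int scan_min_time = 10;			/* example value, in TU */
	unsigned int min_channel_us = scan_min_time * 1024;	/* 1 TU = 1024 us */
	unsigned int probe_delay_us = scan_min_time * 1000;	/* as in at76_start_scan() */

	/* prints: probe_delay 10000 us < min_channel_time 10240 us */
	printf("probe_delay %u us < min_channel_time %u us\n",
	       probe_delay_us, min_channel_us);
	return 0;
}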
+
/* Enable monitor mode */
static int at76_start_monitor(struct at76_priv *priv)
{
@@ -1292,6 +1395,86 @@ static int at76_start_monitor(struct at76_priv *priv)
return ret;
}
+static int at76_start_ibss(struct at76_priv *priv)
+{
+ struct at76_req_ibss bss;
+ int ret;
+
+ WARN_ON(priv->mac_state != MAC_OWN_IBSS);
+ if (priv->mac_state != MAC_OWN_IBSS)
+ return -EBUSY;
+
+ memset(&bss, 0, sizeof(struct at76_req_ibss));
+ memset(bss.bssid, 0xff, ETH_ALEN);
+ memcpy(bss.essid, priv->essid, IW_ESSID_MAX_SIZE);
+ bss.essid_size = priv->essid_size;
+ bss.bss_type = ADHOC_MODE;
+ bss.channel = priv->channel;
+
+ ret = at76_set_card_command(priv->udev, CMD_START_IBSS, &bss,
+ sizeof(struct at76_req_ibss));
+ if (ret < 0) {
+ printk(KERN_ERR "%s: start_ibss failed: %d\n",
+ priv->netdev->name, ret);
+ return ret;
+ }
+
+ ret = at76_wait_completion(priv, CMD_START_IBSS);
+ if (ret != CMD_STATUS_COMPLETE) {
+ printk(KERN_ERR "%s: start_ibss failed to complete, %d\n",
+ priv->netdev->name, ret);
+ return ret;
+ }
+
+ ret = at76_get_current_bssid(priv);
+ if (ret < 0)
+ return ret;
+
+ ret = at76_get_current_channel(priv);
+ if (ret < 0)
+ return ret;
+
+ /* not sure what this is good for ??? */
+ priv->mib_buf.type = MIB_MAC_MGMT;
+ priv->mib_buf.size = 1;
+ priv->mib_buf.index = offsetof(struct mib_mac_mgmt, ibss_change);
+ priv->mib_buf.data.byte = 0;
+
+ ret = at76_set_mib(priv, &priv->mib_buf);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: set_mib (ibss change ok) failed: %d\n",
+ priv->netdev->name, ret);
+ return ret;
+ }
+
+ netif_carrier_on(priv->netdev);
+ netif_start_queue(priv->netdev);
+ return 0;
+}
+
+/* Request card to join BSS in managed or ad-hoc mode */
+static int at76_join_bss(struct at76_priv *priv, struct bss_info *ptr)
+{
+ struct at76_req_join join;
+
+ BUG_ON(!ptr);
+
+ memset(&join, 0, sizeof(struct at76_req_join));
+ memcpy(join.bssid, ptr->bssid, ETH_ALEN);
+ memcpy(join.essid, ptr->ssid, ptr->ssid_len);
+ join.essid_size = ptr->ssid_len;
+ join.bss_type = (priv->iw_mode == IW_MODE_ADHOC ? 1 : 2);
+ join.channel = ptr->channel;
+ join.timeout = cpu_to_le16(2000);
+
+ at76_dbg(DBG_PROGRESS,
+ "%s join addr %s ssid %s type %d ch %d timeout %d",
+ priv->netdev->name, mac2str(join.bssid), join.essid,
+ join.bss_type, join.channel, le16_to_cpu(join.timeout));
+ return at76_set_card_command(priv->udev, CMD_JOIN, &join,
+ sizeof(struct at76_req_join));
+}
+
/* Calculate padding from txbuf->wlength (which excludes the USB TX header),
likely to compensate a flaw in the AT76C503A USB part ... */
static inline int at76_calc_padding(int wlen)
@@ -1310,6 +1493,14 @@ static inline int at76_calc_padding(int wlen)
return 0;
}
+/* We are doing a lot of things here in an interrupt and really need
+ a bh handler (watching TV with a TV card is probably
+ a good test: if you see flickers, we are doing too much.
+ Currently I do see flickers... even with our tasklet :-( ).
+ Maybe because the bttv driver and usb-uhci use the same interrupt.
+*/
+/* Or maybe because our BH handler is preempting bttv's BH handler.. BHs don't
+ * solve everything.. (alex) */
static void at76_rx_callback(struct urb *urb)
{
struct at76_priv *priv = urb->context;
@@ -1319,6 +1510,1758 @@ static void at76_rx_callback(struct urb *urb)
return;
}
+static void at76_tx_callback(struct urb *urb)
+{
+ struct at76_priv *priv = urb->context;
+ struct net_device_stats *stats = &priv->stats;
+ unsigned long flags;
+ struct at76_tx_buffer *mgmt_buf;
+ int ret;
+
+ switch (urb->status) {
+ case 0:
+ stats->tx_packets++;
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ /* urb has been unlinked */
+ return;
+ default:
+ at76_dbg(DBG_URB, "%s - nonzero tx status received: %d",
+ __func__, urb->status);
+ stats->tx_errors++;
+ break;
+ }
+
+ spin_lock_irqsave(&priv->mgmt_spinlock, flags);
+ mgmt_buf = priv->next_mgmt_bulk;
+ priv->next_mgmt_bulk = NULL;
+ spin_unlock_irqrestore(&priv->mgmt_spinlock, flags);
+
+ if (!mgmt_buf) {
+ netif_wake_queue(priv->netdev);
+ return;
+ }
+
+ /* we don't copy the padding bytes, but add them
+ to the length */
+ memcpy(priv->bulk_out_buffer, mgmt_buf,
+ le16_to_cpu(mgmt_buf->wlength) + AT76_TX_HDRLEN);
+ usb_fill_bulk_urb(priv->tx_urb, priv->udev, priv->tx_pipe,
+ priv->bulk_out_buffer,
+ le16_to_cpu(mgmt_buf->wlength) + mgmt_buf->padding +
+ AT76_TX_HDRLEN, at76_tx_callback, priv);
+ ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC);
+ if (ret)
+ printk(KERN_ERR "%s: error in tx submit urb: %d\n",
+ priv->netdev->name, ret);
+
+ kfree(mgmt_buf);
+}
+
+/* Send a management frame on bulk-out. txbuf->wlength must be set */
+static int at76_tx_mgmt(struct at76_priv *priv, struct at76_tx_buffer *txbuf)
+{
+ unsigned long flags;
+ int ret;
+ int urb_status;
+ void *oldbuf = NULL;
+
+ netif_carrier_off(priv->netdev); /* stop netdev watchdog */
+ netif_stop_queue(priv->netdev); /* stop tx data packets */
+
+ spin_lock_irqsave(&priv->mgmt_spinlock, flags);
+
+ urb_status = priv->tx_urb->status;
+ if (urb_status == -EINPROGRESS) {
+ /* cannot transmit now, put in the queue */
+ oldbuf = priv->next_mgmt_bulk;
+ priv->next_mgmt_bulk = txbuf;
+ }
+ spin_unlock_irqrestore(&priv->mgmt_spinlock, flags);
+
+ if (oldbuf) {
+ /* a data/mgmt tx is already pending in the URB -
+ if this turns out not to be an error in some situations, we must
+ either implement a queue or silently modify the old msg */
+ printk(KERN_ERR "%s: removed pending mgmt buffer %s\n",
+ priv->netdev->name, hex2str(oldbuf, 64));
+ kfree(oldbuf);
+ return 0;
+ }
+
+ txbuf->tx_rate = TX_RATE_1MBIT;
+ txbuf->padding = at76_calc_padding(le16_to_cpu(txbuf->wlength));
+ memset(txbuf->reserved, 0, sizeof(txbuf->reserved));
+
+ if (priv->next_mgmt_bulk)
+ printk(KERN_ERR "%s: URB status %d, but mgmt is pending\n",
+ priv->netdev->name, urb_status);
+
+ at76_dbg(DBG_TX_MGMT,
+ "%s: tx mgmt: wlen %d tx_rate %d pad %d %s",
+ priv->netdev->name, le16_to_cpu(txbuf->wlength),
+ txbuf->tx_rate, txbuf->padding,
+ hex2str(txbuf->packet, le16_to_cpu(txbuf->wlength)));
+
+ /* txbuf was not consumed above -> send mgmt msg immediately */
+ memcpy(priv->bulk_out_buffer, txbuf,
+ le16_to_cpu(txbuf->wlength) + AT76_TX_HDRLEN);
+ usb_fill_bulk_urb(priv->tx_urb, priv->udev, priv->tx_pipe,
+ priv->bulk_out_buffer,
+ le16_to_cpu(txbuf->wlength) + txbuf->padding +
+ AT76_TX_HDRLEN, at76_tx_callback, priv);
+ ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC);
+ if (ret)
+ printk(KERN_ERR "%s: error in tx submit urb: %d\n",
+ priv->netdev->name, ret);
+
+ kfree(txbuf);
+
+ return ret;
+}
+
+/* Go to the next information element */
+static inline void next_ie(struct ieee80211_info_element **ie)
+{
+ *ie = (struct ieee80211_info_element *)(&(*ie)->data[(*ie)->len]);
+}
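next_ie() above advances over one TLV-encoded information element: one id byte, one length byte, then len bytes of data. A standalone sketch of walking a buffer with the same pointer step; the struct and buffer here are illustrative, not the driver's own types:

#include <stdio.h>

struct info_element {			/* same id/len/data layout the driver assumes */
	unsigned char id;
	unsigned char len;
	unsigned char data[];
};

int main(void)
{
	/* SSID "ab" (id 0), then supported rates 0x82 0x84 (id 1) */
	unsigned char buf[] = { 0x00, 0x02, 'a', 'b', 0x01, 0x02, 0x82, 0x84 };
	struct info_element *ie = (struct info_element *)buf;
	unsigned char *end = buf + sizeof(buf);

	while ((unsigned char *)ie + 2 <= end &&
	       (unsigned char *)ie + 2 + ie->len <= end) {
		printf("ie id %u len %u\n", ie->id, ie->len);
		/* same step as next_ie(): jump past this element's data */
		ie = (struct info_element *)&ie->data[ie->len];
	}
	return 0;
}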
+
+/* Challenge is the challenge string (in TLV format) we got with seq_nr 2
+ for shared secret authentication only; we send it back WEP encrypted with
+ seq_nr 3 to prove we have the correct WEP key. Otherwise it is NULL. */
+static int at76_auth_req(struct at76_priv *priv, struct bss_info *bss,
+ int seq_nr, struct ieee80211_info_element *challenge)
+{
+ struct at76_tx_buffer *tx_buffer;
+ struct ieee80211_hdr_3addr *mgmt;
+ struct ieee80211_auth *req;
+ int buf_len = (seq_nr != 3 ? AUTH_FRAME_SIZE :
+ AUTH_FRAME_SIZE + 1 + 1 + challenge->len);
+
+ BUG_ON(!bss);
+ BUG_ON(seq_nr == 3 && !challenge);
+ tx_buffer = kmalloc(buf_len + MAX_PADDING_SIZE, GFP_ATOMIC);
+ if (!tx_buffer)
+ return -ENOMEM;
+
+ req = (struct ieee80211_auth *)tx_buffer->packet;
+ mgmt = &req->header;
+
+ /* make wireless header */
+ /* first auth msg is not encrypted, only the second (seq_nr == 3) */
+ mgmt->frame_ctl =
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH |
+ (seq_nr == 3 ? IEEE80211_FCTL_PROTECTED : 0));
+
+ mgmt->duration_id = cpu_to_le16(0x8000);
+ memcpy(mgmt->addr1, bss->bssid, ETH_ALEN);
+ memcpy(mgmt->addr2, priv->netdev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->addr3, bss->bssid, ETH_ALEN);
+ mgmt->seq_ctl = cpu_to_le16(0);
+
+ req->algorithm = cpu_to_le16(priv->auth_mode);
+ req->transaction = cpu_to_le16(seq_nr);
+ req->status = cpu_to_le16(0);
+
+ if (seq_nr == 3)
+ memcpy(req->info_element, challenge, 1 + 1 + challenge->len);
+
+ /* init. at76_priv tx header */
+ tx_buffer->wlength = cpu_to_le16(buf_len - AT76_TX_HDRLEN);
+ at76_dbg(DBG_TX_MGMT, "%s: AuthReq bssid %s alg %d seq_nr %d",
+ priv->netdev->name, mac2str(mgmt->addr3),
+ le16_to_cpu(req->algorithm), le16_to_cpu(req->transaction));
+ if (seq_nr == 3)
+ at76_dbg(DBG_TX_MGMT, "%s: AuthReq challenge: %s ...",
+ priv->netdev->name, hex2str(req->info_element, 18));
+
+ /* either send immediately (if no data tx is pending)
+ or put it in the pending list */
+ return at76_tx_mgmt(priv, tx_buffer);
+}
+
+static int at76_assoc_req(struct at76_priv *priv, struct bss_info *bss)
+{
+ struct at76_tx_buffer *tx_buffer;
+ struct ieee80211_hdr_3addr *mgmt;
+ struct ieee80211_assoc_request *req;
+ struct ieee80211_info_element *ie;
+ char *essid;
+ int essid_len;
+ u16 capa;
+
+ BUG_ON(!bss);
+
+ tx_buffer = kmalloc(ASSOCREQ_MAX_SIZE + MAX_PADDING_SIZE, GFP_ATOMIC);
+ if (!tx_buffer)
+ return -ENOMEM;
+
+ req = (struct ieee80211_assoc_request *)tx_buffer->packet;
+ mgmt = &req->header;
+ ie = req->info_element;
+
+ /* make wireless header */
+ mgmt->frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ASSOC_REQ);
+
+ mgmt->duration_id = cpu_to_le16(0x8000);
+ memcpy(mgmt->addr1, bss->bssid, ETH_ALEN);
+ memcpy(mgmt->addr2, priv->netdev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->addr3, bss->bssid, ETH_ALEN);
+ mgmt->seq_ctl = cpu_to_le16(0);
+
+ /* we must set the Privacy bit in the capabilities to ensure an
+ Agere-based AP with optional WEP transmits encrypted frames
+ to us. APs only set the Privacy bit in their capabilities
+ if WEP is mandatory in the BSS! */
+ capa = bss->capa;
+ if (priv->wep_enabled)
+ capa |= WLAN_CAPABILITY_PRIVACY;
+ if (priv->preamble_type != PREAMBLE_TYPE_LONG)
+ capa |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+ req->capability = cpu_to_le16(capa);
+
+ req->listen_interval = cpu_to_le16(2 * bss->beacon_interval);
+
+ /* write TLV data elements */
+
+ ie->id = MFIE_TYPE_SSID;
+ ie->len = bss->ssid_len;
+ memcpy(ie->data, bss->ssid, bss->ssid_len);
+ next_ie(&ie);
+
+ ie->id = MFIE_TYPE_RATES;
+ ie->len = sizeof(hw_rates);
+ memcpy(ie->data, hw_rates, sizeof(hw_rates));
+ next_ie(&ie); /* ie points behind the supp_rates field */
+
+ /* init. at76_priv tx header */
+ tx_buffer->wlength = cpu_to_le16((u8 *)ie - (u8 *)mgmt);
+
+ ie = req->info_element;
+ essid = ie->data;
+ essid_len = min_t(int, IW_ESSID_MAX_SIZE, ie->len);
+
+ next_ie(&ie); /* points to IE of rates now */
+ at76_dbg(DBG_TX_MGMT,
+ "%s: AssocReq bssid %s capa 0x%04x ssid %.*s rates %s",
+ priv->netdev->name, mac2str(mgmt->addr3),
+ le16_to_cpu(req->capability), essid_len, essid,
+ hex2str(ie->data, ie->len));
+
+ /* either send immediately (if no data tx is pending)
+ or put it in the pending list */
+ return at76_tx_mgmt(priv, tx_buffer);
+}
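The seq_nr argument of at76_auth_req() follows the usual 802.11 authentication exchange: the station sends seq 1, the AP answers seq 2 (carrying a challenge IE for shared-key auth), the station echoes the challenge WEP-encrypted as seq 3, and the AP finishes with seq 4; open-system auth is done after seq 2. A hedged sketch of how a receive-path handler might drive the two helpers above - the handler itself and the status check are illustrative only, not code from this patch:

/* Illustrative only: 'resp' would be the parsed authentication frame
   received from the AP. */
static void example_handle_auth_resp(struct at76_priv *priv,
				     struct bss_info *bss,
				     struct ieee80211_auth *resp)
{
	u16 seq = le16_to_cpu(resp->transaction);

	if (le16_to_cpu(resp->status) != WLAN_STATUS_SUCCESS)
		return;		/* authentication was refused */

	if (seq == 2 && priv->auth_mode == WLAN_AUTH_SHARED_KEY)
		/* echo the challenge IE back, WEP encrypted, as seq 3 */
		at76_auth_req(priv, bss, 3, resp->info_element);
	else
		/* open system done after seq 2, shared key after seq 4 */
		at76_assoc_req(priv, bss);
}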
+
+/* We got to check the bss_list for old entries */
+static void at76_bss_list_timeout(unsigned long par)
+{
+ struct at76_priv *priv = (struct at76_priv *)par;
+ unsigned long flags;
+ struct list_head *lptr, *nptr;
+ struct bss_info *ptr;
+
+ spin_lock_irqsave(&priv->bss_list_spinlock, flags);
+
+ list_for_each_safe(lptr, nptr, &priv->bss_list) {
+
+ ptr = list_entry(lptr, struct bss_info, list);
+
+ if (ptr != priv->curr_bss
+ && time_after(jiffies, ptr->last_rx + BSS_LIST_TIMEOUT)) {
+ at76_dbg(DBG_BSS_TABLE_RM,
+ "%s: bss_list: removing old BSS %s ch %d",
+ priv->netdev->name, mac2str(ptr->bssid),
+ ptr->channel);
+ list_del(&ptr->list);
+ kfree(ptr);
+ }
+ }
+ spin_unlock_irqrestore(&priv->bss_list_spinlock, flags);
+ /* restart the timer */
+ mod_timer(&priv->bss_list_timer, jiffies + BSS_LIST_TIMEOUT);
+}
+
+static inline void at76_set_mac_state(struct at76_priv *priv,
+ enum mac_state mac_state)
+{
+ at76_dbg(DBG_MAC_STATE, "%s state: %s", priv->netdev->name,
+ mac_states[mac_state]);
+ priv->mac_state = mac_state;
+}
+
+static void at76_dump_bss_table(struct at76_priv *priv)
+{
+ struct bss_info *ptr;
+ unsigned long flags;
+ struct list_head *lptr;
+
+ spin_lock_irqsave(&priv->bss_list_spinlock, flags);
+
+ at76_dbg(DBG_BSS_TABLE, "%s BSS table (curr=%p):", priv->netdev->name,
+ priv->curr_bss);
+
+ list_for_each(lptr, &priv->bss_list) {
+ ptr = list_entry(lptr, struct bss_info, list);
+ at76_dbg(DBG_BSS_TABLE, "0x%p: bssid %s channel %d ssid %.*s "
+ "(%s) capa 0x%04x rates %s rssi %d link %d noise %d",
+ ptr, mac2str(ptr->bssid), ptr->channel, ptr->ssid_len,
+ ptr->ssid, hex2str(ptr->ssid, ptr->ssid_len),
+ ptr->capa, hex2str(ptr->rates, ptr->rates_len),
+ ptr->rssi, ptr->link_qual, ptr->noise_level);
+ }
+ spin_unlock_irqrestore(&priv->bss_list_spinlock, flags);
+}
+
+/* Called upon successful association to mark interface as connected */
+static void at76_work_assoc_done(struct work_struct *work)
+{
+ struct at76_priv *priv = container_of(work, struct at76_priv,
+ work_assoc_done);
+
+ mutex_lock(&priv->mtx);
+
+ WARN_ON(priv->mac_state != MAC_ASSOC);
+ WARN_ON(!priv->curr_bss);
+ if (priv->mac_state != MAC_ASSOC || !priv->curr_bss)
+ goto exit;
+
+ if (priv->iw_mode == IW_MODE_INFRA) {
+ if (priv->pm_mode != AT76_PM_OFF) {
+ /* calculate the listen interval in units of
+ beacon intervals of the curr_bss */
+ u32 pm_period_beacon = (priv->pm_period >> 10) /
+ priv->curr_bss->beacon_interval;
+
+ pm_period_beacon = max(pm_period_beacon, 2u);
+ pm_period_beacon = min(pm_period_beacon, 0xffffu);
+
+ at76_dbg(DBG_PM,
+ "%s: pm_mode %d assoc id 0x%x listen int %d",
+ priv->netdev->name, priv->pm_mode,
+ priv->assoc_id, pm_period_beacon);
+
+ at76_set_associd(priv, priv->assoc_id);
+ at76_set_listen_interval(priv, (u16)pm_period_beacon);
+ }
+ schedule_delayed_work(&priv->dwork_beacon, BEACON_TIMEOUT);
+ }
+ at76_set_pm_mode(priv);
+
+ netif_carrier_on(priv->netdev);
+ netif_wake_queue(priv->netdev);
+ at76_set_mac_state(priv, MAC_CONNECTED);
+ at76_iwevent_bss_connect(priv->netdev, priv->curr_bss->bssid);
+ at76_dbg(DBG_PROGRESS, "%s: connected to BSSID %s",
+ priv->netdev->name, mac2str(priv->curr_bss->bssid));
+
+exit:
+ mutex_unlock(&priv->mtx);
+}
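The listen-interval computation in at76_work_assoc_done() assumes pm_period is kept in microseconds: the shift by 10 divides by 1024 to get TU, which is then divided by the beacon interval (also in TU) and clamped to the range [2, 0xffff]. A standalone worked example with invented values, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned int pm_period = 1000000;	/* example: 1 s requested, in us (assumed) */
	unsigned int beacon_interval = 100;	/* typical beacon interval, in TU */

	/* same computation as at76_work_assoc_done(): us -> TU, then beacons */
	unsigned int listen = (pm_period >> 10) / beacon_interval;

	if (listen < 2)
		listen = 2;
	if (listen > 0xffff)
		listen = 0xffff;

	/* prints: listen interval = 9 beacon intervals */
	printf("listen interval = %u beacon intervals\n", listen);
	return 0;
}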
+
+/* We only store the new mac address in the netdev struct;
+ it gets set when the netdev is opened. */
+static int at76_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *mac = addr;
+ memcpy(netdev->dev_addr, mac->sa_data, ETH_ALEN);
+ return 1;
+}
+
+static struct net_device_stats *at76_get_stats(struct net_device *netdev)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ return &priv->stats;
+}
+
+static struct iw_statistics *at76_get_wireless_stats(struct net_device *netdev)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ at76_dbg(DBG_IOCTL, "RETURN qual %d level %d noise %d updated %d",
+ priv->wstats.qual.qual, priv->wstats.qual.level,
+ priv->wstats.qual.noise, priv->wstats.qual.updated);
+
+ return &priv->wstats;
+}
+
+static void at76_set_multicast(struct net_device *netdev)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int promisc;
+
+ promisc = ((netdev->flags & IFF_PROMISC) != 0);
+ if (promisc != priv->promisc) {
+ /* This gets called in interrupt, must reschedule */
+ priv->promisc = promisc;
+ schedule_work(&priv->work_set_promisc);
+ }
+}
+
+/* Stop all network activity, flush all pending tasks */
+static void at76_quiesce(struct at76_priv *priv)
+{
+ unsigned long flags;
+
+ netif_stop_queue(priv->netdev);
+ netif_carrier_off(priv->netdev);
+
+ at76_set_mac_state(priv, MAC_INIT);
+
+ cancel_delayed_work(&priv->dwork_get_scan);
+ cancel_delayed_work(&priv->dwork_beacon);
+ cancel_delayed_work(&priv->dwork_auth);
+ cancel_delayed_work(&priv->dwork_assoc);
+ cancel_delayed_work(&priv->dwork_restart);
+
+ spin_lock_irqsave(&priv->mgmt_spinlock, flags);
+ kfree(priv->next_mgmt_bulk);
+ priv->next_mgmt_bulk = NULL;
+ spin_unlock_irqrestore(&priv->mgmt_spinlock, flags);
+}
+
+/*******************************************************************************
+ * at76_priv implementations of iw_handler functions:
+ */
+static int at76_iw_handler_commit(struct net_device *netdev,
+ struct iw_request_info *info,
+ void *null, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ at76_dbg(DBG_IOCTL, "%s %s: restarting the device", netdev->name,
+ __func__);
+
+ if (priv->mac_state != MAC_INIT)
+ at76_quiesce(priv);
+
+ /* Wait half a second before the restart so that subsequent
+ * requests from the same iwconfig call are handled in a single restart */
+ schedule_delayed_work(&priv->dwork_restart, HZ / 2);
+
+ return 0;
+}
+
+static int at76_iw_handler_get_name(struct net_device *netdev,
+ struct iw_request_info *info,
+ char *name, char *extra)
+{
+ strcpy(name, "IEEE 802.11b");
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWNAME - name %s", netdev->name, name);
+ return 0;
+}
+
+static int at76_iw_handler_set_freq(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int chan = -1;
+ int ret = -EIWCOMMIT;
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWFREQ - freq.m %d freq.e %d",
+ netdev->name, freq->m, freq->e);
+
+ if ((freq->e == 0) && (freq->m <= 1000))
+ /* Setting by channel number */
+ chan = freq->m;
+ else {
+ /* Setting by frequency - search the table */
+ int mult = 1;
+ int i;
+
+ for (i = 0; i < (6 - freq->e); i++)
+ mult *= 10;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (freq->m == (channel_frequency[i] * mult))
+ chan = i + 1;
+ }
+ }
+
+ if (chan < 1 || !priv->domain)
+ /* non-positive channels are invalid
+ * we need a domain info to set the channel
+ * either that or an invalid frequency was
+ * provided by the user */
+ ret = -EINVAL;
+ else if (!(priv->domain->channel_map & (1 << (chan - 1)))) {
+ printk(KERN_INFO "%s: channel %d not allowed for domain %s\n",
+ priv->netdev->name, chan, priv->domain->name);
+ ret = -EINVAL;
+ }
+
+ if (ret == -EIWCOMMIT) {
+ priv->channel = chan;
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWFREQ - ch %d", netdev->name,
+ chan);
+ }
+
+ return ret;
+}
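The frequency branch of at76_iw_handler_set_freq() accepts a Wireless Extensions frequency as mantissa and exponent (freq = m * 10^e Hz) and maps it back to a channel by scaling the MHz table above. A standalone worked example, illustrative only (one possible encoding of 2.437 GHz; the table is copied from channel_frequency[]):

#include <stdio.h>

static const long channel_frequency[] = {
	2412, 2417, 2422, 2427, 2432, 2437, 2442,
	2447, 2452, 2457, 2462, 2467, 2472, 2484
};

int main(void)
{
	int m = 2437, e = 6;		/* 2.437 GHz encoded as 2437 * 10^6 Hz */
	int chan = -1, mult = 1, i;

	for (i = 0; i < 6 - e; i++)	/* same scaling as the handler above */
		mult *= 10;
	for (i = 0; i < 14; i++)
		if (m == channel_frequency[i] * mult)
			chan = i + 1;

	/* prints: channel 6 */
	printf("channel %d\n", chan);
	return 0;
}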
+
+static int at76_iw_handler_get_freq(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ freq->m = priv->channel;
+ freq->e = 0;
+
+ if (priv->channel)
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWFREQ - freq %ld x 10e%d",
+ netdev->name, channel_frequency[priv->channel - 1], 6);
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWFREQ - ch %d", netdev->name,
+ priv->channel);
+
+ return 0;
+}
+
+static int at76_iw_handler_set_mode(struct net_device *netdev,
+ struct iw_request_info *info,
+ __u32 *mode, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWMODE - %d", netdev->name, *mode);
+
+ if ((*mode != IW_MODE_ADHOC) && (*mode != IW_MODE_INFRA) &&
+ (*mode != IW_MODE_MONITOR))
+ return -EINVAL;
+
+ priv->iw_mode = *mode;
+ if (priv->iw_mode != IW_MODE_INFRA)
+ priv->pm_mode = AT76_PM_OFF;
+
+ return -EIWCOMMIT;
+}
+
+static int at76_iw_handler_get_mode(struct net_device *netdev,
+ struct iw_request_info *info,
+ __u32 *mode, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ *mode = priv->iw_mode;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWMODE - %d", netdev->name, *mode);
+
+ return 0;
+}
+
+static int at76_iw_handler_get_range(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ /* inspired by atmel.c */
+ struct at76_priv *priv = netdev_priv(netdev);
+ struct iw_range *range = (struct iw_range *)extra;
+ int i;
+
+ data->length = sizeof(struct iw_range);
+ memset(range, 0, sizeof(struct iw_range));
+
+ /* TODO: range->throughput = xxxxxx; */
+
+ range->min_nwid = 0x0000;
+ range->max_nwid = 0x0000;
+
+ /* this driver doesn't maintain sensitivity information */
+ range->sensitivity = 0;
+
+ range->max_qual.qual = 100;
+ range->max_qual.level = 100;
+ range->max_qual.noise = 0;
+ range->max_qual.updated = IW_QUAL_NOISE_INVALID;
+
+ range->avg_qual.qual = 50;
+ range->avg_qual.level = 50;
+ range->avg_qual.noise = 0;
+ range->avg_qual.updated = IW_QUAL_NOISE_INVALID;
+
+ range->bitrate[0] = 1000000;
+ range->bitrate[1] = 2000000;
+ range->bitrate[2] = 5500000;
+ range->bitrate[3] = 11000000;
+ range->num_bitrates = 4;
+
+ range->min_rts = 0;
+ range->max_rts = MAX_RTS_THRESHOLD;
+
+ range->min_frag = MIN_FRAG_THRESHOLD;
+ range->max_frag = MAX_FRAG_THRESHOLD;
+
+ range->pmp_flags = IW_POWER_PERIOD;
+ range->pmt_flags = IW_POWER_ON;
+ range->pm_capa = IW_POWER_PERIOD | IW_POWER_ALL_R;
+
+ range->encoding_size[0] = WEP_SMALL_KEY_LEN;
+ range->encoding_size[1] = WEP_LARGE_KEY_LEN;
+ range->num_encoding_sizes = 2;
+ range->max_encoding_tokens = WEP_KEYS;
+
+ /* both WL-240U and Linksys WUSB11 v2.6 specify 15 dBm as output power
+ - take this for all (ignore antenna gains) */
+ range->txpower[0] = 15;
+ range->num_txpower = 1;
+ range->txpower_capa = IW_TXPOW_DBM;
+
+ range->we_version_source = WIRELESS_EXT;
+ range->we_version_compiled = WIRELESS_EXT;
+
+ /* same as the values used in atmel.c */
+ range->retry_capa = IW_RETRY_LIMIT;
+ range->retry_flags = IW_RETRY_LIMIT;
+ range->r_time_flags = 0;
+ range->min_retry = 1;
+ range->max_retry = 255;
+
+ range->num_channels = NUM_CHANNELS;
+ range->num_frequency = 0;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ /* test if channel map bit is raised */
+ if (priv->domain->channel_map & (0x1 << i)) {
+ range->num_frequency += 1;
+
+ range->freq[i].i = i + 1;
+ range->freq[i].m = channel_frequency[i] * 100000;
+ range->freq[i].e = 1; /* freq * 10^1 */
+ }
+ }
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWRANGE", netdev->name);
+
+ return 0;
+}
+
+static int at76_iw_handler_set_spy(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int ret = 0;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWSPY - number of addresses %d",
+ netdev->name, data->length);
+
+ spin_lock_bh(&priv->spy_spinlock);
+ ret = iw_handler_set_spy(priv->netdev, info, (union iwreq_data *)data,
+ extra);
+ spin_unlock_bh(&priv->spy_spinlock);
+
+ return ret;
+}
+
+static int at76_iw_handler_get_spy(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+
+ struct at76_priv *priv = netdev_priv(netdev);
+ int ret = 0;
+
+ spin_lock_bh(&priv->spy_spinlock);
+ ret = iw_handler_get_spy(priv->netdev, info,
+ (union iwreq_data *)data, extra);
+ spin_unlock_bh(&priv->spy_spinlock);
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWSPY - number of addresses %d",
+ netdev->name, data->length);
+
+ return ret;
+}
+
+static int at76_iw_handler_set_thrspy(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWTHRSPY - number of addresses %d)",
+ netdev->name, data->length);
+
+ spin_lock_bh(&priv->spy_spinlock);
+ ret = iw_handler_set_thrspy(netdev, info, (union iwreq_data *)data,
+ extra);
+ spin_unlock_bh(&priv->spy_spinlock);
+
+ return ret;
+}
+
+static int at76_iw_handler_get_thrspy(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ spin_lock_bh(&priv->spy_spinlock);
+ ret = iw_handler_get_thrspy(netdev, info, (union iwreq_data *)data,
+ extra);
+ spin_unlock_bh(&priv->spy_spinlock);
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWTHRSPY - number of addresses %d)",
+ netdev->name, data->length);
+
+ return ret;
+}
+
+static int at76_iw_handler_set_wap(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWAP - wap/bssid %s", netdev->name,
+ mac2str(ap_addr->sa_data));
+
+ /* if the incoming address == ff:ff:ff:ff:ff:ff, the user has
+ chosen any or auto AP preference */
+ if (is_broadcast_ether_addr(ap_addr->sa_data)
+ || is_zero_ether_addr(ap_addr->sa_data))
+ priv->wanted_bssid_valid = 0;
+ else {
+ /* user wants to set a preferred AP address */
+ priv->wanted_bssid_valid = 1;
+ memcpy(priv->wanted_bssid, ap_addr->sa_data, ETH_ALEN);
+ }
+
+ return -EIWCOMMIT;
+}
+
+static int at76_iw_handler_get_wap(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ ap_addr->sa_family = ARPHRD_ETHER;
+ memcpy(ap_addr->sa_data, priv->bssid, ETH_ALEN);
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWAP - wap/bssid %s", netdev->name,
+ mac2str(ap_addr->sa_data));
+
+ return 0;
+}
+
+static int at76_iw_handler_set_scan(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int ret = 0;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWSCAN", netdev->name);
+
+ if (mutex_lock_interruptible(&priv->mtx))
+ return -EINTR;
+
+ if (!netif_running(netdev)) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ /* jal: we don't allow "iwlist ethX scan" while we are
+ in monitor mode */
+ if (priv->iw_mode == IW_MODE_MONITOR) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* Discard old scan results */
+ if ((jiffies - priv->last_scan) > (20 * HZ))
+ priv->scan_state = SCAN_IDLE;
+ priv->last_scan = jiffies;
+
+ /* Initiate a scan command */
+ if (priv->scan_state == SCAN_IN_PROGRESS) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ priv->scan_state = SCAN_IN_PROGRESS;
+
+ at76_quiesce(priv);
+
+ /* Try to do a passive or active scan if WE asks us to. */
+ if (wrqu->data.length
+ && wrqu->data.length == sizeof(struct iw_scan_req)) {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+
+ if (req->scan_type == IW_SCAN_TYPE_PASSIVE)
+ priv->scan_mode = SCAN_TYPE_PASSIVE;
+ else if (req->scan_type == IW_SCAN_TYPE_ACTIVE)
+ priv->scan_mode = SCAN_TYPE_ACTIVE;
+
+ /* Sanity check values? */
+ if (req->min_channel_time > 0)
+ priv->scan_min_time = req->min_channel_time;
+
+ if (req->max_channel_time > 0)
+ priv->scan_max_time = req->max_channel_time;
+ }
+
+ /* change to scanning state */
+ at76_set_mac_state(priv, MAC_SCANNING);
+ schedule_work(&priv->work_start_scan);
+
+exit:
+ mutex_unlock(&priv->mtx);
+ return ret;
+}
+
+static int at76_iw_handler_get_scan(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ unsigned long flags;
+ struct list_head *lptr, *nptr;
+ struct bss_info *curr_bss;
+ struct iw_event *iwe = kmalloc(sizeof(struct iw_event), GFP_KERNEL);
+ char *curr_val, *curr_pos = extra;
+ int i;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWSCAN", netdev->name);
+
+ if (!iwe)
+ return -ENOMEM;
+
+ if (priv->scan_state != SCAN_COMPLETED) {
+ /* scan not yet finished */
+ kfree(iwe);
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&priv->bss_list_spinlock, flags);
+
+ list_for_each_safe(lptr, nptr, &priv->bss_list) {
+ curr_bss = list_entry(lptr, struct bss_info, list);
+
+ iwe->cmd = SIOCGIWAP;
+ iwe->u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe->u.ap_addr.sa_data, curr_bss->bssid, 6);
+ curr_pos = iwe_stream_add_event(info, curr_pos,
+ extra + IW_SCAN_MAX_DATA, iwe,
+ IW_EV_ADDR_LEN);
+
+ iwe->u.data.length = curr_bss->ssid_len;
+ iwe->cmd = SIOCGIWESSID;
+ iwe->u.data.flags = 1;
+
+ curr_pos = iwe_stream_add_point(info, curr_pos,
+ extra + IW_SCAN_MAX_DATA, iwe,
+ curr_bss->ssid);
+
+ iwe->cmd = SIOCGIWMODE;
+ iwe->u.mode = (curr_bss->capa & WLAN_CAPABILITY_IBSS) ?
+ IW_MODE_ADHOC :
+ (curr_bss->capa & WLAN_CAPABILITY_ESS) ?
+ IW_MODE_MASTER : IW_MODE_AUTO;
+ /* IW_MODE_AUTO = 0, which I think is
+ * the most logical value to return in this case */
+ curr_pos = iwe_stream_add_event(info, curr_pos,
+ extra + IW_SCAN_MAX_DATA, iwe,
+ IW_EV_UINT_LEN);
+
+ iwe->cmd = SIOCGIWFREQ;
+ iwe->u.freq.m = curr_bss->channel;
+ iwe->u.freq.e = 0;
+ curr_pos = iwe_stream_add_event(info, curr_pos,
+ extra + IW_SCAN_MAX_DATA, iwe,
+ IW_EV_FREQ_LEN);
+
+ iwe->cmd = SIOCGIWENCODE;
+ if (curr_bss->capa & WLAN_CAPABILITY_PRIVACY)
+ iwe->u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe->u.data.flags = IW_ENCODE_DISABLED;
+
+ iwe->u.data.length = 0;
+ curr_pos = iwe_stream_add_point(info, curr_pos,
+ extra + IW_SCAN_MAX_DATA, iwe,
+ NULL);
+
+ /* Add quality statistics */
+ iwe->cmd = IWEVQUAL;
+ iwe->u.qual.noise = 0;
+ iwe->u.qual.updated =
+ IW_QUAL_NOISE_INVALID | IW_QUAL_LEVEL_UPDATED;
+ iwe->u.qual.level = (curr_bss->rssi * 100 / 42);
+ if (iwe->u.qual.level > 100)
+ iwe->u.qual.level = 100;
+ if (at76_is_intersil(priv->board_type))
+ iwe->u.qual.qual = curr_bss->link_qual;
+ else {
+ iwe->u.qual.qual = 0;
+ iwe->u.qual.updated |= IW_QUAL_QUAL_INVALID;
+ }
+ /* Add new value to event */
+ curr_pos = iwe_stream_add_event(info, curr_pos,
+ extra + IW_SCAN_MAX_DATA, iwe,
+ IW_EV_QUAL_LEN);
+
+ /* Rate: stuffing multiple values in a single event requires
+ * a bit more magic - Jean II */
+ curr_val = curr_pos + IW_EV_LCP_LEN;
+
+ iwe->cmd = SIOCGIWRATE;
+ /* Those two flags are ignored... */
+ iwe->u.bitrate.fixed = 0;
+ iwe->u.bitrate.disabled = 0;
+ /* Max 8 values */
+ for (i = 0; i < curr_bss->rates_len; i++) {
+ /* Bit rate given in 500 kb/s units (+ 0x80) */
+ iwe->u.bitrate.value =
+ ((curr_bss->rates[i] & 0x7f) * 500000);
+ /* Add new value to event */
+ curr_val = iwe_stream_add_value(info, curr_pos,
+ curr_val,
+ extra +
+ IW_SCAN_MAX_DATA, iwe,
+ IW_EV_PARAM_LEN);
+ }
+
+ /* Check if we added any event */
+ if ((curr_val - curr_pos) > IW_EV_LCP_LEN)
+ curr_pos = curr_val;
+
+ /* more information may be sent back using IWECUSTOM */
+
+ }
+
+ spin_unlock_irqrestore(&priv->bss_list_spinlock, flags);
+
+ data->length = (curr_pos - extra);
+ data->flags = 0;
+
+ kfree(iwe);
+ return 0;
+}
+
+static int at76_iw_handler_set_essid(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWESSID - %s", netdev->name, extra);
+
+ if (data->flags) {
+ memcpy(priv->essid, extra, data->length);
+ priv->essid_size = data->length;
+ } else
+ priv->essid_size = 0; /* Use any SSID */
+
+ return -EIWCOMMIT;
+}
+
+static int at76_iw_handler_get_essid(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ if (priv->essid_size) {
+ /* not the ANY ssid in priv->essid */
+ data->flags = 1;
+ data->length = priv->essid_size;
+ memcpy(extra, priv->essid, data->length);
+ } else {
+ /* the ANY ssid was specified */
+ if (priv->mac_state == MAC_CONNECTED && priv->curr_bss) {
+ /* report the SSID we have found */
+ data->flags = 1;
+ data->length = priv->curr_bss->ssid_len;
+ memcpy(extra, priv->curr_bss->ssid, data->length);
+ } else {
+ /* report ANY back */
+ data->flags = 0;
+ data->length = 0;
+ }
+ }
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWESSID - %.*s", netdev->name,
+ data->length, extra);
+
+ return 0;
+}
+
+static int at76_iw_handler_set_rate(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_param *bitrate, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int ret = -EIWCOMMIT;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWRATE - %d", netdev->name,
+ bitrate->value);
+
+ switch (bitrate->value) {
+ case -1:
+ priv->txrate = TX_RATE_AUTO;
+ break; /* auto rate */
+ case 1000000:
+ priv->txrate = TX_RATE_1MBIT;
+ break;
+ case 2000000:
+ priv->txrate = TX_RATE_2MBIT;
+ break;
+ case 5500000:
+ priv->txrate = TX_RATE_5_5MBIT;
+ break;
+ case 11000000:
+ priv->txrate = TX_RATE_11MBIT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int at76_iw_handler_get_rate(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_param *bitrate, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (priv->txrate) {
+ /* return max rate if RATE_AUTO */
+ case TX_RATE_AUTO:
+ bitrate->value = 11000000;
+ break;
+ case TX_RATE_1MBIT:
+ bitrate->value = 1000000;
+ break;
+ case TX_RATE_2MBIT:
+ bitrate->value = 2000000;
+ break;
+ case TX_RATE_5_5MBIT:
+ bitrate->value = 5500000;
+ break;
+ case TX_RATE_11MBIT:
+ bitrate->value = 11000000;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ bitrate->fixed = (priv->txrate != TX_RATE_AUTO);
+ bitrate->disabled = 0;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWRATE - %d", netdev->name,
+ bitrate->value);
+
+ return ret;
+}
+
+static int at76_iw_handler_set_rts(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_param *rts, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int ret = -EIWCOMMIT;
+ int rthr = rts->value;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWRTS - value %d disabled %s",
+ netdev->name, rts->value, (rts->disabled) ? "true" : "false");
+
+ if (rts->disabled)
+ rthr = MAX_RTS_THRESHOLD;
+
+ if ((rthr < 0) || (rthr > MAX_RTS_THRESHOLD))
+ ret = -EINVAL;
+ else
+ priv->rts_threshold = rthr;
+
+ return ret;
+}
+
+static int at76_iw_handler_get_rts(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_param *rts, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ rts->value = priv->rts_threshold;
+ rts->disabled = (rts->value >= MAX_RTS_THRESHOLD);
+ rts->fixed = 1;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWRTS - value %d disabled %s",
+ netdev->name, rts->value, (rts->disabled) ? "true" : "false");
+
+ return 0;
+}
+
+static int at76_iw_handler_set_frag(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_param *frag, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int ret = -EIWCOMMIT;
+ int fthr = frag->value;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWFRAG - value %d, disabled %s",
+ netdev->name, frag->value,
+ (frag->disabled) ? "true" : "false");
+
+ if (frag->disabled)
+ fthr = MAX_FRAG_THRESHOLD;
+
+ if ((fthr < MIN_FRAG_THRESHOLD) || (fthr > MAX_FRAG_THRESHOLD))
+ ret = -EINVAL;
+ else
+ priv->frag_threshold = fthr & ~0x1; /* get an even value */
+
+ return ret;
+}
+
+static int at76_iw_handler_get_frag(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_param *frag, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ frag->value = priv->frag_threshold;
+ frag->disabled = (frag->value >= MAX_FRAG_THRESHOLD);
+ frag->fixed = 1;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWFRAG - value %d, disabled %s",
+ netdev->name, frag->value,
+ (frag->disabled) ? "true" : "false");
+
+ return 0;
+}
+
+static int at76_iw_handler_get_txpow(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_param *power, char *extra)
+{
+ power->value = 15;
+ power->fixed = 1; /* No power control */
+ power->disabled = 0;
+ power->flags = IW_TXPOW_DBM;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWTXPOW - txpow %d dBm", netdev->name,
+ power->value);
+
+ return 0;
+}
+
+/* jal: short retry is handled by the firmware (at least 0.90.x),
+ while long retry is not (?) */
+static int at76_iw_handler_set_retry(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_param *retry, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int ret = -EIWCOMMIT;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWRETRY disabled %d flags 0x%x val %d",
+ netdev->name, retry->disabled, retry->flags, retry->value);
+
+ if (!retry->disabled && (retry->flags & IW_RETRY_LIMIT)) {
+ if ((retry->flags & IW_RETRY_MIN) ||
+ !(retry->flags & IW_RETRY_MAX))
+ priv->short_retry_limit = retry->value;
+ else
+ ret = -EINVAL;
+ } else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/* Adapted (ripped) from atmel.c */
+static int at76_iw_handler_get_retry(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_param *retry, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWRETRY", netdev->name);
+
+ retry->disabled = 0; /* Can't be disabled */
+ retry->flags = IW_RETRY_LIMIT;
+ retry->value = priv->short_retry_limit;
+
+ return 0;
+}
+
+static int at76_iw_handler_set_encode(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_point *encoding, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int index = (encoding->flags & IW_ENCODE_INDEX) - 1;
+ int len = encoding->length;
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCSIWENCODE - enc.flags %08x "
+ "pointer %p len %d", netdev->name, encoding->flags,
+ encoding->pointer, encoding->length);
+ at76_dbg(DBG_IOCTL,
+ "%s: SIOCSIWENCODE - old wepstate: enabled %s key_id %d "
+ "auth_mode %s", netdev->name,
+ (priv->wep_enabled) ? "true" : "false", priv->wep_key_id,
+ (priv->auth_mode ==
+ WLAN_AUTH_SHARED_KEY) ? "restricted" : "open");
+
+ /* take the old default key if index is invalid */
+ if ((index < 0) || (index >= WEP_KEYS))
+ index = priv->wep_key_id;
+
+ if (len > 0) {
+ if (len > WEP_LARGE_KEY_LEN)
+ len = WEP_LARGE_KEY_LEN;
+
+ memset(priv->wep_keys[index], 0, WEP_KEY_LEN);
+ memcpy(priv->wep_keys[index], extra, len);
+ priv->wep_keys_len[index] = (len <= WEP_SMALL_KEY_LEN) ?
+ WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN;
+ priv->wep_enabled = 1;
+ }
+
+ priv->wep_key_id = index;
+ priv->wep_enabled = ((encoding->flags & IW_ENCODE_DISABLED) == 0);
+
+ if (encoding->flags & IW_ENCODE_RESTRICTED)
+ priv->auth_mode = WLAN_AUTH_SHARED_KEY;
+ if (encoding->flags & IW_ENCODE_OPEN)
+ priv->auth_mode = WLAN_AUTH_OPEN;
+
+ at76_dbg(DBG_IOCTL,
+ "%s: SIOCSIWENCODE - new wepstate: enabled %s key_id %d "
+ "key_len %d auth_mode %s", netdev->name,
+ (priv->wep_enabled) ? "true" : "false", priv->wep_key_id + 1,
+ priv->wep_keys_len[priv->wep_key_id],
+ (priv->auth_mode ==
+ WLAN_AUTH_SHARED_KEY) ? "restricted" : "open");
+
+ return -EIWCOMMIT;
+}
+
+static int at76_iw_handler_get_encode(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_point *encoding, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int index = (encoding->flags & IW_ENCODE_INDEX) - 1;
+
+ if ((index < 0) || (index >= WEP_KEYS))
+ index = priv->wep_key_id;
+
+ encoding->flags =
+ (priv->auth_mode == WLAN_AUTH_SHARED_KEY) ?
+ IW_ENCODE_RESTRICTED : IW_ENCODE_OPEN;
+
+ if (!priv->wep_enabled)
+ encoding->flags |= IW_ENCODE_DISABLED;
+
+ if (encoding->pointer) {
+ encoding->length = priv->wep_keys_len[index];
+
+ memcpy(extra, priv->wep_keys[index], priv->wep_keys_len[index]);
+
+ encoding->flags |= (index + 1);
+ }
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWENCODE - enc.flags %08x "
+ "pointer %p len %d", netdev->name, encoding->flags,
+ encoding->pointer, encoding->length);
+ at76_dbg(DBG_IOCTL,
+ "%s: SIOCGIWENCODE - wepstate: enabled %s key_id %d "
+ "key_len %d auth_mode %s", netdev->name,
+ (priv->wep_enabled) ? "true" : "false", priv->wep_key_id + 1,
+ priv->wep_keys_len[priv->wep_key_id],
+ (priv->auth_mode ==
+ WLAN_AUTH_SHARED_KEY) ? "restricted" : "open");
+
+ return 0;
+}
+
+static int at76_iw_handler_set_power(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_param *prq, char *extra)
+{
+ int err = -EIWCOMMIT;
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ at76_dbg(DBG_IOCTL,
+ "%s: SIOCSIWPOWER - disabled %s flags 0x%x value 0x%x",
+ netdev->name, (prq->disabled) ? "true" : "false", prq->flags,
+ prq->value);
+
+ if (prq->disabled)
+ priv->pm_mode = AT76_PM_OFF;
+ else {
+ switch (prq->flags & IW_POWER_MODE) {
+ case IW_POWER_ALL_R:
+ case IW_POWER_ON:
+ break;
+ default:
+ err = -EINVAL;
+ goto exit;
+ }
+ if (prq->flags & IW_POWER_PERIOD)
+ priv->pm_period = prq->value;
+
+ if (prq->flags & IW_POWER_TIMEOUT) {
+ err = -EINVAL;
+ goto exit;
+ }
+ priv->pm_mode = AT76_PM_ON;
+ }
+exit:
+ return err;
+}
+
+static int at76_iw_handler_get_power(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_param *power, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ power->disabled = (priv->pm_mode == AT76_PM_OFF);
+ if (!power->disabled) {
+ power->flags = IW_POWER_PERIOD | IW_POWER_ALL_R;
+ power->value = priv->pm_period;
+ }
+
+ at76_dbg(DBG_IOCTL, "%s: SIOCGIWPOWER - %s flags 0x%x value 0x%x",
+ netdev->name, power->disabled ? "disabled" : "enabled",
+ power->flags, power->value);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Private IOCTLS
+ */
+static int at76_iw_set_short_preamble(struct net_device *netdev,
+ struct iw_request_info *info, char *name,
+ char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int val = *((int *)name);
+ int ret = -EIWCOMMIT;
+
+ at76_dbg(DBG_IOCTL, "%s: AT76_SET_SHORT_PREAMBLE, %d",
+ netdev->name, val);
+
+ if (val < PREAMBLE_TYPE_LONG || val > PREAMBLE_TYPE_AUTO)
+ ret = -EINVAL;
+ else
+ priv->preamble_type = val;
+
+ return ret;
+}
+
+static int at76_iw_get_short_preamble(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ snprintf(wrqu->name, sizeof(wrqu->name), "%s (%d)",
+ preambles[priv->preamble_type], priv->preamble_type);
+ return 0;
+}
+
+static int at76_iw_set_debug(struct net_device *netdev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ char *ptr;
+ u32 val;
+
+ if (data->length > 0) {
+ val = simple_strtol(extra, &ptr, 0);
+
+ if (ptr == extra)
+ val = DBG_DEFAULTS;
+
+ at76_dbg(DBG_IOCTL, "%s: AT76_SET_DEBUG input %d: %s -> 0x%x",
+ netdev->name, data->length, extra, val);
+ } else
+ val = DBG_DEFAULTS;
+
+ at76_dbg(DBG_IOCTL, "%s: AT76_SET_DEBUG, old 0x%x, new 0x%x",
+ netdev->name, at76_debug, val);
+
+ /* jal: some more output to pin down lockups */
+ at76_dbg(DBG_IOCTL, "%s: netif running %d queue_stopped %d "
+ "carrier_ok %d", netdev->name, netif_running(netdev),
+ netif_queue_stopped(netdev), netif_carrier_ok(netdev));
+
+ at76_debug = val;
+
+ return 0;
+}
+
+static int at76_iw_get_debug(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ snprintf(wrqu->name, sizeof(wrqu->name), "0x%08x", at76_debug);
+ return 0;
+}
+
+static int at76_iw_set_powersave_mode(struct net_device *netdev,
+ struct iw_request_info *info, char *name,
+ char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int val = *((int *)name);
+ int ret = -EIWCOMMIT;
+
+ at76_dbg(DBG_IOCTL, "%s: AT76_SET_POWERSAVE_MODE, %d (%s)",
+ netdev->name, val,
+ val == AT76_PM_OFF ? "active" : val == AT76_PM_ON ? "save" :
+ val == AT76_PM_SMART ? "smart save" : "<invalid>");
+ if (val < AT76_PM_OFF || val > AT76_PM_SMART)
+ ret = -EINVAL;
+ else
+ priv->pm_mode = val;
+
+ return ret;
+}
+
+static int at76_iw_get_powersave_mode(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int *param = (int *)extra;
+
+ param[0] = priv->pm_mode;
+ return 0;
+}
+
+static int at76_iw_set_scan_times(struct net_device *netdev,
+ struct iw_request_info *info, char *name,
+ char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int mint = *((int *)name);
+ int maxt = *((int *)name + 1);
+ int ret = -EIWCOMMIT;
+
+ at76_dbg(DBG_IOCTL, "%s: AT76_SET_SCAN_TIMES - min %d max %d",
+ netdev->name, mint, maxt);
+ if (mint <= 0 || maxt <= 0 || mint > maxt)
+ ret = -EINVAL;
+ else {
+ priv->scan_min_time = mint;
+ priv->scan_max_time = maxt;
+ }
+
+ return ret;
+}
+
+static int at76_iw_get_scan_times(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int *param = (int *)extra;
+
+ param[0] = priv->scan_min_time;
+ param[1] = priv->scan_max_time;
+ return 0;
+}
+
+static int at76_iw_set_scan_mode(struct net_device *netdev,
+ struct iw_request_info *info, char *name,
+ char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int val = *((int *)name);
+ int ret = -EIWCOMMIT;
+
+ at76_dbg(DBG_IOCTL, "%s: AT76_SET_SCAN_MODE - mode %s",
+ netdev->name, (val == SCAN_TYPE_ACTIVE) ? "active" :
+ (val == SCAN_TYPE_PASSIVE) ? "passive" : "<invalid>");
+
+ if (val != SCAN_TYPE_ACTIVE && val != SCAN_TYPE_PASSIVE)
+ ret = -EINVAL;
+ else
+ priv->scan_mode = val;
+
+ return ret;
+}
+
+static int at76_iw_get_scan_mode(struct net_device *netdev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int *param = (int *)extra;
+
+ param[0] = priv->scan_mode;
+ return 0;
+}
+
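+/* The Wireless Extensions core indexes the standard handler table by
+ * (ioctl number - SIOCIWFIRST); the macro below places each handler in the
+ * slot matching its SIOCxIW* request number. */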
+#define AT76_SET_HANDLER(h, f) [h - SIOCIWFIRST] = (iw_handler) f
+
+/* Standard wireless handlers */
+static const iw_handler at76_handlers[] = {
+ AT76_SET_HANDLER(SIOCSIWCOMMIT, at76_iw_handler_commit),
+ AT76_SET_HANDLER(SIOCGIWNAME, at76_iw_handler_get_name),
+ AT76_SET_HANDLER(SIOCSIWFREQ, at76_iw_handler_set_freq),
+ AT76_SET_HANDLER(SIOCGIWFREQ, at76_iw_handler_get_freq),
+ AT76_SET_HANDLER(SIOCSIWMODE, at76_iw_handler_set_mode),
+ AT76_SET_HANDLER(SIOCGIWMODE, at76_iw_handler_get_mode),
+ AT76_SET_HANDLER(SIOCGIWRANGE, at76_iw_handler_get_range),
+ AT76_SET_HANDLER(SIOCSIWSPY, at76_iw_handler_set_spy),
+ AT76_SET_HANDLER(SIOCGIWSPY, at76_iw_handler_get_spy),
+ AT76_SET_HANDLER(SIOCSIWTHRSPY, at76_iw_handler_set_thrspy),
+ AT76_SET_HANDLER(SIOCGIWTHRSPY, at76_iw_handler_get_thrspy),
+ AT76_SET_HANDLER(SIOCSIWAP, at76_iw_handler_set_wap),
+ AT76_SET_HANDLER(SIOCGIWAP, at76_iw_handler_get_wap),
+ AT76_SET_HANDLER(SIOCSIWSCAN, at76_iw_handler_set_scan),
+ AT76_SET_HANDLER(SIOCGIWSCAN, at76_iw_handler_get_scan),
+ AT76_SET_HANDLER(SIOCSIWESSID, at76_iw_handler_set_essid),
+ AT76_SET_HANDLER(SIOCGIWESSID, at76_iw_handler_get_essid),
+ AT76_SET_HANDLER(SIOCSIWRATE, at76_iw_handler_set_rate),
+ AT76_SET_HANDLER(SIOCGIWRATE, at76_iw_handler_get_rate),
+ AT76_SET_HANDLER(SIOCSIWRTS, at76_iw_handler_set_rts),
+ AT76_SET_HANDLER(SIOCGIWRTS, at76_iw_handler_get_rts),
+ AT76_SET_HANDLER(SIOCSIWFRAG, at76_iw_handler_set_frag),
+ AT76_SET_HANDLER(SIOCGIWFRAG, at76_iw_handler_get_frag),
+ AT76_SET_HANDLER(SIOCGIWTXPOW, at76_iw_handler_get_txpow),
+ AT76_SET_HANDLER(SIOCSIWRETRY, at76_iw_handler_set_retry),
+ AT76_SET_HANDLER(SIOCGIWRETRY, at76_iw_handler_get_retry),
+ AT76_SET_HANDLER(SIOCSIWENCODE, at76_iw_handler_set_encode),
+ AT76_SET_HANDLER(SIOCGIWENCODE, at76_iw_handler_get_encode),
+ AT76_SET_HANDLER(SIOCSIWPOWER, at76_iw_handler_set_power),
+ AT76_SET_HANDLER(SIOCGIWPOWER, at76_iw_handler_get_power)
+};
+
+#define AT76_SET_PRIV(h, f) [h - SIOCIWFIRSTPRIV] = (iw_handler) f
+
+/* Private wireless handlers */
+static const iw_handler at76_priv_handlers[] = {
+ AT76_SET_PRIV(AT76_SET_SHORT_PREAMBLE, at76_iw_set_short_preamble),
+ AT76_SET_PRIV(AT76_GET_SHORT_PREAMBLE, at76_iw_get_short_preamble),
+ AT76_SET_PRIV(AT76_SET_DEBUG, at76_iw_set_debug),
+ AT76_SET_PRIV(AT76_GET_DEBUG, at76_iw_get_debug),
+ AT76_SET_PRIV(AT76_SET_POWERSAVE_MODE, at76_iw_set_powersave_mode),
+ AT76_SET_PRIV(AT76_GET_POWERSAVE_MODE, at76_iw_get_powersave_mode),
+ AT76_SET_PRIV(AT76_SET_SCAN_TIMES, at76_iw_set_scan_times),
+ AT76_SET_PRIV(AT76_GET_SCAN_TIMES, at76_iw_get_scan_times),
+ AT76_SET_PRIV(AT76_SET_SCAN_MODE, at76_iw_set_scan_mode),
+ AT76_SET_PRIV(AT76_GET_SCAN_MODE, at76_iw_get_scan_mode),
+};
+
+/* Names and arguments of private wireless handlers */
+static const struct iw_priv_args at76_priv_args[] = {
+ /* 0 - long, 1 - short */
+ {AT76_SET_SHORT_PREAMBLE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_preamble"},
+
+ {AT76_GET_SHORT_PREAMBLE,
+ 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 10, "get_preamble"},
+
+ /* we must pass the new debug mask as a string, because iwpriv cannot
+ * parse hex numbers starting with 0x :-( */
+ {AT76_SET_DEBUG,
+ IW_PRIV_TYPE_CHAR | 10, 0, "set_debug"},
+
+ {AT76_GET_DEBUG,
+ 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 10, "get_debug"},
+
+ /* 1 - active, 2 - power save, 3 - smart power save */
+ {AT76_SET_POWERSAVE_MODE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_powersave"},
+
+ {AT76_GET_POWERSAVE_MODE,
+ 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_powersave"},
+
+ /* min_channel_time, max_channel_time */
+ {AT76_SET_SCAN_TIMES,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "set_scan_times"},
+
+ {AT76_GET_SCAN_TIMES,
+ 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, "get_scan_times"},
+
+ /* 0 - active, 1 - passive scan */
+ {AT76_SET_SCAN_MODE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_scan_mode"},
+
+ {AT76_GET_SCAN_MODE,
+ 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_scan_mode"},
+};
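+/* The private ioctls above are typically driven from user space with iwpriv,
+ * e.g. (the interface name is only an example):
+ *   iwpriv wlan0 set_scan_times 10 120
+ *   iwpriv wlan0 get_scan_mode
+ */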
+
+static const struct iw_handler_def at76_handler_def = {
+ .num_standard = ARRAY_SIZE(at76_handlers),
+ .num_private = ARRAY_SIZE(at76_priv_handlers),
+ .num_private_args = ARRAY_SIZE(at76_priv_args),
+ .standard = at76_handlers,
+ .private = at76_priv_handlers,
+ .private_args = at76_priv_args,
+ .get_wireless_stats = at76_get_wireless_stats,
+};
+
+static const u8 snapsig[] = { 0xaa, 0xaa, 0x03 };
+
+/* RFC 1042 encapsulates Ethernet frames in 802.2 SNAP (0xaa, 0xaa, 0x03) with
+ * a SNAP OUI of 0 (0x00, 0x00, 0x00) */
+static const u8 rfc1042sig[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+
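+/* Transmit one frame: convert the 802.3 frame from the stack into an 802.11
+ * data frame (keeping or prepending an RFC 1042 SNAP header), add the Atmel
+ * tx header and submit it on the bulk-out URB. */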
+static int at76_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ struct net_device_stats *stats = &priv->stats;
+ int ret = 0;
+ int wlen;
+ int submit_len;
+ struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
+ struct ieee80211_hdr_3addr *i802_11_hdr =
+ (struct ieee80211_hdr_3addr *)tx_buffer->packet;
+ u8 *payload = i802_11_hdr->payload;
+ struct ethhdr *eh = (struct ethhdr *)skb->data;
+
+ if (netif_queue_stopped(netdev)) {
+ printk(KERN_ERR "%s: %s called while netdev is stopped\n",
+ netdev->name, __func__);
+ /* skip this packet */
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (priv->tx_urb->status == -EINPROGRESS) {
+ printk(KERN_ERR "%s: %s called while tx urb is pending\n",
+ netdev->name, __func__);
+ /* skip this packet */
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (skb->len < ETH_HLEN) {
+ printk(KERN_ERR "%s: %s: skb too short (%d)\n",
+ netdev->name, __func__, skb->len);
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ at76_ledtrig_tx_activity(); /* tell ledtrigger we send a packet */
+
+ /* we can get rid of memcpy if we set netdev->hard_header_len to
+ reserve enough space, but we would need to keep the skb around */
+
+ if (ntohs(eh->h_proto) <= ETH_DATA_LEN) {
+ /* this is an 802.3 packet */
+ if (skb->len >= ETH_HLEN + sizeof(rfc1042sig)
+ && skb->data[ETH_HLEN] == rfc1042sig[0]
+ && skb->data[ETH_HLEN + 1] == rfc1042sig[1]) {
+ /* higher layer delivered SNAP header - keep it */
+ memcpy(payload, skb->data + ETH_HLEN,
+ skb->len - ETH_HLEN);
+ wlen = IEEE80211_3ADDR_LEN + skb->len - ETH_HLEN;
+ } else {
+ printk(KERN_ERR "%s: dropping non-SNAP 802.2 packet "
+ "(DSAP 0x%02x SSAP 0x%02x cntrl 0x%02x)\n",
+ priv->netdev->name, skb->data[ETH_HLEN],
+ skb->data[ETH_HLEN + 1],
+ skb->data[ETH_HLEN + 2]);
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ } else {
+ /* add RFC 1042 header in front */
+ memcpy(payload, rfc1042sig, sizeof(rfc1042sig));
+ memcpy(payload + sizeof(rfc1042sig), &eh->h_proto,
+ skb->len - offsetof(struct ethhdr, h_proto));
+ wlen = IEEE80211_3ADDR_LEN + sizeof(rfc1042sig) + skb->len -
+ offsetof(struct ethhdr, h_proto);
+ }
+
+ /* make wireless header */
+ i802_11_hdr->frame_ctl =
+ cpu_to_le16(IEEE80211_FTYPE_DATA |
+ (priv->wep_enabled ? IEEE80211_FCTL_PROTECTED : 0) |
+ (priv->iw_mode ==
+ IW_MODE_INFRA ? IEEE80211_FCTL_TODS : 0));
+
+ if (priv->iw_mode == IW_MODE_ADHOC) {
+ memcpy(i802_11_hdr->addr1, eh->h_dest, ETH_ALEN);
+ memcpy(i802_11_hdr->addr2, eh->h_source, ETH_ALEN);
+ memcpy(i802_11_hdr->addr3, priv->bssid, ETH_ALEN);
+ } else if (priv->iw_mode == IW_MODE_INFRA) {
+ memcpy(i802_11_hdr->addr1, priv->bssid, ETH_ALEN);
+ memcpy(i802_11_hdr->addr2, eh->h_source, ETH_ALEN);
+ memcpy(i802_11_hdr->addr3, eh->h_dest, ETH_ALEN);
+ }
+
+ i802_11_hdr->duration_id = cpu_to_le16(0);
+ i802_11_hdr->seq_ctl = cpu_to_le16(0);
+
+ /* setup 'Atmel' header */
+ tx_buffer->wlength = cpu_to_le16(wlen);
+ tx_buffer->tx_rate = priv->txrate;
+ /* for broadcast destination addresses, firmware 0.100.x seems to
+ choose the highest rate set with CMD_STARTUP in basic_rate_set,
+ replacing this value */
+
+ memset(tx_buffer->reserved, 0, sizeof(tx_buffer->reserved));
+
+ tx_buffer->padding = at76_calc_padding(wlen);
+ submit_len = wlen + AT76_TX_HDRLEN + tx_buffer->padding;
+
+ at76_dbg(DBG_TX_DATA_CONTENT, "%s skb->data %s", priv->netdev->name,
+ hex2str(skb->data, 32));
+ at76_dbg(DBG_TX_DATA, "%s tx: wlen 0x%x pad 0x%x rate %d hdr %s",
+ priv->netdev->name,
+ le16_to_cpu(tx_buffer->wlength),
+ tx_buffer->padding, tx_buffer->tx_rate,
+ hex2str(i802_11_hdr, sizeof(*i802_11_hdr)));
+ at76_dbg(DBG_TX_DATA_CONTENT, "%s payload %s", priv->netdev->name,
+ hex2str(payload, 48));
+
+ /* send stuff */
+ netif_stop_queue(netdev);
+ netdev->trans_start = jiffies;
+
+ usb_fill_bulk_urb(priv->tx_urb, priv->udev, priv->tx_pipe, tx_buffer,
+ submit_len, at76_tx_callback, priv);
+ ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC);
+ if (ret) {
+ stats->tx_errors++;
+ printk(KERN_ERR "%s: error in tx submit urb: %d\n",
+ netdev->name, ret);
+ if (ret == -EINVAL)
+ printk(KERN_ERR
+ "%s: -EINVAL: tx urb %p hcpriv %p complete %p\n",
+ priv->netdev->name, priv->tx_urb,
+ priv->tx_urb->hcpriv, priv->tx_urb->complete);
+ } else {
+ stats->tx_bytes += skb->len;
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+static void at76_tx_timeout(struct net_device *netdev)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ if (!priv)
+ return;
+ dev_warn(&netdev->dev, "tx timeout.");
+
+ usb_unlink_urb(priv->tx_urb);
+ priv->stats.tx_errors++;
+}
+
static int at76_submit_rx_urb(struct at76_priv *priv)
{
int ret;
@@ -1327,7 +3270,7 @@ static int at76_submit_rx_urb(struct at76_priv *priv)
if (!priv->rx_urb) {
printk(KERN_ERR "%s: %s: priv->rx_urb is NULL\n",
- wiphy_name(priv->hw->wiphy), __func__);
+ priv->netdev->name, __func__);
return -EFAULT;
}
@@ -1335,7 +3278,7 @@ static int at76_submit_rx_urb(struct at76_priv *priv)
skb = dev_alloc_skb(sizeof(struct at76_rx_buffer));
if (!skb) {
printk(KERN_ERR "%s: cannot allocate rx skbuff\n",
- wiphy_name(priv->hw->wiphy));
+ priv->netdev->name);
ret = -ENOMEM;
goto exit;
}
@@ -1355,18 +3298,110 @@ static int at76_submit_rx_urb(struct at76_priv *priv)
"usb_submit_urb returned -ENODEV");
else
printk(KERN_ERR "%s: rx, usb_submit_urb failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
}
exit:
if (ret < 0 && ret != -ENODEV)
printk(KERN_ERR "%s: cannot submit rx urb - please unload the "
"driver and/or power cycle the device\n",
- wiphy_name(priv->hw->wiphy));
+ priv->netdev->name);
return ret;
}
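+/* Open the interface: program a changed MAC address into the device,
+ * resubmit the rx URB and schedule dwork_restart to (re)configure and start
+ * the hardware. */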
+static int at76_open(struct net_device *netdev)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ int ret = 0;
+
+ at76_dbg(DBG_PROC_ENTRY, "%s(): entry", __func__);
+
+ if (mutex_lock_interruptible(&priv->mtx))
+ return -EINTR;
+
+ /* if netdev->dev_addr != priv->mac_addr we must
+ set the mac address in the device ! */
+ if (compare_ether_addr(netdev->dev_addr, priv->mac_addr)) {
+ if (at76_add_mac_address(priv, netdev->dev_addr) >= 0)
+ at76_dbg(DBG_PROGRESS, "%s: set new MAC addr %s",
+ netdev->name, mac2str(netdev->dev_addr));
+ }
+
+ priv->scan_state = SCAN_IDLE;
+ priv->last_scan = jiffies;
+
+ ret = at76_submit_rx_urb(priv);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: open: submit_rx_urb failed: %d\n",
+ netdev->name, ret);
+ goto error;
+ }
+
+ schedule_delayed_work(&priv->dwork_restart, 0);
+
+ at76_dbg(DBG_PROC_ENTRY, "%s(): end", __func__);
+error:
+ mutex_unlock(&priv->mtx);
+ return ret < 0 ? ret : 0;
+}
+
+static int at76_stop(struct net_device *netdev)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ at76_dbg(DBG_DEVSTART, "%s: ENTER", __func__);
+
+ if (mutex_lock_interruptible(&priv->mtx))
+ return -EINTR;
+
+ at76_quiesce(priv);
+
+ if (!priv->device_unplugged) {
+ /* We are called by "ifconfig ethX down", not because the
+ * device is not available anymore. */
+ at76_set_radio(priv, 0);
+
+ /* We unlink rx_urb because at76_open() re-submits it.
+ * If unplugged, at76_delete_device() takes care of it. */
+ usb_kill_urb(priv->rx_urb);
+ }
+
+ /* free the bss_list */
+ at76_free_bss_list(priv);
+
+ mutex_unlock(&priv->mtx);
+ at76_dbg(DBG_DEVSTART, "%s: EXIT", __func__);
+
+ return 0;
+}
+
+static void at76_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+
+ strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+
+ usb_make_path(priv->udev, info->bus_info, sizeof(info->bus_info));
+
+ snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d.%d-%d",
+ priv->fw_version.major, priv->fw_version.minor,
+ priv->fw_version.patch, priv->fw_version.build);
+}
+
+static u32 at76_ethtool_get_link(struct net_device *netdev)
+{
+ struct at76_priv *priv = netdev_priv(netdev);
+ return priv->mac_state == MAC_CONNECTED;
+}
+
+static struct ethtool_ops at76_ethtool_ops = {
+ .get_drvinfo = at76_ethtool_get_drvinfo,
+ .get_link = at76_ethtool_get_link,
+};
+
/* Download external firmware */
static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
{
@@ -1463,6 +3498,406 @@ exit:
return ret;
}
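+/* The at76_match_* helpers below each check one join criterion (ESSID, mode,
+ * basic rates, privacy, BSSID); at76_match_bss() combines them to pick the
+ * next candidate from the BSS table. */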
+static int at76_match_essid(struct at76_priv *priv, struct bss_info *ptr)
+{
+ /* common criteria for both modes */
+
+ int ret = (priv->essid_size == 0 /* ANY ssid */ ||
+ (priv->essid_size == ptr->ssid_len &&
+ !memcmp(priv->essid, ptr->ssid, ptr->ssid_len)));
+ if (!ret)
+ at76_dbg(DBG_BSS_MATCH,
+ "%s bss table entry %p: essid didn't match",
+ priv->netdev->name, ptr);
+ return ret;
+}
+
+static inline int at76_match_mode(struct at76_priv *priv, struct bss_info *ptr)
+{
+ int ret;
+
+ if (priv->iw_mode == IW_MODE_ADHOC)
+ ret = ptr->capa & WLAN_CAPABILITY_IBSS;
+ else
+ ret = ptr->capa & WLAN_CAPABILITY_ESS;
+ if (!ret)
+ at76_dbg(DBG_BSS_MATCH,
+ "%s bss table entry %p: mode didn't match",
+ priv->netdev->name, ptr);
+ return ret;
+}
+
+static int at76_match_rates(struct at76_priv *priv, struct bss_info *ptr)
+{
+ int i;
+
+ for (i = 0; i < ptr->rates_len; i++) {
+ u8 rate = ptr->rates[i];
+
+ if (!(rate & 0x80))
+ continue;
+
+ /* this is a basic rate we have to support
+ (see IEEE802.11, ch. 7.3.2.2) */
+ if (rate != (0x80 | hw_rates[0])
+ && rate != (0x80 | hw_rates[1])
+ && rate != (0x80 | hw_rates[2])
+ && rate != (0x80 | hw_rates[3])) {
+ at76_dbg(DBG_BSS_MATCH,
+ "%s: bss table entry %p: basic rate %02x not "
+ "supported", priv->netdev->name, ptr, rate);
+ return 0;
+ }
+ }
+
+ /* if we use short preamble, the bss must support it */
+ if (priv->preamble_type == PREAMBLE_TYPE_SHORT &&
+ !(ptr->capa & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
+ at76_dbg(DBG_BSS_MATCH,
+ "%s: %p does not support short preamble",
+ priv->netdev->name, ptr);
+ return 0;
+ } else
+ return 1;
+}
+
+static inline int at76_match_wep(struct at76_priv *priv, struct bss_info *ptr)
+{
+ if (!priv->wep_enabled && ptr->capa & WLAN_CAPABILITY_PRIVACY) {
+ /* we have disabled WEP, but the BSS signals privacy */
+ at76_dbg(DBG_BSS_MATCH,
+ "%s: bss table entry %p: requires encryption",
+ priv->netdev->name, ptr);
+ return 0;
+ }
+ /* otherwise if the BSS does not signal privacy it may well
+ accept encrypted packets from us ... */
+ return 1;
+}
+
+static inline int at76_match_bssid(struct at76_priv *priv, struct bss_info *ptr)
+{
+ if (!priv->wanted_bssid_valid ||
+ !compare_ether_addr(ptr->bssid, priv->wanted_bssid))
+ return 1;
+
+ at76_dbg(DBG_BSS_MATCH,
+ "%s: requested bssid - %s does not match",
+ priv->netdev->name, mac2str(priv->wanted_bssid));
+ at76_dbg(DBG_BSS_MATCH,
+ " AP bssid - %s of bss table entry %p",
+ mac2str(ptr->bssid), ptr);
+ return 0;
+}
+
+/**
+ * at76_match_bss - try to find a matching bss in priv->bss_list
+ *
+ * last - last bss tried
+ *
+ * last == NULL signals a new round starting with priv->bss_list.next.
+ * This function must be called with priv->bss_list_spinlock held,
+ * otherwise the timeout on bss may remove the newly chosen entry.
+ */
+static struct bss_info *at76_match_bss(struct at76_priv *priv,
+ struct bss_info *last)
+{
+ struct bss_info *ptr = NULL;
+ struct list_head *curr;
+
+ curr = last ? last->list.next : priv->bss_list.next;
+ while (curr != &priv->bss_list) {
+ ptr = list_entry(curr, struct bss_info, list);
+ if (at76_match_essid(priv, ptr) && at76_match_mode(priv, ptr)
+ && at76_match_wep(priv, ptr) && at76_match_rates(priv, ptr)
+ && at76_match_bssid(priv, ptr))
+ break;
+ curr = curr->next;
+ }
+
+ if (curr == &priv->bss_list)
+ ptr = NULL;
+ /* otherwise ptr points to the struct bss_info we have chosen */
+
+ at76_dbg(DBG_BSS_TABLE, "%s %s: returned %p", priv->netdev->name,
+ __func__, ptr);
+ return ptr;
+}
+
+/* Start joining a matching BSS, or create own IBSS */
+static void at76_work_join(struct work_struct *work)
+{
+ struct at76_priv *priv = container_of(work, struct at76_priv,
+ work_join);
+ int ret;
+ unsigned long flags;
+
+ mutex_lock(&priv->mtx);
+
+ WARN_ON(priv->mac_state != MAC_JOINING);
+ if (priv->mac_state != MAC_JOINING)
+ goto exit;
+
+ /* secure the access to priv->curr_bss ! */
+ spin_lock_irqsave(&priv->bss_list_spinlock, flags);
+ priv->curr_bss = at76_match_bss(priv, priv->curr_bss);
+ spin_unlock_irqrestore(&priv->bss_list_spinlock, flags);
+
+ if (!priv->curr_bss) {
+ /* here we haven't found a matching (i)bss ... */
+ if (priv->iw_mode == IW_MODE_ADHOC) {
+ at76_set_mac_state(priv, MAC_OWN_IBSS);
+ at76_start_ibss(priv);
+ goto exit;
+ }
+ /* haven't found a matching BSS in infra mode - try again */
+ at76_set_mac_state(priv, MAC_SCANNING);
+ schedule_work(&priv->work_start_scan);
+ goto exit;
+ }
+
+ ret = at76_join_bss(priv, priv->curr_bss);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: join_bss failed with %d\n",
+ priv->netdev->name, ret);
+ goto exit;
+ }
+
+ ret = at76_wait_completion(priv, CMD_JOIN);
+ if (ret != CMD_STATUS_COMPLETE) {
+ if (ret != CMD_STATUS_TIME_OUT)
+ printk(KERN_ERR "%s: join_bss completed with %d\n",
+ priv->netdev->name, ret);
+ else
+ printk(KERN_INFO "%s: join_bss ssid %s timed out\n",
+ priv->netdev->name,
+ mac2str(priv->curr_bss->bssid));
+
+ /* retry next BSS immediately */
+ schedule_work(&priv->work_join);
+ goto exit;
+ }
+
+ /* here we have joined the (I)BSS */
+ if (priv->iw_mode == IW_MODE_ADHOC) {
+ struct bss_info *bptr = priv->curr_bss;
+ at76_set_mac_state(priv, MAC_CONNECTED);
+ /* get ESSID, BSSID and channel for priv->curr_bss */
+ priv->essid_size = bptr->ssid_len;
+ memcpy(priv->essid, bptr->ssid, bptr->ssid_len);
+ memcpy(priv->bssid, bptr->bssid, ETH_ALEN);
+ priv->channel = bptr->channel;
+ at76_iwevent_bss_connect(priv->netdev, bptr->bssid);
+ netif_carrier_on(priv->netdev);
+ netif_start_queue(priv->netdev);
+ /* just to be sure */
+ cancel_delayed_work(&priv->dwork_get_scan);
+ cancel_delayed_work(&priv->dwork_auth);
+ cancel_delayed_work(&priv->dwork_assoc);
+ } else {
+ /* send auth req */
+ priv->retries = AUTH_RETRIES;
+ at76_set_mac_state(priv, MAC_AUTH);
+ at76_auth_req(priv, priv->curr_bss, 1, NULL);
+ at76_dbg(DBG_MGMT_TIMER,
+ "%s:%d: starting mgmt_timer + HZ", __func__, __LINE__);
+ schedule_delayed_work(&priv->dwork_auth, AUTH_TIMEOUT);
+ }
+
+exit:
+ mutex_unlock(&priv->mtx);
+}
+
+/* Reap scan results */
+static void at76_dwork_get_scan(struct work_struct *work)
+{
+ int status;
+ int ret;
+ struct at76_priv *priv = container_of(work, struct at76_priv,
+ dwork_get_scan.work);
+
+ mutex_lock(&priv->mtx);
+ WARN_ON(priv->mac_state != MAC_SCANNING);
+ if (priv->mac_state != MAC_SCANNING)
+ goto exit;
+
+ status = at76_get_cmd_status(priv->udev, CMD_SCAN);
+ if (status < 0) {
+ printk(KERN_ERR "%s: %s: at76_get_cmd_status failed with %d\n",
+ priv->netdev->name, __func__, status);
+ status = CMD_STATUS_IN_PROGRESS;
+ /* INFO: Hope it was a one-off error - if not, we treat it as a
+ scan still in progress and poll again further down */
+ }
+ at76_dbg(DBG_PROGRESS,
+ "%s %s: got cmd_status %d (state %s, need_any %d)",
+ priv->netdev->name, __func__, status,
+ mac_states[priv->mac_state], priv->scan_need_any);
+
+ if (status != CMD_STATUS_COMPLETE) {
+ if ((status != CMD_STATUS_IN_PROGRESS) &&
+ (status != CMD_STATUS_IDLE))
+ printk(KERN_ERR "%s: %s: Bad scan status: %s\n",
+ priv->netdev->name, __func__,
+ at76_get_cmd_status_string(status));
+
+ /* the first cmd status after scan start is always IDLE ->
+ start the timer to poll again until COMPLETED */
+ at76_dbg(DBG_MGMT_TIMER,
+ "%s:%d: starting mgmt_timer for %d ticks",
+ __func__, __LINE__, SCAN_POLL_INTERVAL);
+ schedule_delayed_work(&priv->dwork_get_scan,
+ SCAN_POLL_INTERVAL);
+ goto exit;
+ }
+
+ if (at76_debug & DBG_BSS_TABLE)
+ at76_dump_bss_table(priv);
+
+ if (priv->scan_need_any) {
+ ret = at76_start_scan(priv, 0);
+ if (ret < 0)
+ printk(KERN_ERR
+ "%s: %s: start_scan (ANY) failed with %d\n",
+ priv->netdev->name, __func__, ret);
+ at76_dbg(DBG_MGMT_TIMER,
+ "%s:%d: starting mgmt_timer for %d ticks", __func__,
+ __LINE__, SCAN_POLL_INTERVAL);
+ schedule_delayed_work(&priv->dwork_get_scan,
+ SCAN_POLL_INTERVAL);
+ priv->scan_need_any = 0;
+ } else {
+ priv->scan_state = SCAN_COMPLETED;
+ /* report the end of scan to user space */
+ at76_iwevent_scan_complete(priv->netdev);
+ at76_set_mac_state(priv, MAC_JOINING);
+ schedule_work(&priv->work_join);
+ }
+
+exit:
+ mutex_unlock(&priv->mtx);
+}
+
+/* Handle loss of beacons from the AP */
+static void at76_dwork_beacon(struct work_struct *work)
+{
+ struct at76_priv *priv = container_of(work, struct at76_priv,
+ dwork_beacon.work);
+
+ mutex_lock(&priv->mtx);
+ if (priv->mac_state != MAC_CONNECTED || priv->iw_mode != IW_MODE_INFRA)
+ goto exit;
+
+ /* We haven't received any beacons from our AP for BEACON_TIMEOUT */
+ printk(KERN_INFO "%s: lost beacon bssid %s\n",
+ priv->netdev->name, mac2str(priv->curr_bss->bssid));
+
+ netif_carrier_off(priv->netdev);
+ netif_stop_queue(priv->netdev);
+ at76_iwevent_bss_disconnect(priv->netdev);
+ at76_set_mac_state(priv, MAC_SCANNING);
+ schedule_work(&priv->work_start_scan);
+
+exit:
+ mutex_unlock(&priv->mtx);
+}
+
+/* Handle authentication response timeout */
+static void at76_dwork_auth(struct work_struct *work)
+{
+ struct at76_priv *priv = container_of(work, struct at76_priv,
+ dwork_auth.work);
+
+ mutex_lock(&priv->mtx);
+ WARN_ON(priv->mac_state != MAC_AUTH);
+ if (priv->mac_state != MAC_AUTH)
+ goto exit;
+
+ at76_dbg(DBG_PROGRESS, "%s: authentication response timeout",
+ priv->netdev->name);
+
+ if (priv->retries-- >= 0) {
+ at76_auth_req(priv, priv->curr_bss, 1, NULL);
+ at76_dbg(DBG_MGMT_TIMER, "%s:%d: starting mgmt_timer + HZ",
+ __func__, __LINE__);
+ schedule_delayed_work(&priv->dwork_auth, AUTH_TIMEOUT);
+ } else {
+ /* try to get next matching BSS */
+ at76_set_mac_state(priv, MAC_JOINING);
+ schedule_work(&priv->work_join);
+ }
+
+exit:
+ mutex_unlock(&priv->mtx);
+}
+
+/* Handle association response timeout */
+static void at76_dwork_assoc(struct work_struct *work)
+{
+ struct at76_priv *priv = container_of(work, struct at76_priv,
+ dwork_assoc.work);
+
+ mutex_lock(&priv->mtx);
+ WARN_ON(priv->mac_state != MAC_ASSOC);
+ if (priv->mac_state != MAC_ASSOC)
+ goto exit;
+
+ at76_dbg(DBG_PROGRESS, "%s: association response timeout",
+ priv->netdev->name);
+
+ if (priv->retries-- >= 0) {
+ at76_assoc_req(priv, priv->curr_bss);
+ at76_dbg(DBG_MGMT_TIMER, "%s:%d: starting mgmt_timer + HZ",
+ __func__, __LINE__);
+ schedule_delayed_work(&priv->dwork_assoc, ASSOC_TIMEOUT);
+ } else {
+ /* try to get next matching BSS */
+ at76_set_mac_state(priv, MAC_JOINING);
+ schedule_work(&priv->work_join);
+ }
+
+exit:
+ mutex_unlock(&priv->mtx);
+}
+
+/* Read new bssid in ad-hoc mode */
+static void at76_work_new_bss(struct work_struct *work)
+{
+ struct at76_priv *priv = container_of(work, struct at76_priv,
+ work_new_bss);
+ int ret;
+ struct mib_mac_mgmt mac_mgmt;
+
+ mutex_lock(&priv->mtx);
+
+ ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, &mac_mgmt,
+ sizeof(struct mib_mac_mgmt));
+ if (ret < 0) {
+ printk(KERN_ERR "%s: at76_get_mib failed: %d\n",
+ priv->netdev->name, ret);
+ goto exit;
+ }
+
+ at76_dbg(DBG_PROGRESS, "ibss_change = 0x%2x", mac_mgmt.ibss_change);
+ memcpy(priv->bssid, mac_mgmt.current_bssid, ETH_ALEN);
+ at76_dbg(DBG_PROGRESS, "using BSSID %s", mac2str(priv->bssid));
+
+ at76_iwevent_bss_connect(priv->netdev, priv->bssid);
+
+ priv->mib_buf.type = MIB_MAC_MGMT;
+ priv->mib_buf.size = 1;
+ priv->mib_buf.index = offsetof(struct mib_mac_mgmt, ibss_change);
+ priv->mib_buf.data.byte = 0;
+
+ ret = at76_set_mib(priv, &priv->mib_buf);
+ if (ret < 0)
+ printk(KERN_ERR "%s: set_mib (ibss change ok) failed: %d\n",
+ priv->netdev->name, ret);
+
+exit:
+ mutex_unlock(&priv->mtx);
+}
+
static int at76_startup_device(struct at76_priv *priv)
{
struct at76_card_config *ccfg = &priv->card_config;
@@ -1470,14 +3905,14 @@ static int at76_startup_device(struct at76_priv *priv)
at76_dbg(DBG_PARAMS,
"%s param: ssid %.*s (%s) mode %s ch %d wep %s key %d "
- "keylen %d", wiphy_name(priv->hw->wiphy), priv->essid_size,
- priv->essid, hex2str(priv->essid, IW_ESSID_MAX_SIZE),
+ "keylen %d", priv->netdev->name, priv->essid_size, priv->essid,
+ hex2str(priv->essid, IW_ESSID_MAX_SIZE),
priv->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra",
priv->channel, priv->wep_enabled ? "enabled" : "disabled",
priv->wep_key_id, priv->wep_keys_len[priv->wep_key_id]);
at76_dbg(DBG_PARAMS,
"%s param: preamble %s rts %d retry %d frag %d "
- "txrate %s auth_mode %d", wiphy_name(priv->hw->wiphy),
+ "txrate %s auth_mode %d", priv->netdev->name,
preambles[priv->preamble_type], priv->rts_threshold,
priv->short_retry_limit, priv->frag_threshold,
priv->txrate == TX_RATE_1MBIT ? "1MBit" : priv->txrate ==
@@ -1488,7 +3923,7 @@ static int at76_startup_device(struct at76_priv *priv)
at76_dbg(DBG_PARAMS,
"%s param: pm_mode %d pm_period %d auth_mode %s "
"scan_times %d %d scan_mode %s",
- wiphy_name(priv->hw->wiphy), priv->pm_mode, priv->pm_period,
+ priv->netdev->name, priv->pm_mode, priv->pm_period,
priv->auth_mode == WLAN_AUTH_OPEN ? "open" : "shared_secret",
priv->scan_min_time, priv->scan_max_time,
priv->scan_mode == SCAN_TYPE_ACTIVE ? "active" : "passive");
@@ -1522,8 +3957,7 @@ static int at76_startup_device(struct at76_priv *priv)
ccfg->ssid_len = priv->essid_size;
ccfg->wep_default_key_id = priv->wep_key_id;
- memcpy(ccfg->wep_default_key_value, priv->wep_keys,
- sizeof(priv->wep_keys));
+ memcpy(ccfg->wep_default_key_value, priv->wep_keys, 4 * WEP_KEY_LEN);
ccfg->short_preamble = priv->preamble_type;
ccfg->beacon_period = cpu_to_le16(priv->beacon_period);
@@ -1532,7 +3966,7 @@ static int at76_startup_device(struct at76_priv *priv)
sizeof(struct at76_card_config));
if (ret < 0) {
printk(KERN_ERR "%s: at76_set_card_command failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
return ret;
}
@@ -1578,6 +4012,69 @@ static int at76_startup_device(struct at76_priv *priv)
return 0;
}
+/* Restart the interface */
+static void at76_dwork_restart(struct work_struct *work)
+{
+ struct at76_priv *priv = container_of(work, struct at76_priv,
+ dwork_restart.work);
+
+ mutex_lock(&priv->mtx);
+
+ netif_carrier_off(priv->netdev); /* stop netdev watchdog */
+ netif_stop_queue(priv->netdev); /* stop tx data packets */
+
+ at76_startup_device(priv);
+
+ if (priv->iw_mode != IW_MODE_MONITOR) {
+ priv->netdev->type = ARPHRD_ETHER;
+ at76_set_mac_state(priv, MAC_SCANNING);
+ schedule_work(&priv->work_start_scan);
+ } else {
+ priv->netdev->type = ARPHRD_IEEE80211_RADIOTAP;
+ at76_start_monitor(priv);
+ }
+
+ mutex_unlock(&priv->mtx);
+}
+
+/* Initiate scanning */
+static void at76_work_start_scan(struct work_struct *work)
+{
+ struct at76_priv *priv = container_of(work, struct at76_priv,
+ work_start_scan);
+ int ret;
+
+ mutex_lock(&priv->mtx);
+
+ WARN_ON(priv->mac_state != MAC_SCANNING);
+ if (priv->mac_state != MAC_SCANNING)
+ goto exit;
+
+ /* only clear the bss list when a scan is actively initiated,
+ * otherwise simply rely on at76_bss_list_timeout */
+ if (priv->scan_state == SCAN_IN_PROGRESS) {
+ at76_free_bss_list(priv);
+ priv->scan_need_any = 1;
+ } else
+ priv->scan_need_any = 0;
+
+ ret = at76_start_scan(priv, 1);
+
+ if (ret < 0)
+ printk(KERN_ERR "%s: %s: start_scan failed with %d\n",
+ priv->netdev->name, __func__, ret);
+ else {
+ at76_dbg(DBG_MGMT_TIMER,
+ "%s:%d: starting mgmt_timer for %d ticks",
+ __func__, __LINE__, SCAN_POLL_INTERVAL);
+ schedule_delayed_work(&priv->dwork_get_scan,
+ SCAN_POLL_INTERVAL);
+ }
+
+exit:
+ mutex_unlock(&priv->mtx);
+}
+
/* Enable or disable promiscuous mode */
static void at76_work_set_promisc(struct work_struct *work)
{
@@ -1595,7 +4092,7 @@ static void at76_work_set_promisc(struct work_struct *work)
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
printk(KERN_ERR "%s: set_mib (promiscuous_mode) failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
+ priv->netdev->name, ret);
mutex_unlock(&priv->mtx);
}
@@ -1611,759 +4108,1088 @@ static void at76_work_submit_rx(struct work_struct *work)
mutex_unlock(&priv->mtx);
}
-static void at76_rx_tasklet(unsigned long param)
+/* We got an association response */
+static void at76_rx_mgmt_assoc(struct at76_priv *priv,
+ struct at76_rx_buffer *buf)
{
- struct urb *urb = (struct urb *)param;
- struct at76_priv *priv = urb->context;
- struct at76_rx_buffer *buf;
- struct ieee80211_rx_status rx_status = { 0 };
-
- if (priv->device_unplugged) {
- at76_dbg(DBG_DEVSTART, "device unplugged");
- if (urb)
- at76_dbg(DBG_DEVSTART, "urb status %d", urb->status);
+ struct ieee80211_assoc_response *resp =
+ (struct ieee80211_assoc_response *)buf->packet;
+ u16 assoc_id = le16_to_cpu(resp->aid);
+ u16 status = le16_to_cpu(resp->status);
+
+ at76_dbg(DBG_RX_MGMT, "%s: rx AssocResp bssid %s capa 0x%04x status "
+ "0x%04x assoc_id 0x%04x rates %s", priv->netdev->name,
+ mac2str(resp->header.addr3), le16_to_cpu(resp->capability),
+ status, assoc_id, hex2str(resp->info_element->data,
+ resp->info_element->len));
+
+ if (priv->mac_state != MAC_ASSOC) {
+ printk(KERN_INFO "%s: AssocResp in state %s ignored\n",
+ priv->netdev->name, mac_states[priv->mac_state]);
return;
}
- if (!priv->rx_skb || !priv->rx_skb->data)
- return;
-
- buf = (struct at76_rx_buffer *)priv->rx_skb->data;
-
- if (urb->status != 0) {
- if (urb->status != -ENOENT && urb->status != -ECONNRESET)
- at76_dbg(DBG_URB,
- "%s %s: - nonzero Rx bulk status received: %d",
- __func__, wiphy_name(priv->hw->wiphy),
- urb->status);
- return;
+ BUG_ON(!priv->curr_bss);
+
+ cancel_delayed_work(&priv->dwork_assoc);
+ if (status == WLAN_STATUS_SUCCESS) {
+ struct bss_info *ptr = priv->curr_bss;
+ priv->assoc_id = assoc_id & 0x3fff;
+ /* update iwconfig params */
+ memcpy(priv->bssid, ptr->bssid, ETH_ALEN);
+ memcpy(priv->essid, ptr->ssid, ptr->ssid_len);
+ priv->essid_size = ptr->ssid_len;
+ priv->channel = ptr->channel;
+ schedule_work(&priv->work_assoc_done);
+ } else {
+ at76_set_mac_state(priv, MAC_JOINING);
+ schedule_work(&priv->work_join);
}
+}
- at76_dbg(DBG_RX_ATMEL_HDR,
- "%s: rx frame: rate %d rssi %d noise %d link %d",
- wiphy_name(priv->hw->wiphy), buf->rx_rate, buf->rssi,
- buf->noise_level, buf->link_quality);
-
- skb_trim(priv->rx_skb, le16_to_cpu(buf->wlength) + AT76_RX_HDRLEN);
- at76_dbg_dump(DBG_RX_DATA, &priv->rx_skb->data[AT76_RX_HDRLEN],
- priv->rx_skb->len, "RX: len=%d",
- (int)(priv->rx_skb->len - AT76_RX_HDRLEN));
+/* Process disassociation request from the AP */
+static void at76_rx_mgmt_disassoc(struct at76_priv *priv,
+ struct at76_rx_buffer *buf)
+{
+ struct ieee80211_disassoc *resp =
+ (struct ieee80211_disassoc *)buf->packet;
+ struct ieee80211_hdr_3addr *mgmt = &resp->header;
+
+ at76_dbg(DBG_RX_MGMT,
+ "%s: rx DisAssoc bssid %s reason 0x%04x destination %s",
+ priv->netdev->name, mac2str(mgmt->addr3),
+ le16_to_cpu(resp->reason), mac2str(mgmt->addr1));
+
+ /* We are not connected, ignore */
+ if (priv->mac_state == MAC_SCANNING || priv->mac_state == MAC_INIT
+ || !priv->curr_bss)
+ return;
- rx_status.signal = buf->rssi;
- /* FIXME: is rate_idx still present in structure? */
- rx_status.rate_idx = buf->rx_rate;
- rx_status.flag |= RX_FLAG_DECRYPTED;
- rx_status.flag |= RX_FLAG_IV_STRIPPED;
+ /* Not our BSSID, ignore */
+ if (compare_ether_addr(mgmt->addr3, priv->curr_bss->bssid))
+ return;
- skb_pull(priv->rx_skb, AT76_RX_HDRLEN);
- at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d",
- priv->rx_skb->len, priv->rx_skb->data_len);
- ieee80211_rx_irqsafe(priv->hw, priv->rx_skb, &rx_status);
+ /* Not for our STA and not broadcast, ignore */
+ if (compare_ether_addr(priv->netdev->dev_addr, mgmt->addr1)
+ && !is_broadcast_ether_addr(mgmt->addr1))
+ return;
- /* Use a new skb for the next receive */
- priv->rx_skb = NULL;
+ if (priv->mac_state != MAC_ASSOC && priv->mac_state != MAC_CONNECTED
+ && priv->mac_state != MAC_JOINING) {
+ printk(KERN_INFO "%s: DisAssoc in state %s ignored\n",
+ priv->netdev->name, mac_states[priv->mac_state]);
+ return;
+ }
- at76_submit_rx_urb(priv);
+ if (priv->mac_state == MAC_CONNECTED) {
+ netif_carrier_off(priv->netdev);
+ netif_stop_queue(priv->netdev);
+ at76_iwevent_bss_disconnect(priv->netdev);
+ }
+ cancel_delayed_work(&priv->dwork_get_scan);
+ cancel_delayed_work(&priv->dwork_beacon);
+ cancel_delayed_work(&priv->dwork_auth);
+ cancel_delayed_work(&priv->dwork_assoc);
+ at76_set_mac_state(priv, MAC_JOINING);
+ schedule_work(&priv->work_join);
}
-/* Load firmware into kernel memory and parse it */
-static struct fwentry *at76_load_firmware(struct usb_device *udev,
- enum board_type board_type)
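+/* Process an authentication frame from our AP: on success either continue
+ * with the association request or answer the shared-key challenge (seq_nr 2),
+ * otherwise go back to joining the next matching BSS. */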
+static void at76_rx_mgmt_auth(struct at76_priv *priv,
+ struct at76_rx_buffer *buf)
{
- int ret;
- char *str;
- struct at76_fw_header *fwh;
- struct fwentry *fwe = &firmwares[board_type];
-
- mutex_lock(&fw_mutex);
-
- if (fwe->loaded) {
- at76_dbg(DBG_FW, "re-using previously loaded fw");
- goto exit;
+ struct ieee80211_auth *resp = (struct ieee80211_auth *)buf->packet;
+ struct ieee80211_hdr_3addr *mgmt = &resp->header;
+ int seq_nr = le16_to_cpu(resp->transaction);
+ int alg = le16_to_cpu(resp->algorithm);
+ int status = le16_to_cpu(resp->status);
+
+ at76_dbg(DBG_RX_MGMT,
+ "%s: rx AuthFrame bssid %s alg %d seq_nr %d status %d "
+ "destination %s", priv->netdev->name, mac2str(mgmt->addr3),
+ alg, seq_nr, status, mac2str(mgmt->addr1));
+
+ if (alg == WLAN_AUTH_SHARED_KEY && seq_nr == 2)
+ at76_dbg(DBG_RX_MGMT, "%s: AuthFrame challenge %s ...",
+ priv->netdev->name, hex2str(resp->info_element, 18));
+
+ if (priv->mac_state != MAC_AUTH) {
+ printk(KERN_INFO "%s: ignored AuthFrame in state %s\n",
+ priv->netdev->name, mac_states[priv->mac_state]);
+ return;
}
-
- at76_dbg(DBG_FW, "downloading firmware %s", fwe->fwname);
- ret = request_firmware(&fwe->fw, fwe->fwname, &udev->dev);
- if (ret < 0) {
- dev_printk(KERN_ERR, &udev->dev, "firmware %s not found!\n",
- fwe->fwname);
- dev_printk(KERN_ERR, &udev->dev,
- "you may need to download the firmware from "
- "http://developer.berlios.de/projects/at76c503a/\n");
- goto exit;
+ if (priv->auth_mode != alg) {
+ printk(KERN_INFO "%s: ignored AuthFrame for alg %d\n",
+ priv->netdev->name, alg);
+ return;
}
- at76_dbg(DBG_FW, "got it.");
- fwh = (struct at76_fw_header *)(fwe->fw->data);
+ BUG_ON(!priv->curr_bss);
- if (fwe->fw->size <= sizeof(*fwh)) {
- dev_printk(KERN_ERR, &udev->dev,
- "firmware is too short (0x%zx)\n", fwe->fw->size);
- goto exit;
+ /* Not our BSSID or not for our STA, ignore */
+ if (compare_ether_addr(mgmt->addr3, priv->curr_bss->bssid)
+ || compare_ether_addr(priv->netdev->dev_addr, mgmt->addr1))
+ return;
+
+ cancel_delayed_work(&priv->dwork_auth);
+ if (status != WLAN_STATUS_SUCCESS) {
+ /* try to join next bss */
+ at76_set_mac_state(priv, MAC_JOINING);
+ schedule_work(&priv->work_join);
+ return;
}
- /* CRC currently not checked */
- fwe->board_type = le32_to_cpu(fwh->board_type);
- if (fwe->board_type != board_type) {
- dev_printk(KERN_ERR, &udev->dev,
- "board type mismatch, requested %u, got %u\n",
- board_type, fwe->board_type);
- goto exit;
+ if (priv->auth_mode == WLAN_AUTH_OPEN || seq_nr == 4) {
+ priv->retries = ASSOC_RETRIES;
+ at76_set_mac_state(priv, MAC_ASSOC);
+ at76_assoc_req(priv, priv->curr_bss);
+ at76_dbg(DBG_MGMT_TIMER,
+ "%s:%d: starting mgmt_timer + HZ", __func__, __LINE__);
+ schedule_delayed_work(&priv->dwork_assoc, ASSOC_TIMEOUT);
+ return;
}
- fwe->fw_version.major = fwh->major;
- fwe->fw_version.minor = fwh->minor;
- fwe->fw_version.patch = fwh->patch;
- fwe->fw_version.build = fwh->build;
+ WARN_ON(seq_nr != 2);
+ at76_auth_req(priv, priv->curr_bss, seq_nr + 1, resp->info_element);
+ at76_dbg(DBG_MGMT_TIMER, "%s:%d: starting mgmt_timer + HZ", __func__,
+ __LINE__);
+ schedule_delayed_work(&priv->dwork_auth, AUTH_TIMEOUT);
+}
- str = (char *)fwh + le32_to_cpu(fwh->str_offset);
- fwe->intfw = (u8 *)fwh + le32_to_cpu(fwh->int_fw_offset);
- fwe->intfw_size = le32_to_cpu(fwh->int_fw_len);
- fwe->extfw = (u8 *)fwh + le32_to_cpu(fwh->ext_fw_offset);
- fwe->extfw_size = le32_to_cpu(fwh->ext_fw_len);
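+/* Process a deauthentication frame from our AP and fall back to joining */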
+static void at76_rx_mgmt_deauth(struct at76_priv *priv,
+ struct at76_rx_buffer *buf)
+{
+ struct ieee80211_disassoc *resp =
+ (struct ieee80211_disassoc *)buf->packet;
+ struct ieee80211_hdr_3addr *mgmt = &resp->header;
+
+ at76_dbg(DBG_RX_MGMT | DBG_PROGRESS,
+ "%s: rx DeAuth bssid %s reason 0x%04x destination %s",
+ priv->netdev->name, mac2str(mgmt->addr3),
+ le16_to_cpu(resp->reason), mac2str(mgmt->addr1));
+
+ if (priv->mac_state != MAC_AUTH && priv->mac_state != MAC_ASSOC
+ && priv->mac_state != MAC_CONNECTED) {
+ printk(KERN_INFO "%s: DeAuth in state %s ignored\n",
+ priv->netdev->name, mac_states[priv->mac_state]);
+ return;
+ }
- fwe->loaded = 1;
+ BUG_ON(!priv->curr_bss);
- dev_printk(KERN_DEBUG, &udev->dev,
- "using firmware %s (version %d.%d.%d-%d)\n",
- fwe->fwname, fwh->major, fwh->minor, fwh->patch, fwh->build);
+ /* Not our BSSID, ignore */
+ if (compare_ether_addr(mgmt->addr3, priv->curr_bss->bssid))
+ return;
- at76_dbg(DBG_DEVSTART, "board %u, int %d:%d, ext %d:%d", board_type,
- le32_to_cpu(fwh->int_fw_offset), le32_to_cpu(fwh->int_fw_len),
- le32_to_cpu(fwh->ext_fw_offset), le32_to_cpu(fwh->ext_fw_len));
- at76_dbg(DBG_DEVSTART, "firmware id %s", str);
+ /* Not for our STA and not broadcast, ignore */
+ if (compare_ether_addr(priv->netdev->dev_addr, mgmt->addr1)
+ && !is_broadcast_ether_addr(mgmt->addr1))
+ return;
-exit:
- mutex_unlock(&fw_mutex);
+ if (priv->mac_state == MAC_CONNECTED)
+ at76_iwevent_bss_disconnect(priv->netdev);
- if (fwe->loaded)
- return fwe;
- else
- return NULL;
+ at76_set_mac_state(priv, MAC_JOINING);
+ schedule_work(&priv->work_join);
+ cancel_delayed_work(&priv->dwork_get_scan);
+ cancel_delayed_work(&priv->dwork_beacon);
+ cancel_delayed_work(&priv->dwork_auth);
+ cancel_delayed_work(&priv->dwork_assoc);
}
-static void at76_mac80211_tx_callback(struct urb *urb)
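+/* Process a beacon or probe response: refresh the beacon timeout for the
+ * currently associated AP and add or update the sender's entry in the
+ * BSS table. */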
+static void at76_rx_mgmt_beacon(struct at76_priv *priv,
+ struct at76_rx_buffer *buf)
{
- struct at76_priv *priv = urb->context;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(priv->tx_skb);
+ int varpar_len;
+ /* beacon content */
+ struct ieee80211_beacon *bdata = (struct ieee80211_beacon *)buf->packet;
+ struct ieee80211_hdr_3addr *mgmt = &bdata->header;
+
+ struct list_head *lptr;
+ struct bss_info *match; /* entry matching addr3 with its bssid */
+ int new_entry = 0;
+ int len;
+ struct ieee80211_info_element *ie;
+ int have_ssid = 0;
+ int have_rates = 0;
+ int have_channel = 0;
+ int keep_going = 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->bss_list_spinlock, flags);
+ if (priv->mac_state == MAC_CONNECTED) {
+ /* in state MAC_CONNECTED we use the mgmt_timer to control
+ the beacon of the BSS */
+ BUG_ON(!priv->curr_bss);
+
+ if (!compare_ether_addr(priv->curr_bss->bssid, mgmt->addr3)) {
+ /* We got our AP's beacon, defer the timeout handler.
+ Kill pending work first, as schedule_delayed_work()
+ won't do it. */
+ cancel_delayed_work(&priv->dwork_beacon);
+ schedule_delayed_work(&priv->dwork_beacon,
+ BEACON_TIMEOUT);
+ priv->curr_bss->rssi = buf->rssi;
+ priv->beacons_received++;
+ goto exit;
+ }
+ }
- at76_dbg(DBG_MAC80211, "%s()", __func__);
+ /* look if we have this BSS already in the list */
+ match = NULL;
- switch (urb->status) {
- case 0:
- /* success */
- /* FIXME:
- * is the frame really ACKed when tx_callback is called ? */
- info->flags |= IEEE80211_TX_STAT_ACK;
- break;
- case -ENOENT:
- case -ECONNRESET:
- /* fail, urb has been unlinked */
- /* FIXME: add error message */
- break;
- default:
- at76_dbg(DBG_URB, "%s - nonzero tx status received: %d",
- __func__, urb->status);
- break;
+ if (!list_empty(&priv->bss_list)) {
+ list_for_each(lptr, &priv->bss_list) {
+ struct bss_info *bss_ptr =
+ list_entry(lptr, struct bss_info, list);
+ if (!compare_ether_addr(bss_ptr->bssid, mgmt->addr3)) {
+ match = bss_ptr;
+ break;
+ }
+ }
}
- memset(&info->status, 0, sizeof(info->status));
+ if (!match) {
+ /* BSS not in the list - append it */
+ match = kzalloc(sizeof(struct bss_info), GFP_ATOMIC);
+ if (!match) {
+ at76_dbg(DBG_BSS_TABLE,
+ "%s: cannot kmalloc new bss info (%zd byte)",
+ priv->netdev->name, sizeof(struct bss_info));
+ goto exit;
+ }
+ new_entry = 1;
+ list_add_tail(&match->list, &priv->bss_list);
+ }
- ieee80211_tx_status_irqsafe(priv->hw, priv->tx_skb);
+ match->capa = le16_to_cpu(bdata->capability);
+ match->beacon_interval = le16_to_cpu(bdata->beacon_interval);
+ match->rssi = buf->rssi;
+ match->link_qual = buf->link_quality;
+ match->noise_level = buf->noise_level;
+ memcpy(match->bssid, mgmt->addr3, ETH_ALEN);
+ at76_dbg(DBG_RX_BEACON, "%s: bssid %s", priv->netdev->name,
+ mac2str(match->bssid));
+
+ ie = bdata->info_element;
+
+ /* length of var length beacon parameters */
+ varpar_len = min_t(int, le16_to_cpu(buf->wlength) -
+ sizeof(struct ieee80211_beacon),
+ BEACON_MAX_DATA_LENGTH);
+
+ /* This routine steps through the bdata->data array to get
+ * some useful information about the access point.
+ * Currently, this implementation supports receipt of: SSID,
+ * supported transfer rates and channel, in any order, with some
+ * tolerance for intermittent unknown codes (although this
+ * functionality may not be necessary as the useful information will
+ * usually arrive consecutively, but there have been some
+ * reports of some of the useful information fields arriving in a
+ * different order).
+ * It does not support any more IE types although MFIE_TYPE_TIM may
+ * be supported (on my AP at least).
+ * The bdata->data array is about 1500 bytes long but only ~36 of those
+ * bytes are useful, hence the have_ssid etc optimizations. */
+
+ while (keep_going &&
+ ((&ie->data[ie->len] - (u8 *)bdata->info_element) <=
+ varpar_len)) {
+
+ switch (ie->id) {
+
+ case MFIE_TYPE_SSID:
+ if (have_ssid)
+ break;
- priv->tx_skb = NULL;
+ len = min_t(int, IW_ESSID_MAX_SIZE, ie->len);
- ieee80211_wake_queues(priv->hw);
-}
+ /* we copy only if this is a new entry,
+ or the incoming SSID is not a hidden SSID. This
+ will protect us from overwriting a real SSID read
+ in a ProbeResponse with a hidden one from a
+ following beacon. */
+ if (!new_entry && at76_is_hidden_ssid(ie->data, len)) {
+ have_ssid = 1;
+ break;
+ }
-static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct at76_priv *priv = hw->priv;
- struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int padding, submit_len, ret;
+ match->ssid_len = len;
+ memcpy(match->ssid, ie->data, len);
+ at76_dbg(DBG_RX_BEACON, "%s: SSID - %.*s",
+ priv->netdev->name, len, match->ssid);
+ have_ssid = 1;
+ break;
- at76_dbg(DBG_MAC80211, "%s()", __func__);
+ case MFIE_TYPE_RATES:
+ if (have_rates)
+ break;
- if (priv->tx_urb->status == -EINPROGRESS) {
- printk(KERN_ERR "%s: %s called while tx urb is pending\n",
- wiphy_name(priv->hw->wiphy), __func__);
- return NETDEV_TX_BUSY;
- }
+ match->rates_len =
+ min_t(int, sizeof(match->rates), ie->len);
+ memcpy(match->rates, ie->data, match->rates_len);
+ have_rates = 1;
+ at76_dbg(DBG_RX_BEACON, "%s: SUPPORTED RATES %s",
+ priv->netdev->name,
+ hex2str(ie->data, ie->len));
+ break;
- ieee80211_stop_queues(hw);
+ case MFIE_TYPE_DS_SET:
+ if (have_channel)
+ break;
- at76_ledtrig_tx_activity(); /* tell ledtrigger we send a packet */
+ match->channel = ie->data[0];
+ have_channel = 1;
+ at76_dbg(DBG_RX_BEACON, "%s: CHANNEL - %d",
+ priv->netdev->name, match->channel);
+ break;
- WARN_ON(priv->tx_skb != NULL);
+ case MFIE_TYPE_CF_SET:
+ case MFIE_TYPE_TIM:
+ case MFIE_TYPE_IBSS_SET:
+ default:
+ at76_dbg(DBG_RX_BEACON, "%s: beacon IE id %d len %d %s",
+ priv->netdev->name, ie->id, ie->len,
+ hex2str(ie->data, ie->len));
+ break;
+ }
- priv->tx_skb = skb;
- padding = at76_calc_padding(skb->len);
- submit_len = AT76_TX_HDRLEN + skb->len + padding;
+ /* advance to the next informational element */
+ next_ie(&ie);
- /* setup 'Atmel' header */
- memset(tx_buffer, 0, sizeof(*tx_buffer));
- tx_buffer->padding = padding;
- tx_buffer->wlength = cpu_to_le16(skb->len);
- tx_buffer->tx_rate = ieee80211_get_tx_rate(hw, info)->hw_value;
- if (FIRMWARE_IS_WPA(priv->fw_version) && info->control.hw_key) {
- tx_buffer->key_id = (info->control.hw_key->keyidx);
- tx_buffer->cipher_type =
- priv->keys[info->control.hw_key->keyidx].cipher;
- tx_buffer->cipher_length =
- priv->keys[info->control.hw_key->keyidx].keylen;
- tx_buffer->reserved = 0;
- } else {
- tx_buffer->key_id = 0;
- tx_buffer->cipher_type = 0;
- tx_buffer->cipher_length = 0;
- tx_buffer->reserved = 0;
- };
- /* memset(tx_buffer->reserved, 0, sizeof(tx_buffer->reserved)); */
- memcpy(tx_buffer->packet, skb->data, skb->len);
+ /* Optimization: after all, the bdata->data array is
+ * varpar_len bytes long, whereas we get all of the useful
+ * information after only ~36 bytes; this saves us a lot of
+ * time (and trouble, as the remaining portion of the array
+ * could be full of junk).
+ * Comment this out if you want to see what other information
+ * comes from the AP - although little of it may be useful */
+ }
- at76_dbg(DBG_TX_DATA, "%s tx: wlen 0x%x pad 0x%x rate %d hdr",
- wiphy_name(priv->hw->wiphy), le16_to_cpu(tx_buffer->wlength),
- tx_buffer->padding, tx_buffer->tx_rate);
+ at76_dbg(DBG_RX_BEACON, "%s: Finished processing beacon data",
+ priv->netdev->name);
- /* send stuff */
- at76_dbg_dump(DBG_TX_DATA_CONTENT, tx_buffer, submit_len,
- "%s(): tx_buffer %d bytes:", __func__, submit_len);
- usb_fill_bulk_urb(priv->tx_urb, priv->udev, priv->tx_pipe, tx_buffer,
- submit_len, at76_mac80211_tx_callback, priv);
- ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC);
- if (ret) {
- printk(KERN_ERR "%s: error in tx submit urb: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
- if (ret == -EINVAL)
- printk(KERN_ERR
- "%s: -EINVAL: tx urb %p hcpriv %p complete %p\n",
- wiphy_name(priv->hw->wiphy), priv->tx_urb,
- priv->tx_urb->hcpriv, priv->tx_urb->complete);
- }
+ match->last_rx = jiffies; /* record last rx of beacon */
- return 0;
+exit:
+ spin_unlock_irqrestore(&priv->bss_list_spinlock, flags);
}
-static int at76_mac80211_start(struct ieee80211_hw *hw)
+/* Calculate the link level from a given rx_buffer */
+static void at76_calc_level(struct at76_priv *priv, struct at76_rx_buffer *buf,
+ struct iw_quality *qual)
{
- struct at76_priv *priv = hw->priv;
- int ret;
-
- at76_dbg(DBG_MAC80211, "%s()", __func__);
+ /* just a guess for now, might be different for other chips */
+ int max_rssi = 42;
- mutex_lock(&priv->mtx);
-
- ret = at76_submit_rx_urb(priv);
- if (ret < 0) {
- printk(KERN_ERR "%s: open: submit_rx_urb failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
- goto error;
- }
+ qual->level = (buf->rssi * 100 / max_rssi);
+ if (qual->level > 100)
+ qual->level = 100;
+ qual->updated |= IW_QUAL_LEVEL_UPDATED;
+}
- at76_startup_device(priv);
+/* Calculate the link quality from a given rx_buffer */
+static void at76_calc_qual(struct at76_priv *priv, struct at76_rx_buffer *buf,
+ struct iw_quality *qual)
+{
+ if (at76_is_intersil(priv->board_type))
+ qual->qual = buf->link_quality;
+ else {
+ unsigned long elapsed;
- at76_start_monitor(priv);
+ /* Update qual at most once a second */
+ elapsed = jiffies - priv->beacons_last_qual;
+ if (elapsed < 1 * HZ)
+ return;
-error:
- mutex_unlock(&priv->mtx);
+ qual->qual = qual->level * priv->beacons_received *
+ msecs_to_jiffies(priv->beacon_period) / elapsed;
- return 0;
+ priv->beacons_last_qual = jiffies;
+ priv->beacons_received = 0;
+ }
+ qual->qual = (qual->qual > 100) ? 100 : qual->qual;
+ qual->updated |= IW_QUAL_QUAL_UPDATED;
}
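
at76_calc_qual() above estimates quality as the fraction of expected beacons actually received over the elapsed interval, weighted by the signal level and clamped to 100, updated at most once per second. The same arithmetic in isolation, with hypothetical names, looks like this:

#include <linux/jiffies.h>

/* Illustrative only: mirrors the arithmetic of at76_calc_qual(). */
static unsigned int beacon_quality(unsigned int level,
				   unsigned int beacons_received,
				   unsigned int beacon_period_ms,
				   unsigned long elapsed_jiffies)
{
	unsigned long qual;

	if (!elapsed_jiffies)
		return 0;

	/* (beacons received * expected beacon interval) / elapsed time
	 * approximates the fraction of expected beacons seen; weight it
	 * by the current signal level */
	qual = level * beacons_received *
	       msecs_to_jiffies(beacon_period_ms) / elapsed_jiffies;

	return qual > 100 ? 100 : qual;
}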
-static void at76_mac80211_stop(struct ieee80211_hw *hw)
+/* Calculate the noise quality from a given rx_buffer */
+static void at76_calc_noise(struct at76_priv *priv, struct at76_rx_buffer *buf,
+ struct iw_quality *qual)
{
- struct at76_priv *priv = hw->priv;
-
- at76_dbg(DBG_MAC80211, "%s()", __func__);
-
- mutex_lock(&priv->mtx);
+ qual->noise = 0;
+ qual->updated |= IW_QUAL_NOISE_INVALID;
+}
- if (!priv->device_unplugged) {
- /* We are called by "ifconfig ethX down", not because the
- * device is not available anymore. */
- if (at76_set_radio(priv, 0) == 1)
- at76_wait_completion(priv, CMD_RADIO_ON);
+static void at76_update_wstats(struct at76_priv *priv,
+ struct at76_rx_buffer *buf)
+{
+ struct iw_quality *qual = &priv->wstats.qual;
- /* We unlink rx_urb because at76_open() re-submits it.
- * If unplugged, at76_delete_device() takes care of it. */
- usb_kill_urb(priv->rx_urb);
+ if (buf->rssi && priv->mac_state == MAC_CONNECTED) {
+ qual->updated = 0;
+ at76_calc_level(priv, buf, qual);
+ at76_calc_qual(priv, buf, qual);
+ at76_calc_noise(priv, buf, qual);
+ } else {
+ qual->qual = 0;
+ qual->level = 0;
+ qual->noise = 0;
+ qual->updated = IW_QUAL_ALL_INVALID;
}
-
- mutex_unlock(&priv->mtx);
}
-static int at76_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+static void at76_rx_mgmt(struct at76_priv *priv, struct at76_rx_buffer *buf)
{
- struct at76_priv *priv = hw->priv;
- int ret = 0;
+ struct ieee80211_hdr_3addr *mgmt =
+ (struct ieee80211_hdr_3addr *)buf->packet;
+ u16 framectl = le16_to_cpu(mgmt->frame_ctl);
+
+ /* update wstats */
+ if (priv->mac_state != MAC_INIT && priv->mac_state != MAC_SCANNING) {
+ /* jal: this is a dirty hack needed by Tim in ad-hoc mode */
+		/* Data packets always seem to report a link level of 0, so
+		   we only read link-quality info from management packets.
+		   The Atmel driver averages the current and previous values;
+		   we just report the raw value for now - TJS */
+ if (priv->iw_mode == IW_MODE_ADHOC
+ || (priv->curr_bss
+ && !compare_ether_addr(mgmt->addr3,
+ priv->curr_bss->bssid)))
+ at76_update_wstats(priv, buf);
+ }
- at76_dbg(DBG_MAC80211, "%s()", __func__);
+ at76_dbg(DBG_RX_MGMT_CONTENT, "%s rx mgmt framectl 0x%x %s",
+ priv->netdev->name, framectl,
+ hex2str(mgmt, le16_to_cpu(buf->wlength)));
- mutex_lock(&priv->mtx);
+ switch (framectl & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_BEACON:
+ case IEEE80211_STYPE_PROBE_RESP:
+ at76_rx_mgmt_beacon(priv, buf);
+ break;
+
+ case IEEE80211_STYPE_ASSOC_RESP:
+ at76_rx_mgmt_assoc(priv, buf);
+ break;
- switch (conf->type) {
- case NL80211_IFTYPE_STATION:
- priv->iw_mode = IW_MODE_INFRA;
+ case IEEE80211_STYPE_DISASSOC:
+ at76_rx_mgmt_disassoc(priv, buf);
break;
+
+ case IEEE80211_STYPE_AUTH:
+ at76_rx_mgmt_auth(priv, buf);
+ break;
+
+ case IEEE80211_STYPE_DEAUTH:
+ at76_rx_mgmt_deauth(priv, buf);
+ break;
+
default:
- ret = -EOPNOTSUPP;
- goto exit;
+ printk(KERN_DEBUG "%s: ignoring frame with framectl 0x%04x\n",
+ priv->netdev->name, framectl);
}
-exit:
- mutex_unlock(&priv->mtx);
-
- return ret;
+ return;
}
-static void at76_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+/* Convert the 802.11 header into an ethernet-style header, make skb
+ * ready for consumption by netif_rx() */
+static void at76_ieee80211_to_eth(struct sk_buff *skb, int iw_mode)
{
- at76_dbg(DBG_MAC80211, "%s()", __func__);
-}
+ struct ieee80211_hdr_3addr *i802_11_hdr;
+ struct ethhdr *eth_hdr_p;
+ u8 *src_addr;
+ u8 *dest_addr;
-static int at76_join(struct at76_priv *priv)
-{
- struct at76_req_join join;
- int ret;
+ i802_11_hdr = (struct ieee80211_hdr_3addr *)skb->data;
- memset(&join, 0, sizeof(struct at76_req_join));
- memcpy(join.essid, priv->essid, priv->essid_size);
- join.essid_size = priv->essid_size;
- memcpy(join.bssid, priv->bssid, ETH_ALEN);
- join.bss_type = INFRASTRUCTURE_MODE;
- join.channel = priv->channel;
- join.timeout = cpu_to_le16(2000);
+	/* This would be the ethernet header if the hardware had already
+	 * converted the frame for us. Make sure the source and the
+	 * destination match the 802.11 header. Which hardware does that? */
+ eth_hdr_p = (struct ethhdr *)skb_pull(skb, IEEE80211_3ADDR_LEN);
- at76_dbg(DBG_MAC80211, "%s: sending CMD_JOIN", __func__);
- ret = at76_set_card_command(priv->udev, CMD_JOIN, &join,
- sizeof(struct at76_req_join));
+ dest_addr = i802_11_hdr->addr1;
+ if (iw_mode == IW_MODE_ADHOC)
+ src_addr = i802_11_hdr->addr2;
+ else
+ src_addr = i802_11_hdr->addr3;
- if (ret < 0) {
- printk(KERN_ERR "%s: at76_set_card_command failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
- return 0;
- }
+ if (!compare_ether_addr(eth_hdr_p->h_source, src_addr) &&
+ !compare_ether_addr(eth_hdr_p->h_dest, dest_addr))
+ /* Yes, we already have an ethernet header */
+ skb_reset_mac_header(skb);
+ else {
+ u16 len;
+
+ /* Need to build an ethernet header */
+ if (!memcmp(skb->data, snapsig, sizeof(snapsig))) {
+ /* SNAP frame - decapsulate, keep proto */
+ skb_push(skb, offsetof(struct ethhdr, h_proto) -
+ sizeof(rfc1042sig));
+ len = 0;
+ } else {
+ /* 802.3 frame, proto is length */
+ len = skb->len;
+ skb_push(skb, ETH_HLEN);
+ }
- ret = at76_wait_completion(priv, CMD_JOIN);
- at76_dbg(DBG_MAC80211, "%s: CMD_JOIN returned: 0x%02x", __func__, ret);
- if (ret != CMD_STATUS_COMPLETE) {
- printk(KERN_ERR "%s: at76_wait_completion failed: %d\n",
- wiphy_name(priv->hw->wiphy), ret);
- return 0;
+ skb_reset_mac_header(skb);
+ eth_hdr_p = eth_hdr(skb);
+ /* This needs to be done in this order (eth_hdr_p->h_dest may
+ * overlap src_addr) */
+ memcpy(eth_hdr_p->h_source, src_addr, ETH_ALEN);
+ memcpy(eth_hdr_p->h_dest, dest_addr, ETH_ALEN);
+ if (len)
+ eth_hdr_p->h_proto = htons(len);
}
- at76_set_tkip_bssid(priv, priv->bssid);
- at76_set_pm_mode(priv);
-
- return 0;
+ skb->protocol = eth_type_trans(skb, skb->dev);
}
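
The conversion above distinguishes SNAP-encapsulated payloads from raw 802.3 frames: an RFC 1042 LLC/SNAP header (AA AA 03 00 00 00) means the next two bytes are the EtherType, otherwise h_proto carries the frame length. A hedged, self-contained sketch of that check (the driver itself uses its snapsig/rfc1042sig tables):

#include <linux/string.h>
#include <linux/types.h>

/* RFC 1042 LLC/SNAP header: DSAP/SSAP 0xAA, ctrl 0x03, OUI 00:00:00;
 * the two bytes that follow are the EtherType. Illustrative helper only. */
static const u8 rfc1042_snap_sig[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

static bool payload_is_rfc1042_snap(const u8 *payload, size_t len)
{
	return len >= sizeof(rfc1042_snap_sig) &&
	       !memcmp(payload, rfc1042_snap_sig, sizeof(rfc1042_snap_sig));
}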
-static void at76_dwork_hw_scan(struct work_struct *work)
+/* Check for fragmented data in priv->rx_skb. If the packet is not a
+   fragment, or is the last fragment of a set, an skb containing the whole
+   packet is returned for further processing. Otherwise NULL is returned
+   and we are done; the packet is either stored in the fragment buffer or
+   thrown away. Every returned skb starts with the IEEE 802.11 header and
+   contains _no_ FCS at the end */
+static struct sk_buff *at76_check_for_rx_frags(struct at76_priv *priv)
{
- struct at76_priv *priv = container_of(work, struct at76_priv,
- dwork_hw_scan.work);
- int ret;
+ struct sk_buff *skb = priv->rx_skb;
+ struct at76_rx_buffer *buf = (struct at76_rx_buffer *)skb->data;
+ struct ieee80211_hdr_3addr *i802_11_hdr =
+ (struct ieee80211_hdr_3addr *)buf->packet;
+ /* seq_ctrl, fragment_number, sequence number of new packet */
+ u16 sctl = le16_to_cpu(i802_11_hdr->seq_ctl);
+ u16 fragnr = sctl & 0xf;
+ u16 seqnr = sctl >> 4;
+ u16 frame_ctl = le16_to_cpu(i802_11_hdr->frame_ctl);
- ret = at76_get_cmd_status(priv->udev, CMD_SCAN);
- at76_dbg(DBG_MAC80211, "%s: CMD_SCAN status 0x%02x", __func__, ret);
+ /* Length including the IEEE802.11 header, but without the trailing
+ * FCS and without the Atmel Rx header */
+ int length = le16_to_cpu(buf->wlength) - IEEE80211_FCS_LEN;
- /* FIXME: add maximum time for scan to complete */
+ /* where does the data payload start in skb->data ? */
+ u8 *data = i802_11_hdr->payload;
- if (ret != CMD_STATUS_COMPLETE) {
- queue_delayed_work(priv->hw->workqueue, &priv->dwork_hw_scan,
- SCAN_POLL_INTERVAL);
- goto exit;
+ /* length of payload, excl. the trailing FCS */
+ int data_len = length - IEEE80211_3ADDR_LEN;
+
+ int i;
+ struct rx_data_buf *bptr, *optr;
+ unsigned long oldest = ~0UL;
+
+ at76_dbg(DBG_RX_FRAGS,
+ "%s: rx data frame_ctl %04x addr2 %s seq/frag %d/%d "
+ "length %d data %d: %s ...", priv->netdev->name, frame_ctl,
+ mac2str(i802_11_hdr->addr2), seqnr, fragnr, length, data_len,
+ hex2str(data, 32));
+
+ at76_dbg(DBG_RX_FRAGS_SKB, "%s: incoming skb: head %p data %p "
+ "tail %p end %p len %d", priv->netdev->name, skb->head,
+ skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
+ skb->len);
+
+ if (data_len < 0) {
+ /* make sure data starts in the buffer */
+ printk(KERN_INFO "%s: data frame too short\n",
+ priv->netdev->name);
+ return NULL;
}
- ieee80211_scan_completed(priv->hw);
+ WARN_ON(length <= AT76_RX_HDRLEN);
+ if (length <= AT76_RX_HDRLEN)
+ return NULL;
- if (is_valid_ether_addr(priv->bssid)) {
- ieee80211_wake_queues(priv->hw);
- at76_join(priv);
+ /* remove the at76_rx_buffer header - we don't need it anymore */
+ /* we need the IEEE802.11 header (for the addresses) if this packet
+ is the first of a chain */
+ skb_pull(skb, AT76_RX_HDRLEN);
+
+ /* remove FCS at end */
+ skb_trim(skb, length);
+
+ at76_dbg(DBG_RX_FRAGS_SKB, "%s: trimmed skb: head %p data %p tail %p "
+ "end %p len %d data %p data_len %d", priv->netdev->name,
+ skb->head, skb->data, skb_tail_pointer(skb),
+ skb_end_pointer(skb), skb->len, data, data_len);
+
+ if (fragnr == 0 && !(frame_ctl & IEEE80211_FCTL_MOREFRAGS)) {
+ /* unfragmented packet received */
+ /* Use a new skb for the next receive */
+ priv->rx_skb = NULL;
+ at76_dbg(DBG_RX_FRAGS, "%s: unfragmented", priv->netdev->name);
+ return skb;
}
- ieee80211_wake_queues(priv->hw);
+	/* Look for a fragment chain matching the sender address.
+	   Afterwards either bptr points to the entry for this sender
+	   (if i < NR_RX_DATA_BUF), or optr points to the first free or
+	   the oldest entry. */
+	/* Determining the oldest entry does not cope with jiffies wrapping,
+	   but deleting a young entry at these rare moments is harmless ... */
+
+ bptr = priv->rx_data;
+ optr = NULL;
+ for (i = 0; i < NR_RX_DATA_BUF; i++, bptr++) {
+ if (!bptr->skb) {
+ optr = bptr;
+ oldest = 0UL;
+ continue;
+ }
-exit:
- return;
-}
+ if (!compare_ether_addr(i802_11_hdr->addr2, bptr->sender))
+ break;
-static int at76_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
-{
- struct at76_priv *priv = hw->priv;
- struct at76_req_scan scan;
- int ret;
+ if (!optr) {
+ optr = bptr;
+ oldest = bptr->last_rx;
+ } else if (bptr->last_rx < oldest)
+ optr = bptr;
+ }
- at76_dbg(DBG_MAC80211, "%s():", __func__);
- at76_dbg_dump(DBG_MAC80211, ssid, len, "ssid %zd bytes:", len);
+ if (i < NR_RX_DATA_BUF) {
+
+ at76_dbg(DBG_RX_FRAGS, "%s: %d. cacheentry (seq/frag = %d/%d) "
+ "matched sender addr",
+ priv->netdev->name, i, bptr->seqnr, bptr->fragnr);
+
+ /* bptr points to an entry for the sender address */
+ if (bptr->seqnr == seqnr) {
+ int left;
+ /* the fragment has the current sequence number */
+ if (((bptr->fragnr + 1) & 0xf) != fragnr) {
+				/* wrong fragment number -> ignore it */
+				/* (the & 0xf above wraps the 4-bit
+				   fragment counter) */
+ at76_dbg(DBG_RX_FRAGS,
+ "%s: frag nr mismatch: %d + 1 != %d",
+ priv->netdev->name, bptr->fragnr,
+ fragnr);
+ return NULL;
+ }
+ bptr->last_rx = jiffies;
+			/* this is the next fragment in sequence ->
+			   append its data at the end */
+
+			/* sanity check: make sure the data still fits */
+ left = skb_tailroom(bptr->skb);
+ if (left < data_len)
+ printk(KERN_INFO
+ "%s: only %d byte free (need %d)\n",
+ priv->netdev->name, left, data_len);
+ else
+ memcpy(skb_put(bptr->skb, data_len), data,
+ data_len);
+
+ bptr->fragnr = fragnr;
+ if (frame_ctl & IEEE80211_FCTL_MOREFRAGS)
+ return NULL;
+
+ /* this was the last fragment - send it */
+ skb = bptr->skb;
+ bptr->skb = NULL; /* free the entry */
+ at76_dbg(DBG_RX_FRAGS, "%s: last frag of seq %d",
+ priv->netdev->name, seqnr);
+ return skb;
+ }
- mutex_lock(&priv->mtx);
+ /* got another sequence number */
+ if (fragnr == 0) {
+			/* it's the start of a new chain - replace the
+			   old one with this */
+ /* bptr->sender has the correct value already */
+ at76_dbg(DBG_RX_FRAGS,
+ "%s: start of new seq %d, removing old seq %d",
+ priv->netdev->name, seqnr, bptr->seqnr);
+ bptr->seqnr = seqnr;
+ bptr->fragnr = 0;
+ bptr->last_rx = jiffies;
+ /* swap bptr->skb and priv->rx_skb */
+ skb = bptr->skb;
+ bptr->skb = priv->rx_skb;
+ priv->rx_skb = skb;
+ } else {
+			/* it's from the middle of a new chain ->
+			   delete the old entry and skip this one */
+ at76_dbg(DBG_RX_FRAGS,
+ "%s: middle of new seq %d (%d) "
+ "removing old seq %d",
+ priv->netdev->name, seqnr, fragnr,
+ bptr->seqnr);
+ dev_kfree_skb(bptr->skb);
+ bptr->skb = NULL;
+ }
+ return NULL;
+ }
- ieee80211_stop_queues(hw);
+ /* if we didn't find a chain for the sender address, optr
+ points either to the first free or the oldest entry */
- memset(&scan, 0, sizeof(struct at76_req_scan));
- memset(scan.bssid, 0xFF, ETH_ALEN);
- scan.scan_type = SCAN_TYPE_ACTIVE;
- if (priv->essid_size > 0) {
- memcpy(scan.essid, ssid, len);
- scan.essid_size = len;
+ if (fragnr != 0) {
+		/* this is not the beginning of a fragment chain ... */
+ at76_dbg(DBG_RX_FRAGS,
+ "%s: no chain for non-first fragment (%d)",
+ priv->netdev->name, fragnr);
+ return NULL;
}
- scan.min_channel_time = cpu_to_le16(priv->scan_min_time);
- scan.max_channel_time = cpu_to_le16(priv->scan_max_time);
- scan.probe_delay = cpu_to_le16(priv->scan_min_time * 1000);
- scan.international_scan = 0;
- at76_dbg(DBG_MAC80211, "%s: sending CMD_SCAN", __func__);
- ret = at76_set_card_command(priv->udev, CMD_SCAN, &scan, sizeof(scan));
+ BUG_ON(!optr);
+ if (optr->skb) {
+ /* swap the skb's */
+ skb = optr->skb;
+ optr->skb = priv->rx_skb;
+ priv->rx_skb = skb;
- if (ret < 0) {
- err("CMD_SCAN failed: %d", ret);
- goto exit;
- }
+ at76_dbg(DBG_RX_FRAGS,
+ "%s: free old contents: sender %s seq/frag %d/%d",
+ priv->netdev->name, mac2str(optr->sender),
+ optr->seqnr, optr->fragnr);
- queue_delayed_work(priv->hw->workqueue, &priv->dwork_hw_scan,
- SCAN_POLL_INTERVAL);
+ } else {
+ /* take the skb from priv->rx_skb */
+ optr->skb = priv->rx_skb;
+ /* let at76_submit_rx_urb() allocate a new skb */
+ priv->rx_skb = NULL;
-exit:
- mutex_unlock(&priv->mtx);
+ at76_dbg(DBG_RX_FRAGS, "%s: use a free entry",
+ priv->netdev->name);
+ }
+ memcpy(optr->sender, i802_11_hdr->addr2, ETH_ALEN);
+ optr->seqnr = seqnr;
+ optr->fragnr = 0;
+ optr->last_rx = jiffies;
- return 0;
+ return NULL;
}
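
The lookup loop above amounts to a small per-sender reassembly cache: return the entry matching addr2 if one exists, otherwise reuse a free slot or evict the oldest. A self-contained sketch of that policy, using time_before() so the age comparison also survives jiffies wrap (structure and names are illustrative, not the driver's):

#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>

struct frag_slot {
	u8 sender[ETH_ALEN];	/* transmitter address of the chain */
	unsigned long last_rx;	/* jiffies of the last fragment seen */
	struct sk_buff *skb;	/* NULL when the slot is free */
};

static struct frag_slot *frag_slot_lookup(struct frag_slot *slots, int n,
					  const u8 *sender)
{
	struct frag_slot *victim = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (slots[i].skb &&
		    !compare_ether_addr(slots[i].sender, sender))
			return &slots[i];	/* existing chain */

		if (!slots[i].skb) {
			if (!victim || victim->skb)
				victim = &slots[i];	/* prefer a free slot */
		} else if (!victim ||
			   (victim->skb &&
			    time_before(slots[i].last_rx, victim->last_rx))) {
			victim = &slots[i];	/* otherwise the oldest one */
		}
	}

	return victim;	/* caller re-initializes this slot for 'sender' */
}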
-static int at76_config(struct ieee80211_hw *hw, u32 changed)
+/* Rx interrupt: we expect the complete data buffer in priv->rx_skb */
+static void at76_rx_data(struct at76_priv *priv)
{
- struct at76_priv *priv = hw->priv;
- struct ieee80211_conf *conf = &hw->conf;
+ struct net_device *netdev = priv->netdev;
+ struct net_device_stats *stats = &priv->stats;
+ struct sk_buff *skb = priv->rx_skb;
+ struct at76_rx_buffer *buf = (struct at76_rx_buffer *)skb->data;
+ struct ieee80211_hdr_3addr *i802_11_hdr;
+ int length = le16_to_cpu(buf->wlength);
- at76_dbg(DBG_MAC80211, "%s(): channel %d radio %d",
- __func__, conf->channel->hw_value, conf->radio_enabled);
- at76_dbg_dump(DBG_MAC80211, priv->essid, priv->essid_size, "ssid:");
- at76_dbg_dump(DBG_MAC80211, priv->bssid, ETH_ALEN, "bssid:");
+ at76_dbg(DBG_RX_DATA, "%s received data packet: %s", netdev->name,
+ hex2str(skb->data, AT76_RX_HDRLEN));
- mutex_lock(&priv->mtx);
+ at76_dbg(DBG_RX_DATA_CONTENT, "rx packet: %s",
+ hex2str(skb->data + AT76_RX_HDRLEN, length));
- priv->channel = conf->channel->hw_value;
+ skb = at76_check_for_rx_frags(priv);
+ if (!skb)
+ return;
- if (is_valid_ether_addr(priv->bssid)) {
- at76_join(priv);
- ieee80211_wake_queues(priv->hw);
- } else {
- ieee80211_stop_queues(priv->hw);
- at76_start_monitor(priv);
- };
+ /* Atmel header and the FCS are already removed */
+ i802_11_hdr = (struct ieee80211_hdr_3addr *)skb->data;
- mutex_unlock(&priv->mtx);
+ skb->dev = netdev;
+ skb->ip_summed = CHECKSUM_NONE; /* TODO: should check CRC */
- return 0;
-}
+ if (is_broadcast_ether_addr(i802_11_hdr->addr1)) {
+ if (!compare_ether_addr(i802_11_hdr->addr1, netdev->broadcast))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ } else if (compare_ether_addr(i802_11_hdr->addr1, netdev->dev_addr))
+ skb->pkt_type = PACKET_OTHERHOST;
-static int at76_config_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_if_conf *conf)
-{
- struct at76_priv *priv = hw->priv;
+ at76_ieee80211_to_eth(skb, priv->iw_mode);
- at76_dbg_dump(DBG_MAC80211, conf->bssid, ETH_ALEN, "bssid:");
+ netdev->last_rx = jiffies;
+ netif_rx(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += length;
- mutex_lock(&priv->mtx);
+ return;
+}
- memcpy(priv->bssid, conf->bssid, ETH_ALEN);
-// memcpy(priv->essid, conf->ssid, conf->ssid_len);
-// priv->essid_size = conf->ssid_len;
+static void at76_rx_monitor_mode(struct at76_priv *priv)
+{
+ struct at76_rx_radiotap *rt;
+ u8 *payload;
+ int skblen;
+ struct net_device *netdev = priv->netdev;
+ struct at76_rx_buffer *buf =
+ (struct at76_rx_buffer *)priv->rx_skb->data;
+ /* length including the IEEE802.11 header and the trailing FCS,
+ but not at76_rx_buffer */
+ int length = le16_to_cpu(buf->wlength);
+ struct sk_buff *skb = priv->rx_skb;
+ struct net_device_stats *stats = &priv->stats;
- if (is_valid_ether_addr(priv->bssid)) {
- /* mac80211 is joining a bss */
- ieee80211_wake_queues(priv->hw);
- at76_join(priv);
- } else
- ieee80211_stop_queues(priv->hw);
+ if (length < IEEE80211_FCS_LEN) {
+ /* buffer contains no data */
+ at76_dbg(DBG_MONITOR_MODE,
+ "%s: MONITOR MODE: rx skb without data",
+ priv->netdev->name);
+ return;
+ }
- mutex_unlock(&priv->mtx);
+ skblen = sizeof(struct at76_rx_radiotap) + length;
- return 0;
+ skb = dev_alloc_skb(skblen);
+ if (!skb) {
+ printk(KERN_ERR "%s: MONITOR MODE: dev_alloc_skb for radiotap "
+ "header returned NULL\n", priv->netdev->name);
+ return;
+ }
+
+ skb_put(skb, skblen);
+
+ rt = (struct at76_rx_radiotap *)skb->data;
+ payload = skb->data + sizeof(struct at76_rx_radiotap);
+
+ rt->rt_hdr.it_version = 0;
+ rt->rt_hdr.it_pad = 0;
+ rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct at76_rx_radiotap));
+ rt->rt_hdr.it_present = cpu_to_le32(AT76_RX_RADIOTAP_PRESENT);
+
+ rt->rt_tsft = cpu_to_le64(le32_to_cpu(buf->rx_time));
+ rt->rt_rate = hw_rates[buf->rx_rate] & (~0x80);
+ rt->rt_signal = buf->rssi;
+ rt->rt_noise = buf->noise_level;
+ rt->rt_flags = IEEE80211_RADIOTAP_F_FCS;
+ if (buf->fragmentation)
+ rt->rt_flags |= IEEE80211_RADIOTAP_F_FRAG;
+
+ memcpy(payload, buf->packet, length);
+ skb->dev = netdev;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_reset_mac_header(skb);
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_802_2);
+
+ netdev->last_rx = jiffies;
+ netif_rx(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += length;
}
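
One detail of the monitor-mode path above: the device reports a rate index, the driver's hw_rates[] table stores values in 500 kbit/s units, presumably with bit 7 marking a basic rate, and radiotap expects the plain 500 kbit/s value, hence the `& (~0x80)`. A sketch with an assumed 802.11b table (values are illustrative, not necessarily the driver's):

#include <linux/kernel.h>
#include <linux/types.h>

/* Assumed 1/2/5.5/11 Mbit/s table in 500 kbit/s units, with 0x80 as the
 * basic-rate flag - illustrative values only. */
static const u8 example_hw_rates[] = { 0x82, 0x84, 0x0b, 0x16 };

static u8 radiotap_rate_from_index(unsigned int idx)
{
	if (idx >= ARRAY_SIZE(example_hw_rates))
		return 0;
	return example_hw_rates[idx] & ~0x80;	/* strip the basic-rate flag */
}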
-/* must be atomic */
-static void at76_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags, int mc_count,
- struct dev_addr_list *mc_list)
+/* Check if we spy on the sender address in buf and update stats */
+static void at76_iwspy_update(struct at76_priv *priv,
+ struct at76_rx_buffer *buf)
{
- struct at76_priv *priv = hw->priv;
- int flags;
-
- at76_dbg(DBG_MAC80211, "%s(): changed_flags=0x%08x "
- "total_flags=0x%08x mc_count=%d",
- __func__, changed_flags, *total_flags, mc_count);
+ struct ieee80211_hdr_3addr *hdr =
+ (struct ieee80211_hdr_3addr *)buf->packet;
+ struct iw_quality qual;
- flags = changed_flags & AT76_SUPPORTED_FILTERS;
- *total_flags = AT76_SUPPORTED_FILTERS;
+ /* We can only set the level here */
+ qual.updated = IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID;
+ qual.level = 0;
+ qual.noise = 0;
+ at76_calc_level(priv, buf, &qual);
- /* FIXME: access to priv->promisc should be protected with
- * priv->mtx, but it's impossible because this function needs to be
- * atomic */
+ spin_lock_bh(&priv->spy_spinlock);
- if (flags && !priv->promisc) {
- /* mac80211 wants us to enable promiscuous mode */
- priv->promisc = 1;
- } else if (!flags && priv->promisc) {
- /* we need to disable promiscuous mode */
- priv->promisc = 0;
- } else
- return;
+ if (priv->spy_data.spy_number > 0)
+ wireless_spy_update(priv->netdev, hdr->addr2, &qual);
- queue_work(hw->workqueue, &priv->work_set_promisc);
+ spin_unlock_bh(&priv->spy_spinlock);
}
-static int at76_set_key_oldfw(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- const u8 *local_address, const u8 *address,
- struct ieee80211_key_conf *key)
+static void at76_rx_tasklet(unsigned long param)
{
- struct at76_priv *priv = hw->priv;
-
- int i;
-
- at76_dbg(DBG_MAC80211, "%s(): cmd %d key->alg %d key->keyidx %d "
- "key->keylen %d",
- __func__, cmd, key->alg, key->keyidx, key->keylen);
+ struct urb *urb = (struct urb *)param;
+ struct at76_priv *priv = urb->context;
+ struct net_device *netdev = priv->netdev;
+ struct at76_rx_buffer *buf;
+ struct ieee80211_hdr_3addr *i802_11_hdr;
+ u16 frame_ctl;
- if (key->alg != ALG_WEP)
- return -EOPNOTSUPP;
+ if (priv->device_unplugged) {
+ at76_dbg(DBG_DEVSTART, "device unplugged");
+ if (urb)
+ at76_dbg(DBG_DEVSTART, "urb status %d", urb->status);
+ return;
+ }
- key->hw_key_idx = key->keyidx;
+ if (!priv->rx_skb || !netdev || !priv->rx_skb->data)
+ return;
- mutex_lock(&priv->mtx);
+ buf = (struct at76_rx_buffer *)priv->rx_skb->data;
- switch (cmd) {
- case SET_KEY:
- memcpy(priv->wep_keys[key->keyidx], key->key, key->keylen);
- priv->wep_keys_len[key->keyidx] = key->keylen;
+ i802_11_hdr = (struct ieee80211_hdr_3addr *)buf->packet;
- /* FIXME: find out how to do this properly */
- priv->wep_key_id = key->keyidx;
+ frame_ctl = le16_to_cpu(i802_11_hdr->frame_ctl);
- break;
- case DISABLE_KEY:
- default:
- priv->wep_keys_len[key->keyidx] = 0;
- break;
+ if (urb->status != 0) {
+ if (urb->status != -ENOENT && urb->status != -ECONNRESET)
+ at76_dbg(DBG_URB,
+ "%s %s: - nonzero Rx bulk status received: %d",
+ __func__, netdev->name, urb->status);
+ return;
}
- priv->wep_enabled = 0;
+ at76_dbg(DBG_RX_ATMEL_HDR,
+ "%s: rx frame: rate %d rssi %d noise %d link %d %s",
+ priv->netdev->name, buf->rx_rate, buf->rssi, buf->noise_level,
+ buf->link_quality, hex2str(i802_11_hdr, 48));
+ if (priv->iw_mode == IW_MODE_MONITOR) {
+ at76_rx_monitor_mode(priv);
+ goto exit;
+ }
- for (i = 0; i < WEP_KEYS; i++) {
- if (priv->wep_keys_len[i] != 0)
- priv->wep_enabled = 1;
+ /* there is a new bssid around, accept it: */
+ if (buf->newbss && priv->iw_mode == IW_MODE_ADHOC) {
+ at76_dbg(DBG_PROGRESS, "%s: rx newbss", netdev->name);
+ schedule_work(&priv->work_new_bss);
}
- at76_startup_device(priv);
+ switch (frame_ctl & IEEE80211_FCTL_FTYPE) {
+ case IEEE80211_FTYPE_DATA:
+ at76_rx_data(priv);
+ break;
- mutex_unlock(&priv->mtx);
+ case IEEE80211_FTYPE_MGMT:
+		/* jal: TODO: find out whether we can update iwspy on
+		   frames other than management (might depend on the
+		   radio chip / firmware version!) */
- return 0;
-}
+ at76_iwspy_update(priv, buf);
-static int at76_set_key_newfw(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- const u8 *local_address, const u8 *address,
- struct ieee80211_key_conf *key)
-{
- struct at76_priv *priv = hw->priv;
- int ret = -EOPNOTSUPP;
-
- at76_dbg(DBG_MAC80211, "%s(): cmd %d key->alg %d key->keyidx %d "
- "key->keylen %d",
- __func__, cmd, key->alg, key->keyidx, key->keylen);
+ at76_rx_mgmt(priv, buf);
+ break;
- mutex_lock(&priv->mtx);
+ case IEEE80211_FTYPE_CTL:
+ at76_dbg(DBG_RX_CTRL, "%s: ignored ctrl frame: %04x",
+ priv->netdev->name, frame_ctl);
+ break;
- priv->mib_buf.type = MIB_MAC_ENCRYPTION;
+ default:
+ printk(KERN_DEBUG "%s: ignoring frame with framectl 0x%04x\n",
+ priv->netdev->name, frame_ctl);
+ }
+exit:
+ at76_submit_rx_urb(priv);
+}
- if (cmd == DISABLE_KEY) {
- priv->mib_buf.size = CIPHER_KEY_LEN;
- priv->mib_buf.index = offsetof(struct mib_mac_encryption,
- cipher_default_keyvalue[key->keyidx]);
- memset(priv->mib_buf.data.data, 0, CIPHER_KEY_LEN);
- if (at76_set_mib(priv, &priv->mib_buf) != CMD_STATUS_COMPLETE)
- ret = -EOPNOTSUPP; /* -EIO would be probably better */
- else {
+/* Load firmware into kernel memory and parse it */
+static struct fwentry *at76_load_firmware(struct usb_device *udev,
+ enum board_type board_type)
+{
+ int ret;
+ char *str;
+ struct at76_fw_header *fwh;
+ struct fwentry *fwe = &firmwares[board_type];
- priv->keys[key->keyidx].cipher = CIPHER_NONE;
- priv->keys[key->keyidx].keylen = 0;
- };
- if (priv->default_group_key == key->keyidx)
- priv->default_group_key = 0xff;
+ mutex_lock(&fw_mutex);
- if (priv->default_pairwise_key == key->keyidx)
- priv->default_pairwise_key = 0xff;
- /* If default pairwise key is removed, fall back to
- * group key? */
- ret = 0;
+ if (fwe->loaded) {
+ at76_dbg(DBG_FW, "re-using previously loaded fw");
goto exit;
- };
-
- if (cmd == SET_KEY) {
- /* store key into MIB */
- priv->mib_buf.size = CIPHER_KEY_LEN;
- priv->mib_buf.index = offsetof(struct mib_mac_encryption,
- cipher_default_keyvalue[key->keyidx]);
- memset(priv->mib_buf.data.data, 0, CIPHER_KEY_LEN);
- memcpy(priv->mib_buf.data.data, key->key, key->keylen);
-
- switch (key->alg) {
- case ALG_WEP:
- if (key->keylen == 5) {
- priv->keys[key->keyidx].cipher =
- CIPHER_WEP64;
- priv->keys[key->keyidx].keylen = 8;
- } else if (key->keylen == 13) {
- priv->keys[key->keyidx].cipher =
- CIPHER_WEP128;
- /* Firmware needs this */
- priv->keys[key->keyidx].keylen = 8;
- } else {
- ret = -EOPNOTSUPP;
- goto exit;
- };
- break;
- case ALG_TKIP:
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- priv->keys[key->keyidx].cipher = CIPHER_TKIP;
- priv->keys[key->keyidx].keylen = 12;
- break;
+ }
- case ALG_CCMP:
- if (!at76_is_505a(priv->board_type)) {
- ret = -EOPNOTSUPP;
- goto exit;
- };
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- priv->keys[key->keyidx].cipher = CIPHER_CCMP;
- priv->keys[key->keyidx].keylen = 16;
- break;
+ at76_dbg(DBG_FW, "downloading firmware %s", fwe->fwname);
+ ret = request_firmware(&fwe->fw, fwe->fwname, &udev->dev);
+ if (ret < 0) {
+ dev_printk(KERN_ERR, &udev->dev, "firmware %s not found!\n",
+ fwe->fwname);
+ dev_printk(KERN_ERR, &udev->dev,
+ "you may need to download the firmware from "
+ "http://developer.berlios.de/projects/at76c503a/");
+ goto exit;
+ }
- default:
- ret = -EOPNOTSUPP;
- goto exit;
- };
-
- priv->mib_buf.data.data[38] = priv->keys[key->keyidx].cipher;
- priv->mib_buf.data.data[39] = 1; /* Taken from atmelwlandriver,
- not documented */
-
- if (is_valid_ether_addr(address))
- /* Pairwise key */
- priv->mib_buf.data.data[39] |= (KEY_PAIRWISE | KEY_TX);
- else if (is_broadcast_ether_addr(address))
- /* Group key */
- priv->mib_buf.data.data[39] |= (KEY_TX);
- else /* Key used only for transmission ??? */
- priv->mib_buf.data.data[39] |= (KEY_TX);
-
- if (at76_set_mib(priv, &priv->mib_buf) !=
- CMD_STATUS_COMPLETE) {
- ret = -EOPNOTSUPP; /* -EIO would be probably better */
- goto exit;
- };
+ at76_dbg(DBG_FW, "got it.");
+ fwh = (struct at76_fw_header *)(fwe->fw->data);
- if ((key->alg == ALG_TKIP) || (key->alg == ALG_CCMP))
- at76_reset_rsc(priv);
+ if (fwe->fw->size <= sizeof(*fwh)) {
+ dev_printk(KERN_ERR, &udev->dev,
+ "firmware is too short (0x%zx)\n", fwe->fw->size);
+ goto exit;
+ }
- key->hw_key_idx = key->keyidx;
+ /* CRC currently not checked */
+ fwe->board_type = le32_to_cpu(fwh->board_type);
+ if (fwe->board_type != board_type) {
+ dev_printk(KERN_ERR, &udev->dev,
+ "board type mismatch, requested %u, got %u\n",
+ board_type, fwe->board_type);
+ goto exit;
+ }
- /* Set up default keys */
- if (is_broadcast_ether_addr(address))
- priv->default_group_key = key->keyidx;
- if (is_valid_ether_addr(address))
- priv->default_pairwise_key = key->keyidx;
+ fwe->fw_version.major = fwh->major;
+ fwe->fw_version.minor = fwh->minor;
+ fwe->fw_version.patch = fwh->patch;
+ fwe->fw_version.build = fwh->build;
- /* Set up encryption MIBs */
+ str = (char *)fwh + le32_to_cpu(fwh->str_offset);
+ fwe->intfw = (u8 *)fwh + le32_to_cpu(fwh->int_fw_offset);
+ fwe->intfw_size = le32_to_cpu(fwh->int_fw_len);
+ fwe->extfw = (u8 *)fwh + le32_to_cpu(fwh->ext_fw_offset);
+ fwe->extfw_size = le32_to_cpu(fwh->ext_fw_len);
- /* first block of settings */
- priv->mib_buf.size = 3;
- priv->mib_buf.index = offsetof(struct mib_mac_encryption,
- privacy_invoked);
- priv->mib_buf.data.data[0] = 1; /* privacy_invoked */
- priv->mib_buf.data.data[1] = priv->default_pairwise_key;
- priv->mib_buf.data.data[2] = priv->default_group_key;
+ fwe->loaded = 1;
- ret = at76_set_mib(priv, &priv->mib_buf);
- if (ret != CMD_STATUS_COMPLETE)
- goto exit;
+ dev_printk(KERN_DEBUG, &udev->dev,
+ "using firmware %s (version %d.%d.%d-%d)\n",
+ fwe->fwname, fwh->major, fwh->minor, fwh->patch, fwh->build);
- /* second block of settings */
- priv->mib_buf.size = 3;
- priv->mib_buf.index = offsetof(struct mib_mac_encryption,
- exclude_unencrypted);
- priv->mib_buf.data.data[0] = 1; /* exclude_unencrypted */
- priv->mib_buf.data.data[1] = 0; /* wep_encryption_type */
- priv->mib_buf.data.data[2] = 0; /* ckip_key_permutation */
+ at76_dbg(DBG_DEVSTART, "board %u, int %d:%d, ext %d:%d", board_type,
+ le32_to_cpu(fwh->int_fw_offset), le32_to_cpu(fwh->int_fw_len),
+ le32_to_cpu(fwh->ext_fw_offset), le32_to_cpu(fwh->ext_fw_len));
+ at76_dbg(DBG_DEVSTART, "firmware id %s", str);
- ret = at76_set_mib(priv, &priv->mib_buf);
- if (ret != CMD_STATUS_COMPLETE)
- goto exit;
- ret = 0;
- };
exit:
- at76_dump_mib_mac_encryption(priv);
- mutex_unlock(&priv->mtx);
- return ret;
-}
-
-static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- const u8 *local_address, const u8 *address,
- struct ieee80211_key_conf *key)
-{
- struct at76_priv *priv = hw->priv;
-
- at76_dbg(DBG_MAC80211, "%s(): cmd %d key->alg %d key->keyidx %d "
- "key->keylen %d",
- __func__, cmd, key->alg, key->keyidx, key->keylen);
+ mutex_unlock(&fw_mutex);
- if (FIRMWARE_IS_WPA(priv->fw_version))
- return at76_set_key_newfw(hw, cmd, local_address, address, key);
+ if (fwe->loaded)
+ return fwe;
else
- return at76_set_key_oldfw(hw, cmd, local_address, address, key);
-
+ return NULL;
}
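
The loader above only checks that the blob is larger than the header before using the offsets and lengths stored in it; every later use of int_fw_offset/ext_fw_offset implicitly assumes those regions lie inside the data returned by request_firmware(). A hedged helper sketch for that kind of bounds check (not part of the driver):

#include <linux/firmware.h>
#include <linux/types.h>

/* Return true when [offset, offset + len) lies inside the firmware blob. */
static bool fw_region_ok(const struct firmware *fw, u32 offset, u32 len)
{
	return offset <= fw->size && len <= fw->size - offset;
}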
-static const struct ieee80211_ops at76_ops = {
- .tx = at76_mac80211_tx,
- .add_interface = at76_add_interface,
- .remove_interface = at76_remove_interface,
- .config = at76_config,
- .config_interface = at76_config_interface,
- .configure_filter = at76_configure_filter,
- .start = at76_mac80211_start,
- .stop = at76_mac80211_stop,
- .hw_scan = at76_hw_scan,
- .set_key = at76_set_key,
-};
-
/* Allocate network device and initialize private data */
static struct at76_priv *at76_alloc_new_device(struct usb_device *udev)
{
- struct ieee80211_hw *hw;
+ struct net_device *netdev;
struct at76_priv *priv;
+ int i;
- hw = ieee80211_alloc_hw(sizeof(struct at76_priv), &at76_ops);
- if (!hw) {
- printk(KERN_ERR DRIVER_NAME ": could not register"
- " ieee80211_hw\n");
+ /* allocate memory for our device state and initialize it */
+ netdev = alloc_etherdev(sizeof(struct at76_priv));
+ if (!netdev) {
+ dev_printk(KERN_ERR, &udev->dev, "out of memory\n");
return NULL;
}
- priv = hw->priv;
- priv->hw = hw;
+ priv = netdev_priv(netdev);
priv->udev = udev;
+ priv->netdev = netdev;
mutex_init(&priv->mtx);
+ INIT_WORK(&priv->work_assoc_done, at76_work_assoc_done);
+ INIT_WORK(&priv->work_join, at76_work_join);
+ INIT_WORK(&priv->work_new_bss, at76_work_new_bss);
+ INIT_WORK(&priv->work_start_scan, at76_work_start_scan);
INIT_WORK(&priv->work_set_promisc, at76_work_set_promisc);
INIT_WORK(&priv->work_submit_rx, at76_work_submit_rx);
- INIT_DELAYED_WORK(&priv->dwork_hw_scan, at76_dwork_hw_scan);
+ INIT_DELAYED_WORK(&priv->dwork_restart, at76_dwork_restart);
+ INIT_DELAYED_WORK(&priv->dwork_get_scan, at76_dwork_get_scan);
+ INIT_DELAYED_WORK(&priv->dwork_beacon, at76_dwork_beacon);
+ INIT_DELAYED_WORK(&priv->dwork_auth, at76_dwork_auth);
+ INIT_DELAYED_WORK(&priv->dwork_assoc, at76_dwork_assoc);
+
+ spin_lock_init(&priv->mgmt_spinlock);
+ priv->next_mgmt_bulk = NULL;
+ priv->mac_state = MAC_INIT;
+
+ /* initialize empty BSS list */
+ priv->curr_bss = NULL;
+ INIT_LIST_HEAD(&priv->bss_list);
+ spin_lock_init(&priv->bss_list_spinlock);
+
+ init_timer(&priv->bss_list_timer);
+ priv->bss_list_timer.data = (unsigned long)priv;
+ priv->bss_list_timer.function = at76_bss_list_timeout;
+
+ spin_lock_init(&priv->spy_spinlock);
+
+ /* mark all rx data entries as unused */
+ for (i = 0; i < NR_RX_DATA_BUF; i++)
+ priv->rx_data[i].skb = NULL;
priv->rx_tasklet.func = at76_rx_tasklet;
priv->rx_tasklet.data = 0;
@@ -2371,9 +5197,6 @@ static struct at76_priv *at76_alloc_new_device(struct usb_device *udev)
priv->pm_mode = AT76_PM_OFF;
priv->pm_period = 0;
- /* unit us */
- priv->hw->channel_change_time = 100000;
-
return priv;
}
@@ -2436,42 +5259,11 @@ static int at76_alloc_urbs(struct at76_priv *priv,
return 0;
}
-static struct ieee80211_rate at76_rates[] = {
- { .bitrate = 10, .hw_value = TX_RATE_1MBIT, },
- { .bitrate = 20, .hw_value = TX_RATE_2MBIT, },
- { .bitrate = 55, .hw_value = TX_RATE_5_5MBIT, },
- { .bitrate = 110, .hw_value = TX_RATE_11MBIT, },
-};
-
-static struct ieee80211_channel at76_channels[] = {
- { .center_freq = 2412, .hw_value = 1 },
- { .center_freq = 2417, .hw_value = 2 },
- { .center_freq = 2422, .hw_value = 3 },
- { .center_freq = 2427, .hw_value = 4 },
- { .center_freq = 2432, .hw_value = 5 },
- { .center_freq = 2437, .hw_value = 6 },
- { .center_freq = 2442, .hw_value = 7 },
- { .center_freq = 2447, .hw_value = 8 },
- { .center_freq = 2452, .hw_value = 9 },
- { .center_freq = 2457, .hw_value = 10 },
- { .center_freq = 2462, .hw_value = 11 },
- { .center_freq = 2467, .hw_value = 12 },
- { .center_freq = 2472, .hw_value = 13 },
- { .center_freq = 2484, .hw_value = 14 }
-};
-
-static struct ieee80211_supported_band at76_supported_band = {
- .channels = at76_channels,
- .n_channels = ARRAY_SIZE(at76_channels),
- .bitrates = at76_rates,
- .n_bitrates = ARRAY_SIZE(at76_rates),
-};
-
/* Register network device and initialize the hardware */
static int at76_init_new_device(struct at76_priv *priv,
struct usb_interface *interface)
{
- struct device *dev = &interface->dev;
+ struct net_device *netdev = priv->netdev;
int ret;
/* set up the endpoint information */
@@ -2487,11 +5279,14 @@ static int at76_init_new_device(struct at76_priv *priv,
/* MAC address */
ret = at76_get_hw_config(priv);
if (ret < 0) {
- dev_err(dev, "cannot get MAC address\n");
+ dev_printk(KERN_ERR, &interface->dev,
+ "cannot get MAC address\n");
goto exit;
}
priv->domain = at76_get_reg_domain(priv->regulatory_domain);
+ /* init. netdev->dev_addr */
+ memcpy(netdev->dev_addr, priv->mac_addr, ETH_ALEN);
priv->channel = DEF_CHANNEL;
priv->iw_mode = IW_MODE_INFRA;
@@ -2501,54 +5296,47 @@ static int at76_init_new_device(struct at76_priv *priv,
priv->txrate = TX_RATE_AUTO;
priv->preamble_type = PREAMBLE_TYPE_LONG;
priv->beacon_period = 100;
+ priv->beacons_last_qual = jiffies;
priv->auth_mode = WLAN_AUTH_OPEN;
priv->scan_min_time = DEF_SCAN_MIN_TIME;
priv->scan_max_time = DEF_SCAN_MAX_TIME;
priv->scan_mode = SCAN_TYPE_ACTIVE;
- priv->default_pairwise_key = 0xff;
- priv->default_group_key = 0xff;
-
- /* mac80211 initialisation */
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band;
- if (FIRMWARE_IS_WPA(priv->fw_version) &&
- (at76_is_503rfmd(priv->board_type) ||
- at76_is_505(priv->board_type)))
- priv->hw->flags = IEEE80211_HW_SIGNAL_UNSPEC;
- else
- priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_UNSPEC;
-
- priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
-
- SET_IEEE80211_DEV(priv->hw, &interface->dev);
- SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
-
- ret = ieee80211_register_hw(priv->hw);
+ netdev->flags &= ~IFF_MULTICAST; /* not yet or never */
+ netdev->open = at76_open;
+ netdev->stop = at76_stop;
+ netdev->get_stats = at76_get_stats;
+ netdev->ethtool_ops = &at76_ethtool_ops;
+
+ /* Add pointers to enable iwspy support. */
+ priv->wireless_data.spy_data = &priv->spy_data;
+ netdev->wireless_data = &priv->wireless_data;
+
+ netdev->hard_start_xmit = at76_tx;
+ netdev->tx_timeout = at76_tx_timeout;
+ netdev->watchdog_timeo = 2 * HZ;
+ netdev->wireless_handlers = &at76_handler_def;
+ netdev->set_multicast_list = at76_set_multicast;
+ netdev->set_mac_address = at76_set_mac_address;
+ dev_alloc_name(netdev, "wlan%d");
+
+ ret = register_netdev(priv->netdev);
if (ret) {
- dev_err(dev, "cannot register mac80211 hw (status %d)!\n", ret);
+ dev_printk(KERN_ERR, &interface->dev,
+ "cannot register netdevice (status %d)!\n", ret);
goto exit;
}
+ priv->netdev_registered = 1;
- priv->mac80211_registered = 1;
+ printk(KERN_INFO "%s: USB %s, MAC %s, firmware %d.%d.%d-%d\n",
+ netdev->name, dev_name(&interface->dev), mac2str(priv->mac_addr),
+ priv->fw_version.major, priv->fw_version.minor,
+ priv->fw_version.patch, priv->fw_version.build);
+ printk(KERN_INFO "%s: regulatory domain 0x%02x: %s\n", netdev->name,
+ priv->regulatory_domain, priv->domain->name);
- dev_info(dev, "%s: USB %s, MAC %s, firmware %d.%d.%d-%d\n",
- wiphy_name(priv->hw->wiphy),
- dev_name(&interface->dev), mac2str(priv->mac_addr),
- priv->fw_version.major, priv->fw_version.minor,
- priv->fw_version.patch, priv->fw_version.build);
- dev_info(dev, "%s: regulatory domain 0x%02x: %s\n",
- wiphy_name(priv->hw->wiphy),
- priv->regulatory_domain, priv->domain->name);
- dev_info(dev, "%s: WPA support: ", wiphy_name(priv->hw->wiphy));
- if (!FIRMWARE_IS_WPA(priv->fw_version))
- printk("none\n");
- else {
- if (!at76_is_505a(priv->board_type))
- printk("TKIP\n");
- else
- printk("TKIP, AES/CCMP\n");
- };
+ /* we let this timer run the whole time this driver instance lives */
+ mod_timer(&priv->bss_list_timer, jiffies + BSS_LIST_TIMEOUT);
exit:
return ret;
@@ -2556,13 +5344,15 @@ exit:
static void at76_delete_device(struct at76_priv *priv)
{
+ int i;
+
at76_dbg(DBG_PROC_ENTRY, "%s: ENTER", __func__);
/* The device is gone, don't bother turning it off */
priv->device_unplugged = 1;
- if (priv->mac80211_registered)
- ieee80211_unregister_hw(priv->hw);
+ if (priv->netdev_registered)
+ unregister_netdev(priv->netdev);
/* assuming we used keventd, it must quiesce too */
flush_scheduled_work();
@@ -2583,11 +5373,25 @@ static void at76_delete_device(struct at76_priv *priv)
if (priv->rx_skb)
kfree_skb(priv->rx_skb);
+ at76_free_bss_list(priv);
+ del_timer_sync(&priv->bss_list_timer);
+ cancel_delayed_work(&priv->dwork_get_scan);
+ cancel_delayed_work(&priv->dwork_beacon);
+ cancel_delayed_work(&priv->dwork_auth);
+ cancel_delayed_work(&priv->dwork_assoc);
+
+ if (priv->mac_state == MAC_CONNECTED)
+ at76_iwevent_bss_disconnect(priv->netdev);
+
+ for (i = 0; i < NR_RX_DATA_BUF; i++)
+ if (priv->rx_data[i].skb) {
+ dev_kfree_skb(priv->rx_data[i].skb);
+ priv->rx_data[i].skb = NULL;
+ }
usb_put_dev(priv->udev);
- at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw",
- __func__);
- ieee80211_free_hw(priv->hw);
+ at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/netdev", __func__);
+ free_netdev(priv->netdev); /* priv is in netdev */
at76_dbg(DBG_PROC_ENTRY, "%s: EXIT", __func__);
}
@@ -2621,8 +5425,8 @@ static int at76_probe(struct usb_interface *interface,
we get 204 with 2.4.23, Fiberline FL-WL240u (505A+RFMD2958) ??? */
if (op_mode == OPMODE_HW_CONFIG_MODE) {
- dev_err(&interface->dev,
- "cannot handle a device in HW_CONFIG_MODE\n");
+ dev_printk(KERN_ERR, &interface->dev,
+ "cannot handle a device in HW_CONFIG_MODE\n");
ret = -EBUSY;
goto error;
}
@@ -2630,12 +5434,13 @@ static int at76_probe(struct usb_interface *interface,
if (op_mode != OPMODE_NORMAL_NIC_WITH_FLASH
&& op_mode != OPMODE_NORMAL_NIC_WITHOUT_FLASH) {
/* download internal firmware part */
- dev_dbg(&interface->dev, "downloading internal firmware\n");
+ dev_printk(KERN_DEBUG, &interface->dev,
+ "downloading internal firmware\n");
ret = at76_load_internal_fw(udev, fwe);
if (ret < 0) {
- dev_err(&interface->dev,
- "error %d downloading internal firmware\n",
- ret);
+ dev_printk(KERN_ERR, &interface->dev,
+ "error %d downloading internal firmware\n",
+ ret);
goto error;
}
usb_put_dev(udev);
@@ -2660,7 +5465,8 @@ static int at76_probe(struct usb_interface *interface,
need_ext_fw = 1;
if (need_ext_fw) {
- dev_dbg(&interface->dev, "downloading external firmware\n");
+ dev_printk(KERN_DEBUG, &interface->dev,
+ "downloading external firmware\n");
ret = at76_load_external_fw(udev, fwe);
if (ret)
@@ -2669,8 +5475,8 @@ static int at76_probe(struct usb_interface *interface,
/* Re-check firmware version */
ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv));
if (ret < 0) {
- dev_err(&interface->dev,
- "error %d getting firmware version\n", ret);
+ dev_printk(KERN_ERR, &interface->dev,
+ "error %d getting firmware version\n", ret);
goto error;
}
}
@@ -2681,6 +5487,7 @@ static int at76_probe(struct usb_interface *interface,
goto error;
}
+ SET_NETDEV_DEV(priv->netdev, &interface->dev);
usb_set_intfdata(interface, priv);
memcpy(&priv->fw_version, &fwv, sizeof(struct mib_fw_version));
@@ -2708,7 +5515,7 @@ static void at76_disconnect(struct usb_interface *interface)
if (!priv)
return;
- printk(KERN_INFO "%s: disconnecting\n", wiphy_name(priv->hw->wiphy));
+ printk(KERN_INFO "%s: disconnecting\n", priv->netdev->name);
at76_delete_device(priv);
dev_printk(KERN_INFO, &interface->dev, "disconnected\n");
}
@@ -2764,8 +5571,5 @@ MODULE_AUTHOR("Alex <alex@foogod.com>");
MODULE_AUTHOR("Nick Jones");
MODULE_AUTHOR("Balint Seeber <n0_5p4m_p13453@hotmail.com>");
MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>");
-MODULE_AUTHOR("Guido Guenther <agx@sigxcpu.org>");
-MODULE_AUTHOR("Kalle Valo <kalle.valo@iki.fi>");
-MODULE_AUTHOR("Milan Plzik <milan.plzik@gmail.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/at76_usb/at76_usb.h b/drivers/staging/at76_usb/at76_usb.h
index 8bb352f16d4..b20be9da1fa 100644
--- a/drivers/staging/at76_usb/at76_usb.h
+++ b/drivers/staging/at76_usb/at76_usb.h
@@ -34,6 +34,23 @@ enum board_type {
BOARD_505AMX = 8
};
+/* our private ioctl's */
+/* preamble length (0 - long, 1 - short, 2 - auto) */
+#define AT76_SET_SHORT_PREAMBLE (SIOCIWFIRSTPRIV + 0)
+#define AT76_GET_SHORT_PREAMBLE (SIOCIWFIRSTPRIV + 1)
+/* which debug channels are enabled */
+#define AT76_SET_DEBUG (SIOCIWFIRSTPRIV + 2)
+#define AT76_GET_DEBUG (SIOCIWFIRSTPRIV + 3)
+/* power save mode (incl. the Atmel proprietary smart save mode) */
+#define AT76_SET_POWERSAVE_MODE (SIOCIWFIRSTPRIV + 4)
+#define AT76_GET_POWERSAVE_MODE (SIOCIWFIRSTPRIV + 5)
+/* min and max channel times for scan */
+#define AT76_SET_SCAN_TIMES (SIOCIWFIRSTPRIV + 6)
+#define AT76_GET_SCAN_TIMES (SIOCIWFIRSTPRIV + 7)
+/* scan mode (0 - active, 1 - passive) */
+#define AT76_SET_SCAN_MODE (SIOCIWFIRSTPRIV + 8)
+#define AT76_GET_SCAN_MODE (SIOCIWFIRSTPRIV + 9)
+
#define CMD_STATUS_IDLE 0x00
#define CMD_STATUS_COMPLETE 0x01
#define CMD_STATUS_UNKNOWN 0x02
@@ -65,7 +82,6 @@ enum board_type {
#define MIB_MAC 0x03
#define MIB_MAC_MGMT 0x05
#define MIB_MAC_WEP 0x06
-#define MIB_MAC_ENCRYPTION 0x06
#define MIB_PHY 0x07
#define MIB_FW_VERSION 0x08
#define MIB_MDOMAIN 0x09
@@ -90,26 +106,6 @@ enum board_type {
#define AT76_PM_ON 2
#define AT76_PM_SMART 3
-/* cipher values for encryption keys */
-#define CIPHER_NONE 0 /* this value is only guessed */
-#define CIPHER_WEP64 1
-#define CIPHER_TKIP 2
-#define CIPHER_CCMP 3
-#define CIPHER_CCX 4 /* for consistency sake only */
-#define CIPHER_WEP128 5
-
-/* bit flags key types for encryption keys */
-#define KEY_PAIRWISE 2
-#define KEY_TX 4
-
-#define CIPHER_KEYS (4)
-#define CIPHER_KEY_LEN (40)
-
-struct key_config {
- u8 cipher;
- u8 keylen;
-};
-
struct hwcfg_r505 {
u8 cr39_values[14];
u8 reserved1[14];
@@ -151,9 +147,6 @@ union at76_hwcfg {
#define WEP_SMALL_KEY_LEN (40 / 8)
#define WEP_LARGE_KEY_LEN (104 / 8)
-#define WEP_KEYS (4)
-
-
struct at76_card_config {
u8 exclude_unencrypted;
@@ -168,7 +161,7 @@ struct at76_card_config {
u8 privacy_invoked;
u8 wep_default_key_id; /* 0..3 */
u8 current_ssid[32];
- u8 wep_default_key_value[4][WEP_LARGE_KEY_LEN];
+ u8 wep_default_key_value[4][WEP_KEY_LEN];
u8 ssid_len;
u8 short_preamble;
__le16 beacon_period;
@@ -193,7 +186,7 @@ struct at76_rx_buffer {
u8 link_quality;
u8 noise_level;
__le32 rx_time;
- u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
+ u8 packet[IEEE80211_FRAME_LEN + IEEE80211_FCS_LEN];
} __attribute__((packed));
/* Length of Atmel-specific Tx header before 802.11 frame */
@@ -203,11 +196,8 @@ struct at76_tx_buffer {
__le16 wlength;
u8 tx_rate;
u8 padding;
- u8 key_id;
- u8 cipher_type;
- u8 cipher_length;
- u8 reserved;
- u8 packet[IEEE80211_MAX_FRAG_THRESHOLD];
+ u8 reserved[4];
+ u8 packet[IEEE80211_FRAME_LEN + IEEE80211_FCS_LEN];
} __attribute__((packed));
/* defines for scan_type below */
@@ -254,7 +244,6 @@ struct set_mib_buffer {
u8 byte;
__le16 word;
u8 addr[ETH_ALEN];
- u8 data[256]; /* we need more space for mib_mac_encryption */
} data;
} __attribute__((packed));
@@ -328,24 +317,10 @@ struct mib_mac_wep {
u8 exclude_unencrypted;
__le32 wep_icv_error_count;
__le32 wep_excluded_count;
- u8 wep_default_keyvalue[WEP_KEYS][WEP_LARGE_KEY_LEN];
+ u8 wep_default_keyvalue[WEP_KEYS][WEP_KEY_LEN];
u8 encryption_level; /* 1 for 40bit, 2 for 104bit encryption */
} __attribute__((packed));
-struct mib_mac_encryption {
- u8 cipher_default_keyvalue[CIPHER_KEYS][CIPHER_KEY_LEN];
- u8 tkip_bssid[6];
- u8 privacy_invoked;
- u8 cipher_default_key_id;
- u8 cipher_default_group_key_id;
- u8 exclude_unencrypted;
- u8 wep_encryption_type;
- u8 ckip_key_permutation; /* bool */
- __le32 wep_icv_error_count;
- __le32 wep_excluded_count;
- u8 key_rsc[CIPHER_KEYS][8];
-} __attribute__((packed));
-
struct mib_phy {
__le32 ed_threshold;
@@ -389,6 +364,16 @@ struct at76_fw_header {
__le32 ext_fw_len; /* external firmware image length */
} __attribute__((packed));
+enum mac_state {
+ MAC_INIT,
+ MAC_SCANNING,
+ MAC_AUTH,
+ MAC_ASSOC,
+ MAC_JOINING,
+ MAC_CONNECTED,
+ MAC_OWN_IBSS
+};
+
/* a description of a regulatory domain and the allowed channels */
struct reg_domain {
u16 code;
@@ -396,6 +381,47 @@ struct reg_domain {
u32 channel_map; /* if bit N is set, channel (N+1) is allowed */
};
+/* How long we keep a (I)BSS in the bss_list, in jiffies.
+   This should be long enough for the user to retrieve the table
+   (e.g. with iwlist) after the device has started, because entries from
+   channels other than the one the device locks on are removed, too */
+#define BSS_LIST_TIMEOUT (120 * HZ)
+/* struct to store BSS info found during scan */
+#define BSS_LIST_MAX_RATE_LEN 32 /* 32 rates should be enough ... */
+
+struct bss_info {
+ struct list_head list;
+
+ u8 bssid[ETH_ALEN]; /* bssid */
+ u8 ssid[IW_ESSID_MAX_SIZE]; /* essid */
+ u8 ssid_len; /* length of ssid above */
+ u8 channel;
+ u16 capa; /* BSS capabilities */
+ u16 beacon_interval; /* beacon interval, Kus (1024 microseconds) */
+ u8 rates[BSS_LIST_MAX_RATE_LEN]; /* supported rates in units of
+ 500 kbps, ORed with 0x80 for
+ basic rates */
+ u8 rates_len;
+
+ /* quality of received beacon */
+ u8 rssi;
+ u8 link_qual;
+ u8 noise_level;
+
+ unsigned long last_rx; /* time (jiffies) of last beacon received */
+};
+
+/* a rx data buffer to collect rx fragments */
+struct rx_data_buf {
+ u8 sender[ETH_ALEN]; /* sender address */
+ u16 seqnr; /* sequence number */
+ u16 fragnr; /* last fragment received */
+ unsigned long last_rx; /* jiffies of last rx */
+ struct sk_buff *skb; /* == NULL if entry is free */
+};
+
+#define NR_RX_DATA_BUF 8
+
/* Data for one loaded firmware file */
struct fwentry {
const char *const fwname;
@@ -412,9 +438,11 @@ struct fwentry {
struct at76_priv {
struct usb_device *udev; /* USB device pointer */
+ struct net_device *netdev; /* net device pointer */
+ struct net_device_stats stats; /* net device stats */
+ struct iw_statistics wstats; /* wireless stats */
struct sk_buff *rx_skb; /* skbuff for receiving data */
- struct sk_buff *tx_skb; /* skbuff for transmitting data */
void *bulk_out_buffer; /* buffer for sending data */
struct urb *tx_urb; /* URB for sending data */
@@ -426,17 +454,26 @@ struct at76_priv {
struct mutex mtx; /* locks this structure */
/* work queues */
+ struct work_struct work_assoc_done;
+ struct work_struct work_join;
+ struct work_struct work_new_bss;
+ struct work_struct work_start_scan;
struct work_struct work_set_promisc;
struct work_struct work_submit_rx;
- struct delayed_work dwork_hw_scan;
+ struct delayed_work dwork_restart;
+ struct delayed_work dwork_get_scan;
+ struct delayed_work dwork_beacon;
+ struct delayed_work dwork_auth;
+ struct delayed_work dwork_assoc;
struct tasklet_struct rx_tasklet;
/* the WEP stuff */
int wep_enabled; /* 1 if WEP is enabled */
int wep_key_id; /* key id to be used */
- u8 wep_keys[WEP_KEYS][WEP_LARGE_KEY_LEN]; /* WEP keys */
- u8 wep_keys_len[WEP_KEYS]; /* length of WEP keys */
+ u8 wep_keys[WEP_KEYS][WEP_KEY_LEN]; /* the four WEP keys,
+ 5 or 13 bytes are used */
+ u8 wep_keys_len[WEP_KEYS]; /* the length of the above keys */
int channel;
int iw_mode;
@@ -458,13 +495,44 @@ struct at76_priv {
int scan_mode; /* SCAN_TYPE_ACTIVE, SCAN_TYPE_PASSIVE */
int scan_need_any; /* if set, need to scan for any ESSID */
+ /* the list we got from scanning */
+ spinlock_t bss_list_spinlock; /* protects bss_list operations */
+ struct list_head bss_list; /* list of BSS we got beacons from */
+ struct timer_list bss_list_timer; /* timer to purge old entries
+ from bss_list */
+ struct bss_info *curr_bss; /* current BSS */
u16 assoc_id; /* current association ID, if associated */
+ u8 wanted_bssid[ETH_ALEN];
+ int wanted_bssid_valid; /* != 0 if wanted_bssid is to be used */
+
+ /* some data for infrastructure mode only */
+ spinlock_t mgmt_spinlock; /* this spinlock protects access to
+ next_mgmt_bulk */
+
+ struct at76_tx_buffer *next_mgmt_bulk; /* pending management msg to
+ send via bulk out */
+ enum mac_state mac_state;
+ enum {
+ SCAN_IDLE,
+ SCAN_IN_PROGRESS,
+ SCAN_COMPLETED
+ } scan_state;
+ time_t last_scan;
+
+ int retries; /* remaining retries in case of timeout when
+ * sending AuthReq or AssocReq */
u8 pm_mode; /* power management mode */
u32 pm_period; /* power management period in microseconds */
struct reg_domain const *domain; /* reg domain description */
+ /* iwspy support */
+ spinlock_t spy_spinlock;
+ struct iw_spy_data spy_data;
+
+ struct iw_public_data wireless_data;
+
/* These fields contain HW config provided by the device (not all of
* these fields are used by all board types) */
u8 mac_addr[ETH_ALEN];
@@ -472,6 +540,9 @@ struct at76_priv {
struct at76_card_config card_config;
+ /* store rx fragments until complete */
+ struct rx_data_buf rx_data[NR_RX_DATA_BUF];
+
enum board_type board_type;
struct mib_fw_version fw_version;
@@ -479,20 +550,58 @@ struct at76_priv {
unsigned int netdev_registered:1;
struct set_mib_buffer mib_buf; /* global buffer for set_mib calls */
+ /* beacon counting */
int beacon_period; /* period of mgmt beacons, Kus */
+ int beacons_received;
+ unsigned long beacons_last_qual; /* time we restarted counting
+ beacons */
+};
- struct ieee80211_hw *hw;
- int mac80211_registered;
-
- struct key_config keys[4]; /* installed key types */
- u8 default_pairwise_key;
- u8 default_group_key;
+struct at76_rx_radiotap {
+ struct ieee80211_radiotap_header rt_hdr;
+ __le64 rt_tsft;
+ u8 rt_flags;
+ u8 rt_rate;
+ s8 rt_signal;
+ s8 rt_noise;
};
-#define AT76_SUPPORTED_FILTERS FIF_PROMISC_IN_BSS
+#define AT76_RX_RADIOTAP_PRESENT \
+ ((1 << IEEE80211_RADIOTAP_TSFT) | \
+ (1 << IEEE80211_RADIOTAP_FLAGS) | \
+ (1 << IEEE80211_RADIOTAP_RATE) | \
+ (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) | \
+ (1 << IEEE80211_RADIOTAP_DB_ANTNOISE))
+
+#define BEACON_MAX_DATA_LENGTH 1500
+
+/* the maximum size of an AssocReq packet */
+#define ASSOCREQ_MAX_SIZE \
+ (AT76_TX_HDRLEN + sizeof(struct ieee80211_assoc_request) + \
+ 1 + 1 + IW_ESSID_MAX_SIZE + 1 + 1 + 4)
+
+/* for shared secret auth, add the challenge text size */
+#define AUTH_FRAME_SIZE (AT76_TX_HDRLEN + sizeof(struct ieee80211_auth))
+
+/* Maximal number of AuthReq retries */
+#define AUTH_RETRIES 3
+/* Maximal number of AssocReq retries */
+#define ASSOC_RETRIES 3
+
+/* Beacon timeout in managed mode when we are connected */
+#define BEACON_TIMEOUT (10 * HZ)
+
+/* Timeout for authentication response */
+#define AUTH_TIMEOUT (1 * HZ)
+
+/* Timeout for association response */
+#define ASSOC_TIMEOUT (1 * HZ)
+
+/* Polling interval when scan is running */
#define SCAN_POLL_INTERVAL (HZ / 4)
+/* Command completion timeout */
#define CMD_COMPLETION_TIMEOUT (5 * HZ)
#define DEF_RTS_THRESHOLD 1536
@@ -502,6 +611,8 @@ struct at76_priv {
#define DEF_SCAN_MIN_TIME 10
#define DEF_SCAN_MAX_TIME 120
+#define MAX_RTS_THRESHOLD (MAX_FRAG_THRESHOLD + 1)
+
/* the max padding size for tx in bytes (see calc_padding) */
#define MAX_PADDING_SIZE 53
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 5ffe269c238..ab69c1bf36a 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -622,7 +622,7 @@ static int set_ctrl_bits(void)
}
/* sets ctrl & data port bits according to current signals values */
-static void set_bits(void)
+static void panel_set_bits(void)
{
set_data_bits();
set_ctrl_bits();
@@ -707,12 +707,12 @@ static void lcd_send_serial(int byte)
*/
for (bit = 0; bit < 8; bit++) {
bits.cl = BIT_CLR; /* CLK low */
- set_bits();
+ panel_set_bits();
bits.da = byte & 1;
- set_bits();
+ panel_set_bits();
udelay(2); /* maintain the data during 2 us before CLK up */
bits.cl = BIT_SET; /* CLK high */
- set_bits();
+ panel_set_bits();
udelay(1); /* maintain the strobe during 1 us */
byte >>= 1;
}
@@ -727,7 +727,7 @@ static void lcd_backlight(int on)
	/* The backlight is activated by setting the AUTOFEED line to +5V */
spin_lock(&pprt_lock);
bits.bl = on;
- set_bits();
+ panel_set_bits();
spin_unlock(&pprt_lock);
}
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 8bcde8cde55..b2ceb4aff23 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_USB_MON) += mon/
obj-$(CONFIG_PCI) += host/
obj-$(CONFIG_USB_EHCI_HCD) += host/
obj-$(CONFIG_USB_ISP116X_HCD) += host/
+obj-$(CONFIG_USB_ISP1760_HCD) += host/
obj-$(CONFIG_USB_OHCI_HCD) += host/
obj-$(CONFIG_USB_UHCI_HCD) += host/
obj-$(CONFIG_USB_FHCI_HCD) += host/
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 97ba4a985ed..326dd7f65ee 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1349,9 +1349,6 @@ static struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
- { USB_DEVICE(0x0e8d, 0x3329), /* i-blue 747, Qstarz BT-Q1000, Holux M-241 */
- .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
- },
{ USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index d6c5bcd4006..d701bf4698d 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1622,6 +1622,8 @@ static int qe_ep_disable(struct usb_ep *_ep)
nuke(ep, -ESHUTDOWN);
ep->desc = NULL;
ep->stopped = 1;
+ ep->tx_req = NULL;
+ qe_ep_reset(udc, ep->epnum);
spin_unlock_irqrestore(&udc->lock, flags);
cpm_muram_free(cpm_muram_offset(ep->rxbase));
@@ -1681,14 +1683,11 @@ static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
kfree(req);
}
-/* queues (submits) an I/O request to an endpoint */
-static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
- gfp_t gfp_flags)
+static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
{
struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
struct qe_req *req = container_of(_req, struct qe_req, req);
struct qe_udc *udc;
- unsigned long flags;
int reval;
udc = ep->udc;
@@ -1732,7 +1731,7 @@ static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
list_add_tail(&req->queue, &ep->queue);
dev_vdbg(udc->dev, "gadget have request in %s! %d\n",
ep->name, req->req.length);
- spin_lock_irqsave(&udc->lock, flags);
+
/* push the request to device */
if (ep_is_in(ep))
reval = ep_req_send(ep, req);
@@ -1748,11 +1747,24 @@ static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
if (ep->dir == USB_DIR_OUT)
reval = ep_req_receive(ep, req);
- spin_unlock_irqrestore(&udc->lock, flags);
-
return 0;
}
+/* queues (submits) an I/O request to an endpoint */
+static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
+ struct qe_udc *udc = ep->udc;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ ret = __qe_ep_queue(_ep, _req);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return ret;
+}
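
The fsl_qe_udc change above follows a common locking pattern: qe_ep_queue() is split into a lock-free __qe_ep_queue() plus a thin wrapper that takes udc->lock, so ch9getstatus(), which already holds the lock, can queue the ep0 request without taking it twice. A generic sketch of the pattern with hypothetical names:

#include <linux/spinlock.h>

struct example_udc {
	spinlock_t lock;
	/* ... state protected by 'lock' ... */
};

/* Caller must already hold udc->lock. */
static int __example_queue(struct example_udc *udc)
{
	/* touch the lists/hardware protected by udc->lock */
	return 0;
}

/* Public entry point: takes the lock, then defers to the __ variant. */
static int example_queue(struct example_udc *udc)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&udc->lock, flags);
	ret = __example_queue(udc);
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}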
+
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
@@ -2008,7 +2020,7 @@ static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
udc->ep0_dir = USB_DIR_IN;
/* data phase */
- status = qe_ep_queue(&ep->ep, &req->req, GFP_ATOMIC);
+ status = __qe_ep_queue(&ep->ep, &req->req);
if (status == 0)
return;
@@ -2151,6 +2163,9 @@ static int reset_irq(struct qe_udc *udc)
{
unsigned char i;
+ if (udc->usb_state == USB_STATE_DEFAULT)
+ return 0;
+
qe_usb_disable();
out_8(&udc->usb_regs->usb_usadr, 0);
@@ -2442,8 +2457,12 @@ static int __devinit qe_udc_reg_init(struct qe_udc *udc)
struct usb_ctlr __iomem *qe_usbregs;
qe_usbregs = udc->usb_regs;
- /* Init the usb register */
+ /* Spec says that we must enable the USB controller to change mode. */
out_8(&qe_usbregs->usb_usmod, 0x01);
+ /* Mode changed, now disable it, since muram isn't initialized yet. */
+ out_8(&qe_usbregs->usb_usmod, 0x00);
+
+ /* Initialize the rest. */
out_be16(&qe_usbregs->usb_usbmr, 0);
out_8(&qe_usbregs->usb_uscom, 0);
out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
@@ -2604,6 +2623,10 @@ static int __devinit qe_udc_probe(struct of_device *ofdev,
(unsigned long)udc_controller);
/* request irq and disable DR */
udc_controller->usb_irq = irq_of_parse_and_map(np, 0);
+ if (!udc_controller->usb_irq) {
+ ret = -EINVAL;
+ goto err_noirq;
+ }
ret = request_irq(udc_controller->usb_irq, qe_udc_irq, 0,
driver_name, udc_controller);
@@ -2625,6 +2648,8 @@ static int __devinit qe_udc_probe(struct of_device *ofdev,
err6:
free_irq(udc_controller->usb_irq, udc_controller);
err5:
+ irq_dispose_mapping(udc_controller->usb_irq);
+err_noirq:
if (udc_controller->nullmap) {
dma_unmap_single(udc_controller->gadget.dev.parent,
udc_controller->nullp, 256,
@@ -2648,7 +2673,7 @@ err2:
iounmap(udc_controller->usb_regs);
err1:
kfree(udc_controller);
-
+ udc_controller = NULL;
return ret;
}
@@ -2710,6 +2735,7 @@ static int __devexit qe_udc_remove(struct of_device *ofdev)
kfree(ep->txframe);
free_irq(udc_controller->usb_irq, udc_controller);
+ irq_dispose_mapping(udc_controller->usb_irq);
tasklet_kill(&udc_controller->rx_tasklet);
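
The fsl_qe_udc changes above split the queueing path into a lock-taking wrapper (qe_ep_queue) and a variant for callers that manage udc->lock themselves (__qe_ep_queue), which ch9getstatus now uses. A minimal user-space sketch of that locked/unlocked split, with a pthread mutex standing in for the kernel spinlock and all names purely illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending;                     /* stands in for the endpoint queue */

/* Caller must hold 'lock'. */
static int __queue(int req)
{
        pending = req;                  /* push the request to the "device" */
        return 0;
}

/* Public entry point: takes the lock, then delegates. */
static int queue(int req)
{
        int ret;

        pthread_mutex_lock(&lock);
        ret = __queue(req);
        pthread_mutex_unlock(&lock);
        return ret;
}

/* A path that needs the lock across more work takes it once and calls
 * the unlocked helper, instead of calling queue() and deadlocking on a
 * second lock attempt. */
static int control_path(int req)
{
        int ret;

        pthread_mutex_lock(&lock);
        /* ... other work done under the lock ... */
        ret = __queue(req);
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        queue(1);
        control_path(2);
        printf("last queued request: %d\n", pending);
        return 0;
}

Build with "cc -pthread" if trying it out.
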
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 537f953bd7f..6d106e74265 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -621,9 +621,9 @@ static int __init aircable_init(void)
goto failed_usb_register;
return 0;
-failed_serial_register:
- usb_serial_deregister(&aircable_device);
failed_usb_register:
+ usb_serial_deregister(&aircable_device);
+failed_serial_register:
return retval;
}
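
The aircable_init fix above swaps the two error labels so teardown runs in reverse order of setup: usb_serial_deregister() is reached only when the serial driver was registered and the later usb_register() failed. A standalone illustration of that goto-unwind convention (names are made up):

#include <stdio.h>

static int register_first(void)  { return 0; }   /* pretend these can fail */
static int register_second(void) { return -1; }
static void unregister_first(void) { puts("undoing first step"); }

static int init(void)
{
        int ret;

        ret = register_first();
        if (ret)
                goto fail_first;        /* nothing to undo yet */

        ret = register_second();
        if (ret)
                goto fail_second;       /* must undo the first step only */

        return 0;

fail_second:
        unregister_first();
fail_first:
        return ret;
}

int main(void)
{
        return init() ? 1 : 0;
}
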
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 75597337583..f92f4d77337 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -662,6 +662,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
{ USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
{ USB_DEVICE(FTDI_VID, DIEBOLD_BCS_SE923_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NDI_HUC_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -1064,8 +1065,10 @@ static int set_serial_info(struct tty_struct *tty,
if (!capable(CAP_SYS_ADMIN)) {
if (((new_serial.flags & ~ASYNC_USR_MASK) !=
- (priv->flags & ~ASYNC_USR_MASK)))
+ (priv->flags & ~ASYNC_USR_MASK))) {
+ unlock_kernel();
return -EPERM;
+ }
priv->flags = ((priv->flags & ~ASYNC_USR_MASK) |
(new_serial.flags & ASYNC_USR_MASK));
priv->custom_divisor = new_serial.custom_divisor;
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 1b62eff475d..e300c840f8c 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -844,6 +844,9 @@
#define TML_VID 0x1B91 /* Vendor ID */
#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */
+/* NDI Polaris System */
+#define FTDI_NDI_HUC_PID 0xDA70
+
/* Propox devices */
#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6c89da9c6fe..bfd0b68cecc 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -199,14 +199,15 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
/* FUTURE NOVATEL PRODUCTS */
-#define NOVATELWIRELESS_PRODUCT_EVDO_1 0x6000
-#define NOVATELWIRELESS_PRODUCT_HSPA_1 0x7000
-#define NOVATELWIRELESS_PRODUCT_EMBEDDED_1 0x8000
-#define NOVATELWIRELESS_PRODUCT_GLOBAL_1 0x9000
-#define NOVATELWIRELESS_PRODUCT_EVDO_2 0x6001
-#define NOVATELWIRELESS_PRODUCT_HSPA_2 0x7001
-#define NOVATELWIRELESS_PRODUCT_EMBEDDED_2 0x8001
-#define NOVATELWIRELESS_PRODUCT_GLOBAL_2 0x9001
+#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0X6000
+#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001
+#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001
+#define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001
/* AMOI PRODUCTS */
#define AMOI_VENDOR_ID 0x1614
@@ -216,6 +217,27 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define DELL_VENDOR_ID 0x413C
+/* Dell modems */
+#define DELL_PRODUCT_5700_MINICARD 0x8114
+#define DELL_PRODUCT_5500_MINICARD 0x8115
+#define DELL_PRODUCT_5505_MINICARD 0x8116
+#define DELL_PRODUCT_5700_EXPRESSCARD 0x8117
+#define DELL_PRODUCT_5510_EXPRESSCARD 0x8118
+
+#define DELL_PRODUCT_5700_MINICARD_SPRINT 0x8128
+#define DELL_PRODUCT_5700_MINICARD_TELUS 0x8129
+
+#define DELL_PRODUCT_5720_MINICARD_VZW 0x8133
+#define DELL_PRODUCT_5720_MINICARD_SPRINT 0x8134
+#define DELL_PRODUCT_5720_MINICARD_TELUS 0x8135
+#define DELL_PRODUCT_5520_MINICARD_CINGULAR 0x8136
+#define DELL_PRODUCT_5520_MINICARD_GENERIC_L 0x8137
+#define DELL_PRODUCT_5520_MINICARD_GENERIC_I 0x8138
+
+#define DELL_PRODUCT_5730_MINICARD_SPRINT 0x8180
+#define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181
+#define DELL_PRODUCT_5730_MINICARD_VZW 0x8182
+
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a
@@ -274,12 +296,6 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define ERICSSON_VENDOR_ID 0x0bdb
#define ERICSSON_PRODUCT_F3507G 0x1900
-/* Pantech products */
-#define PANTECH_VENDOR_ID 0x106c
-#define PANTECH_PRODUCT_PC5740 0x3701
-#define PANTECH_PRODUCT_PC5750 0x3702 /* PX-500 */
-#define PANTECH_PRODUCT_UM150 0x3711
-
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -395,31 +411,37 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_1) }, /* Novatel EVDO product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_1) }, /* Novatel HSPA product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_1) }, /* Novatel Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_1) }, /* Novatel Global product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_2) }, /* Novatel EVDO product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_2) }, /* Novatel HSPA product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_2) }, /* Novatel Embedded product */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_2) }, /* Novatel Global product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, /* Novatel EVDO product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) },
- { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
- { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
- { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
- { USB_DEVICE(DELL_VENDOR_ID, 0x8117) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */
- { USB_DEVICE(DELL_VENDOR_ID, 0x8118) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */
- { USB_DEVICE(DELL_VENDOR_ID, 0x8128) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */
- { USB_DEVICE(DELL_VENDOR_ID, 0x8129) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */
- { USB_DEVICE(DELL_VENDOR_ID, 0x8133) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
- { USB_DEVICE(DELL_VENDOR_ID, 0x8136) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
- { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */
- { USB_DEVICE(DELL_VENDOR_ID, 0x8138) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
- { USB_DEVICE(DELL_VENDOR_ID, 0x8147) }, /* Dell Wireless 5530 Mobile Broadband (3G HSPA) Mini-Card */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5505_MINICARD) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_EXPRESSCARD) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5510_EXPRESSCARD) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_SPRINT) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_TELUS) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_VZW) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_SPRINT) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_TELUS) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_CINGULAR) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_L) }, /* Dell Wireless HSDPA 5520 */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_I) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
+ { USB_DEVICE(DELL_VENDOR_ID, 0x8147) }, /* Dell Wireless 5530 Mobile Broadband (3G HSPA) Mini-Card */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -488,9 +510,6 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
{ USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) },
- { USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_PC5740) },
- { USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_PC5750) },
- { USB_DEVICE(PANTECH_VENDOR_ID, PANTECH_PRODUCT_UM150) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index baf591137b8..2620bf6fe5e 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -176,7 +176,7 @@ static unsigned int product_5052_count;
/* the array dimension is the number of default entries plus */
/* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
/* null entry */
-static struct usb_device_id ti_id_table_3410[7+TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -185,9 +185,11 @@ static struct usb_device_id ti_id_table_3410[7+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
};
-static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
@@ -195,7 +197,7 @@ static struct usb_device_id ti_id_table_5052[4+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
};
-static struct usb_device_id ti_id_table_combined[6+2*TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -208,6 +210,8 @@ static struct usb_device_id ti_id_table_combined[6+2*TI_EXTRA_VID_PID_COUNT+1] =
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
{ }
};
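
The ti_usb_3410_5052 hunks grow the statically sized ID tables; as the comment above notes, each dimension is "default entries + user-supplied extras + 1", so a trailing zeroed element always terminates the list after any extra IDs are filled in. A small sketch of that sizing convention (the IDs below are only examples):

#include <stdio.h>

#define EXTRA_COUNT 5                   /* room for runtime extras */

struct id { unsigned short vendor, product; };

static struct id table[3 + EXTRA_COUNT + 1] = {
        { 0x0451, 0x3410 },
        { 0x0451, 0xf430 },
        { 0x04b3, 0x4543 },
        /* remaining slots stay zeroed: extras are filled in later and
         * the final all-zero entry terminates the walk below */
};

int main(void)
{
        for (struct id *p = table; p->vendor; p++)
                printf("%04x:%04x\n", p->vendor, p->product);
        return 0;
}
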
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index b7ea5dbadee..f323c602585 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -30,6 +30,8 @@
#define IBM_VENDOR_ID 0x04b3
#define TI_3410_PRODUCT_ID 0x3410
#define IBM_4543_PRODUCT_ID 0x4543
+#define IBM_454B_PRODUCT_ID 0x454b
+#define IBM_454C_PRODUCT_ID 0x454c
#define TI_3410_EZ430_ID 0xF430 /* TI ez430 development tool */
#define TI_5052_BOOT_PRODUCT_ID 0x5052 /* no EEPROM, no firmware */
#define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 2a42b862aa9..727c506417c 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -64,6 +64,7 @@
*/
#define VENDOR_ID_NOKIA 0x0421
#define VENDOR_ID_NIKON 0x04b0
+#define VENDOR_ID_PENTAX 0x0a17
#define VENDOR_ID_MOTOROLA 0x22b8
/***********************************************************************
@@ -158,6 +159,7 @@ static int slave_configure(struct scsi_device *sdev)
switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) {
case VENDOR_ID_NOKIA:
case VENDOR_ID_NIKON:
+ case VENDOR_ID_PENTAX:
case VENDOR_ID_MOTOROLA:
if (!(us->fflags & (US_FL_FIX_CAPACITY |
US_FL_CAPACITY_OK)))
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 1d5438e6363..fb65d221ced 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -558,32 +558,10 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) {
- /* The command succeeded. If the capacity is odd
- * (i.e., if the sector number is even) then the
- * "always-even" heuristic would be wrong for this
- * device. Issue a WARN() so that the kerneloops.org
- * project will be notified and we will then know to
- * mark the device with a CAPACITY_OK flag. Hopefully
- * this will occur for only a few devices.
- *
- * Use the sign of us->last_sector_hacks to tell whether
- * the warning has already been issued; we don't need
- * more than one warning per device.
+ /* The command succeeded. We know this device doesn't
+ * have the last-sector bug, so stop checking it.
*/
- if (!(sector & 1) && us->use_last_sector_hacks > 0) {
- unsigned vid = le16_to_cpu(
- us->pusb_dev->descriptor.idVendor);
- unsigned pid = le16_to_cpu(
- us->pusb_dev->descriptor.idProduct);
- unsigned rev = le16_to_cpu(
- us->pusb_dev->descriptor.bcdDevice);
-
- WARN(1, "%s: Successful last sector success at %u, "
- "device %04x:%04x:%04x\n",
- sdkp->disk->disk_name, sector,
- vid, pid, rev);
- us->use_last_sector_hacks = -1;
- }
+ us->use_last_sector_hacks = 0;
} else {
/* The command failed. Allow up to 3 retries in case this
@@ -599,14 +577,6 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
srb->result = SAM_STAT_CHECK_CONDITION;
memcpy(srb->sense_buffer, record_not_found,
sizeof(record_not_found));
-
- /* In theory we might want to issue a WARN() here if the
- * capacity is even, since it could indicate the device
- * has the READ CAPACITY bug _and_ the real capacity is
- * odd. But it could also indicate that the device
- * simply can't access its last sector, a failure mode
- * which is surprisingly common. So no warning.
- */
}
done:
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 69269f73956..50dc33a6065 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1214,7 +1214,7 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
"Datafab",
"KECF-USB",
US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_INQUIRY ),
+ US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ),
/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
@@ -1354,21 +1354,6 @@ UNUSUAL_DEV( 0x0a17, 0x0004, 0x1000, 0x1000,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
-
-/* Submitted by Per Winkvist <per.winkvist@uk.com> */
-UNUSUAL_DEV( 0x0a17, 0x006, 0x0000, 0xffff,
- "Pentax",
- "Optio S/S4",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_INQUIRY ),
-
-/* Reported by Jaak Ristioja <Ristioja@gmail.com> */
-UNUSUAL_DEV( 0x0a17, 0x006e, 0x0100, 0x0100,
- "Pentax",
- "K10D",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
/* These are virtual windows driver CDs, which the zd1211rw driver
* automatically converts into WLAN devices. */
UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101,
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index f0267706cb4..bf0af660df8 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1054,9 +1054,10 @@ config FB_RIVA_BACKLIGHT
config FB_I810
tristate "Intel 810/815 support (EXPERIMENTAL)"
- depends on FB && EXPERIMENTAL && PCI && X86_32
+ depends on EXPERIMENTAL && PCI && X86_32
select AGP
select AGP_INTEL
+ select FB
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -1119,7 +1120,8 @@ config FB_CARILLO_RANCH
config FB_INTEL
tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)"
- depends on FB && EXPERIMENTAL && PCI && X86
+ depends on EXPERIMENTAL && PCI && X86
+ select FB
select AGP
select AGP_INTEL
select FB_MODE_HELPERS
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 2a423d3a2a8..90cfddabf1f 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -447,7 +447,7 @@ static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init bfin_t350mcqb_probe(struct platform_device *pdev)
+static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
{
struct bfin_t350mcqbfb_info *info;
struct fb_info *fbinfo;
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
index 751e491ca8c..f20eff8c4a8 100644
--- a/drivers/video/geode/gx1fb_core.c
+++ b/drivers/video/geode/gx1fb_core.c
@@ -136,13 +136,10 @@ static int gx1fb_set_par(struct fb_info *info)
{
struct geodefb_par *par = info->par;
- if (info->var.bits_per_pixel == 16) {
+ if (info->var.bits_per_pixel == 16)
info->fix.visual = FB_VISUAL_TRUECOLOR;
- fb_dealloc_cmap(&info->cmap);
- } else {
+ else
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
- fb_alloc_cmap(&info->cmap, 1<<info->var.bits_per_pixel, 0);
- }
info->fix.line_length = gx1_line_delta(info->var.xres, info->var.bits_per_pixel);
@@ -315,6 +312,10 @@ static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev)
if (!par->panel_x)
par->enable_crt = 1; /* fall back to CRT if no panel is specified */
+ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
+ framebuffer_release(info);
+ return NULL;
+ }
return info;
}
@@ -374,8 +375,11 @@ static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *
release_mem_region(gx1_gx_base() + 0x8300, 0x100);
}
- if (info)
+ if (info) {
+ fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
+ }
+
return ret;
}
@@ -395,6 +399,7 @@ static void gx1fb_remove(struct pci_dev *pdev)
iounmap(par->dc_regs);
release_mem_region(gx1_gx_base() + 0x8300, 0x100);
+ fb_dealloc_cmap(&info->cmap);
pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 48411892631..2552cac39e1 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -171,13 +171,10 @@ static int gxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
static int gxfb_set_par(struct fb_info *info)
{
- if (info->var.bits_per_pixel > 8) {
+ if (info->var.bits_per_pixel > 8)
info->fix.visual = FB_VISUAL_TRUECOLOR;
- fb_dealloc_cmap(&info->cmap);
- } else {
+ else
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
- fb_alloc_cmap(&info->cmap, 1<<info->var.bits_per_pixel, 0);
- }
info->fix.line_length = gx_line_delta(info->var.xres, info->var.bits_per_pixel);
@@ -331,6 +328,11 @@ static struct fb_info * __init gxfb_init_fbinfo(struct device *dev)
info->var.grayscale = 0;
+ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
+ framebuffer_release(info);
+ return NULL;
+ }
+
return info;
}
@@ -443,8 +445,10 @@ static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *i
pci_release_region(pdev, 1);
}
- if (info)
+ if (info) {
+ fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
+ }
return ret;
}
@@ -467,6 +471,7 @@ static void gxfb_remove(struct pci_dev *pdev)
iounmap(par->gp_regs);
pci_release_region(pdev, 1);
+ fb_dealloc_cmap(&info->cmap);
pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c
index b965ecdbc60..889cbe39e58 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/geode/lxfb_core.c
@@ -278,13 +278,10 @@ static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
static int lxfb_set_par(struct fb_info *info)
{
- if (info->var.bits_per_pixel > 8) {
+ if (info->var.bits_per_pixel > 8)
info->fix.visual = FB_VISUAL_TRUECOLOR;
- fb_dealloc_cmap(&info->cmap);
- } else {
+ else
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
- fb_alloc_cmap(&info->cmap, 1<<info->var.bits_per_pixel, 0);
- }
info->fix.line_length = lx_get_pitch(info->var.xres,
info->var.bits_per_pixel);
@@ -451,6 +448,11 @@ static struct fb_info * __init lxfb_init_fbinfo(struct device *dev)
info->pseudo_palette = (void *)par + sizeof(struct lxfb_par);
+ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
+ framebuffer_release(info);
+ return NULL;
+ }
+
info->var.grayscale = 0;
return info;
@@ -579,8 +581,10 @@ err:
pci_release_region(pdev, 3);
}
- if (info)
+ if (info) {
+ fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
+ }
return ret;
}
@@ -604,6 +608,7 @@ static void lxfb_remove(struct pci_dev *pdev)
iounmap(par->vp_regs);
pci_release_region(pdev, 3);
+ fb_dealloc_cmap(&info->cmap);
pci_set_drvdata(pdev, NULL);
framebuffer_release(info);
}
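
The three geode framebuffer drivers above stop allocating and freeing the colour map inside set_par() and instead allocate it once when the fb_info is created and free it on remove or on probe failure. A minimal sketch of that init/teardown ownership pattern, with plain malloc standing in for fb_alloc_cmap():

#include <stdlib.h>
#include <stdio.h>

struct dev {
        unsigned char *cmap;            /* owned for the device lifetime */
};

static struct dev *dev_create(void)
{
        struct dev *d = calloc(1, sizeof(*d));

        if (!d)
                return NULL;
        d->cmap = malloc(256);          /* allocate once, up front */
        if (!d->cmap) {
                free(d);
                return NULL;
        }
        return d;
}

static void dev_set_mode(struct dev *d, int bpp)
{
        /* no allocation or freeing here any more: just use d->cmap */
        printf("mode set to %d bpp (cmap %p)\n", bpp, (void *)d->cmap);
}

static void dev_destroy(struct dev *d)
{
        free(d->cmap);                  /* freed exactly once, at teardown */
        free(d);
}

int main(void)
{
        struct dev *d = dev_create();

        if (!d)
                return 1;
        dev_set_mode(d, 16);
        dev_set_mode(d, 8);
        dev_destroy(d);
        return 0;
}
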
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 2c8dff9f77d..1ed3d554e37 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -115,7 +115,7 @@ static struct w1_therm_family_converter w1_therm_families[] = {
static inline int w1_DS18B20_convert_temp(u8 rom[9])
{
- s16 t = (rom[1] << 8) | rom[0];
+ int t = ((s16)rom[1] << 8) | rom[0];
t = t*1000/16;
return t;
}
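
The w1_therm change widens the conversion so the millidegree result is computed in int rather than squeezed back into a 16-bit variable, while still treating the raw DS18B20 reading as signed. One portable way to express the same conversion in a standalone program (not the driver's exact expression):

#include <stdint.h>
#include <stdio.h>

static int convert_temp(const uint8_t rom[2])
{
        int t = (rom[1] << 8) | rom[0]; /* raw 16-bit reading */

        if (t & 0x8000)                 /* reading is signed: extend by hand */
                t -= 0x10000;
        return t * 1000 / 16;           /* millidegrees C, done in int */
}

int main(void)
{
        uint8_t warm[2] = { 0x91, 0x01 };  /* 0x0191 -> 25.0625 C */
        uint8_t cold[2] = { 0x6f, 0xff };  /* 0xff6f -> -9.0625 C */

        printf("%d mC\n", convert_temp(warm));  /* 25062 */
        printf("%d mC\n", convert_temp(cold));  /* -9062 */
        return 0;
}
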
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index bf92802f2bb..36e221beedc 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -37,7 +37,7 @@
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <asm/mach-rdc321x/rdc321x_defs.h>
+#include <asm/rdc321x_defs.h>
#define RDC_WDT_MASK 0x80000000 /* Mask */
#define RDC_WDT_EN 0x00800000 /* Enable bit */
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 3141e149d59..30963af5dba 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -30,6 +30,7 @@
#include <asm/ptrace.h>
#include <asm/irq.h>
+#include <asm/idle.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
@@ -51,27 +52,43 @@ static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
-/* Packed IRQ information: binding type, sub-type index, and event channel. */
-struct packed_irq
-{
- unsigned short evtchn;
- unsigned char index;
- unsigned char type;
-};
-
-static struct packed_irq irq_info[NR_IRQS];
-
-/* Binding types. */
-enum {
- IRQT_UNBOUND,
+/* Interrupt types. */
+enum xen_irq_type {
+ IRQT_UNBOUND = 0,
IRQT_PIRQ,
IRQT_VIRQ,
IRQT_IPI,
IRQT_EVTCHN
};
-/* Convenient shorthand for packed representation of an unbound IRQ. */
-#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ * PIRQ - vector, with MSB being "needs EOI"
+ * VIRQ - virq number
+ * IPI - IPI vector
+ * EVTCHN -
+ */
+struct irq_info
+{
+ enum xen_irq_type type; /* type */
+ unsigned short evtchn; /* event channel */
+ unsigned short cpu; /* cpu bound */
+
+ union {
+ unsigned short virq;
+ enum ipi_vector ipi;
+ struct {
+ unsigned short gsi;
+ unsigned short vector;
+ } pirq;
+ } u;
+};
+
+static struct irq_info irq_info[NR_IRQS];
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
[0 ... NR_EVENT_CHANNELS-1] = -1
@@ -84,10 +101,6 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
{
return cpu_evtchn_mask_p[cpu].bits;
}
-static u8 cpu_evtchn[NR_EVENT_CHANNELS];
-
-/* Reference counts for bindings to IRQs. */
-static int irq_bindcount[NR_IRQS];
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)
@@ -95,27 +108,108 @@ static int irq_bindcount[NR_IRQS];
static struct irq_chip xen_dynamic_chip;
/* Constructor for packed IRQ information. */
-static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
+static struct irq_info mk_unbound_info(void)
+{
+ return (struct irq_info) { .type = IRQT_UNBOUND };
+}
+
+static struct irq_info mk_evtchn_info(unsigned short evtchn)
+{
+ return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
+ .cpu = 0 };
+}
+
+static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
+{
+ return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
+ .cpu = 0, .u.ipi = ipi };
+}
+
+static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
- return (struct packed_irq) { evtchn, index, type };
+ return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
+ .cpu = 0, .u.virq = virq };
+}
+
+static struct irq_info mk_pirq_info(unsigned short evtchn,
+ unsigned short gsi, unsigned short vector)
+{
+ return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
+ .cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}
/*
* Accessors for packed IRQ information.
*/
-static inline unsigned int evtchn_from_irq(int irq)
+static struct irq_info *info_for_irq(unsigned irq)
{
- return irq_info[irq].evtchn;
+ return &irq_info[irq];
}
-static inline unsigned int index_from_irq(int irq)
+static unsigned int evtchn_from_irq(unsigned irq)
{
- return irq_info[irq].index;
+ return info_for_irq(irq)->evtchn;
}
-static inline unsigned int type_from_irq(int irq)
+static enum ipi_vector ipi_from_irq(unsigned irq)
{
- return irq_info[irq].type;
+ struct irq_info *info = info_for_irq(irq);
+
+ BUG_ON(info == NULL);
+ BUG_ON(info->type != IRQT_IPI);
+
+ return info->u.ipi;
+}
+
+static unsigned virq_from_irq(unsigned irq)
+{
+ struct irq_info *info = info_for_irq(irq);
+
+ BUG_ON(info == NULL);
+ BUG_ON(info->type != IRQT_VIRQ);
+
+ return info->u.virq;
+}
+
+static unsigned gsi_from_irq(unsigned irq)
+{
+ struct irq_info *info = info_for_irq(irq);
+
+ BUG_ON(info == NULL);
+ BUG_ON(info->type != IRQT_PIRQ);
+
+ return info->u.pirq.gsi;
+}
+
+static unsigned vector_from_irq(unsigned irq)
+{
+ struct irq_info *info = info_for_irq(irq);
+
+ BUG_ON(info == NULL);
+ BUG_ON(info->type != IRQT_PIRQ);
+
+ return info->u.pirq.vector;
+}
+
+static enum xen_irq_type type_from_irq(unsigned irq)
+{
+ return info_for_irq(irq)->type;
+}
+
+static unsigned cpu_from_irq(unsigned irq)
+{
+ return info_for_irq(irq)->cpu;
+}
+
+static unsigned int cpu_from_evtchn(unsigned int evtchn)
+{
+ int irq = evtchn_to_irq[evtchn];
+ unsigned ret = 0;
+
+ if (irq != -1)
+ ret = cpu_from_irq(irq);
+
+ return ret;
}
static inline unsigned long active_evtchns(unsigned int cpu,
@@ -136,10 +230,10 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif
- __clear_bit(chn, cpu_evtchn_mask(cpu_evtchn[chn]));
+ __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
__set_bit(chn, cpu_evtchn_mask(cpu));
- cpu_evtchn[chn] = cpu;
+ irq_info[irq].cpu = cpu;
}
static void init_evtchn_cpu_bindings(void)
@@ -154,15 +248,9 @@ static void init_evtchn_cpu_bindings(void)
}
#endif
- memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
}
-static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
-{
- return cpu_evtchn[evtchn];
-}
-
static inline void clear_evtchn(int port)
{
struct shared_info *s = HYPERVISOR_shared_info;
@@ -240,9 +328,8 @@ static int find_unbound_irq(void)
int irq;
struct irq_desc *desc;
- /* Only allocate from dynirq range */
for (irq = 0; irq < nr_irqs; irq++)
- if (irq_bindcount[irq] == 0)
+ if (irq_info[irq].type == IRQT_UNBOUND)
break;
if (irq == nr_irqs)
@@ -252,6 +339,8 @@ static int find_unbound_irq(void)
if (WARN_ON(desc == NULL))
return -1;
+ dynamic_irq_init(irq);
+
return irq;
}
@@ -266,16 +355,13 @@ int bind_evtchn_to_irq(unsigned int evtchn)
if (irq == -1) {
irq = find_unbound_irq();
- dynamic_irq_init(irq);
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_level_irq, "event");
evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+ irq_info[irq] = mk_evtchn_info(evtchn);
}
- irq_bindcount[irq]++;
-
spin_unlock(&irq_mapping_update_lock);
return irq;
@@ -290,12 +376,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
spin_lock(&irq_mapping_update_lock);
irq = per_cpu(ipi_to_irq, cpu)[ipi];
+
if (irq == -1) {
irq = find_unbound_irq();
if (irq < 0)
goto out;
- dynamic_irq_init(irq);
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_level_irq, "ipi");
@@ -306,15 +392,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
evtchn = bind_ipi.port;
evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-
+ irq_info[irq] = mk_ipi_info(evtchn, ipi);
per_cpu(ipi_to_irq, cpu)[ipi] = irq;
bind_evtchn_to_cpu(evtchn, cpu);
}
- irq_bindcount[irq]++;
-
out:
spin_unlock(&irq_mapping_update_lock);
return irq;
@@ -340,20 +423,17 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
irq = find_unbound_irq();
- dynamic_irq_init(irq);
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_level_irq, "virq");
evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+ irq_info[irq] = mk_virq_info(evtchn, virq);
per_cpu(virq_to_irq, cpu)[virq] = irq;
bind_evtchn_to_cpu(evtchn, cpu);
}
- irq_bindcount[irq]++;
-
spin_unlock(&irq_mapping_update_lock);
return irq;
@@ -366,7 +446,7 @@ static void unbind_from_irq(unsigned int irq)
spin_lock(&irq_mapping_update_lock);
- if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
+ if (VALID_EVTCHN(evtchn)) {
close.port = evtchn;
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
BUG();
@@ -374,11 +454,11 @@ static void unbind_from_irq(unsigned int irq)
switch (type_from_irq(irq)) {
case IRQT_VIRQ:
per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
- [index_from_irq(irq)] = -1;
+ [virq_from_irq(irq)] = -1;
break;
case IRQT_IPI:
per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
- [index_from_irq(irq)] = -1;
+ [ipi_from_irq(irq)] = -1;
break;
default:
break;
@@ -388,7 +468,7 @@ static void unbind_from_irq(unsigned int irq)
bind_evtchn_to_cpu(evtchn, 0);
evtchn_to_irq[evtchn] = -1;
- irq_info[irq] = IRQ_UNBOUND;
+ irq_info[irq] = mk_unbound_info();
dynamic_irq_cleanup(irq);
}
@@ -506,8 +586,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
for(i = 0; i < NR_EVENT_CHANNELS; i++) {
if (sync_test_bit(i, sh->evtchn_pending)) {
printk(" %d: event %d -> irq %d\n",
- cpu_evtchn[i], i,
- evtchn_to_irq[i]);
+ cpu_from_evtchn(i), i,
+ evtchn_to_irq[i]);
}
}
@@ -516,7 +596,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-
/*
* Search the CPUs pending events bitmasks. For each one found, map
* the event number to an irq, and feed it into do_IRQ() for
@@ -529,11 +608,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
int cpu = get_cpu();
+ struct pt_regs *old_regs = set_irq_regs(regs);
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
static DEFINE_PER_CPU(unsigned, nesting_count);
unsigned count;
+ exit_idle();
+ irq_enter();
+
do {
unsigned long pending_words;
@@ -558,7 +641,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
int irq = evtchn_to_irq[port];
if (irq != -1)
- xen_do_IRQ(irq, regs);
+ handle_irq(irq, regs);
}
}
@@ -569,12 +652,17 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
} while(count != 1);
out:
+ irq_exit();
+ set_irq_regs(old_regs);
+
put_cpu();
}
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
+ struct irq_info *info = info_for_irq(irq);
+
/* Make sure the irq is masked, since the new event channel
will also be masked. */
disable_irq(irq);
@@ -584,11 +672,11 @@ void rebind_evtchn_irq(int evtchn, int irq)
/* After resume the irq<->evtchn mappings are all cleared out */
BUG_ON(evtchn_to_irq[evtchn] != -1);
/* Expect irq to have been bound before,
- so the bindcount should be non-0 */
- BUG_ON(irq_bindcount[irq] == 0);
+ so there should be a proper type */
+ BUG_ON(info->type == IRQT_UNBOUND);
evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+ irq_info[irq] = mk_evtchn_info(evtchn);
spin_unlock(&irq_mapping_update_lock);
@@ -698,8 +786,7 @@ static void restore_cpu_virqs(unsigned int cpu)
if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
continue;
- BUG_ON(irq_info[irq].type != IRQT_VIRQ);
- BUG_ON(irq_info[irq].index != virq);
+ BUG_ON(virq_from_irq(irq) != virq);
/* Get a new binding from Xen. */
bind_virq.virq = virq;
@@ -711,7 +798,7 @@ static void restore_cpu_virqs(unsigned int cpu)
/* Record the new mapping. */
evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+ irq_info[irq] = mk_virq_info(evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
/* Ready for use. */
@@ -728,8 +815,7 @@ static void restore_cpu_ipis(unsigned int cpu)
if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
continue;
- BUG_ON(irq_info[irq].type != IRQT_IPI);
- BUG_ON(irq_info[irq].index != ipi);
+ BUG_ON(ipi_from_irq(irq) != ipi);
/* Get a new binding from Xen. */
bind_ipi.vcpu = cpu;
@@ -740,7 +826,7 @@ static void restore_cpu_ipis(unsigned int cpu)
/* Record the new mapping. */
evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+ irq_info[irq] = mk_ipi_info(evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
/* Ready for use. */
@@ -820,8 +906,11 @@ void xen_irq_resume(void)
static struct irq_chip xen_dynamic_chip __read_mostly = {
.name = "xen-dyn",
+
+ .disable = disable_dynirq,
.mask = disable_dynirq,
.unmask = enable_dynirq,
+
.ack = ack_dynirq,
.set_affinity = set_affinity_irq,
.retrigger = retrigger_dynirq,
@@ -841,9 +930,5 @@ void __init xen_init_IRQ(void)
for (i = 0; i < NR_EVENT_CHANNELS; i++)
mask_evtchn(i);
- /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
- for (i = 0; i < nr_irqs; i++)
- irq_bindcount[i] = 0;
-
irq_ctx_init(smp_processor_id());
}
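
The drivers/xen/events.c rework replaces the old packed_irq plus the parallel cpu_evtchn[] and irq_bindcount[] arrays with a single per-IRQ struct irq_info: a type tag, the common evtchn/cpu fields, and a union of type-specific data, read through accessors that BUG_ON() a mismatched type. A minimal standalone sketch of that tagged-union bookkeeping (names and sizes are illustrative):

#include <assert.h>
#include <stdio.h>

enum irq_type { T_UNBOUND = 0, T_VIRQ, T_IPI };

struct info {
        enum irq_type type;
        unsigned short evtchn;
        unsigned short cpu;
        union {
                unsigned short virq;
                unsigned short ipi;
        } u;
};

static struct info infos[16];           /* zeroed, so everything is T_UNBOUND */

static struct info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
        return (struct info){ .type = T_VIRQ, .evtchn = evtchn,
                              .u.virq = virq };
}

static unsigned virq_from_irq(unsigned irq)
{
        struct info *info = &infos[irq];

        assert(info->type == T_VIRQ);   /* the kernel code uses BUG_ON() */
        return info->u.virq;
}

int main(void)
{
        infos[3] = mk_virq_info(42, 7);
        printf("irq 3: evtchn %u, virq %u\n", infos[3].evtchn,
               virq_from_irq(3));
        return 0;
}
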
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 551177c0011..35443cc4b9a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1530,8 +1530,7 @@ again:
* for higher level blocks, try not to allocate blocks
* with the block and the parent locks held.
*/
- if (level > 0 && !prealloc_block.objectid &&
- btrfs_path_lock_waiting(p, level)) {
+ if (level > 0 && !prealloc_block.objectid) {
u32 size = b->len;
u64 hint = b->start;
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 68fd9ccf180..9ebe9385129 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -236,25 +236,3 @@ int btrfs_tree_locked(struct extent_buffer *eb)
return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
spin_is_locked(&eb->lock);
}
-
-/*
- * btrfs_search_slot uses this to decide if it should drop its locks
- * before doing something expensive like allocating free blocks for cow.
- */
-int btrfs_path_lock_waiting(struct btrfs_path *path, int level)
-{
- int i;
- struct extent_buffer *eb;
-
- for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) {
- eb = path->nodes[i];
- if (!eb)
- break;
- smp_mb();
- if (spin_is_contended(&eb->lock) ||
- waitqueue_active(&eb->lock_wq))
- return 1;
- }
- return 0;
-}
-
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index d92e707f587..6bb0afbff92 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -26,8 +26,6 @@ int btrfs_tree_locked(struct extent_buffer *eb);
int btrfs_try_tree_lock(struct extent_buffer *eb);
int btrfs_try_spin_lock(struct extent_buffer *eb);
-int btrfs_path_lock_waiting(struct btrfs_path *path, int level);
-
void btrfs_set_lock_blocking(struct extent_buffer *eb);
void btrfs_clear_lock_blocking(struct extent_buffer *eb);
#endif
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index da8bdeaa2e6..7c6e3606f0e 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1185,9 +1185,12 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
es = sbi->s_es;
if (((sbi->s_mount_opt & EXT2_MOUNT_XIP) !=
(old_mount_opt & EXT2_MOUNT_XIP)) &&
- invalidate_inodes(sb))
- ext2_warning(sb, __func__, "busy inodes while remounting "\
- "xip remain in cache (no functional problem)");
+ invalidate_inodes(sb)) {
+ ext2_warning(sb, __func__, "refusing change of xip flag "
+ "with busy inodes while remounting");
+ sbi->s_mount_opt &= ~EXT2_MOUNT_XIP;
+ sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
+ }
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
return 0;
if (*flags & MS_RDONLY) {
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index b70d90e08a3..4a970411a45 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2428,12 +2428,13 @@ static void ext3_write_super (struct super_block * sb)
static int ext3_sync_fs(struct super_block *sb, int wait)
{
- sb->s_dirt = 0;
- if (wait)
- ext3_force_commit(sb);
- else
- journal_start_commit(EXT3_SB(sb)->s_journal, NULL);
+ tid_t target;
+ sb->s_dirt = 0;
+ if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
+ if (wait)
+ log_wait_commit(EXT3_SB(sb)->s_journal, target);
+ }
return 0;
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 6903d37af03..9b800d97a68 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -108,7 +108,8 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
if (hugetlb_reserve_pages(inode,
vma->vm_pgoff >> huge_page_order(h),
- len >> huge_page_shift(h), vma))
+ len >> huge_page_shift(h), vma,
+ vma->vm_flags))
goto out;
ret = 0;
@@ -947,7 +948,7 @@ static int can_do_hugetlb_shm(void)
can_do_mlock());
}
-struct file *hugetlb_file_setup(const char *name, size_t size)
+struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag)
{
int error = -ENOMEM;
struct file *file;
@@ -981,7 +982,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size)
error = -ENOMEM;
if (hugetlb_reserve_pages(inode, 0,
- size >> huge_page_shift(hstate_inode(inode)), NULL))
+ size >> huge_page_shift(hstate_inode(inode)), NULL,
+ acctflag))
goto out_inode;
d_instantiate(dentry, inode);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 9e4fa52d7dc..e79c07812af 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -427,7 +427,7 @@ int __log_space_left(journal_t *journal)
}
/*
- * Called under j_state_lock. Returns true if a transaction was started.
+ * Called under j_state_lock. Returns true if a transaction commit was started.
*/
int __log_start_commit(journal_t *journal, tid_t target)
{
@@ -495,7 +495,8 @@ int journal_force_commit_nested(journal_t *journal)
/*
* Start a commit of the current running transaction (if any). Returns true
- * if a transaction was started, and fills its tid in at *ptid
+ * if a transaction is going to be committed (or is currently already
+ * committing), and fills its tid in at *ptid
*/
int journal_start_commit(journal_t *journal, tid_t *ptid)
{
@@ -505,15 +506,19 @@ int journal_start_commit(journal_t *journal, tid_t *ptid)
if (journal->j_running_transaction) {
tid_t tid = journal->j_running_transaction->t_tid;
- ret = __log_start_commit(journal, tid);
- if (ret && ptid)
+ __log_start_commit(journal, tid);
+ /* There's a running transaction and we've just made sure
+ * its commit has been scheduled. */
+ if (ptid)
*ptid = tid;
- } else if (journal->j_committing_transaction && ptid) {
+ ret = 1;
+ } else if (journal->j_committing_transaction) {
/*
* If ext3_write_super() recently started a commit, then we
* have to wait for completion of that transaction
*/
- *ptid = journal->j_committing_transaction->t_tid;
+ if (ptid)
+ *ptid = journal->j_committing_transaction->t_tid;
ret = 1;
}
spin_unlock(&journal->j_state_lock);
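
Taken together, the ext3 and jbd hunks above change the contract of journal_start_commit(): it now returns true whenever a commit is running or has just been scheduled and, when asked, always reports the target tid, so ext3_sync_fs() can start the commit unconditionally and block on it only when wait is set. A toy model of that contract and its use, with no real journaling behind it:

#include <stdio.h>

typedef unsigned long tid_t;

struct journal {
        tid_t running_tid;              /* 0 if no running transaction */
        tid_t committing_tid;           /* 0 if nothing is committing */
};

static int start_commit(struct journal *j, tid_t *ptid)
{
        if (j->running_tid) {           /* schedule the running transaction */
                if (ptid)
                        *ptid = j->running_tid;
                return 1;
        }
        if (j->committing_tid) {        /* a commit is already in flight */
                if (ptid)
                        *ptid = j->committing_tid;
                return 1;
        }
        return 0;                       /* nothing to commit */
}

static void wait_commit(struct journal *j, tid_t tid)
{
        (void)j;
        printf("waiting for commit of tid %lu\n", tid);
}

/* Usage shaped like the new ext3_sync_fs(): wait only when asked to. */
static int sync_fs(struct journal *j, int wait)
{
        tid_t target;

        if (start_commit(j, &target) && wait)
                wait_commit(j, target);
        return 0;
}

int main(void)
{
        struct journal j = { .running_tid = 12 };

        return sync_fs(&j, 1);
}
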
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 6063a8e4b9f..763b78a6e9d 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -427,7 +427,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out;
case -EAGAIN:
ret = nlm_lck_denied;
- goto out;
+ break;
case FILE_LOCK_DEFERRED:
if (wait)
break;
@@ -443,6 +443,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out;
}
+ ret = nlm_lck_denied;
+ if (!wait)
+ goto out;
+
ret = nlm_lck_blocked;
/* Append to list of blocked */
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index cd16d6e668c..d797e119e3d 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -222,7 +222,7 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
static inline void crypto_free_shash(struct crypto_shash *tfm)
{
- crypto_free_tfm(crypto_shash_tfm(tfm));
+ crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
}
static inline unsigned int crypto_shash_alignmask(
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b3bcf72dc65..912cd52db96 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -261,6 +261,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
+#define I915_PARAM_NUM_FENCES_AVAIL 6
typedef struct drm_i915_getparam {
int param;
@@ -272,6 +273,7 @@ typedef struct drm_i915_getparam {
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
+#define I915_SETPARAM_NUM_USED_FENCES 4
typedef struct drm_i915_setparam {
int param;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index e4e8e117d27..499900d0cee 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -378,6 +378,7 @@ struct cgroup_subsys {
* - initiating hotplug events
*/
struct mutex hierarchy_mutex;
+ struct lock_class_key subsys_key;
/*
* Link to parent, and list entry in parent's children.
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 3bacd71509f..1f2e9020acc 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -552,7 +552,12 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask);
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
-void crypto_free_tfm(struct crypto_tfm *tfm);
+void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
+
+static inline void crypto_free_tfm(struct crypto_tfm *tfm)
+{
+ return crypto_destroy_tfm(tfm, tfm);
+}
int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
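
The crypto change replaces crypto_free_tfm() with crypto_destroy_tfm(mem, tfm), which takes the original allocation pointer separately from the embedded crypto_tfm; crypto_free_shash() above passes the shash wrapper as mem because its tfm lives at an offset inside it. A rough, purely illustrative sketch of why the two pointers can differ (this is not the crypto API's internals):

#include <stdlib.h>
#include <stdio.h>

struct tfm { int alg; };

struct shash {                          /* wrapper embedding the generic object */
        int digest_size;
        struct tfm base;
};

static void destroy_tfm(void *mem, struct tfm *tfm)
{
        printf("tearing down alg %d\n", tfm->alg);
        free(mem);                      /* free the allocation, not the member */
}

int main(void)
{
        struct shash *h = calloc(1, sizeof(*h));

        if (!h)
                return 1;
        h->base.alg = 7;
        destroy_tfm(h, &h->base);       /* wrapper as 'mem', member as 'tfm' */
        return 0;
}
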
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 3e0f64c335c..3e68469c188 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -282,6 +282,18 @@ static inline void dmaengine_put(void)
}
#endif
+#ifdef CONFIG_NET_DMA
+#define net_dmaengine_get() dmaengine_get()
+#define net_dmaengine_put() dmaengine_put()
+#else
+static inline void net_dmaengine_get(void)
+{
+}
+static inline void net_dmaengine_put(void)
+{
+}
+#endif
+
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
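
The dmaengine.h hunk adds the usual config-dependent stub pair: when CONFIG_NET_DMA is off, net_dmaengine_get()/net_dmaengine_put() collapse to empty inlines so call sites need no #ifdef. The generic shape of that pattern, for illustration only:

#include <stdio.h>

/* #define CONFIG_FEATURE 1 */

#ifdef CONFIG_FEATURE
static inline void feature_get(void) { puts("feature ref++"); }
static inline void feature_put(void) { puts("feature ref--"); }
#else
static inline void feature_get(void) { }
static inline void feature_put(void) { }
#endif

int main(void)
{
        feature_get();                  /* compiles either way */
        feature_put();
        return 0;
}
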
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f1d2fba19ea..03be7f29ca0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -33,7 +33,8 @@ unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
- struct vm_area_struct *vma);
+ struct vm_area_struct *vma,
+ int acctflags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
extern unsigned long hugepages_treat_as_movable;
@@ -138,7 +139,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
-struct file *hugetlb_file_setup(const char *name, size_t);
+struct file *hugetlb_file_setup(const char *name, size_t, int);
int hugetlb_get_quota(struct address_space *mapping, long delta);
void hugetlb_put_quota(struct address_space *mapping, long delta);
@@ -158,9 +159,9 @@ static inline void set_file_hugepages(struct file *file)
}
#else /* !CONFIG_HUGETLBFS */
-#define is_file_hugepages(file) 0
-#define set_file_hugepages(file) BUG()
-#define hugetlb_file_setup(name,size) ERR_PTR(-ENOSYS)
+#define is_file_hugepages(file) 0
+#define set_file_hugepages(file) BUG()
+#define hugetlb_file_setup(name,size,acctflag) ERR_PTR(-ENOSYS)
#endif /* !CONFIG_HUGETLBFS */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index ea0ea1a4c36..e752d973fa2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -48,12 +48,11 @@ extern struct fs_struct init_fs;
.posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
.cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
.rlim = INIT_RLIMITS, \
- .cputime = { .totals = { \
- .utime = cputime_zero, \
- .stime = cputime_zero, \
- .sum_exec_runtime = 0, \
- .lock = __SPIN_LOCK_UNLOCKED(sig.cputime.totals.lock), \
- }, }, \
+ .cputimer = { \
+ .cputime = INIT_CPUTIME, \
+ .running = 0, \
+ .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
+ }, \
}
extern struct nsproxy init_nsproxy;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e8ddc98b840..323561582c1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1129,8 +1129,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long flag, unsigned long pgoff);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, unsigned long flags,
- unsigned int vm_flags, unsigned long pgoff,
- int accountable);
+ unsigned int vm_flags, unsigned long pgoff);
static inline unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 28b3f505f18..f0a50b20e8a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -443,7 +443,6 @@ struct pacct_struct {
* @utime: time spent in user mode, in &cputime_t units
* @stime: time spent in kernel mode, in &cputime_t units
* @sum_exec_runtime: total time spent on the CPU, in nanoseconds
- * @lock: lock for fields in this struct
*
* This structure groups together three kinds of CPU time that are
* tracked for threads and thread groups. Most things considering
@@ -454,23 +453,33 @@ struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
- spinlock_t lock;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp stime
#define virt_exp utime
#define sched_exp sum_exec_runtime
+#define INIT_CPUTIME \
+ (struct task_cputime) { \
+ .utime = cputime_zero, \
+ .stime = cputime_zero, \
+ .sum_exec_runtime = 0, \
+ }
+
/**
- * struct thread_group_cputime - thread group interval timer counts
- * @totals: thread group interval timers; substructure for
- * uniprocessor kernel, per-cpu for SMP kernel.
+ * struct thread_group_cputimer - thread group interval timer counts
+ * @cputime: thread group interval timers.
+ * @running: non-zero when there are timers running and
+ * @cputime receives updates.
+ * @lock: lock for fields in this struct.
*
* This structure contains the version of task_cputime, above, that is
- * used for thread group CPU clock calculations.
+ * used for thread group CPU timer calculations.
*/
-struct thread_group_cputime {
- struct task_cputime totals;
+struct thread_group_cputimer {
+ struct task_cputime cputime;
+ int running;
+ spinlock_t lock;
};
/*
@@ -519,10 +528,10 @@ struct signal_struct {
cputime_t it_prof_incr, it_virt_incr;
/*
- * Thread group totals for process CPU clocks.
- * See thread_group_cputime(), et al, for details.
+ * Thread group totals for process CPU timers.
+ * See thread_group_cputimer(), et al, for details.
*/
- struct thread_group_cputime cputime;
+ struct thread_group_cputimer cputimer;
/* Earliest-expiration cache. */
struct task_cputime cputime_expires;
@@ -559,7 +568,7 @@ struct signal_struct {
* Live threads maintain their own counters and add to these
* in __exit_signal, except for the group leader.
*/
- cputime_t cutime, cstime;
+ cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -568,6 +577,14 @@ struct signal_struct {
struct task_io_accounting ioac;
/*
+ * Cumulative ns of scheduled CPU time for dead threads in the
+ * group, not including a zombie group leader. (This only differs
+ * from jiffies_to_ns(utime + stime) if sched_clock uses something
+ * other than jiffies.)
+ */
+ unsigned long long sum_sched_runtime;
+
+ /*
* We don't bother to synchronize most readers of this at all,
* because there is no reader checking a limit that actually needs
* to get both rlim_cur and rlim_max atomically, and either one
@@ -2195,27 +2212,14 @@ static inline int spin_needbreak(spinlock_t *lock)
/*
* Thread group CPU time accounting.
*/
-
-static inline
-void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
-{
- struct task_cputime *totals = &tsk->signal->cputime.totals;
- unsigned long flags;
-
- spin_lock_irqsave(&totals->lock, flags);
- *times = *totals;
- spin_unlock_irqrestore(&totals->lock, flags);
-}
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
static inline void thread_group_cputime_init(struct signal_struct *sig)
{
- sig->cputime.totals = (struct task_cputime){
- .utime = cputime_zero,
- .stime = cputime_zero,
- .sum_exec_runtime = 0,
- };
-
- spin_lock_init(&sig->cputime.totals.lock);
+ sig->cputimer.cputime = INIT_CPUTIME;
+ spin_lock_init(&sig->cputimer.lock);
+ sig->cputimer.running = 0;
}
static inline void thread_group_cputime_free(struct signal_struct *sig)
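
The sched.h changes replace struct thread_group_cputime with thread_group_cputimer (counters, a running flag and the lock side by side) and introduce INIT_CPUTIME as a compound-literal initializer. A standalone sketch of that initializer pattern, with a pthread spinlock standing in for the kernel spinlock and field names only loosely mirroring the originals (link with -pthread):

#include <pthread.h>
#include <stdio.h>

struct cputime {
        unsigned long long utime, stime, sum_exec_runtime;
};

#define INIT_CPUTIME \
        (struct cputime) { .utime = 0, .stime = 0, .sum_exec_runtime = 0 }

struct cputimer {
        struct cputime cputime;
        int running;
        pthread_spinlock_t lock;
};

static void cputimer_init(struct cputimer *ct)
{
        ct->cputime = INIT_CPUTIME;     /* reset the counters */
        ct->running = 0;                /* no timers armed yet */
        pthread_spin_init(&ct->lock, PTHREAD_PROCESS_PRIVATE);
}

int main(void)
{
        struct cputimer ct;

        cputimer_init(&ct);
        printf("running=%d utime=%llu\n", ct.running, ct.cputime.utime);
        pthread_spin_destroy(&ct.lock);
        return 0;
}
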
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 715196b09d6..bbacb7baa44 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -176,6 +176,12 @@ static inline void init_call_single_data(void)
#define put_cpu() preempt_enable()
#define put_cpu_no_resched() preempt_enable_no_resched()
+/*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
+ */
+extern void arch_disable_smp_support(void);
+
void smp_setup_processor_id(void);
#endif /* __LINUX_SMP_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e0c0fccced4..a0c66a2e00a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -124,7 +124,12 @@ do { \
#ifdef CONFIG_GENERIC_LOCKBREAK
#define spin_is_contended(lock) ((lock)->break_lock)
#else
+
+#ifdef __raw_spin_is_contended
#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
+#else
+#define spin_is_contended(lock) (((void)(lock), 0))
+#endif /*__raw_spin_is_contended*/
#endif
/**
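
The spinlock.h hunk gives spin_is_contended() a fallback for architectures that do not define __raw_spin_is_contended: the comma-operator form ((void)(lock), 0) still evaluates the argument (so typos and unused-variable warnings are caught) but always yields 0. A tiny standalone illustration:

#include <stdio.h>

#define lock_is_contended(lock) (((void)(lock), 0))

int main(void)
{
        int my_lock = 0;

        if (lock_is_contended(&my_lock))
                puts("contended");
        else
                puts("uncontended (stub always reports 0)");
        return 0;
}
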
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0eda02ff241..f9f900cfd06 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -95,13 +95,13 @@ struct old_linux_dirent;
#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
-#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
-#define SYSCALL_DEFINE1(...) SYSCALL_DEFINEx(1, __VA_ARGS__)
-#define SYSCALL_DEFINE2(...) SYSCALL_DEFINEx(2, __VA_ARGS__)
-#define SYSCALL_DEFINE3(...) SYSCALL_DEFINEx(3, __VA_ARGS__)
-#define SYSCALL_DEFINE4(...) SYSCALL_DEFINEx(4, __VA_ARGS__)
-#define SYSCALL_DEFINE5(...) SYSCALL_DEFINEx(5, __VA_ARGS__)
-#define SYSCALL_DEFINE6(...) SYSCALL_DEFINEx(6, __VA_ARGS__)
+#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
+#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE4(name, ...) SYSCALL_DEFINEx(4, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE5(name, ...) SYSCALL_DEFINEx(5, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE6(name, ...) SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
#ifdef CONFIG_PPC64
#define SYSCALL_ALIAS(alias, name) \
@@ -121,21 +121,21 @@ struct old_linux_dirent;
#define SYSCALL_DEFINE(name) static inline long SYSC_##name
#define SYSCALL_DEFINEx(x, name, ...) \
- asmlinkage long sys_##name(__SC_DECL##x(__VA_ARGS__)); \
- static inline long SYSC_##name(__SC_DECL##x(__VA_ARGS__)); \
- asmlinkage long SyS_##name(__SC_LONG##x(__VA_ARGS__)) \
+ asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \
+ static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \
+ asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \
{ \
__SC_TEST##x(__VA_ARGS__); \
- return (long) SYSC_##name(__SC_CAST##x(__VA_ARGS__)); \
+ return (long) SYSC##name(__SC_CAST##x(__VA_ARGS__)); \
} \
- SYSCALL_ALIAS(sys_##name, SyS_##name); \
- static inline long SYSC_##name(__SC_DECL##x(__VA_ARGS__))
+ SYSCALL_ALIAS(sys##name, SyS##name); \
+ static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__))
#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
#define SYSCALL_DEFINE(name) asmlinkage long sys_##name
#define SYSCALL_DEFINEx(x, name, ...) \
- asmlinkage long sys_##name(__SC_DECL##x(__VA_ARGS__))
+ asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__))
#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
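For illustration, a hedged sketch of how a syscall is declared with the updated macros: every parameter is now passed as a separate "type, name" pair. example_read is a made-up syscall name.

SYSCALL_DEFINE3(example_read, unsigned int, fd, char __user *, buf,
                size_t, count)
{
        /* expands to sys_example_read() (plus a SyS_ alias on
         * CONFIG_HAVE_SYSCALL_WRAPPERS architectures) */
        return 0;
}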
diff --git a/init/main.c b/init/main.c
index bfe4fb0c984..6441083f827 100644
--- a/init/main.c
+++ b/init/main.c
@@ -136,14 +136,14 @@ unsigned int __initdata setup_max_cpus = NR_CPUS;
* greater than 0, limits the maximum number of CPUs activated in
* SMP mode to <NUM>.
*/
-#ifndef CONFIG_X86_IO_APIC
-static inline void disable_ioapic_setup(void) {};
-#endif
+
+void __weak arch_disable_smp_support(void) { }
static int __init nosmp(char *str)
{
setup_max_cpus = 0;
- disable_ioapic_setup();
+ arch_disable_smp_support();
+
return 0;
}
@@ -153,14 +153,14 @@ static int __init maxcpus(char *str)
{
get_option(&str, &setup_max_cpus);
if (setup_max_cpus == 0)
- disable_ioapic_setup();
+ arch_disable_smp_support();
return 0;
}
early_param("maxcpus", maxcpus);
#else
-#define setup_max_cpus NR_CPUS
+const unsigned int setup_max_cpus = NR_CPUS;
#endif
/*
diff --git a/ipc/shm.c b/ipc/shm.c
index f8f69fad3a2..05d51d2a792 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -340,6 +340,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
struct file * file;
char name[13];
int id;
+ int acctflag = 0;
if (size < SHMMIN || size > ns->shm_ctlmax)
return -EINVAL;
@@ -364,11 +365,12 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
sprintf (name, "SYSV%08x", key);
if (shmflg & SHM_HUGETLB) {
- /* hugetlb_file_setup takes care of mlock user accounting */
- file = hugetlb_file_setup(name, size);
+ /* hugetlb_file_setup applies strict accounting */
+ if (shmflg & SHM_NORESERVE)
+ acctflag = VM_NORESERVE;
+ file = hugetlb_file_setup(name, size, acctflag);
shp->mlock_user = current_user();
} else {
- int acctflag = 0;
/*
* Do not allow no accounting for OVERCOMMIT_NEVER, even
* if it's asked for.
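A hedged userspace sketch of the case this hunk handles: a hugetlb segment requested without an up-front reservation, which maps to the new acctflag = VM_NORESERVE branch. Depending on libc headers, SHM_HUGETLB and SHM_NORESERVE may additionally require <linux/shm.h>.

#include <sys/ipc.h>
#include <sys/shm.h>

int make_huge_segment(size_t size)
{
        return shmget(IPC_PRIVATE, size,
                      IPC_CREAT | SHM_HUGETLB | SHM_NORESERVE | 0600);
}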
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 5a54ff42874..e14db9c089b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2351,7 +2351,7 @@ static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
struct cgroup_subsys *ss = subsys[i];
if (ss->root == root)
- mutex_lock_nested(&ss->hierarchy_mutex, i);
+ mutex_lock(&ss->hierarchy_mutex);
}
}
@@ -2637,6 +2637,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
BUG_ON(!list_empty(&init_task.tasks));
mutex_init(&ss->hierarchy_mutex);
+ lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
ss->active = 1;
}
diff --git a/kernel/exit.c b/kernel/exit.c
index 70612c19ac9..167e1e3ad7c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -118,6 +118,8 @@ static void __exit_signal(struct task_struct *tsk)
* We won't ever get here for the group leader, since it
* will have been the last reference on the signal_struct.
*/
+ sig->utime = cputime_add(sig->utime, task_utime(tsk));
+ sig->stime = cputime_add(sig->stime, task_stime(tsk));
sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt;
@@ -126,6 +128,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk);
task_io_accounting_add(&sig->ioac, &tsk->ioac);
+ sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
sig = NULL; /* Marker for below. */
}
diff --git a/kernel/fork.c b/kernel/fork.c
index c078438ba6f..8de303bdd4e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -856,13 +856,14 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->tty_old_pgrp = NULL;
sig->tty = NULL;
- sig->cutime = sig->cstime = cputime_zero;
+ sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
sig->gtime = cputime_zero;
sig->cgtime = cputime_zero;
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
task_io_accounting_init(&sig->ioac);
+ sig->sum_sched_runtime = 0;
taskstats_tgid_init(sig);
task_lock(current->group_leader);
@@ -1099,7 +1100,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
- if (unlikely(ptrace_reparented(current)))
+ if (unlikely(current->ptrace))
ptrace_fork(p, clone_flags);
/* Perform scheduler related setup. Assign this task to a CPU. */
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 6a5fe93dd8b..58762f7077e 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -62,7 +62,7 @@ int do_getitimer(int which, struct itimerval *value)
struct task_cputime cputime;
cputime_t utime;
- thread_group_cputime(tsk, &cputime);
+ thread_group_cputimer(tsk, &cputime);
utime = cputime.utime;
if (cputime_le(cval, utime)) { /* about to fire */
cval = jiffies_to_cputime(1);
@@ -82,7 +82,7 @@ int do_getitimer(int which, struct itimerval *value)
struct task_cputime times;
cputime_t ptime;
- thread_group_cputime(tsk, &times);
+ thread_group_cputimer(tsk, &times);
ptime = cputime_add(times.utime, times.stime);
if (cputime_le(cval, ptime)) { /* about to fire */
cval = jiffies_to_cputime(1);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index fa07da94d7b..2313a4cc14e 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -230,6 +230,71 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
return 0;
}
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+{
+ struct sighand_struct *sighand;
+ struct signal_struct *sig;
+ struct task_struct *t;
+
+ *times = INIT_CPUTIME;
+
+ rcu_read_lock();
+ sighand = rcu_dereference(tsk->sighand);
+ if (!sighand)
+ goto out;
+
+ sig = tsk->signal;
+
+ t = tsk;
+ do {
+ times->utime = cputime_add(times->utime, t->utime);
+ times->stime = cputime_add(times->stime, t->stime);
+ times->sum_exec_runtime += t->se.sum_exec_runtime;
+
+ t = next_thread(t);
+ } while (t != tsk);
+
+ times->utime = cputime_add(times->utime, sig->utime);
+ times->stime = cputime_add(times->stime, sig->stime);
+ times->sum_exec_runtime += sig->sum_sched_runtime;
+out:
+ rcu_read_unlock();
+}
+
+static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
+{
+ if (cputime_gt(b->utime, a->utime))
+ a->utime = b->utime;
+
+ if (cputime_gt(b->stime, a->stime))
+ a->stime = b->stime;
+
+ if (b->sum_exec_runtime > a->sum_exec_runtime)
+ a->sum_exec_runtime = b->sum_exec_runtime;
+}
+
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
+{
+ struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+ struct task_cputime sum;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cputimer->lock, flags);
+ if (!cputimer->running) {
+ cputimer->running = 1;
+ /*
+ * The POSIX timer interface allows for absolute time expiry
+ * values through the TIMER_ABSTIME flag; therefore we have
+ * to synchronize the timer to the clock every time we start
+ * it.
+ */
+ thread_group_cputime(tsk, &sum);
+ update_gt_cputime(&cputimer->cputime, &sum);
+ }
+ *times = cputimer->cputime;
+ spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
/*
* Sample a process (thread group) clock for the given group_leader task.
* Must be called with tasklist_lock held for reading.
@@ -457,7 +522,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
struct task_cputime cputime;
- thread_group_cputime(tsk, &cputime);
+ thread_group_cputimer(tsk, &cputime);
cleanup_timers(tsk->signal->cpu_timers,
cputime.utime, cputime.stime, cputime.sum_exec_runtime);
}
@@ -964,6 +1029,19 @@ static void check_thread_timers(struct task_struct *tsk,
}
}
+static void stop_process_timers(struct task_struct *tsk)
+{
+ struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+ unsigned long flags;
+
+ if (!cputimer->running)
+ return;
+
+ spin_lock_irqsave(&cputimer->lock, flags);
+ cputimer->running = 0;
+ spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
/*
* Check for any per-thread CPU timers that have fired and move them
* off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -987,13 +1065,15 @@ static void check_process_timers(struct task_struct *tsk,
sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
list_empty(&timers[CPUCLOCK_VIRT]) &&
cputime_eq(sig->it_virt_expires, cputime_zero) &&
- list_empty(&timers[CPUCLOCK_SCHED]))
+ list_empty(&timers[CPUCLOCK_SCHED])) {
+ stop_process_timers(tsk);
return;
+ }
/*
* Collect the current process totals.
*/
- thread_group_cputime(tsk, &cputime);
+ thread_group_cputimer(tsk, &cputime);
utime = cputime.utime;
ptime = cputime_add(utime, cputime.stime);
sum_sched_runtime = cputime.sum_exec_runtime;
@@ -1259,7 +1339,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
if (!task_cputime_zero(&sig->cputime_expires)) {
struct task_cputime group_sample;
- thread_group_cputime(tsk, &group_sample);
+ thread_group_cputimer(tsk, &group_sample);
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
return 1;
}
@@ -1329,6 +1409,33 @@ void run_posix_cpu_timers(struct task_struct *tsk)
}
/*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with tasklist_lock held for reading.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+ struct task_struct *p,
+ union cpu_time_count *cpu)
+{
+ struct task_cputime cputime;
+
+ thread_group_cputimer(p, &cputime);
+ switch (CPUCLOCK_WHICH(which_clock)) {
+ default:
+ return -EINVAL;
+ case CPUCLOCK_PROF:
+ cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+ break;
+ case CPUCLOCK_VIRT:
+ cpu->cpu = cputime.utime;
+ break;
+ case CPUCLOCK_SCHED:
+ cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+ break;
+ }
+ return 0;
+}
+
+/*
* Set one of the process-wide special case CPU timers.
* The tsk->sighand->siglock must be held by the caller.
* The *newval argument is relative and we update it to be absolute, *oldval
@@ -1341,7 +1448,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
struct list_head *head;
BUG_ON(clock_idx == CPUCLOCK_SCHED);
- cpu_clock_sample_group(clock_idx, tsk, &now);
+ cpu_timer_sample_group(clock_idx, tsk, &now);
if (oldval) {
if (!cputime_eq(*oldval, cputime_zero)) {
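To illustrate the new interface, a hedged sketch mirroring the do_getitimer() change above: callers sample the shared cputimer instead of summing over every thread on each call. sample_prof_clock() is a made-up helper name.

static cputime_t sample_prof_clock(struct task_struct *tsk)
{
        struct task_cputime times;

        thread_group_cputimer(tsk, &times);     /* starts the cputimer if idle */
        return cputime_add(times.utime, times.stime);   /* ITIMER_PROF clock */
}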
diff --git a/kernel/profile.c b/kernel/profile.c
index 784933acf5b..7724e0409ba 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -114,12 +114,15 @@ int __ref profile_init(void)
if (!slab_is_available()) {
prof_buffer = alloc_bootmem(buffer_bytes);
alloc_bootmem_cpumask_var(&prof_cpu_mask);
+ cpumask_copy(prof_cpu_mask, cpu_possible_mask);
return 0;
}
if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
return -ENOMEM;
+ cpumask_copy(prof_cpu_mask, cpu_possible_mask);
+
prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
if (prof_buffer)
return 0;
diff --git a/kernel/sched.c b/kernel/sched.c
index fc17fd91ab5..61245b8d0f1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2266,16 +2266,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
if (!sched_feat(SYNC_WAKEUPS))
sync = 0;
- if (!sync) {
- if (current->se.avg_overlap < sysctl_sched_migration_cost &&
- p->se.avg_overlap < sysctl_sched_migration_cost)
- sync = 1;
- } else {
- if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
- p->se.avg_overlap >= sysctl_sched_migration_cost)
- sync = 0;
- }
-
#ifdef CONFIG_SMP
if (sched_feat(LB_WAKEUP_UPDATE)) {
struct sched_domain *sd;
@@ -3890,19 +3880,24 @@ int select_nohz_load_balancer(int stop_tick)
int cpu = smp_processor_id();
if (stop_tick) {
- cpumask_set_cpu(cpu, nohz.cpu_mask);
cpu_rq(cpu)->in_nohz_recently = 1;
- /*
- * If we are going offline and still the leader, give up!
- */
- if (!cpu_active(cpu) &&
- atomic_read(&nohz.load_balancer) == cpu) {
+ if (!cpu_active(cpu)) {
+ if (atomic_read(&nohz.load_balancer) != cpu)
+ return 0;
+
+ /*
+ * If we are going offline and still the leader,
+ * give up!
+ */
if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
BUG();
+
return 0;
}
+ cpumask_set_cpu(cpu, nohz.cpu_mask);
+
/* time for ilb owner also to sleep */
if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
if (atomic_read(&nohz.load_balancer) == cpu)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a7e50ba185a..0566f2a03c4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1191,15 +1191,20 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
int idx, unsigned long load, unsigned long this_load,
unsigned int imbalance)
{
+ struct task_struct *curr = this_rq->curr;
+ struct task_group *tg;
unsigned long tl = this_load;
unsigned long tl_per_task;
- struct task_group *tg;
unsigned long weight;
int balanced;
if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
return 0;
+ if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
+ p->se.avg_overlap > sysctl_sched_migration_cost))
+ sync = 0;
+
/*
* If sync wakeup then subtract the (maximum possible)
* effect of the currently running task from the load
@@ -1426,7 +1431,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
if (!sched_feat(WAKEUP_PREEMPT))
return;
- if (sched_feat(WAKEUP_OVERLAP) && sync) {
+ if (sched_feat(WAKEUP_OVERLAP) && (sync ||
+ (se->avg_overlap < sysctl_sched_migration_cost &&
+ pse->avg_overlap < sysctl_sched_migration_cost))) {
resched_task(curr);
return;
}
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 8ab0cef8eca..a8f93dd374e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -296,19 +296,21 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
static inline void account_group_user_time(struct task_struct *tsk,
cputime_t cputime)
{
- struct task_cputime *times;
- struct signal_struct *sig;
+ struct thread_group_cputimer *cputimer;
/* tsk == current, ensure it is safe to use ->signal */
if (unlikely(tsk->exit_state))
return;
- sig = tsk->signal;
- times = &sig->cputime.totals;
+ cputimer = &tsk->signal->cputimer;
- spin_lock(&times->lock);
- times->utime = cputime_add(times->utime, cputime);
- spin_unlock(&times->lock);
+ if (!cputimer->running)
+ return;
+
+ spin_lock(&cputimer->lock);
+ cputimer->cputime.utime =
+ cputime_add(cputimer->cputime.utime, cputime);
+ spin_unlock(&cputimer->lock);
}
/**
@@ -324,19 +326,21 @@ static inline void account_group_user_time(struct task_struct *tsk,
static inline void account_group_system_time(struct task_struct *tsk,
cputime_t cputime)
{
- struct task_cputime *times;
- struct signal_struct *sig;
+ struct thread_group_cputimer *cputimer;
/* tsk == current, ensure it is safe to use ->signal */
if (unlikely(tsk->exit_state))
return;
- sig = tsk->signal;
- times = &sig->cputime.totals;
+ cputimer = &tsk->signal->cputimer;
+
+ if (!cputimer->running)
+ return;
- spin_lock(&times->lock);
- times->stime = cputime_add(times->stime, cputime);
- spin_unlock(&times->lock);
+ spin_lock(&cputimer->lock);
+ cputimer->cputime.stime =
+ cputime_add(cputimer->cputime.stime, cputime);
+ spin_unlock(&cputimer->lock);
}
/**
@@ -352,7 +356,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
static inline void account_group_exec_runtime(struct task_struct *tsk,
unsigned long long ns)
{
- struct task_cputime *times;
+ struct thread_group_cputimer *cputimer;
struct signal_struct *sig;
sig = tsk->signal;
@@ -361,9 +365,12 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
if (unlikely(!sig))
return;
- times = &sig->cputime.totals;
+ cputimer = &sig->cputimer;
+
+ if (!cputimer->running)
+ return;
- spin_lock(&times->lock);
- times->sum_exec_runtime += ns;
- spin_unlock(&times->lock);
+ spin_lock(&cputimer->lock);
+ cputimer->cputime.sum_exec_runtime += ns;
+ spin_unlock(&cputimer->lock);
}
diff --git a/kernel/signal.c b/kernel/signal.c
index b6b36768b75..2a74fe87c0d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1367,7 +1367,6 @@ int do_notify_parent(struct task_struct *tsk, int sig)
struct siginfo info;
unsigned long flags;
struct sighand_struct *psig;
- struct task_cputime cputime;
int ret = sig;
BUG_ON(sig == -1);
@@ -1397,9 +1396,10 @@ int do_notify_parent(struct task_struct *tsk, int sig)
info.si_uid = __task_cred(tsk)->uid;
rcu_read_unlock();
- thread_group_cputime(tsk, &cputime);
- info.si_utime = cputime_to_jiffies(cputime.utime);
- info.si_stime = cputime_to_jiffies(cputime.stime);
+ info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
+ tsk->signal->utime));
+ info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
+ tsk->signal->stime));
info.si_status = tsk->exit_code & 0x7f;
if (tsk->exit_code & 0x80)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 790f9d78566..c5ef44ff850 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -101,6 +101,7 @@ static int two = 2;
static int zero;
static int one = 1;
+static unsigned long one_ul = 1;
static int one_hundred = 100;
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
@@ -974,7 +975,7 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = &dirty_background_bytes_handler,
.strategy = &sysctl_intvec,
- .extra1 = &one,
+ .extra1 = &one_ul,
},
{
.ctl_name = VM_DIRTY_RATIO,
@@ -995,7 +996,7 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = &dirty_bytes_handler,
.strategy = &sysctl_intvec,
- .extra1 = &one,
+ .extra1 = &one_ul,
},
{
.procname = "dirty_writeback_centisecs",
diff --git a/mm/fremap.c b/mm/fremap.c
index 736ba7f3306..b6ec85abbb3 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -198,7 +198,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
flags &= MAP_NONBLOCK;
get_file(file);
addr = mmap_region(file, start, size,
- flags, vma->vm_flags, pgoff, 1);
+ flags, vma->vm_flags, pgoff);
fput(file);
if (IS_ERR_VALUE(addr)) {
err = addr;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 618e9830408..107da3d809a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2269,12 +2269,18 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
int hugetlb_reserve_pages(struct inode *inode,
long from, long to,
- struct vm_area_struct *vma)
+ struct vm_area_struct *vma,
+ int acctflag)
{
long ret, chg;
struct hstate *h = hstate_inode(inode);
- if (vma && vma->vm_flags & VM_NORESERVE)
+ /*
+ * Only apply the hugepage reservation if asked. For VM_NORESERVE
+ * mappings, the fault path will instead try to allocate a page and
+ * charge filesystem quota without using the reserves.
+ */
+ if (acctflag & VM_NORESERVE)
return 0;
/*
@@ -2299,13 +2305,31 @@ int hugetlb_reserve_pages(struct inode *inode,
if (chg < 0)
return chg;
+ /* There must be enough filesystem quota for the mapping */
if (hugetlb_get_quota(inode->i_mapping, chg))
return -ENOSPC;
+
+ /*
+ * Check that enough hugepages are available for the reservation.
+ * Hand back the quota if there are not.
+ */
ret = hugetlb_acct_memory(h, chg);
if (ret < 0) {
hugetlb_put_quota(inode->i_mapping, chg);
return ret;
}
+
+ /*
+ * Account for the reservations made. Shared mappings record regions
+ * that have reservations as they are shared by multiple VMAs.
+ * When the last VMA disappears, the region map says how much
+ * the reservation was and the page cache tells how much of
+ * the reservation was consumed. Private mappings are per-VMA and
+ * only the consumed reservations are tracked. When the VMA
+ * disappears, the original reservation is the VMA size and the
+ * consumed reservations are stored in the map. Hence, nothing
+ * else has to be done for private mappings here
+ */
if (!vma || vma->vm_flags & VM_SHARED)
region_add(&inode->i_mapping->private_list, from, to);
return 0;
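A hedged userspace sketch of the mmap() side of the same acctflag path: with MAP_NORESERVE the reservation in hugetlb_reserve_pages() is skipped and huge pages are only allocated, and may fail, at fault time. map_huge_noreserve() and the hugetlbfs path are assumptions.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_huge_noreserve(const char *path, size_t len)
{
        void *p;
        int fd = open(path, O_CREAT | O_RDWR, 0600);

        if (fd < 0)
                return MAP_FAILED;
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_NORESERVE, fd, 0);
        close(fd);              /* the mapping keeps its own reference */
        return p;
}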
diff --git a/mm/migrate.c b/mm/migrate.c
index 2bb4e1d6352..a9eff3f092f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1129,7 +1129,7 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
struct vm_area_struct *vma;
int err = 0;
- for(vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
+ for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
if (vma->vm_ops && vma->vm_ops->migrate) {
err = vma->vm_ops->migrate(vma, to, from, flags);
if (err)
diff --git a/mm/mlock.c b/mm/mlock.c
index 028ec482fdd..037161d61b4 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -311,7 +311,10 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current))) {
- return __mlock_vma_pages_range(vma, start, end, 1);
+ __mlock_vma_pages_range(vma, start, end, 1);
+
+ /* Hide errors from mmap() and other callers */
+ return 0;
}
/*
diff --git a/mm/mmap.c b/mm/mmap.c
index 214b6a258ee..00ced3ee49a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -918,7 +918,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
struct inode *inode;
unsigned int vm_flags;
int error;
- int accountable = 1;
unsigned long reqprot = prot;
/*
@@ -1019,8 +1018,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
return -EPERM;
vm_flags &= ~VM_MAYEXEC;
}
- if (is_file_hugepages(file))
- accountable = 0;
if (!file->f_op || !file->f_op->mmap)
return -ENODEV;
@@ -1053,8 +1050,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
if (error)
return error;
- return mmap_region(file, addr, len, flags, vm_flags, pgoff,
- accountable);
+ return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
EXPORT_SYMBOL(do_mmap_pgoff);
@@ -1092,17 +1088,23 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
/*
* We account for memory if it's a private writeable mapping,
- * and VM_NORESERVE wasn't set.
+ * not hugepages and VM_NORESERVE wasn't set.
*/
-static inline int accountable_mapping(unsigned int vm_flags)
+static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
{
+ /*
+ * hugetlb has its own accounting separate from the core VM.
+ * VM_HUGETLB may not be set yet, so we cannot check for that flag.
+ */
+ if (file && is_file_hugepages(file))
+ return 0;
+
return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}
unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, unsigned long flags,
- unsigned int vm_flags, unsigned long pgoff,
- int accountable)
+ unsigned int vm_flags, unsigned long pgoff)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
@@ -1128,18 +1130,22 @@ munmap_back:
/*
* Set 'VM_NORESERVE' if we should not account for the
- * memory use of this mapping. We only honor MAP_NORESERVE
- * if we're allowed to overcommit memory.
+ * memory use of this mapping.
*/
- if ((flags & MAP_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER)
- vm_flags |= VM_NORESERVE;
- if (!accountable)
- vm_flags |= VM_NORESERVE;
+ if ((flags & MAP_NORESERVE)) {
+ /* We honor MAP_NORESERVE if allowed to overcommit */
+ if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
+ vm_flags |= VM_NORESERVE;
+
+ /* hugetlb applies strict overcommit unless MAP_NORESERVE */
+ if (file && is_file_hugepages(file))
+ vm_flags |= VM_NORESERVE;
+ }
/*
* Private writable mapping: check memory availability
*/
- if (accountable_mapping(vm_flags)) {
+ if (accountable_mapping(file, vm_flags)) {
charged = len >> PAGE_SHIFT;
if (security_vm_enough_memory(charged))
return -ENOMEM;
@@ -2078,12 +2084,8 @@ void exit_mmap(struct mm_struct *mm)
unsigned long end;
/* mm's last user has gone, and its about to be pulled down */
- arch_exit_mmap(mm);
mmu_notifier_release(mm);
- if (!mm->mmap) /* Can happen if dup_mmap() received an OOM */
- return;
-
if (mm->locked_vm) {
vma = mm->mmap;
while (vma) {
@@ -2092,7 +2094,13 @@ void exit_mmap(struct mm_struct *mm)
vma = vma->vm_next;
}
}
+
+ arch_exit_mmap(mm);
+
vma = mm->mmap;
+ if (!vma) /* Can happen if dup_mmap() received an OOM */
+ return;
+
lru_add_drain();
flush_cache_mm(mm);
tlb = tlb_gather_mmu(mm, 1);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index abe2694e13f..258197b76fb 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -151,10 +151,11 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
/*
* If we make a private mapping writable we increase our commit;
* but (without finer accounting) cannot reduce our commit if we
- * make it unwritable again.
+ * make it unwritable again. hugetlb mappings were accounted for
+ * even if read-only, so there is no need to account for them here.
*/
if (newflags & VM_WRITE) {
- if (!(oldflags & (VM_ACCOUNT|VM_WRITE|
+ if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
VM_SHARED|VM_NORESERVE))) {
charged = nrpages;
if (security_vm_enough_memory(charged))
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index dc32dae01e5..3c84128596b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -209,7 +209,7 @@ int dirty_bytes_handler(struct ctl_table *table, int write,
struct file *filp, void __user *buffer, size_t *lenp,
loff_t *ppos)
{
- int old_bytes = vm_dirty_bytes;
+ unsigned long old_bytes = vm_dirty_bytes;
int ret;
ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
@@ -1051,20 +1051,23 @@ continue_unlock:
}
}
- if (nr_to_write > 0)
+ if (nr_to_write > 0) {
nr_to_write--;
- else if (wbc->sync_mode == WB_SYNC_NONE) {
- /*
- * We stop writing back only if we are not
- * doing integrity sync. In case of integrity
- * sync we have to keep going because someone
- * may be concurrently dirtying pages, and we
- * might have synced a lot of newly appeared
- * dirty pages, but have not synced all of the
- * old dirty pages.
- */
- done = 1;
- break;
+ if (nr_to_write == 0 &&
+ wbc->sync_mode == WB_SYNC_NONE) {
+ /*
+ * We stop writing back only if we are
+ * not doing integrity sync. In case of
+ * integrity sync we have to keep going
+ * because someone may be concurrently
+ * dirtying pages, and we might have
+ * synced a lot of newly appeared dirty
+ * pages, but have not synced all of the
+ * old dirty pages.
+ */
+ done = 1;
+ break;
+ }
}
if (wbc->nonblocking && bdi_write_congested(bdi)) {
@@ -1076,7 +1079,7 @@ continue_unlock:
pagevec_release(&pvec);
cond_resched();
}
- if (!cycled) {
+ if (!cycled && !done) {
/*
* range_cyclic:
* We hit the last page and there is more work to be done: wrap
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 7006a11350c..ceecfbb143f 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -114,7 +114,8 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
nid = page_to_nid(pfn_to_page(pfn));
table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
if (slab_is_available()) {
- base = kmalloc_node(table_size, GFP_KERNEL, nid);
+ base = kmalloc_node(table_size,
+ GFP_KERNEL | __GFP_NOWARN, nid);
if (!base)
base = vmalloc_node(table_size, nid);
} else {
diff --git a/mm/rmap.c b/mm/rmap.c
index ac4af8cffbf..16521664010 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1072,7 +1072,8 @@ static int try_to_unmap_file(struct page *page, int unlock, int migration)
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
if (MLOCK_PAGES && unlikely(unlock)) {
- if (!(vma->vm_flags & VM_LOCKED))
+ if (!((vma->vm_flags & VM_LOCKED) &&
+ page_mapped_in_vma(page, vma)))
continue; /* must visit all vmas */
ret = SWAP_MLOCK;
} else {
diff --git a/mm/slab.c b/mm/slab.c
index ddc41f337d5..4d00855629c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4457,3 +4457,4 @@ size_t ksize(const void *objp)
return obj_size(virt_to_cache(objp));
}
+EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
index bf7e8fc3aed..52bc8a2bd9e 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -521,6 +521,7 @@ size_t ksize(const void *block)
} else
return sp->page.private;
}
+EXPORT_SYMBOL(ksize);
struct kmem_cache {
unsigned int size, align;
diff --git a/mm/slub.c b/mm/slub.c
index bdc9abb08a2..0280eee6cf3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2736,6 +2736,7 @@ size_t ksize(const void *object)
*/
return s->size;
}
+EXPORT_SYMBOL(ksize);
void kfree(const void *x)
{
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index dcd7666824b..fc70147c771 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -29,6 +29,7 @@
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
+#include <linux/types.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include "protocol.h"
@@ -160,29 +161,32 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
break;
case 'w':{
int16_t *val = va_arg(ap, int16_t *);
- if (pdu_read(pdu, val, sizeof(*val))) {
+ __le16 le_val;
+ if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
- *val = cpu_to_le16(*val);
+ *val = le16_to_cpu(le_val);
}
break;
case 'd':{
int32_t *val = va_arg(ap, int32_t *);
- if (pdu_read(pdu, val, sizeof(*val))) {
+ __le32 le_val;
+ if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
- *val = cpu_to_le32(*val);
+ *val = le32_to_cpu(le_val);
}
break;
case 'q':{
int64_t *val = va_arg(ap, int64_t *);
- if (pdu_read(pdu, val, sizeof(*val))) {
+ __le64 le_val;
+ if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
- *val = cpu_to_le64(*val);
+ *val = le64_to_cpu(le_val);
}
break;
case 's':{
@@ -362,19 +366,19 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
}
break;
case 'w':{
- int16_t val = va_arg(ap, int);
+ __le16 val = cpu_to_le16(va_arg(ap, int));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
}
break;
case 'd':{
- int32_t val = va_arg(ap, int32_t);
+ __le32 val = cpu_to_le32(va_arg(ap, int32_t));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
}
break;
case 'q':{
- int64_t val = va_arg(ap, int64_t);
+ __le64 val = cpu_to_le64(va_arg(ap, int64_t));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
}
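For context, a hedged sketch of the conversion direction these hunks adopt: the 9P wire format is little-endian, so the raw bytes are read into a __le32 and converted once with le32_to_cpu(). read_wire_u32() is a made-up helper.

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static u32 read_wire_u32(const void *buf)
{
        __le32 le_val;

        memcpy(&le_val, buf, sizeof(le_val));   /* copy raw wire bytes */
        return le32_to_cpu(le_val);             /* convert to host order */
}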
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index bdd9ccea17c..d2c27c808d3 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -67,6 +67,11 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
struct net_device *indev;
+ if (skb_warn_if_lro(skb)) {
+ kfree_skb(skb);
+ return;
+ }
+
indev = skb->dev;
skb->dev = to->dev;
skb_forward_csum(skb);
@@ -89,7 +94,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
- if (!skb_warn_if_lro(skb) && should_deliver(to, skb)) {
+ if (should_deliver(to, skb)) {
__br_forward(to, skb);
return;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 5379b0c1190..a17e0066236 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1090,7 +1090,7 @@ int dev_open(struct net_device *dev)
/*
* Enable NET_DMA
*/
- dmaengine_get();
+ net_dmaengine_get();
/*
* Initialize multicasting status
@@ -1172,7 +1172,7 @@ int dev_close(struct net_device *dev)
/*
* Shutdown NET_DMA
*/
- dmaengine_put();
+ net_dmaengine_put();
return 0;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f66c58df895..278a142d104 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1994,8 +1994,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
if (!net_eq(neigh_parms_net(p), net))
continue;
- if (nidx++ < neigh_skip)
- continue;
+ if (nidx < neigh_skip)
+ goto next;
if (neightbl_fill_param_info(skb, tbl, p,
NETLINK_CB(cb->skb).pid,
@@ -2003,6 +2003,8 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
RTM_NEWNEIGHTBL,
NLM_F_MULTI) <= 0)
goto out;
+ next:
+ nidx++;
}
neigh_skip = 0;
@@ -2082,12 +2084,10 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
if (h > s_h)
s_idx = 0;
for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
- int lidx;
if (dev_net(n->dev) != net)
continue;
- lidx = idx++;
- if (lidx < s_idx)
- continue;
+ if (idx < s_idx)
+ goto next;
if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
@@ -2096,6 +2096,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
rc = -1;
goto out;
}
+ next:
+ idx++;
}
}
read_unlock_bh(&tbl->lock);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cc3a0a06c00..c47c989cb1f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1234,8 +1234,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
struct udphdr *uh;
unsigned short ulen;
struct rtable *rt = (struct rtable*)skb->dst;
- __be32 saddr = ip_hdr(skb)->saddr;
- __be32 daddr = ip_hdr(skb)->daddr;
+ __be32 saddr, daddr;
struct net *net = dev_net(skb->dev);
/*
@@ -1259,6 +1258,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (udp4_csum_init(skb, uh, proto))
goto csum_error;
+ saddr = ip_hdr(skb)->saddr;
+ daddr = ip_hdr(skb)->daddr;
+
if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
return __udp4_lib_mcast_deliver(net, skb, uh,
saddr, daddr, udptable);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index c62dd247774..7712578bdc6 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -323,17 +323,21 @@ static struct ip6_flowlabel *
fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
int optlen, int *err_p)
{
- struct ip6_flowlabel *fl;
+ struct ip6_flowlabel *fl = NULL;
int olen;
int addr_type;
int err;
+ olen = optlen - CMSG_ALIGN(sizeof(*freq));
+ err = -EINVAL;
+ if (olen > 64 * 1024)
+ goto done;
+
err = -ENOMEM;
fl = kzalloc(sizeof(*fl), GFP_KERNEL);
if (fl == NULL)
goto done;
- olen = optlen - CMSG_ALIGN(sizeof(*freq));
if (olen > 0) {
struct msghdr msg;
struct flowi flowi;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 58e2b0d9375..d994c55a5b1 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -249,8 +249,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
}
t = netdev_priv(dev);
- ip6_tnl_dev_init(dev);
t->parms = *p;
+ ip6_tnl_dev_init(dev);
if ((err = register_netdevice(dev)) < 0)
goto failed_free;
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index c455cf4ee75..c323643ffcf 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -49,8 +49,19 @@ static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
static const u_int8_t invmap[] = {
[ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1,
[ICMPV6_ECHO_REPLY - 128] = ICMPV6_ECHO_REQUEST + 1,
- [ICMPV6_NI_QUERY - 128] = ICMPV6_NI_QUERY + 1,
- [ICMPV6_NI_REPLY - 128] = ICMPV6_NI_REPLY +1
+ [ICMPV6_NI_QUERY - 128] = ICMPV6_NI_REPLY + 1,
+ [ICMPV6_NI_REPLY - 128] = ICMPV6_NI_QUERY +1
+};
+
+static const u_int8_t noct_valid_new[] = {
+ [ICMPV6_MGM_QUERY - 130] = 1,
+ [ICMPV6_MGM_REPORT -130] = 1,
+ [ICMPV6_MGM_REDUCTION - 130] = 1,
+ [NDISC_ROUTER_SOLICITATION - 130] = 1,
+ [NDISC_ROUTER_ADVERTISEMENT - 130] = 1,
+ [NDISC_NEIGHBOUR_SOLICITATION - 130] = 1,
+ [NDISC_NEIGHBOUR_ADVERTISEMENT - 130] = 1,
+ [ICMPV6_MLD2_REPORT - 130] = 1
};
static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
@@ -178,6 +189,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
{
const struct icmp6hdr *icmp6h;
struct icmp6hdr _ih;
+ int type;
icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
if (icmp6h == NULL) {
@@ -194,6 +206,15 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
return -NF_ACCEPT;
}
+ type = icmp6h->icmp6_type - 130;
+ if (type >= 0 && type < sizeof(noct_valid_new) &&
+ noct_valid_new[type]) {
+ skb->nfct = &nf_conntrack_untracked.ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+ return NF_ACCEPT;
+ }
+
/* is not error message ? */
if (icmp6h->icmp6_type >= 128)
return NF_ACCEPT;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c32a7e8e3a1..cb78aa00399 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -434,7 +434,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
} else
return NOTIFY_DONE;
- if (!nfnetlink_has_listeners(group))
+ if (!item->report && !nfnetlink_has_listeners(group))
return NOTIFY_DONE;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -1215,6 +1215,16 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
}
}
+#ifdef CONFIG_NF_NAT_NEEDED
+ if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
+ err = ctnetlink_change_nat_seq_adj(ct, cda);
+ if (err < 0) {
+ rcu_read_unlock();
+ goto err;
+ }
+ }
+#endif
+
if (cda[CTA_PROTOINFO]) {
err = ctnetlink_change_protoinfo(ct, cda);
if (err < 0) {
@@ -1492,7 +1502,8 @@ static int ctnetlink_expect_event(struct notifier_block *this,
} else
return NOTIFY_DONE;
- if (!nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
+ if (!item->report &&
+ !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
return NOTIFY_DONE;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index e223cb43ae8..a189ada9128 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -105,7 +105,7 @@ match_packet(const struct sk_buff *skb,
switch (chunk_match_type) {
case SCTP_CHUNK_MATCH_ALL:
- return SCTP_CHUNKMAP_IS_CLEAR(info->chunkmap);
+ return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy);
case SCTP_CHUNK_MATCH_ANY:
return false;
case SCTP_CHUNK_MATCH_ONLY:
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d7d2bed7a69..eac5e7bb736 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -284,13 +284,13 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
if (IS_ERR(trans)) {
call = ERR_CAST(trans);
trans = NULL;
- goto out;
+ goto out_notrans;
}
} else {
trans = rx->trans;
if (!trans) {
call = ERR_PTR(-ENOTCONN);
- goto out;
+ goto out_notrans;
}
atomic_inc(&trans->usage);
}
@@ -315,6 +315,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
rxrpc_put_bundle(trans, bundle);
out:
rxrpc_put_transport(trans);
+out_notrans:
release_sock(&rx->sk);
_leave(" = %p", call);
return call;
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 8bb83a100ed..0f11870116d 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1827,6 +1827,40 @@ sub reset_state {
$state = 0;
}
+sub syscall_munge() {
+ my $void = 0;
+
+ $prototype =~ s@[\r\n\t]+@ @gos; # strip newlines/CR's/tabs
+## if ($prototype =~ m/SYSCALL_DEFINE0\s*\(\s*(a-zA-Z0-9_)*\s*\)/) {
+ if ($prototype =~ m/SYSCALL_DEFINE0/) {
+ $void = 1;
+## $prototype = "long sys_$1(void)";
+ }
+
+ $prototype =~ s/SYSCALL_DEFINE.*\(/long sys_/; # fix return type & func name
+ if ($prototype =~ m/long (sys_.*?),/) {
+ $prototype =~ s/,/\(/;
+ } elsif ($void) {
+ $prototype =~ s/\)/\(void\)/;
+ }
+
+ # now delete all of the odd-numbered commas in $prototype
+ # so that arg types & arg names don't have a comma between them
+ my $count = 0;
+ my $len = length($prototype);
+ if ($void) {
+ $len = 0; # skip the for-loop
+ }
+ for (my $ix = 0; $ix < $len; $ix++) {
+ if (substr($prototype, $ix, 1) eq ',') {
+ $count++;
+ if ($count % 2 == 1) {
+ substr($prototype, $ix, 1) = ' ';
+ }
+ }
+ }
+}
+
sub process_state3_function($$) {
my $x = shift;
my $file = shift;
@@ -1839,11 +1873,15 @@ sub process_state3_function($$) {
elsif ($x =~ /([^\{]*)/) {
$prototype .= $1;
}
+
if (($x =~ /\{/) || ($x =~ /\#\s*define/) || ($x =~ /;/)) {
$prototype =~ s@/\*.*?\*/@@gos; # strip comments.
$prototype =~ s@[\r\n]+@ @gos; # strip newlines/cr's.
$prototype =~ s@^\s+@@gos; # strip leading spaces
- dump_function($prototype,$file);
+ if ($prototype =~ /SYSCALL_DEFINE/) {
+ syscall_munge();
+ }
+ dump_function($prototype, $file);
reset_state();
}
}
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 89096e811a4..772901e41ec 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -90,7 +90,7 @@ static void aaci_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
*/
do {
v = readl(aaci->base + AACI_SLFR);
- } while ((v & (SLFR_1TXB|SLFR_2TXB)) && timeout--);
+ } while ((v & (SLFR_1TXB|SLFR_2TXB)) && --timeout);
if (!timeout)
dev_err(&aaci->dev->dev,
@@ -126,7 +126,7 @@ static unsigned short aaci_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
*/
do {
v = readl(aaci->base + AACI_SLFR);
- } while ((v & SLFR_1TXB) && timeout--);
+ } while ((v & SLFR_1TXB) && --timeout);
if (!timeout) {
dev_err(&aaci->dev->dev, "timeout on slot 1 TX busy\n");
@@ -147,7 +147,7 @@ static unsigned short aaci_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
do {
cond_resched();
v = readl(aaci->base + AACI_SLFR) & (SLFR_1RXV|SLFR_2RXV);
- } while ((v != (SLFR_1RXV|SLFR_2RXV)) && timeout--);
+ } while ((v != (SLFR_1RXV|SLFR_2RXV)) && --timeout);
if (!timeout) {
dev_err(&aaci->dev->dev, "timeout on RX valid\n");
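For clarity, a hedged minimal illustration of why these hunks switch to pre-decrement; busy() is a placeholder for the polled hardware condition and the poll budget is arbitrary.

#include <linux/errno.h>

static int wait_while_busy(void)
{
        int t = 1000;                   /* arbitrary poll budget */

        /* With the old "t--" a timed-out loop exits with t == -1, so the
         * !t test below can never report it; "--t" leaves t == 0. */
        while (busy() && --t)           /* busy(): placeholder poll */
                ;

        return !t ? -ETIMEDOUT : 0;
}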
diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig
index 0bcf14640fd..84714a65e5c 100644
--- a/sound/drivers/Kconfig
+++ b/sound/drivers/Kconfig
@@ -33,7 +33,7 @@ if SND_DRIVERS
config SND_PCSP
tristate "PC-Speaker support (READ HELP!)"
- depends on PCSPKR_PLATFORM && X86_PC && HIGH_RES_TIMERS
+ depends on PCSPKR_PLATFORM && X86 && HIGH_RES_TIMERS
depends on INPUT
depends on EXPERIMENTAL
select SND_PCM