-rw-r--r--  Documentation/feature-removal-schedule.txt | 2
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/boot/dts/lite5200.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/lite5200b.dts | 2
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 2
-rw-r--r--  arch/powerpc/platforms/chrp/pegasos_eth.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 2
-rw-r--r--  arch/powerpc/sysdev/qe_lib/Kconfig | 4
-rw-r--r--  arch/ppc/kernel/entry.S | 18
-rw-r--r--  arch/ppc/kernel/ppc_ksyms.c | 2
-rw-r--r--  arch/ppc/mm/hashtable.S | 20
-rw-r--r--  arch/ppc/mm/pgtable.c | 2
-rw-r--r--  arch/sparc/Kconfig | 7
-rw-r--r--  arch/sparc/kernel/time.c | 4
-rw-r--r--  arch/sparc/lib/atomic32.c | 15
-rw-r--r--  arch/sparc64/Kconfig | 6
-rw-r--r--  arch/sparc64/kernel/Makefile | 4
-rw-r--r--  arch/sparc64/kernel/devices.c | 196
-rw-r--r--  arch/sparc64/kernel/entry.S | 575
-rw-r--r--  arch/sparc64/kernel/head.S | 31
-rw-r--r--  arch/sparc64/kernel/hvapi.c | 5
-rw-r--r--  arch/sparc64/kernel/irq.c | 83
-rw-r--r--  arch/sparc64/kernel/itlb_miss.S | 4
-rw-r--r--  arch/sparc64/kernel/mdesc.c | 619
-rw-r--r--  arch/sparc64/kernel/pci.c | 54
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c | 7
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c | 54
-rw-r--r--  arch/sparc64/kernel/power.c | 2
-rw-r--r--  arch/sparc64/kernel/process.c | 4
-rw-r--r--  arch/sparc64/kernel/prom.c | 148
-rw-r--r--  arch/sparc64/kernel/setup.c | 18
-rw-r--r--  arch/sparc64/kernel/smp.c | 155
-rw-r--r--  arch/sparc64/kernel/sstate.c | 104
-rw-r--r--  arch/sparc64/kernel/sun4v_ivec.S | 30
-rw-r--r--  arch/sparc64/kernel/time.c | 47
-rw-r--r--  arch/sparc64/kernel/traps.c | 27
-rw-r--r--  arch/sparc64/mm/init.c | 90
-rw-r--r--  arch/sparc64/prom/misc.c | 19
-rw-r--r--  drivers/char/drm/Kconfig | 2
-rw-r--r--  drivers/char/random.c | 67
-rw-r--r--  drivers/firewire/Kconfig | 14
-rw-r--r--  drivers/firewire/Makefile | 12
-rw-r--r--  drivers/firewire/fw-cdev.c | 2
-rw-r--r--  drivers/firewire/fw-ohci.c | 5
-rw-r--r--  drivers/hwmon/Kconfig | 2
-rw-r--r--  drivers/hwmon/applesmc.c | 7
-rw-r--r--  drivers/hwmon/coretemp.c | 32
-rw-r--r--  drivers/hwmon/ds1621.c | 8
-rw-r--r--  drivers/hwmon/hwmon-vid.c | 4
-rw-r--r--  drivers/hwmon/w83627hf.c | 4
-rw-r--r--  drivers/ieee1394/eth1394.c | 91
-rw-r--r--  drivers/ieee1394/eth1394.h | 4
-rw-r--r--  drivers/ieee1394/raw1394.c | 8
-rw-r--r--  drivers/ieee1394/sbp2.c | 1
-rw-r--r--  drivers/infiniband/core/cm.c | 25
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 74
-rw-r--r--  drivers/macintosh/Kconfig | 1
-rw-r--r--  drivers/message/fusion/mptbase.h | 2
-rw-r--r--  drivers/message/fusion/mptscsih.c | 8
-rw-r--r--  drivers/net/Kconfig | 1
-rw-r--r--  drivers/net/defxx.c | 1
-rw-r--r--  drivers/net/e1000/e1000_main.c | 9
-rw-r--r--  drivers/net/ehea/ehea.h | 2
-rw-r--r--  drivers/net/ehea/ehea_main.c | 2
-rw-r--r--  drivers/net/mlx4/alloc.c | 2
-rw-r--r--  drivers/net/sky2.c | 6
-rw-r--r--  drivers/net/sky2.h | 24
-rw-r--r--  drivers/net/wireless/hostap/hostap_80211_tx.c | 13
-rw-r--r--  drivers/net/wireless/prism54/islpci_eth.c | 5
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 1
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 5
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 40
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 3
-rw-r--r--  drivers/scsi/Kconfig | 14
-rw-r--r--  drivers/scsi/Makefile | 5
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 15
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 3
-rw-r--r--  drivers/scsi/aacraid/rx.c | 8
-rw-r--r--  drivers/scsi/aacraid/sa.c | 9
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_gram.y | 1
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y | 1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c | 14
-rw-r--r--  drivers/scsi/ipr.c | 7
-rw-r--r--  drivers/scsi/jazz_esp.c | 429
-rw-r--r--  drivers/scsi/libsrp.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 67
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 14
-rw-r--r--  drivers/scsi/pluto.c | 18
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/sd.c | 18
-rw-r--r--  drivers/scsi/stex.c | 90
-rw-r--r--  drivers/serial/suncore.c | 6
-rw-r--r--  drivers/serial/sunzilog.c | 4
-rw-r--r--  drivers/usb/core/hcd.c | 2
-rw-r--r--  drivers/usb/core/hub.c | 32
-rw-r--r--  drivers/usb/core/usb.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 26
-rw-r--r--  include/asm-sparc/atomic.h | 38
-rw-r--r--  include/asm-sparc64/bugs.h | 8
-rw-r--r--  include/asm-sparc64/cpudata.h | 24
-rw-r--r--  include/asm-sparc64/hypervisor.h | 643
-rw-r--r--  include/asm-sparc64/kdebug.h | 1
-rw-r--r--  include/asm-sparc64/mdesc.h | 39
-rw-r--r--  include/asm-sparc64/oplib.h | 7
-rw-r--r--  include/asm-sparc64/percpu.h | 4
-rw-r--r--  include/asm-sparc64/prom.h | 1
-rw-r--r--  include/asm-sparc64/smp.h | 4
-rw-r--r--  include/asm-sparc64/sstate.h | 13
-rw-r--r--  include/asm-sparc64/thread_info.h | 8
-rw-r--r--  include/asm-sparc64/topology.h | 3
-rw-r--r--  include/asm-sparc64/tsb.h | 2
-rw-r--r--  include/linux/timer.h | 6
-rw-r--r--  kernel/time/tick-sched.c | 16
-rw-r--r--  kernel/timer.c | 10
-rw-r--r--  net/ieee80211/ieee80211_module.c | 2
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_module.c | 5
119 files changed, 3310 insertions, 1178 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 5c8695a3d13..2d7ea85075b 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -62,7 +62,7 @@ Who: Dan Dennedy <dan@dennedy.org>, Stefan Richter <stefanr@s5r6.in-berlin.de>
What: old NCR53C9x driver
When: October 2007
Why: Replaced by the much better esp_scsi driver. Actual low-level
- driver can ported over almost trivially.
+ driver can be ported over almost trivially.
Who: David Miller <davem@davemloft.net>
Christoph Hellwig <hch@lst.de>
diff --git a/MAINTAINERS b/MAINTAINERS
index 953291d08c7..4cc17b993b6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2334,7 +2334,7 @@ S: Maintained
MEGARAID SCSI DRIVERS
P: Neela Syam Kolli
-M: Neela.Kolli@engenio.com
+M: megaraidlinux@lsi.com
S: linux-scsi@vger.kernel.org
W: http://megaraid.lsilogic.com
S: Maintained
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 56d3c0dcd2b..5eaeafd30bd 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -118,6 +118,7 @@ config GENERIC_BUG
depends on BUG
config SYS_SUPPORTS_APM_EMULATION
+ default y if PMAC_APM_EMU
bool
config DEFAULT_UIMAGE
diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts
index eae68ab1177..d29308fe4c2 100644
--- a/arch/powerpc/boot/dts/lite5200.dts
+++ b/arch/powerpc/boot/dts/lite5200.dts
@@ -67,7 +67,7 @@
interrupt-controller;
#interrupt-cells = <3>;
device_type = "interrupt-controller";
- compatible = "mpc5200_pic";
+ compatible = "mpc5200-pic";
reg = <500 80>;
built-in;
};
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
index 5185625a941..f242531f045 100644
--- a/arch/powerpc/boot/dts/lite5200b.dts
+++ b/arch/powerpc/boot/dts/lite5200b.dts
@@ -67,7 +67,7 @@
interrupt-controller;
#interrupt-cells = <3>;
device_type = "interrupt-controller";
- compatible = "mpc5200b-pic\0mpc5200_pic";
+ compatible = "mpc5200b-pic\0mpc5200-pic";
reg = <500 80>;
built-in;
};
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index d8232b7a08f..f6ae1a57d65 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -93,7 +93,7 @@ void pgd_free(pgd_t *pgd)
free_pages((unsigned long)pgd, PGDIR_ORDER);
}
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
extern int mem_init_done;
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c
index 71045677559..5bcc58d9a4d 100644
--- a/arch/powerpc/platforms/chrp/pegasos_eth.c
+++ b/arch/powerpc/platforms/chrp/pegasos_eth.c
@@ -169,7 +169,7 @@ static int Enable_SRAM(void)
/***********/
/***********/
-int mv643xx_eth_add_pds(void)
+static int __init mv643xx_eth_add_pds(void)
{
int ret = 0;
static struct pci_device_id pci_marvell_mv64360[] = {
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index a410bc76a8a..07b1c4ec428 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -384,7 +384,7 @@ int boot_part;
static dev_t boot_dev;
#ifdef CONFIG_SCSI
-void __init note_scsi_host(struct device_node *node, void *host)
+void note_scsi_host(struct device_node *node, void *host)
{
int l;
char *p;
diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig
index 887739f3bad..f611d344a12 100644
--- a/arch/powerpc/sysdev/qe_lib/Kconfig
+++ b/arch/powerpc/sysdev/qe_lib/Kconfig
@@ -5,15 +5,13 @@
config UCC_SLOW
bool
default n
- select UCC
help
This option provides qe_lib support to UCC slow
protocols: UART, BISYNC, QMC
config UCC_FAST
bool
- default n
- select UCC
+ default y if UCC_GETH
help
This option provides qe_lib support to UCC fast
protocols: HDLC, Ethernet, ATM, transparent
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index ab64256110b..fba7ca17a67 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -596,7 +596,11 @@ fast_exception_return:
mr r12,r4 /* restart at exc_exit_restart */
b 2b
- .comm fee_restarts,4
+ .section .bss
+ .align 2
+fee_restarts:
+ .space 4
+ .previous
/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
@@ -851,7 +855,11 @@ load_dbcr0:
mtspr SPRN_DBSR,r11 /* clear all pending debug events */
blr
- .comm global_dbcr0,8
+ .section .bss
+ .align 4
+global_dbcr0:
+ .space 8
+ .previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
@@ -926,4 +934,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
/* shouldn't return */
b 4b
- .comm ee_restarts,4
+ .section .bss
+ .align 2
+ee_restarts:
+ .space 4
+ .previous
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 4ad499605d0..a4165209ac7 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -40,7 +40,6 @@
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/btext.h>
-#include <asm/div64.h>
#include <asm/xmon.h>
#include <asm/signal.h>
#include <asm/dcr.h>
@@ -93,7 +92,6 @@ EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(__div64_32);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
diff --git a/arch/ppc/mm/hashtable.S b/arch/ppc/mm/hashtable.S
index e756942e65c..5f364dc5015 100644
--- a/arch/ppc/mm/hashtable.S
+++ b/arch/ppc/mm/hashtable.S
@@ -30,7 +30,11 @@
#include <asm/asm-offsets.h>
#ifdef CONFIG_SMP
- .comm mmu_hash_lock,4
+ .section .bss
+ .align 2
+ .globl mmu_hash_lock
+mmu_hash_lock:
+ .space 4
#endif /* CONFIG_SMP */
/*
@@ -461,9 +465,17 @@ found_slot:
sync /* make sure pte updates get to memory */
blr
- .comm next_slot,4
- .comm primary_pteg_full,4
- .comm htab_hash_searches,4
+ .section .bss
+ .align 2
+next_slot:
+ .space 4
+ .globl primary_pteg_full
+primary_pteg_full:
+ .space 4
+ .globl htab_hash_searches
+htab_hash_searches:
+ .space 4
+ .previous
/*
* Flush the entry for a particular page from the hash table.
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index c023b729880..35ebb6395ae 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -92,7 +92,7 @@ void pgd_free(pgd_t *pgd)
free_pages((unsigned long)pgd, PGDIR_ORDER);
}
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
extern int mem_init_done;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index bd992c0048f..fbcc00c6c06 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -178,6 +178,13 @@ config ARCH_HAS_ILOG2_U64
bool
default n
+config EMULATED_CMPXCHG
+ bool
+ default y
+ help
+ Sparc32 does not have a CAS instruction like sparc64. cmpxchg()
+ is emulated, and therefore it is not completely atomic.
+
config SUN_PM
bool
default y
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index f1401b57ccc..7b4612da74a 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -148,7 +148,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
}
/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
-static void __init kick_start_clock(void)
+static void __devinit kick_start_clock(void)
{
struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs;
unsigned char sec;
@@ -223,7 +223,7 @@ static __inline__ int has_low_battery(void)
return (data1 == data2); /* Was the write blocked? */
}
-static void __init mostek_set_system_time(void)
+static void __devinit mostek_set_system_time(void)
{
unsigned int year, mon, day, hour, min, sec;
struct mostek48t02 *mregs;
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 559335f4917..617d29832e1 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -2,6 +2,7 @@
* atomic32.c: 32-bit atomic_t implementation
*
* Copyright (C) 2004 Keith M Wesolowski
+ * Copyright (C) 2007 Kyle McMartin
*
* Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
*/
@@ -117,3 +118,17 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
return old & mask;
}
EXPORT_SYMBOL(___change_bit);
+
+unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
+{
+ unsigned long flags;
+ u32 prev;
+
+ spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+ if ((prev = *ptr) == old)
+ *ptr = new;
+ spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+ return (unsigned long)prev;
+}
+EXPORT_SYMBOL(__cmpxchg_u32);
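
The new __cmpxchg_u32() above is only as atomic as the ATOMIC_HASH() spinlock it takes, which is exactly what the EMULATED_CMPXCHG help text in arch/sparc/Kconfig warns about. A minimal user-space sketch of the same lock-hash idea, with pthread mutexes standing in for the kernel's spinlock array (names here are illustrative, not the kernel API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 4
static pthread_mutex_t lock_hash[HASH_SIZE] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Pick a lock based on the address, like the kernel's ATOMIC_HASH(). */
static pthread_mutex_t *hash_lock(volatile uint32_t *ptr)
{
        return &lock_hash[((uintptr_t)ptr >> 4) % HASH_SIZE];
}

/* Emulated cmpxchg: atomic only against users of the same lock hash. */
static uint32_t cmpxchg_u32(volatile uint32_t *ptr, uint32_t old, uint32_t new)
{
        pthread_mutex_t *m = hash_lock(ptr);
        uint32_t prev;

        pthread_mutex_lock(m);
        if ((prev = *ptr) == old)
                *ptr = new;
        pthread_mutex_unlock(m);

        return prev;
}

int main(void)
{
        volatile uint32_t v = 1;
        uint32_t prev;

        prev = cmpxchg_u32(&v, 1, 5);
        printf("prev=%u now=%u\n", prev, v);    /* prev=1 now=5 */
        prev = cmpxchg_u32(&v, 1, 9);
        printf("prev=%u now=%u\n", prev, v);    /* prev=5 now=5, no swap */
        return 0;
}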
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 831781cab27..bd00f89eed1 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -147,10 +147,10 @@ config SMP
If you don't know what to do here, say N.
config NR_CPUS
- int "Maximum number of CPUs (2-64)"
- range 2 64
+ int "Maximum number of CPUs (2-1024)"
+ range 2 1024
depends on SMP
- default "32"
+ default "64"
source "drivers/cpufreq/Kconfig"
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index c749dccacc3..d8d19093d12 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -8,11 +8,11 @@ EXTRA_CFLAGS := -Werror
extra-y := head.o init_task.o vmlinux.lds
obj-y := process.o setup.o cpu.o idprom.o \
- traps.o devices.o auxio.o una_asm.o \
+ traps.o auxio.o una_asm.o \
irq.o ptrace.o time.o sys_sparc.o signal.o \
unaligned.o central.o pci.o starfire.o semaphore.o \
power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
- visemul.o prom.o of_device.o hvapi.o
+ visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
deleted file mode 100644
index 0e03c8e218c..00000000000
--- a/arch/sparc64/kernel/devices.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/* devices.c: Initial scan of the prom device tree for important
- * Sparc device nodes which we need to find.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/bootmem.h>
-
-#include <asm/page.h>
-#include <asm/oplib.h>
-#include <asm/system.h>
-#include <asm/smp.h>
-#include <asm/spitfire.h>
-#include <asm/timer.h>
-#include <asm/cpudata.h>
-
-/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
- * operations in asm/ns87303.h
- */
-DEFINE_SPINLOCK(ns87303_lock);
-
-extern void cpu_probe(void);
-extern void central_probe(void);
-
-static const char *cpu_mid_prop(void)
-{
- if (tlb_type == spitfire)
- return "upa-portid";
- return "portid";
-}
-
-static int get_cpu_mid(struct device_node *dp)
-{
- struct property *prop;
-
- if (tlb_type == hypervisor) {
- struct linux_prom64_registers *reg;
- int len;
-
- prop = of_find_property(dp, "cpuid", &len);
- if (prop && len == 4)
- return *(int *) prop->value;
-
- prop = of_find_property(dp, "reg", NULL);
- reg = prop->value;
- return (reg[0].phys_addr >> 32) & 0x0fffffffUL;
- } else {
- const char *prop_name = cpu_mid_prop();
-
- prop = of_find_property(dp, prop_name, NULL);
- if (prop)
- return *(int *) prop->value;
- return 0;
- }
-}
-
-static int check_cpu_node(struct device_node *dp, int *cur_inst,
- int (*compare)(struct device_node *, int, void *),
- void *compare_arg,
- struct device_node **dev_node, int *mid)
-{
- if (!compare(dp, *cur_inst, compare_arg)) {
- if (dev_node)
- *dev_node = dp;
- if (mid)
- *mid = get_cpu_mid(dp);
- return 0;
- }
-
- (*cur_inst)++;
-
- return -ENODEV;
-}
-
-static int __cpu_find_by(int (*compare)(struct device_node *, int, void *),
- void *compare_arg,
- struct device_node **dev_node, int *mid)
-{
- struct device_node *dp;
- int cur_inst;
-
- cur_inst = 0;
- for_each_node_by_type(dp, "cpu") {
- int err = check_cpu_node(dp, &cur_inst,
- compare, compare_arg,
- dev_node, mid);
- if (err == 0)
- return 0;
- }
-
- return -ENODEV;
-}
-
-static int cpu_instance_compare(struct device_node *dp, int instance, void *_arg)
-{
- int desired_instance = (int) (long) _arg;
-
- if (instance == desired_instance)
- return 0;
- return -ENODEV;
-}
-
-int cpu_find_by_instance(int instance, struct device_node **dev_node, int *mid)
-{
- return __cpu_find_by(cpu_instance_compare, (void *)(long)instance,
- dev_node, mid);
-}
-
-static int cpu_mid_compare(struct device_node *dp, int instance, void *_arg)
-{
- int desired_mid = (int) (long) _arg;
- int this_mid;
-
- this_mid = get_cpu_mid(dp);
- if (this_mid == desired_mid)
- return 0;
- return -ENODEV;
-}
-
-int cpu_find_by_mid(int mid, struct device_node **dev_node)
-{
- return __cpu_find_by(cpu_mid_compare, (void *)(long)mid,
- dev_node, NULL);
-}
-
-void __init device_scan(void)
-{
- /* FIX ME FAST... -DaveM */
- ioport_resource.end = 0xffffffffffffffffUL;
-
- prom_printf("Booting Linux...\n");
-
-#ifndef CONFIG_SMP
- {
- struct device_node *dp;
- int err, def;
-
- err = cpu_find_by_instance(0, &dp, NULL);
- if (err) {
- prom_printf("No cpu nodes, cannot continue\n");
- prom_halt();
- }
- cpu_data(0).clock_tick =
- of_getintprop_default(dp, "clock-frequency", 0);
-
- def = ((tlb_type == hypervisor) ?
- (8 * 1024) :
- (16 * 1024));
- cpu_data(0).dcache_size = of_getintprop_default(dp,
- "dcache-size",
- def);
-
- def = 32;
- cpu_data(0).dcache_line_size =
- of_getintprop_default(dp, "dcache-line-size", def);
-
- def = 16 * 1024;
- cpu_data(0).icache_size = of_getintprop_default(dp,
- "icache-size",
- def);
-
- def = 32;
- cpu_data(0).icache_line_size =
- of_getintprop_default(dp, "icache-line-size", def);
-
- def = ((tlb_type == hypervisor) ?
- (3 * 1024 * 1024) :
- (4 * 1024 * 1024));
- cpu_data(0).ecache_size = of_getintprop_default(dp,
- "ecache-size",
- def);
-
- def = 64;
- cpu_data(0).ecache_line_size =
- of_getintprop_default(dp, "ecache-line-size", def);
- printk("CPU[0]: Caches "
- "D[sz(%d):line_sz(%d)] "
- "I[sz(%d):line_sz(%d)] "
- "E[sz(%d):line_sz(%d)]\n",
- cpu_data(0).dcache_size, cpu_data(0).dcache_line_size,
- cpu_data(0).icache_size, cpu_data(0).icache_line_size,
- cpu_data(0).ecache_size, cpu_data(0).ecache_line_size);
- }
-#endif
-
- central_probe();
-
- cpu_probe();
-}
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 732b77cb71f..8f10dda0f5c 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1725,96 +1725,142 @@ real_hard_smp_processor_id:
* returns %o0: sysino
*/
.globl sun4v_devino_to_sysino
+ .type sun4v_devino_to_sysino,#function
sun4v_devino_to_sysino:
mov HV_FAST_INTR_DEVINO2SYSINO, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
+ .size sun4v_devino_to_sysino, .-sun4v_devino_to_sysino
/* %o0: sysino
*
* returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED})
*/
.globl sun4v_intr_getenabled
+ .type sun4v_intr_getenabled,#function
sun4v_intr_getenabled:
mov HV_FAST_INTR_GETENABLED, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
+ .size sun4v_intr_getenabled, .-sun4v_intr_getenabled
/* %o0: sysino
* %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
*/
.globl sun4v_intr_setenabled
+ .type sun4v_intr_setenabled,#function
sun4v_intr_setenabled:
mov HV_FAST_INTR_SETENABLED, %o5
ta HV_FAST_TRAP
retl
nop
+ .size sun4v_intr_setenabled, .-sun4v_intr_setenabled
/* %o0: sysino
*
* returns %o0: intr_state (HV_INTR_STATE_*)
*/
.globl sun4v_intr_getstate
+ .type sun4v_intr_getstate,#function
sun4v_intr_getstate:
mov HV_FAST_INTR_GETSTATE, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
+ .size sun4v_intr_getstate, .-sun4v_intr_getstate
/* %o0: sysino
* %o1: intr_state (HV_INTR_STATE_*)
*/
.globl sun4v_intr_setstate
+ .type sun4v_intr_setstate,#function
sun4v_intr_setstate:
mov HV_FAST_INTR_SETSTATE, %o5
ta HV_FAST_TRAP
retl
nop
+ .size sun4v_intr_setstate, .-sun4v_intr_setstate
/* %o0: sysino
*
* returns %o0: cpuid
*/
.globl sun4v_intr_gettarget
+ .type sun4v_intr_gettarget,#function
sun4v_intr_gettarget:
mov HV_FAST_INTR_GETTARGET, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
+ .size sun4v_intr_gettarget, .-sun4v_intr_gettarget
/* %o0: sysino
* %o1: cpuid
*/
.globl sun4v_intr_settarget
+ .type sun4v_intr_settarget,#function
sun4v_intr_settarget:
mov HV_FAST_INTR_SETTARGET, %o5
ta HV_FAST_TRAP
retl
nop
+ .size sun4v_intr_settarget, .-sun4v_intr_settarget
- /* %o0: type
- * %o1: queue paddr
- * %o2: num queue entries
+ /* %o0: cpuid
+ * %o1: pc
+ * %o2: rtba
+ * %o3: arg0
*
* returns %o0: status
*/
- .globl sun4v_cpu_qconf
-sun4v_cpu_qconf:
- mov HV_FAST_CPU_QCONF, %o5
+ .globl sun4v_cpu_start
+ .type sun4v_cpu_start,#function
+sun4v_cpu_start:
+ mov HV_FAST_CPU_START, %o5
ta HV_FAST_TRAP
retl
nop
+ .size sun4v_cpu_start, .-sun4v_cpu_start
- /* returns %o0: status
+ /* %o0: cpuid
+ *
+ * returns %o0: status
*/
+ .globl sun4v_cpu_stop
+ .type sun4v_cpu_stop,#function
+sun4v_cpu_stop:
+ mov HV_FAST_CPU_STOP, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_cpu_stop, .-sun4v_cpu_stop
+
+ /* returns %o0: status */
.globl sun4v_cpu_yield
+ .type sun4v_cpu_yield, #function
sun4v_cpu_yield:
mov HV_FAST_CPU_YIELD, %o5
ta HV_FAST_TRAP
retl
nop
+ .size sun4v_cpu_yield, .-sun4v_cpu_yield
+
+ /* %o0: type
+ * %o1: queue paddr
+ * %o2: num queue entries
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_cpu_qconf
+ .type sun4v_cpu_qconf,#function
+sun4v_cpu_qconf:
+ mov HV_FAST_CPU_QCONF, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_cpu_qconf, .-sun4v_cpu_qconf
/* %o0: num cpus in cpu list
* %o1: cpu list paddr
@@ -1823,11 +1869,13 @@ sun4v_cpu_yield:
* returns %o0: status
*/
.globl sun4v_cpu_mondo_send
+ .type sun4v_cpu_mondo_send,#function
sun4v_cpu_mondo_send:
mov HV_FAST_CPU_MONDO_SEND, %o5
ta HV_FAST_TRAP
retl
nop
+ .size sun4v_cpu_mondo_send, .-sun4v_cpu_mondo_send
/* %o0: CPU ID
*
@@ -1835,6 +1883,7 @@ sun4v_cpu_mondo_send:
* %o0: cpu state as HV_CPU_STATE_*
*/
.globl sun4v_cpu_state
+ .type sun4v_cpu_state,#function
sun4v_cpu_state:
mov HV_FAST_CPU_STATE, %o5
ta HV_FAST_TRAP
@@ -1843,6 +1892,37 @@ sun4v_cpu_state:
mov %o1, %o0
1: retl
nop
+ .size sun4v_cpu_state, .-sun4v_cpu_state
+
+ /* %o0: virtual address
+ * %o1: must be zero
+ * %o2: TTE
+ * %o3: HV_MMU_* flags
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_mmu_map_perm_addr
+ .type sun4v_mmu_map_perm_addr,#function
+sun4v_mmu_map_perm_addr:
+ mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_mmu_map_perm_addr, .-sun4v_mmu_map_perm_addr
+
+ /* %o0: number of TSB descriptions
+ * %o1: TSB descriptions real address
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_mmu_tsb_ctx0
+ .type sun4v_mmu_tsb_ctx0,#function
+sun4v_mmu_tsb_ctx0:
+ mov HV_FAST_MMU_TSB_CTX0, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_mmu_tsb_ctx0, .-sun4v_mmu_tsb_ctx0
/* %o0: API group number
* %o1: pointer to unsigned long major number storage
@@ -1851,6 +1931,7 @@ sun4v_cpu_state:
* returns %o0: status
*/
.globl sun4v_get_version
+ .type sun4v_get_version,#function
sun4v_get_version:
mov HV_CORE_GET_VER, %o5
mov %o1, %o3
@@ -1859,6 +1940,7 @@ sun4v_get_version:
stx %o1, [%o3]
retl
stx %o2, [%o4]
+ .size sun4v_get_version, .-sun4v_get_version
/* %o0: API group number
* %o1: desired major number
@@ -1868,18 +1950,49 @@ sun4v_get_version:
* returns %o0: status
*/
.globl sun4v_set_version
+ .type sun4v_set_version,#function
sun4v_set_version:
mov HV_CORE_SET_VER, %o5
mov %o3, %o4
ta HV_CORE_TRAP
retl
stx %o1, [%o4]
+ .size sun4v_set_version, .-sun4v_set_version
+
+ /* %o0: pointer to unsigned long time
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_tod_get
+ .type sun4v_tod_get,#function
+sun4v_tod_get:
+ mov %o0, %o4
+ mov HV_FAST_TOD_GET, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ .size sun4v_tod_get, .-sun4v_tod_get
+
+ /* %o0: time
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_tod_set
+ .type sun4v_tod_set,#function
+sun4v_tod_set:
+ mov HV_FAST_TOD_SET, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_tod_set, .-sun4v_tod_set
/* %o0: pointer to unsigned long status
*
* returns %o0: signed character
*/
.globl sun4v_con_getchar
+ .type sun4v_con_getchar,#function
sun4v_con_getchar:
mov %o0, %o4
mov HV_FAST_CONS_GETCHAR, %o5
@@ -1889,17 +2002,20 @@ sun4v_con_getchar:
stx %o0, [%o4]
retl
sra %o1, 0, %o0
+ .size sun4v_con_getchar, .-sun4v_con_getchar
/* %o0: signed long character
*
* returns %o0: status
*/
.globl sun4v_con_putchar
+ .type sun4v_con_putchar,#function
sun4v_con_putchar:
mov HV_FAST_CONS_PUTCHAR, %o5
ta HV_FAST_TRAP
retl
sra %o0, 0, %o0
+ .size sun4v_con_putchar, .-sun4v_con_putchar
/* %o0: buffer real address
* %o1: buffer size
@@ -1908,6 +2024,7 @@ sun4v_con_putchar:
* returns %o0: status
*/
.globl sun4v_con_read
+ .type sun4v_con_read,#function
sun4v_con_read:
mov %o2, %o4
mov HV_FAST_CONS_READ, %o5
@@ -1922,6 +2039,7 @@ sun4v_con_read:
stx %o1, [%o4]
1: retl
nop
+ .size sun4v_con_read, .-sun4v_con_read
/* %o0: buffer real address
* %o1: buffer size
@@ -1930,6 +2048,7 @@ sun4v_con_read:
* returns %o0: status
*/
.globl sun4v_con_write
+ .type sun4v_con_write,#function
sun4v_con_write:
mov %o2, %o4
mov HV_FAST_CONS_WRITE, %o5
@@ -1937,3 +2056,445 @@ sun4v_con_write:
stx %o1, [%o4]
retl
nop
+ .size sun4v_con_write, .-sun4v_con_write
+
+ /* %o0: soft state
+ * %o1: address of description string
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_mach_set_soft_state
+ .type sun4v_mach_set_soft_state,#function
+sun4v_mach_set_soft_state:
+ mov HV_FAST_MACH_SET_SOFT_STATE, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_mach_set_soft_state, .-sun4v_mach_set_soft_state
+
+ /* %o0: exit code
+ *
+ * Does not return.
+ */
+ .globl sun4v_mach_exit
+ .type sun4v_mach_exit,#function
+sun4v_mach_exit:
+ mov HV_FAST_MACH_EXIT, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_mach_exit, .-sun4v_mach_exit
+
+ /* %o0: buffer real address
+ * %o1: buffer length
+ * %o2: pointer to unsigned long real_buf_len
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_mach_desc
+ .type sun4v_mach_desc,#function
+sun4v_mach_desc:
+ mov %o2, %o4
+ mov HV_FAST_MACH_DESC, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ .size sun4v_mach_desc, .-sun4v_mach_desc
+
+ /* %o0: new timeout in milliseconds
+ * %o1: pointer to unsigned long orig_timeout
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_mach_set_watchdog
+ .type sun4v_mach_set_watchdog,#function
+sun4v_mach_set_watchdog:
+ mov %o1, %o4
+ mov HV_FAST_MACH_SET_WATCHDOG, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ .size sun4v_mach_set_watchdog, .-sun4v_mach_set_watchdog
+
+ /* No inputs and does not return. */
+ .globl sun4v_mach_sir
+ .type sun4v_mach_sir,#function
+sun4v_mach_sir:
+ mov %o1, %o4
+ mov HV_FAST_MACH_SIR, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ .size sun4v_mach_sir, .-sun4v_mach_sir
+
+ /* %o0: channel
+ * %o1: ra
+ * %o2: num_entries
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_tx_qconf
+ .type sun4v_ldc_tx_qconf,#function
+sun4v_ldc_tx_qconf:
+ mov HV_FAST_LDC_TX_QCONF, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_ldc_tx_qconf, .-sun4v_ldc_tx_qconf
+
+ /* %o0: channel
+ * %o1: pointer to unsigned long ra
+ * %o2: pointer to unsigned long num_entries
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_tx_qinfo
+ .type sun4v_ldc_tx_qinfo,#function
+sun4v_ldc_tx_qinfo:
+ mov %o1, %g1
+ mov %o2, %g2
+ mov HV_FAST_LDC_TX_QINFO, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ stx %o2, [%g2]
+ retl
+ nop
+ .size sun4v_ldc_tx_qinfo, .-sun4v_ldc_tx_qinfo
+
+ /* %o0: channel
+ * %o1: pointer to unsigned long head_off
+ * %o2: pointer to unsigned long tail_off
+ * %o3: pointer to unsigned long chan_state
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_tx_get_state
+ .type sun4v_ldc_tx_get_state,#function
+sun4v_ldc_tx_get_state:
+ mov %o1, %g1
+ mov %o2, %g2
+ mov %o3, %g3
+ mov HV_FAST_LDC_TX_GET_STATE, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ stx %o2, [%g2]
+ stx %o3, [%g3]
+ retl
+ nop
+ .size sun4v_ldc_tx_get_state, .-sun4v_ldc_tx_get_state
+
+ /* %o0: channel
+ * %o1: tail_off
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_tx_set_qtail
+ .type sun4v_ldc_tx_set_qtail,#function
+sun4v_ldc_tx_set_qtail:
+ mov HV_FAST_LDC_TX_SET_QTAIL, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_ldc_tx_set_qtail, .-sun4v_ldc_tx_set_qtail
+
+ /* %o0: channel
+ * %o1: ra
+ * %o2: num_entries
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_rx_qconf
+ .type sun4v_ldc_rx_qconf,#function
+sun4v_ldc_rx_qconf:
+ mov HV_FAST_LDC_RX_QCONF, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_ldc_rx_qconf, .-sun4v_ldc_rx_qconf
+
+ /* %o0: channel
+ * %o1: pointer to unsigned long ra
+ * %o2: pointer to unsigned long num_entries
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_rx_qinfo
+ .type sun4v_ldc_rx_qinfo,#function
+sun4v_ldc_rx_qinfo:
+ mov %o1, %g1
+ mov %o2, %g2
+ mov HV_FAST_LDC_RX_QINFO, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ stx %o2, [%g2]
+ retl
+ nop
+ .size sun4v_ldc_rx_qinfo, .-sun4v_ldc_rx_qinfo
+
+ /* %o0: channel
+ * %o1: pointer to unsigned long head_off
+ * %o2: pointer to unsigned long tail_off
+ * %o3: pointer to unsigned long chan_state
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_rx_get_state
+ .type sun4v_ldc_rx_get_state,#function
+sun4v_ldc_rx_get_state:
+ mov %o1, %g1
+ mov %o2, %g2
+ mov %o3, %g3
+ mov HV_FAST_LDC_RX_GET_STATE, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ stx %o2, [%g2]
+ stx %o3, [%g3]
+ retl
+ nop
+ .size sun4v_ldc_rx_get_state, .-sun4v_ldc_rx_get_state
+
+ /* %o0: channel
+ * %o1: head_off
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_rx_set_qhead
+ .type sun4v_ldc_rx_set_qhead,#function
+sun4v_ldc_rx_set_qhead:
+ mov HV_FAST_LDC_RX_SET_QHEAD, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_ldc_rx_set_qhead, .-sun4v_ldc_rx_set_qhead
+
+ /* %o0: channel
+ * %o1: ra
+ * %o2: num_entries
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_set_map_table
+ .type sun4v_ldc_set_map_table,#function
+sun4v_ldc_set_map_table:
+ mov HV_FAST_LDC_SET_MAP_TABLE, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_ldc_set_map_table, .-sun4v_ldc_set_map_table
+
+ /* %o0: channel
+ * %o1: pointer to unsigned long ra
+ * %o2: pointer to unsigned long num_entries
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_get_map_table
+ .type sun4v_ldc_get_map_table,#function
+sun4v_ldc_get_map_table:
+ mov %o1, %g1
+ mov %o2, %g2
+ mov HV_FAST_LDC_GET_MAP_TABLE, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ stx %o2, [%g2]
+ retl
+ nop
+ .size sun4v_ldc_get_map_table, .-sun4v_ldc_get_map_table
+
+ /* %o0: channel
+ * %o1: dir_code
+ * %o2: tgt_raddr
+ * %o3: lcl_raddr
+ * %o4: len
+ * %o5: pointer to unsigned long actual_len
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_copy
+ .type sun4v_ldc_copy,#function
+sun4v_ldc_copy:
+ mov %o5, %g1
+ mov HV_FAST_LDC_COPY, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ retl
+ nop
+ .size sun4v_ldc_copy, .-sun4v_ldc_copy
+
+ /* %o0: channel
+ * %o1: cookie
+ * %o2: pointer to unsigned long ra
+ * %o3: pointer to unsigned long perm
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_mapin
+ .type sun4v_ldc_mapin,#function
+sun4v_ldc_mapin:
+ mov %o2, %g1
+ mov %o3, %g2
+ mov HV_FAST_LDC_MAPIN, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ stx %o2, [%g2]
+ retl
+ nop
+ .size sun4v_ldc_mapin, .-sun4v_ldc_mapin
+
+ /* %o0: ra
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_unmap
+ .type sun4v_ldc_unmap,#function
+sun4v_ldc_unmap:
+ mov HV_FAST_LDC_UNMAP, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_ldc_unmap, .-sun4v_ldc_unmap
+
+ /* %o0: cookie
+ * %o1: mte_cookie
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ldc_revoke
+ .type sun4v_ldc_revoke,#function
+sun4v_ldc_revoke:
+ mov HV_FAST_LDC_REVOKE, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_ldc_revoke, .-sun4v_ldc_revoke
+
+ /* %o0: device handle
+ * %o1: device INO
+ * %o2: pointer to unsigned long cookie
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_vintr_get_cookie
+ .type sun4v_vintr_get_cookie,#function
+sun4v_vintr_get_cookie:
+ mov %o2, %g1
+ mov HV_FAST_VINTR_GET_COOKIE, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ retl
+ nop
+ .size sun4v_vintr_get_cookie, .-sun4v_vintr_get_cookie
+
+ /* %o0: device handle
+ * %o1: device INO
+ * %o2: cookie
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_vintr_set_cookie
+ .type sun4v_vintr_set_cookie,#function
+sun4v_vintr_set_cookie:
+ mov HV_FAST_VINTR_SET_COOKIE, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_vintr_set_cookie, .-sun4v_vintr_set_cookie
+
+ /* %o0: device handle
+ * %o1: device INO
+ * %o2: pointer to unsigned long valid_state
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_vintr_get_valid
+ .type sun4v_vintr_get_valid,#function
+sun4v_vintr_get_valid:
+ mov %o2, %g1
+ mov HV_FAST_VINTR_GET_VALID, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ retl
+ nop
+ .size sun4v_vintr_get_valid, .-sun4v_vintr_get_valid
+
+ /* %o0: device handle
+ * %o1: device INO
+ * %o2: valid_state
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_vintr_set_valid
+ .type sun4v_vintr_set_valid,#function
+sun4v_vintr_set_valid:
+ mov HV_FAST_VINTR_SET_VALID, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_vintr_set_valid, .-sun4v_vintr_set_valid
+
+ /* %o0: device handle
+ * %o1: device INO
+ * %o2: pointer to unsigned long state
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_vintr_get_state
+ .type sun4v_vintr_get_state,#function
+sun4v_vintr_get_state:
+ mov %o2, %g1
+ mov HV_FAST_VINTR_GET_STATE, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ retl
+ nop
+ .size sun4v_vintr_get_state, .-sun4v_vintr_get_state
+
+ /* %o0: device handle
+ * %o1: device INO
+ * %o2: state
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_vintr_set_state
+ .type sun4v_vintr_set_state,#function
+sun4v_vintr_set_state:
+ mov HV_FAST_VINTR_SET_STATE, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_vintr_set_state, .-sun4v_vintr_set_state
+
+ /* %o0: device handle
+ * %o1: device INO
+ * %o2: pointer to unsigned long cpuid
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_vintr_get_target
+ .type sun4v_vintr_get_target,#function
+sun4v_vintr_get_target:
+ mov %o2, %g1
+ mov HV_FAST_VINTR_GET_TARGET, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%g1]
+ retl
+ nop
+ .size sun4v_vintr_get_target, .-sun4v_vintr_get_target
+
+ /* %o0: device handle
+ * %o1: device INO
+ * %o2: cpuid
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_vintr_set_target
+ .type sun4v_vintr_set_target,#function
+sun4v_vintr_set_target:
+ mov HV_FAST_VINTR_SET_TARGET, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_vintr_set_target, .-sun4v_vintr_set_target
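
Every stub above follows the same calling pattern: arguments arrive in %o0-%o4, the fast-trap function number goes in %o5, "ta HV_FAST_TRAP" enters the hypervisor, and the status comes back in %o0, with by-reference results stored through pointers that were saved in scratch registers before the trap. On the C side these are ordinary external functions; a sketch of a few of the new prototypes, reconstructed from the register comments (the authoritative declarations presumably live in include/asm-sparc64/hypervisor.h, which this patch also grows):

/* Reconstructed from the register comments above; argument order mirrors
 * %o0..%o3 and each call returns the hypervisor status word from %o0.
 */
extern unsigned long sun4v_cpu_start(unsigned long cpuid, unsigned long pc,
                                     unsigned long rtba, unsigned long arg0);
extern unsigned long sun4v_cpu_stop(unsigned long cpuid);
extern unsigned long sun4v_tod_get(unsigned long *time);
extern unsigned long sun4v_tod_set(unsigned long time);
extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
                                     unsigned long buf_len,
                                     unsigned long *real_buf_len);
extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
                                               unsigned long msg_string_ra);
extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
                                             unsigned long set_to_zero,
                                             unsigned long tte,
                                             unsigned long flags);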
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index baea10a9819..77259526cb1 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -523,7 +523,7 @@ tlb_fixup_done:
#else
mov 0, %o0
#endif
- stb %o0, [%g6 + TI_CPU]
+ sth %o0, [%g6 + TI_CPU]
/* Off we go.... */
call start_kernel
@@ -653,33 +653,54 @@ setup_tba:
restore
sparc64_boot_end:
-#include "ktlb.S"
-#include "tsb.S"
#include "etrap.S"
#include "rtrap.S"
#include "winfixup.S"
#include "entry.S"
#include "sun4v_tlb_miss.S"
#include "sun4v_ivec.S"
+#include "ktlb.S"
+#include "tsb.S"
/*
* The following skip makes sure the trap table in ttable.S is aligned
* on a 32K boundary as required by the v9 specs for TBA register.
*
* We align to a 32K boundary, then we have the 32K kernel TSB,
- * then the 32K aligned trap table.
+ * the 64K kernel 4MB TSB, and then the 32K aligned trap table.
*/
1:
.skip 0x4000 + _start - 1b
+! 0x0000000000408000
+
.globl swapper_tsb
swapper_tsb:
.skip (32 * 1024)
-! 0x0000000000408000
+ .globl swapper_4m_tsb
+swapper_4m_tsb:
+ .skip (64 * 1024)
+
+! 0x0000000000420000
+ /* Some care needs to be exercised if you try to move the
+ * location of the trap table relative to other things. For
+ * one thing there are br* instructions in some of the
+ * trap table entries which branch back to code in ktlb.S
+ * Those instructions can only handle a signed 16-bit
+ * displacement.
+ *
+ * There is a binutils bug (bugzilla #4558) which causes
+ * the relocation overflow checks for such instructions to
+ * not be done correctly. So bintuils will not notice the
+ * error and will instead write junk into the relocation and
+ * you'll have an unbootable kernel.
+ */
#include "ttable.S"
+! 0x0000000000428000
+
#include "systbls.S"
.data
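
The "! 0x..." markers above describe the layout the .skip directives produce: the 32K swapper_tsb at 0x408000, the new 64K swapper_4m_tsb right behind it, and then the trap table, which must stay 32K-aligned for the V9 TBA register. A quick standalone check of that arithmetic, using the addresses quoted in the hunk:

#include <stdio.h>

int main(void)
{
        unsigned long swapper_tsb    = 0x408000UL;
        unsigned long swapper_4m_tsb = swapper_tsb + 32 * 1024;    /* 0x410000 */
        unsigned long trap_table     = swapper_4m_tsb + 64 * 1024; /* 0x420000 */
        unsigned long end            = trap_table + 32 * 1024;     /* 0x428000 */

        printf("swapper_4m_tsb at 0x%lx\n", swapper_4m_tsb);
        printf("trap table at 0x%lx, 32K aligned: %s\n", trap_table,
               (trap_table & (32 * 1024 - 1)) ? "no" : "yes");
        printf("end of trap table at 0x%lx\n", end);
        return 0;
}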
diff --git a/arch/sparc64/kernel/hvapi.c b/arch/sparc64/kernel/hvapi.c
index f03ffc829c7..f34f5d6181e 100644
--- a/arch/sparc64/kernel/hvapi.c
+++ b/arch/sparc64/kernel/hvapi.c
@@ -9,6 +9,7 @@
#include <asm/hypervisor.h>
#include <asm/oplib.h>
+#include <asm/sstate.h>
/* If the hypervisor indicates that the API setting
* calls are unsupported, by returning HV_EBADTRAP or
@@ -107,7 +108,7 @@ int sun4v_hvapi_register(unsigned long group, unsigned long major,
p->minor = actual_minor;
ret = 0;
} else if (hv_ret == HV_EBADTRAP ||
- HV_ENOTSUPPORTED) {
+ hv_ret == HV_ENOTSUPPORTED) {
if (p->flags & FLAG_PRE_API) {
if (major == 1) {
p->major = 1;
@@ -179,6 +180,8 @@ void __init sun4v_hvapi_init(void)
if (sun4v_hvapi_register(group, major, &minor))
goto bad;
+ sun4v_sstate_init();
+
return;
bad:
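
The hvapi.c hunk fixes an always-true condition: "hv_ret == HV_EBADTRAP || HV_ENOTSUPPORTED" parses as "(hv_ret == HV_EBADTRAP) || HV_ENOTSUPPORTED", and a bare non-zero constant makes the whole test succeed no matter what hv_ret holds. A tiny standalone illustration (the constant values below are made up; only their non-zero-ness matters):

#include <stdio.h>

#define HV_EBADTRAP      0xff   /* illustrative values only */
#define HV_ENOTSUPPORTED 0x02

int main(void)
{
        unsigned long hv_ret = 0x05;    /* some unrelated error code */

        /* Old test: second operand is just a non-zero constant, so this always fires. */
        if (hv_ret == HV_EBADTRAP || HV_ENOTSUPPORTED)
                printf("buggy test fires even for 0x%lx\n", hv_ret);

        /* Fixed test from the patch: compare hv_ret against both constants. */
        if (hv_ret == HV_EBADTRAP || hv_ret == HV_ENOTSUPPORTED)
                printf("fixed test fires\n");
        else
                printf("fixed test correctly does not fire for 0x%lx\n", hv_ret);

        return 0;
}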
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 3edc18e1b81..a36f8dd0c02 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -171,8 +171,6 @@ skip:
return 0;
}
-extern unsigned long real_hard_smp_processor_id(void);
-
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
unsigned int tid;
@@ -694,9 +692,20 @@ void init_irqwork_curcpu(void)
trap_block[cpu].irq_worklist = 0;
}
-static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
+/* Please be very careful with register_one_mondo() and
+ * sun4v_register_mondo_queues().
+ *
+ * On SMP this gets invoked from the CPU trampoline before
+ * the cpu has fully taken over the trap table from OBP,
+ * and its kernel stack + %g6 thread register state is
+ * not fully cooked yet.
+ *
+ * Therefore you cannot make any OBP calls, not even prom_printf,
+ * from these two routines.
+ */
+static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
- unsigned long num_entries = 128;
+ unsigned long num_entries = (qmask + 1) / 64;
unsigned long status;
status = sun4v_cpu_qconf(type, paddr, num_entries);
@@ -711,44 +720,58 @@ static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
struct trap_per_cpu *tb = &trap_block[this_cpu];
- register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
- register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
- register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
- register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
+ register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
+ tb->cpu_mondo_qmask);
+ register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
+ tb->dev_mondo_qmask);
+ register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
+ tb->resum_qmask);
+ register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
+ tb->nonresum_qmask);
}
-static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
+static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
{
- void *page;
+ unsigned long size = PAGE_ALIGN(qmask + 1);
+ unsigned long order = get_order(size);
+ void *p = NULL;
- if (use_bootmem)
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- else
- page = (void *) get_zeroed_page(GFP_ATOMIC);
+ if (use_bootmem) {
+ p = __alloc_bootmem_low(size, size, 0);
+ } else {
+ struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
+ if (page)
+ p = page_address(page);
+ }
- if (!page) {
+ if (!p) {
prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
prom_halt();
}
- *pa_ptr = __pa(page);
+ *pa_ptr = __pa(p);
}
-static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
+static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
{
- void *page;
+ unsigned long size = PAGE_ALIGN(qmask + 1);
+ unsigned long order = get_order(size);
+ void *p = NULL;
- if (use_bootmem)
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- else
- page = (void *) get_zeroed_page(GFP_ATOMIC);
+ if (use_bootmem) {
+ p = __alloc_bootmem_low(size, size, 0);
+ } else {
+ struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
+ if (page)
+ p = page_address(page);
+ }
- if (!page) {
+ if (!p) {
prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
prom_halt();
}
- *pa_ptr = __pa(page);
+ *pa_ptr = __pa(p);
}
static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
@@ -779,12 +802,12 @@ void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int
struct trap_per_cpu *tb = &trap_block[cpu];
if (alloc) {
- alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
- alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
- alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
- alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
- alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
- alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);
+ alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem);
+ alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem);
+ alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem);
+ alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem);
+ alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem);
+ alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem);
init_cpu_send_mondo_info(tb, use_bootmem);
}
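
The new qmask plumbing ties irq.c to the machine description: mdesc.c's get_one_mondo_bits() encodes a queue of 2^bits entries as qmask = (1 << bits) * 64 - 1 (64 bytes per entry), and the code above recovers the byte size as qmask + 1 and the entry count passed to sun4v_cpu_qconf() as (qmask + 1) / 64. A small check of that arithmetic for the default "#bits" values:

#include <stdio.h>

/* qmask as computed in get_one_mondo_bits(): (1 << bits) entries of 64 bytes. */
static unsigned int qmask_from_bits(unsigned int bits)
{
        return ((1U << bits) * 64U) - 1U;
}

int main(void)
{
        /* Defaults from get_mondo_data(): cpu/dev mondo 7, resumable 6, nonresumable 2. */
        unsigned int bits[] = { 7, 6, 2 };
        int i;

        for (i = 0; i < 3; i++) {
                unsigned int qmask = qmask_from_bits(bits[i]);
                unsigned long bytes = (unsigned long)qmask + 1;

                printf("#bits=%u qmask=0x%x size=%lu bytes entries=%lu\n",
                       bits[i], qmask, bytes, bytes / 64);
        }
        return 0;
}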
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
index ad46e2024f4..5a8377b5495 100644
--- a/arch/sparc64/kernel/itlb_miss.S
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -11,12 +11,12 @@
/* ITLB ** ICACHE line 2: TSB compare and TLB load */
bne,pn %xcc, tsb_miss_itlb ! Miss
mov FAULT_CODE_ITLB, %g3
- andcc %g5, _PAGE_EXEC_4U, %g0 ! Executable?
+ sethi %hi(_PAGE_EXEC_4U), %g4
+ andcc %g5, %g4, %g0 ! Executable?
be,pn %xcc, tsb_do_fault
nop ! Delay slot, fill me
stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB
retry ! Trap done
- nop
/* ITLB ** ICACHE line 3: */
nop
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
new file mode 100644
index 00000000000..9246c2cf957
--- /dev/null
+++ b/arch/sparc64/kernel/mdesc.c
@@ -0,0 +1,619 @@
+/* mdesc.c: Sun4V machine description handling.
+ *
+ * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/log2.h>
+
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+#include <asm/prom.h>
+#include <asm/oplib.h>
+#include <asm/smp.h>
+
+/* Unlike the OBP device tree, the machine description is a full-on
+ * DAG. An arbitrary number of ARCs are possible from one
+ * node to other nodes and thus we can't use the OBP device_node
+ * data structure to represent these nodes inside of the kernel.
+ *
+ * Actually, it isn't even a DAG, because there are back pointers
+ * which create cycles in the graph.
+ *
+ * mdesc_hdr and mdesc_elem describe the layout of the data structure
+ * we get from the Hypervisor.
+ */
+struct mdesc_hdr {
+ u32 version; /* Transport version */
+ u32 node_sz; /* node block size */
+ u32 name_sz; /* name block size */
+ u32 data_sz; /* data block size */
+};
+
+struct mdesc_elem {
+ u8 tag;
+#define MD_LIST_END 0x00
+#define MD_NODE 0x4e
+#define MD_NODE_END 0x45
+#define MD_NOOP 0x20
+#define MD_PROP_ARC 0x61
+#define MD_PROP_VAL 0x76
+#define MD_PROP_STR 0x73
+#define MD_PROP_DATA 0x64
+ u8 name_len;
+ u16 resv;
+ u32 name_offset;
+ union {
+ struct {
+ u32 data_len;
+ u32 data_offset;
+ } data;
+ u64 val;
+ } d;
+};
+
+static struct mdesc_hdr *main_mdesc;
+static struct mdesc_node *allnodes;
+
+static struct mdesc_node *allnodes_tail;
+static unsigned int unique_id;
+
+static struct mdesc_node **mdesc_hash;
+static unsigned int mdesc_hash_size;
+
+static inline unsigned int node_hashfn(u64 node)
+{
+ return ((unsigned int) (node ^ (node >> 8) ^ (node >> 16)))
+ & (mdesc_hash_size - 1);
+}
+
+static inline void hash_node(struct mdesc_node *mp)
+{
+ struct mdesc_node **head = &mdesc_hash[node_hashfn(mp->node)];
+
+ mp->hash_next = *head;
+ *head = mp;
+
+ if (allnodes_tail) {
+ allnodes_tail->allnodes_next = mp;
+ allnodes_tail = mp;
+ } else {
+ allnodes = allnodes_tail = mp;
+ }
+}
+
+static struct mdesc_node *find_node(u64 node)
+{
+ struct mdesc_node *mp = mdesc_hash[node_hashfn(node)];
+
+ while (mp) {
+ if (mp->node == node)
+ return mp;
+
+ mp = mp->hash_next;
+ }
+ return NULL;
+}
+
+struct property *md_find_property(const struct mdesc_node *mp,
+ const char *name,
+ int *lenp)
+{
+ struct property *pp;
+
+ for (pp = mp->properties; pp != 0; pp = pp->next) {
+ if (strcasecmp(pp->name, name) == 0) {
+ if (lenp)
+ *lenp = pp->length;
+ break;
+ }
+ }
+ return pp;
+}
+EXPORT_SYMBOL(md_find_property);
+
+/*
+ * Find a property with a given name for a given node
+ * and return the value.
+ */
+const void *md_get_property(const struct mdesc_node *mp, const char *name,
+ int *lenp)
+{
+ struct property *pp = md_find_property(mp, name, lenp);
+ return pp ? pp->value : NULL;
+}
+EXPORT_SYMBOL(md_get_property);
+
+struct mdesc_node *md_find_node_by_name(struct mdesc_node *from,
+ const char *name)
+{
+ struct mdesc_node *mp;
+
+ mp = from ? from->allnodes_next : allnodes;
+ for (; mp != NULL; mp = mp->allnodes_next) {
+ if (strcmp(mp->name, name) == 0)
+ break;
+ }
+ return mp;
+}
+EXPORT_SYMBOL(md_find_node_by_name);
+
+static unsigned int mdesc_early_allocated;
+
+static void * __init mdesc_early_alloc(unsigned long size)
+{
+ void *ret;
+
+ ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+ if (ret == NULL) {
+ prom_printf("MDESC: alloc of %lu bytes failed.\n", size);
+ prom_halt();
+ }
+
+ memset(ret, 0, size);
+
+ mdesc_early_allocated += size;
+
+ return ret;
+}
+
+static unsigned int __init count_arcs(struct mdesc_elem *ep)
+{
+ unsigned int ret = 0;
+
+ ep++;
+ while (ep->tag != MD_NODE_END) {
+ if (ep->tag == MD_PROP_ARC)
+ ret++;
+ ep++;
+ }
+ return ret;
+}
+
+static void __init mdesc_node_alloc(u64 node, struct mdesc_elem *ep, const char *names)
+{
+ unsigned int num_arcs = count_arcs(ep);
+ struct mdesc_node *mp;
+
+ mp = mdesc_early_alloc(sizeof(*mp) +
+ (num_arcs * sizeof(struct mdesc_arc)));
+ mp->name = names + ep->name_offset;
+ mp->node = node;
+ mp->unique_id = unique_id++;
+ mp->num_arcs = num_arcs;
+
+ hash_node(mp);
+}
+
+static inline struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
+{
+ return (struct mdesc_elem *) (mdesc + 1);
+}
+
+static inline void *name_block(struct mdesc_hdr *mdesc)
+{
+ return ((void *) node_block(mdesc)) + mdesc->node_sz;
+}
+
+static inline void *data_block(struct mdesc_hdr *mdesc)
+{
+ return ((void *) name_block(mdesc)) + mdesc->name_sz;
+}
+
+/* In order to avoid recursion (the graph can be very deep) we use a
+ * two pass algorithm. First we allocate all the nodes and hash them.
+ * Then we iterate over each node, filling in the arcs and properties.
+ */
+static void __init build_all_nodes(struct mdesc_hdr *mdesc)
+{
+ struct mdesc_elem *start, *ep;
+ struct mdesc_node *mp;
+ const char *names;
+ void *data;
+ u64 last_node;
+
+ start = ep = node_block(mdesc);
+ last_node = mdesc->node_sz / 16;
+
+ names = name_block(mdesc);
+
+ while (1) {
+ u64 node = ep - start;
+
+ if (ep->tag == MD_LIST_END)
+ break;
+
+ if (ep->tag != MD_NODE) {
+ prom_printf("MDESC: Inconsistent element list.\n");
+ prom_halt();
+ }
+
+ mdesc_node_alloc(node, ep, names);
+
+ if (ep->d.val >= last_node) {
+ printk("MDESC: Warning, early break out of node scan.\n");
+ printk("MDESC: Next node [%lu] last_node [%lu].\n",
+ node, last_node);
+ break;
+ }
+
+ ep = start + ep->d.val;
+ }
+
+ data = data_block(mdesc);
+ for (mp = allnodes; mp; mp = mp->allnodes_next) {
+ struct mdesc_elem *ep = start + mp->node;
+ struct property **link = &mp->properties;
+ unsigned int this_arc = 0;
+
+ ep++;
+ while (ep->tag != MD_NODE_END) {
+ switch (ep->tag) {
+ case MD_PROP_ARC: {
+ struct mdesc_node *target;
+
+ if (this_arc >= mp->num_arcs) {
+ prom_printf("MDESC: ARC overrun [%u:%u]\n",
+ this_arc, mp->num_arcs);
+ prom_halt();
+ }
+ target = find_node(ep->d.val);
+ if (!target) {
+ printk("MDESC: Warning, arc points to "
+ "missing node, ignoring.\n");
+ break;
+ }
+ mp->arcs[this_arc].name =
+ (names + ep->name_offset);
+ mp->arcs[this_arc].arc = target;
+ this_arc++;
+ break;
+ }
+
+ case MD_PROP_VAL:
+ case MD_PROP_STR:
+ case MD_PROP_DATA: {
+ struct property *p = mdesc_early_alloc(sizeof(*p));
+
+ p->unique_id = unique_id++;
+ p->name = (char *) names + ep->name_offset;
+ if (ep->tag == MD_PROP_VAL) {
+ p->value = &ep->d.val;
+ p->length = 8;
+ } else {
+ p->value = data + ep->d.data.data_offset;
+ p->length = ep->d.data.data_len;
+ }
+ *link = p;
+ link = &p->next;
+ break;
+ }
+
+ case MD_NOOP:
+ break;
+
+ default:
+ printk("MDESC: Warning, ignoring unknown tag type %02x\n",
+ ep->tag);
+ }
+ ep++;
+ }
+ }
+}
+
+static unsigned int __init count_nodes(struct mdesc_hdr *mdesc)
+{
+ struct mdesc_elem *ep = node_block(mdesc);
+ struct mdesc_elem *end;
+ unsigned int cnt = 0;
+
+ end = ((void *)ep) + mdesc->node_sz;
+ while (ep < end) {
+ if (ep->tag == MD_NODE)
+ cnt++;
+ ep++;
+ }
+ return cnt;
+}
+
+static void __init report_platform_properties(void)
+{
+ struct mdesc_node *pn = md_find_node_by_name(NULL, "platform");
+ const char *s;
+ const u64 *v;
+
+ if (!pn) {
+ prom_printf("No platform node in machine-description.\n");
+ prom_halt();
+ }
+
+ s = md_get_property(pn, "banner-name", NULL);
+ printk("PLATFORM: banner-name [%s]\n", s);
+ s = md_get_property(pn, "name", NULL);
+ printk("PLATFORM: name [%s]\n", s);
+
+ v = md_get_property(pn, "hostid", NULL);
+ if (v)
+ printk("PLATFORM: hostid [%08lx]\n", *v);
+ v = md_get_property(pn, "serial#", NULL);
+ if (v)
+ printk("PLATFORM: serial# [%08lx]\n", *v);
+ v = md_get_property(pn, "stick-frequency", NULL);
+ printk("PLATFORM: stick-frequency [%08lx]\n", *v);
+ v = md_get_property(pn, "mac-address", NULL);
+ if (v)
+ printk("PLATFORM: mac-address [%lx]\n", *v);
+ v = md_get_property(pn, "watchdog-resolution", NULL);
+ if (v)
+ printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v);
+ v = md_get_property(pn, "watchdog-max-timeout", NULL);
+ if (v)
+ printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v);
+ v = md_get_property(pn, "max-cpus", NULL);
+ if (v)
+ printk("PLATFORM: max-cpus [%lu]\n", *v);
+}
+
+static int inline find_in_proplist(const char *list, const char *match, int len)
+{
+ while (len > 0) {
+ int l;
+
+ if (!strcmp(list, match))
+ return 1;
+ l = strlen(list) + 1;
+ list += l;
+ len -= l;
+ }
+ return 0;
+}
+
+static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp)
+{
+ const u64 *level = md_get_property(mp, "level", NULL);
+ const u64 *size = md_get_property(mp, "size", NULL);
+ const u64 *line_size = md_get_property(mp, "line-size", NULL);
+ const char *type;
+ int type_len;
+
+ type = md_get_property(mp, "type", &type_len);
+
+ switch (*level) {
+ case 1:
+ if (find_in_proplist(type, "instn", type_len)) {
+ c->icache_size = *size;
+ c->icache_line_size = *line_size;
+ } else if (find_in_proplist(type, "data", type_len)) {
+ c->dcache_size = *size;
+ c->dcache_line_size = *line_size;
+ }
+ break;
+
+ case 2:
+ c->ecache_size = *size;
+ c->ecache_line_size = *line_size;
+ break;
+
+ default:
+ break;
+ }
+
+ if (*level == 1) {
+ unsigned int i;
+
+ for (i = 0; i < mp->num_arcs; i++) {
+ struct mdesc_node *t = mp->arcs[i].arc;
+
+ if (strcmp(mp->arcs[i].name, "fwd"))
+ continue;
+
+ if (!strcmp(t->name, "cache"))
+ fill_in_one_cache(c, t);
+ }
+ }
+}
+
+static void __init mark_core_ids(struct mdesc_node *mp, int core_id)
+{
+ unsigned int i;
+
+ for (i = 0; i < mp->num_arcs; i++) {
+ struct mdesc_node *t = mp->arcs[i].arc;
+ const u64 *id;
+
+ if (strcmp(mp->arcs[i].name, "back"))
+ continue;
+
+ if (!strcmp(t->name, "cpu")) {
+ id = md_get_property(t, "id", NULL);
+ if (*id < NR_CPUS)
+ cpu_data(*id).core_id = core_id;
+ } else {
+ unsigned int j;
+
+ for (j = 0; j < t->num_arcs; j++) {
+ struct mdesc_node *n = t->arcs[j].arc;
+
+ if (strcmp(t->arcs[j].name, "back"))
+ continue;
+
+ if (strcmp(n->name, "cpu"))
+ continue;
+
+ id = md_get_property(n, "id", NULL);
+ if (*id < NR_CPUS)
+ cpu_data(*id).core_id = core_id;
+ }
+ }
+ }
+}
+
+static void __init set_core_ids(void)
+{
+ struct mdesc_node *mp;
+ int idx;
+
+ idx = 1;
+ md_for_each_node_by_name(mp, "cache") {
+ const u64 *level = md_get_property(mp, "level", NULL);
+ const char *type;
+ int len;
+
+ if (*level != 1)
+ continue;
+
+ type = md_get_property(mp, "type", &len);
+ if (!find_in_proplist(type, "instn", len))
+ continue;
+
+ mark_core_ids(mp, idx);
+
+ idx++;
+ }
+}
+
+static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
+{
+ u64 val;
+
+ if (!p)
+ goto use_default;
+ val = *p;
+
+ if (!val || val >= 64)
+ goto use_default;
+
+ *mask = ((1U << val) * 64U) - 1U;
+ return;
+
+use_default:
+ *mask = ((1U << def) * 64U) - 1U;
+}
+
+static void __init get_mondo_data(struct mdesc_node *mp, struct trap_per_cpu *tb)
+{
+ const u64 *val;
+
+ val = md_get_property(mp, "q-cpu-mondo-#bits", NULL);
+ get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);
+
+ val = md_get_property(mp, "q-dev-mondo-#bits", NULL);
+ get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);
+
+ val = md_get_property(mp, "q-resumable-#bits", NULL);
+ get_one_mondo_bits(val, &tb->resum_qmask, 6);
+
+ val = md_get_property(mp, "q-nonresumable-#bits", NULL);
+ get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
+}
+
+static void __init mdesc_fill_in_cpu_data(void)
+{
+ struct mdesc_node *mp;
+
+ ncpus_probed = 0;
+ md_for_each_node_by_name(mp, "cpu") {
+ const u64 *id = md_get_property(mp, "id", NULL);
+ const u64 *cfreq = md_get_property(mp, "clock-frequency", NULL);
+ struct trap_per_cpu *tb;
+ cpuinfo_sparc *c;
+ unsigned int i;
+ int cpuid;
+
+ ncpus_probed++;
+
+ cpuid = *id;
+
+#ifdef CONFIG_SMP
+ if (cpuid >= NR_CPUS)
+ continue;
+#else
+ /* On uniprocessor we only want the values for the
+ * real physical cpu the kernel booted onto, however
+ * cpu_data() only has one entry at index 0.
+ */
+ if (cpuid != real_hard_smp_processor_id())
+ continue;
+ cpuid = 0;
+#endif
+
+ c = &cpu_data(cpuid);
+ c->clock_tick = *cfreq;
+
+ tb = &trap_block[cpuid];
+ get_mondo_data(mp, tb);
+
+ for (i = 0; i < mp->num_arcs; i++) {
+ struct mdesc_node *t = mp->arcs[i].arc;
+ unsigned int j;
+
+ if (strcmp(mp->arcs[i].name, "fwd"))
+ continue;
+
+ if (!strcmp(t->name, "cache")) {
+ fill_in_one_cache(c, t);
+ continue;
+ }
+
+ for (j = 0; j < t->num_arcs; j++) {
+ struct mdesc_node *n;
+
+ n = t->arcs[j].arc;
+ if (strcmp(t->arcs[j].name, "fwd"))
+ continue;
+
+ if (!strcmp(n->name, "cache"))
+ fill_in_one_cache(c, n);
+ }
+ }
+
+#ifdef CONFIG_SMP
+ cpu_set(cpuid, cpu_present_map);
+ cpu_set(cpuid, phys_cpu_present_map);
+#endif
+
+ c->core_id = 0;
+ }
+
+ set_core_ids();
+
+ smp_fill_in_sib_core_maps();
+}
+
+void __init sun4v_mdesc_init(void)
+{
+ unsigned long len, real_len, status;
+
+ (void) sun4v_mach_desc(0UL, 0UL, &len);
+
+ printk("MDESC: Size is %lu bytes.\n", len);
+
+ main_mdesc = mdesc_early_alloc(len);
+
+ status = sun4v_mach_desc(__pa(main_mdesc), len, &real_len);
+ if (status != HV_EOK || real_len > len) {
+ prom_printf("sun4v_mach_desc fails, err(%lu), "
+ "len(%lu), real_len(%lu)\n",
+ status, len, real_len);
+ prom_halt();
+ }
+
+ len = count_nodes(main_mdesc);
+ printk("MDESC: %lu nodes.\n", len);
+
+ len = roundup_pow_of_two(len);
+
+ mdesc_hash = mdesc_early_alloc(len * sizeof(struct mdesc_node *));
+ mdesc_hash_size = len;
+
+ printk("MDESC: Hash size %lu entries.\n", len);
+
+ build_all_nodes(main_mdesc);
+
+ printk("MDESC: Built graph with %u bytes of memory.\n",
+ mdesc_early_allocated);
+
+ report_platform_properties();
+ mdesc_fill_in_cpu_data();
+}
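
As the comment in build_all_nodes() explains, the graph is built without recursion: pass one allocates an mdesc_node for every MD_NODE element and hashes it by element index, pass two walks each node's elements and resolves MD_PROP_ARC targets through find_node(), so back-pointer cycles cost nothing. A stripped-down user-space sketch of that two-pass pattern (the node table below is invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct node {
        unsigned long id;        /* element index in the raw buffer   */
        unsigned long arc_id;    /* raw arc target, recorded in pass 1 */
        struct node *arc;        /* resolved arc, filled in pass 2     */
        struct node *hash_next;
};

#define HASH_SIZE 8              /* power of two, like mdesc_hash_size */
static struct node *hash[HASH_SIZE];

static unsigned int hashfn(unsigned long id)
{
        return (unsigned int)(id ^ (id >> 8) ^ (id >> 16)) & (HASH_SIZE - 1);
}

static void hash_node(struct node *n)
{
        unsigned int h = hashfn(n->id);

        n->hash_next = hash[h];
        hash[h] = n;
}

static struct node *find_node(unsigned long id)
{
        struct node *n;

        for (n = hash[hashfn(id)]; n; n = n->hash_next)
                if (n->id == id)
                        return n;
        return NULL;
}

int main(void)
{
        /* Fake "machine description": node 0 arcs to 3, node 3 arcs back to 0. */
        unsigned long raw[][2] = { { 0, 3 }, { 3, 0 } };
        struct node *nodes[2];
        int i;

        /* Pass 1: allocate and hash every node. */
        for (i = 0; i < 2; i++) {
                nodes[i] = calloc(1, sizeof(*nodes[i]));
                nodes[i]->id = raw[i][0];
                nodes[i]->arc_id = raw[i][1];
                hash_node(nodes[i]);
        }

        /* Pass 2: resolve arcs through the hash -- cycles are harmless. */
        for (i = 0; i < 2; i++)
                nodes[i]->arc = find_node(nodes[i]->arc_id);

        printf("node %lu -> node %lu\n", nodes[0]->id, nodes[0]->arc->id);
        return 0;
}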
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index d4c077dc5e8..38a32bc95d2 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -306,6 +306,20 @@ static void __init pci_controller_probe(void)
pci_controller_scan(pci_controller_init);
}
+static int ofpci_verbose;
+
+static int __init ofpci_debug(char *str)
+{
+ int val = 0;
+
+ get_option(&str, &val);
+ if (val)
+ ofpci_verbose = 1;
+ return 1;
+}
+
+__setup("ofpci_debug=", ofpci_debug);
+
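With ofpci_verbose defaulting to 0, the probe-time printks that pci.c used to emit unconditionally become opt-in: booting with ofpci_debug=1 on the kernel command line makes the __setup() handler above set the flag and restores the verbose OF PCI scan output, while omitting the option (or passing ofpci_debug=0) keeps the scan quiet.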
static unsigned long pci_parse_of_flags(u32 addr0)
{
unsigned long flags = 0;
@@ -337,7 +351,9 @@ static void pci_parse_of_addrs(struct of_device *op,
addrs = of_get_property(node, "assigned-addresses", &proplen);
if (!addrs)
return;
- printk(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
+ if (ofpci_verbose)
+ printk(" parse addresses (%d bytes) @ %p\n",
+ proplen, addrs);
op_res = &op->resource[0];
for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
struct resource *res;
@@ -348,8 +364,9 @@ static void pci_parse_of_addrs(struct of_device *op,
if (!flags)
continue;
i = addrs[0] & 0xff;
- printk(" start: %lx, end: %lx, i: %x\n",
- op_res->start, op_res->end, i);
+ if (ofpci_verbose)
+ printk(" start: %lx, end: %lx, i: %x\n",
+ op_res->start, op_res->end, i);
if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
@@ -393,8 +410,9 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
if (type == NULL)
type = "";
- printk(" create device, devfn: %x, type: %s hostcontroller(%d)\n",
- devfn, type, host_controller);
+ if (ofpci_verbose)
+ printk(" create device, devfn: %x, type: %s\n",
+ devfn, type);
dev->bus = bus;
dev->sysdata = node;
@@ -434,8 +452,9 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
- printk(" class: 0x%x device name: %s\n",
- dev->class, pci_name(dev));
+ if (ofpci_verbose)
+ printk(" class: 0x%x device name: %s\n",
+ dev->class, pci_name(dev));
/* I have seen IDE devices which will not respond to
* the bmdma simplex check reads if bus mastering is
@@ -469,7 +488,8 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
}
pci_parse_of_addrs(sd->op, node, dev);
- printk(" adding to system ...\n");
+ if (ofpci_verbose)
+ printk(" adding to system ...\n");
pci_device_add(dev, bus);
@@ -547,7 +567,8 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
unsigned int flags;
u64 size;
- printk("of_scan_pci_bridge(%s)\n", node->full_name);
+ if (ofpci_verbose)
+ printk("of_scan_pci_bridge(%s)\n", node->full_name);
/* parse bus-range property */
busrange = of_get_property(node, "bus-range", &len);
@@ -632,7 +653,8 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
simba_cont:
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
- printk(" bus name: %s\n", bus->name);
+ if (ofpci_verbose)
+ printk(" bus name: %s\n", bus->name);
pci_of_scan_bus(pbm, node, bus);
}
@@ -646,12 +668,14 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
int reglen, devfn;
struct pci_dev *dev;
- printk("PCI: scan_bus[%s] bus no %d\n",
- node->full_name, bus->number);
+ if (ofpci_verbose)
+ printk("PCI: scan_bus[%s] bus no %d\n",
+ node->full_name, bus->number);
child = NULL;
while ((child = of_get_next_child(node, child)) != NULL) {
- printk(" * %s\n", child->full_name);
+ if (ofpci_verbose)
+ printk(" * %s\n", child->full_name);
reg = of_get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20)
continue;
@@ -661,7 +685,9 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
dev = of_create_pci_dev(pbm, child, bus, devfn, 0);
if (!dev)
continue;
- printk("PCI: dev header type: %x\n", dev->hdr_type);
+ if (ofpci_verbose)
+ printk("PCI: dev header type: %x\n",
+ dev->hdr_type);
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index e2377796de8..323d6c27851 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -762,9 +762,10 @@ void sabre_init(struct device_node *dp, char *model_name)
/* Of course, Sun has to encode things a thousand
* different ways, inconsistently.
*/
- cpu_find_by_instance(0, &dp, NULL);
- if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe"))
- hummingbird_p = 1;
+ for_each_node_by_type(dp, "cpu") {
+ if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe"))
+ hummingbird_p = 1;
+ }
}
}
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 044e8ec4c0f..6b3fe2c1d65 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -12,6 +12,7 @@
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
+#include <linux/log2.h>
#include <asm/iommu.h>
#include <asm/irq.h>
@@ -26,6 +27,9 @@
#include "pci_sun4v.h"
+static unsigned long vpci_major = 1;
+static unsigned long vpci_minor = 1;
+
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
struct iommu_batch {
@@ -638,9 +642,8 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
struct iommu *iommu = pbm->iommu;
struct property *prop;
- unsigned long num_tsb_entries, sz;
+ unsigned long num_tsb_entries, sz, tsbsize;
u32 vdma[2], dma_mask, dma_offset;
- int tsbsize;
prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
if (prop) {
@@ -654,31 +657,15 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
vdma[1] = 0x80000000;
}
- dma_mask = vdma[0];
- switch (vdma[1]) {
- case 0x20000000:
- dma_mask |= 0x1fffffff;
- tsbsize = 64;
- break;
-
- case 0x40000000:
- dma_mask |= 0x3fffffff;
- tsbsize = 128;
- break;
-
- case 0x80000000:
- dma_mask |= 0x7fffffff;
- tsbsize = 256;
- break;
-
- default:
- prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
- prom_halt();
+ if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
+ prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
+ vdma[0], vdma[1]);
+ prom_halt();
};
- tsbsize *= (8 * 1024);
-
- num_tsb_entries = tsbsize / sizeof(iopte_t);
+ dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
+ num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
+ tsbsize = num_tsb_entries * sizeof(iopte_t);
dma_offset = vdma[0];
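Worked example for the new sizing (illustrative, not part of the patch): for the fallback window vdma[1] = 0x80000000, and assuming sparc64's 8 KB IO pages and 8-byte IOMMU PTEs (assumptions, not visible in this hunk), num_tsb_entries = 0x80000000 / 8192 = 262144 and tsbsize = 262144 * 8 = 2 MB — the same 2 MB (256 * 8K) TSB the removed switch produced for that window — while dma_mask = roundup_pow_of_two(0x80000000) - 1 = 0x7fffffff. A minimal sketch of the arithmetic:

    #include <stdio.h>

    /* Assumed values, matching sparc64 but not shown in this hunk. */
    #define IO_PAGE_SIZE    8192UL      /* 8 KB IOMMU pages */
    #define IOPTE_SIZE      8UL         /* sizeof(iopte_t) */

    int main(void)
    {
            unsigned long vdma1   = 0x80000000UL;          /* 2 GB DMA window */
            unsigned long entries = vdma1 / IO_PAGE_SIZE;  /* 262144 */
            unsigned long tsbsize = entries * IOPTE_SIZE;  /* 2097152 = 2 MB */

            printf("num_tsb_entries = %lu, tsbsize = %lu bytes\n",
                   entries, tsbsize);
            return 0;
    }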
@@ -689,7 +676,7 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
iommu->dma_addr_mask = dma_mask;
/* Allocate and initialize the free area map. */
- sz = num_tsb_entries / 8;
+ sz = (num_tsb_entries + 7) / 8;
sz = (sz + 7UL) & ~7UL;
iommu->arena.map = kzalloc(sz, GFP_KERNEL);
if (!iommu->arena.map) {
@@ -1178,6 +1165,7 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
void sun4v_pci_init(struct device_node *dp, char *model_name)
{
+ static int hvapi_negotiated = 0;
struct pci_controller_info *p;
struct pci_pbm_info *pbm;
struct iommu *iommu;
@@ -1186,6 +1174,20 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
u32 devhandle;
int i;
+ if (!hvapi_negotiated++) {
+ int err = sun4v_hvapi_register(HV_GRP_PCI,
+ vpci_major,
+ &vpci_minor);
+
+ if (err) {
+ prom_printf("SUN4V_PCI: Could not register hvapi, "
+ "err=%d\n", err);
+ prom_halt();
+ }
+ printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
+ vpci_major, vpci_minor);
+ }
+
prop = of_find_property(dp, "reg", NULL);
regs = prop->value;
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 699b24b890d..5d6adea3967 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -19,6 +19,7 @@
#include <asm/prom.h>
#include <asm/of_device.h>
#include <asm/io.h>
+#include <asm/sstate.h>
#include <linux/unistd.h>
@@ -53,6 +54,7 @@ static void (*poweroff_method)(void) = machine_alt_power_off;
void machine_power_off(void)
{
+ sstate_poweroff();
if (!serial_console || scons_pwroff) {
#ifdef CONFIG_PCI
if (power_reg) {
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 952762bfb4c..f5f97e2c669 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -45,6 +45,7 @@
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
+#include <asm/sstate.h>
/* #define VERBOSE_SHOWREGS */
@@ -106,6 +107,7 @@ extern void (*prom_keyboard)(void);
void machine_halt(void)
{
+ sstate_halt();
if (!serial_console && prom_palette)
prom_palette (1);
if (prom_keyboard)
@@ -116,6 +118,7 @@ void machine_halt(void)
void machine_alt_power_off(void)
{
+ sstate_poweroff();
if (!serial_console && prom_palette)
prom_palette(1);
if (prom_keyboard)
@@ -128,6 +131,7 @@ void machine_restart(char * cmd)
{
char *p;
+ sstate_reboot();
p = strchr (reboot_command, '\n');
if (p) *p = 0;
if (!serial_console && prom_palette)
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index 02830e4671f..dad4b3ba705 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -28,6 +28,7 @@
#include <asm/irq.h>
#include <asm/asi.h>
#include <asm/upa.h>
+#include <asm/smp.h>
static struct device_node *allnodes;
@@ -1665,6 +1666,150 @@ static struct device_node * __init build_tree(struct device_node *parent, phandl
return ret;
}
+static const char *get_mid_prop(void)
+{
+ return (tlb_type == spitfire ? "upa-portid" : "portid");
+}
+
+struct device_node *of_find_node_by_cpuid(int cpuid)
+{
+ struct device_node *dp;
+ const char *mid_prop = get_mid_prop();
+
+ for_each_node_by_type(dp, "cpu") {
+ int id = of_getintprop_default(dp, mid_prop, -1);
+ const char *this_mid_prop = mid_prop;
+
+ if (id < 0) {
+ this_mid_prop = "cpuid";
+ id = of_getintprop_default(dp, this_mid_prop, -1);
+ }
+
+ if (id < 0) {
+ prom_printf("OF: Serious problem, cpu lacks "
+ "%s property", this_mid_prop);
+ prom_halt();
+ }
+ if (cpuid == id)
+ return dp;
+ }
+ return NULL;
+}
+
+static void __init of_fill_in_cpu_data(void)
+{
+ struct device_node *dp;
+ const char *mid_prop = get_mid_prop();
+
+ ncpus_probed = 0;
+ for_each_node_by_type(dp, "cpu") {
+ int cpuid = of_getintprop_default(dp, mid_prop, -1);
+ const char *this_mid_prop = mid_prop;
+ struct device_node *portid_parent;
+ int portid = -1;
+
+ portid_parent = NULL;
+ if (cpuid < 0) {
+ this_mid_prop = "cpuid";
+ cpuid = of_getintprop_default(dp, this_mid_prop, -1);
+ if (cpuid >= 0) {
+ int limit = 2;
+
+ portid_parent = dp;
+ while (limit--) {
+ portid_parent = portid_parent->parent;
+ if (!portid_parent)
+ break;
+ portid = of_getintprop_default(portid_parent,
+ "portid", -1);
+ if (portid >= 0)
+ break;
+ }
+ }
+ }
+
+ if (cpuid < 0) {
+ prom_printf("OF: Serious problem, cpu lacks "
+ "%s property", this_mid_prop);
+ prom_halt();
+ }
+
+ ncpus_probed++;
+
+#ifdef CONFIG_SMP
+ if (cpuid >= NR_CPUS)
+ continue;
+#else
+ /* On uniprocessor we only want the values for the
+ * real physical cpu the kernel booted onto, however
+ * cpu_data() only has one entry at index 0.
+ */
+ if (cpuid != real_hard_smp_processor_id())
+ continue;
+ cpuid = 0;
+#endif
+
+ cpu_data(cpuid).clock_tick =
+ of_getintprop_default(dp, "clock-frequency", 0);
+
+ if (portid_parent) {
+ cpu_data(cpuid).dcache_size =
+ of_getintprop_default(dp, "l1-dcache-size",
+ 16 * 1024);
+ cpu_data(cpuid).dcache_line_size =
+ of_getintprop_default(dp, "l1-dcache-line-size",
+ 32);
+ cpu_data(cpuid).icache_size =
+ of_getintprop_default(dp, "l1-icache-size",
+ 8 * 1024);
+ cpu_data(cpuid).icache_line_size =
+ of_getintprop_default(dp, "l1-icache-line-size",
+ 32);
+ cpu_data(cpuid).ecache_size =
+ of_getintprop_default(dp, "l2-cache-size", 0);
+ cpu_data(cpuid).ecache_line_size =
+ of_getintprop_default(dp, "l2-cache-line-size", 0);
+ if (!cpu_data(cpuid).ecache_size ||
+ !cpu_data(cpuid).ecache_line_size) {
+ cpu_data(cpuid).ecache_size =
+ of_getintprop_default(portid_parent,
+ "l2-cache-size",
+ (4 * 1024 * 1024));
+ cpu_data(cpuid).ecache_line_size =
+ of_getintprop_default(portid_parent,
+ "l2-cache-line-size", 64);
+ }
+
+ cpu_data(cpuid).core_id = portid + 1;
+ } else {
+ cpu_data(cpuid).dcache_size =
+ of_getintprop_default(dp, "dcache-size", 16 * 1024);
+ cpu_data(cpuid).dcache_line_size =
+ of_getintprop_default(dp, "dcache-line-size", 32);
+
+ cpu_data(cpuid).icache_size =
+ of_getintprop_default(dp, "icache-size", 16 * 1024);
+ cpu_data(cpuid).icache_line_size =
+ of_getintprop_default(dp, "icache-line-size", 32);
+
+ cpu_data(cpuid).ecache_size =
+ of_getintprop_default(dp, "ecache-size",
+ (4 * 1024 * 1024));
+ cpu_data(cpuid).ecache_line_size =
+ of_getintprop_default(dp, "ecache-line-size", 64);
+
+ cpu_data(cpuid).core_id = 0;
+ }
+
+#ifdef CONFIG_SMP
+ cpu_set(cpuid, cpu_present_map);
+ cpu_set(cpuid, phys_cpu_present_map);
+#endif
+ }
+
+ smp_fill_in_sib_core_maps();
+}
+
void __init prom_build_devicetree(void)
{
struct device_node **nextp;
@@ -1679,4 +1824,7 @@ void __init prom_build_devicetree(void)
&nextp);
printk("PROM: Built device tree with %u bytes of memory.\n",
prom_early_allocated);
+
+ if (tlb_type != hypervisor)
+ of_fill_in_cpu_data();
}
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index dea9c3c9ec5..de9b4c13f1c 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -46,11 +46,17 @@
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mmu.h>
+#include <asm/ns87303.h>
#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif
+/* Used to synchronize accesses to the NatSemi SUPER I/O chip configuration
+ * operations in asm/ns87303.h
+ */
+DEFINE_SPINLOCK(ns87303_lock);
+
struct screen_info screen_info = {
0, 0, /* orig-x, orig-y */
0, /* unused */
@@ -370,8 +376,6 @@ void __init setup_arch(char **cmdline_p)
init_cur_cpu_trap(current_thread_info());
paging_init();
-
- smp_setup_cpu_possible_map();
}
static int __init set_preferred_console(void)
@@ -424,7 +428,7 @@ extern void mmu_info(struct seq_file *);
unsigned int dcache_parity_tl1_occurred;
unsigned int icache_parity_tl1_occurred;
-static int ncpus_probed;
+int ncpus_probed;
static int show_cpuinfo(struct seq_file *m, void *__unused)
{
@@ -516,14 +520,6 @@ static int __init topology_init(void)
err = -ENOMEM;
- /* Count the number of physically present processors in
- * the machine, even on uniprocessor, so that /proc/cpuinfo
- * output is consistent with 2.4.x
- */
- ncpus_probed = 0;
- while (!cpu_find_by_instance(ncpus_probed, NULL, NULL))
- ncpus_probed++;
-
for_each_possible_cpu(i) {
struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p) {
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 24fdf1d0adc..c550bba3490 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -40,6 +40,7 @@
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
+#include <asm/mdesc.h>
extern void calibrate_delay(void);
@@ -75,53 +76,6 @@ void smp_bogo(struct seq_file *m)
i, cpu_data(i).clock_tick);
}
-void __init smp_store_cpu_info(int id)
-{
- struct device_node *dp;
- int def;
-
- cpu_data(id).udelay_val = loops_per_jiffy;
-
- cpu_find_by_mid(id, &dp);
- cpu_data(id).clock_tick =
- of_getintprop_default(dp, "clock-frequency", 0);
-
- def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
- cpu_data(id).dcache_size =
- of_getintprop_default(dp, "dcache-size", def);
-
- def = 32;
- cpu_data(id).dcache_line_size =
- of_getintprop_default(dp, "dcache-line-size", def);
-
- def = 16 * 1024;
- cpu_data(id).icache_size =
- of_getintprop_default(dp, "icache-size", def);
-
- def = 32;
- cpu_data(id).icache_line_size =
- of_getintprop_default(dp, "icache-line-size", def);
-
- def = ((tlb_type == hypervisor) ?
- (3 * 1024 * 1024) :
- (4 * 1024 * 1024));
- cpu_data(id).ecache_size =
- of_getintprop_default(dp, "ecache-size", def);
-
- def = 64;
- cpu_data(id).ecache_line_size =
- of_getintprop_default(dp, "ecache-line-size", def);
-
- printk("CPU[%d]: Caches "
- "D[sz(%d):line_sz(%d)] "
- "I[sz(%d):line_sz(%d)] "
- "E[sz(%d):line_sz(%d)]\n",
- id,
- cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
- cpu_data(id).icache_size, cpu_data(id).icache_line_size,
- cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
-}
-
extern void setup_sparc64_timer(void);
static volatile unsigned long callin_flag = 0;
@@ -145,7 +99,7 @@ void __init smp_callin(void)
local_irq_enable();
calibrate_delay();
- smp_store_cpu_info(cpuid);
+ cpu_data(cpuid).udelay_val = loops_per_jiffy;
callin_flag = 1;
__asm__ __volatile__("membar #Sync\n\t"
"flush %%g6" : : : "memory");
@@ -340,9 +294,8 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
prom_startcpu_cpuid(cpu, entry, cookie);
} else {
- struct device_node *dp;
+ struct device_node *dp = of_find_node_by_cpuid(cpu);
- cpu_find_by_mid(cpu, &dp);
prom_startcpu(dp->node, entry, cookie);
}
@@ -447,7 +400,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
u64 pstate, ver;
- int nack_busy_id, is_jbus;
+ int nack_busy_id, is_jbus, need_more;
if (cpus_empty(mask))
return;
@@ -463,6 +416,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
retry:
+ need_more = 0;
__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
: : "r" (pstate), "i" (PSTATE_IE));
@@ -491,6 +445,10 @@ retry:
: /* no outputs */
: "r" (target), "i" (ASI_INTR_W));
nack_busy_id++;
+ if (nack_busy_id == 32) {
+ need_more = 1;
+ break;
+ }
}
}
@@ -507,6 +465,16 @@ retry:
if (dispatch_stat == 0UL) {
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
: : "r" (pstate));
+ if (unlikely(need_more)) {
+ int i, cnt = 0;
+ for_each_cpu_mask(i, mask) {
+ cpu_clear(i, mask);
+ cnt++;
+ if (cnt == 32)
+ break;
+ }
+ goto retry;
+ }
return;
}
if (!--stuck)
@@ -544,6 +512,8 @@ retry:
if ((dispatch_stat & check_mask) == 0)
cpu_clear(i, mask);
this_busy_nack += 2;
+ if (this_busy_nack == 64)
+ break;
}
goto retry;
@@ -1191,23 +1161,14 @@ int setup_profiling_timer(unsigned int multiplier)
static void __init smp_tune_scheduling(void)
{
- struct device_node *dp;
- int instance;
- unsigned int def, smallest = ~0U;
-
- def = ((tlb_type == hypervisor) ?
- (3 * 1024 * 1024) :
- (4 * 1024 * 1024));
+ unsigned int smallest = ~0U;
+ int i;
- instance = 0;
- while (!cpu_find_by_instance(instance, &dp, NULL)) {
- unsigned int val;
+ for (i = 0; i < NR_CPUS; i++) {
+ unsigned int val = cpu_data(i).ecache_size;
- val = of_getintprop_default(dp, "ecache-size", def);
- if (val < smallest)
+ if (val && val < smallest)
smallest = val;
-
- instance++;
}
/* Any value less than 256K is nonsense. */
@@ -1230,58 +1191,42 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
int i;
if (num_possible_cpus() > max_cpus) {
- int instance, mid;
-
- instance = 0;
- while (!cpu_find_by_instance(instance, NULL, &mid)) {
- if (mid != boot_cpu_id) {
- cpu_clear(mid, phys_cpu_present_map);
- cpu_clear(mid, cpu_present_map);
+ for_each_possible_cpu(i) {
+ if (i != boot_cpu_id) {
+ cpu_clear(i, phys_cpu_present_map);
+ cpu_clear(i, cpu_present_map);
if (num_possible_cpus() <= max_cpus)
break;
}
- instance++;
}
}
- for_each_possible_cpu(i) {
- if (tlb_type == hypervisor) {
- int j;
-
- /* XXX get this mapping from machine description */
- for_each_possible_cpu(j) {
- if ((j >> 2) == (i >> 2))
- cpu_set(j, cpu_sibling_map[i]);
- }
- } else {
- cpu_set(i, cpu_sibling_map[i]);
- }
- }
-
- smp_store_cpu_info(boot_cpu_id);
+ cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
smp_tune_scheduling();
}
-/* Set this up early so that things like the scheduler can init
- * properly. We use the same cpu mask for both the present and
- * possible cpu map.
- */
-void __init smp_setup_cpu_possible_map(void)
+void __devinit smp_prepare_boot_cpu(void)
{
- int instance, mid;
-
- instance = 0;
- while (!cpu_find_by_instance(instance, NULL, &mid)) {
- if (mid < NR_CPUS) {
- cpu_set(mid, phys_cpu_present_map);
- cpu_set(mid, cpu_present_map);
- }
- instance++;
- }
}
-void __devinit smp_prepare_boot_cpu(void)
+void __devinit smp_fill_in_sib_core_maps(void)
{
+ unsigned int i;
+
+ for_each_possible_cpu(i) {
+ unsigned int j;
+
+ if (cpu_data(i).core_id == 0) {
+ cpu_set(i, cpu_sibling_map[i]);
+ continue;
+ }
+
+ for_each_possible_cpu(j) {
+ if (cpu_data(i).core_id ==
+ cpu_data(j).core_id)
+ cpu_set(j, cpu_sibling_map[i]);
+ }
+ }
}
int __cpuinit __cpu_up(unsigned int cpu)
@@ -1337,7 +1282,7 @@ unsigned long __per_cpu_shift __read_mostly;
EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);
-void __init setup_per_cpu_areas(void)
+void __init real_setup_per_cpu_areas(void)
{
unsigned long goal, size, i;
char *ptr;
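smp_fill_in_sib_core_maps() above now derives the sibling maps purely from the per-cpu core_id values set earlier in this patch (set_core_ids() on sun4v, of_fill_in_cpu_data() on sun4u): CPUs sharing a nonzero core_id become each other's siblings, and core_id 0 degrades to a singleton map. A toy userspace model of that pairing (illustrative only, not kernel code):

    #include <stdio.h>

    /* Four pretend CPUs: cpu0/cpu1 share core 1, cpu2 sits alone on core 2,
     * and cpu3 has no core information (core_id 0).
     */
    int main(void)
    {
            int core_id[4] = { 1, 1, 2, 0 };
            unsigned int sibling_map[4] = { 0, 0, 0, 0 };
            int i, j;

            for (i = 0; i < 4; i++) {
                    if (core_id[i] == 0) {
                            sibling_map[i] |= 1U << i;   /* only its own sibling */
                            continue;
                    }
                    for (j = 0; j < 4; j++)
                            if (core_id[i] == core_id[j])
                                    sibling_map[i] |= 1U << j;
            }

            for (i = 0; i < 4; i++)
                    printf("cpu%d siblings: 0x%x\n", i, sibling_map[i]);
            return 0;
    }

For core_ids {1, 1, 2, 0} this prints sibling masks 0x3, 0x3, 0x4 and 0x8.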
diff --git a/arch/sparc64/kernel/sstate.c b/arch/sparc64/kernel/sstate.c
new file mode 100644
index 00000000000..5b6e75b7f05
--- /dev/null
+++ b/arch/sparc64/kernel/sstate.c
@@ -0,0 +1,104 @@
+/* sstate.c: System soft state support.
+ *
+ * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+
+#include <asm/hypervisor.h>
+#include <asm/sstate.h>
+#include <asm/oplib.h>
+#include <asm/head.h>
+#include <asm/io.h>
+
+static int hv_supports_soft_state;
+
+static unsigned long kimage_addr_to_ra(const char *p)
+{
+ unsigned long val = (unsigned long) p;
+
+ return kern_base + (val - KERNBASE);
+}
+
+static void do_set_sstate(unsigned long state, const char *msg)
+{
+ unsigned long err;
+
+ if (!hv_supports_soft_state)
+ return;
+
+ err = sun4v_mach_set_soft_state(state, kimage_addr_to_ra(msg));
+ if (err) {
+ printk(KERN_WARNING "SSTATE: Failed to set soft-state to "
+ "state[%lx] msg[%s], err=%lu\n",
+ state, msg, err);
+ }
+}
+
+static const char booting_msg[32] __attribute__((aligned(32))) =
+ "Linux booting";
+static const char running_msg[32] __attribute__((aligned(32))) =
+ "Linux running";
+static const char halting_msg[32] __attribute__((aligned(32))) =
+ "Linux halting";
+static const char poweroff_msg[32] __attribute__((aligned(32))) =
+ "Linux powering off";
+static const char rebooting_msg[32] __attribute__((aligned(32))) =
+ "Linux rebooting";
+static const char panicing_msg[32] __attribute__((aligned(32))) =
+ "Linux panicing";
+
+void sstate_booting(void)
+{
+ do_set_sstate(HV_SOFT_STATE_TRANSITION, booting_msg);
+}
+
+void sstate_running(void)
+{
+ do_set_sstate(HV_SOFT_STATE_NORMAL, running_msg);
+}
+
+void sstate_halt(void)
+{
+ do_set_sstate(HV_SOFT_STATE_TRANSITION, halting_msg);
+}
+
+void sstate_poweroff(void)
+{
+ do_set_sstate(HV_SOFT_STATE_TRANSITION, poweroff_msg);
+}
+
+void sstate_reboot(void)
+{
+ do_set_sstate(HV_SOFT_STATE_TRANSITION, rebooting_msg);
+}
+
+static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
+{
+ do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sstate_panic_block = {
+ .notifier_call = sstate_panic_event,
+ .priority = INT_MAX,
+};
+
+void __init sun4v_sstate_init(void)
+{
+ unsigned long major, minor;
+
+ major = 1;
+ minor = 0;
+ if (sun4v_hvapi_register(HV_GRP_SOFT_STATE, major, &minor))
+ return;
+
+ hv_supports_soft_state = 1;
+
+ prom_sun4v_guest_soft_state();
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &sstate_panic_block);
+}
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
index 405855dd886..574bc248bca 100644
--- a/arch/sparc64/kernel/sun4v_ivec.S
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -22,12 +22,12 @@ sun4v_cpu_mondo:
be,pn %xcc, sun4v_cpu_mondo_queue_empty
nop
- /* Get &trap_block[smp_processor_id()] into %g3. */
- ldxa [%g0] ASI_SCRATCHPAD, %g3
- sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
+ /* Get &trap_block[smp_processor_id()] into %g4. */
+ ldxa [%g0] ASI_SCRATCHPAD, %g4
+ sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
/* Get CPU mondo queue base phys address into %g7. */
- ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
+ ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
/* Now get the cross-call arguments and handler PC, same
* layout as sun4u:
@@ -47,8 +47,7 @@ sun4v_cpu_mondo:
add %g2, 0x40 - 0x8 - 0x8, %g2
/* Update queue head pointer. */
- sethi %hi(8192 - 1), %g4
- or %g4, %lo(8192 - 1), %g4
+ lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
and %g2, %g4, %g2
mov INTRQ_CPU_MONDO_HEAD, %g4
@@ -71,12 +70,12 @@ sun4v_dev_mondo:
be,pn %xcc, sun4v_dev_mondo_queue_empty
nop
- /* Get &trap_block[smp_processor_id()] into %g3. */
- ldxa [%g0] ASI_SCRATCHPAD, %g3
- sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
+ /* Get &trap_block[smp_processor_id()] into %g4. */
+ ldxa [%g0] ASI_SCRATCHPAD, %g4
+ sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
/* Get DEV mondo queue base phys address into %g5. */
- ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
+ ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
/* Load IVEC into %g3. */
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
@@ -90,8 +89,7 @@ sun4v_dev_mondo:
*/
/* Update queue head pointer, this frees up some registers. */
- sethi %hi(8192 - 1), %g4
- or %g4, %lo(8192 - 1), %g4
+ lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
and %g2, %g4, %g2
mov INTRQ_DEVICE_MONDO_HEAD, %g4
@@ -143,6 +141,8 @@ sun4v_res_mondo:
brnz,pn %g1, sun4v_res_mondo_queue_full
nop
+ lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4
+
/* Remember this entry's offset in %g1. */
mov %g2, %g1
@@ -173,8 +173,6 @@ sun4v_res_mondo:
add %g2, 0x08, %g2
/* Update queue head pointer. */
- sethi %hi(8192 - 1), %g4
- or %g4, %lo(8192 - 1), %g4
and %g2, %g4, %g2
mov INTRQ_RESUM_MONDO_HEAD, %g4
@@ -254,6 +252,8 @@ sun4v_nonres_mondo:
brnz,pn %g1, sun4v_nonres_mondo_queue_full
nop
+ lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4
+
/* Remember this entry's offset in %g1. */
mov %g2, %g1
@@ -284,8 +284,6 @@ sun4v_nonres_mondo:
add %g2, 0x08, %g2
/* Update queue head pointer. */
- sethi %hi(8192 - 1), %g4
- or %g4, %lo(8192 - 1), %g4
and %g2, %g4, %g2
mov INTRQ_NONRESUM_MONDO_HEAD, %g4
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 2d63d768996..a31a0439244 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -680,22 +680,14 @@ static int starfire_set_time(u32 val)
static u32 hypervisor_get_time(void)
{
- register unsigned long func asm("%o5");
- register unsigned long arg0 asm("%o0");
- register unsigned long arg1 asm("%o1");
+ unsigned long ret, time;
int retries = 10000;
retry:
- func = HV_FAST_TOD_GET;
- arg0 = 0;
- arg1 = 0;
- __asm__ __volatile__("ta %6"
- : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
- : "0" (func), "1" (arg0), "2" (arg1),
- "i" (HV_FAST_TRAP));
- if (arg0 == HV_EOK)
- return arg1;
- if (arg0 == HV_EWOULDBLOCK) {
+ ret = sun4v_tod_get(&time);
+ if (ret == HV_EOK)
+ return time;
+ if (ret == HV_EWOULDBLOCK) {
if (--retries > 0) {
udelay(100);
goto retry;
@@ -709,20 +701,14 @@ retry:
static int hypervisor_set_time(u32 secs)
{
- register unsigned long func asm("%o5");
- register unsigned long arg0 asm("%o0");
+ unsigned long ret;
int retries = 10000;
retry:
- func = HV_FAST_TOD_SET;
- arg0 = secs;
- __asm__ __volatile__("ta %4"
- : "=&r" (func), "=&r" (arg0)
- : "0" (func), "1" (arg0),
- "i" (HV_FAST_TRAP));
- if (arg0 == HV_EOK)
+ ret = sun4v_tod_set(secs);
+ if (ret == HV_EOK)
return 0;
- if (arg0 == HV_EWOULDBLOCK) {
+ if (ret == HV_EWOULDBLOCK) {
if (--retries > 0) {
udelay(100);
goto retry;
@@ -862,7 +848,6 @@ fs_initcall(clock_init);
static unsigned long sparc64_init_timers(void)
{
struct device_node *dp;
- struct property *prop;
unsigned long clock;
#ifdef CONFIG_SMP
extern void smp_tick_init(void);
@@ -879,17 +864,15 @@ static unsigned long sparc64_init_timers(void)
if (manuf == 0x17 && impl == 0x13) {
/* Hummingbird, aka Ultra-IIe */
tick_ops = &hbtick_operations;
- prop = of_find_property(dp, "stick-frequency", NULL);
+ clock = of_getintprop_default(dp, "stick-frequency", 0);
} else {
tick_ops = &tick_operations;
- cpu_find_by_instance(0, &dp, NULL);
- prop = of_find_property(dp, "clock-frequency", NULL);
+ clock = local_cpu_data().clock_tick;
}
} else {
tick_ops = &stick_operations;
- prop = of_find_property(dp, "stick-frequency", NULL);
+ clock = of_getintprop_default(dp, "stick-frequency", 0);
}
- clock = *(unsigned int *) prop->value;
#ifdef CONFIG_SMP
smp_tick_init();
@@ -1365,6 +1348,7 @@ static int hypervisor_set_rtc_time(struct rtc_time *time)
return hypervisor_set_time(seconds);
}
+#ifdef CONFIG_PCI
static void bq4802_get_rtc_time(struct rtc_time *time)
{
unsigned char val = readb(bq4802_regs + 0x0e);
@@ -1436,6 +1420,7 @@ static int bq4802_set_rtc_time(struct rtc_time *time)
return 0;
}
+#endif /* CONFIG_PCI */
struct mini_rtc_ops {
void (*get_rtc_time)(struct rtc_time *);
@@ -1452,10 +1437,12 @@ static struct mini_rtc_ops hypervisor_rtc_ops = {
.set_rtc_time = hypervisor_set_rtc_time,
};
+#ifdef CONFIG_PCI
static struct mini_rtc_ops bq4802_rtc_ops = {
.get_rtc_time = bq4802_get_rtc_time,
.set_rtc_time = bq4802_set_rtc_time,
};
+#endif /* CONFIG_PCI */
static struct mini_rtc_ops *mini_rtc_ops;
@@ -1579,8 +1566,10 @@ static int __init rtc_mini_init(void)
mini_rtc_ops = &hypervisor_rtc_ops;
else if (this_is_starfire)
mini_rtc_ops = &starfire_rtc_ops;
+#ifdef CONFIG_PCI
else if (bq4802_regs)
mini_rtc_ops = &bq4802_rtc_ops;
+#endif /* CONFIG_PCI */
else
return -ENODEV;
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index d0fde36395b..00a9e3286c8 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -795,8 +795,7 @@ extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector
void __init cheetah_ecache_flush_init(void)
{
unsigned long largest_size, smallest_linesize, order, ver;
- struct device_node *dp;
- int i, instance, sz;
+ int i, sz;
/* Scan all cpu device tree nodes, note two values:
* 1) largest E-cache size
@@ -805,18 +804,20 @@ void __init cheetah_ecache_flush_init(void)
largest_size = 0UL;
smallest_linesize = ~0UL;
- instance = 0;
- while (!cpu_find_by_instance(instance, &dp, NULL)) {
+ for (i = 0; i < NR_CPUS; i++) {
unsigned long val;
- val = of_getintprop_default(dp, "ecache-size",
- (2 * 1024 * 1024));
+ val = cpu_data(i).ecache_size;
+ if (!val)
+ continue;
+
if (val > largest_size)
largest_size = val;
- val = of_getintprop_default(dp, "ecache-line-size", 64);
+
+ val = cpu_data(i).ecache_line_size;
if (val < smallest_linesize)
smallest_linesize = val;
- instance++;
+
}
if (largest_size == 0UL || smallest_linesize == ~0UL) {
@@ -2564,7 +2565,15 @@ void __init trap_init(void)
(TRAP_PER_CPU_TSB_HUGE_TEMP !=
offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
(TRAP_PER_CPU_IRQ_WORKLIST !=
- offsetof(struct trap_per_cpu, irq_worklist)))
+ offsetof(struct trap_per_cpu, irq_worklist)) ||
+ (TRAP_PER_CPU_CPU_MONDO_QMASK !=
+ offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
+ (TRAP_PER_CPU_DEV_MONDO_QMASK !=
+ offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
+ (TRAP_PER_CPU_RESUM_QMASK !=
+ offsetof(struct trap_per_cpu, resum_qmask)) ||
+ (TRAP_PER_CPU_NONRESUM_QMASK !=
+ offsetof(struct trap_per_cpu, nonresum_qmask)))
trap_per_cpu_offsets_are_bolixed_dave();
if ((TSB_CONFIG_TSB !=
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 6e5b01d779d..3010227fe24 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -23,6 +23,7 @@
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
+#include <linux/percpu.h>
#include <asm/head.h>
#include <asm/system.h>
@@ -43,8 +44,8 @@
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
-
-extern void device_scan(void);
+#include <asm/sstate.h>
+#include <asm/mdesc.h>
#define MAX_PHYS_ADDRESS (1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
@@ -60,8 +61,11 @@ unsigned long kern_linear_pte_xor[2] __read_mostly;
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
#ifndef CONFIG_DEBUG_PAGEALLOC
-/* A special kernel TSB for 4MB and 256MB linear mappings. */
-struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
+/* A special kernel TSB for 4MB and 256MB linear mappings.
+ * Space is allocated for this right after the trap table
+ * in arch/sparc64/kernel/head.S
+ */
+extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
#define MAX_BANKS 32
@@ -190,12 +194,9 @@ inline void flush_dcache_page_impl(struct page *page)
}
#define PG_dcache_dirty PG_arch_1
-#define PG_dcache_cpu_shift 24UL
-#define PG_dcache_cpu_mask (256UL - 1UL)
-
-#if NR_CPUS > 256
-#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
-#endif
+#define PG_dcache_cpu_shift 32UL
+#define PG_dcache_cpu_mask \
+ ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
#define dcache_dirty_cpu(page) \
(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
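The dirty-cpu bookkeeping in page->flags moves from an 8-bit field at bit 24 (hence the removed 256-cpu #error) to a field starting at bit 32 whose width tracks NR_CPUS. For the 4096-cpu ceiling enforced by the BUILD_BUG_ON()s added to paging_init() further down:

    roundup_pow_of_two(4096) = 4096
    ilog2(4096)              = 12
    PG_dcache_cpu_mask       = (1UL << 12) - 1 = 0xfff

0xfff (4095) still fits the 13-bit signed-immediate instruction fields that the new paging_init() comment calls out, which is why NR_CPUS is capped at 4096.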
@@ -557,26 +558,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
unsigned long pte,
unsigned long mmu)
{
- register unsigned long func asm("%o5");
- register unsigned long arg0 asm("%o0");
- register unsigned long arg1 asm("%o1");
- register unsigned long arg2 asm("%o2");
- register unsigned long arg3 asm("%o3");
-
- func = HV_FAST_MMU_MAP_PERM_ADDR;
- arg0 = vaddr;
- arg1 = 0;
- arg2 = pte;
- arg3 = mmu;
- __asm__ __volatile__("ta 0x80"
- : "=&r" (func), "=&r" (arg0),
- "=&r" (arg1), "=&r" (arg2),
- "=&r" (arg3)
- : "0" (func), "1" (arg0), "2" (arg1),
- "3" (arg2), "4" (arg3));
- if (arg0 != 0) {
+ unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
+
+ if (ret != 0) {
prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
- "errors with %lx\n", vaddr, 0, pte, mmu, arg0);
+ "errors with %lx\n", vaddr, 0, pte, mmu, ret);
prom_halt();
}
}
@@ -1313,20 +1299,16 @@ static void __init sun4v_ktsb_init(void)
void __cpuinit sun4v_ktsb_register(void)
{
- register unsigned long func asm("%o5");
- register unsigned long arg0 asm("%o0");
- register unsigned long arg1 asm("%o1");
- unsigned long pa;
+ unsigned long pa, ret;
pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
- func = HV_FAST_MMU_TSB_CTX0;
- arg0 = NUM_KTSB_DESCR;
- arg1 = pa;
- __asm__ __volatile__("ta %6"
- : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
- : "0" (func), "1" (arg0), "2" (arg1),
- "i" (HV_FAST_TRAP));
+ ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
+ if (ret != 0) {
+ prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
+ "errors with %lx\n", pa, ret);
+ prom_halt();
+ }
}
/* paging_init() sets up the page tables */
@@ -1334,6 +1316,9 @@ void __cpuinit sun4v_ktsb_register(void)
extern void cheetah_ecache_flush_init(void);
extern void sun4v_patch_tlb_handlers(void);
+extern void cpu_probe(void);
+extern void central_probe(void);
+
static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];
@@ -1345,9 +1330,24 @@ void __init paging_init(void)
unsigned long end_pfn, pages_avail, shift, phys_base;
unsigned long real_end, i;
+ /* These build-time checks make sure that the dcache_dirty_cpu()
+ * page->flags usage will work.
+ *
+ * When a page gets marked as dcache-dirty, we store the
+ * cpu number starting at bit 32 in the page->flags. Also,
+ * functions like clear_dcache_dirty_cpu use the cpu mask
+ * in 13-bit signed-immediate instruction fields.
+ */
+ BUILD_BUG_ON(FLAGS_RESERVED != 32);
+ BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
+ ilog2(roundup_pow_of_two(NR_CPUS)) > FLAGS_RESERVED);
+ BUILD_BUG_ON(NR_CPUS > 4096);
+
kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
+ sstate_booting();
+
/* Invalidate both kernel TSBs. */
memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
@@ -1416,8 +1416,13 @@ void __init paging_init(void)
kernel_physical_mapping_init();
+ real_setup_per_cpu_areas();
+
prom_build_devicetree();
+ if (tlb_type == hypervisor)
+ sun4v_mdesc_init();
+
{
unsigned long zones_size[MAX_NR_ZONES];
unsigned long zholes_size[MAX_NR_ZONES];
@@ -1434,7 +1439,10 @@ void __init paging_init(void)
zholes_size);
}
- device_scan();
+ prom_printf("Booting Linux...\n");
+
+ central_probe();
+ cpu_probe();
}
static void __init taint_real_pages(void)
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 0b4213720d4..f3e0c14e9ee 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -15,6 +15,25 @@
#include <asm/oplib.h>
#include <asm/system.h>
+int prom_service_exists(const char *service_name)
+{
+ int err = p1275_cmd("test", P1275_ARG(0, P1275_ARG_IN_STRING) |
+ P1275_INOUT(1, 1), service_name);
+
+ if (err)
+ return 0;
+ return 1;
+}
+
+void prom_sun4v_guest_soft_state(void)
+{
+ const char *svc = "SUNW,soft-state-supported";
+
+ if (!prom_service_exists(svc))
+ return;
+ p1275_cmd(svc, P1275_INOUT(0, 0));
+}
+
/* Reset and reboot the machine with the command 'bcommand'. */
void prom_reboot(const char *bcommand)
{
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index ef833a1c27e..0b7ffa5191c 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -6,7 +6,7 @@
#
config DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
- depends on (AGP || AGP=n) && PCI
+ depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 46c1b97748b..0474cac4a84 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -760,7 +760,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
static void extract_buf(struct entropy_store *r, __u8 *out)
{
- int i, x;
+ int i;
__u32 data[16], buf[5 + SHA_WORKSPACE_WORDS];
sha_init(buf);
@@ -772,9 +772,11 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 * attempts to find previous outputs), unless the hash
* function can be inverted.
*/
- for (i = 0, x = 0; i < r->poolinfo->poolwords; i += 16, x+=2) {
- sha_transform(buf, (__u8 *)r->pool+i, buf + 5);
- add_entropy_words(r, &buf[x % 5], 1);
+ for (i = 0; i < r->poolinfo->poolwords; i += 16) {
+ /* hash blocks of 16 words = 512 bits */
+ sha_transform(buf, (__u8 *)(r->pool + i), buf + 5);
+ /* feed back portion of the resulting hash */
+ add_entropy_words(r, &buf[i % 5], 1);
}
/*
@@ -782,7 +784,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
* portion of the pool while mixing, and hash one
* final time.
*/
- __add_entropy_words(r, &buf[x % 5], 1, data);
+ __add_entropy_words(r, &buf[i % 5], 1, data);
sha_transform(buf, (__u8 *)data, buf + 5);
/*
@@ -1018,37 +1020,44 @@ random_poll(struct file *file, poll_table * wait)
return mask;
}
-static ssize_t
-random_write(struct file * file, const char __user * buffer,
- size_t count, loff_t *ppos)
+static int
+write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
- int ret = 0;
size_t bytes;
__u32 buf[16];
const char __user *p = buffer;
- size_t c = count;
- while (c > 0) {
- bytes = min(c, sizeof(buf));
+ while (count > 0) {
+ bytes = min(count, sizeof(buf));
+ if (copy_from_user(&buf, p, bytes))
+ return -EFAULT;
- bytes -= copy_from_user(&buf, p, bytes);
- if (!bytes) {
- ret = -EFAULT;
- break;
- }
- c -= bytes;
+ count -= bytes;
p += bytes;
- add_entropy_words(&input_pool, buf, (bytes + 3) / 4);
- }
- if (p == buffer) {
- return (ssize_t)ret;
- } else {
- struct inode *inode = file->f_path.dentry->d_inode;
- inode->i_mtime = current_fs_time(inode->i_sb);
- mark_inode_dirty(inode);
- return (ssize_t)(p - buffer);
+ add_entropy_words(r, buf, (bytes + 3) / 4);
}
+
+ return 0;
+}
+
+static ssize_t
+random_write(struct file * file, const char __user * buffer,
+ size_t count, loff_t *ppos)
+{
+ size_t ret;
+ struct inode *inode = file->f_path.dentry->d_inode;
+
+ ret = write_pool(&blocking_pool, buffer, count);
+ if (ret)
+ return ret;
+ ret = write_pool(&nonblocking_pool, buffer, count);
+ if (ret)
+ return ret;
+
+ inode->i_mtime = current_fs_time(inode->i_sb);
+ mark_inode_dirty(inode);
+ return (ssize_t)count;
}
static int
@@ -1087,8 +1096,8 @@ random_ioctl(struct inode * inode, struct file * file,
return -EINVAL;
if (get_user(size, p++))
return -EFAULT;
- retval = random_write(file, (const char __user *) p,
- size, &file->f_pos);
+ retval = write_pool(&input_pool, (const char __user *)p,
+ size);
if (retval < 0)
return retval;
credit_entropy_store(&input_pool, ent_count);
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 5932c72f9e4..396dade731f 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -18,7 +18,7 @@ config FIREWIRE
your IEEE 1394 adapter.
To compile this driver as a module, say M here: the module will be
- called fw-core.
+ called firewire-core.
This is the "JUJU" FireWire stack, an alternative implementation
designed for robustness and simplicity. You can build either this
@@ -34,11 +34,11 @@ config FIREWIRE_OHCI
is the only chipset in use, so say Y here.
To compile this driver as a module, say M here: The module will be
- called fw-ohci.
+ called firewire-ohci.
If you also build ohci1394 of the classic IEEE 1394 driver stack,
- blacklist either ohci1394 or fw-ohci to let hotplug load the desired
- driver.
+ blacklist either ohci1394 or firewire-ohci to let hotplug load the
+ desired driver.
config FIREWIRE_SBP2
tristate "Support for storage devices (SBP-2 protocol driver)"
@@ -50,12 +50,12 @@ config FIREWIRE_SBP2
like scanners.
To compile this driver as a module, say M here: The module will be
- called fw-sbp2.
+ called firewire-sbp2.
You should also enable support for disks, CD-ROMs, etc. in the SCSI
configuration section.
If you also build sbp2 of the classic IEEE 1394 driver stack,
- blacklist either sbp2 or fw-sbp2 to let hotplug load the desired
- driver.
+ blacklist either sbp2 or firewire-sbp2 to let hotplug load the
+ desired driver.
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index fc7d59d4bce..a7c31e9039c 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -2,9 +2,11 @@
# Makefile for the Linux IEEE 1394 implementation
#
-fw-core-y += fw-card.o fw-topology.o fw-transaction.o fw-iso.o \
- fw-device.o fw-cdev.o
+firewire-core-y += fw-card.o fw-topology.o fw-transaction.o fw-iso.o \
+ fw-device.o fw-cdev.o
+firewire-ohci-y += fw-ohci.o
+firewire-sbp2-y += fw-sbp2.o
-obj-$(CONFIG_FIREWIRE) += fw-core.o
-obj-$(CONFIG_FIREWIRE_OHCI) += fw-ohci.o
-obj-$(CONFIG_FIREWIRE_SBP2) += fw-sbp2.o
+obj-$(CONFIG_FIREWIRE) += firewire-core.o
+obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
+obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index 0fa5bd54c6a..3ab3585d360 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -365,7 +365,7 @@ complete_transaction(struct fw_card *card, int rcode,
response->response.data, response->response.length);
}
-static ssize_t ioctl_send_request(struct client *client, void *buffer)
+static int ioctl_send_request(struct client *client, void *buffer)
{
struct fw_device *device = client->device;
struct fw_cdev_send_request *request = buffer;
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index c17342d3e6f..2e4cfa57126 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -268,7 +268,7 @@ static int ar_context_add_page(struct ar_context *ctx)
dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
- ctx->last_buffer->descriptor.branch_address = ab_bus | 1;
+ ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
ctx->last_buffer->next = ab;
ctx->last_buffer = ab;
@@ -417,7 +417,8 @@ ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
ctx->current_buffer = ab.next;
ctx->pointer = ctx->current_buffer->data;
- reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab.descriptor.branch_address);
+ reg_write(ctx->ohci, COMMAND_PTR(ctx->regs),
+ le32_to_cpu(ab.descriptor.branch_address));
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
flush_writes(ctx->ohci);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4d1cb5b855d..13eea47dceb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -620,7 +620,7 @@ config SENSORS_HDAPS
config SENSORS_APPLESMC
tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
- depends on HWMON && INPUT && X86
+ depends on INPUT && X86
select NEW_LEDS
select LEDS_CLASS
default n
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 366f4a1a2cb..fd1281f4220 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -1206,11 +1206,13 @@ static int __init applesmc_init(void)
}
ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_name.attr);
+ if (ret)
+ goto out_device;
/* Create key enumeration sysfs files */
ret = sysfs_create_group(&pdev->dev.kobj, &key_enumeration_group);
if (ret)
- goto out_device;
+ goto out_name;
/* create fan files */
count = applesmc_get_fan_count();
@@ -1310,6 +1312,8 @@ out_fan_1:
sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[1]);
out_key_enumeration:
sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
+out_name:
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
out_device:
platform_device_unregister(pdev);
out_driver:
@@ -1335,6 +1339,7 @@ static void __exit applesmc_exit(void)
sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[0]);
sysfs_remove_group(&pdev->dev.kobj, &fan_attribute_groups[1]);
sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
platform_device_unregister(pdev);
platform_driver_unregister(&applesmc_driver);
release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 75e3911810a..0328382df8f 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -176,6 +176,22 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
goto exit_free;
}
+ /* Check whether we are affected by erratum AE18 of Core processors:
+ readings might stop updating when the processor visits too deep a
+ sleep state; fixed in stepping D0 (6EC).
+ */
+
+ if ((c->x86_model == 0xe) && (c->x86_mask < 0xc)) {
+ /* check for microcode update */
+ rdmsr_on_cpu(data->id, MSR_IA32_UCODE_REV, &eax, &edx);
+ if (edx < 0x39) {
+ dev_err(&pdev->dev,
+ "Errata AE18 not fixed, update BIOS or "
+ "microcode of the CPU!\n");
+ goto exit_free;
+ }
+ }
+
 /* Some processors have a Tjmax of 85; the following magic should detect it.
 Intel won't disclose the information without a signed NDA, but
individuals cannot sign it. Catch(ed) 22.
@@ -193,6 +209,19 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
}
}
+ /* Intel says the above should not work for desktop Core2 processors,
+ but it seems to work. There is no other way to get the absolute
+ readings. Warn the user about this. First check whether this is a
+ desktop part: bit 50 of MSR_IA32_PLATFORM_ID should be 0.
+ */
+
+ rdmsr_safe_on_cpu(data->id, MSR_IA32_PLATFORM_ID, &eax, &edx);
+
+ if ((c->x86_model == 0xf) && (!(edx & 0x00040000))) {
+ dev_warn(&pdev->dev, "Using undocumented features, absolute "
+ "temperature might be wrong!\n");
+ }
+
platform_set_drvdata(pdev, data);
if ((err = sysfs_create_group(&pdev->dev.kobj, &coretemp_group)))
@@ -330,9 +359,6 @@ static int __init coretemp_init(void)
int i, err = -ENODEV;
struct pdev_entry *p, *n;
- printk(KERN_NOTICE DRVNAME ": This driver uses undocumented features "
- "of Core CPU. Temperature might be wrong!\n");
-
/* quick check if we run Intel */
if (cpu_data[0].x86_vendor != X86_VENDOR_INTEL)
goto exit;
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index c849c0c6ee9..d5ac422d73b 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -53,8 +53,8 @@ MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low")
/* The DS1621 registers */
#define DS1621_REG_TEMP 0xAA /* word, RO */
-#define DS1621_REG_TEMP_MIN 0xA1 /* word, RW */
-#define DS1621_REG_TEMP_MAX 0xA2 /* word, RW */
+#define DS1621_REG_TEMP_MIN 0xA2 /* word, RW */
+#define DS1621_REG_TEMP_MAX 0xA1 /* word, RW */
#define DS1621_REG_CONF 0xAC /* byte, RW */
#define DS1621_COM_START 0xEE /* no data */
#define DS1621_COM_STOP 0x22 /* no data */
@@ -328,9 +328,9 @@ static struct ds1621_data *ds1621_update_client(struct device *dev)
/* reset alarms if necessary */
new_conf = data->conf;
- if (data->temp < data->temp_min)
+ if (data->temp > data->temp_min)
new_conf &= ~DS1621_ALARM_TEMP_LOW;
- if (data->temp > data->temp_max)
+ if (data->temp < data->temp_max)
new_conf &= ~DS1621_ALARM_TEMP_HIGH;
if (data->conf != new_conf)
ds1621_write_value(client, DS1621_REG_CONF,
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 5aab23b93e2..f17e771e42f 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -132,7 +132,9 @@ int vid_from_reg(int val, u8 vrm)
val &= 0x7f;
return(val > 0x77 ? 0 : (1500000 - (val * 12500) + 500) / 1000);
default: /* report 0 for unknown */
- printk(KERN_INFO "hwmon-vid: requested unknown VRM version\n");
+ if (vrm)
+ printk(KERN_WARNING "hwmon-vid: Requested unsupported "
+ "VRM version (%u)\n", (unsigned int)vrm);
return 0;
}
}
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index a5b774b07cb..12cb40a975d 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -965,8 +965,10 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
case W687THF_DEVID:
sio_data->type = w83687thf;
break;
+ case 0xff: /* No device at all */
+ goto exit;
default:
- pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%x)\n", val);
+ pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%02x)\n", val);
goto exit;
}
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 2296d43a241..5f026b5d785 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -47,6 +47,7 @@
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
@@ -235,6 +236,9 @@ static int ether1394_open(struct net_device *dev)
/* This is called after an "ifdown" */
static int ether1394_stop(struct net_device *dev)
{
+ /* flush priv->wake */
+ flush_scheduled_work();
+
netif_stop_queue(dev);
return 0;
}
@@ -531,6 +535,37 @@ static void ether1394_init_dev(struct net_device *dev)
}
/*
+ * Wake the queue up after commonly encountered transmit failure conditions are
+ * hopefully over. Currently only tlabel exhaustion is accounted for.
+ */
+static void ether1394_wake_queue(struct work_struct *work)
+{
+ struct eth1394_priv *priv;
+ struct hpsb_packet *packet;
+
+ priv = container_of(work, struct eth1394_priv, wake);
+ packet = hpsb_alloc_packet(0);
+
+ /* This is really bad, but unjam the queue anyway. */
+ if (!packet)
+ goto out;
+
+ packet->host = priv->host;
+ packet->node_id = priv->wake_node;
+ /*
+ * A transaction label is all we really want. If we get one, it almost
+ * always means we can get a lot more because the ieee1394 core recycled
+ * a whole batch of tlabels, at last.
+ */
+ if (hpsb_get_tlabel(packet) == 0)
+ hpsb_free_tlabel(packet);
+
+ hpsb_free_packet(packet);
+out:
+ netif_wake_queue(priv->wake_dev);
+}
+
+/*
* This function is called every time a card is found. It is generally called
* when the module is installed. This is where we add all of our ethernet
* devices. One for each host.
@@ -564,16 +599,17 @@ static void ether1394_add_host(struct hpsb_host *host)
}
SET_MODULE_OWNER(dev);
-#if 0
- /* FIXME - Is this the correct parent device anyway? */
- SET_NETDEV_DEV(dev, &host->device);
-#endif
+
+ /* This used to be &host->device in Linux 2.6.20 and before. */
+ SET_NETDEV_DEV(dev, host->device.parent);
priv = netdev_priv(dev);
INIT_LIST_HEAD(&priv->ip_node_list);
spin_lock_init(&priv->lock);
priv->host = host;
priv->local_fifo = fifo_addr;
+ INIT_WORK(&priv->wake, ether1394_wake_queue);
+ priv->wake_dev = dev;
hi = hpsb_create_hostinfo(&eth1394_highlevel, host, sizeof(*hi));
if (hi == NULL) {
@@ -1390,22 +1426,17 @@ static int ether1394_prep_write_packet(struct hpsb_packet *p,
u64 addr, void *data, int tx_len)
{
p->node_id = node;
- p->data = NULL;
- p->tcode = TCODE_WRITEB;
- p->header[1] = host->node_id << 16 | addr >> 32;
- p->header[2] = addr & 0xffffffff;
+ if (hpsb_get_tlabel(p))
+ return -EAGAIN;
+ p->tcode = TCODE_WRITEB;
p->header_size = 16;
p->expect_response = 1;
-
- if (hpsb_get_tlabel(p)) {
- ETH1394_PRINT_G(KERN_ERR, "Out of tlabels\n");
- return -1;
- }
p->header[0] =
p->node_id << 16 | p->tlabel << 10 | 1 << 8 | TCODE_WRITEB << 4;
-
+ p->header[1] = host->node_id << 16 | addr >> 32;
+ p->header[2] = addr & 0xffffffff;
p->header[3] = tx_len << 16;
p->data_size = (tx_len + 3) & ~3;
p->data = data;
@@ -1451,7 +1482,7 @@ static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
packet = ether1394_alloc_common_packet(priv->host);
if (!packet)
- return -1;
+ return -ENOMEM;
if (ptask->tx_type == ETH1394_GASP) {
int length = tx_len + 2 * sizeof(quadlet_t);
@@ -1462,7 +1493,7 @@ static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
ptask->addr, ptask->skb->data,
tx_len)) {
hpsb_free_packet(packet);
- return -1;
+ return -EAGAIN;
}
ptask->packet = packet;
@@ -1471,7 +1502,7 @@ static int ether1394_send_packet(struct packet_task *ptask, unsigned int tx_len)
if (hpsb_send_packet(packet) < 0) {
ether1394_free_packet(packet);
- return -1;
+ return -EIO;
}
return 0;
@@ -1514,13 +1545,18 @@ static void ether1394_complete_cb(void *__ptask)
ptask->outstanding_pkts--;
if (ptask->outstanding_pkts > 0 && !fail) {
- int tx_len;
+ int tx_len, err;
/* Add the encapsulation header to the fragment */
tx_len = ether1394_encapsulate(ptask->skb, ptask->max_payload,
&ptask->hdr);
- if (ether1394_send_packet(ptask, tx_len))
+ err = ether1394_send_packet(ptask, tx_len);
+ if (err) {
+ if (err == -EAGAIN)
+ ETH1394_PRINT_G(KERN_ERR, "Out of tlabels\n");
+
ether1394_dg_complete(ptask, 1);
+ }
} else {
ether1394_dg_complete(ptask, fail);
}
@@ -1633,10 +1669,18 @@ static int ether1394_tx(struct sk_buff *skb, struct net_device *dev)
/* Add the encapsulation header to the fragment */
tx_len = ether1394_encapsulate(skb, max_payload, &ptask->hdr);
dev->trans_start = jiffies;
- if (ether1394_send_packet(ptask, tx_len))
- goto fail;
+ if (ether1394_send_packet(ptask, tx_len)) {
+ if (dest_node == (LOCAL_BUS | ALL_NODES))
+ goto fail;
+
+ /* Most failures of ether1394_send_packet are recoverable. */
+ netif_stop_queue(dev);
+ priv->wake_node = dest_node;
+ schedule_work(&priv->wake);
+ kmem_cache_free(packet_task_cache, ptask);
+ return NETDEV_TX_BUSY;
+ }
- netif_wake_queue(dev);
return NETDEV_TX_OK;
fail:
if (ptask)
@@ -1650,9 +1694,6 @@ fail:
priv->stats.tx_errors++;
spin_unlock_irqrestore(&priv->lock, flags);
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
-
/*
* FIXME: According to a patch from 2003-02-26, "returning non-zero
* causes serious problems" here, allegedly. Before that patch,
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
index a3439ee7cb4..4f3e2dd46f0 100644
--- a/drivers/ieee1394/eth1394.h
+++ b/drivers/ieee1394/eth1394.h
@@ -66,6 +66,10 @@ struct eth1394_priv {
int bc_dgl; /* Outgoing broadcast datagram label */
struct list_head ip_node_list; /* List of IP capable nodes */
struct unit_directory *ud_list[ALL_NODES]; /* Cached unit dir list */
+
+ struct work_struct wake; /* Wake up after xmit failure */
+ struct net_device *wake_dev; /* Stupid backlink for .wake */
+ nodeid_t wake_node; /* Destination of failed xmit */
};
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index d382500f421..f1d05eeb9f5 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -936,6 +936,7 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
struct hpsb_packet *packet;
int header_length = req->req.misc & 0xffff;
int expect_response = req->req.misc >> 16;
+ size_t data_size;
if (header_length > req->req.length || header_length < 12 ||
header_length > FIELD_SIZEOF(struct hpsb_packet, header)) {
@@ -945,7 +946,8 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
return sizeof(struct raw1394_request);
}
- packet = hpsb_alloc_packet(req->req.length - header_length);
+ data_size = req->req.length - header_length;
+ packet = hpsb_alloc_packet(data_size);
req->packet = packet;
if (!packet)
return -ENOMEM;
@@ -960,7 +962,7 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
if (copy_from_user
(packet->data, int2ptr(req->req.sendb) + header_length,
- packet->data_size)) {
+ data_size)) {
req->req.error = RAW1394_ERROR_MEMFAULT;
req->req.length = 0;
queue_complete_req(req);
@@ -974,7 +976,7 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
packet->host = fi->host;
packet->expect_response = expect_response;
packet->header_size = header_length;
- packet->data_size = req->req.length - header_length;
+ packet->data_size = data_size;
req->req.length = 0;
hpsb_set_packet_complete_task(packet,
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 4cb6fa2bcfb..875eadd5e8f 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -70,6 +70,7 @@
#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index e840434a96d..40c004a2697 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1297,26 +1297,29 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
- /* Check for duplicate REQ and stale connections. */
+ /* Check for possible duplicate REQ. */
spin_lock_irqsave(&cm.lock, flags);
timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
- if (!timewait_info)
- timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
-
if (timewait_info) {
cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
- cm_cleanup_timewait(cm_id_priv->timewait_info);
spin_unlock_irqrestore(&cm.lock, flags);
if (cur_cm_id_priv) {
cm_dup_req_handler(work, cur_cm_id_priv);
cm_deref_id(cur_cm_id_priv);
- } else
- cm_issue_rej(work->port, work->mad_recv_wc,
- IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
- NULL, 0);
- listen_cm_id_priv = NULL;
- goto out;
+ }
+ return NULL;
+ }
+
+ /* Check for stale connections. */
+ timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
+ if (timewait_info) {
+ cm_cleanup_timewait(cm_id_priv->timewait_info);
+ spin_unlock_irqrestore(&cm.lock, flags);
+ cm_issue_rej(work->port, work->mad_recv_wc,
+ IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
+ NULL, 0);
+ return NULL;
}
/* Find matching listen request. */
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 027664979fe..eef415b12b2 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -2284,10 +2284,10 @@ void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
struct mthca_next_seg *next;
/*
- * For SRQs, all WQEs generate a CQE, so we're always at the
- * end of the doorbell chain.
+ * For SRQs, all receive WQEs generate a CQE, so we're always
+ * at the end of the doorbell chain.
*/
- if (qp->ibqp.srq) {
+ if (qp->ibqp.srq && !is_send) {
*new_wqe = 0;
return;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 158759e28a5..285c143115c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -156,7 +156,7 @@ struct ipoib_cm_data {
* - and then invoke a Destroy QP or Reset QP.
*
* We use the second option and wait for a completion on the
- * rx_drain_qp before destroying QPs attached to our SRQ.
+ * same CQ before destroying QPs attached to our SRQ.
*/
enum ipoib_cm_state {
@@ -199,7 +199,6 @@ struct ipoib_cm_dev_priv {
struct ib_srq *srq;
struct ipoib_cm_rx_buf *srq_ring;
struct ib_cm_id *id;
- struct ib_qp *rx_drain_qp; /* generates WR described in 10.3.1 */
struct list_head passive_ids; /* state: LIVE */
struct list_head rx_error_list; /* state: ERROR */
struct list_head rx_flush_list; /* state: FLUSH, drain not started */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index f133b56fd97..076a0bbb63d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -69,8 +69,9 @@ static struct ib_qp_attr ipoib_cm_err_attr = {
#define IPOIB_CM_RX_DRAIN_WRID 0x7fffffff
-static struct ib_recv_wr ipoib_cm_rx_drain_wr = {
- .wr_id = IPOIB_CM_RX_DRAIN_WRID
+static struct ib_send_wr ipoib_cm_rx_drain_wr = {
+ .wr_id = IPOIB_CM_RX_DRAIN_WRID,
+ .opcode = IB_WR_SEND,
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
@@ -163,16 +164,22 @@ partial_error:
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv* priv)
{
- struct ib_recv_wr *bad_wr;
+ struct ib_send_wr *bad_wr;
+ struct ipoib_cm_rx *p;
- /* rx_drain_qp send queue depth is 1, so
+ /* We only reserved 1 extra slot in CQ for drain WRs, so
* make sure we have at most 1 outstanding WR. */
if (list_empty(&priv->cm.rx_flush_list) ||
!list_empty(&priv->cm.rx_drain_list))
return;
- if (ib_post_recv(priv->cm.rx_drain_qp, &ipoib_cm_rx_drain_wr, &bad_wr))
- ipoib_warn(priv, "failed to post rx_drain wr\n");
+ /*
+ * QPs on flush list are error state. This way, a "flush
+ * error" WC will be immediately generated for each WR we post.
+ */
+ p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
+ if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
+ ipoib_warn(priv, "failed to post drain wr\n");
list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}
@@ -199,10 +206,10 @@ static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_init_attr attr = {
.event_handler = ipoib_cm_rx_event_handler,
- .send_cq = priv->cq, /* does not matter, we never send anything */
+ .send_cq = priv->cq, /* For drain WR */
.recv_cq = priv->cq,
.srq = priv->cm.srq,
- .cap.max_send_wr = 1, /* FIXME: 0 Seems not to work */
+ .cap.max_send_wr = 1, /* For drain WR */
.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
@@ -242,6 +249,27 @@ static int ipoib_cm_modify_rx_qp(struct net_device *dev,
ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
return ret;
}
+
+ /*
+ * Current Mellanox HCA firmware won't generate completions
+ * with error for drain WRs unless the QP has been moved to
+ * RTS first. This work-around leaves a window where a QP has
+ * moved to error asynchronously, but this will eventually get
+ * fixed in firmware, so let's not error out if modify QP
+ * fails.
+ */
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
+ return 0;
+ }
+ ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+ if (ret) {
+ ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
+ return 0;
+ }
+
return 0;
}
@@ -623,38 +651,11 @@ static void ipoib_cm_tx_completion(struct ib_cq *cq, void *tx_ptr)
int ipoib_cm_dev_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ib_qp_init_attr qp_init_attr = {
- .send_cq = priv->cq, /* does not matter, we never send anything */
- .recv_cq = priv->cq,
- .cap.max_send_wr = 1, /* FIXME: 0 Seems not to work */
- .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
- .cap.max_recv_wr = 1,
- .cap.max_recv_sge = 1, /* FIXME: 0 Seems not to work */
- .sq_sig_type = IB_SIGNAL_ALL_WR,
- .qp_type = IB_QPT_UC,
- };
int ret;
if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
return 0;
- priv->cm.rx_drain_qp = ib_create_qp(priv->pd, &qp_init_attr);
- if (IS_ERR(priv->cm.rx_drain_qp)) {
- printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
- ret = PTR_ERR(priv->cm.rx_drain_qp);
- return ret;
- }
-
- /*
- * We put the QP in error state directly. This way, a "flush
- * error" WC will be immediately generated for each WR we post.
- */
- ret = ib_modify_qp(priv->cm.rx_drain_qp, &ipoib_cm_err_attr, IB_QP_STATE);
- if (ret) {
- ipoib_warn(priv, "failed to modify drain QP to error: %d\n", ret);
- goto err_qp;
- }
-
priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
if (IS_ERR(priv->cm.id)) {
printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
@@ -676,8 +677,6 @@ err_listen:
ib_destroy_cm_id(priv->cm.id);
err_cm:
priv->cm.id = NULL;
-err_qp:
- ib_destroy_qp(priv->cm.rx_drain_qp);
return ret;
}
@@ -740,7 +739,6 @@ void ipoib_cm_dev_stop(struct net_device *dev)
kfree(p);
}
- ib_destroy_qp(priv->cm.rx_drain_qp);
cancel_delayed_work(&priv->cm.stale_task);
}
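The ipoib_cm hunks above remove the dedicated rx_drain_qp: to learn when all receive completions for the SRQ have drained, the driver now posts one send WR on a connection QP that is already in the error state, so the HCA completes it immediately with a flush error on the shared CQ. A hedged sketch of that trick with assumed names; only the verbs calls already visible in the diff are relied on.

    #include <rdma/ib_verbs.h>

    #define MY_DRAIN_WRID   0x7fffffffULL   /* sentinel recognised by the CQ handler */

    /* Post a single send WR on a QP already in the error state; it completes
     * at once with IB_WC_WR_FLUSH_ERR and acts as a marker on the CQ that is
     * shared with the receive path. */
    static int my_post_drain(struct ib_qp *err_qp)
    {
            struct ib_send_wr wr = {
                    .wr_id  = MY_DRAIN_WRID,
                    .opcode = IB_WR_SEND,
            };
            struct ib_send_wr *bad_wr;

            return ib_post_send(err_qp, &wr, &bad_wr);
    }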
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 58926da0ae1..f44c94abd88 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -113,7 +113,6 @@ config PMAC_SMU
config PMAC_APM_EMU
tristate "APM emulation"
- select SYS_SUPPORTS_APM_EMULATION
select APM_EMULATION
depends on ADB_PMU && PM
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index d25d3be8fcd..165f81d16d0 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -436,7 +436,7 @@ typedef struct _MPT_SAS_MGMT {
typedef struct _mpt_ioctl_events {
u32 event; /* Specified by define above */
u32 eventContext; /* Index or counter */
- int data[2]; /* First 8 bytes of Event Data */
+ u32 data[2]; /* First 8 bytes of Event Data */
} MPT_IOCTL_EVENTS;
/*
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index fa0f7761652..3bd94f11e7d 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2463,11 +2463,11 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE;
ioc->events[idx].eventContext = ioc->eventContext;
- ioc->events[idx].data[0] = (pReq->LUN[1] << 24) ||
- (MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA << 16) ||
- (sc->device->channel << 8) || sc->device->id;
+ ioc->events[idx].data[0] = (pReq->LUN[1] << 24) |
+ (MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA << 16) |
+ (sc->device->channel << 8) | sc->device->id;
- ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12];
+ ioc->events[idx].data[1] = (sense_data[13] << 8) | sense_data[12];
ioc->eventContext++;
if (hd->ioc->pcidev->vendor ==
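The mptscsih hunk above swaps logical OR (||) for bitwise OR (|) when packing the event data words; with ||, each parenthesised term collapses to 0 or 1 and the shifted fields are lost. A tiny standalone illustration of the difference:

    #include <stdio.h>

    int main(void)
    {
            unsigned int channel = 2, id = 5;

            unsigned int wrong = (channel << 8) || id;      /* logical OR: just 1 */
            unsigned int right = (channel << 8) | id;       /* bitwise OR: 0x205  */

            printf("wrong=%#x right=%#x\n", wrong, right);  /* wrong=0x1 right=0x205 */
            return 0;
    }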
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 30fd479fea5..1798a9f9fb2 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2280,7 +2280,6 @@ config GFAR_NAPI
config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE
- select UCC_FAST
help
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 571d82f8008..7df23dc2819 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -566,6 +566,7 @@ static int __devinit dfx_register(struct device *bdev)
bp->base.mem = ioremap_nocache(bar_start, bar_len);
if (!bp->base.mem) {
printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
+ err = -ENOMEM;
goto err_out_region;
}
} else {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index cbc7febe9cd..9ec35b7a820 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1325,7 +1325,10 @@ e1000_sw_init(struct e1000_adapter *adapter)
spin_lock_init(&adapter->tx_queue_lock);
#endif
- atomic_set(&adapter->irq_sem, 1);
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ atomic_set(&adapter->irq_sem, 0);
+ e1000_irq_disable(adapter);
+
spin_lock_init(&adapter->stats_lock);
set_bit(__E1000_DOWN, &adapter->flags);
@@ -1431,6 +1434,10 @@ e1000_open(struct net_device *netdev)
/* From here on the code is the same as e1000_up() */
clear_bit(__E1000_DOWN, &adapter->flags);
+#ifdef CONFIG_E1000_NAPI
+ netif_poll_enable(netdev);
+#endif
+
e1000_irq_enable(adapter);
/* fire a link status change interrupt to start the watchdog */
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 602872dbe15..e85a933a476 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0058"
+#define DRV_VERSION "EHEA_0061"
#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index f6e0cb1ada1..152bb2016a2 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -428,7 +428,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
}
skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
cqe->num_bytes_transfered - 4);
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(port->netdev, skb, cqe);
} else if (rq == 2) { /* RQ2 */
skb = get_skb_by_index(skb_arr_rq2,
skb_arr_rq2_len, cqe);
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index dfbd5809d74..f8d63d39f59 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -51,8 +51,8 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
if (obj < bitmap->max) {
set_bit(obj, bitmap->table);
+ bitmap->last = (obj + 1) & (bitmap->max - 1);
obj |= bitmap->top;
- bitmap->last = obj + 1;
} else
obj = -1;
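The mlx4 hunk above records the next search position from the untagged object number, masked by the bitmap size, before OR-ing in bitmap->top; previously last could end up beyond the bitmap and the round-robin scan degraded. A small userspace sketch of the same scheme, assuming a power-of-two table size:

    #include <stdio.h>

    #define MAX_OBJ 8                       /* must be a power of two */

    static unsigned char table[MAX_OBJ];    /* 1 = in use */
    static unsigned int last;               /* where the next search starts */

    /* Round-robin allocator: scan from 'last', wrap once, and remember the
     * slot after the one handed out so allocations rotate through the space. */
    static int bitmap_alloc(void)
    {
            unsigned int i, obj;

            for (i = 0; i < MAX_OBJ; i++) {
                    obj = (last + i) & (MAX_OBJ - 1);
                    if (!table[obj]) {
                            table[obj] = 1;
                            last = (obj + 1) & (MAX_OBJ - 1);  /* masked, as in the fix */
                            return obj;
                    }
            }
            return -1;                      /* exhausted */
    }

    int main(void)
    {
            printf("%d %d %d\n", bitmap_alloc(), bitmap_alloc(), bitmap_alloc());
            return 0;
    }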
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 832fd69a0e5..adfbe81693a 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -364,7 +364,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
/* for SFP-module set SIGDET polarity to low */
ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
ctrl |= PHY_M_FIB_SIGD_POL;
- gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
}
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
@@ -658,7 +658,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
const u8 *addr = hw->dev[port]->dev_addr;
sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
- sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR|GPC_ENA_PAUSE);
+ sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
@@ -1432,7 +1432,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
tcpsum = offset << 16; /* sum start */
tcpsum |= offset + skb->csum_offset; /* sum write */
- ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
+ ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
if (ip_hdr(skb)->protocol == IPPROTO_UDP)
ctrl |= UDPTCP;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 5efb5afc45b..b8c4a3b5ead 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1149,7 +1149,7 @@ enum {
PHY_M_IS_JABBER = 1<<0, /* Jabber */
PHY_M_DEF_MSK = PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE
- | PHY_M_IS_FIFO_ERROR,
+ | PHY_M_IS_DUP_CHANGE,
PHY_M_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
};
@@ -1732,28 +1732,6 @@ enum {
/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
enum {
- GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */
- GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */
- GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */
- GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */
- GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */
- GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */
- GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */
- GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */
- GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */
- GPC_ANEG_0 = 1<<19, /* ANEG[0] */
- GPC_ENA_XC = 1<<18, /* Enable MDI crossover */
- GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */
- GPC_ANEG_3 = 1<<16, /* ANEG[3] */
- GPC_ANEG_2 = 1<<15, /* ANEG[2] */
- GPC_ANEG_1 = 1<<14, /* ANEG[1] */
- GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */
- GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */
- GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */
- GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */
- GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */
- GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */
- /* Bits 7..2: reserved */
GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */
GPC_RST_SET = 1<<0, /* Set GPHY Reset */
};
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 246fac0e800..3df3c60263d 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -311,7 +311,7 @@ static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
local_info_t *local;
struct ieee80211_hdr_4addr *hdr;
u16 fc;
- int hdr_len, res;
+ int prefix_len, postfix_len, hdr_len, res;
iface = netdev_priv(skb->dev);
local = iface->local;
@@ -337,10 +337,13 @@ static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
if (skb == NULL)
return NULL;
- if ((skb_headroom(skb) < crypt->ops->extra_mpdu_prefix_len ||
- skb_tailroom(skb) < crypt->ops->extra_mpdu_postfix_len) &&
- pskb_expand_head(skb, crypt->ops->extra_mpdu_prefix_len,
- crypt->ops->extra_mpdu_postfix_len, GFP_ATOMIC)) {
+ prefix_len = crypt->ops->extra_mpdu_prefix_len +
+ crypt->ops->extra_msdu_prefix_len;
+ postfix_len = crypt->ops->extra_mpdu_postfix_len +
+ crypt->ops->extra_msdu_postfix_len;
+ if ((skb_headroom(skb) < prefix_len ||
+ skb_tailroom(skb) < postfix_len) &&
+ pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC)) {
kfree_skb(skb);
return NULL;
}
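The hostap hunk above sums the per-MPDU and per-MSDU prefix/postfix requirements before deciding whether the skb must be reallocated, rather than checking only the MPDU part. A hedged kernel-style sketch of that headroom check; everything except the skb_headroom()/skb_tailroom()/pskb_expand_head() calls is illustrative.

    #include <linux/skbuff.h>
    #include <linux/gfp.h>

    /* Make sure 'skb' has room for all encryption prefixes and postfixes;
     * expand it once, up front, instead of checking each layer separately. */
    static int my_make_room(struct sk_buff *skb, int prefix_len, int postfix_len)
    {
            if (skb_headroom(skb) >= prefix_len && skb_tailroom(skb) >= postfix_len)
                    return 0;               /* already big enough */

            /* Reallocates the skb head; may fail under memory pressure. */
            return pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC);
    }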
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index dd070cccf32..f49eb068c7d 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -378,9 +378,10 @@ islpci_eth_receive(islpci_private *priv)
display_buffer((char *) skb->data, skb->len);
#endif
/* take care of monitor mode and spy monitoring. */
- if (unlikely(priv->iw_mode == IW_MODE_MONITOR))
+ if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) {
+ skb->dev = ndev;
discard = islpci_monitor_rx(priv, &skb);
- else {
+ } else {
if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
/* The packet has a rx_annex. Read it for spy monitoring, Then
* remove it, while keeping the 2 leading MAC addr.
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index ddff40c4212..821cde65e36 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -1127,6 +1127,7 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
int retval = 0;
unsigned long flags;
+ zfcp_adapter_scsi_unregister(adapter);
device_unregister(&adapter->generic_services);
zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 81680efa172..1c8f71a5985 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -189,9 +189,7 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
* @ccw_device: pointer to belonging ccw device
*
* This function gets called by the common i/o layer and sets an adapter
- * into state offline. Setting an fcp device offline means that it will be
- * unregistered from the SCSI stack and that the adapter will be shut down
- * asynchronously.
+ * into state offline.
*/
static int
zfcp_ccw_set_offline(struct ccw_device *ccw_device)
@@ -202,7 +200,6 @@ zfcp_ccw_set_offline(struct ccw_device *ccw_device)
adapter = dev_get_drvdata(&ccw_device->dev);
zfcp_erp_adapter_shutdown(adapter, 0);
zfcp_erp_wait(adapter);
- zfcp_adapter_scsi_unregister(adapter);
zfcp_erp_thread_kill(adapter);
zfcp_adapter_debug_unregister(adapter);
up(&zfcp_data.config_sema);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index a8b02542ac2..0eb31e162b1 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -156,44 +156,30 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
kfree(fsf_req);
}
-/**
- * zfcp_fsf_req_dismiss - dismiss a single fsf request
- */
-static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter,
- struct zfcp_fsf_req *fsf_req,
- unsigned int counter)
-{
- u64 dbg_tmp[2];
-
- dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
- dbg_tmp[1] = (u64) counter;
- debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
- list_del(&fsf_req->list);
- fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
- zfcp_fsf_req_complete(fsf_req);
-}
-
-/**
- * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests
+/*
+ * Never ever call this without shutting down the adapter first.
+ * Otherwise the adapter would continue using and corrupting s390 storage.
+ * Included BUG_ON() call to ensure this is done.
+ * ERP is supposed to be the only user of this function.
*/
void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{
- struct zfcp_fsf_req *request, *tmp;
+ struct zfcp_fsf_req *fsf_req, *tmp;
unsigned long flags;
LIST_HEAD(remove_queue);
- unsigned int i, counter;
+ unsigned int i;
+ BUG_ON(atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status));
spin_lock_irqsave(&adapter->req_list_lock, flags);
atomic_set(&adapter->reqs_active, 0);
- for (i=0; i<REQUEST_LIST_SIZE; i++)
+ for (i = 0; i < REQUEST_LIST_SIZE; i++)
list_splice_init(&adapter->req_list[i], &remove_queue);
-
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
- counter = 0;
- list_for_each_entry_safe(request, tmp, &remove_queue, list) {
- zfcp_fsf_req_dismiss(adapter, request, counter);
- counter++;
+ list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) {
+ list_del(&fsf_req->list);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+ zfcp_fsf_req_complete(fsf_req);
}
}
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 16e2d64658a..0acf6db0a08 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -569,6 +569,9 @@ zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
int retval = 0;
static unsigned int unique_id = 0;
+ if (adapter->scsi_host)
+ goto out;
+
/* register adapter as SCSI host with mid layer of SCSI stack */
adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
sizeof (struct zfcp_adapter *));
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index d28c14e23c3..572034ceb14 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1753,23 +1753,9 @@ config SUN3X_ESP
The ESP was an on-board SCSI controller used on Sun 3/80
machines. Say Y here to compile in support for it.
-config SCSI_ESP_CORE
- tristate "ESP Scsi Driver Core"
- depends on SCSI
- select SCSI_SPI_ATTRS
- help
- This is a core driver for NCR53c9x based scsi chipsets,
- also known as "ESP" for Emulex Scsi Processor or
- Enhanced Scsi Processor. This driver does not exist by
- itself, there are front-end drivers which, when enabled,
- select and enable this driver. One example is SCSI_SUNESP.
- These front-end drivers provide probing, DMA, and register
- access support for the core driver.
-
config SCSI_SUNESP
tristate "Sparc ESP Scsi Driver"
depends on SBUS && SCSI
- select SCSI_ESP_CORE
help
This is the driver for the Sun ESP SCSI host adapter. The ESP
chipset is present in most SPARC SBUS-based computers.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 51e884fa10b..b1b63279158 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -106,8 +106,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
-obj-$(CONFIG_SCSI_ESP_CORE) += esp_scsi.o
-obj-$(CONFIG_SCSI_SUNESP) += sun_esp.o
+obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
obj-$(CONFIG_SCSI_GDTH) += gdth.o
obj-$(CONFIG_SCSI_INITIO) += initio.o
obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
@@ -121,7 +120,7 @@ obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
obj-$(CONFIG_SCSI_PPA) += ppa.o
obj-$(CONFIG_SCSI_IMM) += imm.o
-obj-$(CONFIG_JAZZ_ESP) += NCR53C9x.o jazz_esp.o
+obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o
obj-$(CONFIG_SCSI_FCAL) += fcal.o
obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 1e82c69b36b..8dcfe4ec35c 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -146,7 +146,7 @@ static char *aac_get_status_string(u32 status);
static int nondasd = -1;
static int dacmode = -1;
-static int commit = -1;
+int aac_commit = -1;
int startup_timeout = 180;
int aif_timeout = 120;
@@ -154,7 +154,7 @@ module_param(nondasd, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
module_param(dacmode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
-module_param(commit, int, S_IRUGO|S_IWUSR);
+module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on");
module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for adapter to have it's kernel up and\nrunning. This is typically adjusted for large systems that do not have a BIOS.");
@@ -173,6 +173,9 @@ int expose_physicals = -1;
module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on");
+int aac_reset_devices = 0;
+module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
struct fib *fibptr) {
@@ -246,7 +249,7 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
aac_fib_complete(fibptr);
/* Send a CT_COMMIT_CONFIG to enable discovery of devices */
if (status >= 0) {
- if ((commit == 1) || commit_flag) {
+ if ((aac_commit == 1) || commit_flag) {
struct aac_commit_config * dinfo;
aac_fib_init(fibptr);
dinfo = (struct aac_commit_config *) fib_data(fibptr);
@@ -261,7 +264,7 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
1, 1,
NULL, NULL);
aac_fib_complete(fibptr);
- } else if (commit == 0) {
+ } else if (aac_commit == 0) {
printk(KERN_WARNING
"aac_get_config_status: Foreign device configurations are being ignored\n");
}
@@ -340,7 +343,7 @@ int aac_get_containers(struct aac_dev *dev)
static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigned int offset, unsigned int len)
{
void *buf;
- unsigned int transfer_len;
+ int transfer_len;
struct scatterlist *sg = scsicmd->request_buffer;
if (scsicmd->use_sg) {
@@ -351,7 +354,7 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
transfer_len = min(scsicmd->request_bufflen, len + offset);
}
transfer_len -= offset;
- if (buf && transfer_len)
+ if (buf && transfer_len > 0)
memcpy(buf + offset, data, transfer_len);
if (scsicmd->use_sg)
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 45ca3e80161..c81edf36913 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1823,9 +1823,12 @@ int aac_send_shutdown(struct aac_dev *dev);
int aac_probe_container(struct aac_dev *dev, int cid);
int _aac_rx_init(struct aac_dev *dev);
int aac_rx_select_comm(struct aac_dev *dev, int comm);
+int aac_rx_deliver_producer(struct fib * fib);
extern int numacb;
extern int acbsize;
extern char aac_driver_version[];
extern int startup_timeout;
extern int aif_timeout;
extern int expose_physicals;
+extern int aac_reset_devices;
+extern int aac_commit;
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 291cd14f4e9..ae978a373c5 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -378,7 +378,7 @@ static int aac_rx_check_health(struct aac_dev *dev)
*
* Will send a fib, returning 0 if successful.
*/
-static int aac_rx_deliver_producer(struct fib * fib)
+int aac_rx_deliver_producer(struct fib * fib)
{
struct aac_dev *dev = fib->dev;
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
@@ -488,6 +488,8 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
return -EINVAL;
if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
return -ENODEV;
+ if (startup_timeout < 300)
+ startup_timeout = 300;
return 0;
}
@@ -542,7 +544,7 @@ int _aac_rx_init(struct aac_dev *dev)
dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
- if ((((status & 0x0c) != 0x0c) || reset_devices) &&
+ if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
!aac_rx_restart_adapter(dev, 0))
++restart;
/*
@@ -594,6 +596,8 @@ int _aac_rx_init(struct aac_dev *dev)
}
msleep(1);
}
+ if (restart)
+ aac_commit = 1;
/*
* Fill in the common function dispatch table.
*/
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index f4b5e9742ab..85b91bc578c 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -5,7 +5,7 @@
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -257,6 +257,11 @@ static void aac_sa_start_adapter(struct aac_dev *dev)
NULL, NULL, NULL, NULL, NULL);
}
+static int aac_sa_restart_adapter(struct aac_dev *dev, int bled)
+{
+ return -EINVAL;
+}
+
/**
* aac_sa_check_health
* @dev: device to check if healthy
@@ -366,7 +371,9 @@ int aac_sa_init(struct aac_dev *dev)
dev->a_ops.adapter_notify = aac_sa_notify_adapter;
dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
dev->a_ops.adapter_check_health = aac_sa_check_health;
+ dev->a_ops.adapter_restart = aac_sa_restart_adapter;
dev->a_ops.adapter_intr = aac_sa_intr;
+ dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
dev->a_ops.adapter_ioremap = aac_sa_ioremap;
/*
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index c328596def3..6066998ed56 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -106,6 +106,7 @@ static void make_expression(expression_t *immed, int value);
static void add_conditional(symbol_t *symbol);
static void add_version(const char *verstring);
static int is_download_const(expression_t *immed);
+void yyerror(const char *string);
#define SRAM_SYMNAME "SRAM_BASE"
#define SCB_SYMNAME "SCB_BASE"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
index 439f760b34b..ff46aa6801b 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
@@ -65,6 +65,7 @@
static symbol_t *macro_symbol;
static void add_macro_arg(const char *argtext, int position);
+void mmerror(const char *string);
%}
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 9a14a6d9727..c0d0b7d7a8c 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -290,6 +290,7 @@ static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
static inline int asd_clear_nexus(struct sas_task *task)
{
int res = TMF_RESP_FUNC_FAILED;
+ int leftover;
struct asd_ascb *tascb = task->lldd_task;
unsigned long flags;
@@ -298,10 +299,12 @@ static inline int asd_clear_nexus(struct sas_task *task)
res = asd_clear_nexus_tag(task);
else
res = asd_clear_nexus_index(task);
- wait_for_completion_timeout(&tascb->completion,
- AIC94XX_SCB_TIMEOUT);
+ leftover = wait_for_completion_timeout(&tascb->completion,
+ AIC94XX_SCB_TIMEOUT);
ASD_DPRINTK("came back from clear nexus\n");
spin_lock_irqsave(&task->task_state_lock, flags);
+ if (leftover < 1)
+ res = TMF_RESP_FUNC_FAILED;
if (task->task_state_flags & SAS_TASK_STATE_DONE)
res = TMF_RESP_FUNC_COMPLETE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -350,6 +353,7 @@ int asd_abort_task(struct sas_task *task)
unsigned long flags;
struct asd_ascb *ascb = NULL;
struct scb *scb;
+ int leftover;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
@@ -455,9 +459,11 @@ int asd_abort_task(struct sas_task *task)
break;
case TF_TMF_TASK_DONE + 0xFF00: /* done but not reported yet */
res = TMF_RESP_FUNC_FAILED;
- wait_for_completion_timeout(&tascb->completion,
- AIC94XX_SCB_TIMEOUT);
+ leftover = wait_for_completion_timeout(&tascb->completion,
+ AIC94XX_SCB_TIMEOUT);
spin_lock_irqsave(&task->task_state_lock, flags);
+ if (leftover < 1)
+ res = TMF_RESP_FUNC_FAILED;
if (task->task_state_flags & SAS_TASK_STATE_DONE)
res = TMF_RESP_FUNC_COMPLETE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
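Both aic94xx hunks above start checking the value returned by wait_for_completion_timeout(): it is 0 when the wait timed out and the remaining jiffies otherwise, so ignoring it let a timed-out TMF be reported as handled. A minimal sketch of the pattern with assumed names:

    #include <linux/completion.h>
    #include <linux/errno.h>

    /* Wait up to 'timeout' jiffies for 'done'; a zero return from
     * wait_for_completion_timeout() means the completion never arrived,
     * which the hunks above now map to TMF_RESP_FUNC_FAILED. */
    static int my_wait(struct completion *done, unsigned long timeout)
    {
            unsigned long left = wait_for_completion_timeout(done, timeout);

            return left ? 0 : -ETIMEDOUT;
    }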
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 4baa79e6867..fa6ff295e56 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3954,6 +3954,13 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
spin_unlock_irq(scsi_cmd->device->host->host_lock);
ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
spin_lock_irq(scsi_cmd->device->host->host_lock);
+
+ list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
+ if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
+ rc = -EIO;
+ break;
+ }
+ }
} else
rc = ipr_device_reset(ioa_cfg, res);
res->resetting_device = 0;
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 19dd4b962e1..81e497d9eae 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -1,307 +1,244 @@
-/*
- * jazz_esp.c: Driver for SCSI chip on Mips Magnum Boards (JAZZ architecture)
+/* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
*
- * Copyright (C) 1997 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
- *
- * jazz_esp is based on David S. Miller's ESP driver and cyber_esp
+ * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.franken.de)
*/
-#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/delay.h>
#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "NCR53C9x.h"
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
#include <asm/jazz.h>
#include <asm/jazzdma.h>
-#include <asm/dma.h>
-#include <asm/pgtable.h>
-
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
-static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp);
-static void dma_dump_state(struct NCR_ESP *esp);
-static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
-static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
-static void dma_ints_off(struct NCR_ESP *esp);
-static void dma_ints_on(struct NCR_ESP *esp);
-static int dma_irq_p(struct NCR_ESP *esp);
-static int dma_ports_p(struct NCR_ESP *esp);
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
-static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp);
-static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp);
-static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp);
-static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp);
-static void dma_advance_sg (struct scsi_cmnd *sp);
-static void dma_led_off(struct NCR_ESP *);
-static void dma_led_on(struct NCR_ESP *);
-
-
-static volatile unsigned char cmd_buffer[16];
- /* This is where all commands are put
- * before they are trasfered to the ESP chip
- * via PIO.
- */
-
-static int jazz_esp_release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, NULL);
- if (shost->dma_channel != 0xff)
- free_dma(shost->dma_channel);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
- return 0;
-}
+#include <scsi/scsi_host.h>
-/***************************************************************** Detection */
-static int jazz_esp_detect(struct scsi_host_template *tpnt)
-{
- struct NCR_ESP *esp;
- struct ConfigDev *esp_dev;
-
- /*
- * first assumption it is there:-)
- */
- if (1) {
- esp_dev = NULL;
- esp = esp_allocate(tpnt, esp_dev, 0);
-
- /* Do command transfer with programmed I/O */
- esp->do_pio_cmds = 1;
-
- /* Required functions */
- esp->dma_bytes_sent = &dma_bytes_sent;
- esp->dma_can_transfer = &dma_can_transfer;
- esp->dma_dump_state = &dma_dump_state;
- esp->dma_init_read = &dma_init_read;
- esp->dma_init_write = &dma_init_write;
- esp->dma_ints_off = &dma_ints_off;
- esp->dma_ints_on = &dma_ints_on;
- esp->dma_irq_p = &dma_irq_p;
- esp->dma_ports_p = &dma_ports_p;
- esp->dma_setup = &dma_setup;
-
- /* Optional functions */
- esp->dma_barrier = NULL;
- esp->dma_drain = NULL;
- esp->dma_invalidate = NULL;
- esp->dma_irq_entry = NULL;
- esp->dma_irq_exit = NULL;
- esp->dma_poll = NULL;
- esp->dma_reset = NULL;
- esp->dma_led_off = &dma_led_off;
- esp->dma_led_on = &dma_led_on;
-
- /* virtual DMA functions */
- esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
- esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
- esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one;
- esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl;
- esp->dma_advance_sg = &dma_advance_sg;
-
-
- /* SCSI chip speed */
- esp->cfreq = 40000000;
+#include "esp_scsi.h"
- /*
- * we don't give the address of DMA channel, but the number
- * of DMA channel, so we can use the jazz DMA functions
- *
- */
- esp->dregs = (void *) JAZZ_SCSI_DMA;
-
- /* ESP register base */
- esp->eregs = (struct ESP_regs *)(JAZZ_SCSI_BASE);
-
- /* Set the command buffer */
- esp->esp_command = (volatile unsigned char *)cmd_buffer;
-
- /* get virtual dma address for command buffer */
- esp->esp_command_dvma = vdma_alloc(CPHYSADDR(cmd_buffer), sizeof (cmd_buffer));
-
- esp->irq = JAZZ_SCSI_IRQ;
- request_irq(JAZZ_SCSI_IRQ, esp_intr, IRQF_DISABLED, "JAZZ SCSI",
- esp->ehost);
-
- /*
- * FIXME, look if the scsi id is available from NVRAM
- */
- esp->scsi_id = 7;
-
- /* Check for differential SCSI-bus */
- /* What is this stuff? */
- esp->diff = 0;
-
- esp_initialize(esp);
-
- printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps,esps_in_use);
- esps_running = esps_in_use;
- return esps_in_use;
- }
- return 0;
-}
+#define DRV_MODULE_NAME "jazz_esp"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_VERSION "1.000"
+#define DRV_MODULE_RELDATE "May 19, 2007"
-/************************************************************* DMA Functions */
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
+static void jazz_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
- return fifo_count;
+ *(volatile u8 *)(esp->regs + reg) = val;
}
-static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp)
+static u8 jazz_esp_read8(struct esp *esp, unsigned long reg)
{
- /*
- * maximum DMA size is 1MB
- */
- unsigned long sz = sp->SCp.this_residual;
- if(sz > 0x100000)
- sz = 0x100000;
- return sz;
+ return *(volatile u8 *)(esp->regs + reg);
}
-static void dma_dump_state(struct NCR_ESP *esp)
+static dma_addr_t jazz_esp_map_single(struct esp *esp, void *buf,
+ size_t sz, int dir)
{
-
- ESPLOG(("esp%d: dma -- enable <%08x> residue <%08x\n",
- esp->esp_id, vdma_get_enable((int)esp->dregs), vdma_get_residue((int)esp->dregs)));
+ return dma_map_single(esp->dev, buf, sz, dir);
}
-static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length)
+static int jazz_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
{
- dma_cache_wback_inv ((unsigned long)phys_to_virt(vdma_log2phys(vaddress)), length);
- vdma_disable ((int)esp->dregs);
- vdma_set_mode ((int)esp->dregs, DMA_MODE_READ);
- vdma_set_addr ((int)esp->dregs, vaddress);
- vdma_set_count ((int)esp->dregs, length);
- vdma_enable ((int)esp->dregs);
+ return dma_map_sg(esp->dev, sg, num_sg, dir);
}
-static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length)
+static void jazz_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+ size_t sz, int dir)
{
- dma_cache_wback_inv ((unsigned long)phys_to_virt(vdma_log2phys(vaddress)), length);
- vdma_disable ((int)esp->dregs);
- vdma_set_mode ((int)esp->dregs, DMA_MODE_WRITE);
- vdma_set_addr ((int)esp->dregs, vaddress);
- vdma_set_count ((int)esp->dregs, length);
- vdma_enable ((int)esp->dregs);
+ dma_unmap_single(esp->dev, addr, sz, dir);
}
-static void dma_ints_off(struct NCR_ESP *esp)
+static void jazz_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
{
- disable_irq(esp->irq);
+ dma_unmap_sg(esp->dev, sg, num_sg, dir);
}
-static void dma_ints_on(struct NCR_ESP *esp)
+static int jazz_esp_irq_pending(struct esp *esp)
{
- enable_irq(esp->irq);
+ if (jazz_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
+ return 1;
+ return 0;
}
-static int dma_irq_p(struct NCR_ESP *esp)
+static void jazz_esp_reset_dma(struct esp *esp)
{
- return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
+ vdma_disable ((int)esp->dma_regs);
}
-static int dma_ports_p(struct NCR_ESP *esp)
+static void jazz_esp_dma_drain(struct esp *esp)
{
- int enable = vdma_get_enable((int)esp->dregs);
-
- return (enable & R4030_CHNL_ENABLE);
+ /* nothing to do */
}
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
+static void jazz_esp_dma_invalidate(struct esp *esp)
{
- /*
- * On the Sparc, DMA_ST_WRITE means "move data from device to memory"
- * so when (write) is true, it actually means READ!
- */
- if(write){
- dma_init_read(esp, addr, count);
- } else {
- dma_init_write(esp, addr, count);
- }
+ vdma_disable ((int)esp->dma_regs);
}
-static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp)
+static void jazz_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
{
- sp->SCp.have_data_in = vdma_alloc(CPHYSADDR(sp->SCp.buffer), sp->SCp.this_residual);
- sp->SCp.ptr = (char *)((unsigned long)sp->SCp.have_data_in);
+ BUG_ON(!(cmd & ESP_CMD_DMA));
+
+ jazz_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+ jazz_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+ vdma_disable ((int)esp->dma_regs);
+ if (write)
+ vdma_set_mode ((int)esp->dma_regs, DMA_MODE_READ);
+ else
+ vdma_set_mode ((int)esp->dma_regs, DMA_MODE_WRITE);
+
+ vdma_set_addr ((int)esp->dma_regs, addr);
+ vdma_set_count ((int)esp->dma_regs, dma_count);
+ vdma_enable ((int)esp->dma_regs);
+
+ scsi_esp_cmd(esp, cmd);
}
-static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp)
-{
- int sz = sp->SCp.buffers_residual;
- struct scatterlist *sg = (struct scatterlist *) sp->SCp.buffer;
-
- while (sz >= 0) {
- sg[sz].dma_address = vdma_alloc(CPHYSADDR(page_address(sg[sz].page) + sg[sz].offset), sg[sz].length);
- sz--;
- }
- sp->SCp.ptr=(char *)(sp->SCp.buffer->dma_address);
-}
-
-static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp)
+static int jazz_esp_dma_error(struct esp *esp)
{
- vdma_free(sp->SCp.have_data_in);
+ u32 enable = vdma_get_enable((int)esp->dma_regs);
+
+ if (enable & (R4030_MEM_INTR|R4030_ADDR_INTR))
+ return 1;
+
+ return 0;
}
-static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp)
+static const struct esp_driver_ops jazz_esp_ops = {
+ .esp_write8 = jazz_esp_write8,
+ .esp_read8 = jazz_esp_read8,
+ .map_single = jazz_esp_map_single,
+ .map_sg = jazz_esp_map_sg,
+ .unmap_single = jazz_esp_unmap_single,
+ .unmap_sg = jazz_esp_unmap_sg,
+ .irq_pending = jazz_esp_irq_pending,
+ .reset_dma = jazz_esp_reset_dma,
+ .dma_drain = jazz_esp_dma_drain,
+ .dma_invalidate = jazz_esp_dma_invalidate,
+ .send_dma_cmd = jazz_esp_send_dma_cmd,
+ .dma_error = jazz_esp_dma_error,
+};
+
+static int __devinit esp_jazz_probe(struct platform_device *dev)
{
- int sz = sp->use_sg - 1;
- struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
-
- while(sz >= 0) {
- vdma_free(sg[sz].dma_address);
- sz--;
- }
+ struct scsi_host_template *tpnt = &scsi_esp_template;
+ struct Scsi_Host *host;
+ struct esp *esp;
+ struct resource *res;
+ int err;
+
+ host = scsi_host_alloc(tpnt, sizeof(struct esp));
+
+ err = -ENOMEM;
+ if (!host)
+ goto fail;
+
+ host->max_id = 8;
+ esp = host_to_esp(host);
+
+ esp->host = host;
+ esp->dev = dev;
+ esp->ops = &jazz_esp_ops;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto fail_unlink;
+
+ esp->regs = (void __iomem *)res->start;
+ if (!esp->regs)
+ goto fail_unlink;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ if (!res)
+ goto fail_unlink;
+
+ esp->dma_regs = (void __iomem *)res->start;
+
+ esp->command_block = dma_alloc_coherent(esp->dev, 16,
+ &esp->command_block_dma,
+ GFP_KERNEL);
+ if (!esp->command_block)
+ goto fail_unmap_regs;
+
+ host->irq = platform_get_irq(dev, 0);
+ err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
+ if (err < 0)
+ goto fail_unmap_command_block;
+
+ esp->scsi_id = 7;
+ esp->host->this_id = esp->scsi_id;
+ esp->scsi_id_mask = (1 << esp->scsi_id);
+ esp->cfreq = 40000000;
+
+ dev_set_drvdata(&dev->dev, esp);
+
+ err = scsi_esp_register(esp, &dev->dev);
+ if (err)
+ goto fail_free_irq;
+
+ return 0;
+
+fail_free_irq:
+ free_irq(host->irq, esp);
+fail_unmap_command_block:
+ dma_free_coherent(esp->dev, 16,
+ esp->command_block,
+ esp->command_block_dma);
+fail_unmap_regs:
+fail_unlink:
+ scsi_host_put(host);
+fail:
+ return err;
}
-static void dma_advance_sg (struct scsi_cmnd *sp)
+static int __devexit esp_jazz_remove(struct platform_device *dev)
{
- sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
+ struct esp *esp = dev_get_drvdata(&dev->dev);
+ unsigned int irq = esp->host->irq;
+
+ scsi_esp_unregister(esp);
+
+ free_irq(irq, esp);
+ dma_free_coherent(esp->dev, 16,
+ esp->command_block,
+ esp->command_block_dma);
+
+ scsi_host_put(esp->host);
+
+ return 0;
}
-#define JAZZ_HDC_LED 0xe000d100 /* FIXME, find correct address */
+static struct platform_driver esp_jazz_driver = {
+ .probe = esp_jazz_probe,
+ .remove = __devexit_p(esp_jazz_remove),
+ .driver = {
+ .name = "jazz_esp",
+ },
+};
-static void dma_led_off(struct NCR_ESP *esp)
+static int __init jazz_esp_init(void)
{
-#if 0
- *(unsigned char *)JAZZ_HDC_LED = 0;
-#endif
+ return platform_driver_register(&esp_jazz_driver);
}
-static void dma_led_on(struct NCR_ESP *esp)
-{
-#if 0
- *(unsigned char *)JAZZ_HDC_LED = 1;
-#endif
+static void __exit jazz_esp_exit(void)
+{
+ platform_driver_unregister(&esp_jazz_driver);
}
-static struct scsi_host_template driver_template = {
- .proc_name = "jazz_esp",
- .proc_info = esp_proc_info,
- .name = "ESP 100/100a/200",
- .detect = jazz_esp_detect,
- .slave_alloc = esp_slave_alloc,
- .slave_destroy = esp_slave_destroy,
- .release = jazz_esp_release,
- .info = esp_info,
- .queuecommand = esp_queue,
- .eh_abort_handler = esp_abort,
- .eh_bus_reset_handler = esp_reset,
- .can_queue = 7,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = DISABLE_CLUSTERING,
-};
-#include "scsi_module.c"
+MODULE_DESCRIPTION("JAZZ ESP SCSI driver");
+MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(jazz_esp_init);
+module_exit(jazz_esp_exit);
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 5631c199a8e..732446e6396 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -254,6 +254,7 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
sg_init_one(&dummy, md, id->table_desc.len);
sg_dma_address(&dummy) = token;
+ sg_dma_len(&dummy) = id->table_desc.len;
err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
id->table_desc.len);
if (err) {
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 7a812677ff8..e2cf12ef368 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_sas.c
- * Version : v00.00.03.10-rc1
+ * Version : v00.00.03.10-rc5
*
* Authors:
* (email-id : megaraidlinux@lsi.com)
@@ -886,6 +886,7 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
goto out_return_cmd;
cmd->scmd = scmd;
+ scmd->SCp.ptr = (char *)cmd;
/*
* Issue the command to the FW
@@ -919,7 +920,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
* The RAID firmware may require extended timeouts.
*/
if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS)
- sdev->timeout = 90 * HZ;
+ sdev->timeout = MEGASAS_DEFAULT_CMD_TIMEOUT * HZ;
return 0;
}
@@ -981,8 +982,8 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x\n",
- scmd->serial_number, scmd->cmnd[0]);
+ scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n",
+ scmd->serial_number, scmd->cmnd[0], scmd->retries);
if (instance->hw_crit_error) {
printk(KERN_ERR "megasas: cannot recover from previous reset "
@@ -1000,6 +1001,39 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
}
/**
+ * megasas_reset_timer - quiesce the adapter if required
+ * @scmd: scsi cmnd
+ *
+ * Sets the FW busy flag and reduces the host->can_queue if the
+ * cmd has not been completed within the timeout period.
+ */
+static enum
+scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
+{
+ struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
+ struct megasas_instance *instance;
+ unsigned long flags;
+
+ if (time_after(jiffies, scmd->jiffies_at_alloc +
+ (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
+ return EH_NOT_HANDLED;
+ }
+
+ instance = cmd->instance;
+ if (!(instance->flag & MEGASAS_FW_BUSY)) {
+ /* FW is busy, throttle IO */
+ spin_lock_irqsave(instance->host->host_lock, flags);
+
+ instance->host->can_queue = 16;
+ instance->last_time = jiffies;
+ instance->flag |= MEGASAS_FW_BUSY;
+
+ spin_unlock_irqrestore(instance->host->host_lock, flags);
+ }
+ return EH_RESET_TIMER;
+}
+
+/**
* megasas_reset_device - Device reset handler entry point
*/
static int megasas_reset_device(struct scsi_cmnd *scmd)
@@ -1112,6 +1146,7 @@ static struct scsi_host_template megasas_template = {
.eh_device_reset_handler = megasas_reset_device,
.eh_bus_reset_handler = megasas_reset_bus_host,
.eh_host_reset_handler = megasas_reset_bus_host,
+ .eh_timed_out = megasas_reset_timer,
.bios_param = megasas_bios_param,
.use_clustering = ENABLE_CLUSTERING,
};
@@ -1215,9 +1250,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
int exception = 0;
struct megasas_header *hdr = &cmd->frame->hdr;
- if (cmd->scmd) {
- cmd->scmd->SCp.ptr = (char *)0;
- }
+ if (cmd->scmd)
+ cmd->scmd->SCp.ptr = NULL;
switch (hdr->cmd) {
@@ -1806,6 +1840,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
u32 context;
struct megasas_cmd *cmd;
struct megasas_instance *instance = (struct megasas_instance *)instance_addr;
+ unsigned long flags;
/* If we have already declared adapter dead, donot complete cmds */
if (instance->hw_crit_error)
@@ -1828,6 +1863,22 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
}
*instance->consumer = producer;
+
+ /*
+ * Check if we can restore can_queue
+ */
+ if (instance->flag & MEGASAS_FW_BUSY
+ && time_after(jiffies, instance->last_time + 5 * HZ)
+ && atomic_read(&instance->fw_outstanding) < 17) {
+
+ spin_lock_irqsave(instance->host->host_lock, flags);
+ instance->flag &= ~MEGASAS_FW_BUSY;
+ instance->host->can_queue =
+ instance->max_fw_cmds - MEGASAS_INT_CMDS;
+
+ spin_unlock_irqrestore(instance->host->host_lock, flags);
+ }
+
}
/**
@@ -2398,6 +2449,8 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
instance->init_id = MEGASAS_DEFAULT_INIT_ID;
megasas_dbg_lvl = 0;
+ instance->flag = 0;
+ instance->last_time = 0;
/*
* Initialize MFI Firmware
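The megaraid_sas hunks above add an eh_timed_out hook that throttles the host instead of escalating immediately: it flags the firmware busy, shrinks host->can_queue, and returns EH_RESET_TIMER so the midlayer re-arms the command timer; the completion tasklet later restores can_queue. A hedged sketch of such a handler; the structure mirrors the diff, but the throttle depth and the my_* name are illustrative.

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    /* Called by the SCSI midlayer when a command times out; buy the
     * firmware more time rather than firing the error handler. */
    static enum scsi_eh_timer_return my_eh_timed_out(struct scsi_cmnd *scmd)
    {
            struct Scsi_Host *shost = scmd->device->host;
            unsigned long flags;

            spin_lock_irqsave(shost->host_lock, flags);
            shost->can_queue = 16;          /* illustrative throttle depth */
            spin_unlock_irqrestore(shost->host_lock, flags);

            return EH_RESET_TIMER;          /* midlayer re-arms the timer */
    }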
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index e862992ee37..4dffc918a41 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.03.10-rc1"
-#define MEGASAS_RELDATE "Feb 14, 2007"
-#define MEGASAS_EXT_VERSION "Wed Feb 14 10:14:25 PST 2007"
+#define MEGASAS_VERSION "00.00.03.10-rc5"
+#define MEGASAS_RELDATE "May 17, 2007"
+#define MEGASAS_EXT_VERSION "Thu May 17 10:09:32 PDT 2007"
/*
* Device IDs
@@ -539,6 +539,8 @@ struct megasas_ctrl_info {
#define MEGASAS_DBG_LVL 1
+#define MEGASAS_FW_BUSY 1
+
/*
* When SCSI mid-layer calls driver's reset routine, driver waits for
* MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
@@ -549,8 +551,8 @@ struct megasas_ctrl_info {
#define MEGASAS_RESET_WAIT_TIME 180
#define MEGASAS_INTERNAL_CMD_WAIT_TIME 180
#define MEGASAS_RESET_NOTICE_INTERVAL 5
-
#define MEGASAS_IOCTL_CMD 0
+#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
/*
* FW reports the maximum of number of commands that it can accept (maximum
@@ -1073,7 +1075,6 @@ struct megasas_instance {
struct megasas_register_set __iomem *reg_set;
s8 init_id;
- u8 reserved[3];
u16 max_num_sge;
u16 max_fw_cmds;
@@ -1104,6 +1105,9 @@ struct megasas_instance {
struct megasas_instance_template *instancet;
struct tasklet_struct isr_tasklet;
+
+ u8 flag;
+ unsigned long last_time;
};
#define MEGASAS_IS_LOGICAL(scp) \
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c
index 3b2e1a53e6e..d953d43fe2e 100644
--- a/drivers/scsi/pluto.c
+++ b/drivers/scsi/pluto.c
@@ -4,6 +4,7 @@
*
*/
+#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
@@ -50,16 +51,10 @@ static struct ctrl_inquiry {
} *fcs __initdata;
static int fcscount __initdata = 0;
static atomic_t fcss __initdata = ATOMIC_INIT(0);
-DECLARE_MUTEX_LOCKED(fc_sem);
+static DECLARE_COMPLETION(fc_detect_complete);
static int pluto_encode_addr(Scsi_Cmnd *SCpnt, u16 *addr, fc_channel *fc, fcp_cmnd *fcmd);
-static void __init pluto_detect_timeout(unsigned long data)
-{
- PLND(("Timeout\n"))
- up(&fc_sem);
-}
-
static void __init pluto_detect_done(Scsi_Cmnd *SCpnt)
{
/* Do nothing */
@@ -69,7 +64,7 @@ static void __init pluto_detect_scsi_done(Scsi_Cmnd *SCpnt)
{
PLND(("Detect done %08lx\n", (long)SCpnt))
if (atomic_dec_and_test (&fcss))
- up(&fc_sem);
+ complete(&fc_detect_complete);
}
int pluto_slave_configure(struct scsi_device *device)
@@ -96,7 +91,6 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
int i, retry, nplutos;
fc_channel *fc;
struct scsi_device dev;
- DEFINE_TIMER(fc_timer, pluto_detect_timeout, 0, 0);
tpnt->proc_name = "pluto";
fcscount = 0;
@@ -187,15 +181,11 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
}
}
- fc_timer.expires = jiffies + 10 * HZ;
- add_timer(&fc_timer);
-
- down(&fc_sem);
+ wait_for_completion_timeout(&fc_detect_complete, 10 * HZ);
PLND(("Woken up\n"))
if (!atomic_read(&fcss))
break; /* All fc channels have answered us */
}
- del_timer_sync(&fc_timer);
PLND(("Finished search\n"))
for (i = 0, nplutos = 0; i < fcscount; i++) {
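The pluto hunks above replace a locked semaphore plus a hand-rolled timeout timer with one completion and wait_for_completion_timeout(), so the timeout is part of the wait and no del_timer_sync() is needed. A hedged sketch of that shape with assumed names:

    #include <linux/completion.h>
    #include <linux/jiffies.h>

    static DECLARE_COMPLETION(my_detect_done);

    /* Called from the asynchronous probe path when the last command finishes. */
    static void my_probe_done(void)
    {
            complete(&my_detect_done);
    }

    /* Prober: block for at most ten seconds; no separate timer required. */
    static void my_wait_for_probe(void)
    {
            wait_for_completion_timeout(&my_detect_done, 10 * HZ);
    }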
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index ce63044b1ec..18dd5cc4d7c 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -209,6 +209,7 @@ static struct {
{"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"Promise", "", NULL, BLIST_SPARSELUN},
{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
{"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
{"SEAGATE", "ST34555N", "0930", BLIST_NOTQ}, /* Chokes on tagged INQUIRY */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 00e46662296..3d8c9cb24f9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1789,7 +1789,7 @@ static void sd_shutdown(struct device *dev)
static int sd_suspend(struct device *dev, pm_message_t mesg)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
- int ret;
+ int ret = 0;
if (!sdkp)
return 0; /* this can happen */
@@ -1798,30 +1798,34 @@ static int sd_suspend(struct device *dev, pm_message_t mesg)
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
ret = sd_sync_cache(sdkp);
if (ret)
- return ret;
+ goto done;
}
if (mesg.event == PM_EVENT_SUSPEND &&
sdkp->device->manage_start_stop) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
ret = sd_start_stop_device(sdkp, 0);
- if (ret)
- return ret;
}
- return 0;
+done:
+ scsi_disk_put(sdkp);
+ return ret;
}
static int sd_resume(struct device *dev)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
+ int ret = 0;
if (!sdkp->device->manage_start_stop)
- return 0;
+ goto done;
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+ ret = sd_start_stop_device(sdkp, 1);
- return sd_start_stop_device(sdkp, 1);
+done:
+ scsi_disk_put(sdkp);
+ return ret;
}
/**
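The sd.c hunks above funnel every exit of sd_suspend()/sd_resume() through a single label so the reference taken by scsi_disk_get_from_dev() is always dropped with scsi_disk_put(); the old early returns leaked it. A tiny standalone illustration of the single-exit cleanup idiom (malloc/free stand in for get/put):

    #include <stdio.h>
    #include <stdlib.h>

    static int do_work(int fail_early)
    {
            int ret = 0;
            char *ref = malloc(16);         /* "take a reference" */

            if (!ref)
                    return -1;

            if (fail_early) {
                    ret = -1;
                    goto done;              /* error path still reaches the put */
            }

            /* ... normal work ... */

    done:
            free(ref);                      /* "drop the reference" exactly once */
            return ret;
    }

    int main(void)
    {
            printf("%d %d\n", do_work(0), do_work(1));
            return 0;
    }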
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 69be1324b11..9ac83abc402 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -32,11 +32,12 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
#define DRV_NAME "stex"
-#define ST_DRIVER_VERSION "3.1.0.1"
+#define ST_DRIVER_VERSION "3.6.0000.1"
#define ST_VER_MAJOR 3
-#define ST_VER_MINOR 1
+#define ST_VER_MINOR 6
#define ST_OEM 0
#define ST_BUILD_VER 1
@@ -113,10 +114,6 @@ enum {
SG_CF_64B = 0x40, /* 64 bit item */
SG_CF_HOST = 0x20, /* sg in host memory */
- ST_MAX_ARRAY_SUPPORTED = 16,
- ST_MAX_TARGET_NUM = (ST_MAX_ARRAY_SUPPORTED+1),
- ST_MAX_LUN_PER_TARGET = 16,
-
st_shasta = 0,
st_vsc = 1,
st_vsc1 = 2,
@@ -586,7 +583,7 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
u16 tag;
host = cmd->device->host;
id = cmd->device->id;
- lun = cmd->device->channel; /* firmware lun issue work around */
+ lun = cmd->device->lun;
hba = (struct st_hba *) &host->hostdata[0];
switch (cmd->cmnd[0]) {
@@ -605,8 +602,26 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
stex_invalid_field(cmd, done);
return 0;
}
+ case REPORT_LUNS:
+ /*
+ * The shasta firmware does not report actual luns in the
+ * target, so fail the command to force sequential lun scan.
+ * Also, the console device does not support this command.
+ */
+ if (hba->cardtype == st_shasta || id == host->max_id - 1) {
+ stex_invalid_field(cmd, done);
+ return 0;
+ }
+ break;
+ case TEST_UNIT_READY:
+ if (id == host->max_id - 1) {
+ cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ done(cmd);
+ return 0;
+ }
+ break;
case INQUIRY:
- if (id != ST_MAX_ARRAY_SUPPORTED)
+ if (id != host->max_id - 1)
break;
if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
stex_direct_copy(cmd, console_inq_page,
@@ -624,7 +639,7 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
ver.oem = ST_OEM;
ver.build = ST_BUILD_VER;
ver.signature[0] = PASSTHRU_SIGNATURE;
- ver.console_id = ST_MAX_ARRAY_SUPPORTED;
+ ver.console_id = host->max_id - 1;
ver.host_no = hba->host->host_no;
cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ?
DID_OK << 16 | COMMAND_COMPLETE << 8 :
@@ -645,13 +660,8 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
req = stex_alloc_req(hba);
- if (hba->cardtype == st_yosemite) {
- req->lun = lun * (ST_MAX_TARGET_NUM - 1) + id;
- req->target = 0;
- } else {
- req->lun = lun;
- req->target = id;
- }
+ req->lun = lun;
+ req->target = id;
/* cdb */
memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
@@ -767,18 +777,6 @@ static void stex_ys_commands(struct st_hba *hba,
ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT;
else
ccb->srb_status = SRB_STATUS_SUCCESS;
- } else if (ccb->cmd->cmnd[0] == REPORT_LUNS) {
- u8 *report_lun_data = (u8 *)hba->copy_buffer;
-
- count = STEX_EXTRA_SIZE;
- stex_internal_copy(ccb->cmd, report_lun_data,
- &count, ccb->sg_count, ST_FROM_CMD);
- if (report_lun_data[2] || report_lun_data[3]) {
- report_lun_data[2] = 0x00;
- report_lun_data[3] = 0x08;
- stex_internal_copy(ccb->cmd, report_lun_data,
- &count, ccb->sg_count, ST_TO_CMD);
- }
}
}
@@ -995,6 +993,11 @@ static int stex_abort(struct scsi_cmnd *cmd)
u32 data;
int result = SUCCESS;
unsigned long flags;
+
+ printk(KERN_INFO DRV_NAME
+ "(%s): aborting command\n", pci_name(hba->pdev));
+ scsi_print_command(cmd);
+
base = hba->mmio_base;
spin_lock_irqsave(host->host_lock, flags);
if (tag < host->can_queue && hba->ccb[tag].cmd == cmd)
@@ -1051,7 +1054,12 @@ static void stex_hard_reset(struct st_hba *hba)
pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
- msleep(1);
+
+ /*
+ * 1 ms may be enough for 8-port controllers. But 16-port controllers
+ * require more time to finish bus reset. Use 100 ms here for safety
+ */
+ msleep(100);
pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
@@ -1075,6 +1083,10 @@ static int stex_reset(struct scsi_cmnd *cmd)
unsigned long before;
hba = (struct st_hba *) &cmd->device->host->hostdata[0];
+ printk(KERN_INFO DRV_NAME
+ "(%s): resetting host\n", pci_name(hba->pdev));
+ scsi_print_command(cmd);
+
hba->mu_status = MU_STATE_RESETTING;
if (hba->cardtype == st_shasta)
@@ -1194,7 +1206,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_scsi_host_put;
}
- hba->mmio_base = ioremap(pci_resource_start(pdev, 0),
+ hba->mmio_base = ioremap_nocache(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if ( !hba->mmio_base) {
printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
@@ -1229,12 +1241,18 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE;
hba->mu_status = MU_STATE_STARTING;
- /* firmware uses id/lun pair for a logical drive, but lun would be
- always 0 if CONFIG_SCSI_MULTI_LUN not configured, so we use
- channel to map lun here */
- host->max_channel = ST_MAX_LUN_PER_TARGET - 1;
- host->max_id = ST_MAX_TARGET_NUM;
- host->max_lun = 1;
+ if (hba->cardtype == st_shasta) {
+ host->max_lun = 8;
+ host->max_id = 16 + 1;
+ } else if (hba->cardtype == st_yosemite) {
+ host->max_lun = 128;
+ host->max_id = 1 + 1;
+ } else {
+ /* st_vsc and st_vsc1 */
+ host->max_lun = 1;
+ host->max_id = 128 + 1;
+ }
+ host->max_channel = 0;
host->unique_id = host->host_no;
host->max_cmd_len = STEX_CDB_LENGTH;
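The per-cardtype limits above reserve the last target ID (host->max_id - 1) for the controller's console device, which is why stex_queuecommand() special-cases REPORT_LUNS, TEST_UNIT_READY and INQUIRY for that ID. The same limits restated as a table, purely as an illustration (assuming the driver's st_* enumerators; the "+ 1" in each max_id is the console target):

struct st_limits {
	int max_lun;
	int max_id;
};

static const struct st_limits stex_limits[] = {
	[st_shasta]   = { .max_lun = 8,   .max_id = 16 + 1 },
	[st_yosemite] = { .max_lun = 128, .max_id = 1 + 1 },
	[st_vsc]      = { .max_lun = 1,   .max_id = 128 + 1 },
	[st_vsc1]     = { .max_lun = 1,   .max_id = 128 + 1 },
};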
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index e35d9ab359f..b45ba5392dd 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -30,9 +30,9 @@ void
sunserial_console_termios(struct console *con)
{
char mode[16], buf[16], *s;
- char *mode_prop = "ttyX-mode";
- char *cd_prop = "ttyX-ignore-cd";
- char *dtr_prop = "ttyX-rts-dtr-off";
+ char mode_prop[] = "ttyX-mode";
+ char cd_prop[] = "ttyX-ignore-cd";
+ char dtr_prop[] = "ttyX-rts-dtr-off";
char *ssp_console_modes_prop = "ssp-console-modes";
int baud, bits, stop, cflag;
char parity;
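The suncore.c change matters because the code goes on to patch the 'X' in "ttyX-mode" in place. A string literal reached through a plain char * lives in read-only storage, while a char name[] definition gives the function its own writable copy. A minimal illustration (not the driver code):

static void pick_port(int port_b)
{
	char mode_prop[] = "ttyX-mode";		/* writable array copy */

	mode_prop[3] = port_b ? 'b' : 'a';	/* safe: modifies the copy */

	/* With `char *p = "ttyX-mode";`, writing p[3] would modify a shared,
	 * typically read-only string literal and is undefined behaviour. */
}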
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 0985193dc57..15b6e1cb040 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1239,7 +1239,7 @@ static inline struct console *SUNZILOG_CONSOLE(void)
#define SUNZILOG_CONSOLE() (NULL)
#endif
-static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channel)
+static void __devinit sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channel)
{
int baud, brg;
@@ -1259,7 +1259,7 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe
}
#ifdef CONFIG_SERIO
-static void __init sunzilog_register_serio(struct uart_sunzilog_port *up)
+static void __devinit sunzilog_register_serio(struct uart_sunzilog_port *up)
{
struct serio *serio = &up->serio;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index e277258df38..8969e42434b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1681,7 +1681,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
spin_unlock_irq (&hcd_root_hub_lock);
#ifdef CONFIG_PM
- flush_workqueue(ksuspend_usb_wq);
+ cancel_work_sync(&hcd->wakeup_work);
#endif
mutex_lock(&usb_bus_list_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index caaa46f2dec..24f10a19dbd 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1158,6 +1158,30 @@ static void release_address(struct usb_device *udev)
}
}
+#ifdef CONFIG_USB_SUSPEND
+
+static void usb_stop_pm(struct usb_device *udev)
+{
+ /* Synchronize with the ksuspend thread to prevent any more
+ * autosuspend requests from being submitted, and decrement
+ * the parent's count of unsuspended children.
+ */
+ usb_pm_lock(udev);
+ if (udev->parent && !udev->discon_suspended)
+ usb_autosuspend_device(udev->parent);
+ usb_pm_unlock(udev);
+
+ /* Stop any autosuspend requests already submitted */
+ cancel_rearming_delayed_work(&udev->autosuspend);
+}
+
+#else
+
+static inline void usb_stop_pm(struct usb_device *udev)
+{ }
+
+#endif
+
/**
* usb_disconnect - disconnect a device (usbcore-internal)
* @pdev: pointer to device being disconnected
@@ -1224,13 +1248,7 @@ void usb_disconnect(struct usb_device **pdev)
*pdev = NULL;
spin_unlock_irq(&device_state_lock);
- /* Decrement the parent's count of unsuspended children */
- if (udev->parent) {
- usb_pm_lock(udev);
- if (!udev->discon_suspended)
- usb_autosuspend_device(udev->parent);
- usb_pm_unlock(udev);
- }
+ usb_stop_pm(udev);
put_device(&udev->dev);
}
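Both USB hunks replace a global flush_workqueue() with cancellation targeted at the work items owned by the device being torn down: cancel_work_sync() for the root-hub wakeup work and cancel_rearming_delayed_work() for the autosuspend timer. A hedged teardown sketch with hypothetical fields mirroring hcd->wakeup_work and udev->autosuspend:

#include <linux/workqueue.h>

struct my_dev {
	struct work_struct wakeup_work;		/* hypothetical */
	struct delayed_work autosuspend;	/* hypothetical */
};

static void my_dev_teardown(struct my_dev *d)
{
	/* Wait for a running instance and prevent it from being requeued. */
	cancel_work_sync(&d->wakeup_work);

	/* Stop a self-rearming delayed work item, waiting if it is running. */
	cancel_rearming_delayed_work(&d->autosuspend);
}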
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 80627b6a2bf..4a6299bd004 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -184,10 +184,6 @@ static void usb_release_dev(struct device *dev)
udev = to_usb_device(dev);
-#ifdef CONFIG_USB_SUSPEND
- cancel_delayed_work(&udev->autosuspend);
- flush_workqueue(ksuspend_usb_wq);
-#endif
usb_destroy_configuration(udev);
usb_put_hcd(bus_to_hcd(udev->bus));
kfree(udev->product);
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 4475588e973..7361861e3aa 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -701,7 +701,7 @@ xfs_is_delayed_page(
else if (buffer_delay(bh))
acceptable = (type == IOMAP_DELAY);
else if (buffer_dirty(bh) && buffer_mapped(bh))
- acceptable = (type == 0);
+ acceptable = (type == IOMAP_NEW);
else
break;
} while ((bh = bh->b_this_page) != head);
@@ -810,7 +810,7 @@ xfs_convert_page(
page_dirty--;
count++;
} else {
- type = 0;
+ type = IOMAP_NEW;
if (buffer_mapped(bh) && all_bh && startio) {
lock_buffer(bh);
xfs_add_to_ioend(inode, bh, offset,
@@ -968,8 +968,8 @@ xfs_page_state_convert(
bh = head = page_buffers(page);
offset = page_offset(page);
- flags = -1;
- type = IOMAP_READ;
+ flags = BMAPI_READ;
+ type = IOMAP_NEW;
/* TODO: cleanup count and page_dirty */
@@ -999,14 +999,14 @@ xfs_page_state_convert(
*
* Third case, an unmapped buffer was found, and we are
* in a path where we need to write the whole page out.
- */
+ */
if (buffer_unwritten(bh) || buffer_delay(bh) ||
((buffer_uptodate(bh) || PageUptodate(page)) &&
!buffer_mapped(bh) && (unmapped || startio))) {
- /*
+ /*
* Make sure we don't use a read-only iomap
*/
- if (flags == BMAPI_READ)
+ if (flags == BMAPI_READ)
iomap_valid = 0;
if (buffer_unwritten(bh)) {
@@ -1055,7 +1055,7 @@ xfs_page_state_convert(
* That means it must already have extents allocated
* underneath it. Map the extent by reading it.
*/
- if (!iomap_valid || type != IOMAP_READ) {
+ if (!iomap_valid || flags != BMAPI_READ) {
flags = BMAPI_READ;
size = xfs_probe_cluster(inode, page, bh,
head, 1);
@@ -1066,7 +1066,15 @@ xfs_page_state_convert(
iomap_valid = xfs_iomap_valid(&iomap, offset);
}
- type = IOMAP_READ;
+ /*
+ * We set the type to IOMAP_NEW in case we are doing a
+ * small write at EOF that is extending the file but
+ * without needing an allocation. We need to update the
+ * file size on I/O completion in this case so it is
+ * the same case as having just allocated a new extent
+ * that we are writing into for the first time.
+ */
+ type = IOMAP_NEW;
if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
ASSERT(buffer_mapped(bh));
if (iomap_valid)
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 731fa56e0c3..bdca5416d8b 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -2,6 +2,7 @@
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
+ * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
*
* Additions by Keith M Wesolowski (wesolows@foobazco.org) based
* on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
@@ -10,11 +11,48 @@
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
+#include <linux/types.h>
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
+/* Emulate cmpxchg() the same way we emulate atomics,
+ * by hashing the object address and indexing into an array
+ * of spinlocks to get a bit of performance...
+ *
+ * See arch/sparc/lib/atomic32.c for implementation.
+ *
+ * Cribbed from <asm-parisc/atomic.h>
+ */
+#define __HAVE_ARCH_CMPXCHG 1
+
+/* bug catcher for when unsupported size is used - won't link */
+extern void __cmpxchg_called_with_bad_pointer(void);
+/* we only need to support cmpxchg of a u32 on sparc */
+extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+
+/* don't worry...optimizer will get rid of most of this */
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+{
+ switch(size) {
+ case 4:
+ return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
+ default:
+ __cmpxchg_called_with_bad_pointer();
+ break;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n) ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+})
+
#define ATOMIC_INIT(i) { (i) }
extern int __atomic_add_return(int, atomic_t *);
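Since the emulated cmpxchg() above only handles 4-byte objects (any other size trips the deliberately unresolved __cmpxchg_called_with_bad_pointer()), callers use it in the usual compare-and-swap retry loop. A minimal sketch with an illustrative counter:

#include <linux/types.h>

static u32 sample_counter;

/* Atomically add 'delta' to sample_counter using the cmpxchg() defined above. */
static void sample_add(u32 delta)
{
	u32 old, new_;

	do {
		old = sample_counter;
		new_ = old + delta;
	} while (cmpxchg(&sample_counter, old, new_) != old);
}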
diff --git a/include/asm-sparc64/bugs.h b/include/asm-sparc64/bugs.h
index 120422fdb02..bf39d86c0c9 100644
--- a/include/asm-sparc64/bugs.h
+++ b/include/asm-sparc64/bugs.h
@@ -1,9 +1,8 @@
-/* $Id: bugs.h,v 1.1 1996/12/26 13:25:20 davem Exp $
- * include/asm-sparc64/bugs.h: Sparc probes for various bugs.
+/* bugs.h: Sparc64 probes for various bugs.
*
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
*/
-
+#include <asm/sstate.h>
extern unsigned long loops_per_jiffy;
@@ -12,4 +11,5 @@ static void __init check_bugs(void)
#ifndef CONFIG_SMP
cpu_data(0).udelay_val = loops_per_jiffy;
#endif
+ sstate_running();
}
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index e89922d6718..03c385de761 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -17,11 +17,11 @@
typedef struct {
/* Dcache line 1 */
unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
- unsigned int __pad0_1;
- unsigned int __pad0_2;
- unsigned int __pad1;
+ unsigned int __pad0;
unsigned long clock_tick; /* %tick's per second */
unsigned long udelay_val;
+ unsigned int __pad1;
+ unsigned int __pad2;
/* Dcache line 2, rarely used */
unsigned int dcache_size;
@@ -30,8 +30,8 @@ typedef struct {
unsigned int icache_line_size;
unsigned int ecache_size;
unsigned int ecache_line_size;
+ int core_id;
unsigned int __pad3;
- unsigned int __pad4;
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
@@ -76,12 +76,18 @@ struct trap_per_cpu {
/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
unsigned int irq_worklist;
- unsigned int __pad1;
- unsigned long __pad2[3];
+ unsigned int cpu_mondo_qmask;
+ unsigned int dev_mondo_qmask;
+ unsigned int resum_qmask;
+ unsigned int nonresum_qmask;
+ unsigned int __pad2[3];
} __attribute__((aligned(64)));
extern struct trap_per_cpu trap_block[NR_CPUS];
extern void init_cur_cpu_trap(struct thread_info *);
extern void setup_tba(void);
+extern int ncpus_probed;
+
+extern unsigned long real_hard_smp_processor_id(void);
struct cpuid_patch_entry {
unsigned int addr;
@@ -122,6 +128,10 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
#define TRAP_PER_CPU_TSB_HUGE 0xd0
#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
#define TRAP_PER_CPU_IRQ_WORKLIST 0xe0
+#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe4
+#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xe8
+#define TRAP_PER_CPU_RESUM_QMASK 0xec
+#define TRAP_PER_CPU_NONRESUM_QMASK 0xf0
#define TRAP_BLOCK_SZ_SHIFT 8
@@ -192,7 +202,7 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
* the calculations done by the macro mid-stream.
*/
#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
- ldub [THR + TI_CPU], REG1; \
+ lduh [THR + TI_CPU], REG1; \
sethi %hi(__per_cpu_shift), REG3; \
sethi %hi(__per_cpu_base), REG2; \
ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
index a5558c87556..5cdb1ff0483 100644
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -73,6 +73,8 @@
#define HV_ENOTSUPPORTED 13 /* Function not supported */
#define HV_ENOMAP 14 /* No mapping found */
#define HV_ETOOMANY 15 /* Too many items specified */
+#define HV_ECHANNEL 16 /* Invalid LDC channel */
+#define HV_EBUSY 17 /* Resource busy */
/* mach_exit()
* TRAP: HV_FAST_TRAP
@@ -95,6 +97,10 @@
*/
#define HV_FAST_MACH_EXIT 0x00
+#ifndef __ASSEMBLY__
+extern void sun4v_mach_exit(unsigned long exit_core);
+#endif
+
/* Domain services. */
/* mach_desc()
@@ -120,7 +126,13 @@
*/
#define HV_FAST_MACH_DESC 0x01
-/* mach_exit()
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
+ unsigned long buf_len,
+ unsigned long *real_buf_len);
+#endif
+
+/* mach_sir()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_MACH_SIR
* ERRORS: This service does not return.
@@ -135,53 +147,66 @@
*/
#define HV_FAST_MACH_SIR 0x02
-/* mach_set_soft_state()
+#ifndef __ASSEMBLY__
+extern void sun4v_mach_sir(void);
+#endif
+
+/* mach_set_watchdog()
* TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE
- * ARG0: software state
- * ARG1: software state description pointer
+ * FUNCTION: HV_FAST_MACH_SET_WATCHDOG
+ * ARG0: timeout in milliseconds
* RET0: status
- * ERRORS: EINVAL software state not valid or software state
- * description is not NULL terminated
- * ENORADDR software state description pointer is not a
- * valid real address
- * EBADALIGNED software state description is not correctly
- * aligned
+ * RET1: time remaining in milliseconds
*
- * This allows the guest to report it's soft state to the hypervisor. There
- * are two primary components to this state. The first part states whether
- * the guest software is running or not. The second containts optional
- * details specific to the software.
+ * A guest uses this API to set a watchdog timer. Once the guest has set
+ * the timer, it must call the timer service again either to disable or
+ * postpone the expiration. If the timer expires before being reset or
+ * disabled, then the hypervisor takes a platform specific action leading
+ * to guest termination within a bounded time period. The platform action
+ * may include recovery actions such as reporting the expiration to a
+ * Service Processor, and/or automatically restarting the guest.
*
- * The software state argument is defined below in HV_SOFT_STATE_*, and
- * indicates whether the guest is operating normally or in a transitional
- * state.
+ * The 'timeout' parameter is specified in milliseconds, however the
+ * implemented granularity is given by the 'watchdog-resolution'
+ * property in the 'platform' node of the guest's machine description.
+ * The largest allowed timeout value is specified by the
+ * 'watchdog-max-timeout' property of the 'platform' node.
*
- * The software state description argument is a real address of a data buffer
- * of size 32-bytes aligned on a 32-byte boundary. It is treated as a NULL
- * terminated 7-bit ASCII string of up to 31 characters not including the
- * NULL termination.
- */
-#define HV_FAST_MACH_SET_SOFT_STATE 0x03
-#define HV_SOFT_STATE_NORMAL 0x01
-#define HV_SOFT_STATE_TRANSITION 0x02
-
-/* mach_get_soft_state()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE
- * ARG0: software state description pointer
- * RET0: status
- * RET1: software state
- * ERRORS: ENORADDR software state description pointer is not a
- * valid real address
- * EBADALIGNED software state description is not correctly
- * aligned
+ * If the 'timeout' argument is not zero, the watchdog timer is set to
+ * expire after a minimum of 'timeout' milliseconds.
*
- * Retrieve the current value of the guest's software state. The rules
- * for the software state pointer are the same as for mach_set_soft_state()
- * above.
+ * If the 'timeout' argument is zero, the watchdog timer is disabled.
+ *
+ * If the 'timeout' value exceeds the value of the 'watchdog-max-timeout'
+ * property, the hypervisor leaves the watchdog timer state unchanged,
+ * and returns a status of EINVAL.
+ *
+ * The 'time remaining' return value is valid regardless of whether the
+ * return status is EOK or EINVAL. A non-zero return value indicates the
+ * number of milliseconds that were remaining until the timer was to expire.
+ * If less than one millisecond remains, the return value is '1'. If the
+ * watchdog timer was disabled at the time of the call, the return value is
+ * zero.
+ *
+ * If the hypervisor cannot support the exact timeout value requested, but
+ * can support a larger timeout value, the hypervisor may round the actual
+ * timeout to a value larger than the requested timeout, consequently the
+ * 'time remaining' return value may be larger than the previously requested
+ * timeout value.
+ *
+ * Any guest OS debugger should be aware that the watchdog service may be in
+ * use. Consequently, it is recommended that the watchdog service is
+ * disabled upon debugger entry (e.g. reaching a breakpoint), and then
+ * re-enabled upon returning to normal execution. The API has been designed
+ * with this in mind, and the 'time remaining' result of the disable call may
+ * be used directly as the timeout argument of the re-enable call.
*/
-#define HV_FAST_MACH_GET_SOFT_STATE 0x04
+#define HV_FAST_MACH_SET_WATCHDOG 0x05
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
+ unsigned long *orig_timeout);
+#endif
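Going by the calling convention documented above (ARG0 = timeout in milliseconds, RET1 = time remaining), a guest might drive the watchdog through the new sun4v_mach_set_watchdog() wrapper roughly as follows. HV_EOK comes from this header; everything else is an illustrative sketch, not code from the patch:

#include <linux/errno.h>
#include <asm/hypervisor.h>

static unsigned long wd_saved_timeout;

/* (Re-)arm the watchdog for 60 seconds. */
static int wd_ping(void)
{
	unsigned long orig;

	return sun4v_mach_set_watchdog(60 * 1000, &orig) == HV_EOK ? 0 : -EIO;
}

/* Disable the watchdog, remembering the time that was left (0 if it was off),
 * e.g. so a debugger hook can re-arm it with the saved value on resume. */
static int wd_disable(void)
{
	return sun4v_mach_set_watchdog(0, &wd_saved_timeout) == HV_EOK ? 0 : -EIO;
}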
/* CPU services.
*
@@ -206,8 +231,8 @@
* FUNCTION: HV_FAST_CPU_START
* ARG0: CPU ID
* ARG1: PC
- * ARG1: RTBA
- * ARG1: target ARG0
+ * ARG2: RTBA
+ * ARG3: target ARG0
* RET0: status
* ERRORS: ENOCPU Invalid CPU ID
* EINVAL Target CPU ID is not in the stopped state
@@ -224,6 +249,13 @@
*/
#define HV_FAST_CPU_START 0x10
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_cpu_start(unsigned long cpuid,
+ unsigned long pc,
+ unsigned long rtba,
+ unsigned long arg0);
+#endif
+
/* cpu_stop()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_CPU_STOP
@@ -245,6 +277,10 @@
*/
#define HV_FAST_CPU_STOP 0x11
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_cpu_stop(unsigned long cpuid);
+#endif
+
/* cpu_yield()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_CPU_YIELD
@@ -588,6 +624,11 @@ struct hv_fault_status {
*/
#define HV_FAST_MMU_TSB_CTX0 0x20
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
+ unsigned long tsb_desc_ra);
+#endif
+
/* mmu_tsb_ctxnon0()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_MMU_TSB_CTXNON0
@@ -694,6 +735,13 @@ struct hv_fault_status {
*/
#define HV_FAST_MMU_MAP_PERM_ADDR 0x25
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
+ unsigned long set_to_zero,
+ unsigned long tte,
+ unsigned long flags);
+#endif
+
/* mmu_fault_area_conf()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_MMU_FAULT_AREA_CONF
@@ -892,6 +940,10 @@ struct hv_fault_status {
*/
#define HV_FAST_TOD_GET 0x50
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_tod_get(unsigned long *time);
+#endif
+
/* tod_set()
* TRAP: HV_FAST_TRAP
* FUNCTION: HV_FAST_TOD_SET
@@ -905,6 +957,10 @@ struct hv_fault_status {
*/
#define HV_FAST_TOD_SET 0x51
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_tod_set(unsigned long time);
+#endif
+
/* Console services */
/* con_getchar()
@@ -988,6 +1044,59 @@ extern unsigned long sun4v_con_write(unsigned long buffer,
unsigned long *bytes_written);
#endif
+/* mach_set_soft_state()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE
+ * ARG0: software state
+ * ARG1: software state description pointer
+ * RET0: status
+ * ERRORS: EINVAL software state not valid or software state
+ * description is not NULL terminated
+ * ENORADDR software state description pointer is not a
+ * valid real address
+ * EBADALIGNED software state description is not correctly
+ * aligned
+ *
+ * This allows the guest to report its soft state to the hypervisor. There
+ * are two primary components to this state. The first part states whether
+ * the guest software is running or not. The second contains optional
+ * details specific to the software.
+ *
+ * The software state argument is defined below in HV_SOFT_STATE_*, and
+ * indicates whether the guest is operating normally or in a transitional
+ * state.
+ *
+ * The software state description argument is a real address of a data buffer
+ * of size 32-bytes aligned on a 32-byte boundary. It is treated as a NULL
+ * terminated 7-bit ASCII string of up to 31 characters not including the
+ * NULL termination.
+ */
+#define HV_FAST_MACH_SET_SOFT_STATE 0x70
+#define HV_SOFT_STATE_NORMAL 0x01
+#define HV_SOFT_STATE_TRANSITION 0x02
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
+ unsigned long msg_string_ra);
+#endif
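The description above requires the state string to sit in a 32-byte buffer, aligned to 32 bytes and addressed by real address. A hedged sketch of reporting a state through the new wrapper; the real in-tree user is the sstate.c file added by this patch, and the real-address handling here is deliberately left to the caller:

/* 'msg_ra' must be the real address of a 32-byte aligned, 32-byte buffer
 * holding a NULL-terminated string of at most 31 characters. */
static void report_soft_state(unsigned long state, unsigned long msg_ra)
{
	unsigned long hv_err = sun4v_mach_set_soft_state(state, msg_ra);

	if (hv_err)	/* HV_EOK is 0 */
		printk(KERN_WARNING "soft_state: HV error %lu\n", hv_err);
}

A boot-time caller would pass HV_SOFT_STATE_TRANSITION while booting and HV_SOFT_STATE_NORMAL once running, which is what the sstate_booting()/sstate_running() helpers added elsewhere in this patch appear to be for.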
+
+/* mach_get_soft_state()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE
+ * ARG0: software state description pointer
+ * RET0: status
+ * RET1: software state
+ * ERRORS: ENORADDR software state description pointer is not a
+ * valid real address
+ * EBADALIGNED software state description is not correctly
+ * aligned
+ *
+ * Retrieve the current value of the guest's software state. The rules
+ * for the software state pointer are the same as for mach_set_soft_state()
+ * above.
+ */
+#define HV_FAST_MACH_GET_SOFT_STATE 0x71
+
/* Trap trace services.
*
* The hypervisor provides a trap tracing capability for privileged
@@ -1379,6 +1488,113 @@ extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
#endif
+/* vintr_get_cookie()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_VINTR_GET_COOKIE
+ * ARG0: device handle
+ * ARG1: device ino
+ * RET0: status
+ * RET1: cookie
+ */
+#define HV_FAST_VINTR_GET_COOKIE 0xa7
+
+/* vintr_set_cookie()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_VINTR_SET_COOKIE
+ * ARG0: device handle
+ * ARG1: device ino
+ * ARG2: cookie
+ * RET0: status
+ */
+#define HV_FAST_VINTR_SET_COOKIE 0xa8
+
+/* vintr_get_valid()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_VINTR_GET_VALID
+ * ARG0: device handle
+ * ARG1: device ino
+ * RET0: status
+ * RET1: valid state
+ */
+#define HV_FAST_VINTR_GET_VALID 0xa9
+
+/* vintr_set_valid()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_VINTR_SET_VALID
+ * ARG0: device handle
+ * ARG1: device ino
+ * ARG2: valid state
+ * RET0: status
+ */
+#define HV_FAST_VINTR_SET_VALID 0xaa
+
+/* vintr_get_state()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_VINTR_GET_STATE
+ * ARG0: device handle
+ * ARG1: device ino
+ * RET0: status
+ * RET1: state
+ */
+#define HV_FAST_VINTR_GET_STATE 0xab
+
+/* vintr_set_state()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_VINTR_SET_STATE
+ * ARG0: device handle
+ * ARG1: device ino
+ * ARG2: state
+ * RET0: status
+ */
+#define HV_FAST_VINTR_SET_STATE 0xac
+
+/* vintr_get_target()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_VINTR_GET_TARGET
+ * ARG0: device handle
+ * ARG1: device ino
+ * RET0: status
+ * RET1: cpuid
+ */
+#define HV_FAST_VINTR_GET_TARGET 0xad
+
+/* vintr_set_target()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_VINTR_SET_TARGET
+ * ARG0: device handle
+ * ARG1: device ino
+ * ARG2: cpuid
+ * RET0: status
+ */
+#define HV_FAST_VINTR_SET_TARGET 0xae
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long *cookie);
+extern unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long cookie);
+extern unsigned long sun4v_vintr_get_valid(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long *valid);
+extern unsigned long sun4v_vintr_set_valid(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long valid);
+extern unsigned long sun4v_vintr_get_state(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long *state);
+extern unsigned long sun4v_vintr_set_state(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long state);
+extern unsigned long sun4v_vintr_get_target(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long *cpuid);
+extern unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
+ unsigned long dev_ino,
+ unsigned long cpuid);
+#endif
+
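All of these virtual-interrupt hypercalls key off a (device handle, device ino) pair, returning the HV status in RET0 and any result through a pointer argument in the wrappers above. A brief hedged sketch of retargeting and enabling a virtual interrupt (the handle/ino would come from the machine description; HV_INTR_ENABLED is the valid-state value used by the other interrupt services in this header):

#include <linux/errno.h>
#include <asm/hypervisor.h>

static int retarget_vintr(unsigned long dev_handle, unsigned long dev_ino,
			  unsigned long cpu)
{
	unsigned long hv_err;

	hv_err = sun4v_vintr_set_target(dev_handle, dev_ino, cpu);
	if (hv_err)
		return -EIO;

	hv_err = sun4v_vintr_set_valid(dev_handle, dev_ino, HV_INTR_ENABLED);
	return hv_err ? -EIO : 0;
}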
/* PCI IO services.
*
* See the terminology descriptions in the device interrupt services
@@ -2037,6 +2253,346 @@ extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cp
*/
#define HV_FAST_PCI_MSG_SETVALID 0xd3
+/* Logical Domain Channel services. */
+
+#define LDC_CHANNEL_DOWN 0
+#define LDC_CHANNEL_UP 1
+#define LDC_CHANNEL_RESETTING 2
+
+/* ldc_tx_qconf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_TX_QCONF
+ * ARG0: channel ID
+ * ARG1: real address base of queue
+ * ARG2: num entries in queue
+ * RET0: status
+ *
+ * Configure transmit queue for the LDC endpoint specified by the
+ * given channel ID, to be placed at the given real address, and
+ * be of the given num entries. Num entries must be a power of two.
+ * The real address base of the queue must be aligned on the queue
+ * size. Each queue entry is 64-bytes, so for example, a 32 entry
+ * queue must be aligned on a 2048 byte real address boundary.
+ *
+ * Upon configuration of a valid transmit queue the head and tail
+ * pointers are set to a hypervisor specific identical value indicating
+ * that the queue initially is empty.
+ *
+ * The endpoint's transmit queue is un-configured if num entries is zero.
+ *
+ * The maximum number of entries for each queue for a specific cpu may be
+ * determined from the machine description. A transmit queue may be
+ * specified even in the event that the LDC is down (peer endpoint has no
+ * receive queue specified). Transmission will begin as soon as the peer
+ * endpoint defines a receive queue.
+ *
+ * It is recommended that a guest wait for a transmit queue to empty prior
+ * to reconfiguring it, or un-configuring it. Re- or un-configuring of a
+ * non-empty transmit queue behaves exactly as defined above, however it
+ * is undefined as to how many of the pending entries in the original queue
+ * will be delivered prior to the re-configuration taking effect.
+ * Furthermore, as the queue configuration causes a reset of the head and
+ * tail pointers there is no way for a guest to determine how many entries
+ * have been sent after the configuration operation.
+ */
+#define HV_FAST_LDC_TX_QCONF 0xe0
+
+/* ldc_tx_qinfo()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_TX_QINFO
+ * ARG0: channel ID
+ * RET0: status
+ * RET1: real address base of queue
+ * RET2: num entries in queue
+ *
+ * Return the configuration info for the transmit queue of LDC endpoint
+ * defined by the given channel ID. The real address is the currently
+ * defined real address base of the defined queue, and num entries is the
+ * size of the queue in terms of number of entries.
+ *
+ * If the specified channel ID is a valid endpoint number, but no transmit
+ * queue has been defined this service will return success, but with num
+ * entries set to zero and the real address will have an undefined value.
+ */
+#define HV_FAST_LDC_TX_QINFO 0xe1
+
+/* ldc_tx_get_state()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_TX_GET_STATE
+ * ARG0: channel ID
+ * RET0: status
+ * RET1: head offset
+ * RET2: tail offset
+ * RET3: channel state
+ *
+ * Return the transmit state, and the head and tail queue pointers, for
+ * the transmit queue of the LDC endpoint defined by the given channel ID.
+ * The head and tail values are the byte offset of the head and tail
+ * positions of the transmit queue for the specified endpoint.
+ */
+#define HV_FAST_LDC_TX_GET_STATE 0xe2
+
+/* ldc_tx_set_qtail()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_TX_SET_QTAIL
+ * ARG0: channel ID
+ * ARG1: tail offset
+ * RET0: status
+ *
+ * Update the tail pointer for the transmit queue associated with the LDC
+ * endpoint defined by the given channel ID. The tail offset specified
+ * must be aligned on a 64 byte boundary, and calculated so as to increase
+ * the number of pending entries on the transmit queue. Any attempt to
+ * decrease the number of pending transmit queue entries is considered
+ * an invalid tail offset and will result in an EINVAL error.
+ *
+ * Since the tail of the transmit queue may not be moved backwards, the
+ * transmit queue may be flushed by configuring a new transmit queue,
+ * whereupon the hypervisor will configure the initial transmit head and
+ * tail pointers to be equal.
+ */
+#define HV_FAST_LDC_TX_SET_QTAIL 0xe3
+
+/* ldc_rx_qconf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_RX_QCONF
+ * ARG0: channel ID
+ * ARG1: real address base of queue
+ * ARG2: num entries in queue
+ * RET0: status
+ *
+ * Configure receive queue for the LDC endpoint specified by the
+ * given channel ID, to be placed at the given real address, and
+ * be of the given num entries. Num entries must be a power of two.
+ * The real address base of the queue must be aligned on the queue
+ * size. Each queue entry is 64-bytes, so for example, a 32 entry
+ * queue must be aligned on a 2048 byte real address boundary.
+ *
+ * The endpoint's transmit queue is un-configured if num entries is zero.
+ *
+ * If a valid receive queue is specified for a local endpoint the LDC is
+ * in the up state for the purpose of transmission to this endpoint.
+ *
+ * The maximum number of entries for each queue for a specific cpu may be
+ * determined from the machine description.
+ *
+ * As receive queue configuration causes a reset of the queue's head and
+ * tail pointers, there is no way for a guest to determine how many entries
+ * have been received between a preceding ldc_get_rx_state() API call
+ * and the completion of the configuration operation. It should be noted
+ * that datagram delivery is not guaranteed via domain channels anyway,
+ * and therefore any higher protocol should be resilient to datagram
+ * loss if necessary. However, to overcome this specific race potential
+ * it is recommended, for example, that a higher level protocol be employed
+ * to ensure either retransmission, or ensure that no datagrams are pending
+ * on the peer endpoint's transmit queue prior to the configuration process.
+ */
+#define HV_FAST_LDC_RX_QCONF 0xe4
+
+/* ldc_rx_qinfo()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_RX_QINFO
+ * ARG0: channel ID
+ * RET0: status
+ * RET1: real address base of queue
+ * RET2: num entries in queue
+ *
+ * Return the configuration info for the receive queue of LDC endpoint
+ * defined by the given channel ID. The real address is the currently
+ * defined real address base of the defined queue, and num entries is the
+ * size of the queue in terms of number of entries.
+ *
+ * If the specified channel ID is a valid endpoint number, but no receive
+ * queue has been defined this service will return success, but with num
+ * entries set to zero and the real address will have an undefined value.
+ */
+#define HV_FAST_LDC_RX_QINFO 0xe5
+
+/* ldc_rx_get_state()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_RX_GET_STATE
+ * ARG0: channel ID
+ * RET0: status
+ * RET1: head offset
+ * RET2: tail offset
+ * RET3: channel state
+ *
+ * Return the receive state, and the head and tail queue pointers, for
+ * the receive queue of the LDC endpoint defined by the given channel ID.
+ * The head and tail values are the byte offset of the head and tail
+ * positions of the receive queue for the specified endpoint.
+ */
+#define HV_FAST_LDC_RX_GET_STATE 0xe6
+
+/* ldc_rx_set_qhead()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_RX_SET_QHEAD
+ * ARG0: channel ID
+ * ARG1: head offset
+ * RET0: status
+ *
+ * Update the head pointer for the receive queue associated with the LDC
+ * endpoint defined by the given channel ID. The head offset specified
+ * must be aligned on a 64 byte boundary, and calculated so as to decrease
+ * the number of pending entries on the receive queue. Any attempt to
+ * increase the number of pending receive queue entries is considered
+ * an invalid head offset and will result in an EINVAL error.
+ *
+ * The receive queue may be flushed by setting the head offset equal
+ * to the current tail offset.
+ */
+#define HV_FAST_LDC_RX_SET_QHEAD 0xe7
+
+/* LDC Map Table Entry. Each slot is defined by a translation table
+ * entry, as specified by the LDC_MTE_* bits below, and a 64-bit
+ * hypervisor invalidation cookie.
+ */
+#define LDC_MTE_PADDR 0x0fffffffffffe000 /* pa[55:13] */
+#define LDC_MTE_COPY_W 0x0000000000000400 /* copy write access */
+#define LDC_MTE_COPY_R 0x0000000000000200 /* copy read access */
+#define LDC_MTE_IOMMU_W 0x0000000000000100 /* IOMMU write access */
+#define LDC_MTE_IOMMU_R 0x0000000000000080 /* IOMMU read access */
+#define LDC_MTE_EXEC 0x0000000000000040 /* execute */
+#define LDC_MTE_WRITE 0x0000000000000020 /* write */
+#define LDC_MTE_READ 0x0000000000000010 /* read */
+#define LDC_MTE_SZALL 0x000000000000000f /* page size bits */
+#define LDC_MTE_SZ16GB 0x0000000000000007 /* 16GB page */
+#define LDC_MTE_SZ2GB 0x0000000000000006 /* 2GB page */
+#define LDC_MTE_SZ256MB 0x0000000000000005 /* 256MB page */
+#define LDC_MTE_SZ32MB 0x0000000000000004 /* 32MB page */
+#define LDC_MTE_SZ4MB 0x0000000000000003 /* 4MB page */
+#define LDC_MTE_SZ512K 0x0000000000000002 /* 512K page */
+#define LDC_MTE_SZ64K 0x0000000000000001 /* 64K page */
+#define LDC_MTE_SZ8K 0x0000000000000000 /* 8K page */
+
+#ifndef __ASSEMBLY__
+struct ldc_mtable_entry {
+ unsigned long mte;
+ unsigned long cookie;
+};
+#endif
+
+/* ldc_set_map_table()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_SET_MAP_TABLE
+ * ARG0: channel ID
+ * ARG1: table real address
+ * ARG2: num entries
+ * RET0: status
+ *
+ * Register the MTE table at the given table real address, with the
+ * specified num entries, for the LDC indicated by the given channel
+ * ID.
+ */
+#define HV_FAST_LDC_SET_MAP_TABLE 0xea
+
+/* ldc_get_map_table()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_GET_MAP_TABLE
+ * ARG0: channel ID
+ * RET0: status
+ * RET1: table real address
+ * RET2: num entries
+ *
+ * Return the configuration of the current mapping table registered
+ * for the given channel ID.
+ */
+#define HV_FAST_LDC_GET_MAP_TABLE 0xeb
+
+#define LDC_COPY_IN 0
+#define LDC_COPY_OUT 1
+
+/* ldc_copy()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_COPY
+ * ARG0: channel ID
+ * ARG1: LDC_COPY_* direction code
+ * ARG2: target real address
+ * ARG3: local real address
+ * ARG4: length in bytes
+ * RET0: status
+ * RET1: actual length in bytes
+ */
+#define HV_FAST_LDC_COPY 0xec
+
+#define LDC_MEM_READ 1
+#define LDC_MEM_WRITE 2
+#define LDC_MEM_EXEC 4
+
+/* ldc_mapin()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_MAPIN
+ * ARG0: channel ID
+ * ARG1: cookie
+ * RET0: status
+ * RET1: real address
+ * RET2: LDC_MEM_* permissions
+ */
+#define HV_FAST_LDC_MAPIN 0xed
+
+/* ldc_unmap()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_UNMAP
+ * ARG0: real address
+ * RET0: status
+ */
+#define HV_FAST_LDC_UNMAP 0xee
+
+/* ldc_revoke()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_LDC_REVOKE
+ * ARG0: cookie
+ * ARG1: ldc_mtable_entry cookie
+ * RET0: status
+ */
+#define HV_FAST_LDC_REVOKE 0xef
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_ldc_tx_qconf(unsigned long channel,
+ unsigned long ra,
+ unsigned long num_entries);
+extern unsigned long sun4v_ldc_tx_qinfo(unsigned long channel,
+ unsigned long *ra,
+ unsigned long *num_entries);
+extern unsigned long sun4v_ldc_tx_get_state(unsigned long channel,
+ unsigned long *head_off,
+ unsigned long *tail_off,
+ unsigned long *chan_state);
+extern unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel,
+ unsigned long tail_off);
+extern unsigned long sun4v_ldc_rx_qconf(unsigned long channel,
+ unsigned long ra,
+ unsigned long num_entries);
+extern unsigned long sun4v_ldc_rx_qinfo(unsigned long channel,
+ unsigned long *ra,
+ unsigned long *num_entries);
+extern unsigned long sun4v_ldc_rx_get_state(unsigned long channel,
+ unsigned long *head_off,
+ unsigned long *tail_off,
+ unsigned long *chan_state);
+extern unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel,
+ unsigned long head_off);
+extern unsigned long sun4v_ldc_set_map_table(unsigned long channel,
+ unsigned long ra,
+ unsigned long num_entries);
+extern unsigned long sun4v_ldc_get_map_table(unsigned long channel,
+ unsigned long *ra,
+ unsigned long *num_entries);
+extern unsigned long sun4v_ldc_copy(unsigned long channel,
+ unsigned long dir_code,
+ unsigned long tgt_raddr,
+ unsigned long lcl_raddr,
+ unsigned long len,
+ unsigned long *actual_len);
+extern unsigned long sun4v_ldc_mapin(unsigned long channel,
+ unsigned long cookie,
+ unsigned long *ra,
+ unsigned long *perm);
+extern unsigned long sun4v_ldc_unmap(unsigned long ra);
+extern unsigned long sun4v_ldc_revoke(unsigned long cookie,
+ unsigned long mte_cookie);
+#endif
+
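Per the rules above, an LDC transmit queue holds a power-of-two number of 64-byte entries and its real-address base must be aligned to the total queue size (e.g. 32 entries, 2048-byte alignment). A hedged configuration sketch; the channel number, buffer allocation and real-address conversion are all left to the caller and are not part of the patch:

#include <linux/errno.h>
#include <asm/hypervisor.h>

#define SKETCH_LDC_NENTRIES	32	/* must be a power of two */

/* 'q_ra' must be the real address of a (SKETCH_LDC_NENTRIES * 64)-byte buffer,
 * aligned to that same size. */
static int sketch_ldc_tx_setup(unsigned long channel, unsigned long q_ra)
{
	unsigned long head, tail, state;
	unsigned long hv_err;

	hv_err = sun4v_ldc_tx_qconf(channel, q_ra, SKETCH_LDC_NENTRIES);
	if (hv_err)
		return -ENODEV;

	/* A freshly configured queue has head == tail, i.e. it is empty. */
	hv_err = sun4v_ldc_tx_get_state(channel, &head, &tail, &state);
	if (hv_err)
		return -ENODEV;

	/* Transmission only proceeds once the peer defines its receive queue. */
	return state == LDC_CHANNEL_UP ? 0 : -EAGAIN;
}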
/* Performance counter services. */
#define HV_PERF_JBUS_PERF_CTRL_REG 0x00
@@ -2204,6 +2760,7 @@ extern void sun4v_hvapi_unregister(unsigned long group);
extern int sun4v_hvapi_get(unsigned long group,
unsigned long *major,
unsigned long *minor);
+extern void sun4v_hvapi_init(void);
#endif
#endif /* !(_SPARC64_HYPERVISOR_H) */
diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h
index 627e3396a5f..9974c7b0aeb 100644
--- a/include/asm-sparc64/kdebug.h
+++ b/include/asm-sparc64/kdebug.h
@@ -32,7 +32,6 @@ enum die_val {
DIE_TRAP,
DIE_TRAP_TL1,
DIE_CALL,
- DIE_PAGE_FAULT,
};
#endif
diff --git a/include/asm-sparc64/mdesc.h b/include/asm-sparc64/mdesc.h
new file mode 100644
index 00000000000..124eb8ca237
--- /dev/null
+++ b/include/asm-sparc64/mdesc.h
@@ -0,0 +1,39 @@
+#ifndef _SPARC64_MDESC_H
+#define _SPARC64_MDESC_H
+
+#include <linux/types.h>
+#include <asm/prom.h>
+
+struct mdesc_node;
+struct mdesc_arc {
+ const char *name;
+ struct mdesc_node *arc;
+};
+
+struct mdesc_node {
+ const char *name;
+ u64 node;
+ unsigned int unique_id;
+ unsigned int num_arcs;
+ struct property *properties;
+ struct mdesc_node *hash_next;
+ struct mdesc_node *allnodes_next;
+ struct mdesc_arc arcs[0];
+};
+
+extern struct mdesc_node *md_find_node_by_name(struct mdesc_node *from,
+ const char *name);
+#define md_for_each_node_by_name(__mn, __name) \
+ for (__mn = md_find_node_by_name(NULL, __name); __mn; \
+ __mn = md_find_node_by_name(__mn, __name))
+
+extern struct property *md_find_property(const struct mdesc_node *mp,
+ const char *name,
+ int *lenp);
+extern const void *md_get_property(const struct mdesc_node *mp,
+ const char *name,
+ int *lenp);
+
+extern void sun4v_mdesc_init(void);
+
+#endif
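A hedged sketch of walking the machine description with the interfaces declared above, printing the "id" property of each "cpu" node (the property name is an assumption for illustration; the real consumers live in the mdesc.c file this patch adds):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/mdesc.h>

static void __init list_mdesc_cpus(void)
{
	struct mdesc_node *mp;

	md_for_each_node_by_name(mp, "cpu") {
		const u64 *id = md_get_property(mp, "id", NULL);

		if (id)
			printk(KERN_INFO "mdesc cpu node %u has id %llu\n",
			       mp->unique_id, (unsigned long long)*id);
	}
}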
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index 6a0da3b1695..992f9f7a476 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -316,11 +316,8 @@ extern int prom_setprop(int node, const char *prop_name, char *prop_value,
extern int prom_pathtoinode(const char *path);
extern int prom_inst2pkg(int);
-
-/* CPU probing helpers. */
-struct device_node;
-int cpu_find_by_instance(int instance, struct device_node **dev_node, int *mid);
-int cpu_find_by_mid(int mid, struct device_node **prom_node);
+extern int prom_service_exists(const char *service_name);
+extern void prom_sun4v_guest_soft_state(void);
/* Client interface level routines. */
extern void prom_set_trap_table(unsigned long tba);
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index ced8cbde046..88db872ce2f 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -5,7 +5,8 @@
#ifdef CONFIG_SMP
-extern void setup_per_cpu_areas(void);
+#define setup_per_cpu_areas() do { } while (0)
+extern void real_setup_per_cpu_areas(void);
extern unsigned long __per_cpu_base;
extern unsigned long __per_cpu_shift;
@@ -34,6 +35,7 @@ do { \
} while (0)
#else /* ! SMP */
+#define real_setup_per_cpu_areas() do { } while (0)
#define DEFINE_PER_CPU(type, name) \
__typeof__(type) per_cpu__##name
diff --git a/include/asm-sparc64/prom.h b/include/asm-sparc64/prom.h
index ddad5f99ac7..b4df3042add 100644
--- a/include/asm-sparc64/prom.h
+++ b/include/asm-sparc64/prom.h
@@ -90,6 +90,7 @@ extern struct device_node *of_find_compatible_node(struct device_node *from,
const char *type, const char *compat);
extern struct device_node *of_find_node_by_path(const char *path);
extern struct device_node *of_find_node_by_phandle(phandle handle);
+extern struct device_node *of_find_node_by_cpuid(int cpuid);
extern struct device_node *of_get_parent(const struct device_node *node);
extern struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev);
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 869d16fb907..f76e1492add 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -41,7 +41,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
extern int hard_smp_processor_id(void);
#define raw_smp_processor_id() (current_thread_info()->cpu)
-extern void smp_setup_cpu_possible_map(void);
+extern void smp_fill_in_sib_core_maps(void);
extern unsigned char boot_cpu_id;
#endif /* !(__ASSEMBLY__) */
@@ -49,7 +49,7 @@ extern unsigned char boot_cpu_id;
#else
#define hard_smp_processor_id() 0
-#define smp_setup_cpu_possible_map() do { } while (0)
+#define smp_fill_in_sib_core_maps() do { } while (0)
#define boot_cpu_id (0)
#endif /* !(CONFIG_SMP) */
diff --git a/include/asm-sparc64/sstate.h b/include/asm-sparc64/sstate.h
new file mode 100644
index 00000000000..a7c35dbcb28
--- /dev/null
+++ b/include/asm-sparc64/sstate.h
@@ -0,0 +1,13 @@
+#ifndef _SPARC64_SSTATE_H
+#define _SPARC64_SSTATE_H
+
+extern void sstate_booting(void);
+extern void sstate_running(void);
+extern void sstate_halt(void);
+extern void sstate_poweroff(void);
+extern void sstate_panic(void);
+extern void sstate_reboot(void);
+
+extern void sun4v_sstate_init(void);
+
+#endif /* _SPARC64_SSTATE_H */
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index 2ebf7f27bf9..98252cd44dd 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -38,8 +38,8 @@ struct thread_info {
/* D$ line 1 */
struct task_struct *task;
unsigned long flags;
- __u8 cpu;
__u8 fpsaved[7];
+ __u8 pad;
unsigned long ksp;
/* D$ line 2 */
@@ -49,7 +49,7 @@ struct thread_info {
int preempt_count; /* 0 => preemptable, <0 => BUG */
__u8 new_child;
__u8 syscall_noerror;
- __u16 __pad;
+ __u16 cpu;
unsigned long *utraps;
@@ -83,8 +83,7 @@ struct thread_info {
#define TI_CURRENT_DS (TI_FLAGS + TI_FLAG_BYTE_CURRENT_DS)
#define TI_FPDEPTH (TI_FLAGS + TI_FLAG_BYTE_FPDEPTH)
#define TI_WSAVED (TI_FLAGS + TI_FLAG_BYTE_WSAVED)
-#define TI_CPU 0x00000010
-#define TI_FPSAVED 0x00000011
+#define TI_FPSAVED 0x00000010
#define TI_KSP 0x00000018
#define TI_FAULT_ADDR 0x00000020
#define TI_KREGS 0x00000028
@@ -92,6 +91,7 @@ struct thread_info {
#define TI_PRE_COUNT 0x00000038
#define TI_NEW_CHILD 0x0000003c
#define TI_SYS_NOERROR 0x0000003d
+#define TI_CPU 0x0000003e
#define TI_UTRAPS 0x00000040
#define TI_REG_WINDOW 0x00000048
#define TI_RWIN_SPTRS 0x000003c8
diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h
index 98a6c613589..e0d450d600e 100644
--- a/include/asm-sparc64/topology.h
+++ b/include/asm-sparc64/topology.h
@@ -6,4 +6,7 @@
#include <asm-generic/topology.h>
+#define topology_core_id(cpu) (cpu_data(cpu).core_id)
+#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+
#endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index ab55ffcb7bf..76e4299dd9b 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -271,7 +271,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
sethi %hi(swapper_4m_tsb), REG1; \
or REG1, %lo(swapper_4m_tsb), REG1; \
- and TAG, (KERNEL_TSB_NENTRIES - 1), REG2; \
+ and TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
sllx REG2, 4, REG2; \
add REG1, REG2, REG2; \
KTSB_LOAD_QUAD(REG2, REG3); \
diff --git a/include/linux/timer.h b/include/linux/timer.h
index e0c5c16c992..c661710d362 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -69,6 +69,12 @@ extern int __mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
/*
+ * The jiffies value which is added to now, when there is no timer
+ * in the timer wheel:
+ */
+#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
+
+/*
* Return when the next timer-wheel timeout occurs (in absolute jiffies),
* locks the timer base:
*/
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3e7ebc4646b..52db9e3c526 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -247,6 +247,21 @@ void tick_nohz_stop_sched_tick(void)
if (cpu == tick_do_timer_cpu)
tick_do_timer_cpu = -1;
+ ts->idle_sleeps++;
+
+ /*
+ * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that
+ * there is no timer pending or at least extremely far
+ * into the future (12 days for HZ=1000). In this case
+ * we simply stop the tick timer:
+ */
+ if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) {
+ ts->idle_expires.tv64 = KTIME_MAX;
+ if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
+ hrtimer_cancel(&ts->sched_timer);
+ goto out;
+ }
+
/*
* calculate the expiry time for the next timer wheel
* timer
@@ -254,7 +269,6 @@ void tick_nohz_stop_sched_tick(void)
expires = ktime_add_ns(last_update, tick_period.tv64 *
delta_jiffies);
ts->idle_expires = expires;
- ts->idle_sleeps++;
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start(&ts->sched_timer, expires,
diff --git a/kernel/timer.c b/kernel/timer.c
index 5ec5490f8d8..1a69705c2fb 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -666,7 +666,7 @@ static inline void __run_timers(tvec_base_t *base)
static unsigned long __next_timer_interrupt(tvec_base_t *base)
{
unsigned long timer_jiffies = base->timer_jiffies;
- unsigned long expires = timer_jiffies + (LONG_MAX >> 1);
+ unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
int index, slot, array, found = 0;
struct timer_list *nte;
tvec_t *varray[4];
@@ -752,6 +752,14 @@ static unsigned long cmp_next_hrtimer_event(unsigned long now,
tsdelta = ktime_to_timespec(hr_delta);
delta = timespec_to_jiffies(&tsdelta);
+
+ /*
+ * Limit the delta to the max value, which is checked in
+ * tick_nohz_stop_sched_tick():
+ */
+ if (delta > NEXT_TIMER_MAX_DELTA)
+ delta = NEXT_TIMER_MAX_DELTA;
+
/*
* Take rounding errors in to account and make sure, that it
* expires in the next tick. Otherwise we go into an endless
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
index 7ec6610841b..17ad278696e 100644
--- a/net/ieee80211/ieee80211_module.c
+++ b/net/ieee80211/ieee80211_module.c
@@ -140,7 +140,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
dev = alloc_etherdev(sizeof(struct ieee80211_device) + sizeof_priv);
if (!dev) {
- IEEE80211_ERROR("Unable to network device.\n");
+ IEEE80211_ERROR("Unable to allocate network device.\n");
goto failed;
}
ieee = netdev_priv(dev);
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index e9cdc6615dd..c308756c2f9 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -33,7 +33,10 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
struct ieee80211softmac_device *softmac;
struct net_device *dev;
- dev = alloc_ieee80211(sizeof(struct ieee80211softmac_device) + sizeof_priv);
+ dev = alloc_ieee80211(sizeof(*softmac) + sizeof_priv);
+ if (!dev)
+ return NULL;
+
softmac = ieee80211_priv(dev);
softmac->dev = dev;
softmac->ieee = netdev_priv(dev);