-rw-r--r--  Documentation/devices.txt  2
-rw-r--r--  Documentation/lguest/lguest.c  2
-rw-r--r--  Makefile  2
-rw-r--r--  arch/arm/kernel/bios32.c  4
-rw-r--r--  arch/i386/boot/memory.c  39
-rw-r--r--  arch/i386/xen/mmu.c  5
-rw-r--r--  arch/mips/kernel/scall64-o32.S  2
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S  2
-rw-r--r--  arch/mips/sgi-ip32/ip32-platform.c  4
-rw-r--r--  arch/powerpc/boot/dts/mpc8349emitx.dts  1
-rw-r--r--  arch/powerpc/kernel/process.c  7
-rw-r--r--  arch/powerpc/platforms/83xx/usb.c  4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c  4
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c  2
-rw-r--r--  arch/powerpc/sysdev/commproc.c  2
-rw-r--r--  arch/ppc/8xx_io/commproc.c  2
-rw-r--r--  arch/sparc/kernel/ebus.c  2
-rw-r--r--  arch/sparc64/kernel/binfmt_aout32.c  4
-rw-r--r--  arch/sparc64/kernel/ebus.c  5
-rw-r--r--  arch/sparc64/lib/NGcopy_from_user.S  8
-rw-r--r--  arch/sparc64/lib/NGcopy_to_user.S  8
-rw-r--r--  arch/sparc64/lib/NGmemcpy.S  371
-rw-r--r--  arch/x86_64/vdso/voffset.h  2
-rw-r--r--  drivers/acpi/sleep/Makefile  2
-rw-r--r--  drivers/acpi/sleep/main.c  46
-rw-r--r--  drivers/ata/ata_piix.c  7
-rw-r--r--  drivers/ata/pata_sis.c  3
-rw-r--r--  drivers/ata/sata_sil24.c  16
-rw-r--r--  drivers/base/core.c  1
-rw-r--r--  drivers/cdrom/cdrom.c  4
-rw-r--r--  drivers/char/drm/i915_drv.h  6
-rw-r--r--  drivers/char/drm/i915_irq.c  12
-rw-r--r--  drivers/char/hpet.c  9
-rw-r--r--  drivers/char/mspec.c  26
-rw-r--r--  drivers/char/random.c  10
-rw-r--r--  drivers/char/vt_ioctl.c  15
-rw-r--r--  drivers/input/joystick/Kconfig  2
-rw-r--r--  drivers/input/mouse/appletouch.c  6
-rw-r--r--  drivers/lguest/lguest_asm.S  6
-rw-r--r--  drivers/media/video/ivtv/ivtv-fileops.c  6
-rw-r--r--  drivers/media/video/usbvision/usbvision-video.c  5
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c  1
-rw-r--r--  drivers/net/e1000/e1000_hw.c  1
-rw-r--r--  drivers/net/e1000/e1000_hw.h  1
-rw-r--r--  drivers/net/e1000/e1000_main.c  2
-rw-r--r--  drivers/net/mv643xx_eth.c  5
-rw-r--r--  drivers/net/mv643xx_eth.h  4
-rw-r--r--  drivers/net/pcmcia/3c589_cs.c  2
-rw-r--r--  drivers/net/ppp_mppe.c  14
-rwxr-xr-x  drivers/net/qla3xxx.c  7
-rw-r--r--  drivers/net/r8169.c  14
-rw-r--r--  drivers/net/sky2.c  90
-rw-r--r--  drivers/net/sky2.h  2
-rw-r--r--  drivers/net/usb/dm9601.c  2
-rw-r--r--  drivers/net/wireless/Makefile  2
-rw-r--r--  drivers/pci/quirks.c  7
-rw-r--r--  drivers/scsi/esp_scsi.c  3
-rw-r--r--  drivers/scsi/scsi_transport_spi.c  28
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_cpm1.h  2
-rw-r--r--  drivers/serial/sunsab.c  2
-rw-r--r--  drivers/w1/w1.c  1
-rw-r--r--  fs/compat_ioctl.c  2
-rw-r--r--  fs/lockd/svclock.c  29
-rw-r--r--  fs/nfs/client.c  29
-rw-r--r--  fs/nfs/dir.c  2
-rw-r--r--  fs/nfs/getroot.c  3
-rw-r--r--  fs/splice.c  46
-rw-r--r--  fs/ufs/super.c  4
-rw-r--r--  fs/xfs/xfs_buf_item.h  5
-rw-r--r--  fs/xfs/xfs_log_recover.c  51
-rw-r--r--  fs/xfs/xfs_trans_buf.c  1
-rw-r--r--  include/acpi/acpi_drivers.h  4
-rw-r--r--  include/asm-i386/system.h  5
-rw-r--r--  include/asm-mips/fcntl.h  1
-rw-r--r--  include/asm-mips/page.h  2
-rw-r--r--  include/linux/cpufreq.h  19
-rw-r--r--  include/net/sctp/sm.h  4
-rw-r--r--  include/net/sctp/structs.h  3
-rw-r--r--  include/net/tcp.h  6
-rw-r--r--  kernel/futex.c  26
-rw-r--r--  kernel/futex_compat.c  28
-rw-r--r--  kernel/power/Kconfig  2
-rw-r--r--  kernel/sched_fair.c  10
-rw-r--r--  kernel/sys.c  2
-rw-r--r--  lib/Kconfig.debug  2
-rw-r--r--  mm/hugetlb.c  2
-rw-r--r--  net/ieee80211/ieee80211_rx.c  6
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_assoc.c  2
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_wx.c  54
-rw-r--r--  net/ipv4/tcp_ipv4.c  19
-rw-r--r--  net/ipv6/tcp_ipv6.c  18
-rw-r--r--  net/mac80211/ieee80211.c  2
-rw-r--r--  net/mac80211/rc80211_simple.c  2
-rw-r--r--  net/mac80211/wme.c  2
-rw-r--r--  net/sched/sch_sfq.c  47
-rw-r--r--  net/sctp/bind_addr.c  2
-rw-r--r--  net/sctp/input.c  8
-rw-r--r--  net/sctp/inqueue.c  8
-rw-r--r--  net/sctp/sm_make_chunk.c  46
-rw-r--r--  net/sctp/sm_statefuns.c  243
-rw-r--r--  net/sctp/sm_statetable.c  16
-rw-r--r--  net/socket.c  3
-rw-r--r--  net/wireless/core.c  2
-rw-r--r--  net/wireless/sysfs.c  2
104 files changed, 1034 insertions, 592 deletions
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 8de132a02ba..6c46730c631 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -94,6 +94,8 @@ Your cooperation is appreciated.
9 = /dev/urandom Faster, less secure random number gen.
10 = /dev/aio Asynchronous I/O notification interface
11 = /dev/kmsg Writes to this come out as printk's
+ 12 = /dev/oldmem Used by crashdump kernels to access
+ the memory of the kernel that crashed.
1 block RAM disk
0 = /dev/ram0 First RAM disk
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index f7918401a00..73c5f1f3d5d 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -882,7 +882,7 @@ static u32 handle_block_output(int fd, const struct iovec *iov,
* of the block file (possibly extending it). */
if (off + len > device_len) {
/* Trim it back to the correct length */
- ftruncate(dev->fd, device_len);
+ ftruncate64(dev->fd, device_len);
/* Die, bad Guest, die. */
errx(1, "Write past end %llu+%u", off, len);
}
diff --git a/Makefile b/Makefile
index 4dac25301d5..6fc97bfea36 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 23
-EXTRAVERSION =-rc8
+EXTRAVERSION =-rc9
NAME = Arr Matey! A Hairy Bilge Rat!
# *DOCUMENTATION*
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 240c448ec31..a2dd930d11e 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -338,7 +338,7 @@ pbus_assign_bus_resources(struct pci_bus *bus, struct pci_sys_data *root)
* pcibios_fixup_bus - Called after each bus is probed,
* but before its children are examined.
*/
-void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+void pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_sys_data *root = bus->sysdata;
struct pci_dev *dev;
@@ -419,7 +419,7 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
/*
* Convert from Linux-centric to bus-centric addresses for bridge devices.
*/
-void __devinit
+void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)
{
diff --git a/arch/i386/boot/memory.c b/arch/i386/boot/memory.c
index 1a2e62db8be..378353956b5 100644
--- a/arch/i386/boot/memory.c
+++ b/arch/i386/boot/memory.c
@@ -20,6 +20,7 @@
static int detect_memory_e820(void)
{
+ int count = 0;
u32 next = 0;
u32 size, id;
u8 err;
@@ -27,20 +28,33 @@ static int detect_memory_e820(void)
do {
size = sizeof(struct e820entry);
- id = SMAP;
+
+ /* Important: %edx is clobbered by some BIOSes,
+ so it must be either used for the error output
+ or explicitly marked clobbered. */
asm("int $0x15; setc %0"
- : "=am" (err), "+b" (next), "+d" (id), "+c" (size),
+ : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
"=m" (*desc)
- : "D" (desc), "a" (0xe820));
+ : "D" (desc), "d" (SMAP), "a" (0xe820));
+
+ /* Some BIOSes stop returning SMAP in the middle of
+ the search loop. We don't know exactly how the BIOS
+ screwed up the map at that point, we might have a
+ partial map, the full map, or complete garbage, so
+ just return failure. */
+ if (id != SMAP) {
+ count = 0;
+ break;
+ }
- if (err || id != SMAP)
+ if (err)
break;
- boot_params.e820_entries++;
+ count++;
desc++;
- } while (next && boot_params.e820_entries < E820MAX);
+ } while (next && count < E820MAX);
- return boot_params.e820_entries;
+ return boot_params.e820_entries = count;
}
static int detect_memory_e801(void)
@@ -89,11 +103,16 @@ static int detect_memory_88(void)
int detect_memory(void)
{
+ int err = -1;
+
if (detect_memory_e820() > 0)
- return 0;
+ err = 0;
if (!detect_memory_e801())
- return 0;
+ err = 0;
+
+ if (!detect_memory_88())
+ err = 0;
- return detect_memory_88();
+ return err;
}
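
The memory.c change above amounts to a validate-then-count loop: an e820 entry is only counted after the BIOS has echoed the SMAP signature back, and a mid-scan signature failure throws the whole map away rather than trusting a partial one. Below is a minimal userspace sketch of that control flow; fake_bios_e820() and its sample map are invented stand-ins for the int 0x15 call so the loop can run outside the boot environment.

/*
 * Minimal userspace sketch (not the kernel code): commit the entry count
 * only while every BIOS call keeps returning the SMAP signature.
 */
#include <stdio.h>
#include <stdint.h>

#define SMAP    0x534d4150u   /* ASCII "SMAP" */
#define E820MAX 128

struct e820entry { uint64_t addr, size; uint32_t type; };

/* Hypothetical BIOS stand-in: fills one entry, returns the signature it
 * produced and a continuation value via *next (0 terminates the loop). */
static uint32_t fake_bios_e820(uint32_t *next, struct e820entry *desc)
{
	static const struct e820entry map[] = {
		{ 0x00000000, 0x0009fc00, 1 },
		{ 0x00100000, 0x3fe00000, 1 },
		{ 0xfff00000, 0x00100000, 2 },
	};
	*desc = map[*next];
	*next = (*next + 1 < 3) ? *next + 1 : 0;
	return SMAP;              /* a buggy BIOS would stop returning this */
}

static int detect_memory_e820(struct e820entry *table)
{
	int count = 0;
	uint32_t next = 0, id;

	do {
		id = fake_bios_e820(&next, &table[count]);
		if (id != SMAP) { /* partial or garbage map: report failure */
			count = 0;
			break;
		}
		count++;
	} while (next && count < E820MAX);

	return count;
}

int main(void)
{
	struct e820entry table[E820MAX];
	int n = detect_memory_e820(table);

	for (int i = 0; i < n; i++)
		printf("e820: %#018llx - %#018llx type %u\n",
		       (unsigned long long)table[i].addr,
		       (unsigned long long)(table[i].addr + table[i].size),
		       table[i].type);
	return 0;
}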
diff --git a/arch/i386/xen/mmu.c b/arch/i386/xen/mmu.c
index 4ae038aa6c2..874db0cd1d2 100644
--- a/arch/i386/xen/mmu.c
+++ b/arch/i386/xen/mmu.c
@@ -559,6 +559,9 @@ void xen_exit_mmap(struct mm_struct *mm)
put_cpu();
spin_lock(&mm->page_table_lock);
- xen_pgd_unpin(mm->pgd);
+
+ /* pgd may not be pinned in the error exit path of execve */
+ if (PagePinned(virt_to_page(mm->pgd)))
+ xen_pgd_unpin(mm->pgd);
spin_unlock(&mm->page_table_lock);
}
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b3ed731a24c..dd68afce7da 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -525,5 +525,5 @@ sys_call_table:
PTR compat_sys_signalfd
PTR compat_sys_timerfd
PTR sys_eventfd
- PTR sys_fallocate /* 4320 */
+ PTR sys32_fallocate /* 4320 */
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 60bbaecde18..087ab997487 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -45,6 +45,8 @@ SECTIONS
__dbe_table : { *(__dbe_table) }
__stop___dbe_table = .;
+ NOTES
+
RODATA
/* writeable */
diff --git a/arch/mips/sgi-ip32/ip32-platform.c b/arch/mips/sgi-ip32/ip32-platform.c
index ba3697ee7ff..7309e48d163 100644
--- a/arch/mips/sgi-ip32/ip32-platform.c
+++ b/arch/mips/sgi-ip32/ip32-platform.c
@@ -41,8 +41,8 @@ static struct platform_device uart8250_device = {
static int __init uart8250_init(void)
{
- uart8250_data[0].iobase = (unsigned long) &mace->isa.serial1;
- uart8250_data[1].iobase = (unsigned long) &mace->isa.serial1;
+ uart8250_data[0].membase = (void __iomem *) &mace->isa.serial1;
+ uart8250_data[1].membase = (void __iomem *) &mace->isa.serial1;
return platform_device_register(&uart8250_device);
}
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 502f47c0179..44c065a6b5e 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -99,6 +99,7 @@
#size-cells = <0>;
interrupt-parent = < &ipic >;
interrupts = <26 8>;
+ dr_mode = "peripheral";
phy_type = "ulpi";
};
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e477c9d0498..8a1b001d0b1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -605,6 +605,13 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
regs->ccr = 0;
regs->gpr[1] = sp;
+ /*
+ * We have just cleared all the nonvolatile GPRs, so make
+ * FULL_REGS(regs) return true. This is necessary to allow
+ * ptrace to examine the thread immediately after exec.
+ */
+ regs->trap &= ~1UL;
+
#ifdef CONFIG_PPC32
regs->mq = 0;
regs->nip = start;
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index e7fdf013cd3..eafe7605cda 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -76,14 +76,14 @@ int mpc834x_usb_cfg(void)
if (port0_is_dr)
printk(KERN_WARNING
"834x USB port0 can't be used by both DR and MPH!\n");
- sicrl |= MPC834X_SICRL_USB0;
+ sicrl &= ~MPC834X_SICRL_USB0;
}
prop = of_get_property(np, "port1", NULL);
if (prop) {
if (port1_is_dr)
printk(KERN_WARNING
"834x USB port1 can't be used by both DR and MPH!\n");
- sicrl |= MPC834X_SICRL_USB1;
+ sicrl &= ~MPC834X_SICRL_USB1;
}
of_node_put(np);
}
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 4100ddc52f0..7de4e919687 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -2177,8 +2177,8 @@ struct tree_descr spufs_dir_contents[] = {
{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
- { "signal1", &spufs_signal1_nosched_fops, 0222, },
- { "signal2", &spufs_signal2_nosched_fops, 0222, },
+ { "signal1", &spufs_signal1_fops, 0666, },
+ { "signal2", &spufs_signal2_fops, 0666, },
{ "signal1_type", &spufs_signal1_type, 0666, },
{ "signal2_type", &spufs_signal2_type, 0666, },
{ "cntl", &spufs_cntl_fops, 0666, },
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 5bd90a7eb76..f0b5ff17d86 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -419,7 +419,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
* For the moment only implement delivery to all cpus or one cpu.
* Get current irq_server for the given irq
*/
- irq_server = get_irq_server(irq, 1);
+ irq_server = get_irq_server(virq, 1);
if (irq_server == -1) {
char cpulist[128];
cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
diff --git a/arch/powerpc/sysdev/commproc.c b/arch/powerpc/sysdev/commproc.c
index 4f67b89ba1d..dd5417aec1b 100644
--- a/arch/powerpc/sysdev/commproc.c
+++ b/arch/powerpc/sysdev/commproc.c
@@ -395,4 +395,4 @@ uint cpm_dpram_phys(u8* addr)
{
return (dpram_pbase + (uint)(addr - dpram_vbase));
}
-EXPORT_SYMBOL(cpm_dpram_addr);
+EXPORT_SYMBOL(cpm_dpram_phys);
diff --git a/arch/ppc/8xx_io/commproc.c b/arch/ppc/8xx_io/commproc.c
index 7088428e1fe..9da880be4dc 100644
--- a/arch/ppc/8xx_io/commproc.c
+++ b/arch/ppc/8xx_io/commproc.c
@@ -459,7 +459,7 @@ EXPORT_SYMBOL(cpm_dpdump);
void *cpm_dpram_addr(unsigned long offset)
{
- return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset;
+ return (void *)(dpram_vbase + offset);
}
EXPORT_SYMBOL(cpm_dpram_addr);
diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c
index e2d02fd13f3..d850785b208 100644
--- a/arch/sparc/kernel/ebus.c
+++ b/arch/sparc/kernel/ebus.c
@@ -156,6 +156,8 @@ void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *d
dev->prom_node = dp;
regs = of_get_property(dp, "reg", &len);
+ if (!regs)
+ len = 0;
if (len % sizeof(struct linux_prom_registers)) {
prom_printf("UGH: proplen for %s was %d, need multiple of %d\n",
dev->prom_node->name, len,
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
index f205fc7cbcd..d208cc7804f 100644
--- a/arch/sparc64/kernel/binfmt_aout32.c
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -177,7 +177,7 @@ static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bpr
get_user(c,p++);
} while (c);
}
- put_user(NULL,argv);
+ put_user(0,argv);
current->mm->arg_end = current->mm->env_start = (unsigned long) p;
while (envc-->0) {
char c;
@@ -186,7 +186,7 @@ static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bpr
get_user(c,p++);
} while (c);
}
- put_user(NULL,envp);
+ put_user(0,envp);
current->mm->env_end = (unsigned long) p;
return sp;
}
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index bc9ae36f7a4..04ab81cb4f4 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -375,7 +375,10 @@ static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_de
dev->num_addrs = 0;
dev->num_irqs = 0;
} else {
- (void) of_get_property(dp, "reg", &len);
+ const int *regs = of_get_property(dp, "reg", &len);
+
+ if (!regs)
+ len = 0;
dev->num_addrs = len / sizeof(struct linux_prom_registers);
for (i = 0; i < dev->num_addrs; i++)
diff --git a/arch/sparc64/lib/NGcopy_from_user.S b/arch/sparc64/lib/NGcopy_from_user.S
index 2d93456f76d..e7f433f71b4 100644
--- a/arch/sparc64/lib/NGcopy_from_user.S
+++ b/arch/sparc64/lib/NGcopy_from_user.S
@@ -1,6 +1,6 @@
/* NGcopy_from_user.S: Niagara optimized copy from userspace.
*
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
#define EX_LD(x) \
@@ -8,8 +8,8 @@
.section .fixup; \
.align 4; \
99: wr %g0, ASI_AIUS, %asi;\
- retl; \
- mov 1, %o0; \
+ ret; \
+ restore %g0, 1, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
@@ -24,7 +24,7 @@
#define LOAD(type,addr,dest) type##a [addr] ASI_AIUS, dest
#define LOAD_TWIN(addr_reg,dest0,dest1) \
ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0
-#define EX_RETVAL(x) 0
+#define EX_RETVAL(x) %g0
#ifdef __KERNEL__
#define PREAMBLE \
diff --git a/arch/sparc64/lib/NGcopy_to_user.S b/arch/sparc64/lib/NGcopy_to_user.S
index 34112d5054e..6ea01c5532a 100644
--- a/arch/sparc64/lib/NGcopy_to_user.S
+++ b/arch/sparc64/lib/NGcopy_to_user.S
@@ -1,6 +1,6 @@
/* NGcopy_to_user.S: Niagara optimized copy to userspace.
*
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
#define EX_ST(x) \
@@ -8,8 +8,8 @@
.section .fixup; \
.align 4; \
99: wr %g0, ASI_AIUS, %asi;\
- retl; \
- mov 1, %o0; \
+ ret; \
+ restore %g0, 1, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
@@ -23,7 +23,7 @@
#define FUNC_NAME NGcopy_to_user
#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS
-#define EX_RETVAL(x) 0
+#define EX_RETVAL(x) %g0
#ifdef __KERNEL__
/* Writing to %asi is _expensive_ so we hardcode it.
diff --git a/arch/sparc64/lib/NGmemcpy.S b/arch/sparc64/lib/NGmemcpy.S
index 66063a9a66b..605cb3f0990 100644
--- a/arch/sparc64/lib/NGmemcpy.S
+++ b/arch/sparc64/lib/NGmemcpy.S
@@ -1,6 +1,6 @@
/* NGmemcpy.S: Niagara optimized memcpy.
*
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
#ifdef __KERNEL__
@@ -16,6 +16,12 @@
wr %g0, ASI_PNF, %asi
#endif
+#ifdef __sparc_v9__
+#define SAVE_AMOUNT 128
+#else
+#define SAVE_AMOUNT 64
+#endif
+
#ifndef STORE_ASI
#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P
#endif
@@ -50,7 +56,11 @@
#endif
#ifndef STORE_INIT
+#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
#define STORE_INIT(src,addr) stxa src, [addr] %asi
+#else
+#define STORE_INIT(src,addr) stx src, [addr + 0x00]
+#endif
#endif
#ifndef FUNC_NAME
@@ -73,18 +83,19 @@
.globl FUNC_NAME
.type FUNC_NAME,#function
-FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
- srlx %o2, 31, %g2
+FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
+ PREAMBLE
+ save %sp, -SAVE_AMOUNT, %sp
+ srlx %i2, 31, %g2
cmp %g2, 0
tne %xcc, 5
- PREAMBLE
- mov %o0, GLOBAL_SPARE
- cmp %o2, 0
+ mov %i0, %o0
+ cmp %i2, 0
be,pn %XCC, 85f
- or %o0, %o1, %o3
- cmp %o2, 16
+ or %o0, %i1, %i3
+ cmp %i2, 16
blu,a,pn %XCC, 80f
- or %o3, %o2, %o3
+ or %i3, %i2, %i3
/* 2 blocks (128 bytes) is the minimum we can do the block
* copy with. We need to ensure that we'll iterate at least
@@ -93,31 +104,31 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
* to (64 - 1) bytes from the length before we perform the
* block copy loop.
*/
- cmp %o2, (2 * 64)
+ cmp %i2, (2 * 64)
blu,pt %XCC, 70f
- andcc %o3, 0x7, %g0
+ andcc %i3, 0x7, %g0
/* %o0: dst
- * %o1: src
- * %o2: len (known to be >= 128)
+ * %i1: src
+ * %i2: len (known to be >= 128)
*
- * The block copy loops will use %o4/%o5,%g2/%g3 as
+ * The block copy loops will use %i4/%i5,%g2/%g3 as
* temporaries while copying the data.
*/
- LOAD(prefetch, %o1, #one_read)
+ LOAD(prefetch, %i1, #one_read)
wr %g0, STORE_ASI, %asi
/* Align destination on 64-byte boundary. */
- andcc %o0, (64 - 1), %o4
+ andcc %o0, (64 - 1), %i4
be,pt %XCC, 2f
- sub %o4, 64, %o4
- sub %g0, %o4, %o4 ! bytes to align dst
- sub %o2, %o4, %o2
-1: subcc %o4, 1, %o4
- EX_LD(LOAD(ldub, %o1, %g1))
+ sub %i4, 64, %i4
+ sub %g0, %i4, %i4 ! bytes to align dst
+ sub %i2, %i4, %i2
+1: subcc %i4, 1, %i4
+ EX_LD(LOAD(ldub, %i1, %g1))
EX_ST(STORE(stb, %g1, %o0))
- add %o1, 1, %o1
+ add %i1, 1, %i1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -136,111 +147,155 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
* aligned store data at a time, this is easy to ensure.
*/
2:
- andcc %o1, (16 - 1), %o4
- andn %o2, (64 - 1), %g1 ! block copy loop iterator
- sub %o2, %g1, %o2 ! final sub-block copy bytes
+ andcc %i1, (16 - 1), %i4
+ andn %i2, (64 - 1), %g1 ! block copy loop iterator
be,pt %XCC, 50f
- cmp %o4, 8
- be,a,pt %XCC, 10f
- sub %o1, 0x8, %o1
+ sub %i2, %g1, %i2 ! final sub-block copy bytes
+
+ cmp %i4, 8
+ be,pt %XCC, 10f
+ sub %i1, %i4, %i1
/* Neither 8-byte nor 16-byte aligned, shift and mask. */
- mov %g1, %o4
- and %o1, 0x7, %g1
- sll %g1, 3, %g1
- mov 64, %o3
- andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
- sub %o3, %g1, %o3
- sllx %g2, %g1, %g2
+ and %i4, 0x7, GLOBAL_SPARE
+ sll GLOBAL_SPARE, 3, GLOBAL_SPARE
+ mov 64, %i5
+ EX_LD(LOAD_TWIN(%i1, %g2, %g3))
+ sub %i5, GLOBAL_SPARE, %i5
+ mov 16, %o4
+ mov 32, %o5
+ mov 48, %o7
+ mov 64, %i3
+
+ bg,pn %XCC, 9f
+ nop
-#define SWIVEL_ONE_DWORD(SRC, TMP1, TMP2, PRE_VAL, PRE_SHIFT, POST_SHIFT, DST)\
- EX_LD(LOAD(ldx, SRC, TMP1)); \
- srlx TMP1, PRE_SHIFT, TMP2; \
- or TMP2, PRE_VAL, TMP2; \
- EX_ST(STORE_INIT(TMP2, DST)); \
- sllx TMP1, POST_SHIFT, PRE_VAL;
-
-1: add %o1, 0x8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x00)
- add %o1, 0x8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x08)
- add %o1, 0x8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x10)
- add %o1, 0x8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x18)
- add %o1, 32, %o1
- LOAD(prefetch, %o1, #one_read)
- sub %o1, 32 - 8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x20)
- add %o1, 8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x28)
- add %o1, 8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x30)
- add %o1, 8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x38)
- subcc %o4, 64, %o4
- bne,pt %XCC, 1b
+#define MIX_THREE_WORDS(WORD1, WORD2, WORD3, PRE_SHIFT, POST_SHIFT, TMP) \
+ sllx WORD1, POST_SHIFT, WORD1; \
+ srlx WORD2, PRE_SHIFT, TMP; \
+ sllx WORD2, POST_SHIFT, WORD2; \
+ or WORD1, TMP, WORD1; \
+ srlx WORD3, PRE_SHIFT, TMP; \
+ or WORD2, TMP, WORD2;
+
+8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+ MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
+ LOAD(prefetch, %i1 + %i3, #one_read)
+
+ EX_ST(STORE_INIT(%g2, %o0 + 0x00))
+ EX_ST(STORE_INIT(%g3, %o0 + 0x08))
+
+ EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+ MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
+
+ EX_ST(STORE_INIT(%o2, %o0 + 0x10))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
+
+ EX_ST(STORE_INIT(%g2, %o0 + 0x20))
+ EX_ST(STORE_INIT(%g3, %o0 + 0x28))
+
+ EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+ add %i1, 64, %i1
+ MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
+
+ EX_ST(STORE_INIT(%o2, %o0 + 0x30))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+
+ subcc %g1, 64, %g1
+ bne,pt %XCC, 8b
add %o0, 64, %o0
-#undef SWIVEL_ONE_DWORD
+ ba,pt %XCC, 60f
+ add %i1, %i4, %i1
+
+9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+ MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
+ LOAD(prefetch, %i1 + %i3, #one_read)
+
+ EX_ST(STORE_INIT(%g3, %o0 + 0x00))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+
+ EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+ MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
+
+ EX_ST(STORE_INIT(%o3, %o0 + 0x10))
+ EX_ST(STORE_INIT(%g2, %o0 + 0x18))
+
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
+
+ EX_ST(STORE_INIT(%g3, %o0 + 0x20))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+
+ EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+ add %i1, 64, %i1
+ MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
+
+ EX_ST(STORE_INIT(%o3, %o0 + 0x30))
+ EX_ST(STORE_INIT(%g2, %o0 + 0x38))
+
+ subcc %g1, 64, %g1
+ bne,pt %XCC, 9b
+ add %o0, 64, %o0
- srl %g1, 3, %g1
ba,pt %XCC, 60f
- add %o1, %g1, %o1
+ add %i1, %i4, %i1
10: /* Destination is 64-byte aligned, source was only 8-byte
* aligned but it has been subtracted by 8 and we perform
* one twin load ahead, then add 8 back into source when
* we finish the loop.
*/
- EX_LD(LOAD_TWIN(%o1, %o4, %o5))
-1: add %o1, 16, %o1
- EX_LD(LOAD_TWIN(%o1, %g2, %g3))
- add %o1, 16 + 32, %o1
- LOAD(prefetch, %o1, #one_read)
- sub %o1, 32, %o1
+ EX_LD(LOAD_TWIN(%i1, %o4, %o5))
+ mov 16, %o7
+ mov 32, %g2
+ mov 48, %g3
+ mov 64, %o1
+1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ LOAD(prefetch, %i1 + %o1, #one_read)
EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line
- EX_ST(STORE_INIT(%g2, %o0 + 0x08))
- EX_LD(LOAD_TWIN(%o1, %o4, %o5))
- add %o1, 16, %o1
- EX_ST(STORE_INIT(%g3, %o0 + 0x10))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+ EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x10))
EX_ST(STORE_INIT(%o4, %o0 + 0x18))
- EX_LD(LOAD_TWIN(%o1, %g2, %g3))
- add %o1, 16, %o1
+ EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
EX_ST(STORE_INIT(%o5, %o0 + 0x20))
- EX_ST(STORE_INIT(%g2, %o0 + 0x28))
- EX_LD(LOAD_TWIN(%o1, %o4, %o5))
- EX_ST(STORE_INIT(%g3, %o0 + 0x30))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+ EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5))
+ add %i1, 64, %i1
+ EX_ST(STORE_INIT(%o3, %o0 + 0x30))
EX_ST(STORE_INIT(%o4, %o0 + 0x38))
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
ba,pt %XCC, 60f
- add %o1, 0x8, %o1
+ add %i1, 0x8, %i1
50: /* Destination is 64-byte aligned, and source is 16-byte
* aligned.
*/
-1: EX_LD(LOAD_TWIN(%o1, %o4, %o5))
- add %o1, 16, %o1
- EX_LD(LOAD_TWIN(%o1, %g2, %g3))
- add %o1, 16 + 32, %o1
- LOAD(prefetch, %o1, #one_read)
- sub %o1, 32, %o1
+ mov 16, %o7
+ mov 32, %g2
+ mov 48, %g3
+ mov 64, %o1
+1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5))
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ LOAD(prefetch, %i1 + %o1, #one_read)
EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line
EX_ST(STORE_INIT(%o5, %o0 + 0x08))
- EX_LD(LOAD_TWIN(%o1, %o4, %o5))
- add %o1, 16, %o1
- EX_ST(STORE_INIT(%g2, %o0 + 0x10))
- EX_ST(STORE_INIT(%g3, %o0 + 0x18))
- EX_LD(LOAD_TWIN(%o1, %g2, %g3))
- add %o1, 16, %o1
+ EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x10))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+ EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
+ add %i1, 64, %i1
EX_ST(STORE_INIT(%o4, %o0 + 0x20))
EX_ST(STORE_INIT(%o5, %o0 + 0x28))
- EX_ST(STORE_INIT(%g2, %o0 + 0x30))
- EX_ST(STORE_INIT(%g3, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x30))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x38))
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
@@ -249,47 +304,47 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
60:
membar #Sync
- /* %o2 contains any final bytes still needed to be copied
+ /* %i2 contains any final bytes still needed to be copied
* over. If anything is left, we copy it one byte at a time.
*/
- RESTORE_ASI(%o3)
- brz,pt %o2, 85f
- sub %o0, %o1, %o3
+ RESTORE_ASI(%i3)
+ brz,pt %i2, 85f
+ sub %o0, %i1, %i3
ba,a,pt %XCC, 90f
.align 64
70: /* 16 < len <= 64 */
bne,pn %XCC, 75f
- sub %o0, %o1, %o3
+ sub %o0, %i1, %i3
72:
- andn %o2, 0xf, %o4
- and %o2, 0xf, %o2
-1: subcc %o4, 0x10, %o4
- EX_LD(LOAD(ldx, %o1, %o5))
- add %o1, 0x08, %o1
- EX_LD(LOAD(ldx, %o1, %g1))
- sub %o1, 0x08, %o1
- EX_ST(STORE(stx, %o5, %o1 + %o3))
- add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + %o3))
+ andn %i2, 0xf, %i4
+ and %i2, 0xf, %i2
+1: subcc %i4, 0x10, %i4
+ EX_LD(LOAD(ldx, %i1, %i5))
+ add %i1, 0x08, %i1
+ EX_LD(LOAD(ldx, %i1, %g1))
+ sub %i1, 0x08, %i1
+ EX_ST(STORE(stx, %i5, %i1 + %i3))
+ add %i1, 0x8, %i1
+ EX_ST(STORE(stx, %g1, %i1 + %i3))
bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
-73: andcc %o2, 0x8, %g0
+ add %i1, 0x8, %i1
+73: andcc %i2, 0x8, %g0
be,pt %XCC, 1f
nop
- sub %o2, 0x8, %o2
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
- add %o1, 0x8, %o1
-1: andcc %o2, 0x4, %g0
+ sub %i2, 0x8, %i2
+ EX_LD(LOAD(ldx, %i1, %i5))
+ EX_ST(STORE(stx, %i5, %i1 + %i3))
+ add %i1, 0x8, %i1
+1: andcc %i2, 0x4, %g0
be,pt %XCC, 1f
nop
- sub %o2, 0x4, %o2
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + %o3))
- add %o1, 0x4, %o1
-1: cmp %o2, 0
+ sub %i2, 0x4, %i2
+ EX_LD(LOAD(lduw, %i1, %i5))
+ EX_ST(STORE(stw, %i5, %i1 + %i3))
+ add %i1, 0x4, %i1
+1: cmp %i2, 0
be,pt %XCC, 85f
nop
ba,pt %xcc, 90f
@@ -300,71 +355,71 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g1, 0x8, %g1
be,pn %icc, 2f
sub %g0, %g1, %g1
- sub %o2, %g1, %o2
+ sub %i2, %g1, %i2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %o1, %o5))
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldub, %i1, %i5))
+ EX_ST(STORE(stb, %i5, %i1 + %i3))
bgu,pt %icc, 1b
- add %o1, 1, %o1
+ add %i1, 1, %i1
-2: add %o1, %o3, %o0
- andcc %o1, 0x7, %g1
+2: add %i1, %i3, %o0
+ andcc %i1, 0x7, %g1
bne,pt %icc, 8f
sll %g1, 3, %g1
- cmp %o2, 16
+ cmp %i2, 16
bgeu,pt %icc, 72b
nop
ba,a,pt %xcc, 73b
-8: mov 64, %o3
- andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
- sub %o3, %g1, %o3
- andn %o2, 0x7, %o4
+8: mov 64, %i3
+ andn %i1, 0x7, %i1
+ EX_LD(LOAD(ldx, %i1, %g2))
+ sub %i3, %g1, %i3
+ andn %i2, 0x7, %i4
sllx %g2, %g1, %g2
-1: add %o1, 0x8, %o1
- EX_LD(LOAD(ldx, %o1, %g3))
- subcc %o4, 0x8, %o4
- srlx %g3, %o3, %o5
- or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+1: add %i1, 0x8, %i1
+ EX_LD(LOAD(ldx, %i1, %g3))
+ subcc %i4, 0x8, %i4
+ srlx %g3, %i3, %i5
+ or %i5, %g2, %i5
+ EX_ST(STORE(stx, %i5, %o0))
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
srl %g1, 3, %g1
- andcc %o2, 0x7, %o2
+ andcc %i2, 0x7, %i2
be,pn %icc, 85f
- add %o1, %g1, %o1
+ add %i1, %g1, %i1
ba,pt %xcc, 90f
- sub %o0, %o1, %o3
+ sub %o0, %i1, %i3
.align 64
80: /* 0 < len <= 16 */
- andcc %o3, 0x3, %g0
+ andcc %i3, 0x3, %g0
bne,pn %XCC, 90f
- sub %o0, %o1, %o3
+ sub %o0, %i1, %i3
1:
- subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ subcc %i2, 4, %i2
+ EX_LD(LOAD(lduw, %i1, %g1))
+ EX_ST(STORE(stw, %g1, %i1 + %i3))
bgu,pt %XCC, 1b
- add %o1, 4, %o1
+ add %i1, 4, %i1
-85: retl
- mov EX_RETVAL(GLOBAL_SPARE), %o0
+85: ret
+ restore EX_RETVAL(%i0), %g0, %o0
.align 32
90:
- subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ subcc %i2, 1, %i2
+ EX_LD(LOAD(ldub, %i1, %g1))
+ EX_ST(STORE(stb, %g1, %i1 + %i3))
bgu,pt %XCC, 90b
- add %o1, 1, %o1
- retl
- mov EX_RETVAL(GLOBAL_SPARE), %o0
+ add %i1, 1, %i1
+ ret
+ restore EX_RETVAL(%i0), %g0, %o0
.size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/x86_64/vdso/voffset.h b/arch/x86_64/vdso/voffset.h
index 5304204911f..4af67c79085 100644
--- a/arch/x86_64/vdso/voffset.h
+++ b/arch/x86_64/vdso/voffset.h
@@ -1 +1 @@
-#define VDSO_TEXT_OFFSET 0x500
+#define VDSO_TEXT_OFFSET 0x600
diff --git a/drivers/acpi/sleep/Makefile b/drivers/acpi/sleep/Makefile
index ba9bd403d44..f1fb888c2d2 100644
--- a/drivers/acpi/sleep/Makefile
+++ b/drivers/acpi/sleep/Makefile
@@ -1,5 +1,5 @@
obj-y := wakeup.o
-obj-$(CONFIG_ACPI_SLEEP) += main.o
+obj-y += main.o
obj-$(CONFIG_ACPI_SLEEP) += proc.o
EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 85633c585aa..2cbb9aabd00 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -24,7 +24,30 @@
u8 sleep_states[ACPI_S_STATE_COUNT];
+#ifdef CONFIG_PM_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+#endif
+
+int acpi_sleep_prepare(u32 acpi_state)
+{
+#ifdef CONFIG_ACPI_SLEEP
+ /* do we have a wakeup address for S2 and S3? */
+ if (acpi_state == ACPI_STATE_S3) {
+ if (!acpi_wakeup_address) {
+ return -EFAULT;
+ }
+ acpi_set_firmware_waking_vector((acpi_physical_address)
+ virt_to_phys((void *)
+ acpi_wakeup_address));
+
+ }
+ ACPI_FLUSH_CPU_CACHE();
+ acpi_enable_wakeup_device_prep(acpi_state);
+#endif
+ acpi_gpe_sleep_prepare(acpi_state);
+ acpi_enter_sleep_state_prep(acpi_state);
+ return 0;
+}
#ifdef CONFIG_SUSPEND
static struct pm_ops acpi_pm_ops;
@@ -60,27 +83,6 @@ static int acpi_pm_set_target(suspend_state_t pm_state)
return error;
}
-int acpi_sleep_prepare(u32 acpi_state)
-{
-#ifdef CONFIG_ACPI_SLEEP
- /* do we have a wakeup address for S2 and S3? */
- if (acpi_state == ACPI_STATE_S3) {
- if (!acpi_wakeup_address) {
- return -EFAULT;
- }
- acpi_set_firmware_waking_vector((acpi_physical_address)
- virt_to_phys((void *)
- acpi_wakeup_address));
-
- }
- ACPI_FLUSH_CPU_CACHE();
- acpi_enable_wakeup_device_prep(acpi_state);
-#endif
- acpi_gpe_sleep_prepare(acpi_state);
- acpi_enter_sleep_state_prep(acpi_state);
- return 0;
-}
-
/**
* acpi_pm_prepare - Do preliminary suspend work.
* @pm_state: ignored
@@ -299,6 +301,7 @@ int acpi_suspend(u32 acpi_state)
return -EINVAL;
}
+#ifdef CONFIG_PM_SLEEP
/**
* acpi_pm_device_sleep_state - return preferred power state of ACPI device
* in the system sleep state given by %acpi_target_sleep_state
@@ -373,6 +376,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p)
*d_min_p = d_min;
return d_max;
}
+#endif
static void acpi_power_off_prepare(void)
{
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3b8bf1812dc..6996eb5b750 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -921,6 +921,13 @@ static int piix_broken_suspend(void)
{
static struct dmi_system_id sysids[] = {
{
+ .ident = "TECRA M3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"),
+ },
+ },
+ {
.ident = "TECRA M5",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 2bd7645f1a8..cce2834b2b6 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -375,8 +375,9 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
int drive_pci = sis_old_port_base(adev);
u16 timing;
+ /* MWDMA 0-2 and UDMA 0-5 */
const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
- const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000};
+ const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
pci_read_config_word(pdev, drive_pci, &timing);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index ef83e6b1e31..233e8869339 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -888,6 +888,16 @@ static inline void sil24_host_intr(struct ata_port *ap)
u32 slot_stat, qc_active;
int rc;
+ /* If PCIX_IRQ_WOC, there's an inherent race window between
+ * clearing IRQ pending status and reading PORT_SLOT_STAT
+ * which may cause spurious interrupts afterwards. This is
+ * unavoidable and much better than losing interrupts which
+ * happens if IRQ pending is cleared after reading
+ * PORT_SLOT_STAT.
+ */
+ if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
+ writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
+
slot_stat = readl(port + PORT_SLOT_STAT);
if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
@@ -895,9 +905,6 @@ static inline void sil24_host_intr(struct ata_port *ap)
return;
}
- if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
- writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
-
qc_active = slot_stat & ~HOST_SSTAT_ATTN;
rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
if (rc > 0)
@@ -910,7 +917,8 @@ static inline void sil24_host_intr(struct ata_port *ap)
return;
}
- if (ata_ratelimit())
+ /* spurious interrupts are expected if PCIX_IRQ_WOC */
+ if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
ata_port_printk(ap, KERN_INFO, "spurious interrupt "
"(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
slot_stat, ap->active_tag, ap->sactive);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 6de33d7a29b..67c92582d6e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -284,6 +284,7 @@ static ssize_t show_uevent(struct device *dev, struct device_attribute *attr,
/* let the kset specific function add its keys */
pos = data;
+ memset(envp, 0, sizeof(envp));
retval = kset->uevent_ops->uevent(kset, &dev->kobj,
envp, ARRAY_SIZE(envp),
pos, PAGE_SIZE);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 67ee3d4b287..79245714f0a 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1032,6 +1032,10 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
check_disk_change(ip->i_bdev);
return 0;
err_release:
+ if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
+ cdi->ops->lock_door(cdi, 0);
+ cdinfo(CD_OPEN, "door unlocked.\n");
+ }
cdi->ops->release(cdi);
err:
cdi->use_count--;
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 737088bd078..28b98733beb 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -210,6 +210,12 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define I915REG_INT_MASK_R 0x020a8
#define I915REG_INT_ENABLE_R 0x020a0
+#define I915REG_PIPEASTAT 0x70024
+#define I915REG_PIPEBSTAT 0x71024
+
+#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define I915_VBLANK_CLEAR (1UL<<1)
+
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
#define SR01 1
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index 4b4b2ce8986..bb8e9e9c820 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -214,6 +214,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u16 temp;
+ u32 pipea_stats, pipeb_stats;
+
+ pipea_stats = I915_READ(I915REG_PIPEASTAT);
+ pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
temp = I915_READ16(I915REG_INT_IDENTITY_R);
@@ -225,6 +229,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+ (void) I915_READ16(I915REG_INT_IDENTITY_R);
+ DRM_READMEMORYBARRIER();
dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
@@ -252,6 +258,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if (dev_priv->swaps_pending > 0)
drm_locked_tasklet(dev, i915_vblank_tasklet);
+ I915_WRITE(I915REG_PIPEASTAT,
+ pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
+ I915_VBLANK_CLEAR);
+ I915_WRITE(I915REG_PIPEBSTAT,
+ pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
+ I915_VBLANK_CLEAR);
}
return IRQ_HANDLED;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 7ecffc9c738..4c16778e3f8 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -62,6 +62,8 @@
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
+/* This clocksource driver currently only works on ia64 */
+#ifdef CONFIG_IA64
static void __iomem *hpet_mctr;
static cycle_t read_hpet(void)
@@ -79,6 +81,7 @@ static struct clocksource clocksource_hpet = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *hpet_clocksource;
+#endif
/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);
@@ -943,14 +946,14 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
printk(KERN_DEBUG "%s: 0x%lx is busy\n",
__FUNCTION__, hdp->hd_phys_address);
iounmap(hdp->hd_address);
- return -EBUSY;
+ return AE_ALREADY_EXISTS;
}
} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
struct acpi_resource_fixed_memory32 *fixmem32;
fixmem32 = &res->data.fixed_memory32;
if (!fixmem32)
- return -EINVAL;
+ return AE_NO_MEMORY;
hdp->hd_phys_address = fixmem32->address;
hdp->hd_address = ioremap(fixmem32->address,
@@ -960,7 +963,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
printk(KERN_DEBUG "%s: 0x%lx is busy\n",
__FUNCTION__, hdp->hd_phys_address);
iounmap(hdp->hd_address);
- return -EBUSY;
+ return AE_ALREADY_EXISTS;
}
} else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
struct acpi_resource_extended_irq *irqp;
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 049a46cc9f8..04ac155d3a0 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -155,23 +155,22 @@ mspec_open(struct vm_area_struct *vma)
* mspec_close
*
* Called when unmapping a device mapping. Frees all mspec pages
- * belonging to the vma.
+ * belonging to all the vma's sharing this vma_data structure.
*/
static void
mspec_close(struct vm_area_struct *vma)
{
struct vma_data *vdata;
- int index, last_index, result;
+ int index, last_index;
unsigned long my_page;
vdata = vma->vm_private_data;
- BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
+ if (!atomic_dec_and_test(&vdata->refcnt))
+ return;
- spin_lock(&vdata->lock);
- index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
- last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
- for (; index < last_index; index++) {
+ last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+ for (index = 0; index < last_index; index++) {
if (vdata->maddr[index] == 0)
continue;
/*
@@ -180,20 +179,12 @@ mspec_close(struct vm_area_struct *vma)
*/
my_page = vdata->maddr[index];
vdata->maddr[index] = 0;
- spin_unlock(&vdata->lock);
- result = mspec_zero_block(my_page, PAGE_SIZE);
- if (!result)
+ if (!mspec_zero_block(my_page, PAGE_SIZE))
uncached_free_page(my_page);
else
printk(KERN_WARNING "mspec_close(): "
- "failed to zero page %i\n",
- result);
- spin_lock(&vdata->lock);
+ "failed to zero page %ld\n", my_page);
}
- spin_unlock(&vdata->lock);
-
- if (!atomic_dec_and_test(&vdata->refcnt))
- return;
if (vdata->flags & VMD_VMALLOCED)
vfree(vdata);
@@ -201,7 +192,6 @@ mspec_close(struct vm_area_struct *vma)
kfree(vdata);
}
-
/*
* mspec_nopfn
*
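
The mspec.c hunk above turns close into a last-reference teardown: every unmap drops the shared refcount, and only the final one walks the page array and frees the vma_data, so the per-vma index arithmetic and the lock juggling around mspec_zero_block() go away. A small userspace sketch of that shape, with invented names and C11 atomics standing in for atomic_dec_and_test():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct vma_data {
	atomic_int refcnt;        /* one reference per sharing vma */
	int        npages;
};

static void mspec_close_sketch(struct vma_data *vdata)
{
	/* Not the last closer: nothing to tear down yet. */
	if (atomic_fetch_sub(&vdata->refcnt, 1) != 1)
		return;

	/* Last reference gone: walk the whole range once and free it. */
	printf("last close: scanning %d pages and freeing vma_data\n",
	       vdata->npages);
	free(vdata);
}

int main(void)
{
	struct vma_data *vdata = malloc(sizeof(*vdata));

	atomic_init(&vdata->refcnt, 3);   /* e.g. three mappings share it */
	vdata->npages = 16;

	mspec_close_sketch(vdata);        /* no-op */
	mspec_close_sketch(vdata);        /* no-op */
	mspec_close_sketch(vdata);        /* frees here */
	return 0;
}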
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 397c714cf2b..af274e5a25e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1550,11 +1550,13 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
* As close as possible to RFC 793, which
* suggests using a 250 kHz clock.
* Further reading shows this assumes 2 Mb/s networks.
- * For 10 Gb/s Ethernet, a 1 GHz clock is appropriate.
- * That's funny, Linux has one built in! Use it!
- * (Networks are faster now - should this be increased?)
+ * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
+ * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
+ * we also need to limit the resolution so that the u32 seq
+ * overlaps less than one time per MSL (2 minutes).
+ * Choosing a clock of 64 ns period is OK. (period of 274 s)
*/
- seq += ktime_get_real().tv64;
+ seq += ktime_get_real().tv64 >> 6;
#if 0
printk("init_seq(%lx, %lx, %d, %d) = %d\n",
saddr, daddr, sport, dport, seq);
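
The 64 ns / 274 s figures in the new comment are easy to sanity-check: shifting a nanosecond ktime right by 6 gives a 64 ns tick, and a 32-bit sequence space of such ticks wraps in roughly 275 seconds, comfortably longer than one MSL. A throwaway calculation (not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* ktime >> 6 gives a 2^6 = 64 ns tick; a u32 of such ticks wraps in: */
	double wrap_s = (double)UINT32_MAX * 64.0 / 1e9;

	printf("u32 sequence space wraps every %.1f s\n", wrap_s); /* ~274.9 s */
	return 0;
}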
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index c6f6f420973..c799b7f7bbb 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -770,6 +770,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
/*
* Switching-from response
*/
+ acquire_console_sem();
if (vc->vt_newvt >= 0) {
if (arg == 0)
/*
@@ -784,7 +785,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
* complete the switch.
*/
int newvt;
- acquire_console_sem();
newvt = vc->vt_newvt;
vc->vt_newvt = -1;
i = vc_allocate(newvt);
@@ -798,7 +798,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
* other console switches..
*/
complete_change_console(vc_cons[newvt].d);
- release_console_sem();
}
}
@@ -810,9 +809,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
/*
* If it's just an ACK, ignore it
*/
- if (arg != VT_ACKACQ)
+ if (arg != VT_ACKACQ) {
+ release_console_sem();
return -EINVAL;
+ }
}
+ release_console_sem();
return 0;
@@ -1208,15 +1210,18 @@ void change_console(struct vc_data *new_vc)
/*
* Send the signal as privileged - kill_pid() will
* tell us if the process has gone or something else
- * is awry
+ * is awry.
+ *
+ * We need to set vt_newvt *before* sending the signal or we
+ * have a race.
*/
+ vc->vt_newvt = new_vc->vc_num;
if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
/*
* It worked. Mark the vt to switch to and
* return. The process needs to send us a
* VT_RELDISP ioctl to complete the switch.
*/
- vc->vt_newvt = new_vc->vc_num;
return;
}
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index e2abe18e575..7c662ee594a 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -277,7 +277,7 @@ config JOYSTICK_XPAD_FF
config JOYSTICK_XPAD_LEDS
bool "LED Support for Xbox360 controller 'BigX' LED"
- depends on LEDS_CLASS && JOYSTICK_XPAD
+ depends on JOYSTICK_XPAD && (LEDS_CLASS=y || LEDS_CLASS=JOYSTICK_XPAD)
---help---
This option enables support for the LED which surrounds the Big X on
XBox 360 controller.
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 2bea1b2c631..a1804bfdbb8 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -328,6 +328,7 @@ static void atp_complete(struct urb* urb)
{
int x, y, x_z, y_z, x_f, y_f;
int retval, i, j;
+ int key;
struct atp *dev = urb->context;
switch (urb->status) {
@@ -468,6 +469,7 @@ static void atp_complete(struct urb* urb)
ATP_XFACT, &x_z, &x_f);
y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS,
ATP_YFACT, &y_z, &y_f);
+ key = dev->data[dev->datalen - 1] & 1;
if (x && y) {
if (dev->x_old != -1) {
@@ -505,7 +507,7 @@ static void atp_complete(struct urb* urb)
the first touch unless reinitialised. Do so if it's been
idle for a while in order to avoid waking the kernel up
several hundred times a second */
- if (atp_is_geyser_3(dev)) {
+ if (!key && atp_is_geyser_3(dev)) {
dev->idlecount++;
if (dev->idlecount == 10) {
dev->valid = 0;
@@ -514,7 +516,7 @@ static void atp_complete(struct urb* urb)
}
}
- input_report_key(dev->input, BTN_LEFT, dev->data[dev->datalen - 1] & 1);
+ input_report_key(dev->input, BTN_LEFT, key);
input_sync(dev->input);
exit:
diff --git a/drivers/lguest/lguest_asm.S b/drivers/lguest/lguest_asm.S
index f182c6a3620..1ddcd5cd20f 100644
--- a/drivers/lguest/lguest_asm.S
+++ b/drivers/lguest/lguest_asm.S
@@ -22,8 +22,9 @@
jmp lguest_init
/*G:055 We create a macro which puts the assembler code between lgstart_ and
- * lgend_ markers. These templates end up in the .init.text section, so they
- * are discarded after boot. */
+ * lgend_ markers. These templates are put in the .text section: they can't be
+ * discarded after boot as we may need to patch modules, too. */
+.text
#define LGUEST_PATCH(name, insns...) \
lgstart_##name: insns; lgend_##name:; \
.globl lgstart_##name; .globl lgend_##name
@@ -34,7 +35,6 @@ LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
/*:*/
-.text
/* These demark the EIP range where host should never deliver interrupts. */
.global lguest_noirq_start
.global lguest_noirq_end
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 0285c4a830e..66ea3cbc369 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -754,9 +754,11 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
ivtv_yuv_close(itv);
}
if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV)
- itv->output_mode = OUT_NONE;
+ itv->output_mode = OUT_NONE;
+ else if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_UDMA_YUV)
+ itv->output_mode = OUT_NONE;
else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG)
- itv->output_mode = OUT_NONE;
+ itv->output_mode = OUT_NONE;
itv->speed = 0;
clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags);
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index e3371f97224..0cb006f2943 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1387,7 +1387,6 @@ static const struct file_operations usbvision_fops = {
.ioctl = video_ioctl2,
.llseek = no_llseek,
/* .poll = video_poll, */
- .mmap = usbvision_v4l2_mmap,
.compat_ioctl = v4l_compat_ioctl32,
};
static struct video_device usbvision_video_template = {
@@ -1413,7 +1412,7 @@ static struct video_device usbvision_video_template = {
.vidioc_s_input = vidioc_s_input,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_g_audio = vidioc_g_audio,
- .vidioc_g_audio = vidioc_s_audio,
+ .vidioc_s_audio = vidioc_s_audio,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_streamon = vidioc_streamon,
@@ -1459,7 +1458,7 @@ static struct video_device usbvision_radio_template=
.vidioc_s_input = vidioc_s_input,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_g_audio = vidioc_g_audio,
- .vidioc_g_audio = vidioc_s_audio,
+ .vidioc_s_audio = vidioc_s_audio,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 4c3785c9d4b..9ecc3adcf6c 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -1726,6 +1726,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol
case E1000_DEV_ID_82571EB_QUAD_COPPER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
/* quad port adapters only support WoL on port A */
if (!adapter->quad_port_a) {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index ba120f7fb0b..8604adbe351 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -387,6 +387,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82571EB_SERDES_DUAL:
case E1000_DEV_ID_82571EB_SERDES_QUAD:
case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
hw->mac_type = e1000_82571;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index fe8714655c9..07f0ea73676 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -475,6 +475,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82571EB_FIBER 0x105F
#define E1000_DEV_ID_82571EB_SERDES 0x1060
#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC
#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4a225950fb4..e7c8951f47f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -108,6 +108,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x10BC),
INTEL_E1000_ETHERNET_DEVICE(0x10C4),
INTEL_E1000_ETHERNET_DEVICE(0x10C5),
+ INTEL_E1000_ETHERNET_DEVICE(0x10D5),
INTEL_E1000_ETHERNET_DEVICE(0x10D9),
INTEL_E1000_ETHERNET_DEVICE(0x10DA),
/* required last entry */
@@ -1101,6 +1102,7 @@ e1000_probe(struct pci_dev *pdev,
case E1000_DEV_ID_82571EB_QUAD_COPPER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+ case E1000_DEV_ID_82571PT_QUAD_COPPER:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 6a117e9968c..315335671f0 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -534,7 +534,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
}
/* PHY status changed */
- if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) {
+ if (eth_int_cause_ext & (ETH_INT_CAUSE_PHY | ETH_INT_CAUSE_STATE)) {
struct ethtool_cmd cmd;
if (mii_link_ok(&mp->mii)) {
@@ -1357,7 +1357,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
#endif
dev->watchdog_timeo = 2 * HZ;
- dev->tx_queue_len = mp->tx_ring_size;
dev->base_addr = 0;
dev->change_mtu = mv643xx_eth_change_mtu;
dev->do_ioctl = mv643xx_eth_do_ioctl;
@@ -2768,8 +2767,6 @@ static const struct ethtool_ops mv643xx_ethtool_ops = {
.get_stats_count = mv643xx_get_stats_count,
.get_ethtool_stats = mv643xx_get_ethtool_stats,
.get_strings = mv643xx_get_strings,
- .get_stats_count = mv643xx_get_stats_count,
- .get_ethtool_stats = mv643xx_get_ethtool_stats,
.nway_reset = mv643xx_eth_nway_restart,
};
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 82f8c0cbfb6..565b96696ac 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -64,7 +64,9 @@
#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
#define ETH_INT_CAUSE_PHY 0x00010000
-#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY)
+#define ETH_INT_CAUSE_STATE 0x00100000
+#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
+ ETH_INT_CAUSE_STATE)
#define ETH_INT_MASK_ALL 0x00000000
#define ETH_INT_MASK_ALL_EXT 0x00000000
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index c06cae3f0b5..503f2685fb7 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -116,7 +116,7 @@ struct el3_private {
spinlock_t lock;
};
-static const char *if_names[] = { "auto", "10base2", "10baseT", "AUI" };
+static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
/*====================================================================*/
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index f79cf87a2bf..c0b6d19d145 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -136,7 +136,7 @@ struct ppp_mppe_state {
* Key Derivation, from RFC 3078, RFC 3079.
* Equivalent to Get_Key() for MS-CHAP as described in RFC 3079.
*/
-static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey)
+static void get_new_key_from_sha(struct ppp_mppe_state * state)
{
struct hash_desc desc;
struct scatterlist sg[4];
@@ -153,8 +153,6 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I
desc.flags = 0;
crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
-
- memcpy(InterimKey, state->sha1_digest, state->keylen);
}
/*
@@ -163,21 +161,21 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I
*/
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
- unsigned char InterimKey[MPPE_MAX_KEY_LEN];
struct scatterlist sg_in[1], sg_out[1];
struct blkcipher_desc desc = { .tfm = state->arc4 };
- get_new_key_from_sha(state, InterimKey);
+ get_new_key_from_sha(state);
if (!initial_key) {
- crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen);
- setup_sg(sg_in, InterimKey, state->keylen);
+ crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
+ state->keylen);
+ setup_sg(sg_in, state->sha1_digest, state->keylen);
setup_sg(sg_out, state->session_key, state->keylen);
if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
state->keylen) != 0) {
printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
}
} else {
- memcpy(state->session_key, InterimKey, state->keylen);
+ memcpy(state->session_key, state->sha1_digest, state->keylen);
}
if (state->keylen == 8) {
/* See RFC 3078 */
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 69da95b5ad0..ea151315050 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2248,6 +2248,13 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
qdev->rsp_consumer_index) && (work_done < work_to_do)) {
net_rsp = qdev->rsp_current;
+ rmb();
+ /*
+ * Fix the 4032 chip's undocumented "feature" where bit-8 is set if the
+ * inbound completion is for a VLAN.
+ */
+ if (qdev->device_id == QL3032_DEVICE_ID)
+ net_rsp->opcode &= 0x7f;
switch (net_rsp->opcode) {
case OPCODE_OB_MAC_IOCB_FN0:
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index b85ab4a8f2a..c921ec32c23 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1228,7 +1228,10 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
return;
}
- /* phy config for RTL8169s mac_version C chip */
+ if ((tp->mac_version != RTL_GIGA_MAC_VER_02) &&
+ (tp->mac_version != RTL_GIGA_MAC_VER_03))
+ return;
+
mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1
mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000
mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7
@@ -2567,6 +2570,15 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
(TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
netif_wake_queue(dev);
}
+ /*
+ * 8168 hack: TxPoll requests are lost when the Tx packets are
+ * too close. Let's kick an extra TxPoll request when a burst
+ * of start_xmit activity is detected (if it is not detected,
+ * it is slow enough). -- FR
+ */
+ smp_rmb();
+ if (tp->cur_tx != dirty_tx)
+ RTL_W8(TxPoll, NPQ);
}
}
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index eaffe551d1d..162489b9f59 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -338,6 +338,16 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
if (!(hw->flags & SKY2_HW_GIGABIT)) {
/* enable automatic crossover */
ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
+
+ if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+ u16 spec;
+
+ /* Enable Class A driver for FE+ A0 */
+ spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
+ spec |= PHY_M_FESC_SEL_CL_A;
+ gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
+ }
} else {
/* disable energy detect */
ctrl &= ~PHY_M_PC_EN_DET_MSK;
@@ -816,7 +826,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
- if (!(hw->flags & SKY2_HW_RAMBUFFER)) {
+ /* On chips without ram buffer, pause is controlled by MAC level */
+ if (sky2_read8(hw, B2_E_0) == 0) {
sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
@@ -899,6 +910,20 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
return le;
}
+static void tx_init(struct sky2_port *sky2)
+{
+ struct sky2_tx_le *le;
+
+ sky2->tx_prod = sky2->tx_cons = 0;
+ sky2->tx_tcpsum = 0;
+ sky2->tx_last_mss = 0;
+
+ le = get_tx_le(sky2);
+ le->addr = 0;
+ le->opcode = OP_ADDR64 | HW_OWNER;
+ sky2->tx_addr64 = 0;
+}
+
static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
struct sky2_tx_le *le)
{
@@ -1271,7 +1296,7 @@ static int sky2_up(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- u32 imask;
+ u32 imask, ramsize;
int cap, err = -ENOMEM;
struct net_device *otherdev = hw->dev[sky2->port^1];
@@ -1309,7 +1334,8 @@ static int sky2_up(struct net_device *dev)
GFP_KERNEL);
if (!sky2->tx_ring)
goto err_out;
- sky2->tx_prod = sky2->tx_cons = 0;
+
+ tx_init(sky2);
sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
&sky2->rx_le_map);
@@ -1326,13 +1352,12 @@ static int sky2_up(struct net_device *dev)
sky2_mac_init(hw, port);
- if (hw->flags & SKY2_HW_RAMBUFFER) {
- /* Register is number of 4K blocks on internal RAM buffer. */
- u32 ramsize = sky2_read8(hw, B2_E_0) * 4;
+ /* Register is number of 4K blocks on internal RAM buffer. */
+ ramsize = sky2_read8(hw, B2_E_0) * 4;
+ if (ramsize > 0) {
u32 rxspace;
- printk(KERN_DEBUG PFX "%s: ram buffer %dK\n", dev->name, ramsize);
-
+ pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
if (ramsize < 16)
rxspace = ramsize / 2;
else
@@ -1995,7 +2020,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
synchronize_irq(hw->pdev->irq);
- if (!(hw->flags & SKY2_HW_RAMBUFFER))
+ if (sky2_read8(hw, B2_E_0) == 0)
sky2_set_tx_stfwd(hw, port);
ctl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2138,6 +2163,18 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
prefetch(sky2->rx_ring + sky2->rx_next);
+ if (length < ETH_ZLEN || length > sky2->rx_data_size)
+ goto len_error;
+
+ /* This chip has hardware problems that generate bogus status.
+ * So do only marginal checking and expect higher level protocols
+ * to handle crap frames.
+ */
+ if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
+ length != count)
+ goto okay;
+
if (status & GMR_FS_ANY_ERR)
goto error;
@@ -2146,8 +2183,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
/* if length reported by DMA does not match PHY, packet was truncated */
if (length != count)
- goto len_mismatch;
+ goto len_error;
+okay:
if (length < copybreak)
skb = receive_copy(sky2, re, length);
else
@@ -2157,13 +2195,13 @@ resubmit:
return skb;
-len_mismatch:
+len_error:
/* Truncation of overlength packets
causes PHY length to not match MAC length */
++sky2->net_stats.rx_length_errors;
if (netif_msg_rx_err(sky2) && net_ratelimit())
- pr_info(PFX "%s: rx length mismatch: length %d status %#x\n",
- dev->name, length, status);
+ pr_info(PFX "%s: rx length error: status %#x length %d\n",
+ dev->name, status, length);
goto resubmit;
error:
@@ -2526,7 +2564,7 @@ static void sky2_watchdog(unsigned long arg)
++active;
/* For chips with Rx FIFO, check if stuck */
- if ((hw->flags & SKY2_HW_RAMBUFFER) &&
+ if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) &&
sky2_rx_hung(dev)) {
pr_info(PFX "%s: receiver hang detected\n",
dev->name);
@@ -2684,8 +2722,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
switch(hw->chip_id) {
case CHIP_ID_YUKON_XL:
hw->flags = SKY2_HW_GIGABIT
- | SKY2_HW_NEWER_PHY
- | SKY2_HW_RAMBUFFER;
+ | SKY2_HW_NEWER_PHY;
+ if (hw->chip_rev < 3)
+ hw->flags |= SKY2_HW_FIFO_HANG_CHECK;
+
break;
case CHIP_ID_YUKON_EC_U:
@@ -2711,11 +2751,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
return -EOPNOTSUPP;
}
- hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RAMBUFFER;
+ hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK;
break;
case CHIP_ID_YUKON_FE:
- hw->flags = SKY2_HW_RAMBUFFER;
break;
case CHIP_ID_YUKON_FE_P:
@@ -3923,13 +3962,6 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
sky2->hw = hw;
sky2->msg_enable = netif_msg_init(debug, default_msg);
- /* This chip has hardware problems that generates
- * bogus PHY receive status so by default shut up the message.
- */
- if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
- hw->chip_rev == CHIP_REV_YU_FE2_A0)
- sky2->msg_enable &= ~NETIF_MSG_RX_ERR;
-
/* Auto speed and flow control */
sky2->autoneg = AUTONEG_ENABLE;
sky2->flow_mode = FC_BOTH;
@@ -3953,8 +3985,12 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
dev->features |= NETIF_F_HIGHDMA;
#ifdef SKY2_VLAN_TAG_USED
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->vlan_rx_register = sky2_vlan_rx_register;
+ /* The workaround for FE+ status conflicts with VLAN tag detection. */
+ if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_rx_register = sky2_vlan_rx_register;
+ }
#endif
/* read the mac address */
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 69cd98400fe..8bc5c54e3ef 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2063,7 +2063,7 @@ struct sky2_hw {
#define SKY2_HW_FIBRE_PHY 0x00000002
#define SKY2_HW_GIGABIT 0x00000004
#define SKY2_HW_NEWER_PHY 0x00000008
-#define SKY2_HW_RAMBUFFER 0x00000010 /* chip has RAM FIFO */
+#define SKY2_HW_FIFO_HANG_CHECK 0x00000010
#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 16c7a0e8785..a2de32fabc1 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -405,7 +405,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &dm9601_ethtool_ops;
dev->net->hard_header_len += DM_TX_OVERHEAD;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
- dev->rx_urb_size = dev->net->mtu + DM_RX_OVERHEAD;
+ dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
dev->mii.dev = dev->net;
dev->mii.mdio_read = dm9601_mdio_read;
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index ef35bc6c4a2..4eb6d975288 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o
obj-$(CONFIG_USB_ZD1201) += zd1201.o
-obj-$(CONFIG_LIBERTAS_USB) += libertas/
+obj-$(CONFIG_LIBERTAS) += libertas/
rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o
obj-$(CONFIG_RTL8187) += rtl8187.o
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7dcaa09b3c2..50f2dd9e1bb 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1444,7 +1444,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
{
u16 command;
- u32 bar;
u8 __iomem *csr;
u8 cmd_hi;
@@ -1476,12 +1475,12 @@ static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
* re-enable them when it's ready.
*/
pci_read_config_word(dev, PCI_COMMAND, &command);
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar);
- if (!(command & PCI_COMMAND_MEMORY) || !bar)
+ if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
return;
- csr = ioremap(bar, 8);
+ /* Convert from PCI bus to resource space. */
+ csr = ioremap(pci_resource_start(dev, 0), 8);
if (!csr) {
printk(KERN_WARNING "PCI: Can't map %s e100 registers\n",
pci_name(dev));
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 77b06a983fa..95cf7b6cd62 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2314,6 +2314,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
esp->host->transportt = esp_transport_template;
esp->host->max_lun = ESP_MAX_LUN;
esp->host->cmd_per_lun = 2;
+ esp->host->unique_id = instance;
esp_set_clock_params(esp);
@@ -2337,7 +2338,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
if (err)
return err;
- esp->host->unique_id = instance++;
+ instance++;
scsi_scan_host(esp->host);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 6f56f875063..4df21c92ff1 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -787,10 +787,12 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
struct scsi_target *starget = sdev->sdev_target;
struct Scsi_Host *shost = sdev->host;
int len = sdev->inquiry_len;
+ int min_period = spi_min_period(starget);
+ int max_width = spi_max_width(starget);
/* first set us up for narrow async */
DV_SET(offset, 0);
DV_SET(width, 0);
-
+
if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
!= SPI_COMPARE_SUCCESS) {
starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n");
@@ -798,9 +800,13 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
return;
}
+ if (!scsi_device_wide(sdev)) {
+ spi_max_width(starget) = 0;
+ max_width = 0;
+ }
+
/* test width */
- if (i->f->set_width && spi_max_width(starget) &&
- scsi_device_wide(sdev)) {
+ if (i->f->set_width && max_width) {
i->f->set_width(starget, 1);
if (spi_dv_device_compare_inquiry(sdev, buffer,
@@ -809,6 +815,11 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
!= SPI_COMPARE_SUCCESS) {
starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n");
i->f->set_width(starget, 0);
+ /* Make sure we don't force wide back on by asking
+ * for a transfer period that requires it */
+ max_width = 0;
+ if (min_period < 10)
+ min_period = 10;
}
}
@@ -828,7 +839,8 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
/* now set up to the maximum */
DV_SET(offset, spi_max_offset(starget));
- DV_SET(period, spi_min_period(starget));
+ DV_SET(period, min_period);
+
/* try QAS requests; this should be harmless to set if the
* target supports it */
if (scsi_device_qas(sdev)) {
@@ -837,14 +849,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
DV_SET(qas, 0);
}
- if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) {
+ if (scsi_device_ius(sdev) && min_period < 9) {
/* This is u320 (or u640). Set IU transfers */
DV_SET(iu, 1);
/* Then set the optional parameters */
DV_SET(rd_strm, 1);
DV_SET(wr_flow, 1);
DV_SET(rti, 1);
- if (spi_min_period(starget) == 8)
+ if (min_period == 8)
DV_SET(pcomp_en, 1);
} else {
DV_SET(iu, 0);
@@ -862,6 +874,10 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
} else {
DV_SET(dt, 1);
}
+ /* set width last because it will pull all the other
+ * parameters down to required values */
+ DV_SET(width, max_width);
+
/* Do the read only INQUIRY tests */
spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
spi_dv_device_compare_inquiry);
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/serial/cpm_uart/cpm_uart_cpm1.h
index a99e45e2b6d..2a6477834c3 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.h
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.h
@@ -37,6 +37,6 @@ static inline void cpm_set_smc_fcr(volatile smc_uart_t * up)
up->smc_tfcr = SMC_EB;
}
-#define DPRAM_BASE ((unsigned char *)&cpmp->cp_dpmem[0])
+#define DPRAM_BASE ((unsigned char *)cpm_dpram_addr(0))
#endif
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index e348ba68405..ff610c23314 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -38,7 +38,7 @@
#include <asm/prom.h>
#include <asm/of_device.h>
-#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 8d7ab74170d..a593f900eff 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -431,6 +431,7 @@ static int w1_uevent(struct device *dev, char **envp, int num_envp,
err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size,
&cur_len, "W1_SLAVE_ID=%024LX",
(unsigned long long)sl->reg_num.id);
+ envp[cur_index] = NULL;
if (err)
return err;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 5a5b7116cef..37310b0e810 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -3190,6 +3190,8 @@ COMPATIBLE_IOCTL(SIOCSIWRETRY)
COMPATIBLE_IOCTL(SIOCGIWRETRY)
COMPATIBLE_IOCTL(SIOCSIWPOWER)
COMPATIBLE_IOCTL(SIOCGIWPOWER)
+COMPATIBLE_IOCTL(SIOCSIWAUTH)
+COMPATIBLE_IOCTL(SIOCGIWAUTH)
/* hiddev */
COMPATIBLE_IOCTL(HIDIOCGVERSION)
COMPATIBLE_IOCTL(HIDIOCAPPLICATION)
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index a21e4bc5444..d098c7af0d2 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -171,19 +171,14 @@ found:
* GRANTED_RES message by cookie, without having to rely on the client's IP
* address. --okir
*/
-static inline struct nlm_block *
-nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
- struct nlm_lock *lock, struct nlm_cookie *cookie)
+static struct nlm_block *
+nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
+ struct nlm_file *file, struct nlm_lock *lock,
+ struct nlm_cookie *cookie)
{
struct nlm_block *block;
- struct nlm_host *host;
struct nlm_rqst *call = NULL;
- /* Create host handle for callback */
- host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
- if (host == NULL)
- return NULL;
-
call = nlm_alloc_call(host);
if (call == NULL)
return NULL;
@@ -366,6 +361,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
{
struct nlm_block *block = NULL;
+ struct nlm_host *host;
int error;
__be32 ret;
@@ -377,6 +373,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
(long long)lock->fl.fl_end,
wait);
+ /* Create host handle for callback */
+ host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
+ if (host == NULL)
+ return nlm_lck_denied_nolocks;
/* Lock file against concurrent access */
mutex_lock(&file->f_mutex);
@@ -385,7 +385,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
*/
block = nlmsvc_lookup_block(file, lock);
if (block == NULL) {
- block = nlmsvc_create_block(rqstp, file, lock, cookie);
+ block = nlmsvc_create_block(rqstp, nlm_get_host(host), file,
+ lock, cookie);
ret = nlm_lck_denied_nolocks;
if (block == NULL)
goto out;
@@ -449,6 +450,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
out:
mutex_unlock(&file->f_mutex);
nlmsvc_release_block(block);
+ nlm_release_host(host);
dprintk("lockd: nlmsvc_lock returned %u\n", ret);
return ret;
}
@@ -477,10 +479,15 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
if (block == NULL) {
struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+ struct nlm_host *host;
if (conf == NULL)
return nlm_granted;
- block = nlmsvc_create_block(rqstp, file, lock, cookie);
+ /* Create host handle for callback */
+ host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
+ if (host == NULL)
+ return nlm_lck_denied_nolocks;
+ block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
if (block == NULL) {
kfree(conf);
return nlm_granted;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index a49f9feff77..a204484072f 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -588,16 +588,6 @@ static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_dat
server->namelen = data->namlen;
/* Create a client RPC handle for the NFSv3 ACL management interface */
nfs_init_server_aclclient(server);
- if (clp->cl_nfsversion == 3) {
- if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
- server->namelen = NFS3_MAXNAMLEN;
- if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
- server->caps |= NFS_CAP_READDIRPLUS;
- } else {
- if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
- server->namelen = NFS2_MAXNAMLEN;
- }
-
dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp);
return 0;
@@ -794,6 +784,16 @@ struct nfs_server *nfs_create_server(const struct nfs_mount_data *data,
error = nfs_probe_fsinfo(server, mntfh, &fattr);
if (error < 0)
goto error;
+ if (server->nfs_client->rpc_ops->version == 3) {
+ if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
+ server->namelen = NFS3_MAXNAMLEN;
+ if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
+ server->caps |= NFS_CAP_READDIRPLUS;
+ } else {
+ if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
+ server->namelen = NFS2_MAXNAMLEN;
+ }
+
if (!(fattr.valid & NFS_ATTR_FATTR)) {
error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr);
if (error < 0) {
@@ -984,6 +984,9 @@ struct nfs_server *nfs4_create_server(const struct nfs4_mount_data *data,
if (error < 0)
goto error;
+ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+ server->namelen = NFS4_MAXNAMLEN;
+
BUG_ON(!server->nfs_client);
BUG_ON(!server->nfs_client->rpc_ops);
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
@@ -1056,6 +1059,9 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
if (error < 0)
goto error;
+ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+ server->namelen = NFS4_MAXNAMLEN;
+
dprintk("Referral FSID: %llx:%llx\n",
(unsigned long long) server->fsid.major,
(unsigned long long) server->fsid.minor);
@@ -1115,6 +1121,9 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
if (error < 0)
goto out_free_server;
+ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+ server->namelen = NFS4_MAXNAMLEN;
+
dprintk("Cloned FSID: %llx:%llx\n",
(unsigned long long) server->fsid.major,
(unsigned long long) server->fsid.minor);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index ea97408e423..e4a04d16b8b 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1162,6 +1162,8 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
}
if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
return NULL;
+ if (name.len > NFS_SERVER(dir)->namelen)
+ return NULL;
/* Note: caller is already holding the dir->i_mutex! */
dentry = d_alloc(parent, &name);
if (dentry == NULL)
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index d1cbf0a0fbb..522e5ad4d8a 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -175,6 +175,9 @@ next_component:
path++;
name.len = path - (const char *) name.name;
+ if (name.len > NFS4_MAXNAMLEN)
+ return -ENAMETOOLONG;
+
eat_dot_dir:
while (*path == '/')
path++;
diff --git a/fs/splice.c b/fs/splice.c
index c010a72ca2d..e95a3622886 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1224,6 +1224,33 @@ static long do_splice(struct file *in, loff_t __user *off_in,
}
/*
+ * Do a copy-from-user while holding the mmap_semaphore for reading, in a
+ * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
+ * for writing) and page faulting on the user memory pointed to by src.
+ * This assumes that we will very rarely hit the partial != 0 path, or this
+ * will not be a win.
+ */
+static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
+{
+ int partial;
+
+ pagefault_disable();
+ partial = __copy_from_user_inatomic(dst, src, n);
+ pagefault_enable();
+
+ /*
+ * Didn't copy everything, drop the mmap_sem and do a faulting copy
+ */
+ if (unlikely(partial)) {
+ up_read(&current->mm->mmap_sem);
+ partial = copy_from_user(dst, src, n);
+ down_read(&current->mm->mmap_sem);
+ }
+
+ return partial;
+}
+
+/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
@@ -1236,31 +1263,26 @@ static int get_iovec_page_array(const struct iovec __user *iov,
{
int buffers = 0, error = 0;
- /*
- * It's ok to take the mmap_sem for reading, even
- * across a "get_user()".
- */
down_read(&current->mm->mmap_sem);
while (nr_vecs) {
unsigned long off, npages;
+ struct iovec entry;
void __user *base;
size_t len;
int i;
- /*
- * Get user address base and length for this iovec.
- */
- error = get_user(base, &iov->iov_base);
- if (unlikely(error))
- break;
- error = get_user(len, &iov->iov_len);
- if (unlikely(error))
+ error = -EFAULT;
+ if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
break;
+ base = entry.iov_base;
+ len = entry.iov_len;
+
/*
* Sanity check this iovec. 0 read succeeds.
*/
+ error = 0;
if (unlikely(!len))
break;
error = -EFAULT;
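The copy_from_user_mmap_sem() helper added above captures a general pattern: attempt a non-faulting copy while the lock is held, and only on the rare miss drop the lock, do the slow faulting copy, and retake the lock. A rough user-space analogue using a pthread rwlock; try_copy_nofault(), slow_copy() and copy_with_lock_drop() are hypothetical stand-ins for __copy_from_user_inatomic(), copy_from_user() and the mmap_sem, not kernel APIs.

/* optimistic_copy.c: sketch of "try fast copy, drop the lock only on a miss" */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static size_t try_copy_nofault(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);	/* pretend this fast path can fail */
	return 0;		/* bytes NOT copied */
}

static size_t slow_copy(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);	/* the real thing may block on a page fault */
	return 0;
}

/* Must be called with the read lock held; may drop and retake it. */
static size_t copy_with_lock_drop(void *dst, const void *src, size_t n)
{
	size_t left = try_copy_nofault(dst, src, n);

	if (left) {		/* rare: fall back to the slow, faulting copy */
		pthread_rwlock_unlock(&lock);
		left = slow_copy(dst, src, n);
		pthread_rwlock_rdlock(&lock);
	}
	return left;
}

int main(void)
{
	char src[] = "iovec entry", dst[sizeof(src)];

	pthread_rwlock_rdlock(&lock);
	copy_with_lock_drop(dst, src, sizeof(src));
	pthread_rwlock_unlock(&lock);
	printf("%s\n", dst);
	return 0;
}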
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 73402c5eeb8..38eb0b7a1f3 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -894,7 +894,7 @@ magic_found:
goto again;
}
-
+ sbi->s_flags = flags; /* after this point some functions rely on s_flags */
ufs_print_super_stuff(sb, usb1, usb2, usb3);
/*
@@ -1025,8 +1025,6 @@ magic_found:
UFS_MOUNT_UFSTYPE_44BSD)
uspi->s_maxsymlinklen =
fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
-
- sbi->s_flags = flags;
inode = iget(sb, UFS_ROOTINO);
if (!inode || is_bad_inode(inode))
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index fa25b7dcc6c..d7e13614306 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -52,11 +52,6 @@ typedef struct xfs_buf_log_format_t {
#define XFS_BLI_UDQUOT_BUF 0x4
#define XFS_BLI_PDQUOT_BUF 0x8
#define XFS_BLI_GDQUOT_BUF 0x10
-/*
- * This flag indicates that the buffer contains newly allocated
- * inodes.
- */
-#define XFS_BLI_INODE_NEW_BUF 0x20
#define XFS_BLI_CHUNK 128
#define XFS_BLI_SHIFT 7
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 7174991f4be..8ae6e8e5f3d 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1874,7 +1874,6 @@ xlog_recover_do_inode_buffer(
/*ARGSUSED*/
STATIC void
xlog_recover_do_reg_buffer(
- xfs_mount_t *mp,
xlog_recover_item_t *item,
xfs_buf_t *bp,
xfs_buf_log_format_t *buf_f)
@@ -1885,50 +1884,6 @@ xlog_recover_do_reg_buffer(
unsigned int *data_map = NULL;
unsigned int map_size = 0;
int error;
- int stale_buf = 1;
-
- /*
- * Scan through the on-disk inode buffer and attempt to
- * determine if it has been written to since it was logged.
- *
- * - If any of the magic numbers are incorrect then the buffer is stale
- * - If any of the modes are non-zero then the buffer is not stale
- * - If all of the modes are zero and at least one of the generation
- * counts is non-zero then the buffer is stale
- *
- * If the end result is a stale buffer then the log buffer is replayed
- * otherwise it is skipped.
- *
- * This heuristic is not perfect. It can be improved by scanning the
- * entire inode chunk for evidence that any of the inode clusters have
- * been updated. To fix this problem completely we will need a major
- * architectural change to the logging system.
- */
- if (buf_f->blf_flags & XFS_BLI_INODE_NEW_BUF) {
- xfs_dinode_t *dip;
- int inodes_per_buf;
- int mode_count = 0;
- int gen_count = 0;
-
- stale_buf = 0;
- inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
- for (i = 0; i < inodes_per_buf; i++) {
- dip = (xfs_dinode_t *)xfs_buf_offset(bp,
- i * mp->m_sb.sb_inodesize);
- if (be16_to_cpu(dip->di_core.di_magic) !=
- XFS_DINODE_MAGIC) {
- stale_buf = 1;
- break;
- }
- if (dip->di_core.di_mode)
- mode_count++;
- if (dip->di_core.di_gen)
- gen_count++;
- }
-
- if (!mode_count && gen_count)
- stale_buf = 1;
- }
switch (buf_f->blf_type) {
case XFS_LI_BUF:
@@ -1962,7 +1917,7 @@ xlog_recover_do_reg_buffer(
-1, 0, XFS_QMOPT_DOWARN,
"dquot_buf_recover");
}
- if (!error && stale_buf)
+ if (!error)
memcpy(xfs_buf_offset(bp,
(uint)bit << XFS_BLI_SHIFT), /* dest */
item->ri_buf[i].i_addr, /* source */
@@ -2134,7 +2089,7 @@ xlog_recover_do_dquot_buffer(
if (log->l_quotaoffs_flag & type)
return;
- xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
+ xlog_recover_do_reg_buffer(item, bp, buf_f);
}
/*
@@ -2235,7 +2190,7 @@ xlog_recover_do_buffer_trans(
(XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
} else {
- xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
+ xlog_recover_do_reg_buffer(item, bp, buf_f);
}
if (error)
return XFS_ERROR(error);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 95fff6872a2..60b6b898022 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -966,7 +966,6 @@ xfs_trans_inode_alloc_buf(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
- bip->bli_format.blf_flags |= XFS_BLI_INODE_NEW_BUF;
}
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 202acb9ff4d..f85f77a538a 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -147,10 +147,6 @@ static inline void unregister_hotplug_dock_device(acpi_handle handle)
/*--------------------------------------------------------------------------
Suspend/Resume
-------------------------------------------------------------------------- */
-#ifdef CONFIG_ACPI_SLEEP
extern int acpi_sleep_init(void);
-#else
-static inline int acpi_sleep_init(void) { return 0; }
-#endif
#endif /*__ACPI_DRIVERS_H__*/
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 609756c6167..d69ba937e09 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -214,11 +214,6 @@ static inline unsigned long get_limit(unsigned long segment)
*/
-/*
- * Actually only lfence would be needed for mb() because all stores done
- * by the kernel should be already ordered. But keep a full barrier for now.
- */
-
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
diff --git a/include/asm-mips/fcntl.h b/include/asm-mips/fcntl.h
index 00a50ec1c19..2a52333a062 100644
--- a/include/asm-mips/fcntl.h
+++ b/include/asm-mips/fcntl.h
@@ -13,6 +13,7 @@
#define O_SYNC 0x0010
#define O_NONBLOCK 0x0080
#define O_CREAT 0x0100 /* not fcntl */
+#define O_TRUNC 0x0200 /* not fcntl */
#define O_EXCL 0x0400 /* not fcntl */
#define O_NOCTTY 0x0800 /* not fcntl */
#define FASYNC 0x1000 /* fcntl, for BSD compatibility */
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index b92dd8c760d..e3301e54d55 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -142,7 +142,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
/*
* __pa()/__va() should be used only during mem init.
*/
-#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64)
+#ifdef CONFIG_64BIT
#define __pa(x) \
({ \
unsigned long __x = (unsigned long)(x); \
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 963051a967d..3ec6e7ff5fb 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -32,15 +32,7 @@
* CPUFREQ NOTIFIER INTERFACE *
*********************************************************************/
-#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
-#else
-static inline int cpufreq_register_notifier(struct notifier_block *nb,
- unsigned int list)
-{
- return 0;
-}
-#endif
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
#define CPUFREQ_TRANSITION_NOTIFIER (0)
@@ -268,22 +260,17 @@ struct freq_attr {
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_update_policy(unsigned int cpu);
+/* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */
+unsigned int cpufreq_get(unsigned int cpu);
-/*
- * query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it
- */
+/* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_quick_get(unsigned int cpu);
-unsigned int cpufreq_get(unsigned int cpu);
#else
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
return 0;
}
-static inline unsigned int cpufreq_get(unsigned int cpu)
-{
- return 0;
-}
#endif
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 991c85bb9e3..e8e3a64eb32 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -114,7 +114,6 @@ sctp_state_fn_t sctp_sf_do_4_C;
sctp_state_fn_t sctp_sf_eat_data_6_2;
sctp_state_fn_t sctp_sf_eat_data_fast_4_4;
sctp_state_fn_t sctp_sf_eat_sack_6_2;
-sctp_state_fn_t sctp_sf_tabort_8_4_8;
sctp_state_fn_t sctp_sf_operr_notify;
sctp_state_fn_t sctp_sf_t1_init_timer_expire;
sctp_state_fn_t sctp_sf_t1_cookie_timer_expire;
@@ -247,6 +246,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
int, __be16);
struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
union sctp_addr *addr);
+int sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_paramhdr *param_hdr, void *chunk_end,
+ struct sctp_paramhdr **errp);
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf);
int sctp_process_asconf_ack(struct sctp_association *asoc,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index c2fe2dcc9af..baff49dfcdb 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -421,6 +421,7 @@ struct sctp_signed_cookie {
* internally.
*/
union sctp_addr_param {
+ struct sctp_paramhdr p;
struct sctp_ipv4addr_param v4;
struct sctp_ipv6addr_param v6;
};
@@ -1156,7 +1157,7 @@ int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
__u8 use_as_src, gfp_t gfp);
int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
- void (*rcu_call)(struct rcu_head *,
+ void fastcall (*rcu_call)(struct rcu_head *,
void (*func)(struct rcu_head *)));
int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
struct sctp_sock *);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 185c7ecce4c..54053de0bdd 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1059,14 +1059,12 @@ struct tcp_md5sig_key {
};
struct tcp4_md5sig_key {
- u8 *key;
- u16 keylen;
+ struct tcp_md5sig_key base;
__be32 addr;
};
struct tcp6_md5sig_key {
- u8 *key;
- u16 keylen;
+ struct tcp_md5sig_key base;
#if 0
u32 scope_id; /* XXX */
#endif
diff --git a/kernel/futex.c b/kernel/futex.c
index e8935b195e8..fcc94e7b408 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1943,9 +1943,10 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
void exit_robust_list(struct task_struct *curr)
{
struct robust_list_head __user *head = curr->robust_list;
- struct robust_list __user *entry, *pending;
- unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+ struct robust_list __user *entry, *next_entry, *pending;
+ unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
unsigned long futex_offset;
+ int rc;
/*
* Fetch the list head (which was registered earlier, via
@@ -1965,12 +1966,14 @@ void exit_robust_list(struct task_struct *curr)
if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
return;
- if (pending)
- handle_futex_death((void __user *)pending + futex_offset,
- curr, pip);
-
+ next_entry = NULL; /* avoid warning with gcc */
while (entry != &head->list) {
/*
+ * Fetch the next entry in the list before calling
+ * handle_futex_death:
+ */
+ rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
+ /*
* A pending lock might already be on the list, so
* don't process it twice:
*/
@@ -1978,11 +1981,10 @@ void exit_robust_list(struct task_struct *curr)
if (handle_futex_death((void __user *)entry + futex_offset,
curr, pi))
return;
- /*
- * Fetch the next entry in the list:
- */
- if (fetch_robust_entry(&entry, &entry->next, &pi))
+ if (rc)
return;
+ entry = next_entry;
+ pi = next_pi;
/*
* Avoid excessively long or circular lists:
*/
@@ -1991,6 +1993,10 @@ void exit_robust_list(struct task_struct *curr)
cond_resched();
}
+
+ if (pending)
+ handle_futex_death((void __user *)pending + futex_offset,
+ curr, pip);
}
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
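The robust-list change above amounts to a standard safe-iteration rule: when the handler may invalidate the current element, read its next link before calling the handler. A small self-contained C sketch of that rule; the types and names are hypothetical, and the kernel fetches the user-space pointers via fetch_robust_entry() rather than dereferencing them directly.

/* robust_walk.c: read the next link before the handler can free the node */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

static void handle_node(struct node *n)
{
	printf("handling %d\n", n->val);
	free(n);		/* after this, n->next must not be touched */
}

static void walk(struct node *head, unsigned limit)
{
	struct node *entry = head, *next;

	while (entry && limit--) {
		next = entry->next;	/* read the link BEFORE the handler */
		handle_node(entry);
		entry = next;
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 3; i > 0; i--) {
		struct node *n = malloc(sizeof(*n));

		n->val = i;
		n->next = head;
		head = n;
	}
	walk(head, 16);		/* the limit guards against circular lists */
	return 0;
}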
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 7e52eb051f2..2c2e2954b71 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -38,10 +38,11 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
void compat_exit_robust_list(struct task_struct *curr)
{
struct compat_robust_list_head __user *head = curr->compat_robust_list;
- struct robust_list __user *entry, *pending;
- unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
- compat_uptr_t uentry, upending;
+ struct robust_list __user *entry, *next_entry, *pending;
+ unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+ compat_uptr_t uentry, next_uentry, upending;
compat_long_t futex_offset;
+ int rc;
/*
* Fetch the list head (which was registered earlier, via
@@ -61,11 +62,16 @@ void compat_exit_robust_list(struct task_struct *curr)
if (fetch_robust_entry(&upending, &pending,
&head->list_op_pending, &pip))
return;
- if (pending)
- handle_futex_death((void __user *)pending + futex_offset, curr, pip);
+ next_entry = NULL; /* avoid warning with gcc */
while (entry != (struct robust_list __user *) &head->list) {
/*
+ * Fetch the next entry in the list before calling
+ * handle_futex_death:
+ */
+ rc = fetch_robust_entry(&next_uentry, &next_entry,
+ (compat_uptr_t __user *)&entry->next, &next_pi);
+ /*
* A pending lock might already be on the list, so
* dont process it twice:
*/
@@ -74,12 +80,11 @@ void compat_exit_robust_list(struct task_struct *curr)
curr, pi))
return;
- /*
- * Fetch the next entry in the list:
- */
- if (fetch_robust_entry(&uentry, &entry,
- (compat_uptr_t __user *)&entry->next, &pi))
+ if (rc)
return;
+ uentry = next_uentry;
+ entry = next_entry;
+ pi = next_pi;
/*
* Avoid excessively long or circular lists:
*/
@@ -88,6 +93,9 @@ void compat_exit_robust_list(struct task_struct *curr)
cond_resched();
}
+ if (pending)
+ handle_futex_death((void __user *)pending + futex_offset,
+ curr, pip);
}
asmlinkage long
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index c8580a1e687..14b0e10dc95 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -110,7 +110,7 @@ config SUSPEND
config HIBERNATION_UP_POSSIBLE
bool
- depends on X86 || PPC64_SWSUSP || FRV || PPC32
+ depends on X86 || PPC64_SWSUSP || PPC32
depends on !SMP
default y
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c9fbe8e73a4..67c67a87146 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -639,6 +639,16 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->block_start = 0;
se->sum_sleep_runtime += delta;
+
+ /*
+ * Blocking time is in units of nanosecs, so shift by 20 to
+ * get a milliseconds-range estimation of the amount of
+ * time that the task spent sleeping:
+ */
+ if (unlikely(prof_on == SLEEP_PROFILING)) {
+ profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
+ delta >> 20);
+ }
}
#endif
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 1b33b05d346..8ae2e636eb1 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -32,6 +32,7 @@
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
+#include <linux/cpu.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
@@ -878,6 +879,7 @@ void kernel_power_off(void)
kernel_shutdown_prepare(SYSTEM_POWER_OFF);
if (pm_power_off_prepare)
pm_power_off_prepare();
+ disable_nonboot_cpus();
sysdev_shutdown();
printk(KERN_EMERG "Power down.\n");
machine_power_off();
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 50a94eee4d9..495863a500c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -284,7 +284,7 @@ config LOCKDEP
select KALLSYMS_ALL
config LOCK_STAT
- bool "Lock usage statisitics"
+ bool "Lock usage statistics"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select LOCKDEP
select DEBUG_SPINLOCK
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 84c795ee2d6..eab8c428cc9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -42,7 +42,7 @@ static void clear_huge_page(struct page *page, unsigned long addr)
might_sleep();
for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
cond_resched();
- clear_user_highpage(page + i, addr);
+ clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
}
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index f2de2e48b02..6284c99b456 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -366,6 +366,12 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
frag = WLAN_GET_SEQ_FRAG(sc);
hdrlen = ieee80211_get_hdrlen(fc);
+ if (skb->len < hdrlen) {
+ printk(KERN_INFO "%s: invalid SKB length %d\n",
+ dev->name, skb->len);
+ goto rx_dropped;
+ }
+
/* Put this code here so that we avoid duplicating it in all
* Rx paths. - Jean II */
#ifdef CONFIG_WIRELESS_EXT
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index afb6c6698b2..e475f2e1be1 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -273,8 +273,6 @@ ieee80211softmac_assoc_work(struct work_struct *work)
ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL);
if (ieee80211softmac_start_scan(mac)) {
dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n");
- mac->associnfo.associating = 0;
- mac->associnfo.associated = 0;
}
goto out;
} else {
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index d054e9224b3..442b9875f3f 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -70,44 +70,30 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
char *extra)
{
struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
- struct ieee80211softmac_network *n;
struct ieee80211softmac_auth_queue_item *authptr;
int length = 0;
check_assoc_again:
mutex_lock(&sm->associnfo.mutex);
- /* Check if we're already associating to this or another network
- * If it's another network, cancel and start over with our new network
- * If it's our network, ignore the change, we're already doing it!
- */
if((sm->associnfo.associating || sm->associnfo.associated) &&
(data->essid.flags && data->essid.length)) {
- /* Get the associating network */
- n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid);
- if(n && n->essid.len == data->essid.length &&
- !memcmp(n->essid.data, extra, n->essid.len)) {
- dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n",
- MAC_ARG(sm->associnfo.bssid));
- goto out;
- } else {
- dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
- /* Cancel assoc work */
- cancel_delayed_work(&sm->associnfo.work);
- /* We don't have to do this, but it's a little cleaner */
- list_for_each_entry(authptr, &sm->auth_queue, list)
- cancel_delayed_work(&authptr->work);
- sm->associnfo.bssvalid = 0;
- sm->associnfo.bssfixed = 0;
- sm->associnfo.associating = 0;
- sm->associnfo.associated = 0;
- /* We must unlock to avoid deadlocks with the assoc workqueue
- * on the associnfo.mutex */
- mutex_unlock(&sm->associnfo.mutex);
- flush_scheduled_work();
- /* Avoid race! Check assoc status again. Maybe someone started an
- * association while we flushed. */
- goto check_assoc_again;
- }
+ dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
+ /* Cancel assoc work */
+ cancel_delayed_work(&sm->associnfo.work);
+ /* We don't have to do this, but it's a little cleaner */
+ list_for_each_entry(authptr, &sm->auth_queue, list)
+ cancel_delayed_work(&authptr->work);
+ sm->associnfo.bssvalid = 0;
+ sm->associnfo.bssfixed = 0;
+ sm->associnfo.associating = 0;
+ sm->associnfo.associated = 0;
+ /* We must unlock to avoid deadlocks with the assoc workqueue
+ * on the associnfo.mutex */
+ mutex_unlock(&sm->associnfo.mutex);
+ flush_scheduled_work();
+ /* Avoid race! Check assoc status again. Maybe someone started an
+ * association while we flushed. */
+ goto check_assoc_again;
}
sm->associnfo.static_essid = 0;
@@ -153,13 +139,13 @@ ieee80211softmac_wx_get_essid(struct net_device *net_dev,
data->essid.length = sm->associnfo.req_essid.len;
data->essid.flags = 1; /* active */
memcpy(extra, sm->associnfo.req_essid.data, sm->associnfo.req_essid.len);
- }
-
+ dprintk(KERN_INFO PFX "Getting essid from req_essid\n");
+ } else if (sm->associnfo.associated || sm->associnfo.associating) {
/* If we're associating/associated, return that */
- if (sm->associnfo.associated || sm->associnfo.associating) {
data->essid.length = sm->associnfo.associate_essid.len;
data->essid.flags = 1; /* active */
memcpy(extra, sm->associnfo.associate_essid.data, sm->associnfo.associate_essid.len);
+ dprintk(KERN_INFO PFX "Getting essid from associate_essid\n");
}
mutex_unlock(&sm->associnfo.mutex);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9c94627c8c7..e089a978e12 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -833,8 +833,7 @@ static struct tcp_md5sig_key *
return NULL;
for (i = 0; i < tp->md5sig_info->entries4; i++) {
if (tp->md5sig_info->keys4[i].addr == addr)
- return (struct tcp_md5sig_key *)
- &tp->md5sig_info->keys4[i];
+ return &tp->md5sig_info->keys4[i].base;
}
return NULL;
}
@@ -865,9 +864,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
if (key) {
/* Pre-existing entry - just update that one. */
- kfree(key->key);
- key->key = newkey;
- key->keylen = newkeylen;
+ kfree(key->base.key);
+ key->base.key = newkey;
+ key->base.keylen = newkeylen;
} else {
struct tcp_md5sig_info *md5sig;
@@ -906,9 +905,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
md5sig->alloced4++;
}
md5sig->entries4++;
- md5sig->keys4[md5sig->entries4 - 1].addr = addr;
- md5sig->keys4[md5sig->entries4 - 1].key = newkey;
- md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen;
+ md5sig->keys4[md5sig->entries4 - 1].addr = addr;
+ md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
+ md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
}
return 0;
}
@@ -930,7 +929,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
for (i = 0; i < tp->md5sig_info->entries4; i++) {
if (tp->md5sig_info->keys4[i].addr == addr) {
/* Free the key */
- kfree(tp->md5sig_info->keys4[i].key);
+ kfree(tp->md5sig_info->keys4[i].base.key);
tp->md5sig_info->entries4--;
if (tp->md5sig_info->entries4 == 0) {
@@ -964,7 +963,7 @@ static void tcp_v4_clear_md5_list(struct sock *sk)
if (tp->md5sig_info->entries4) {
int i;
for (i = 0; i < tp->md5sig_info->entries4; i++)
- kfree(tp->md5sig_info->keys4[i].key);
+ kfree(tp->md5sig_info->keys4[i].base.key);
tp->md5sig_info->entries4 = 0;
tcp_free_md5sig_pool();
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0f7defb482e..3e06799b37a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -539,7 +539,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
for (i = 0; i < tp->md5sig_info->entries6; i++) {
if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
- return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
+ return &tp->md5sig_info->keys6[i].base;
}
return NULL;
}
@@ -567,9 +567,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
if (key) {
/* modify existing entry - just update that one */
- kfree(key->key);
- key->key = newkey;
- key->keylen = newkeylen;
+ kfree(key->base.key);
+ key->base.key = newkey;
+ key->base.keylen = newkeylen;
} else {
/* reallocate new list if current one is full. */
if (!tp->md5sig_info) {
@@ -603,8 +603,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
peer);
- tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
- tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;
+ tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
+ tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
tp->md5sig_info->entries6++;
}
@@ -626,7 +626,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
for (i = 0; i < tp->md5sig_info->entries6; i++) {
if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
/* Free the key */
- kfree(tp->md5sig_info->keys6[i].key);
+ kfree(tp->md5sig_info->keys6[i].base.key);
tp->md5sig_info->entries6--;
if (tp->md5sig_info->entries6 == 0) {
@@ -657,7 +657,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk)
if (tp->md5sig_info->entries6) {
for (i = 0; i < tp->md5sig_info->entries6; i++)
- kfree(tp->md5sig_info->keys6[i].key);
+ kfree(tp->md5sig_info->keys6[i].base.key);
tp->md5sig_info->entries6 = 0;
tcp_free_md5sig_pool();
}
@@ -668,7 +668,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk)
if (tp->md5sig_info->entries4) {
for (i = 0; i < tp->md5sig_info->entries4; i++)
- kfree(tp->md5sig_info->keys4[i].key);
+ kfree(tp->md5sig_info->keys4[i].base.key);
tp->md5sig_info->entries4 = 0;
tcp_free_md5sig_pool();
}
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index 7286c389a4d..ff2172ffd86 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -5259,7 +5259,7 @@ static void __exit ieee80211_exit(void)
}
-module_init(ieee80211_init);
+subsys_initcall(ieee80211_init);
module_exit(ieee80211_exit);
MODULE_DESCRIPTION("IEEE 802.11 subsystem");
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c
index f6780d63b34..17b9f46bbf2 100644
--- a/net/mac80211/rc80211_simple.c
+++ b/net/mac80211/rc80211_simple.c
@@ -431,7 +431,7 @@ static void __exit rate_control_simple_exit(void)
}
-module_init(rate_control_simple_init);
+subsys_initcall(rate_control_simple_init);
module_exit(rate_control_simple_exit);
MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211");
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 89ce8152969..7ab82b376e1 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -424,7 +424,7 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct rtattr *opt)
skb_queue_head_init(&q->requeued[i]);
q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
qd->handle);
- if (q->queues[i] == 0) {
+ if (!q->queues[i]) {
q->queues[i] = &noop_qdisc;
printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i);
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3a23e30bc79..b542c875e15 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
+#include <linux/jhash.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
@@ -95,7 +96,7 @@ struct sfq_sched_data
/* Variables */
struct timer_list perturb_timer;
- int perturbation;
+ u32 perturbation;
sfq_index tail; /* Index of current slot in round */
sfq_index max_depth; /* Maximal depth */
@@ -109,12 +110,7 @@ struct sfq_sched_data
static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
- int pert = q->perturbation;
-
- /* Have we any rotation primitives? If not, WHY? */
- h ^= (h1<<pert) ^ (h1>>(0x1F - pert));
- h ^= h>>10;
- return h & 0x3FF;
+ return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
}
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
@@ -256,6 +252,13 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
q->hash[x] = hash;
}
+ /* If selected queue has length q->limit, this means that
+ * all other queues are empty and that we do a simple tail drop,
+ * i.e. drop _this_ packet.
+ */
+ if (q->qs[x].qlen >= q->limit)
+ return qdisc_drop(skb, sch);
+
sch->qstats.backlog += skb->len;
__skb_queue_tail(&q->qs[x], skb);
sfq_inc(q, x);
@@ -294,6 +297,19 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
}
sch->qstats.backlog += skb->len;
__skb_queue_head(&q->qs[x], skb);
+ /* If selected queue has length q->limit+1, this means that
+ * all other queues are empty and we do a simple tail drop.
+ * This packet is still requeued at head of queue, tail packet
+ * is dropped.
+ */
+ if (q->qs[x].qlen > q->limit) {
+ skb = q->qs[x].prev;
+ __skb_unlink(skb, &q->qs[x]);
+ sch->qstats.drops++;
+ sch->qstats.backlog -= skb->len;
+ kfree_skb(skb);
+ return NET_XMIT_CN;
+ }
sfq_inc(q, x);
if (q->qs[x].qlen == 1) { /* The flow is new */
if (q->tail == SFQ_DEPTH) { /* It is the first flow */
@@ -370,12 +386,10 @@ static void sfq_perturbation(unsigned long arg)
struct Qdisc *sch = (struct Qdisc*)arg;
struct sfq_sched_data *q = qdisc_priv(sch);
- q->perturbation = net_random()&0x1F;
+ get_random_bytes(&q->perturbation, 4);
- if (q->perturb_period) {
- q->perturb_timer.expires = jiffies + q->perturb_period;
- add_timer(&q->perturb_timer);
- }
+ if (q->perturb_period)
+ mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}
static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
@@ -391,7 +405,7 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
q->perturb_period = ctl->perturb_period*HZ;
if (ctl->limit)
- q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 2);
+ q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
qlen = sch->q.qlen;
while (sch->q.qlen > q->limit)
@@ -400,8 +414,8 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
del_timer(&q->perturb_timer);
if (q->perturb_period) {
- q->perturb_timer.expires = jiffies + q->perturb_period;
- add_timer(&q->perturb_timer);
+ mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
+ get_random_bytes(&q->perturbation, 4);
}
sch_tree_unlock(sch);
return 0;
@@ -423,12 +437,13 @@ static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH;
q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH;
}
- q->limit = SFQ_DEPTH - 2;
+ q->limit = SFQ_DEPTH - 1;
q->max_depth = 0;
q->tail = SFQ_DEPTH;
if (opt == NULL) {
q->quantum = psched_mtu(sch->dev);
q->perturb_period = 0;
+ get_random_bytes(&q->perturbation, 4);
} else {
int err = sfq_change(sch, opt);
if (err)
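The sch_sfq changes above replace the ad-hoc shift-based hash with a keyed hash, jhash_2words(h, h1, q->perturbation), where the perturbation is refreshed from get_random_bytes() on a timer so an attacker cannot steer all flows into one bucket. A minimal sketch of the idea; mix32() below is a stand-in mixer, not the kernel's jhash, and every name and address here is a hypothetical example.

/* sfq_hash.c: flow hashing keyed by a periodically refreshed random value */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define HASH_DIVISOR 1024u	/* power of two, like SFQ_HASH_DIVISOR */

static uint32_t perturbation;

static uint32_t mix32(uint32_t a, uint32_t b, uint32_t key)
{
	uint32_t h = a ^ (b * 0x9e3779b1u) ^ key;

	h ^= h >> 16;
	h *= 0x85ebca6bu;
	h ^= h >> 13;
	return h;
}

static unsigned flow_hash(uint32_t saddr, uint32_t daddr)
{
	return mix32(saddr, daddr, perturbation) & (HASH_DIVISOR - 1);
}

static void reperturb(void)	/* the patch uses get_random_bytes() on a timer */
{
	perturbation = (uint32_t)rand();
}

int main(void)
{
	srand((unsigned)time(NULL));
	reperturb();
	printf("bucket before: %u\n", flow_hash(0x0a000001, 0x0a000002));
	reperturb();		/* the same flow may now land in another bucket */
	printf("bucket after:  %u\n", flow_hash(0x0a000001, 0x0a000002));
	return 0;
}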
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index d35cbf5aae3..dfffa94fb9f 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -181,7 +181,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
* structure.
*/
int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
- void (*rcu_call)(struct rcu_head *head,
+ void fastcall (*rcu_call)(struct rcu_head *head,
void (*func)(struct rcu_head *head)))
{
struct sctp_sockaddr_entry *addr, *temp;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 47e56017f4c..f9a0c9276e3 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -622,6 +622,14 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
goto discard;
+ /* RFC 4460, 2.11.2
+ * This will discard packets with INIT chunk bundled as
+ * subsequent chunks in the packet. When INIT is first,
+ * the normal INIT processing will discard the chunk.
+ */
+ if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
+ goto discard;
+
/* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
* or a COOKIE ACK the SCTP Packet should be silently
* discarded.
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 88aa2240754..e4ea7fdf36e 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -130,6 +130,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
/* Force chunk->skb->data to chunk->chunk_end. */
skb_pull(chunk->skb,
chunk->chunk_end - chunk->skb->data);
+
+ /* Verify that we have at least chunk headers
+ * worth of buffer left.
+ */
+ if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
+ sctp_chunk_free(chunk);
+ chunk = queue->in_progress = NULL;
+ }
}
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2e34220d94c..23ae37ec871 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2499,6 +2499,52 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
return SCTP_ERROR_NO_ERROR;
}
+/* Verify the ASCONF packet before we process it. */
+int sctp_verify_asconf(const struct sctp_association *asoc,
+ struct sctp_paramhdr *param_hdr, void *chunk_end,
+ struct sctp_paramhdr **errp) {
+ sctp_addip_param_t *asconf_param;
+ union sctp_params param;
+ int length, plen;
+
+ param.v = (sctp_paramhdr_t *) param_hdr;
+ while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
+ length = ntohs(param.p->length);
+ *errp = param.p;
+
+ if (param.v > chunk_end - length ||
+ length < sizeof(sctp_paramhdr_t))
+ return 0;
+
+ switch (param.p->type) {
+ case SCTP_PARAM_ADD_IP:
+ case SCTP_PARAM_DEL_IP:
+ case SCTP_PARAM_SET_PRIMARY:
+ asconf_param = (sctp_addip_param_t *)param.v;
+ plen = ntohs(asconf_param->param_hdr.length);
+ if (plen < sizeof(sctp_addip_param_t) +
+ sizeof(sctp_paramhdr_t))
+ return 0;
+ break;
+ case SCTP_PARAM_SUCCESS_REPORT:
+ case SCTP_PARAM_ADAPTATION_LAYER_IND:
+ if (length != sizeof(sctp_addip_param_t))
+ return 0;
+
+ break;
+ default:
+ break;
+ }
+
+ param.v += WORD_ROUND(length);
+ }
+
+ if (param.v != chunk_end)
+ return 0;
+
+ return 1;
+}
+
/* Process an incoming ASCONF chunk with the next expected serial no. and
* return an ASCONF_ACK chunk to be sent in response.
*/
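The sctp_verify_asconf() helper added above is an instance of defensive TLV parsing: every declared parameter length is checked against the remaining buffer before the pointer is advanced, and the walk must end exactly at chunk_end. A generic sketch of those bounds checks; the record layout is hypothetical, not SCTP's, and lengths are kept in host byte order to stay out of endianness details.

/* tlv_check.c: reject records that are too short or run past the buffer */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint16_t type;
	uint16_t len;		/* header + value, in bytes */
};

static int tlv_walk_ok(const uint8_t *buf, size_t buflen)
{
	const uint8_t *p = buf, *end = buf + buflen;

	while ((size_t)(end - p) >= sizeof(struct tlv)) {
		struct tlv hdr;

		memcpy(&hdr, p, sizeof(hdr));
		if ((size_t)hdr.len < sizeof(struct tlv) ||	/* shorter than a header */
		    (size_t)hdr.len > (size_t)(end - p))	/* runs past the buffer */
			return 0;
		p += hdr.len;
	}
	return p == end;	/* must land exactly on the end, like param.v != chunk_end */
}

int main(void)
{
	uint8_t buf[8] = { 0 };
	struct tlv hdr = { .type = 1, .len = 8 };

	memcpy(buf, &hdr, sizeof(hdr));		/* one record covering the buffer */
	printf("good: %d\n", tlv_walk_ok(buf, sizeof(buf)));

	hdr.len = 12;				/* claims more than the buffer holds */
	memcpy(buf, &hdr, sizeof(hdr));
	printf("bad:  %d\n", tlv_walk_ok(buf, sizeof(buf)));
	return 0;
}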
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 177528ed3e1..a583d67cab6 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -90,6 +90,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
+static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands);
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
@@ -98,6 +103,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
struct sctp_transport *transport);
static sctp_disposition_t sctp_sf_abort_violation(
+ const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
void *arg,
sctp_cmd_seq_t *commands,
@@ -111,6 +117,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
void *arg,
sctp_cmd_seq_t *commands);
+static sctp_disposition_t sctp_sf_violation_paramlen(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands);
+
static sctp_disposition_t sctp_sf_violation_ctsn(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
@@ -118,6 +131,13 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
void *arg,
sctp_cmd_seq_t *commands);
+static sctp_disposition_t sctp_sf_violation_chunk(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands);
+
/* Small helper function that checks if the chunk length
* is of the appropriate length. The 'required_length' argument
* is set to be the size of a specific chunk we are testing.
@@ -181,16 +201,21 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep,
struct sctp_chunk *chunk = arg;
struct sctp_ulpevent *ev;
+ if (!sctp_vtag_verify_either(chunk, asoc))
+ return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+
/* RFC 2960 6.10 Bundling
*
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*/
if (!chunk->singleton)
- return SCTP_DISPOSITION_VIOLATION;
+ return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
- if (!sctp_vtag_verify_either(chunk, asoc))
- return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
/* RFC 2960 10.2 SCTP-to-ULP
*
@@ -450,17 +475,17 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
- /* Make sure that the INIT-ACK chunk has a valid length */
- if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
- return sctp_sf_violation_chunklen(ep, asoc, type, arg,
- commands);
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*/
if (!chunk->singleton)
- return SCTP_DISPOSITION_VIOLATION;
+ return sctp_sf_violation_chunk(ep, asoc, type, arg, commands);
+ /* Make sure that the INIT-ACK chunk has a valid length */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
@@ -585,7 +610,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
- return sctp_sf_ootb(ep, asoc, type, arg, commands);
+ return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
/* Make sure that the COOKIE_ECHO chunk has a valid length.
* In this case, we check that we have enough for at least a
@@ -2496,6 +2521,11 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep,
struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
struct sctp_chunk *reply;
+ /* Make sure that the chunk has a valid length */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
/* Since we are not going to really process this INIT, there
 * is no point in verifying chunk boundaries. Just generate
* the SHUTDOWN ACK.
@@ -2929,7 +2959,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
*
* The return value is the disposition of the chunk.
*/
-sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
@@ -2965,6 +2995,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ sctp_sf_pdiscard(ep, asoc, type, arg, commands);
return SCTP_DISPOSITION_CONSUME;
}
@@ -3125,14 +3156,14 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
do {
- /* Break out if chunk length is less then minimal. */
+ /* Report violation if the chunk length is less than minimal */
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
- break;
-
- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
- if (ch_end > skb_tail_pointer(skb))
- break;
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+ /* Now that we know we at least have a chunk header,
+ * do things that are type appropriate.
+ */
if (SCTP_CID_SHUTDOWN_ACK == ch->type)
ootb_shut_ack = 1;
@@ -3144,15 +3175,19 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
if (SCTP_CID_ABORT == ch->type)
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ /* Report violation if chunk len overflows */
+ ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ if (ch_end > skb_tail_pointer(skb))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));
if (ootb_shut_ack)
- sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
+ return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
else
- sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
- return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
}
/*
@@ -3218,7 +3253,11 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
- return SCTP_DISPOSITION_CONSUME;
+ /* We need to discard the rest of the packet to prevent
+ * potential bombing attacks from additional bundled chunks.
+ * This is documented in SCTP Threats ID.
+ */
+ return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
}
return SCTP_DISPOSITION_NOMEM;
@@ -3241,6 +3280,13 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep,
void *arg,
sctp_cmd_seq_t *commands)
{
+ struct sctp_chunk *chunk = arg;
+
+ /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
/* Although we do have an association in this case, it corresponds
* to a restarted association. So the packet is treated as an OOTB
* packet and the state function that handles OOTB SHUTDOWN_ACK is
@@ -3257,8 +3303,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *asconf_ack = NULL;
+ struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *hdr;
+ union sctp_addr_param *addr_param;
__u32 serial;
+ int length;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -3274,6 +3323,20 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
hdr = (sctp_addiphdr_t *)chunk->skb->data;
serial = ntohl(hdr->serial);
+ addr_param = (union sctp_addr_param *)hdr->params;
+ length = ntohs(addr_param->p.length);
+ if (length < sizeof(sctp_paramhdr_t))
+ return sctp_sf_violation_paramlen(ep, asoc, type,
+ (void *)addr_param, commands);
+
+ /* Verify the ASCONF chunk before processing it. */
+ if (!sctp_verify_asconf(asoc,
+ (sctp_paramhdr_t *)((void *)addr_param + length),
+ (void *)chunk->chunk_end,
+ &err_param))
+ return sctp_sf_violation_paramlen(ep, asoc, type,
+ (void *)&err_param, commands);
+
/* ADDIP 4.2 C1) Compare the value of the serial number to the value
* the endpoint stored in a new association variable
* 'Peer-Serial-Number'.
@@ -3328,6 +3391,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
struct sctp_chunk *asconf_ack = arg;
struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
struct sctp_chunk *abort;
+ struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *addip_hdr;
__u32 sent_serial, rcvd_serial;
@@ -3345,6 +3409,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
rcvd_serial = ntohl(addip_hdr->serial);
+ /* Verify the ASCONF-ACK chunk before processing it. */
+ if (!sctp_verify_asconf(asoc,
+ (sctp_paramhdr_t *)addip_hdr->params,
+ (void *)asconf_ack->chunk_end,
+ &err_param))
+ return sctp_sf_violation_paramlen(ep, asoc, type,
+ (void *)&err_param, commands);
+
if (last_asconf) {
addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr;
sent_serial = ntohl(addip_hdr->serial);
@@ -3655,6 +3727,16 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
void *arg,
sctp_cmd_seq_t *commands)
{
+ struct sctp_chunk *chunk = arg;
+
+ /* Make sure that the chunk has a valid length.
+ * Since we don't know the chunk type, we use a general
+ * chunkhdr structure to make a comparison.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk);
return SCTP_DISPOSITION_DISCARD;
}
@@ -3710,6 +3792,13 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
void *arg,
sctp_cmd_seq_t *commands)
{
+ struct sctp_chunk *chunk = arg;
+
+ /* Make sure that the chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
return SCTP_DISPOSITION_VIOLATION;
}
@@ -3717,12 +3806,14 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
* Common function to handle a protocol violation.
*/
static sctp_disposition_t sctp_sf_abort_violation(
+ const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
void *arg,
sctp_cmd_seq_t *commands,
const __u8 *payload,
const size_t paylen)
{
+ struct sctp_packet *packet = NULL;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *abort = NULL;
@@ -3731,30 +3822,51 @@ static sctp_disposition_t sctp_sf_abort_violation(
if (!abort)
goto nomem;
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
- SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
+ if (asoc) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
- if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
- sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
- SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
- sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
- SCTP_ERROR(ECONNREFUSED));
- sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
- SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+ if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNREFUSED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+ SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+ } else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
+ SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ }
} else {
- sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
- SCTP_ERROR(ECONNABORTED));
- sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
- SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
- SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+ packet = sctp_ootb_pkt_new(asoc, chunk);
+
+ if (!packet)
+ goto nomem_pkt;
+
+ if (sctp_test_T_bit(abort))
+ packet->vtag = ntohl(chunk->sctp_hdr->vtag);
+
+ abort->skb->sk = ep->base.sk;
+
+ sctp_packet_append_chunk(packet, abort);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+ SCTP_PACKET(packet));
+
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
}
- sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
+ sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
return SCTP_DISPOSITION_ABORT;
+nomem_pkt:
+ sctp_chunk_free(abort);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
@@ -3787,7 +3899,24 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
{
char err_str[]="The following chunk had invalid length:";
- return sctp_sf_abort_violation(asoc, arg, commands, err_str,
+ return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+ sizeof(err_str));
+}
+
+/*
+ * Handle a protocol violation when the parameter length is invalid.
+ * "Invalid" length is identified as smaller then the minimal length a
+ * given parameter can be.
+ */
+static sctp_disposition_t sctp_sf_violation_paramlen(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands) {
+ char err_str[] = "The following parameter had invalid length:";
+
+ return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
@@ -3806,10 +3935,31 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
{
char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
- return sctp_sf_abort_violation(asoc, arg, commands, err_str,
+ return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
+/* Handle a protocol violation of invalid chunk bundling. For example,
+ * when we have an association and we receive a bundled INIT-ACK or
+ * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
+ * statement from the specs. Additionally, there might be an attacker
+ * on the path and we may not want to continue this communication.
+ */
+static sctp_disposition_t sctp_sf_violation_chunk(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ char err_str[]="The following chunk violates protocol:";
+
+ if (!asoc)
+ return sctp_sf_violation(ep, asoc, type, arg, commands);
+
+ return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str,
+ sizeof(err_str));
+}
/***************************************************************************
* These are the state functions for handling primitive (Section 10) events.
***************************************************************************/
@@ -5176,7 +5326,22 @@ static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc
* association exists, otherwise, use the peer's vtag.
*/
if (asoc) {
- vtag = asoc->peer.i.init_tag;
+ /* Special case the INIT-ACK as there is no peer's vtag
+ * yet.
+ */
+ switch (chunk->chunk_hdr->type) {
+ case SCTP_CID_INIT_ACK:
+ {
+ sctp_initack_chunk_t *initack;
+
+ initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
+ vtag = ntohl(initack->init_hdr.init_tag);
+ break;
+ }
+ default:
+ vtag = asoc->peer.i.init_tag;
+ break;
+ }
} else {
/* Special case the INIT and stale COOKIE_ECHO as there is no
* vtag yet.
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 70a91ece3c4..ddb0ba3974b 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -110,7 +110,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -173,7 +173,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -194,7 +194,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -216,7 +216,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_violation), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -258,7 +258,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -300,7 +300,7 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -499,7 +499,7 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_
/* SCTP_STATE_EMPTY */ \
TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_CLOSED */ \
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \
+ TYPE_SCTP_FUNC(sctp_sf_ootb), \
/* SCTP_STATE_COOKIE_WAIT */ \
TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
/* SCTP_STATE_COOKIE_ECHOED */ \
@@ -528,7 +528,7 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
/* SCTP_STATE_EMPTY */
TYPE_SCTP_FUNC(sctp_sf_ootb),
/* SCTP_STATE_CLOSED */
- TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8),
+ TYPE_SCTP_FUNC(sctp_sf_ootb),
/* SCTP_STATE_COOKIE_WAIT */
TYPE_SCTP_FUNC(sctp_sf_unk_chunk),
/* SCTP_STATE_COOKIE_ECHOED */
diff --git a/net/socket.c b/net/socket.c
index 7d44453dfae..b09eb9036a1 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -777,9 +777,6 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (pos != 0)
return -ESPIPE;
- if (iocb->ki_left == 0) /* Match SYS5 behaviour */
- return 0;
-
x = alloc_sock_iocb(iocb, &siocb);
if (!x)
return -ENOMEM;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 7eabd55417a..9771451eae2 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -213,7 +213,7 @@ out_fail_notifier:
out_fail_sysfs:
return err;
}
-module_init(cfg80211_init);
+subsys_initcall(cfg80211_init);
static void cfg80211_exit(void)
{
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 88aaacd9f82..2d5d2255a27 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -52,12 +52,14 @@ static void wiphy_dev_release(struct device *dev)
cfg80211_dev_free(rdev);
}
+#ifdef CONFIG_HOTPLUG
static int wiphy_uevent(struct device *dev, char **envp,
int num_envp, char *buf, int size)
{
/* TODO, we probably need stuff here */
return 0;
}
+#endif
struct class ieee80211_class = {
.name = "ieee80211",