69 files changed, 583 insertions, 301 deletions
diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
index fa0c786a8bf..cf6b6cb02aa 100644
--- a/Documentation/i2c/busses/i2c-piix4
+++ b/Documentation/i2c/busses/i2c-piix4
@@ -6,7 +6,7 @@ Supported adapters:
     Datasheet: Publicly available at the Intel website
   * ServerWorks OSB4, CSB5, CSB6 and HT-1000 southbridges
     Datasheet: Only available via NDA from ServerWorks
-  * ATI IXP200, IXP300, IXP400, SB600 and SB700 southbridges
+  * ATI IXP200, IXP300, IXP400, SB600, SB700 and SB800 southbridges
     Datasheet: Not publicly available
   * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
     Datasheet: Publicly available at the SMSC website http://www.smsc.com
diff --git a/MAINTAINERS b/MAINTAINERS
index 48ca8b47150..9c54a5ef0ba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -44,9 +44,10 @@ trivial patch so apply some common sense.
 	or does something very odd once a month document it.
 
 	PLEASE remember that submissions must be made under the terms
-	of the OSDL certificate of contribution
-	(http://www.osdl.org/newsroom/press_releases/2004/2004_05_24_dco.html)
-	and should include a Signed-off-by: line.
+	of the OSDL certificate of contribution and should include a
+	Signed-off-by: line.  The current version of this "Developer's
+	Certificate of Origin" (DCO) is listed in the file
+	Documentation/SubmittingPatches.
 
 6.	Make sure you have the right to send any changes you make. If you
 	do changes at work you may find your employer owns the patch
@@ -2057,12 +2058,18 @@ L:	http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel
 W:	http://ipw2200.sourceforge.net
 S:	Supported
 
-IOC3 DRIVER
+IOC3 ETHERNET DRIVER
 P:	Ralf Baechle
 M:	ralf@linux-mips.org
 L:	linux-mips@linux-mips.org
 S:	Maintained
 
+IOC3 SERIAL DRIVER
+P:	Pat Gefre
+M:	pfg@sgi.com
+L:	linux-kernel@linux-mips.org
+S:	Maintained
+
 IP MASQUERADING:
 P:	Juanjo Ciarlante
 M:	jjciarla@raiz.uncu.edu.ar
@@ -2594,6 +2601,19 @@ M:	shemminger@linux-foundation.org
 L:	netem@lists.linux-foundation.org
 S:	Maintained
 
+NETERION (S2IO) Xframe 10GbE DRIVER
+P:	Ramkrishna Vepa
+M:	ram.vepa@neterion.com
+P:	Rastapur Santosh
+M:	santosh.rastapur@neterion.com
+P:	Sivakumar Subramani
+M:	sivakumar.subramani@neterion.com
+P:	Sreenivasa Honnur
+M:	sreenivasa.honnur@neterion.com
+L:	netdev@vger.kernel.org
+W:	http://trac.neterion.com/cgi-bin/trac.cgi/wiki/TitleIndex?anonymous
+S:	Supported
+
 NETFILTER/IPTABLES/IPCHAINS
 P:	Rusty Russell
 P:	Marc Boucher
@@ -2734,19 +2754,6 @@ M:	adaplas@gmail.com
 L:	linux-fbdev-devel@lists.sourceforge.net (subscribers-only)
 S:	Maintained
 
-NETERION (S2IO) Xframe 10GbE DRIVER
-P:	Ramkrishna Vepa
-M:	ram.vepa@neterion.com
-P:	Rastapur Santosh
-M:	santosh.rastapur@neterion.com
-P:	Sivakumar Subramani
-M:	sivakumar.subramani@neterion.com
-P:	Sreenivasa Honnur
-M:	sreenivasa.honnur@neterion.com
-L:	netdev@vger.kernel.org
-W:	http://trac.neterion.com/cgi-bin/trac.cgi/wiki/TitleIndex?anonymous
-S:	Supported
-
 OPENCORES I2C BUS DRIVER
 P:	Peter Korsgaard
 M:	jacmet@sunsite.dk
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index efdf95ac803..6c06d9c0488 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -367,7 +367,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
+		if (prepare_hugepage_range(addr, len))
 			return -EINVAL;
 		return addr;
 	}
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index d22861c5b04..a9ff685aea2 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -75,10 +75,8 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
  * Don't actually need to do any preparation, but need to make sure
  * the address is in the right region.
  */
-int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-		return -EINVAL;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
 	if (addr & ~HPAGE_MASK)
@@ -151,7 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
 
 	/* Handle MAP_FIXED */
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
+		if (prepare_hugepage_range(addr, len))
 			return -EINVAL;
 		return addr;
 	}
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/setup.c b/arch/mips/tx4938/toshiba_rbtx4938/setup.c
index 84ebff711e6..f236b1ff892 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/setup.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/setup.c
@@ -1108,7 +1108,7 @@ static void __init txx9_spi_init(unsigned long base, int irq)
 			.flags = IORESOURCE_IRQ,
 		},
 	};
-	platform_device_register_simple("txx9spi", 0,
+	platform_device_register_simple("spi_txx9", 0,
 					res, ARRAY_SIZE(res));
 }
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 384abf410cf..23956096b3b 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -217,8 +217,27 @@ struct irq_handler_data {
 	void		(*pre_handler)(unsigned int, void *, void *);
 	void		*pre_handler_arg1;
 	void		*pre_handler_arg2;
+
+	u32		msi;
 };
 
+void sparc64_set_msi(unsigned int virt_irq, u32 msi)
+{
+	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+
+	if (data)
+		data->msi = msi;
+}
+
+u32 sparc64_get_msi(unsigned int virt_irq)
+{
+	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+
+	if (data)
+		return data->msi;
+	return 0xffffffff;
+}
+
 static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
 {
 	unsigned int real_irq = virt_to_real_irq(virt_irq);
@@ -308,7 +327,7 @@ static void sun4u_irq_disable(unsigned int virt_irq)
 
 	if (likely(data)) {
 		unsigned long imap = data->imap;
-		u32 tmp = upa_readq(imap);
+		unsigned long tmp = upa_readq(imap);
 
 		tmp &= ~IMAP_VALID;
 		upa_writeq(tmp, imap);
@@ -741,7 +760,7 @@ unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
 			break;
 	}
 	if (devino >= msi_end)
-		return 0;
+		return -ENOSPC;
 
 	sysino = sun4v_devino_to_sysino(devhandle, devino);
 	bucket = &ivector_table[sysino];
@@ -755,8 +774,8 @@ unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
 
 	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
 	if (unlikely(!data)) {
-		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
-		prom_halt();
+		virt_irq_free(*virt_irq_p);
+		return -ENOMEM;
 	}
 
 	set_irq_chip_data(bucket->virt_irq, data);
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 3d93e9203ba..139b4cff801 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -393,7 +393,6 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 	sd->host_controller = pbm;
 	sd->prom_node = node;
 	sd->op = of_find_device_by_node(node);
-	sd->msi_num = 0xffffffff;
 
 	sd = &sd->op->dev.archdata;
 	sd->iommu = pbm->iommu;
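The sparc64/kernel/irq.c hunks above move the MSI number out of per-device archdata and into the per-virtual-IRQ irq_handler_data, behind a pair of accessors, with 0xffffffff as the "no MSI" sentinel; the pci_sun4v.c diff that follows converts the only users. A small user-space sketch of the same bookkeeping pattern (the lookup table and its size are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_VIRT_IRQS	64
    #define MSI_INVALID	0xffffffffu

    /* Hypothetical stand-in for the kernel's per-IRQ chip data. */
    struct irq_handler_data {
    	uint32_t msi;
    };

    static struct irq_handler_data *irq_chip_data[NR_VIRT_IRQS];

    static void set_msi(unsigned int virt_irq, uint32_t msi)
    {
    	struct irq_handler_data *data = irq_chip_data[virt_irq];

    	if (data)		/* tolerate IRQs with no chip data */
    		data->msi = msi;
    }

    static uint32_t get_msi(unsigned int virt_irq)
    {
    	struct irq_handler_data *data = irq_chip_data[virt_irq];

    	return data ? data->msi : MSI_INVALID;
    }

    int main(void)
    {
    	static struct irq_handler_data d;

    	irq_chip_data[5] = &d;
    	set_msi(5, 42);
    	printf("irq 5 -> msi %u\n", get_msi(5));	/* 42 */
    	printf("irq 6 -> msi %#x\n", get_msi(6));	/* 0xffffffff */
    	return 0;
    }

Keyed this way, the cookie follows the interrupt rather than the device, which is what lets the teardown path below recover it from nothing but the virt_irq.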
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 466f4aa8fc8..da724b13e89 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -940,13 +940,13 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
 	if (msi_num < 0)
 		return msi_num;
 
-	devino = sun4v_build_msi(pbm->devhandle, virt_irq_p,
-				 pbm->msiq_first_devino,
-				 (pbm->msiq_first_devino +
-				  pbm->msiq_num));
-	err = -ENOMEM;
-	if (!devino)
+	err = sun4v_build_msi(pbm->devhandle, virt_irq_p,
+			      pbm->msiq_first_devino,
+			      (pbm->msiq_first_devino +
+			       pbm->msiq_num));
+	if (err < 0)
 		goto out_err;
+	devino = err;
 
 	msiqid = ((devino - pbm->msiq_first_devino) +
 		  pbm->msiq_first);
@@ -971,7 +971,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
 	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
 		goto out_err;
 
-	pdev->dev.archdata.msi_num = msi_num;
+	sparc64_set_msi(*virt_irq_p, msi_num);
 
 	if (entry->msi_attrib.is_64) {
 		msg.address_hi = pbm->msi64_start >> 32;
@@ -993,8 +993,6 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
 
 out_err:
 	free_msi(pbm, msi_num);
-	sun4v_destroy_msi(*virt_irq_p);
-	*virt_irq_p = 0;
 	return err;
 }
 
@@ -1006,7 +1004,7 @@ static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
 	unsigned long msiqid, err;
 	unsigned int msi_num;
 
-	msi_num = pdev->dev.archdata.msi_num;
+	msi_num = sparc64_get_msi(virt_irq);
 	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
 	if (err) {
 		printk(KERN_ERR "%s: getmsiq gives error %lu\n",
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index eaba9b70b18..6cfab2e4d34 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -175,7 +175,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
+		if (prepare_hugepage_range(addr, len))
 			return -EINVAL;
 		return addr;
 	}
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 24547741b20..41850906116 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -71,11 +71,13 @@ SECTIONS
     *(.gnu.warning)
 
     . = ALIGN(4096);
-    __syscall_stub_start = .;
-    *(.__syscall_stub*)
-    __syscall_stub_end = .;
-    . = ALIGN(4096);
   } =0x90909090
+  . = ALIGN(4096);
+  .syscall_stub : {
+	__syscall_stub_start = .;
+	*(.__syscall_stub*)
+	__syscall_stub_end = .;
+  }
   .fini : {
     KEEP (*(.fini))
   } =0x90909090
@@ -138,8 +140,8 @@ SECTIONS
   .got : { *(.got.plt) *(.got) }
   _edata = .;
   PROVIDE (edata = .);
-  __bss_start = .;
   .bss : {
+   __bss_start = .;
    *(.dynbss)
    *(.bss .bss.* .gnu.linkonce.b.*)
    *(COMMON)
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 307b9373676..81acdc24348 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -44,12 +44,13 @@ SECTIONS
     /* .gnu.warning sections are handled specially by elf32.em.  */
     *(.gnu.warning)
     *(.gnu.linkonce.t*)
+  }
 
-    . = ALIGN(4096);
-    __syscall_stub_start = .;
-    *(.__syscall_stub*)
-    __syscall_stub_end = .;
-    . = ALIGN(4096);
+  . = ALIGN(4096);
+  .syscall_stub : {
+	__syscall_stub_start = .;
+	*(.__syscall_stub*)
+	__syscall_stub_end = .;
   }
 
   #include "asm/common.lds.S"
diff --git a/arch/um/os-Linux/sys-x86_64/registers.c b/arch/um/os-Linux/sys-x86_64/registers.c
index e6fc2179d1b..9467315b805 100644
--- a/arch/um/os-Linux/sys-x86_64/registers.c
+++ b/arch/um/os-Linux/sys-x86_64/registers.c
@@ -4,6 +4,7 @@
  */
 
 #include <errno.h>
+#include <sys/ptrace.h>
 #include <string.h>
 #include "ptrace_user.h"
 #include "uml-config.h"
@@ -17,6 +18,20 @@ static unsigned long exec_regs[MAX_REG_NR];
 static unsigned long exec_fp_regs[HOST_FP_SIZE];
 
+int save_fp_registers(int pid, unsigned long *fp_regs)
+{
+	if(ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0)
+		return -errno;
+	return 0;
+}
+
+int restore_fp_registers(int pid, unsigned long *fp_regs)
+{
+	if(ptrace(PTRACE_SETFPREGS, pid, 0, fp_regs) < 0)
+		return -errno;
+	return 0;
+}
+
 void init_thread_registers(union uml_pt_regs *to)
 {
 	memcpy(to->skas.regs, exec_regs, sizeof(to->skas.regs));
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c
index 55b66e09a98..1970d78aa52 100644
--- a/arch/um/sys-x86_64/ptrace.c
+++ b/arch/um/sys-x86_64/ptrace.c
@@ -156,12 +156,6 @@ int is_syscall(unsigned long addr)
 	return(instr == 0x050f);
 }
 
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu )
-{
-	panic("dump_fpu");
-	return(1);
-}
-
 int get_fpregs(unsigned long buf, struct task_struct *child)
 {
 	panic("get_fpregs");
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index e64f65c9d90..b091c5e3555 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -201,7 +201,6 @@ CONFIG_PM=y
 # CONFIG_PM_DEBUG is not set
 CONFIG_HIBERNATION=y
 CONFIG_PM_STD_PARTITION=""
-CONFIG_SUSPEND_SMP=y
 
 #
 # ACPI (Advanced Configuration and Power Interface) Support
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 1842f523c23..9f3a4cd0b07 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -208,6 +208,7 @@ config I2C_PIIX4
 	    ATI IXP400
 	    ATI SB600
 	    ATI SB700
+	    ATI SB800
 	    Serverworks OSB4
 	    Serverworks CSB5
 	    Serverworks CSB6
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index debc76cd216..167e4137ee2 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -23,7 +23,7 @@
    Supports:
 	Intel PIIX4, 440MX
 	Serverworks OSB4, CSB5, CSB6, HT-1000
-	ATI IXP200, IXP300, IXP400, SB600, SB700
+	ATI IXP200, IXP300, IXP400, SB600, SB700, SB800
 	SMSC Victory66
 
    Note: we assume there can only be one device, with one SMBus interface.
@@ -397,9 +397,7 @@ static struct pci_device_id piix4_ids[] = {
 	  .driver_data = 0 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS),
 	  .driver_data = 0 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SMBUS),
-	  .driver_data = 0 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SMBUS),
+	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS),
 	  .driver_data = 0 },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4),
 	  .driver_data = 0 },
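The i2c-piix4 hunk above can collapse two table entries into one because SB600, SB700 and SB800 all present their SMBus controller at the same PCI device ID, 0x4385 (renamed to PCI_DEVICE_ID_ATI_SBX00_SMBUS in the pci_ids.h hunk later in this series). A hedged sketch of the resulting match-table shape, following the usual pci_device_id conventions:

    #include <linux/pci.h>

    /* One entry now covers SB600/SB700/SB800: they share device ID 0x4385. */
    static const struct pci_device_id piix4_ids_sketch[] = {
    	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) },
    	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
    	{ }	/* terminating all-zero entry */
    };
    MODULE_DEVICE_TABLE(pci, piix4_ids_sketch);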
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 24e7f9ab3f5..854d80c330e 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -3934,11 +3934,13 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
 	/* Chip reset. */
 	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
 
+	/* Reading back any register after chip reset will hang the
+	 * bus on 5706 A0 and A1.  The msleep below provides plenty
+	 * of margin for write posting.
+	 */
 	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
-	    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
-		current->state = TASK_UNINTERRUPTIBLE;
-		schedule_timeout(HZ / 50);
-	}
+	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
+		msleep(20);
 
 	/* Reset takes approximate 30 usec */
 	for (i = 0; i < 10; i++) {
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index bd66339f7a3..1ea1ed82c35 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -610,7 +610,7 @@ static int pci_netmos_init(struct pci_dev *dev)
 /* enable IO_Space bit */
 #define ITE_887x_POSIO_ENABLE (1 << 31)
 
-static int __devinit pci_ite887x_init(struct pci_dev *dev)
+static int pci_ite887x_init(struct pci_dev *dev)
 {
 	/* inta_addr are the configuration addresses of the ITE */
 	static const short inta_addr[] = { 0x2a0, 0x2c0, 0x220, 0x240, 0x1e0,
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index ad144054da3..b0469749310 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -251,7 +251,7 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
 		xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, xfer->len,
 					DMA_FROM_DEVICE);
-		if (dma_mapping_error(xfer->tx_dma)) {
+		if (dma_mapping_error(xfer->rx_dma)) {
 			if (xfer->tx_buf)
 				dma_unmap_single(dev,
 						xfer->tx_dma, xfer->len,
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 48587c27050..f540ed77a10 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1303,8 +1303,9 @@ static int bfin5xx_spi_resume(struct platform_device *pdev)
 #define bfin5xx_spi_resume NULL
 #endif				/* CONFIG_PM */
 
+MODULE_ALIAS("bfin-spi-master");	/* for platform bus hotplug */
 static struct platform_driver bfin5xx_spi_driver = {
-	.driver	= {
+	.driver = {
 		.name	= "bfin-spi-master",
 		.owner	= THIS_MODULE,
 	},
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index aee9ad6f633..bd9177f51de 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -1735,7 +1735,7 @@ static int spi_imx_resume(struct platform_device *pdev)
 
 static struct platform_driver driver = {
 	.driver = {
-		.name = "imx-spi",
+		.name = "spi_imx",
 		.bus = &platform_bus_type,
 		.owner = THIS_MODULE,
 	},
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index 2adf856e44c..fcbf1b8a526 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -530,6 +530,7 @@ static int __devexit mpc83xx_spi_remove(struct platform_device *dev)
 	return 0;
 }
 
+MODULE_ALIAS("mpc83xx_spi");		/* for platform bus hotplug */
 static struct platform_driver mpc83xx_spi_driver = {
 	.probe = mpc83xx_spi_probe,
 	.remove = __devexit_p(mpc83xx_spi_remove),
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 5cf48123e0e..e9b683f7d7b 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -427,6 +427,7 @@ static int s3c24xx_spi_resume(struct platform_device *pdev)
 #define s3c24xx_spi_resume NULL
 #endif
 
+MODULE_ALIAS("s3c2410_spi");		/* for platform bus hotplug */
 static struct platform_driver s3c24xx_spidrv = {
 	.probe		= s3c24xx_spi_probe,
 	.remove		= s3c24xx_spi_remove,
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
index 611ac22b7cd..0fa25e2e80f 100644
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ b/drivers/spi/spi_s3c24xx_gpio.c
@@ -180,7 +180,7 @@ static struct platform_driver s3c2410_spigpio_drv = {
 	.suspend	= s3c2410_spigpio_suspend,
 	.resume		= s3c2410_spigpio_resume,
 	.driver		= {
-		.name	= "s3c24xx-spi-gpio",
+		.name	= "spi_s3c24xx_gpio",
 		.owner	= THIS_MODULE,
 	},
 };
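All of the SPI renames in this series serve platform-bus matching: a platform_device registered as "spi_txx9" binds only to a platform_driver whose driver.name is exactly "spi_txx9", and MODULE_ALIAS() gives hotplug a module name to load for a device registered before its driver. A minimal sketch of the pairing, with the driver body reduced to a stub:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    /* Must match platform_device_register_simple("spi_txx9", ...) exactly,
     * or the bus never binds device and driver.
     */
    static int txx9spi_probe_sketch(struct platform_device *dev)
    {
    	return 0;		/* the real probe sets up the controller */
    }

    static struct platform_driver txx9spi_driver_sketch = {
    	.probe	= txx9spi_probe_sketch,
    	.driver	= {
    		.name	= "spi_txx9",
    		.owner	= THIS_MODULE,
    	},
    };

    /* Lets udev map the device name to this module on hotplug. */
    MODULE_ALIAS("spi_txx9");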
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
index 08e981c4064..b7f4bb239ea 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi_txx9.c
@@ -453,7 +453,7 @@ static int __exit txx9spi_remove(struct platform_device *dev)
 static struct platform_driver txx9spi_driver = {
 	.remove = __exit_p(txx9spi_remove),
 	.driver = {
-		.name = "txx9spi",
+		.name = "spi_txx9",
 		.owner = THIS_MODULE,
 	},
 };
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index f0bf9a68e96..5d04f520c12 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -21,7 +21,7 @@
 
 #include <syslib/virtex_devices.h>
 
-#define XILINX_SPI_NAME "xspi"
+#define XILINX_SPI_NAME "xilinx_spi"
 
 /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
  * Product Specification", DS464
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 5db6b1e489b..a22ccf9485a 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -182,7 +182,7 @@ config FONT_8x8
 
 config FONT_8x16
 	bool "VGA 8x16 font" if FONTS
-	depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE=y || STI_CONSOLE || USB_SISUSBVGA_CON
+	depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON
 	default y if !SPARC && !FONTS
 	help
 	  This is the "high resolution" font for the VGA frame buffer (the one
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 5d40ad13ab5..131954b3fb9 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -357,10 +357,6 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
 		ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n");
 		goto out;
 	}
-	if (special_file(lower_inode->i_mode)) {
-		ecryptfs_printk(KERN_DEBUG, "Is a special file; returning\n");
-		goto out;
-	}
 	if (!nd) {
 		ecryptfs_printk(KERN_DEBUG, "We have a NULL nd, just leave"
 				"as we *think* we are about to unlink\n");
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index e4ab7bc14ef..fd3f94d4a66 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -834,7 +834,8 @@ static void ecryptfs_sync_page(struct page *page)
 		ecryptfs_printk(KERN_DEBUG, "find_lock_page failed\n");
 		return;
 	}
-	lower_page->mapping->a_ops->sync_page(lower_page);
+	if (lower_page->mapping->a_ops->sync_page)
+		lower_page->mapping->a_ops->sync_page(lower_page);
 	ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16x]\n",
 			lower_page->index);
 	unlock_page(lower_page);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index c848a191525..950c2fbb815 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -82,14 +82,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	int ret;
 
 	/*
-	 * vma alignment has already been checked by prepare_hugepage_range.
-	 * If you add any error returns here, do so after setting VM_HUGETLB,
-	 * so is_vm_hugetlb_page tests below unmap_region go the right way
-	 * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+	 * vma address alignment (but not the pgoff alignment) has
+	 * already been checked by prepare_hugepage_range.  If you add
+	 * any error returns here, do so after setting VM_HUGETLB, so
+	 * is_vm_hugetlb_page tests below unmap_region go the right
+	 * way when do_mmap_pgoff unwinds (may be important on powerpc
+	 * and ia64).
 	 */
 	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
 	vma->vm_ops = &hugetlb_vm_ops;
 
+	if (vma->vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT))
+		return -EINVAL;
+
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
 	mutex_lock(&inode->i_mutex);
@@ -132,7 +137,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(addr, len, pgoff))
+		if (prepare_hugepage_range(addr, len))
 			return -EINVAL;
 		return addr;
 	}
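With prepare_hugepage_range() reduced to address/length checks across the hugetlb hunks in this series, the file-offset check lands in hugetlbfs_file_mmap() above, deliberately after VM_HUGETLB is set. The test itself is plain mask arithmetic; a user-space illustration (huge-page size and sample offsets invented for the demo):

    #include <stdio.h>

    #define PAGE_SHIFT	12
    #define HPAGE_SHIFT	21			/* e.g. 2 MiB huge pages */
    #define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))

    /* Mirrors the new check: vm_pgoff counts small pages, so it must be
     * a multiple of the huge page size expressed in small pages.
     */
    static int pgoff_ok(unsigned long vm_pgoff)
    {
    	return !(vm_pgoff & ~(HPAGE_MASK >> PAGE_SHIFT));
    }

    int main(void)
    {
    	printf("%d\n", pgoff_ok(0));	/* 1: offset zero is aligned   */
    	printf("%d\n", pgoff_ok(512));	/* 1: 512 * 4 KiB = one 2 MiB  */
    	printf("%d\n", pgoff_ok(5));	/* 0: 20 KiB, not huge-aligned */
    	return 0;
    }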
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 276f7207a56..87e87dcd3f9 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -540,26 +540,24 @@ static void udf_table_free_blocks(struct super_block *sb,
 		if (epos.offset + adsize > sb->s_blocksize) {
 			loffset = epos.offset;
 			aed->lengthAllocDescs = cpu_to_le32(adsize);
-			sptr = UDF_I_DATA(inode) + epos.offset -
-				udf_file_entry_alloc_offset(inode) +
-				UDF_I_LENEATTR(inode) - adsize;
+			sptr = UDF_I_DATA(table) + epos.offset - adsize;
 			dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
 			memcpy(dptr, sptr, adsize);
 			epos.offset = sizeof(struct allocExtDesc) + adsize;
 		} else {
 			loffset = epos.offset + adsize;
 			aed->lengthAllocDescs = cpu_to_le32(0);
-			sptr = oepos.bh->b_data + epos.offset;
-			epos.offset = sizeof(struct allocExtDesc);
-
 			if (oepos.bh) {
+				sptr = oepos.bh->b_data + epos.offset;
 				aed = (struct allocExtDesc *)oepos.bh->b_data;
 				aed->lengthAllocDescs =
 					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
 			} else {
+				sptr = UDF_I_DATA(table) + epos.offset;
 				UDF_I_LENALLOC(table) += adsize;
 				mark_inode_dirty(table);
 			}
+			epos.offset = sizeof(struct allocExtDesc);
 		}
 		if (UDF_SB_UDFREV(sb) >= 0x0200)
 			udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 382be7be5ae..c68a6e730b9 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -89,7 +89,7 @@ static int udf_find_fileset(struct super_block *, kernel_lb_addr *,
 static void udf_load_pvoldesc(struct super_block *, struct buffer_head *);
 static void udf_load_fileset(struct super_block *, struct buffer_head *,
 			     kernel_lb_addr *);
-static void udf_load_partdesc(struct super_block *, struct buffer_head *);
+static int udf_load_partdesc(struct super_block *, struct buffer_head *);
 static void udf_open_lvid(struct super_block *);
 static void udf_close_lvid(struct super_block *);
 static unsigned int udf_count_free(struct super_block *);
@@ -877,7 +877,7 @@ static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
 		  root->logicalBlockNum, root->partitionReferenceNum);
 }
 
-static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
+static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
 {
 	struct partitionDesc *p;
 	int i;
@@ -912,6 +912,11 @@ static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
 				UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table =
 					udf_iget(sb, loc);
+				if (!UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table) {
+					udf_debug("cannot load unallocSpaceTable (part %d)\n",
+						  i);
+					return 1;
+				}
 				UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_TABLE;
 				udf_debug("unallocSpaceTable (part %d) @ %ld\n",
 					  i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table->i_ino);
@@ -938,6 +943,11 @@ static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
 				UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table =
 					udf_iget(sb, loc);
+				if (!UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table) {
+					udf_debug("cannot load freedSpaceTable (part %d)\n",
+						  i);
+					return 1;
+				}
 				UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_TABLE;
 				udf_debug("freedSpaceTable (part %d) @ %ld\n",
 					  i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table->i_ino);
@@ -966,6 +976,7 @@ static void udf_load_partdesc(struct super_block *sb, struct buffer_head *bh)
 			  le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb,i),
 			  UDF_SB_PARTROOT(sb,i), UDF_SB_PARTLEN(sb,i));
 	}
+	return 0;
 }
 
 static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh,
@@ -1177,12 +1188,19 @@ static int udf_process_sequence(struct super_block *sb, long block, long lastblo
 				udf_load_logicalvol(sb, bh, fileset);
 			} else if (i == VDS_POS_PARTITION_DESC) {
 				struct buffer_head *bh2 = NULL;
-				udf_load_partdesc(sb, bh);
+				if (udf_load_partdesc(sb, bh)) {
+					brelse(bh);
+					return 1;
+				}
 				for (j = vds[i].block + 1; j < vds[VDS_POS_TERMINATING_DESC].block; j++) {
 					bh2 = udf_read_tagged(sb, j, j, &ident);
 					gd = (struct generic_desc *)bh2->b_data;
 					if (ident == TAG_IDENT_PD)
-						udf_load_partdesc(sb, bh2);
+						if (udf_load_partdesc(sb, bh2)) {
+							brelse(bh);
+							brelse(bh2);
+							return 1;
+						}
 					brelse(bh2);
 				}
 			}
diff --git a/include/asm-sparc64/device.h b/include/asm-sparc64/device.h
index d5a4559b955..5111e8717be 100644
--- a/include/asm-sparc64/device.h
+++ b/include/asm-sparc64/device.h
@@ -16,8 +16,6 @@ struct dev_archdata {
 
 	struct device_node	*prom_node;
 	struct of_device	*op;
-
-	unsigned int		msi_num;
 };
 
 #endif /* _ASM_SPARC64_DEVICE_H */
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
index e6c436ef935..c00ad152771 100644
--- a/include/asm-sparc64/irq.h
+++ b/include/asm-sparc64/irq.h
@@ -16,21 +16,21 @@
 #include <asm/ptrace.h>
 
 /* IMAP/ICLR register defines */
-#define IMAP_VALID		0x80000000	/* IRQ Enabled */
-#define IMAP_TID_UPA		0x7c000000	/* UPA TargetID */
-#define IMAP_TID_JBUS		0x7c000000	/* JBUS TargetID */
+#define IMAP_VALID		0x80000000UL	/* IRQ Enabled */
+#define IMAP_TID_UPA		0x7c000000UL	/* UPA TargetID */
+#define IMAP_TID_JBUS		0x7c000000UL	/* JBUS TargetID */
 #define IMAP_TID_SHIFT		26
-#define IMAP_AID_SAFARI		0x7c000000	/* Safari AgentID */
+#define IMAP_AID_SAFARI		0x7c000000UL	/* Safari AgentID */
 #define IMAP_AID_SHIFT		26
-#define IMAP_NID_SAFARI		0x03e00000	/* Safari NodeID */
+#define IMAP_NID_SAFARI		0x03e00000UL	/* Safari NodeID */
 #define IMAP_NID_SHIFT		21
-#define IMAP_IGN		0x000007c0	/* IRQ Group Number */
-#define IMAP_INO		0x0000003f	/* IRQ Number */
-#define IMAP_INR		0x000007ff	/* Full interrupt number*/
+#define IMAP_IGN		0x000007c0UL	/* IRQ Group Number */
+#define IMAP_INO		0x0000003fUL	/* IRQ Number */
+#define IMAP_INR		0x000007ffUL	/* Full interrupt number*/
 
-#define ICLR_IDLE		0x00000000	/* Idle state */
-#define ICLR_TRANSMIT		0x00000001	/* Transmit state */
-#define ICLR_PENDING		0x00000003	/* Pending state */
+#define ICLR_IDLE		0x00000000UL	/* Idle state */
+#define ICLR_TRANSMIT		0x00000001UL	/* Transmit state */
+#define ICLR_PENDING		0x00000003UL	/* Pending state */
 
 /* The largest number of unique interrupt sources we support.
  * If this needs to ever be larger than 255, you need to change
@@ -53,6 +53,9 @@ extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
 extern void sun4v_destroy_msi(unsigned int virt_irq);
 extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
 
+extern void sparc64_set_msi(unsigned int virt_irq, u32 msi);
+extern u32 sparc64_get_msi(unsigned int virt_irq);
+
 extern void fixup_irqs(void);
 
 static __inline__ void set_softint(unsigned long bits)
diff --git a/include/asm-um/common.lds.S b/include/asm-um/common.lds.S
index e3f010bd12b..cb0248616d4 100644
--- a/include/asm-um/common.lds.S
+++ b/include/asm-um/common.lds.S
@@ -16,82 +16,112 @@
   . = ALIGN(4096);
   .note : { *(.note.*) }
-  __start___ex_table = .;
-  __ex_table : { *(__ex_table) }
-  __stop___ex_table = .;
+  __ex_table : {
+	__start___ex_table = .;
+	*(__ex_table)
+	__stop___ex_table = .;
+  }
 
   BUG_TABLE
 
-  __uml_setup_start = .;
-  .uml.setup.init : { *(.uml.setup.init) }
-  __uml_setup_end = .;
+  .uml.setup.init : {
+	__uml_setup_start = .;
+	*(.uml.setup.init)
+	__uml_setup_end = .;
+  }
 
-  __uml_help_start = .;
-  .uml.help.init : { *(.uml.help.init) }
-  __uml_help_end = .;
+  .uml.help.init : {
+	__uml_help_start = .;
+	*(.uml.help.init)
+	__uml_help_end = .;
+  }
 
-  __uml_postsetup_start = .;
-  .uml.postsetup.init : { *(.uml.postsetup.init) }
-  __uml_postsetup_end = .;
+  .uml.postsetup.init : {
+	__uml_postsetup_start = .;
+	*(.uml.postsetup.init)
+	__uml_postsetup_end = .;
+  }
 
-  __setup_start = .;
-  .init.setup : { *(.init.setup) }
-  __setup_end = .;
+  .init.setup : {
+	__setup_start = .;
+	*(.init.setup)
+	__setup_end = .;
+  }
 
   . = ALIGN(32);
-  __per_cpu_start = . ;
-  .data.percpu : { *(.data.percpu) }
-  __per_cpu_end = . ;
+  .data.percpu : {
+	__per_cpu_start = . ;
+	*(.data.percpu)
+	__per_cpu_end = . ;
+  }
 
-  __initcall_start = .;
   .initcall.init : {
+	__initcall_start = .;
 	INITCALLS
+	__initcall_end = .;
   }
-  __initcall_end = .;
 
-  __con_initcall_start = .;
-  .con_initcall.init : { *(.con_initcall.init) }
-  __con_initcall_end = .;
+  .con_initcall.init : {
+	__con_initcall_start = .;
+	*(.con_initcall.init)
+	__con_initcall_end = .;
+  }
 
-  __uml_initcall_start = .;
-  .uml.initcall.init : { *(.uml.initcall.init) }
-  __uml_initcall_end = .;
+  .uml.initcall.init : {
+	__uml_initcall_start = .;
+	*(.uml.initcall.init)
+	__uml_initcall_end = .;
+  }
   __init_end = .;
 
   SECURITY_INIT
 
-  __exitcall_begin = .;
-  .exitcall : { *(.exitcall.exit) }
-  __exitcall_end = .;
+  .exitcall : {
+	__exitcall_begin = .;
+	*(.exitcall.exit)
+	__exitcall_end = .;
+  }
 
-  __uml_exitcall_begin = .;
-  .uml.exitcall : { *(.uml.exitcall.exit) }
-  __uml_exitcall_end = .;
+  .uml.exitcall : {
+	__uml_exitcall_begin = .;
+	*(.uml.exitcall.exit)
+	__uml_exitcall_end = .;
+  }
 
   . = ALIGN(4);
-  __alt_instructions = .;
-  .altinstructions : { *(.altinstructions) }
-  __alt_instructions_end = .;
+  .altinstructions : {
+	__alt_instructions = .;
+	*(.altinstructions)
+	__alt_instructions_end = .;
+  }
   .altinstr_replacement : { *(.altinstr_replacement) }
   /* .exit.text is discard at runtime, not link time, to deal with references
      from .altinstructions and .eh_frame */
   .exit.text : { *(.exit.text) }
   .exit.data : { *(.exit.data) }
 
-  __preinit_array_start = .;
-  .preinit_array : { *(.preinit_array) }
-  __preinit_array_end = .;
-  __init_array_start = .;
-  .init_array : { *(.init_array) }
-  __init_array_end = .;
-  __fini_array_start = .;
-  .fini_array : { *(.fini_array) }
-  __fini_array_end = .;
+  .preinit_array : {
+	__preinit_array_start = .;
+	*(.preinit_array)
+	__preinit_array_end = .;
+  }
+  .init_array : {
+	__init_array_start = .;
+	*(.init_array)
+	__init_array_end = .;
+  }
+  .fini_array : {
+	__fini_array_start = .;
+	*(.fini_array)
+	__fini_array_end = .;
+  }
 
   . = ALIGN(4096);
-  __initramfs_start = .;
-  .init.ramfs : { *(.init.ramfs) }
-  __initramfs_end = .;
+  .init.ramfs : {
+	__initramfs_start = .;
+	*(.init.ramfs)
+	__initramfs_end = .;
+  }
 
   /* Sections to be discarded */
   /DISCARD/ : {
diff --git a/include/asm-um/elf-x86_64.h b/include/asm-um/elf-x86_64.h
index 8a8246d0393..bfe27aa2c9c 100644
--- a/include/asm-um/elf-x86_64.h
+++ b/include/asm-um/elf-x86_64.h
@@ -6,7 +6,9 @@
 #ifndef __UM_ELF_X86_64_H
 #define __UM_ELF_X86_64_H
 
+#include <linux/sched.h>
 #include <asm/user.h>
+#include "skas.h"
 
 /* x86-64 relocation types, taken from asm-x86_64/elf.h */
 #define R_X86_64_NONE		0	/* No reloc */
@@ -64,6 +66,44 @@ typedef struct { } elf_fpregset_t;
 	PT_REGS_R15(regs) = 0; \
 } while (0)
 
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+	(pr_reg)[0] = (regs)->regs.gp[0];	\
+	(pr_reg)[1] = (regs)->regs.gp[1];	\
+	(pr_reg)[2] = (regs)->regs.gp[2];	\
+	(pr_reg)[3] = (regs)->regs.gp[3];	\
+	(pr_reg)[4] = (regs)->regs.gp[4];	\
+	(pr_reg)[5] = (regs)->regs.gp[5];	\
+	(pr_reg)[6] = (regs)->regs.gp[6];	\
+	(pr_reg)[7] = (regs)->regs.gp[7];	\
+	(pr_reg)[8] = (regs)->regs.gp[8];	\
+	(pr_reg)[9] = (regs)->regs.gp[9];	\
+	(pr_reg)[10] = (regs)->regs.gp[10];	\
+	(pr_reg)[11] = (regs)->regs.gp[11];	\
+	(pr_reg)[12] = (regs)->regs.gp[12];	\
+	(pr_reg)[13] = (regs)->regs.gp[13];	\
+	(pr_reg)[14] = (regs)->regs.gp[14];	\
+	(pr_reg)[15] = (regs)->regs.gp[15];	\
+	(pr_reg)[16] = (regs)->regs.gp[16];	\
+	(pr_reg)[17] = (regs)->regs.gp[17];	\
+	(pr_reg)[18] = (regs)->regs.gp[18];	\
+	(pr_reg)[19] = (regs)->regs.gp[19];	\
+	(pr_reg)[20] = (regs)->regs.gp[20];	\
+	(pr_reg)[21] = current->thread.arch.fs;	\
+	(pr_reg)[22] = 0;			\
+	(pr_reg)[23] = 0;			\
+	(pr_reg)[24] = 0;			\
+	(pr_reg)[25] = 0;			\
+	(pr_reg)[26] = 0;
+
+static inline int elf_core_copy_fpregs(struct task_struct *t,
+				       elf_fpregset_t *fpu)
+{
+	int cpu = current_thread->cpu;
+	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
+}
+
+#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
+
 #ifdef TIF_IA32 /* XXX */
 #error XXX, indeed
         clear_thread_flag(TIF_IA32);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 1d5ded0836e..0ad72c4cf31 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -126,16 +126,16 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
 static inline int cpu_is_offline(int cpu) { return 0; }
 #endif		/* CONFIG_HOTPLUG_CPU */
 
-#ifdef CONFIG_SUSPEND_SMP
+#ifdef CONFIG_PM_SLEEP_SMP
 extern int suspend_cpu_hotplug;
 
 extern int disable_nonboot_cpus(void);
 extern void enable_nonboot_cpus(void);
-#else
+#else /* !CONFIG_PM_SLEEP_SMP */
 #define suspend_cpu_hotplug	0
 static inline int disable_nonboot_cpus(void) { return 0; }
 static inline void enable_nonboot_cpus(void) {}
-#endif
+#endif /* !CONFIG_PM_SLEEP_SMP */
 
 #endif /* _LINUX_CPU_H_ */
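The cpu.h hunk above keeps the kernel's usual configuration idiom: real declarations under the #ifdef, and no-op static-inline stubs in the #else, so callers never carry their own ifdefs. A compile-anywhere miniature of the pattern (FEATURE_PM_SLEEP_SMP stands in for the Kconfig symbol):

    #include <stdio.h>

    /* #define FEATURE_PM_SLEEP_SMP 1   <- normally set by the build system */

    #ifdef FEATURE_PM_SLEEP_SMP
    int disable_nonboot_cpus(void);	/* real implementations elsewhere */
    void enable_nonboot_cpus(void);
    #else
    /* Stubs: callers compile and run unchanged when the feature is off. */
    static inline int disable_nonboot_cpus(void) { return 0; }
    static inline void enable_nonboot_cpus(void) { }
    #endif

    int main(void)
    {
    	if (disable_nonboot_cpus() == 0)
    		printf("suspend path proceeds; nothing was taken down\n");
    	enable_nonboot_cpus();
    	return 0;
    }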
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index e6a71c82d20..3a19b032c0e 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -66,11 +66,8 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
-					 pgoff_t pgoff)
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-		return -EINVAL;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
 	if (addr & ~HPAGE_MASK)
@@ -78,8 +75,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
 	return 0;
 }
 #else
-int prepare_hugepage_range(unsigned long addr, unsigned long len,
-			   pgoff_t pgoff);
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
 #endif
 
 #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
@@ -117,7 +113,7 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_report_meminfo(buf)		0
 #define hugetlb_report_node_meminfo(n, buf)	0
 #define follow_huge_pmd(mm, addr, pmd, write)	NULL
-#define prepare_hugepage_range(addr,len,pgoff)	(-EINVAL)
+#define prepare_hugepage_range(addr,len)	(-EINVAL)
 #define pmd_huge(x)	0
 #define is_hugepage_only_range(mm, addr, len)	0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 06d23e10a16..17168f3cc73 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -374,10 +374,9 @@
 #define PCI_DEVICE_ID_ATI_IXP400_SATA	0x4379
 #define PCI_DEVICE_ID_ATI_IXP400_SATA2	0x437a
 #define PCI_DEVICE_ID_ATI_IXP600_SATA	0x4380
-#define PCI_DEVICE_ID_ATI_IXP600_SMBUS	0x4385
+#define PCI_DEVICE_ID_ATI_SBX00_SMBUS	0x4385
 #define PCI_DEVICE_ID_ATI_IXP600_IDE	0x438c
 #define PCI_DEVICE_ID_ATI_IXP700_SATA	0x4390
-#define PCI_DEVICE_ID_ATI_IXP700_SMBUS	0x4395
 #define PCI_DEVICE_ID_ATI_IXP700_IDE	0x439c
 
 #define PCI_VENDOR_ID_VLSI		0x1004
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index c91476ce314..dff3192374f 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -351,6 +351,8 @@ enum
 #define RTAX_INITCWND RTAX_INITCWND
 	RTAX_FEATURES,
 #define RTAX_FEATURES RTAX_FEATURES
+	RTAX_RTO_MIN,
+#define RTAX_RTO_MIN RTAX_RTO_MIN
 	__RTAX_MAX
 };
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 124270df873..74962077f63 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -78,7 +78,7 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  * Sorry that the following has to be that ugly but some versions of GCC
  * have trouble with constant propagation and loops.
  */
-static inline int kmalloc_index(size_t size)
+static __always_inline int kmalloc_index(size_t size)
 {
 	if (!size)
 		return 0;
@@ -133,7 +133,7 @@ static inline int kmalloc_index(size_t size)
  * This ought to end up with a global pointer to the right cache
  * in kmalloc_caches.
 */
-static inline struct kmem_cache *kmalloc_slab(size_t size)
+static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 {
 	int index = kmalloc_index(size);
 
@@ -166,7 +166,7 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
@@ -183,7 +183,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 73cb9943c8a..991c85bb9e3 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -214,7 +214,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
 					  const struct sctp_chunk *);
 struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *,
 					       const struct sctp_chunk *);
-void sctp_init_cause(struct sctp_chunk *, __be16 cause, const void *, size_t);
+void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t);
 struct sctp_chunk *sctp_make_abort(const struct sctp_association *,
 				   const struct sctp_chunk *,
 				   const size_t hint);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index ee4559b1130..c0d5848c33d 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -726,6 +726,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
 			  struct iovec *data);
 void sctp_chunk_free(struct sctp_chunk *);
 void  *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void  *sctp_addto_param(struct sctp_chunk *, int len, const void *data);
 struct sctp_chunk *sctp_chunkify(struct sk_buff *,
 				 const struct sctp_association *,
 				 struct sock *);
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index 39ea3f442b4..cd33270e86d 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -83,6 +83,7 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
 
 /* Skip over an SSN. */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
 
 #endif /* __sctp_ulpqueue_h__ */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 181ae708602..38033db8d8e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -273,7 +273,7 @@ int __cpuinit cpu_up(unsigned int cpu)
 	return err;
 }
 
-#ifdef CONFIG_SUSPEND_SMP
+#ifdef CONFIG_PM_SLEEP_SMP
 static cpumask_t frozen_cpus;
 
 int disable_nonboot_cpus(void)
@@ -334,4 +334,4 @@ void enable_nonboot_cpus(void)
 out:
 	mutex_unlock(&cpu_add_remove_lock);
 }
-#endif
+#endif /* CONFIG_PM_SLEEP_SMP */
diff --git a/kernel/exit.c b/kernel/exit.c
index 9578c1ae19c..06b24b3aa37 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -975,6 +975,7 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (unlikely(tsk->audit_context))
 		audit_free(tsk);
 
+	tsk->exit_code = code;
 	taskstats_exit(tsk, group_dead);
 
 	exit_mm(tsk);
@@ -996,7 +997,6 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (tsk->binfmt)
 		module_put(tsk->binfmt->module);
 
-	tsk->exit_code = code;
 	proc_exit_connector(tsk);
 	exit_task_namespaces(tsk);
 	exit_notify(tsk);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 853aefbd184..7230d914eaa 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -547,14 +547,11 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 		 * We do this before actually registering it, to make sure that
 		 * a 'real' IRQ doesn't run in parallel with our fake
 		 */
-		if (irqflags & IRQF_DISABLED) {
-			unsigned long flags;
+		unsigned long flags;
 
-			local_irq_save(flags);
-			handler(irq, dev_id);
-			local_irq_restore(flags);
-		} else
-			handler(irq, dev_id);
+		local_irq_save(flags);
+		handler(irq, dev_id);
+		local_irq_restore(flags);
 	}
 #endif
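The slub_def.h hunks above depend on kmalloc_index() collapsing to a constant whenever the size is known at compile time; plain inline is only a hint, so the helpers are forced with __always_inline. The effect is easy to see in user space (attribute spelled out via GCC syntax; table values invented):

    #include <stdio.h>

    #define __always_inline inline __attribute__((always_inline))

    /* Once inlined, a constant argument folds the whole call away. */
    static __always_inline int size_index(unsigned long size)
    {
    	if (size <= 8)
    		return 3;
    	if (size <= 16)
    		return 4;
    	if (size <= 32)
    		return 5;
    	return -1;
    }

    int main(void)
    {
    	/* With -O2 the second value prints 1: the call collapsed at
    	 * compile time, which is what the SLUB fast path relies on.
    	 */
    	printf("index(16) = %d, folded: %d\n",
    	       size_index(16), __builtin_constant_p(size_index(16)));
    	return 0;
    }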
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 412859f8d94..c8580a1e687 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -72,15 +72,10 @@ config PM_TRACE
 	CAUTION: this option will cause your machine's real-time clock to be
 	set to an invalid time after a resume.
 
-config SUSPEND_SMP_POSSIBLE
-	bool
-	depends on (X86 && !X86_VOYAGER) || (PPC64 && (PPC_PSERIES || PPC_PMAC))
-	depends on SMP
-	default y
-
-config SUSPEND_SMP
+config PM_SLEEP_SMP
 	bool
-	depends on SUSPEND_SMP_POSSIBLE && PM_SLEEP
+	depends on SUSPEND_SMP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
+	depends on PM_SLEEP
 	select HOTPLUG_CPU
 	default y
 
@@ -89,20 +84,46 @@ config PM_SLEEP
 	depends on SUSPEND || HIBERNATION
 	default y
 
+config SUSPEND_UP_POSSIBLE
+	bool
+	depends on (X86 && !X86_VOYAGER) || PPC || ARM || BLACKFIN || MIPS \
+		   || SUPERH || FRV
+	depends on !SMP
+	default y
+
+config SUSPEND_SMP_POSSIBLE
+	bool
+	depends on (X86 && !X86_VOYAGER) \
+		   || (PPC && (PPC_PSERIES || PPC_PMAC)) || ARM
+	depends on SMP
+	default y
+
 config SUSPEND
 	bool "Suspend to RAM and standby"
 	depends on PM
-	depends on !SMP || SUSPEND_SMP_POSSIBLE
+	depends on SUSPEND_UP_POSSIBLE || SUSPEND_SMP_POSSIBLE
 	default y
 	---help---
 	  Allow the system to enter sleep states in which main memory is
 	  powered and thus its contents are preserved, such as the
 	  suspend-to-RAM state (i.e. the ACPI S3 state).
 
+config HIBERNATION_UP_POSSIBLE
+	bool
+	depends on X86 || PPC64_SWSUSP || FRV || PPC32
+	depends on !SMP
+	default y
+
+config HIBERNATION_SMP_POSSIBLE
+	bool
+	depends on (X86 && !X86_VOYAGER) || PPC64_SWSUSP
+	depends on SMP
+	default y
+
 config HIBERNATION
 	bool "Hibernation (aka 'suspend to disk')"
 	depends on PM && SWAP
-	depends on ((X86 || PPC64_SWSUSP || FRV || PPC32) && !SMP) || SUSPEND_SMP_POSSIBLE
+	depends on HIBERNATION_UP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
 	---help---
 	  Enable the suspend to disk (STD) functionality, which is usually
 	  called "hibernation" in user interfaces. STD checkpoints the
diff --git a/kernel/signal.c b/kernel/signal.c
index ad63109e413..3169bed0b4d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1300,20 +1300,19 @@ struct sigqueue *sigqueue_alloc(void)
 void sigqueue_free(struct sigqueue *q)
 {
 	unsigned long flags;
+	spinlock_t *lock = &current->sighand->siglock;
+
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 	/*
 	 * If the signal is still pending remove it from the
-	 * pending queue.
+	 * pending queue. We must hold ->siglock while testing
+	 * q->list to serialize with collect_signal().
 	 */
-	if (unlikely(!list_empty(&q->list))) {
-		spinlock_t *lock = &current->sighand->siglock;
-		read_lock(&tasklist_lock);
-		spin_lock_irqsave(lock, flags);
-		if (!list_empty(&q->list))
-			list_del_init(&q->list);
-		spin_unlock_irqrestore(lock, flags);
-		read_unlock(&tasklist_lock);
-	}
+	spin_lock_irqsave(lock, flags);
+	if (!list_empty(&q->list))
+		list_del_init(&q->list);
+	spin_unlock_irqrestore(lock, flags);
+
 	q->flags &= ~SIGQUEUE_PREALLOC;
 	__sigqueue_free(q);
 }
diff --git a/kernel/sys.c b/kernel/sys.c
index 449b81b98b3..1b33b05d346 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1442,7 +1442,6 @@ asmlinkage long sys_times(struct tms __user * tbuf)
  * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
  * LBT 04.03.94
  */
-
 asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
 {
 	struct task_struct *p;
@@ -1470,7 +1469,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
 	if (!thread_group_leader(p))
 		goto out;
 
-	if (p->real_parent == group_leader) {
+	if (p->real_parent->tgid == group_leader->tgid) {
 		err = -EPERM;
 		if (task_session(p) != task_session(group_leader))
 			goto out;
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index d055d987850..85af9422ea6 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -81,6 +81,7 @@ void free_user_ns(struct kref *kref)
 	struct user_namespace *ns;
 
 	ns = container_of(kref, struct user_namespace, kref);
+	free_uid(ns->root_user);
 	kfree(ns);
 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 172abffeb2e..bb54b88c3d5 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -955,6 +955,11 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 		goto out;
 	}
 
+	if (!nodes_subset(new, node_online_map)) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	err = security_task_movememory(task);
 	if (err)
 		goto out;
diff --git a/mm/migrate.c b/mm/migrate.c
index 37c73b90200..e2fdbce1874 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -611,6 +611,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	int rc = 0;
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
+	int rcu_locked = 0;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -636,8 +637,13 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	 * we cannot notice that anon_vma is freed while we migrates a page.
 	 * This rcu_read_lock() delays freeing anon_vma pointer until the end
 	 * of migration. File cache pages are no problem because of page_lock()
+	 * File cache pages may use write_page() or lock_page() in migration,
+	 * so only anon pages need care here.
 	 */
-	rcu_read_lock();
+	if (PageAnon(page)) {
+		rcu_read_lock();
+		rcu_locked = 1;
+	}
 	/*
 	 * This is a corner case handling.
 	 * When a new swap-cache is read into, it is linked to LRU
@@ -656,7 +662,8 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	if (rc)
 		remove_migration_ptes(page, page);
 rcu_unlock:
-	rcu_read_unlock();
+	if (rcu_locked)
+		rcu_read_unlock();
 unlock:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6427653023a..1a8c59571cb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2345,6 +2345,8 @@ static int __cpuinit process_zones(int cpu)
 	return 0;
 bad:
 	for_each_zone(dzone) {
+		if (!populated_zone(dzone))
+			continue;
 		if (dzone == zone)
 			break;
 		kfree(zone_pcp(dzone, cpu));
diff --git a/mm/slub.c b/mm/slub.c
index 04151da399c..7defe84e6bd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3813,7 +3813,9 @@ static int __init slab_sysfs_init(void)
 
 	list_for_each_entry(s, &slab_caches, list) {
 		err = sysfs_slab_add(s);
-		BUG_ON(err);
+		if (err)
+			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
+						" to sysfs\n", s->name);
 	}
 
 	while (alias_list) {
@@ -3821,7 +3823,9 @@ static int __init slab_sysfs_init(void)
 		alias_list = alias_list->next;
 
 		err = sysfs_slab_alias(al->s, al->name);
-		BUG_ON(err);
+		if (err)
+			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
+					" %s to sysfs\n", s->name);
 		kfree(al);
 	}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 69b70977f00..eb57502bb26 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -384,6 +384,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 	if (hold_time(br) == 0)
 		return;
 
+	/* ignore packets unless we are using this port */
+	if (!(source->state == BR_STATE_LEARNING ||
+	      source->state == BR_STATE_FORWARDING))
+		return;
+
 	fdb = fdb_find(head, addr);
 	if (likely(fdb)) {
 		/* attempt to update an entry for a local interface */
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 749f0e8f541..9272f12f664 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -33,17 +33,17 @@
  */
 static int port_cost(struct net_device *dev)
 {
-	if (dev->ethtool_ops->get_settings) {
-		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
-		int err = dev->ethtool_ops->get_settings(dev, &ecmd);
-		if (!err) {
+	if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
+		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
+
+		if (!dev->ethtool_ops->get_settings(dev, &ecmd)) {
 			switch(ecmd.speed) {
-			case SPEED_100:
-				return 19;
-			case SPEED_1000:
-				return 4;
 			case SPEED_10000:
 				return 2;
+			case SPEED_1000:
+				return 4;
+			case SPEED_100:
+				return 19;
 			case SPEED_10:
 				return 100;
 			}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5c18595b761..6f468fc3357 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -101,9 +101,8 @@ static int br_handle_local_finish(struct sk_buff *skb)
 {
 	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
 
-	if (p && p->state != BR_STATE_DISABLED)
+	if (p)
 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
-
 	return 0;	/* process further */
 }
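port_cost() above gains a guard for devices that register no ethtool_ops at all, and reorders the cases fastest-first; the numbers themselves are the 802.1D path-cost defaults. A user-space sketch of the same table, with the optional callback reduced to a nullable function pointer:

    #include <stdio.h>

    struct fake_dev {
    	int (*get_speed)(void);	/* may be NULL, like dev->ethtool_ops */
    };

    static int port_cost(const struct fake_dev *dev)
    {
    	if (dev->get_speed) {
    		switch (dev->get_speed()) {
    		case 10000: return 2;
    		case 1000:  return 4;
    		case 100:   return 19;
    		case 10:    return 100;
    		}
    	}
    	return 100;	/* unknown: assume slow (the real function falls
    			 * back to legacy heuristics at this point) */
    }

    static int gige(void) { return 1000; }

    int main(void)
    {
    	struct fake_dev a = { .get_speed = gige }, b = { 0 };

    	printf("cost=%d cost=%d\n", port_cost(&a), port_cost(&b));
    	return 0;
    }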
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 7bae576ac11..36fdea71d74 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -380,7 +380,6 @@ struct pktgen_thread {
 	/* Field for thread to receive "posted" events terminate, stop ifs etc.
 	 */
 	u32 control;
-	int pid;
 	int cpu;
 
 	wait_queue_head_t queue;
@@ -3331,8 +3330,9 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	}
 
 	if ((netif_queue_stopped(odev) ||
-	     netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping)) ||
-	    need_resched()) {
+	     (pkt_dev->skb &&
+	      netif_subqueue_stopped(odev, pkt_dev->skb->queue_mapping))) ||
+	    need_resched()) {
 		idle_start = getCurUs();
 
 		if (!netif_running(odev)) {
@@ -3462,8 +3462,6 @@ static int pktgen_thread_worker(void *arg)
 
 	init_waitqueue_head(&t->queue);
 
-	t->pid = current->pid;
-
 	pr_debug("pktgen: starting pktgen/%d:  pid=%d\n", cpu, current->pid);
 
 	max_before_softirq = t->max_before_softirq;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9785df37a65..1ee72127462 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -555,6 +555,16 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 		tcp_grow_window(sk, skb);
 }
 
+static u32 tcp_rto_min(struct sock *sk)
+{
+	struct dst_entry *dst = __sk_dst_get(sk);
+	u32 rto_min = TCP_RTO_MIN;
+
+	if (dst_metric_locked(dst, RTAX_RTO_MIN))
+		rto_min = dst->metrics[RTAX_RTO_MIN-1];
+	return rto_min;
+}
+
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
@@ -616,13 +626,13 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 				if (tp->mdev_max < tp->rttvar)
 					tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
 				tp->rtt_seq = tp->snd_nxt;
-				tp->mdev_max = TCP_RTO_MIN;
+				tp->mdev_max = tcp_rto_min(sk);
 			}
 		} else {
 			/* no previous measure. */
 			tp->srtt = m<<3;	/* take the measured time to be rtt */
 			tp->mdev = m<<1;	/* make sure rto = 3*rtt */
-			tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
+			tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
 			tp->rtt_seq = tp->snd_nxt;
 		}
 	}
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index ab7d845224f..223f9bded67 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -188,7 +188,7 @@ udp_checkentry(const char *tablename,
 	       void *matchinfo,
 	       unsigned int hook_mask)
 {
-	const struct xt_tcp *udpinfo = matchinfo;
+	const struct xt_udp *udpinfo = matchinfo;
 
 	/* Must specify no unknown invflags */
 	return !(udpinfo->invflags & ~XT_UDP_INV_MASK);
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 4a49db65772..abd82fc3ec6 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -44,7 +44,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
-		switch (tc_classify(skb, q->filter_list, &res)) {
+		switch (err) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
 			*qerr = NET_XMIT_SUCCESS;
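The new tcp_rto_min() above lets a route override the global minimum RTO: metric values live in a 0-based array while RTAX_* numbering is 1-based, hence the RTAX_RTO_MIN-1 index, and only a locked (explicitly configured) metric is honoured. A user-space sketch of that lookup, with an invented dst structure:

    #include <stdio.h>
    #include <stdint.h>

    #define RTAX_RTO_MIN	13	/* follows RTAX_FEATURES, per the
    				 * rtnetlink.h hunk in this series */
    #define RTAX_MAX	14
    #define TCP_RTO_MIN_MS	200	/* illustrative default floor */

    struct dst_sketch {
    	uint32_t metrics[RTAX_MAX];	/* metrics[i - 1] holds RTAX i */
    	uint32_t locked;		/* bitmask of configured metrics */
    };

    static uint32_t rto_min(const struct dst_sketch *dst)
    {
    	if (dst->locked & (1u << RTAX_RTO_MIN))
    		return dst->metrics[RTAX_RTO_MIN - 1];
    	return TCP_RTO_MIN_MS;
    }

    int main(void)
    {
    	struct dst_sketch d = { .locked = 1u << RTAX_RTO_MIN };

    	d.metrics[RTAX_RTO_MIN - 1] = 20;	/* 20 ms floor on this route */
    	printf("%u\n", rto_min(&d));		/* 20 */
    	d.locked = 0;
    	printf("%u\n", rto_min(&d));		/* 200 */
    	return 0;
    }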
+ */ + if (transport->state != SCTP_UNCONFIRMED) + transport->state = SCTP_INACTIVE; + spc_state = SCTP_ADDR_UNREACHABLE; break; diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 992f361084b..28f4fe77cee 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q, */ if ((fast_retransmit && (chunk->fast_retransmit > 0)) || (!fast_retransmit && !chunk->tsn_gap_acked)) { + /* If this chunk was sent less then 1 rto ago, do not + * retransmit this chunk, but give the peer time + * to acknowlege it. + */ + if ((jiffies - chunk->sent_at) < transport->rto) + continue; + /* RFC 2960 6.2.1 Processing a Received SACK * * C) Any time a DATA chunk is marked for diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 51c4d7fef1d..79856c92452 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = { * abort chunk. */ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, - const void *payload, size_t paylen) + size_t paylen) { sctp_errhdr_t err; __u16 len; @@ -120,7 +120,6 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, len = sizeof(sctp_errhdr_t) + paylen; err.length = htons(len); chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); - sctp_addto_chunk(chunk, paylen, payload); } /* 3.3.2 Initiation (INIT) (1) @@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data( /* Put the tsn back into network byte order. */ payload = htonl(tsn); - sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload, - sizeof(payload)); + sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload)); + sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * @@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, goto err_copy; } - sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen); + sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen); + sctp_addto_chunk(retval, paylen, payload); if (paylen) kfree(payload); @@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation( struct sctp_paramhdr phdr; retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen - + sizeof(sctp_chunkhdr_t)); + + sizeof(sctp_paramhdr_t)); if (!retval) goto end; - sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen); + sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen + + sizeof(sctp_paramhdr_t)); phdr.type = htons(chunk->chunk_hdr->type); phdr.length = chunk->chunk_hdr->length; - sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr); + sctp_addto_chunk(retval, paylen, payload); + sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr); end: return retval; @@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, if (!retval) goto nodata; - sctp_init_cause(retval, cause_code, payload, paylen); + sctp_init_cause(retval, cause_code, paylen); + sctp_addto_chunk(retval, paylen, payload); nodata: return retval; @@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) void *target; void *padding; int chunklen = ntohs(chunk->chunk_hdr->length); - int padlen = chunklen % 4; + int padlen = WORD_ROUND(chunklen) - chunklen; padding = skb_put(chunk->skb, padlen); target = skb_put(chunk->skb, len); @@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) 
return target; } +/* Append bytes to the end of a parameter. Will panic if chunk is not big + * enough. + */ +void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data) +{ + void *target; + int chunklen = ntohs(chunk->chunk_hdr->length); + + target = skb_put(chunk->skb, len); + + memcpy(target, data, len); + + /* Adjust the chunk length field. */ + chunk->chunk_hdr->length = htons(chunklen + len); + chunk->chunk_end = skb_tail_pointer(chunk->skb); + + return target; +} + /* Append bytes from user space to the end of a chunk. Will panic if * chunk is not big enough. * Returns a kernel err value. @@ -1174,25 +1196,36 @@ out: */ void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) { + struct sctp_datamsg *msg; + struct sctp_chunk *lchunk; + struct sctp_stream *stream; __u16 ssn; __u16 sid; if (chunk->has_ssn) return; - /* This is the last possible instant to assign a SSN. */ - if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { - ssn = 0; - } else { - sid = ntohs(chunk->subh.data_hdr->stream); - if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) - ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid); - else - ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid); - } + /* All fragments will be on the same stream */ + sid = ntohs(chunk->subh.data_hdr->stream); + stream = &chunk->asoc->ssnmap->out; - chunk->subh.data_hdr->ssn = htons(ssn); - chunk->has_ssn = 1; + /* Now assign the sequence number to the entire message. + * All fragments must have the same stream sequence number. + */ + msg = chunk->msg; + list_for_each_entry(lchunk, &msg->chunks, frag_list) { + if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { + ssn = 0; + } else { + if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) + ssn = sctp_ssn_next(stream, sid); + else + ssn = sctp_ssn_peek(stream, sid); + } + + lchunk->subh.data_hdr->ssn = htons(ssn); + lchunk->has_ssn = 1; + } } /* Helper function to assign a TSN if needed. This assumes that both @@ -1466,7 +1499,8 @@ no_hmac: __be32 n = htonl(usecs); sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, - &n, sizeof(n)); + sizeof(n)); + sctp_addto_chunk(*errp, sizeof(n), &n); *error = -SCTP_IERROR_STALE_COOKIE; } else *error = -SCTP_IERROR_NOMEM; @@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc, report.num_missing = htonl(1); report.type = paramtype; sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM, - &report, sizeof(report)); + sizeof(report)); + sctp_addto_chunk(*errp, sizeof(report), &report); } /* Stop processing this chunk. */ @@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc, *errp = sctp_make_op_error_space(asoc, chunk, 0); if (*errp) - sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0); + sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0); /* Stop processing this chunk. 
@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
 	*errp = sctp_make_op_error_space(asoc, chunk, payload_len);

 	if (*errp) {
-		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
-				sizeof(error));
-		sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+		sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+				sizeof(error) + sizeof(sctp_paramhdr_t));
+		sctp_addto_chunk(*errp, sizeof(error), error);
+		sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
 	}

 	return 0;
@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
 	if (!*errp)
 		*errp = sctp_make_op_error_space(asoc, chunk, len);

-	if (*errp)
-		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
-				param.v, len);
+	if (*errp) {
+		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+		sctp_addto_chunk(*errp, len, param.v);
+	}

 	/* Stop processing this chunk. */
 	return 0;
@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 			*errp = sctp_make_op_error_space(asoc, chunk,
 					ntohs(chunk->chunk_hdr->length));

-		if (*errp)
+		if (*errp) {
 			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					param.v, WORD_ROUND(ntohs(param.p->length)));
+					WORD_ROUND(ntohs(param.p->length)));
+			sctp_addto_chunk(*errp,
+					WORD_ROUND(ntohs(param.p->length)),
+					param.v);
+		}

 		break;
 	case SCTP_PARAM_ACTION_SKIP:
@@ -1690,8 +1730,10 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,

 		if (*errp) {
 			sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					param.v, WORD_ROUND(ntohs(param.p->length)));
+					WORD_ROUND(ntohs(param.p->length)));
+			sctp_addto_chunk(*errp,
+					WORD_ROUND(ntohs(param.p->length)),
+					param.v);
 		} else {
 			/* If there is no memory for generating the ERROR
 			 * report as specified, an ABORT will be triggered
@@ -1791,7 +1833,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
 	 * VIOLATION error.  We build the ERROR chunk here and let the normal
 	 * error handling code build and send the packet.
 	 */
-	if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+	if (param.v != (void*)chunk->chunk_end) {
 		sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
 		return 0;
 	}
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d9fad4f6ffc..8d789008349 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1013,8 +1013,9 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
 		break;

 	case SCTP_DISPOSITION_VIOLATION:
-		printk(KERN_ERR "sctp protocol violation state %d "
-		       "chunkid %d\n", state, subtype.chunk);
+		if (net_ratelimit())
+			printk(KERN_ERR "sctp protocol violation state %d "
+			       "chunkid %d\n", state, subtype.chunk);
 		break;

 	case SCTP_DISPOSITION_NOT_IMPL:
@@ -1130,6 +1131,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 		/* Move the Cumulattive TSN Ack ahead. */
 		sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
+		/* purge the fragmentation queue */
+		sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
 		/* Abort any in progress partial delivery.
 		 */
 		sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
 		break;
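Both the violation message above and the backbeat warnings below are now wrapped in net_ratelimit(), the kernel's global limiter for log floods that a remote peer could otherwise trigger at will. A userspace analogue of the pattern; the burst size and refill rate here are made up for illustration, not the kernel's defaults:

/* illustration only, not part of the patch */
#include <stdio.h>
#include <time.h>

#define BURST 10

static int my_ratelimit(void)
{
	static time_t last;
	static int tokens = BURST;
	time_t now = time(NULL);

	if (now != last) {		/* refill one token per second */
		last = now;
		if (tokens < BURST)
			tokens++;
	}
	if (tokens > 0) {
		tokens--;
		return 1;		/* ok to print */
	}
	return 0;			/* suppress */
}

int main(void)
{
	int i;

	for (i = 0; i < 100; i++)
		if (my_ratelimit())
			fprintf(stderr, "sctp protocol violation (%d)\n", i);
	return 0;
}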
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 71cad56dd73..177528ed3e1 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -264,7 +264,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
 	struct sctp_chunk *err_chunk;
 	struct sctp_packet *packet;
 	sctp_unrecognized_param_t *unk_param;
-	struct sock *sk;
 	int len;

 	/* 6.10 Bundling
@@ -285,16 +284,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
 	if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
 		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);

-	sk = ep->base.sk;
-	/* If the endpoint is not listening or if the number of associations
-	 * on the TCP-style socket exceed the max backlog, respond with an
-	 * ABORT.
-	 */
-	if (!sctp_sstate(sk, LISTENING) ||
-	    (sctp_style(sk, TCP) &&
-	     sk_acceptq_is_full(sk)))
-		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
 	/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
 	 * Tag.
 	 */
@@ -590,6 +579,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
 	struct sctp_ulpevent *ev, *ai_ev = NULL;
 	int error = 0;
 	struct sctp_chunk *err_chk_p;
+	struct sock *sk;

 	/* If the packet is an OOTB packet which is temporarily on the
 	 * control endpoint, respond with an ABORT.
@@ -605,6 +595,15 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
 	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);

+	/* If the endpoint is not listening or if the number of associations
+	 * on the TCP-style socket exceeds the max backlog, respond with an
+	 * ABORT.
+	 */
+	sk = ep->base.sk;
+	if (!sctp_sstate(sk, LISTENING) ||
+	    (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
 	/* "Decode" the chunk.  We have no optional parameters so we
 	 * are in good shape.
 	 */
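Relocating the guard from sctp_sf_do_5_1B_init() to sctp_sf_do_5_1D_ce() means the listen state and accept backlog are enforced when the COOKIE ECHO arrives, i.e. when an association would actually be created, rather than at INIT time. A compressed restatement of the predicate, with mock stand-ins for the kernel helpers (the struct fields below are simplified, not the kernel's struct sock):

/* illustration only, not part of the patch */
#include <stdbool.h>
#include <stdio.h>

struct mock_sock {
	bool listening;		/* sctp_sstate(sk, LISTENING)      */
	bool tcp_style;		/* sctp_style(sk, TCP)             */
	int  acceptq_len;	/* queued, not yet accepted assocs */
	int  max_backlog;	/* backlog given to listen()       */
};

/* mirrors sk_acceptq_is_full() */
static bool acceptq_is_full(const struct mock_sock *sk)
{
	return sk->acceptq_len > sk->max_backlog;
}

/* true: answer the COOKIE ECHO with an ABORT instead of creating
 * the association
 */
static bool must_abort(const struct mock_sock *sk)
{
	return !sk->listening ||
	       (sk->tcp_style && acceptq_is_full(sk));
}

int main(void)
{
	struct mock_sock sk = { true, true, 5, 4 };

	printf("abort? %s\n", must_abort(&sk) ? "yes" : "no");
	return 0;
}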
@@ -1032,19 +1031,21 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 	/* This should never happen, but lets log it if so.
 	 */
 	if (unlikely(!link)) {
 		if (from_addr.sa.sa_family == AF_INET6) {
-			printk(KERN_WARNING
-			       "%s association %p could not find address "
-			       NIP6_FMT "\n",
-			       __FUNCTION__,
-			       asoc,
-			       NIP6(from_addr.v6.sin6_addr));
+			if (net_ratelimit())
+				printk(KERN_WARNING
+				       "%s association %p could not find address "
+				       NIP6_FMT "\n",
+				       __FUNCTION__,
+				       asoc,
+				       NIP6(from_addr.v6.sin6_addr));
 		} else {
-			printk(KERN_WARNING
-			       "%s association %p could not find address "
-			       NIPQUAD_FMT "\n",
-			       __FUNCTION__,
-			       asoc,
-			       NIPQUAD(from_addr.v4.sin_addr.s_addr));
+			if (net_ratelimit())
+				printk(KERN_WARNING
+				       "%s association %p could not find address "
+				       NIPQUAD_FMT "\n",
+				       __FUNCTION__,
+				       asoc,
+				       NIPQUAD(from_addr.v4.sin_addr.s_addr));
 		}
 		return SCTP_DISPOSITION_DISCARD;
 	}
@@ -3362,7 +3363,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(sctp_errhdr_t));
 		if (abort) {
-			sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0);
+			sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(abort));
 		}
@@ -3392,7 +3393,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 		abort = sctp_make_abort(asoc, asconf_ack,
 					sizeof(sctp_errhdr_t));
 		if (abort) {
-			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0);
+			sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
 			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
 					SCTP_CHUNK(abort));
 		}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 01c6364245b..33354602ae8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -353,6 +353,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 	 * The function sctp_get_port_local() does duplicate address
 	 * detection.
 	 */
+	addr->v4.sin_port = htons(snum);
 	if ((ret = sctp_get_port_local(sk, addr))) {
 		if (ret == (long) sk) {
 			/* This endpoint has a conflicting address. */
@@ -5202,6 +5203,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)

 		sctp_unhash_endpoint(ep);
 		sk->sk_state = SCTP_SS_CLOSED;
+		return 0;
 	}

 	/* Return if we are already listening. */
@@ -5249,6 +5251,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)

 		sctp_unhash_endpoint(ep);
 		sk->sk_state = SCTP_SS_CLOSED;
+		return 0;
 	}

 	if (sctp_sstate(sk, LISTENING))
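The two added `return 0;` statements make disabling a listener explicit: per the SCTP sockets API, calling listen() with a backlog of 0 takes the endpoint out of the listening state, and with the fix the call now reports success instead of falling through into the listen setup path. A minimal userspace usage sketch (assumes SCTP support in the running kernel; error handling kept short):

/* illustration only, not part of the patch */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (listen(fd, 5) < 0)		/* start accepting associations */
		perror("listen(5)");
	if (listen(fd, 0) < 0)		/* stop accepting new ones */
		perror("listen(0)");
	close(fd);
	return 0;
}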
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 34eb977a204..fa0ba2a5564 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -659,6 +659,46 @@ done:
 	return retval;
 }

+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue.  The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+	struct sk_buff *pos, *tmp;
+	struct sctp_ulpevent *event;
+	__u32 tsn;
+
+	if (skb_queue_empty(&ulpq->reasm))
+		return;
+
+	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+		event = sctp_skb2event(pos);
+		tsn = event->tsn;
+
+		/* Since the entire message must be abandoned by the
+		 * sender (item A3 in Section 3.5, RFC 3758), we can
+		 * free all fragments on the list that are less than
+		 * or equal to ctsn_point
+		 */
+		if (TSN_lte(tsn, fwd_tsn)) {
+			__skb_unlink(pos, &ulpq->reasm);
+			sctp_ulpevent_free(event);
+		} else
+			break;
+	}
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an an incoming chunk.
  */
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 		csid = cevent->stream;
 		cssn = cevent->ssn;

-		if (cssn != sctp_ssn_peek(in, csid))
+		/* Have we gone too far?  */
+		if (csid > sid)
 			break;

-		/* Found it, so mark in the ssnmap. */
-		sctp_ssn_next(in, csid);
+		/* Have we not gone far enough?  */
+		if (csid < sid)
+			continue;
+
+		/* see if this ssn has been marked by skipping */
+		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+			break;

 		__skb_unlink(pos, &ulpq->lobby);
-		if (!event) {
+		if (!event)
 			/* Create a temporary list to collect chunks on.  */
 			event = sctp_skb2event(pos);
-			__skb_queue_tail(&temp, sctp_event2skb(event));
-		} else {
-			/* Attach all gathered skbs to the event.  */
-			__skb_queue_tail(&temp, pos);
-		}
+
+		/* Attach all gathered skbs to the event.  */
+		__skb_queue_tail(&temp, pos);
 	}

 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		/* see if we have more ordered data that we can deliver */
+		sctp_ulpq_retrieve_ordered(ulpq, event);
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 }

-/* Skip over an SSN. */
+/* Skip over an SSN.  This is used during the processing of a
+ * Forward TSN chunk to skip over the abandoned ordered data
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
 	struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 	/* Go find any other chunks that were waiting for
 	 * ordering and deliver them if needed.
 	 */
-	sctp_ulpq_reap_ordered(ulpq);
+	sctp_ulpq_reap_ordered(ulpq, sid);
 	return;
 }
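The flush loop in sctp_ulpq_reasm_flushtsn() compares TSNs with TSN_lte(), which uses serial-number arithmetic (mod 2^32) so the cut-off stays correct when the TSN space wraps. A standalone sketch of that comparison, re-created from the usual kernel idiom rather than copied from this tree:

/* illustration only, not part of the patch */
#include <stdio.h>
#include <stdint.h>

/* "a before b" in mod-2^32 serial-number arithmetic */
static int tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

static int tsn_lte(uint32_t a, uint32_t b)
{
	return a == b || tsn_lt(a, b);
}

int main(void)
{
	uint32_t fwd_tsn = 5;			/* just past the 2^32 wrap */
	uint32_t frag_tsn = 0xfffffffeu;	/* fragment sent before it */

	/* A plain "<=" would keep the stale fragment queued forever;
	 * serial-number comparison correctly flushes it.
	 */
	printf("plain:  %d\n", frag_tsn <= fwd_tsn);		/* 0: wrong */
	printf("serial: %d\n", tsn_lte(frag_tsn, fwd_tsn));	/* 1: right */
	return 0;
}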