-rw-r--r--  Documentation/crypto/api-intro.txt | 2
-rw-r--r--  Documentation/kernel-parameters.txt | 2
-rw-r--r--  Documentation/sysrq.txt | 2
-rw-r--r--  arch/i386/kernel/apic.c | 55
-rw-r--r--  arch/i386/kernel/i8253.c | 10
-rw-r--r--  arch/ia64/kernel/crash.c | 2
-rw-r--r--  arch/ia64/kernel/setup.c | 22
-rw-r--r--  arch/ia64/mm/contig.c | 30
-rw-r--r--  arch/ia64/mm/discontig.c | 4
-rw-r--r--  arch/ia64/mm/init.c | 16
-rw-r--r--  arch/ia64/sn/kernel/setup.c | 6
-rw-r--r--  arch/powerpc/kernel/process.c | 9
-rw-r--r--  arch/powerpc/platforms/pseries/hvCall.S | 41
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/plpar_wrappers.h | 16
-rw-r--r--  crypto/scatterwalk.c | 4
-rw-r--r--  crypto/tcrypt.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-amd8111.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 25
-rw-r--r--  drivers/i2c/chips/ds1374.c | 4
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 5
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_fs.c | 16
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 6
-rw-r--r--  drivers/net/irda/irda-usb.c | 2
-rw-r--r--  drivers/video/s3fb.c | 3
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 4
-rw-r--r--  include/asm-ia64/meminit.h | 1
-rw-r--r--  include/asm-powerpc/hvcall.h | 14
-rw-r--r--  include/linux/bootmem.h | 2
-rw-r--r--  include/linux/lockdep.h | 2
-rw-r--r--  include/net/sctp/structs.h | 1
-rw-r--r--  include/net/sctp/ulpqueue.h | 1
-rw-r--r--  kernel/lockdep.c | 8
-rw-r--r--  kernel/power/disk.c | 8
-rw-r--r--  kernel/power/user.c | 9
-rw-r--r--  mm/nommu.c | 28
-rw-r--r--  net/bluetooth/hidp/core.c | 2
-rw-r--r--  net/bridge/br_fdb.c | 4
-rw-r--r--  net/bridge/br_netfilter.c | 2
-rw-r--r--  net/core/fib_rules.c | 2
-rw-r--r--  net/core/sock.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_nat_standalone.c | 15
-rw-r--r--  net/ipv4/netfilter/nf_nat_standalone.c | 14
-rw-r--r--  net/ipv6/addrconf.c | 6
-rw-r--r--  net/ipv6/route.c | 4
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 4
-rw-r--r--  net/irda/irnet/irnet.h | 2
-rw-r--r--  net/irda/irnet/irnet_irda.c | 34
-rw-r--r--  net/irda/irttp.c | 1
-rw-r--r--  net/netfilter/Kconfig | 1
-rw-r--r--  net/sched/cls_route.c | 2
-rw-r--r--  net/sctp/associola.c | 15
-rw-r--r--  net/sctp/sm_statefuns.c | 20
-rw-r--r--  net/sctp/transport.c | 32
-rw-r--r--  net/sctp/ulpqueue.c | 2
-rw-r--r--  net/x25/x25_forward.c | 88
-rw-r--r--  net/xfrm/xfrm_policy.c | 2
-rw-r--r--  net/xfrm/xfrm_user.c | 2
62 files changed, 483 insertions(+), 160 deletions(-)
diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt
index e41a79aa71c..9b84b805ab7 100644
--- a/Documentation/crypto/api-intro.txt
+++ b/Documentation/crypto/api-intro.txt
@@ -60,7 +60,7 @@ Here's an example of how to use the API:
desc.tfm = tfm;
desc.flags = 0;
- if (crypto_hash_digest(&desc, &sg, 2, result))
+ if (crypto_hash_digest(&desc, sg, 2, result))
fail();
crypto_free_hash(tfm);
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f0b7a5e5740..e39ab0c99fb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1123,6 +1123,8 @@ and is between 256 and 4096 characters. It is defined in the file
nolapic [IA-32,APIC] Do not enable or use the local APIC.
+ nolapic_timer [IA-32,APIC] Do not use the local APIC timer.
+
noltlbs [PPC] Do not use large page/tlb entries for kernel
lowmem mapping on PPC40x.
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 452c0f15230..d43aa9d3c10 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -93,6 +93,8 @@ On all - write a character to /proc/sysrq-trigger. e.g.:
'p' - Will dump the current registers and flags to your console.
+'q' - Will dump a list of all running timers.
+
'r' - Turns off keyboard raw mode and sets it to XLATE.
's' - Will attempt to sync all mounted filesystems.
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 5cff7970911..244c3fe9b8c 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -28,6 +28,7 @@
#include <linux/clockchips.h>
#include <linux/acpi_pmtmr.h>
#include <linux/module.h>
+#include <linux/dmi.h>
#include <asm/atomic.h>
#include <asm/smp.h>
@@ -61,6 +62,8 @@ static int enable_local_apic __initdata = 0;
/* Local APIC timer verification ok */
static int local_apic_timer_verify_ok;
+/* Disable local APIC timer from the kernel commandline or via dmi quirk */
+static int local_apic_timer_disabled;
/*
* Debug level, exported for io_apic.c
@@ -266,6 +269,32 @@ static void __devinit setup_APIC_timer(void)
}
/*
+ * Detect systems with known broken BIOS implementations
+ */
+static int __init lapic_check_broken_bios(struct dmi_system_id *d)
+{
+ printk(KERN_NOTICE "%s detected: disabling lapic timer.\n",
+ d->ident);
+ local_apic_timer_disabled = 1;
+ return 0;
+}
+
+static struct dmi_system_id __initdata broken_bios_dmi_table[] = {
+ {
+ /*
+ * BIOS exports only C1 state, but uses deeper power
+ * modes behind the kernels back.
+ */
+ .callback = lapic_check_broken_bios,
+ .ident = "HP nx6325",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
+ },
+ },
+ {}
+};
+
+/*
* In this functions we calibrate APIC bus clocks to the external timer.
*
* We want to do the calibration only once since we want to have local timer
@@ -340,6 +369,22 @@ void __init setup_boot_APIC_clock(void)
long delta, deltapm;
int pm_referenced = 0;
+ /* Detect know broken systems */
+ dmi_check_system(broken_bios_dmi_table);
+
+ /*
+ * The local apic timer can be disabled via the kernel
+ * commandline or from the dmi quirk above. Register the lapic
+ * timer as a dummy clock event source on SMP systems, so the
+ * broadcast mechanism is used. On UP systems simply ignore it.
+ */
+ if (local_apic_timer_disabled) {
+ /* No broadcast on UP ! */
+ if (num_possible_cpus() > 1)
+ setup_APIC_timer();
+ return;
+ }
+
apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
"calibrating APIC timer ...\n");
@@ -461,7 +506,8 @@ void __init setup_boot_APIC_clock(void)
apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
else
local_apic_timer_verify_ok = 0;
- }
+ } else
+ local_irq_enable();
if (!local_apic_timer_verify_ok) {
printk(KERN_WARNING
@@ -1179,6 +1225,13 @@ static int __init parse_nolapic(char *arg)
}
early_param("nolapic", parse_nolapic);
+static int __init parse_disable_lapic_timer(char *arg)
+{
+ local_apic_timer_disabled = 1;
+ return 0;
+}
+early_param("nolapic_timer", parse_disable_lapic_timer);
+
static int __init apic_set_verbosity(char *str)
{
if (strcmp("debug", str) == 0)
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index 5cbb776b308..10cef5ca8a5 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -47,9 +47,17 @@ static void init_pit_timer(enum clock_event_mode mode,
outb(LATCH >> 8 , PIT_CH0); /* MSB */
break;
- case CLOCK_EVT_MODE_ONESHOT:
+ /*
+ * Avoid unnecessary state transitions, as it confuses
+ * Geode / Cyrix based boxen.
+ */
case CLOCK_EVT_MODE_SHUTDOWN:
+ if (evt->mode == CLOCK_EVT_MODE_UNUSED)
+ break;
case CLOCK_EVT_MODE_UNUSED:
+ if (evt->mode == CLOCK_EVT_MODE_SHUTDOWN)
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
/* One shot setup */
outb_p(0x38, PIT_MODE);
udelay(10);
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 7d1bbb4403b..80a94e70782 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -164,7 +164,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
nd = (struct ia64_mca_notify_die *)args->err;
/* Reason code 1 means machine check rendezous*/
- if ((val == DIE_INIT_MONARCH_ENTER || DIE_INIT_SLAVE_ENTER) &&
+ if ((val == DIE_INIT_MONARCH_ENTER || val == DIE_INIT_SLAVE_ENTER) &&
nd->sos->rv_rc == 1)
return NOTIFY_DONE;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 339e8a54c2f..69b9bb3fd7c 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -692,12 +692,15 @@ struct seq_operations cpuinfo_op = {
.show = show_cpuinfo
};
-static char brandname[128];
+#define MAX_BRANDS 8
+static char brandname[MAX_BRANDS][128];
static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
+ static int overflow;
char brand[128];
+ int i;
memcpy(brand, "Unknown", 8);
if (ia64_pal_get_brand_info(brand)) {
@@ -709,12 +712,17 @@ get_model_name(__u8 family, __u8 model)
case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
}
}
- if (brandname[0] == '\0')
- return strcpy(brandname, brand);
- else if (strcmp(brandname, brand) == 0)
- return brandname;
- else
- return kstrdup(brand, GFP_KERNEL);
+ for (i = 0; i < MAX_BRANDS; i++)
+ if (strcmp(brandname[i], brand) == 0)
+ return brandname[i];
+ for (i = 0; i < MAX_BRANDS; i++)
+ if (brandname[i][0] == '\0')
+ return strcpy(brandname[i], brand);
+ if (overflow++ == 0)
+ printk(KERN_ERR
+ "%s: Table overflow. Some processor model information will be missing\n",
+ __FUNCTION__);
+ return "Unknown";
}
static void __cpuinit
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index fb0f4698f5d..44ce5ed9444 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -97,26 +97,6 @@ void show_mem(void)
unsigned long bootmap_start;
/**
- * find_max_pfn - adjust the maximum page number callback
- * @start: start of range
- * @end: end of range
- * @arg: address of pointer to global max_pfn variable
- *
- * Passed as a callback function to efi_memmap_walk() to determine the highest
- * available page frame number in the system.
- */
-int
-find_max_pfn (unsigned long start, unsigned long end, void *arg)
-{
- unsigned long *max_pfnp = arg, pfn;
-
- pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
- if (pfn > *max_pfnp)
- *max_pfnp = pfn;
- return 0;
-}
-
-/**
* find_bootmap_location - callback to find a memory area for the bootmap
* @start: start of region
* @end: end of region
@@ -177,9 +157,10 @@ find_memory (void)
reserve_memory();
/* first find highest page frame number */
- max_pfn = 0;
- efi_memmap_walk(find_max_pfn, &max_pfn);
-
+ min_low_pfn = ~0UL;
+ max_low_pfn = 0;
+ efi_memmap_walk(find_max_min_low_pfn, NULL);
+ max_pfn = max_low_pfn;
/* how many bytes to cover all the pages */
bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
@@ -189,7 +170,8 @@ find_memory (void)
if (bootmap_start == ~0UL)
panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
- bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
+ bootmap_size = init_bootmem_node(NODE_DATA(0),
+ (bootmap_start >> PAGE_SHIFT), 0, max_pfn);
/* Free all available memory, then mark bootmem-map as being in use. */
efi_memmap_walk(filter_rsvd_memory, free_bootmem);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 11a2d8825d8..872da7a2acc 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -88,9 +88,6 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
}
- min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
- max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);
-
return 0;
}
@@ -438,6 +435,7 @@ void __init find_memory(void)
/* These actually end up getting called by call_pernode_memory() */
efi_memmap_walk(filter_rsvd_memory, build_node_maps);
efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
+ efi_memmap_walk(find_max_min_low_pfn, NULL);
for_each_online_node(node)
if (mem_data[node].bootmem_data.node_low_pfn) {
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index f225dd72968..c8da621aab1 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -648,6 +648,22 @@ count_reserved_pages (u64 start, u64 end, void *arg)
return 0;
}
+int
+find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
+{
+ unsigned long pfn_start, pfn_end;
+#ifdef CONFIG_FLATMEM
+ pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
+ pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
+#else
+ pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
+ pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
+#endif
+ min_low_pfn = min(min_low_pfn, pfn_start);
+ max_low_pfn = max(max_low_pfn, pfn_end);
+ return 0;
+}
+
/*
* Boot command-line option "nolwsys" can be used to disable the use of any light-weight
* system call handler. When this option is in effect, all fsyscalls will end up bubbling
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index bd5373d593e..a9bed5ca2ed 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -348,8 +348,7 @@ sn_scan_pcdp(void)
continue; /* not PCI interconnect */
if (if_pci.translation & PCDP_PCI_TRANS_IOPORT)
- vga_console_iobase =
- if_pci.ioport_tra | __IA64_UNCACHED_OFFSET;
+ vga_console_iobase = if_pci.ioport_tra;
if (if_pci.translation & PCDP_PCI_TRANS_MMIO)
vga_console_membase =
@@ -429,7 +428,8 @@ void __init sn_setup(char **cmdline_p)
* bus containing the VGA console.
*/
if (vga_console_iobase) {
- io_space[0].mmio_base = vga_console_iobase;
+ io_space[0].mmio_base =
+ (unsigned long) ioremap(vga_console_iobase, 0);
io_space[0].sparse = 0;
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index f3d4dd580dd..e53b2988d1b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -465,8 +465,13 @@ void flush_thread(void)
#ifdef CONFIG_PPC64
struct thread_info *t = current_thread_info();
- if (t->flags & _TIF_ABI_PENDING)
- t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
+ if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
+ clear_ti_thread_flag(t, TIF_ABI_PENDING);
+ if (test_ti_thread_flag(t, TIF_32BIT))
+ clear_ti_thread_flag(t, TIF_32BIT);
+ else
+ set_ti_thread_flag(t, TIF_32BIT);
+ }
#endif
discard_lazy_cpu_state();
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 5c7e3878989..c1427b3634e 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -30,9 +30,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_PURR);
/*
* postcall is performed immediately before function return which
- * allows liberal use of volatile registers.
+ * allows liberal use of volatile registers. We branch around this
+ * in early init (eg when populating the MMU hashtable) by using an
+ * unconditional cpu feature.
*/
#define HCALL_INST_POSTCALL \
+BEGIN_FTR_SECTION; \
+ b 1f; \
+END_FTR_SECTION(0, 1); \
ld r4,STK_PARM(r3)(r1); /* validate opcode */ \
cmpldi cr7,r4,MAX_HCALL_OPCODE; \
bgt- cr7,1f; \
@@ -123,6 +128,40 @@ _GLOBAL(plpar_hcall)
blr /* return r3 = status */
+/*
+ * plpar_hcall_raw can be called in real mode. kexec/kdump need some
+ * hypervisor calls to be executed in real mode. So plpar_hcall_raw
+ * does not access the per cpu hypervisor call statistics variables,
+ * since these variables may not be present in the RMO region.
+ */
+_GLOBAL(plpar_hcall_raw)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+
+ std r4,STK_PARM(r4)(r1) /* Save ret buffer */
+
+ mr r4,r5
+ mr r5,r6
+ mr r6,r7
+ mr r7,r8
+ mr r8,r9
+ mr r9,r10
+
+ HVSC /* invoke the hypervisor */
+
+ ld r12,STK_PARM(r4)(r1)
+ std r4, 0(r12)
+ std r5, 8(r12)
+ std r6, 16(r12)
+ std r7, 24(r12)
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+
+ blr /* return r3 = status */
+
_GLOBAL(plpar_hcall9)
HMT_MEDIUM
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 7496005566e..843ee964321 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -378,7 +378,7 @@ static void pSeries_lpar_hptab_clear(void)
/* TODO: Use bulk call */
for (i = 0; i < hpte_count; i++)
- plpar_pte_remove(0, i, 0, &dummy1, &dummy2);
+ plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2);
}
/*
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index 3eb7b294d92..2e4d10c9eea 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -78,6 +78,22 @@ static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
return rc;
}
+/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
+static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
+ unsigned long avpn, unsigned long *old_pteh_ret,
+ unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 35172d3f043..a6642312177 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -91,6 +91,8 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
memcpy_dir(buf, vaddr, len_this_page, out);
scatterwalk_unmap(vaddr, out);
+ scatterwalk_advance(walk, nbytes);
+
if (nbytes == len_this_page)
break;
@@ -99,7 +101,5 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
scatterwalk_pagedone(walk, out, 1);
}
-
- scatterwalk_advance(walk, nbytes);
}
EXPORT_SYMBOL_GPL(scatterwalk_copychunks);
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index f5e9da319ec..8eaa5aa210b 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -768,7 +768,7 @@ static void test_deflate(void)
tv = (void *)tvmem;
tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
- if (tfm == NULL) {
+ if (IS_ERR(tfm)) {
printk("failed to load transform for deflate\n");
return;
}
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index e15f9e37716..0c70f829334 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -254,7 +254,8 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr,
break;
case I2C_SMBUS_BLOCK_PROC_CALL:
- len = min_t(u8, data->block[0], 31);
+ len = min_t(u8, data->block[0],
+ I2C_SMBUS_BLOCK_MAX - 1);
amd_ec_write(smbus, AMD_SMB_CMD, command);
amd_ec_write(smbus, AMD_SMB_BCNT, len);
for (i = 0; i < len; i++)
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 6569a36985b..a320e7d82c1 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -97,6 +97,7 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
int command, int hwpec);
static unsigned long i801_smba;
+static unsigned char i801_original_hstcfg;
static struct pci_driver i801_driver;
static struct pci_dev *I801_dev;
static int isich4;
@@ -510,6 +511,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
}
pci_read_config_byte(I801_dev, SMBHSTCFG, &temp);
+ i801_original_hstcfg = temp;
temp &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */
if (!(temp & SMBHSTCFG_HST_EN)) {
dev_info(&dev->dev, "Enabling SMBus device\n");
@@ -543,6 +545,7 @@ exit:
static void __devexit i801_remove(struct pci_dev *dev)
{
i2c_del_adapter(&i801_adapter);
+ pci_write_config_byte(I801_dev, SMBHSTCFG, i801_original_hstcfg);
pci_release_region(dev, SMBBAR);
/*
* do not call pci_disable_device(dev) since it can cause hard hangs on
@@ -550,11 +553,33 @@ static void __devexit i801_remove(struct pci_dev *dev)
*/
}
+#ifdef CONFIG_PM
+static int i801_suspend(struct pci_dev *dev, pm_message_t mesg)
+{
+ pci_save_state(dev);
+ pci_write_config_byte(dev, SMBHSTCFG, i801_original_hstcfg);
+ pci_set_power_state(dev, pci_choose_state(dev, mesg));
+ return 0;
+}
+
+static int i801_resume(struct pci_dev *dev)
+{
+ pci_set_power_state(dev, PCI_D0);
+ pci_restore_state(dev);
+ return pci_enable_device(dev);
+}
+#else
+#define i801_suspend NULL
+#define i801_resume NULL
+#endif
+
static struct pci_driver i801_driver = {
.name = "i801_smbus",
.id_table = i801_ids,
.probe = i801_probe,
.remove = __devexit_p(i801_remove),
+ .suspend = i801_suspend,
+ .resume = i801_resume,
};
static int __init i2c_i801_init(void)
diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c
index 15edf40828b..8a2ff0c114d 100644
--- a/drivers/i2c/chips/ds1374.c
+++ b/drivers/i2c/chips/ds1374.c
@@ -207,6 +207,10 @@ static int ds1374_probe(struct i2c_adapter *adap, int addr, int kind)
client->driver = &ds1374_driver;
ds1374_workqueue = create_singlethread_workqueue("ds1374");
+ if (!ds1374_workqueue) {
+ kfree(client);
+ return -ENOMEM; /* most expected reason */
+ }
if ((rc = i2c_attach_client(client)) != 0) {
kfree(client);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index f2774ae906b..24e0df04f7d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -545,11 +545,14 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
php = to_iwch_pd(pd);
if (mr_rereg_mask & IB_MR_REREG_ACCESS)
mh.attr.perms = iwch_ib_to_tpt_access(acc);
- if (mr_rereg_mask & IB_MR_REREG_TRANS)
+ if (mr_rereg_mask & IB_MR_REREG_TRANS) {
ret = build_phys_page_list(buffer_list, num_phys_buf,
iova_start,
&total_size, &npages,
&shift, &page_list);
+ if (ret)
+ return ret;
+ }
ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages);
kfree(page_list);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 20f36bf8b2b..f284be1c916 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -66,7 +66,9 @@
static void queue_comp_task(struct ehca_cq *__cq);
static struct ehca_comp_pool* pool;
+#ifdef CONFIG_HOTPLUG_CPU
static struct notifier_block comp_pool_callback_nb;
+#endif
static inline void comp_event_callback(struct ehca_cq *cq)
{
@@ -733,6 +735,7 @@ static void take_over_work(struct ehca_comp_pool *pool,
}
+#ifdef CONFIG_HOTPLUG_CPU
static int comp_pool_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
@@ -775,6 +778,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
+#endif
int ehca_create_comp_pool(void)
{
@@ -805,9 +809,11 @@ int ehca_create_comp_pool(void)
}
}
+#ifdef CONFIG_HOTPLUG_CPU
comp_pool_callback_nb.notifier_call = comp_pool_callback;
comp_pool_callback_nb.priority =0;
register_cpu_notifier(&comp_pool_callback_nb);
+#endif
printk(KERN_INFO "eHCA scaling code enabled\n");
@@ -821,7 +827,9 @@ void ehca_destroy_comp_pool(void)
if (!ehca_scaling_code)
return;
+#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&comp_pool_callback_nb);
+#endif
for (i = 0; i < NR_CPUS; i++) {
if (cpu_online(i))
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 5b40a846ff9..ed55979bfd3 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -451,12 +451,18 @@ bail:
return ret;
}
-static void remove_file(struct dentry *parent, char *name)
+static int remove_file(struct dentry *parent, char *name)
{
struct dentry *tmp;
+ int ret;
tmp = lookup_one_len(name, parent, strlen(name));
+ if (IS_ERR(tmp)) {
+ ret = PTR_ERR(tmp);
+ goto bail;
+ }
+
spin_lock(&dcache_lock);
spin_lock(&tmp->d_lock);
if (!(d_unhashed(tmp) && tmp->d_inode)) {
@@ -469,6 +475,14 @@ static void remove_file(struct dentry *parent, char *name)
spin_unlock(&tmp->d_lock);
spin_unlock(&dcache_lock);
}
+
+ ret = 0;
+bail:
+ /*
+ * We don't expect clients to care about the return value, but
+ * it's there if they need it.
+ */
+ return ret;
}
static int remove_device_files(struct super_block *sb,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 3484e8ba24a..e70492db74f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -452,7 +452,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
skb->len, tx->mtu);
++priv->stats.tx_dropped;
++priv->stats.tx_errors;
- ipoib_cm_skb_too_long(dev, skb, tx->mtu - INFINIBAND_ALEN);
+ ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
return;
}
@@ -1095,7 +1095,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
/* List if sorted by LRU, start from tail,
* stop when we see a recently used entry */
p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
- if (time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
+ if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
break;
list_del_init(&p->list);
spin_unlock_irqrestore(&priv->lock, flags);
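For context on the time_before_eq() change above: the jiffies helpers in <linux/jiffies.h> do wrap-safe comparisons, and time_before_eq(a, b) is true when a is at or before b. The stale-connection scan therefore stops at the first entry still used within IPOIB_CM_RX_TIMEOUT, rather than at the first expired one. A minimal userspace sketch of the same wrap-safe test (illustration only, not part of the patch; the numbers are made up):

#include <stdio.h>

/* Same idea as the kernel's time_before_eq(): a signed difference
 * keeps the comparison correct even after the counter wraps.
 */
static int before_eq(unsigned long a, unsigned long b)
{
	return (long)(b - a) >= 0;
}

int main(void)
{
	unsigned long now = 1000, last_used = 900, timeout = 50;

	/* last_used + timeout = 950 < now = 1000, so the entry is stale
	 * and before_eq() returns 0 (a scan like the one above would
	 * keep going instead of breaking out).
	 */
	printf("still fresh? %d\n", before_eq(now, last_used + timeout));
	return 0;
}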
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f2aa923ddbe..ba0ee5cf2ad 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -328,9 +328,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_tx_buf *tx_req;
u64 addr;
- if (unlikely(skb->len > priv->mcast_mtu + INFINIBAND_ALEN)) {
+ if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
- skb->len, priv->mcast_mtu + INFINIBAND_ALEN);
+ skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
++priv->stats.tx_dropped;
++priv->stats.tx_errors;
ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index f9dbc6f6814..0741c6d1337 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -380,7 +380,7 @@ static void path_rec_completion(int status,
struct net_device *dev = path->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_ah *ah = NULL;
- struct ipoib_neigh *neigh;
+ struct ipoib_neigh *neigh, *tn;
struct sk_buff_head skqueue;
struct sk_buff *skb;
unsigned long flags;
@@ -418,7 +418,7 @@ static void path_rec_completion(int status,
while ((skb = __skb_dequeue(&path->queue)))
__skb_queue_tail(&skqueue, skb);
- list_for_each_entry(neigh, &path->neigh_list, list) {
+ list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
kref_get(&path->ah->ref);
neigh->ah = path->ah;
memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 56c87a81bb6..54fbead4de0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -644,6 +644,9 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret = 0;
+ if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ ib_sa_free_multicast(mcast->mc);
+
if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
ipoib_dbg_mcast(priv, "leaving MGID " IPOIB_GID_FMT "\n",
IPOIB_GID_ARG(mcast->mcmember.mgid));
@@ -655,9 +658,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);
}
- if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- ib_sa_free_multicast(mcast->mc);
-
return 0;
}
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 340ee99652e..1d510bdc9b8 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1057,6 +1057,8 @@ static int stir421x_fw_upload(struct irda_usb_cb *self,
if (ret < 0)
break;
+
+ mdelay(10);
}
kfree(patch_block);
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 98919a6975f..3091b20124b 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -1000,11 +1000,12 @@ err_enable_device:
static void __devexit s3_pci_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
- struct s3fb_info *par = info->par;
if (info) {
#ifdef CONFIG_MTRR
+ struct s3fb_info *par = info->par;
+
if (par->mtrr_reg >= 0) {
mtrr_del(par->mtrr_reg, 0, 0);
par->mtrr_reg = -1;
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index e2bea6a661f..69e9e80735d 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1829,11 +1829,11 @@ xfs_buf_init(void)
if (!xfs_buf_zone)
goto out_free_trace_buf;
- xfslogd_workqueue = create_freezeable_workqueue("xfslogd");
+ xfslogd_workqueue = create_workqueue("xfslogd");
if (!xfslogd_workqueue)
goto out_free_buf_zone;
- xfsdatad_workqueue = create_freezeable_workqueue("xfsdatad");
+ xfsdatad_workqueue = create_workqueue("xfsdatad");
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index 21ec5f3d23d..3a62878e84f 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -36,6 +36,7 @@ extern void reserve_memory (void);
extern void find_initrd (void);
extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
extern void efi_memmap_init(unsigned long *, unsigned long *);
+extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);
extern unsigned long vmcore_find_descriptor_size(unsigned long address);
extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h
index 60977806d2f..62efd9d7a43 100644
--- a/include/asm-powerpc/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -237,6 +237,20 @@ long plpar_hcall_norets(unsigned long opcode, ...);
long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
/**
+ * plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats
+ * @opcode: The hypervisor call to make.
+ * @retbuf: Buffer to store up to 4 return arguments in.
+ *
+ * This call supports up to 6 arguments and 4 return arguments. Use
+ * PLPAR_HCALL_BUFSIZE to size the return argument buffer.
+ *
+ * Used when phyp interface needs to be called in real mode. Similar to
+ * plpar_hcall, but plpar_hcall_raw works in real mode and does not
+ * calculate hypervisor call statistics.
+ */
+long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
+
+/**
* plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments
* @opcode: The hypervisor call to make.
* @retbuf: Buffer to store up to 9 return arguments in.
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 2275f274870..81c07cd1864 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -108,7 +108,7 @@ static inline void *alloc_remap(int nid, unsigned long size)
#endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */
extern unsigned long __meminitdata nr_kernel_pages;
-extern unsigned long nr_all_pages;
+extern unsigned long __meminitdata nr_all_pages;
extern void *alloc_large_system_hash(const char *tablename,
unsigned long bucketsize,
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 06fe93a3e91..14c937d345c 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -245,7 +245,7 @@ extern void lock_release(struct lockdep_map *lock, int nested,
# define INIT_LOCKDEP .lockdep_recursion = 0,
-#define lockdep_depth(tsk) ((tsk)->lockdep_depth)
+#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
#else /* !LOCKDEP */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 31a8e88f1a7..f431acf3dce 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1002,6 +1002,7 @@ void sctp_transport_update_rto(struct sctp_transport *, __u32);
void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32);
void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t);
unsigned long sctp_transport_timeout(struct sctp_transport *);
+void sctp_transport_reset(struct sctp_transport *);
/* This is the structure we use to queue packets as they come into
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index a43c8788b65..ab26ab3adae 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -59,6 +59,7 @@ struct sctp_ulpq {
/* Prototypes. */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *,
struct sctp_association *);
+void sctp_ulpq_flush(struct sctp_ulpq *ulpq);
void sctp_ulpq_free(struct sctp_ulpq *);
/* Add a new DATA chunk for processing. */
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8dc24c92dc6..7065a687ac5 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2742,6 +2742,10 @@ void debug_show_all_locks(void)
int count = 10;
int unlock = 1;
+ if (unlikely(!debug_locks)) {
+ printk("INFO: lockdep is turned off.\n");
+ return;
+ }
printk("\nShowing all locks held in the system:\n");
/*
@@ -2785,6 +2789,10 @@ EXPORT_SYMBOL_GPL(debug_show_all_locks);
void debug_show_held_locks(struct task_struct *task)
{
+ if (unlikely(!debug_locks)) {
+ printk("INFO: lockdep is turned off.\n");
+ return;
+ }
lockdep_print_held_locks(task);
}
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 873cdf8ea5a..dee0ff40bef 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -241,18 +241,11 @@ static int software_resume(void)
goto Done;
}
- error = platform_prepare();
- if (error) {
- swsusp_free();
- goto Thaw;
- }
-
pr_debug("PM: Reading swsusp image.\n");
error = swsusp_read();
if (error) {
swsusp_free();
- platform_finish();
goto Thaw;
}
@@ -270,7 +263,6 @@ static int software_resume(void)
enable_nonboot_cpus();
Free:
swsusp_free();
- platform_finish();
device_resume();
resume_console();
Thaw:
diff --git a/kernel/power/user.c b/kernel/power/user.c
index d6a8dcc26ae..bf211fee122 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -368,9 +368,12 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
if (error) {
printk(KERN_ERR "Failed to suspend some devices.\n");
} else {
- /* Enter S3, system is already frozen */
- suspend_enter(PM_SUSPEND_MEM);
-
+ error = disable_nonboot_cpus();
+ if (!error) {
+ /* Enter S3, system is already frozen */
+ suspend_enter(PM_SUSPEND_MEM);
+ enable_nonboot_cpus();
+ }
/* Wake up devices */
device_resume();
}
diff --git a/mm/nommu.c b/mm/nommu.c
index 23fb033e596..cbbc1377481 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -826,6 +826,11 @@ unsigned long do_mmap_pgoff(struct file *file,
unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long vmpglen;
+ /* suppress VMA sharing for shared regions */
+ if (vm_flags & VM_SHARED &&
+ capabilities & BDI_CAP_MAP_DIRECT)
+ goto dont_share_VMAs;
+
for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
vma = rb_entry(rb, struct vm_area_struct, vm_rb);
@@ -859,6 +864,7 @@ unsigned long do_mmap_pgoff(struct file *file,
goto shared;
}
+ dont_share_VMAs:
vma = NULL;
/* obtain the address at which to make a shared mapping
@@ -1193,6 +1199,28 @@ void unmap_mapping_range(struct address_space *mapping,
EXPORT_SYMBOL(unmap_mapping_range);
/*
+ * ask for an unmapped area at which to create a mapping on a file
+ */
+unsigned long get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
+
+ get_area = current->mm->get_unmapped_area;
+ if (file && file->f_op && file->f_op->get_unmapped_area)
+ get_area = file->f_op->get_unmapped_area;
+
+ if (!get_area)
+ return -ENOSYS;
+
+ return get_area(file, addr, len, pgoff, flags);
+}
+
+EXPORT_SYMBOL(get_unmapped_area);
+
+/*
* Check that a process has enough memory to allocate a new virtual
* mapping. 0 means there is enough memory for the allocation to
* succeed and -ENOMEM implies there is not.
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 4c914df5fd0..ecfe8da1ce6 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -319,7 +319,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
return 0;
}
-static int inline hidp_send_ctrl_message(struct hidp_session *session,
+static inline int hidp_send_ctrl_message(struct hidp_session *session,
unsigned char hdr, unsigned char *data, int size)
{
int err;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index def2e403f93..8d566c13cc7 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -197,8 +197,8 @@ struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
rcu_read_lock();
fdb = __br_fdb_get(br, addr);
- if (fdb)
- atomic_inc(&fdb->use_count);
+ if (fdb && !atomic_inc_not_zero(&fdb->use_count))
+ fdb = NULL;
rcu_read_unlock();
return fdb;
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 7712d76f06b..5439a3c46c3 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -61,7 +61,7 @@ static int brnf_filter_vlan_tagged __read_mostly = 1;
#define brnf_filter_vlan_tagged 1
#endif
-static __be16 inline vlan_proto(const struct sk_buff *skb)
+static inline __be16 vlan_proto(const struct sk_buff *skb)
{
return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
}
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 215f1bff048..3aea4e87d3d 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -374,7 +374,7 @@ int fib_rules_dump(struct sk_buff *skb, struct netlink_callback *cb, int family)
return -EAFNOSUPPORT;
rcu_read_lock();
- list_for_each_entry(rule, ops->rules_list, list) {
+ list_for_each_entry_rcu(rule, ops->rules_list, list) {
if (idx < cb->args[0])
goto skip;
diff --git a/net/core/sock.c b/net/core/sock.c
index 8d65d6478dc..27c4f62382b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -808,7 +808,7 @@ lenout:
*
* (We also register the sk_lock with the lock validator.)
*/
-static void inline sock_lock_init(struct sock *sk)
+static inline void sock_lock_init(struct sock *sk)
{
sock_lock_init_class_and_name(sk,
af_family_slock_key_strings[sk->sk_family],
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index adf25f9f70e..6bcfdf6dfcc 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -253,14 +253,17 @@ ip_nat_local_fn(unsigned int hooknum,
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
if (ct->tuplehash[dir].tuple.dst.ip !=
- ct->tuplehash[!dir].tuple.src.ip
-#ifdef CONFIG_XFRM
- || ct->tuplehash[dir].tuple.dst.u.all !=
- ct->tuplehash[!dir].tuple.src.u.all
-#endif
- )
+ ct->tuplehash[!dir].tuple.src.ip) {
if (ip_route_me_harder(pskb, RTN_UNSPEC))
ret = NF_DROP;
+ }
+#ifdef CONFIG_XFRM
+ else if (ct->tuplehash[dir].tuple.dst.u.all !=
+ ct->tuplehash[!dir].tuple.src.u.all)
+ if (ip_xfrm_me_harder(pskb))
+ ret = NF_DROP;
+#endif
+
}
return ret;
}
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index e4d3ef17d45..15aa3db8cb3 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -245,14 +245,16 @@ nf_nat_local_fn(unsigned int hooknum,
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
if (ct->tuplehash[dir].tuple.dst.u3.ip !=
- ct->tuplehash[!dir].tuple.src.u3.ip
-#ifdef CONFIG_XFRM
- || ct->tuplehash[dir].tuple.dst.u.all !=
- ct->tuplehash[!dir].tuple.src.u.all
-#endif
- )
+ ct->tuplehash[!dir].tuple.src.u3.ip) {
if (ip_route_me_harder(pskb, RTN_UNSPEC))
ret = NF_DROP;
+ }
+#ifdef CONFIG_XFRM
+ else if (ct->tuplehash[dir].tuple.dst.u.all !=
+ ct->tuplehash[!dir].tuple.src.u.all)
+ if (ip_xfrm_me_harder(pskb))
+ ret = NF_DROP;
+#endif
}
return ret;
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a7fee6b2732..1b616992d91 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -804,7 +804,7 @@ struct ipv6_saddr_score {
#define IPV6_SADDR_SCORE_LABEL 0x0020
#define IPV6_SADDR_SCORE_PRIVACY 0x0040
-static int inline ipv6_saddr_preferred(int type)
+static inline int ipv6_saddr_preferred(int type)
{
if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|
IPV6_ADDR_LOOPBACK|IPV6_ADDR_RESERVED))
@@ -813,7 +813,7 @@ static int inline ipv6_saddr_preferred(int type)
}
/* static matching label */
-static int inline ipv6_saddr_label(const struct in6_addr *addr, int type)
+static inline int ipv6_saddr_label(const struct in6_addr *addr, int type)
{
/*
* prefix (longest match) label
@@ -3318,7 +3318,7 @@ errout:
rtnl_set_sk_err(RTNLGRP_IPV6_IFADDR, err);
}
-static void inline ipv6_store_devconf(struct ipv6_devconf *cnf,
+static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
__s32 *array, int bytes)
{
BUG_ON(bytes < (DEVCONF_MAX * 4));
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0e1f4b2cd3d..a6b3117df54 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -308,7 +308,7 @@ static inline void rt6_probe(struct rt6_info *rt)
/*
* Default Router Selection (RFC 2461 6.3.6)
*/
-static int inline rt6_check_dev(struct rt6_info *rt, int oif)
+static inline int rt6_check_dev(struct rt6_info *rt, int oif)
{
struct net_device *dev = rt->rt6i_dev;
int ret = 0;
@@ -328,7 +328,7 @@ static int inline rt6_check_dev(struct rt6_info *rt, int oif)
return ret;
}
-static int inline rt6_check_neigh(struct rt6_info *rt)
+static inline int rt6_check_neigh(struct rt6_info *rt)
{
struct neighbour *neigh = rt->rt6i_nexthop;
int m = 0;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index ee4b84a33ff..93c42232aa3 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -58,7 +58,7 @@ static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
-static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
+static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
{
unsigned h;
@@ -70,7 +70,7 @@ static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
return h;
}
-static unsigned inline xfrm6_tunnel_spi_hash_byspi(u32 spi)
+static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index 873ae189e37..bc2e15ce700 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -419,7 +419,7 @@ typedef struct irnet_socket
u32 raccm; /* to please pppd - dummy) */
unsigned int flags; /* PPP flags (compression, ...) */
unsigned int rbits; /* Unused receive flags ??? */
-
+ struct work_struct disconnect_work; /* Process context disconnection */
/* ------------------------ IrTTP part ------------------------ */
/* We create a pseudo "socket" over the IrDA tranport */
unsigned long ttp_open; /* Set when IrTTP is ready */
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index c378e668af0..a4f1439ffdd 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -10,6 +10,27 @@
#include "irnet_irda.h" /* Private header */
+/*
+ * PPP disconnect work: we need to make sure we're in
+ * process context when calling ppp_unregister_channel().
+ */
+static void irnet_ppp_disconnect(struct work_struct *work)
+{
+ irnet_socket * self =
+ container_of(work, irnet_socket, disconnect_work);
+
+ if (self == NULL)
+ return;
+ /*
+ * If we were connected, cleanup & close the PPP
+ * channel, which will kill pppd (hangup) and the rest.
+ */
+ if (self->ppp_open && !self->ttp_open && !self->ttp_connect) {
+ ppp_unregister_channel(&self->chan);
+ self->ppp_open = 0;
+ }
+}
+
/************************* CONTROL CHANNEL *************************/
/*
* When ppp is not active, /dev/irnet act as a control channel.
@@ -499,6 +520,8 @@ irda_irnet_create(irnet_socket * self)
#endif /* DISCOVERY_NOMASK */
self->tx_flow = FLOW_START; /* Flow control from IrTTP */
+ INIT_WORK(&self->disconnect_work, irnet_ppp_disconnect);
+
DEXIT(IRDA_SOCK_TRACE, "\n");
return(0);
}
@@ -1134,15 +1157,8 @@ irnet_disconnect_indication(void * instance,
{
if(test_open)
{
-#ifdef MISSING_PPP_API
- /* ppp_unregister_channel() wants a user context, which we
- * are guaranteed to NOT have here. What are we supposed
- * to do here ? Jean II */
- /* If we were connected, cleanup & close the PPP channel,
- * which will kill pppd (hangup) and the rest */
- ppp_unregister_channel(&self->chan);
- self->ppp_open = 0;
-#endif
+ /* ppp_unregister_channel() wants a user context. */
+ schedule_work(&self->disconnect_work);
}
else
{
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index a7486b3bddc..da3f2bc1b6f 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -1455,6 +1455,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
/* Not everything should be copied */
new->notify.instance = instance;
+ spin_lock_init(&new->lock);
init_timer(&new->todo_timer);
skb_queue_head_init(&new->rx_queue);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 253fce3ad2d..54698af6d0a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -275,6 +275,7 @@ config NF_CT_NETLINK
tristate 'Connection tracking netlink interface (EXPERIMENTAL)'
depends on EXPERIMENTAL && NF_CONNTRACK && NETFILTER_NETLINK
depends on NF_CONNTRACK!=y || NETFILTER_NETLINK!=m
+ depends on NF_NAT=n || NF_NAT
help
This option enables support for a netlink-based userspace interface
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index e85df07d8ce..abc47cc48ad 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -93,7 +93,7 @@ void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32
spin_unlock_bh(&dev->queue_lock);
}
-static void __inline__
+static inline void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
struct route4_filter *f)
{
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index fa82b73c965..78d2ddb5ca1 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1046,6 +1046,9 @@ void sctp_assoc_update(struct sctp_association *asoc,
trans = list_entry(pos, struct sctp_transport, transports);
if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
sctp_assoc_del_peer(asoc, &trans->ipaddr);
+
+ if (asoc->state >= SCTP_STATE_ESTABLISHED)
+ sctp_transport_reset(trans);
}
/* If the case is A (association restart), use
@@ -1063,6 +1066,18 @@ void sctp_assoc_update(struct sctp_association *asoc,
*/
sctp_ssnmap_clear(asoc->ssnmap);
+ /* Flush the ULP reassembly and ordered queue.
+ * Any data there will now be stale and will
+ * cause problems.
+ */
+ sctp_ulpq_flush(&asoc->ulpq);
+
+ /* reset the overall association error count so
+ * that the restarted association doesn't get torn
+ * down on the next retransmission timer.
+ */
+ asoc->overall_error_count = 0;
+
} else {
/* Add any peer addresses from the new association. */
list_for_each(pos, &new->peer.transport_addr_list) {
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 70c39eac058..e9097cf614b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -4342,8 +4342,24 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
void *arg,
sctp_cmd_seq_t *commands)
{
- return sctp_sf_heartbeat(ep, asoc, type, (struct sctp_transport *)arg,
- commands);
+ if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type,
+ (struct sctp_transport *)arg, commands))
+ return SCTP_DISPOSITION_NOMEM;
+
+ /*
+ * RFC 2960 (bis), section 8.3
+ *
+ * D) Request an on-demand HEARTBEAT on a specific destination
+ * transport address of a given association.
+ *
+ * The endpoint should increment the respective error counter of
+ * the destination transport address each time a HEARTBEAT is sent
+ * to that address and not acknowledged within one RTO.
+ *
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_RESET,
+ SCTP_TRANSPORT(arg));
+ return SCTP_DISPOSITION_CONSUME;
}
/*
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index a596f5308cb..4d8c2ab864f 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -526,3 +526,35 @@ unsigned long sctp_transport_timeout(struct sctp_transport *t)
timeout += jiffies;
return timeout;
}
+
+/* Reset transport variables to their initial values */
+void sctp_transport_reset(struct sctp_transport *t)
+{
+ struct sctp_association *asoc = t->asoc;
+
+ /* RFC 2960 (bis), Section 5.2.4
+ * All the congestion control parameters (e.g., cwnd, ssthresh)
+ * related to this peer MUST be reset to their initial values
+ * (see Section 6.2.1)
+ */
+ t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
+ t->ssthresh = asoc->peer.i.a_rwnd;
+ t->rto = asoc->rto_initial;
+ t->rtt = 0;
+ t->srtt = 0;
+ t->rttvar = 0;
+
+ /* Reset these additional varibles so that we have a clean
+ * slate.
+ */
+ t->partial_bytes_acked = 0;
+ t->flight_size = 0;
+ t->error_count = 0;
+ t->rto_pending = 0;
+
+ /* Initialize the state information for SFR-CACC */
+ t->cacc.changeover_active = 0;
+ t->cacc.cycling_changeover = 0;
+ t->cacc.next_tsn_at_change = 0;
+ t->cacc.cacc_saw_newack = 0;
+}
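A quick arithmetic check of the cwnd line above, min(4*MTU, max(2*MTU, 4380)) per RFC 2960 (bis): for a common 1500-byte path MTU this works out to min(6000, max(3000, 4380)) = 4380 bytes. A small standalone sketch of the same computation (the 1500-byte MTU is just an assumed example value):

#include <stdio.h>

/* RFC 2960 (bis) initial cwnd rule, as used by sctp_transport_reset(). */
static unsigned int initial_cwnd(unsigned int pathmtu)
{
	unsigned int lower = 2 * pathmtu > 4380 ? 2 * pathmtu : 4380;

	return 4 * pathmtu < lower ? 4 * pathmtu : lower;
}

int main(void)
{
	printf("cwnd for 1500-byte MTU: %u\n", initial_cwnd(1500)); /* prints 4380 */
	return 0;
}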
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index f4759a9bdae..bfb197e37da 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -73,7 +73,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
/* Flush the reassembly and ordering queues. */
-static void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
+void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
struct sk_buff *skb;
struct sctp_ulpevent *event;
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
index d339e0c810a..8738ec7ce69 100644
--- a/net/x25/x25_forward.c
+++ b/net/x25/x25_forward.c
@@ -26,64 +26,66 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
short same_lci = 0;
int rc = 0;
- if ((rt = x25_get_route(dest_addr)) != NULL) {
+ if ((rt = x25_get_route(dest_addr)) == NULL)
+ goto out_no_route;
- if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
- /* This shouldnt happen, if it occurs somehow
- * do something sensible
- */
- goto out_put_route;
- }
-
- /* Avoid a loop. This is the normal exit path for a
- * system with only one x.25 iface and default route
+ if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
+ /* This shouldnt happen, if it occurs somehow
+ * do something sensible
*/
- if (rt->dev == from->dev) {
- goto out_put_nb;
- }
+ goto out_put_route;
+ }
- /* Remote end sending a call request on an already
- * established LCI? It shouldnt happen, just in case..
- */
- read_lock_bh(&x25_forward_list_lock);
- list_for_each(entry, &x25_forward_list) {
- x25_frwd = list_entry(entry, struct x25_forward, node);
- if (x25_frwd->lci == lci) {
- printk(KERN_WARNING "X.25: call request for lci which is already registered!, transmitting but not registering new pair\n");
- same_lci = 1;
- }
- }
- read_unlock_bh(&x25_forward_list_lock);
-
- /* Save the forwarding details for future traffic */
- if (!same_lci){
- if ((new_frwd = kmalloc(sizeof(struct x25_forward),
- GFP_ATOMIC)) == NULL){
- rc = -ENOMEM;
- goto out_put_nb;
- }
- new_frwd->lci = lci;
- new_frwd->dev1 = rt->dev;
- new_frwd->dev2 = from->dev;
- write_lock_bh(&x25_forward_list_lock);
- list_add(&new_frwd->node, &x25_forward_list);
- write_unlock_bh(&x25_forward_list_lock);
+ /* Avoid a loop. This is the normal exit path for a
+ * system with only one x.25 iface and default route
+ */
+ if (rt->dev == from->dev) {
+ goto out_put_nb;
+ }
+
+ /* Remote end sending a call request on an already
+ * established LCI? It shouldnt happen, just in case..
+ */
+ read_lock_bh(&x25_forward_list_lock);
+ list_for_each(entry, &x25_forward_list) {
+ x25_frwd = list_entry(entry, struct x25_forward, node);
+ if (x25_frwd->lci == lci) {
+ printk(KERN_WARNING "X.25: call request for lci which is already registered!, transmitting but not registering new pair\n");
+ same_lci = 1;
}
+ }
+ read_unlock_bh(&x25_forward_list_lock);
- /* Forward the call request */
- if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
+ /* Save the forwarding details for future traffic */
+ if (!same_lci){
+ if ((new_frwd = kmalloc(sizeof(struct x25_forward),
+ GFP_ATOMIC)) == NULL){
+ rc = -ENOMEM;
goto out_put_nb;
}
- x25_transmit_link(skbn, neigh_new);
- rc = 1;
+ new_frwd->lci = lci;
+ new_frwd->dev1 = rt->dev;
+ new_frwd->dev2 = from->dev;
+ write_lock_bh(&x25_forward_list_lock);
+ list_add(&new_frwd->node, &x25_forward_list);
+ write_unlock_bh(&x25_forward_list_lock);
}
+ /* Forward the call request */
+ if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
+ goto out_put_nb;
+ }
+ x25_transmit_link(skbn, neigh_new);
+ rc = 1;
+
out_put_nb:
x25_neigh_put(neigh_new);
out_put_route:
x25_route_put(rt);
+
+out_no_route:
return rc;
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 0c3a70ac507..785c3e39f06 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2089,7 +2089,7 @@ void xfrm_audit_log(uid_t auid, u32 sid, int type, int result,
sizeof(struct in6_addr));
}
audit_log_format(audit_buf,
- " src=" NIP6_FMT "dst=" NIP6_FMT,
+ " src=" NIP6_FMT " dst=" NIP6_FMT,
NIP6(saddr6), NIP6(daddr6));
}
break;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 96789952f6a..e81e2fb3d42 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2025,7 +2025,7 @@ nlmsg_failure:
return -1;
}
-static int inline xfrm_sa_len(struct xfrm_state *x)
+static inline int xfrm_sa_len(struct xfrm_state *x)
{
int l = 0;
if (x->aalg)