-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  arch/mips/Kconfig | 4
-rw-r--r--  arch/mips/au1000/common/dbdma.c | 2
-rw-r--r--  arch/mips/au1000/common/irq.c | 16
-rw-r--r--  arch/mips/au1000/pb1200/irqmap.c | 2
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c | 44
-rw-r--r--  arch/mips/pci/pci-bcm1480.c | 5
-rw-r--r--  arch/s390/kernel/entry.S | 2
-rw-r--r--  arch/s390/kernel/setup.c | 4
-rw-r--r--  arch/sparc/kernel/devices.c | 4
-rw-r--r--  arch/sparc/kernel/pcic.c | 8
-rw-r--r--  arch/sparc64/defconfig | 9
-rw-r--r--  arch/sparc64/kernel/isa.c | 2
-rw-r--r--  arch/sparc64/kernel/ldc.c | 15
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c | 4
-rw-r--r--  arch/sparc64/kernel/smp.c | 5
-rw-r--r--  arch/um/Makefile-i386 | 5
-rw-r--r--  arch/x86/Kconfig | 8
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 2
-rw-r--r--  arch/x86/kernel/nmi_32.c | 3
-rw-r--r--  arch/x86/kernel/nmi_64.c | 3
-rw-r--r--  arch/x86/kernel/topology.c | 5
-rw-r--r--  crypto/fcrypt.c | 88
-rw-r--r--  drivers/ata/ahci.c | 4
-rw-r--r--  drivers/ata/pata_amd.c | 5
-rw-r--r--  drivers/ata/pata_via.c | 4
-rw-r--r--  drivers/ata/sata_mv.c | 9
-rw-r--r--  drivers/ata/sata_nv.c | 32
-rw-r--r--  drivers/char/cs5535_gpio.c | 5
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 13
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 10
-rw-r--r-- [-rwxr-xr-x]  drivers/net/chelsio/cxgb2.c | 0
-rw-r--r-- [-rwxr-xr-x]  drivers/net/chelsio/pm3393.c | 0
-rw-r--r-- [-rwxr-xr-x]  drivers/net/chelsio/sge.c | 0
-rw-r--r-- [-rwxr-xr-x]  drivers/net/chelsio/sge.h | 0
-rw-r--r--  drivers/net/fec_mpc52xx.c | 4
-rw-r--r--  drivers/net/gianfar.c | 2
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 1
-rw-r--r--  drivers/net/pasemi_mac.c | 4
-rw-r--r--  drivers/net/phy/mdio_bus.c | 9
-rw-r--r--  drivers/net/phy/phy_device.c | 12
-rw-r--r--  drivers/net/sky2.c | 6
-rw-r--r--  drivers/net/smc911x.c | 2
-rw-r--r--  drivers/rtc/interface.c | 4
-rw-r--r--  drivers/rtc/rtc-dev.c | 6
-rw-r--r--  drivers/rtc/rtc-max6902.c | 12
-rw-r--r--  drivers/s390/block/dcssblk.c | 4
-rw-r--r--  drivers/s390/cio/css.c | 1
-rw-r--r--  drivers/s390/cio/device_id.c | 37
-rw-r--r--  drivers/s390/net/ctcmain.c | 1
-rw-r--r--  drivers/spi/at25.c | 7
-rw-r--r--  drivers/spi/spi.c | 19
-rw-r--r--  drivers/spi/spi_bfin5xx.c | 866
-rw-r--r--  fs/aio.c | 7
-rw-r--r--  fs/bfs/inode.c | 3
-rw-r--r--  fs/cifs/cifsacl.c | 33
-rw-r--r--  fs/jbd/checkpoint.c | 12
-rw-r--r--  fs/jbd/commit.c | 8
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 20
-rw-r--r--  fs/proc/generic.c | 9
-rw-r--r--  fs/proc/inode.c | 9
-rw-r--r--  fs/proc/root.c | 1
-rw-r--r--  fs/reiserfs/procfs.c | 6
-rw-r--r--  fs/ufs/dir.c | 2
-rw-r--r--  fs/ufs/super.c | 4
-rw-r--r--  include/asm-blackfin/bfin5xx_spi.h | 3
-rw-r--r--  include/asm-blackfin/mach-bf533/portmux.h | 2
-rw-r--r--  include/asm-blackfin/mach-bf548/defBF54x_base.h | 17
-rw-r--r--  include/asm-mips/mach-au1x00/au1000.h | 21
-rw-r--r--  include/linux/inet_lro.h | 3
-rw-r--r--  include/linux/jbd.h | 2
-rw-r--r--  include/linux/mm.h | 16
-rw-r--r--  include/linux/phy.h | 1
-rw-r--r--  include/linux/proc_fs.h | 1
-rw-r--r--  include/linux/thread_info.h | 17
-rw-r--r--  kernel/Kconfig.instrumentation | 8
-rw-r--r--  kernel/fork.c | 21
-rw-r--r--  kernel/futex.c | 27
-rw-r--r--  kernel/lockdep.c | 29
-rw-r--r--  kernel/sched.c | 146
-rw-r--r--  kernel/sched_fair.c | 7
-rw-r--r--  kernel/sysctl.c | 4
-rw-r--r--  kernel/sysctl_check.c | 2
-rw-r--r--  mm/backing-dev.c | 4
-rw-r--r--  mm/filemap_xip.c | 2
-rw-r--r--  mm/mmap.c | 15
-rw-r--r--  mm/nommu.c | 3
-rw-r--r--  mm/slab.c | 1
-rw-r--r--  mm/slob.c | 1
-rw-r--r--  mm/slub.c | 6
-rw-r--r--  net/ipv4/inet_lro.c | 3
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblc.c | 7
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblcr.c | 7
-rw-r--r--  net/ipv4/ipvs/ip_vs_sched.c | 27
-rw-r--r--  net/ipv4/tcp_input.c | 17
-rw-r--r--  net/ipv4/tcp_output.c | 3
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 2
-rw-r--r--  net/rose/rose_dev.c | 2
-rw-r--r--  security/dummy.c | 2
-rw-r--r--  security/selinux/selinuxfs.c | 65
100 files changed, 1129 insertions(+), 805 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 2bbe40ea4d1..3002cc811c1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2598,13 +2598,6 @@ L: https://tango.0pointer.de/mailman/listinfo/s270-linux
W: http://0pointer.de/lennart/tchibo.html
S: Maintained
-MTRR AND SIMILAR SUPPORT [i386]
-P: Richard Gooch
-M: rgooch@atnf.csiro.au
-L: linux-kernel@vger.kernel.org
-W: http://www.atnf.csiro.au/~rgooch/linux/kernel-patches.html
-S: Maintained
-
MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
P: Pierre Ossman
M: drzeus-mmc@drzeus.cx
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 455bd1f560a..c6fc405a6c8 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -714,6 +714,10 @@ config ARCH_HAS_ILOG2_U64
bool
default n
+config ARCH_SUPPORTS_OPROFILE
+ bool
+ default y if !MIPS_MT_SMTC
+
config GENERIC_FIND_NEXT_BIT
bool
default y
diff --git a/arch/mips/au1000/common/dbdma.c b/arch/mips/au1000/common/dbdma.c
index 9d6ad43fded..edf91f41a78 100644
--- a/arch/mips/au1000/common/dbdma.c
+++ b/arch/mips/au1000/common/dbdma.c
@@ -859,7 +859,7 @@ dbdma_interrupt(int irq, void *dev_id)
intstat = dbdma_gptr->ddma_intstat;
au_sync();
- chan_index = ffs(intstat);
+ chan_index = __ffs(intstat);
ctp = chan_tab_ptr[chan_index];
cp = ctp->chan_ptr;
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index ddfb7f0a17a..3c7714f057a 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -462,9 +462,9 @@ static void intc0_req0_irqdispatch(void)
return;
}
#endif
- bit = ffs(intc0_req0);
+ bit = __ffs(intc0_req0);
intc0_req0 &= ~(1 << bit);
- do_IRQ(MIPS_CPU_IRQ_BASE + bit);
+ do_IRQ(AU1000_INTC0_INT_BASE + bit);
}
@@ -478,9 +478,9 @@ static void intc0_req1_irqdispatch(void)
if (!intc0_req1)
return;
- bit = ffs(intc0_req1);
+ bit = __ffs(intc0_req1);
intc0_req1 &= ~(1 << bit);
- do_IRQ(bit);
+ do_IRQ(AU1000_INTC0_INT_BASE + bit);
}
@@ -498,9 +498,9 @@ static void intc1_req0_irqdispatch(void)
if (!intc1_req0)
return;
- bit = ffs(intc1_req0);
+ bit = __ffs(intc1_req0);
intc1_req0 &= ~(1 << bit);
- do_IRQ(MIPS_CPU_IRQ_BASE + 32 + bit);
+ do_IRQ(AU1000_INTC1_INT_BASE + bit);
}
@@ -514,9 +514,9 @@ static void intc1_req1_irqdispatch(void)
if (!intc1_req1)
return;
- bit = ffs(intc1_req1);
+ bit = __ffs(intc1_req1);
intc1_req1 &= ~(1 << bit);
- do_IRQ(MIPS_CPU_IRQ_BASE + 32 + bit);
+ do_IRQ(AU1000_INTC1_INT_BASE + bit);
}
asmlinkage void plat_irq_dispatch(void)
diff --git a/arch/mips/au1000/pb1200/irqmap.c b/arch/mips/au1000/pb1200/irqmap.c
index c096be4ed4e..8fcd0df86f9 100644
--- a/arch/mips/au1000/pb1200/irqmap.c
+++ b/arch/mips/au1000/pb1200/irqmap.c
@@ -74,7 +74,7 @@ irqreturn_t pb1200_cascade_handler( int irq, void *dev_id)
bcsr->int_status = bisr;
for( ; bisr; bisr &= (bisr-1) )
{
- extirq_nr = PB1200_INT_BEGIN + ffs(bisr);
+ extirq_nr = PB1200_INT_BEGIN + __ffs(bisr);
/* Ack and dispatch IRQ */
do_IRQ(extirq_nr);
}
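
The intc dispatch fixes above and the pb1200 cascade handler all hinge on the same off-by-one: ffs() returns a 1-based bit position (0 when no bit is set), while __ffs() returns the 0-based index of the first set bit and requires a non-zero argument. A minimal sketch of the difference; pick_irq() and its values are illustrative, not from the patch:

    #include <linux/bitops.h>

    /* intstat with only bit 3 set must dispatch "base + 3" */
    static int pick_irq(unsigned long intstat, int base)
    {
            if (!intstat)                   /* __ffs(0) is undefined */
                    return -1;
            return base + __ffs(intstat);   /* bit 3 -> base + 3 */
            /* ffs() here would yield base + 4: every IRQ off by one */
    }

Note that every dispatcher above checks for a zero status word before calling __ffs(), which is exactly the precondition __ffs() requires.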
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 423bc2c473d..bdfa07aecd9 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -6,6 +6,7 @@
* Copyright (C) 2004, 05, 06 by Ralf Baechle
* Copyright (C) 2005 by MIPS Technologies, Inc.
*/
+#include <linux/cpumask.h>
#include <linux/oprofile.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
@@ -33,11 +34,45 @@
#ifdef CONFIG_MIPS_MT_SMP
#define WHAT (M_TC_EN_VPE | M_PERFCTL_VPEID(smp_processor_id()))
#define vpe_id() smp_processor_id()
+
+/*
+ * The number of bits to shift to convert between counters per core and
+ * counters per VPE. There is no reasonable interface atm to obtain the
+ * number of VPEs used by Linux and in the 34K this number is fixed to two
+ * anyways so we hardcore a few things here for the moment. The way it's
+ * done here will ensure that oprofile VSMP kernel will run right on a lesser
+ * core like a 24K also or with maxcpus=1.
+ */
+static inline unsigned int vpe_shift(void)
+{
+ if (num_possible_cpus() > 1)
+ return 1;
+
+ return 0;
+}
+
#else
+
#define WHAT 0
#define vpe_id() 0
+
+static inline unsigned int vpe_shift(void)
+{
+ return 0;
+}
+
#endif
+static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
+{
+ return counters >> vpe_shift();
+}
+
+static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
+{
+ return counters << vpe_shift();
+}
+
#define __define_perf_accessors(r, n, np) \
\
static inline unsigned int r_c0_ ## r ## n(void) \
@@ -269,9 +304,7 @@ static int __init mipsxx_init(void)
reset_counters(counters);
-#ifdef CONFIG_MIPS_MT_SMP
- counters >>= 1;
-#endif
+ counters = counters_total_to_per_cpu(counters);
op_model_mipsxx_ops.num_counters = counters;
switch (current_cpu_type()) {
@@ -330,9 +363,8 @@ static int __init mipsxx_init(void)
static void mipsxx_exit(void)
{
int counters = op_model_mipsxx_ops.num_counters;
-#ifdef CONFIG_MIPS_MT_SMP
- counters <<= 1;
-#endif
+
+ counters = counters_per_cpu_to_total(counters);
reset_counters(counters);
perf_irq = null_perf_irq;
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
index bc647cb7729..47f316c86ab 100644
--- a/arch/mips/pci/pci-bcm1480.c
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -76,7 +76,10 @@ static inline void WRITECFG32(u32 addr, u32 data)
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- return K_BCM1480_INT_PCI_INTA + pin;
+ if (pin == 0)
+ return -1;
+
+ return K_BCM1480_INT_PCI_INTA - 1 + pin;
}
/* Do platform specific device initialization at pci_enable_device() time */
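
The bcm1480 change corrects a related 1-based/0-based mixup: the pin argument is 1 for INTA# through 4 for INTD#, with 0 meaning the function has no interrupt pin, so the interrupt base must be offset by pin - 1. A sketch of the corrected mapping, under those assumptions:

    /* map_pci_irq() is illustrative; K_BCM1480_INT_PCI_INTA is from the patch */
    static int map_pci_irq(u8 pin)
    {
            if (pin == 0)                   /* no interrupt pin wired */
                    return -1;
            /* pin 1 (INTA#) -> INTA + 0, ..., pin 4 (INTD#) -> INTA + 3 */
            return K_BCM1480_INT_PCI_INTA - 1 + pin;
    }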
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index b2b2edc40eb..1a6dac8df6f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1079,8 +1079,10 @@ cleanup_io_leave_insn:
.Lexecve_tail: .long execve_tail
.Ljump_table: .long pgm_check_table
.Lschedule: .long schedule
+#ifdef CONFIG_PREEMPT
.Lpreempt_schedule_irq:
.long preempt_schedule_irq
+#endif
.Ltrace: .long syscall_trace
.Lschedtail: .long schedule_tail
.Lsysc_table: .long sys_call_table
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 50f8f1e3760..577aa7dd660 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -486,9 +486,7 @@ static void setup_addressing_mode(void)
if (s390_noexec) {
printk("S390 execute protection active, ");
set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
- return;
- }
- if (switch_amode) {
+ } else if (switch_amode) {
printk("S390 address spaces switched, ");
set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
}
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
index af90a5f9ab5..b240b8863fd 100644
--- a/arch/sparc/kernel/devices.c
+++ b/arch/sparc/kernel/devices.c
@@ -62,8 +62,10 @@ static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
int err = check_cpu_node(dp->node, &cur_inst,
compare, compare_arg,
prom_node, mid);
- if (!err)
+ if (!err) {
+ of_node_put(dp);
return 0;
+ }
}
return -ENODEV;
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index f2d432edc92..4cd5d7818dc 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -329,7 +329,7 @@ int __init pcic_probe(void)
pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr";
if ((pcic->pcic_config_space_addr =
ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == 0) {
- prom_printf("PCIC: Error, cannot map"
+ prom_printf("PCIC: Error, cannot map "
"PCI Configuration Space Address.\n");
prom_halt();
}
@@ -341,7 +341,7 @@ int __init pcic_probe(void)
pcic->pcic_res_cfg_data.name = "pcic_cfg_data";
if ((pcic->pcic_config_space_data =
ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == 0) {
- prom_printf("PCIC: Error, cannot map"
+ prom_printf("PCIC: Error, cannot map "
"PCI Configuration Space Data.\n");
prom_halt();
}
@@ -518,8 +518,8 @@ static void pcic_map_pci_device(struct linux_pcic *pcic,
* board in a PCI slot. We must remap it
* under 64K but it is not done yet. XXX
*/
- printk("PCIC: Skipping I/O space at 0x%lx,"
- "this will Oops if a driver attaches;"
+ printk("PCIC: Skipping I/O space at 0x%lx, "
+ "this will Oops if a driver attaches "
"device '%s' at %02x:%02x)\n", address,
namebuf, dev->bus->number, dev->devfn);
}
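
The three pcic.c hunks are all the same bug: adjacent C string literals are concatenated by the compiler with no separator, so a format string split across lines must carry its own spaces. For example (hypothetical message):

    /* concatenates to "cannot mapPCI Configuration ..." */
    printk("PCIC: Error, cannot map"
           "PCI Configuration Space Address.\n");

    /* trailing space restores the intended text */
    printk("PCIC: Error, cannot map "
           "PCI Configuration Space Address.\n");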
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 22734ac08c8..f62d9f6c5e2 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24-rc1
-# Wed Oct 31 15:36:47 2007
+# Linux kernel version: 2.6.24-rc4
+# Tue Dec 4 00:37:59 2007
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
@@ -47,6 +47,7 @@ CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
# CONFIG_AUDIT is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=18
@@ -154,6 +155,7 @@ CONFIG_PCI_DOMAINS=y
CONFIG_PCI_SYSCALL=y
CONFIG_ARCH_SUPPORTS_MSI=y
CONFIG_PCI_MSI=y
+# CONFIG_PCI_LEGACY is not set
# CONFIG_PCI_DEBUG is not set
CONFIG_SUN_OPENPROMFS=m
CONFIG_SPARC32_COMPAT=y
@@ -359,7 +361,6 @@ CONFIG_IDE_GENERIC=y
CONFIG_BLK_DEV_IDEPCI=y
# CONFIG_IDEPCI_SHARE_IRQ is not set
CONFIG_IDEPCI_PCIBUS_ORDER=y
-# CONFIG_BLK_DEV_OFFBOARD is not set
# CONFIG_BLK_DEV_GENERIC is not set
# CONFIG_BLK_DEV_OPTI621 is not set
CONFIG_BLK_DEV_IDEDMA_PCI=y
@@ -584,7 +585,6 @@ CONFIG_NIU=m
# CONFIG_USB_KAWETH is not set
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
-# CONFIG_USB_USBNET_MII is not set
# CONFIG_USB_USBNET is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
@@ -780,6 +780,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADT7470 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_F75375S is not set
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
index 0f19dce1c90..b5f7b354084 100644
--- a/arch/sparc64/kernel/isa.c
+++ b/arch/sparc64/kernel/isa.c
@@ -155,6 +155,7 @@ void __init isa_init(void)
isa_br = kzalloc(sizeof(*isa_br), GFP_KERNEL);
if (!isa_br) {
printk(KERN_DEBUG "isa: cannot allocate sparc_isa_bridge");
+ pci_dev_put(pdev);
return;
}
@@ -168,6 +169,7 @@ void __init isa_init(void)
printk(KERN_DEBUG "isa: device registration error for %s!\n",
dp->path_component_name);
kfree(isa_br);
+ pci_dev_put(pdev);
return;
}
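
Both isa.c hunks enforce the PCI refcounting rule: pci_get_device(), which produced pdev here, returns the device with an elevated reference count, so every early exit has to drop it with pci_dev_put() or the reference leaks. A minimal sketch of the rule; vendor, device, and setup_fails() stand in for the kzalloc()/registration failures handled in isa_init():

    struct pci_dev *pdev = pci_get_device(vendor, device, NULL);

    if (pdev && setup_fails()) {
            pci_dev_put(pdev);      /* every early return must drop the ref */
            return;
    }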
diff --git a/arch/sparc64/kernel/ldc.c b/arch/sparc64/kernel/ldc.c
index 217478a9412..63969f61028 100644
--- a/arch/sparc64/kernel/ldc.c
+++ b/arch/sparc64/kernel/ldc.c
@@ -2338,6 +2338,7 @@ static int __init ldc_init(void)
unsigned long major, minor;
struct mdesc_handle *hp;
const u64 *v;
+ int err;
u64 mp;
hp = mdesc_grab();
@@ -2345,29 +2346,33 @@ static int __init ldc_init(void)
return -ENODEV;
mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
+ err = -ENODEV;
if (mp == MDESC_NODE_NULL)
- return -ENODEV;
+ goto out;
v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
if (!v)
- return -ENODEV;
+ goto out;
major = 1;
minor = 0;
if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
- return -ENODEV;
+ goto out;
}
printk(KERN_INFO "%s", version);
if (!*v) {
printk(KERN_INFO PFX "Domaining disabled.\n");
- return -ENODEV;
+ goto out;
}
ldom_domaining_enabled = 1;
+ err = 0;
- return 0;
+out:
+ mdesc_release(hp);
+ return err;
}
core_initcall(ldc_init);
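
Beyond dropping the mdesc reference on every exit path, the ldc_init() rework is a textbook use of the kernel's single-exit error-handling idiom: set err pessimistically, jump to one label on failure, release the resource there. Sketched in isolation, with probe_ok() as a placeholder for the property and hvapi checks:

    static int __init example_init(void)
    {
            struct mdesc_handle *hp = mdesc_grab();
            int err = -ENODEV;

            if (!hp)                        /* nothing grabbed yet */
                    return -ENODEV;

            if (!probe_ok())                /* placeholder check */
                    goto out;

            err = 0;                        /* success falls through */
    out:
            mdesc_release(hp);              /* exactly one release site */
            return err;
    }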
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 8c4875bdb4a..e587a372f3f 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -1022,6 +1022,10 @@ void __init sun4v_pci_init(struct device_node *dp, char *model_name)
}
prop = of_find_property(dp, "reg", NULL);
+ if (!prop) {
+ prom_printf("SUN4V_PCI: Could not find config registers\n");
+ prom_halt();
+ }
regs = prop->value;
devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 7cd8d94df0d..894b506f963 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -236,8 +236,9 @@ void smp_synchronize_tick_client(void)
t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif
- printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
- "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
+ printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
+ "(last diff %ld cycles, maxerr %lu cycles)\n",
+ smp_processor_id(), delta, rt);
}
static void smp_start_sync_tick_client(int cpu);
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index 67290117d90..561e373bd85 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -22,11 +22,6 @@ export LDFLAGS HOSTCFLAGS HOSTLDFLAGS UML_OBJCOPYFLAGS
endif
endif
-KBUILD_CFLAGS += -DCONFIG_X86_32
-KBUILD_AFLAGS += -DCONFIG_X86_32
-CONFIG_X86_32 := y
-export CONFIG_X86_32
-
# First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
include $(srctree)/arch/x86/Makefile_32.cpu
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 368864dfe6e..80b7ba4056d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -112,8 +112,9 @@ config GENERIC_TIME_VSYSCALL
bool
default X86_64
-
-
+config ARCH_SUPPORTS_OPROFILE
+ bool
+ default y
config ZONE_DMA32
@@ -148,7 +149,8 @@ config X86_SMP
config X86_HT
bool
- depends on SMP && !(X86_VISWS || X86_VOYAGER || MK8)
+ depends on SMP
+ depends on (X86_32 && !(X86_VISWS || X86_VOYAGER)) || (X86_64 && !MK8)
default y
config X86_BIOS_REBOOT
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9921b01fe19..606fe4d55a9 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -497,7 +497,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif
-static void free_cache_attributes(unsigned int cpu)
+static void __cpuinit free_cache_attributes(unsigned int cpu)
{
int i;
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index f5cc47c60b1..80ca72e5ac2 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -106,7 +106,8 @@ static int __init check_nmi_watchdog(void)
if (!per_cpu(wd_enabled, cpu))
continue;
if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
- printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+ printk(KERN_WARNING "WARNING: CPU#%d: NMI "
+ "appears to be stuck (%d->%d)!\n",
cpu,
prev_nmi_count[cpu],
nmi_count(cpu));
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
index a576fd74006..4253c4e8849 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@ -109,7 +109,8 @@ int __init check_nmi_watchdog (void)
if (!per_cpu(wd_enabled, cpu))
continue;
if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
- printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+ printk(KERN_WARNING "WARNING: CPU#%d: NMI "
+ "appears to be stuck (%d->%d)!\n",
cpu,
counts[cpu],
cpu_pda(cpu)->__nmi_count);
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 8caa0b77746..7e16d675eb8 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -33,7 +33,7 @@
static struct i386_cpu cpu_devices[NR_CPUS];
-int arch_register_cpu(int num)
+int __cpuinit arch_register_cpu(int num)
{
/*
* CPU0 cannot be offlined due to several
@@ -53,7 +53,8 @@ int arch_register_cpu(int num)
}
#ifdef CONFIG_HOTPLUG_CPU
-void arch_unregister_cpu(int num) {
+void arch_unregister_cpu(int num)
+{
return unregister_cpu(&cpu_devices[num].cpu);
}
EXPORT_SYMBOL(arch_register_cpu);
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index d161949fdb9..a32cb68bbc6 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -51,7 +51,7 @@
#define ROUNDS 16
struct fcrypt_ctx {
- u32 sched[ROUNDS];
+ __be32 sched[ROUNDS];
};
/* Rotate right two 32 bit numbers as a 56 bit number */
@@ -73,8 +73,8 @@ do { \
* /afs/transarc.com/public/afsps/afs.rel31b.export-src/rxkad/sboxes.h
*/
#undef Z
-#define Z(x) __constant_be32_to_cpu(x << 3)
-static const u32 sbox0[256] = {
+#define Z(x) __constant_cpu_to_be32(x << 3)
+static const __be32 sbox0[256] = {
Z(0xea), Z(0x7f), Z(0xb2), Z(0x64), Z(0x9d), Z(0xb0), Z(0xd9), Z(0x11),
Z(0xcd), Z(0x86), Z(0x86), Z(0x91), Z(0x0a), Z(0xb2), Z(0x93), Z(0x06),
Z(0x0e), Z(0x06), Z(0xd2), Z(0x65), Z(0x73), Z(0xc5), Z(0x28), Z(0x60),
@@ -110,8 +110,8 @@ static const u32 sbox0[256] = {
};
#undef Z
-#define Z(x) __constant_be32_to_cpu((x << 27) | (x >> 5))
-static const u32 sbox1[256] = {
+#define Z(x) __constant_cpu_to_be32((x << 27) | (x >> 5))
+static const __be32 sbox1[256] = {
Z(0x77), Z(0x14), Z(0xa6), Z(0xfe), Z(0xb2), Z(0x5e), Z(0x8c), Z(0x3e),
Z(0x67), Z(0x6c), Z(0xa1), Z(0x0d), Z(0xc2), Z(0xa2), Z(0xc1), Z(0x85),
Z(0x6c), Z(0x7b), Z(0x67), Z(0xc6), Z(0x23), Z(0xe3), Z(0xf2), Z(0x89),
@@ -147,8 +147,8 @@ static const u32 sbox1[256] = {
};
#undef Z
-#define Z(x) __constant_be32_to_cpu(x << 11)
-static const u32 sbox2[256] = {
+#define Z(x) __constant_cpu_to_be32(x << 11)
+static const __be32 sbox2[256] = {
Z(0xf0), Z(0x37), Z(0x24), Z(0x53), Z(0x2a), Z(0x03), Z(0x83), Z(0x86),
Z(0xd1), Z(0xec), Z(0x50), Z(0xf0), Z(0x42), Z(0x78), Z(0x2f), Z(0x6d),
Z(0xbf), Z(0x80), Z(0x87), Z(0x27), Z(0x95), Z(0xe2), Z(0xc5), Z(0x5d),
@@ -184,8 +184,8 @@ static const u32 sbox2[256] = {
};
#undef Z
-#define Z(x) __constant_be32_to_cpu(x << 19)
-static const u32 sbox3[256] = {
+#define Z(x) __constant_cpu_to_be32(x << 19)
+static const __be32 sbox3[256] = {
Z(0xa9), Z(0x2a), Z(0x48), Z(0x51), Z(0x84), Z(0x7e), Z(0x49), Z(0xe2),
Z(0xb5), Z(0xb7), Z(0x42), Z(0x33), Z(0x7d), Z(0x5d), Z(0xa6), Z(0x12),
Z(0x44), Z(0x48), Z(0x6d), Z(0x28), Z(0xaa), Z(0x20), Z(0x6d), Z(0x57),
@@ -225,7 +225,7 @@ static const u32 sbox3[256] = {
*/
#define F_ENCRYPT(R, L, sched) \
do { \
- union lc4 { u32 l; u8 c[4]; } u; \
+ union lc4 { __be32 l; u8 c[4]; } u; \
u.l = sched ^ R; \
L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \
} while(0)
@@ -237,7 +237,7 @@ static void fcrypt_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
struct {
- u32 l, r;
+ __be32 l, r;
} X;
memcpy(&X, src, sizeof(X));
@@ -269,7 +269,7 @@ static void fcrypt_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
struct {
- u32 l, r;
+ __be32 l, r;
} X;
memcpy(&X, src, sizeof(X));
@@ -328,22 +328,22 @@ static int fcrypt_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key
k |= (*key) >> 1;
/* Use lower 32 bits for schedule, rotate by 11 each round (16 times) */
- ctx->sched[0x0] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0x1] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0x2] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0x3] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0x4] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0x5] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0x6] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0x7] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0x8] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0x9] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0xa] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0xb] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0xc] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0xd] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0xe] = be32_to_cpu(k); ror56_64(k, 11);
- ctx->sched[0xf] = be32_to_cpu(k);
+ ctx->sched[0x0] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0x1] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0x2] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0x3] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0x4] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0x5] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0x6] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0x7] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0x8] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0x9] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0xa] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0xb] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0xc] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0xd] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0xe] = cpu_to_be32(k); ror56_64(k, 11);
+ ctx->sched[0xf] = cpu_to_be32(k);
return 0;
#else
@@ -369,22 +369,22 @@ static int fcrypt_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key
lo |= (*key) >> 1;
/* Use lower 32 bits for schedule, rotate by 11 each round (16 times) */
- ctx->sched[0x0] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0x1] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0x2] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0x3] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0x4] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0x5] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0x6] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0x7] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0x8] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0x9] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0xa] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0xb] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0xc] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0xd] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0xe] = be32_to_cpu(lo); ror56(hi, lo, 11);
- ctx->sched[0xf] = be32_to_cpu(lo);
+ ctx->sched[0x0] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0x1] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0x2] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0x3] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0x4] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0x5] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0x6] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0x7] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0x8] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0x9] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0xa] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0xb] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0xc] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0xd] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0xe] = cpu_to_be32(lo); ror56(hi, lo, 11);
+ ctx->sched[0xf] = cpu_to_be32(lo);
return 0;
#endif
}
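
The fcrypt changes are type annotations, not behavior changes: the schedule words were already stored in big-endian order, but declaring them __be32 and building the S-boxes with __constant_cpu_to_be32() lets sparse (make C=1) verify that wire-order values never leak into native u32 arithmetic. A hedged sketch of what the annotation catches:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    static u32 sketch(void)
    {
            __be32 wire = cpu_to_be32(0x12345678);  /* wire-order value */

            /* sparse would flag: "restricted __be32 degrades to integer"
             * u32 bad = wire + 1;
             */
            return be32_to_cpu(wire) + 1;   /* convert first, then compute */
    }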
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ed9b407e42d..4688dbf2d11 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -536,6 +536,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index c5779ad4abc..3cc27b51465 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -25,7 +25,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_amd"
-#define DRV_VERSION "0.3.9"
+#define DRV_VERSION "0.3.10"
/**
* timing_setup - shared timing computation and load
@@ -115,7 +115,8 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse
}
/* UDMA timing */
- pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
+ if (at.udma)
+ pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}
/**
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index a4175fbdd17..453d72bf259 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -63,7 +63,7 @@
#include <linux/dmi.h>
#define DRV_NAME "pata_via"
-#define DRV_VERSION "0.3.2"
+#define DRV_VERSION "0.3.3"
/*
* The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx
@@ -296,7 +296,7 @@ static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mo
}
/* Set UDMA unless device is not UDMA capable */
- if (udma_type) {
+ if (udma_type && t.udma) {
u8 cable80_status;
/* Get 80-wire cable detection bit */
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 8d864e5e97e..fe0105d35ba 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2503,6 +2503,15 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
case chip_7042:
hp_flags |= MV_HP_PCIE;
+ if (pdev->vendor == PCI_VENDOR_ID_TTI &&
+ (pdev->device == 0x2300 || pdev->device == 0x2310))
+ {
+ printk(KERN_WARNING "sata_mv: Highpoint RocketRAID BIOS"
+ " will CORRUPT DATA on attached drives when"
+ " configured as \"Legacy\". BEWARE!\n");
+ printk(KERN_WARNING "sata_mv: Use BIOS \"JBOD\" volumes"
+ " instead for safety.\n");
+ }
case chip_6042:
hpriv->ops = &mv6xxx_ops;
hp_flags |= MV_HP_GEN_IIE;
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 44f9e5d9e36..ed5dc7cb50c 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -791,11 +791,13 @@ static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
- /* Since commands where a result TF is requested are not
- executed in ADMA mode, the only time this function will be called
- in ADMA mode will be if a command fails. In this case we
- don't care about going into register mode with ADMA commands
- pending, as the commands will all shortly be aborted anyway. */
+ /* Other than when internal or pass-through commands are executed,
+ the only time this function will be called in ADMA mode will be
+ if a command fails. In the failure case we don't care about going
+ into register mode with ADMA commands pending, as the commands will
+ all shortly be aborted anyway. We assume that NCQ commands are not
+ issued via passthrough, which is the only way that switching into
+ ADMA mode could abort outstanding commands. */
nv_adma_register_mode(ap);
ata_tf_read(ap, tf);
@@ -1359,11 +1361,9 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
struct nv_adma_port_priv *pp = qc->ap->private_data;
/* ADMA engine can only be used for non-ATAPI DMA commands,
- or interrupt-driven no-data commands, where a result taskfile
- is not required. */
+ or interrupt-driven no-data commands. */
if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
- (qc->tf.flags & ATA_TFLAG_POLLING) ||
- (qc->flags & ATA_QCFLAG_RESULT_TF))
+ (qc->tf.flags & ATA_TFLAG_POLLING))
return 1;
if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
@@ -1381,6 +1381,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
NV_CPB_CTL_IEN;
if (nv_adma_use_reg_mode(qc)) {
+ BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
+ (qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
ata_qc_prep(qc);
return;
@@ -1425,9 +1427,21 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
VPRINTK("ENTER\n");
+ /* We can't handle result taskfile with NCQ commands, since
+ retrieving the taskfile switches us out of ADMA mode and would abort
+ existing commands. */
+ if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
+ (qc->flags & ATA_QCFLAG_RESULT_TF))) {
+ ata_dev_printk(qc->dev, KERN_ERR,
+ "NCQ w/ RESULT_TF not allowed\n");
+ return AC_ERR_SYSTEM;
+ }
+
if (nv_adma_use_reg_mode(qc)) {
/* use ATA register mode */
VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
+ BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
+ (qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
return ata_qc_issue_prot(qc);
} else
diff --git a/drivers/char/cs5535_gpio.c b/drivers/char/cs5535_gpio.c
index fe6d2407bae..c2d23cae951 100644
--- a/drivers/char/cs5535_gpio.c
+++ b/drivers/char/cs5535_gpio.c
@@ -104,6 +104,11 @@ static ssize_t cs5535_gpio_write(struct file *file, const char __user *data,
for (j = 0; j < ARRAY_SIZE(rm); j++) {
if (c == rm[j].on) {
outl(m1, base + rm[j].wr_offset);
+ /* If enabling output, turn off AUX 1 and AUX 2 */
+ if (c == 'O') {
+ outl(m0, base + 0x10);
+ outl(m0, base + 0x14);
+ }
break;
} else if (c == rm[j].off) {
outl(m0, base + rm[j].wr_offset);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index f59aecf5ec1..fd9c5d51870 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -267,13 +267,12 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
ts->irq_disabled = 0;
enable_irq(spi->irq);
- if (req->msg.status)
- status = req->msg.status;
-
- /* on-wire is a must-ignore bit, a BE12 value, then padding */
- sample = be16_to_cpu(req->sample);
- sample = sample >> 3;
- sample &= 0x0fff;
+ if (status == 0) {
+ /* on-wire is a must-ignore bit, a BE12 value, then padding */
+ sample = be16_to_cpu(req->sample);
+ sample = sample >> 3;
+ sample &= 0x0fff;
+ }
kfree(req);
return status ? status : sample;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index a6469218f19..365024b83d3 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -176,8 +176,6 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
DMA_FROM_DEVICE);
status = spi_sync(host->spi, &host->readback);
- if (status == 0)
- status = host->readback.status;
if (host->dma_dev)
dma_sync_single_for_cpu(host->dma_dev,
@@ -480,8 +478,6 @@ mmc_spi_command_send(struct mmc_spi_host *host,
DMA_BIDIRECTIONAL);
}
status = spi_sync(host->spi, &host->m);
- if (status == 0)
- status = host->m.status;
if (host->dma_dev)
dma_sync_single_for_cpu(host->dma_dev,
@@ -624,8 +620,6 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t)
DMA_BIDIRECTIONAL);
status = spi_sync(spi, &host->m);
- if (status == 0)
- status = host->m.status;
if (status != 0) {
dev_dbg(&spi->dev, "write error (%d)\n", status);
@@ -726,8 +720,6 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t)
}
status = spi_sync(spi, &host->m);
- if (status == 0)
- status = host->m.status;
if (host->dma_dev) {
dma_sync_single_for_cpu(host->dma_dev,
@@ -905,8 +897,6 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
DMA_BIDIRECTIONAL);
tmp = spi_sync(spi, &host->m);
- if (tmp == 0)
- tmp = host->m.status;
if (host->dma_dev)
dma_sync_single_for_cpu(host->dma_dev,
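
These mmc_spi.c deletions, like those in ads7846.c above and rtc-max6902.c below, depend on the spi.c change later in this diff: spi_sync() now folds message->status into its own return value, so the second status check is dead code. Before and after, sketched:

    /* old: two places to look for the error */
    status = spi_sync(host->spi, &host->m);
    if (status == 0)
            status = host->m.status;

    /* new: spi_sync() already returns message->status on success */
    status = spi_sync(host->spi, &host->m);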
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index c5975047c89..c5975047c89 100755..100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 2117c4fbb10..2117c4fbb10 100755..100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index b301c0428ae..b301c0428ae 100755..100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
index cced9dff91c..cced9dff91c 100755..100644
--- a/drivers/net/chelsio/sge.h
+++ b/drivers/net/chelsio/sge.h
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index bf5a7caa5b5..79f7eade477 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -422,7 +422,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
(struct bcom_bd **)&bd);
- dma_unmap_single(&dev->dev, bd->skb_pa, skb->len, DMA_FROM_DEVICE);
+ dma_unmap_single(&dev->dev, bd->skb_pa, rskb->len, DMA_FROM_DEVICE);
/* Test for errors in received frame */
if (status & BCOM_FEC_RX_BD_ERRORS) {
@@ -467,7 +467,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
bcom_prepare_next_buffer(priv->rx_dmatsk);
bd->status = FEC_RX_BUFFER_SIZE;
- bd->skb_pa = dma_map_single(&dev->dev, rskb->data,
+ bd->skb_pa = dma_map_single(&dev->dev, skb->data,
FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
bcom_submit_next_buffer(priv->rx_dmatsk, skb);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 38268d7335a..0431e9ed0fa 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -696,7 +696,7 @@ int startup_gfar(struct net_device *dev)
{
struct txbd8 *txbdp;
struct rxbd8 *rxbdp;
- dma_addr_t addr;
+ dma_addr_t addr = 0;
unsigned long vaddr;
int i;
struct gfar_private *priv = netdev_priv(dev);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 0f306ddb563..8def8657251 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1979,6 +1979,7 @@ static int myri10ge_open(struct net_device *dev)
lro_mgr->lro_arr = mgp->rx_done.lro_desc;
lro_mgr->get_frag_header = myri10ge_get_frag_header;
lro_mgr->max_aggr = myri10ge_lro_max_pkts;
+ lro_mgr->frag_align_pad = 2;
if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
lro_mgr->max_aggr = MAX_SKB_FRAGS;
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 09b4fde8d92..816a59e801b 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -586,7 +586,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
/* CRC error flagged */
mac->netdev->stats.rx_errors++;
mac->netdev->stats.rx_crc_errors++;
- dev_kfree_skb_irq(skb);
+ /* No need to free skb, it'll be reused */
goto next;
}
@@ -1362,7 +1362,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);
- dev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX | NETIF_F_SG;
+ dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG;
/* These should come out of the device tree eventually */
mac->dma_txch = index;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index fc2f0e695a1..c30196d0ad1 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -91,9 +91,12 @@ int mdiobus_register(struct mii_bus *bus)
err = device_register(&phydev->dev);
- if (err)
+ if (err) {
printk(KERN_ERR "phy %d failed to register\n",
i);
+ phy_device_free(phydev);
+ phydev = NULL;
+ }
}
bus->phy_map[i] = phydev;
@@ -110,10 +113,8 @@ void mdiobus_unregister(struct mii_bus *bus)
int i;
for (i = 0; i < PHY_MAX_ADDR; i++) {
- if (bus->phy_map[i]) {
+ if (bus->phy_map[i])
device_unregister(&bus->phy_map[i]->dev);
- kfree(bus->phy_map[i]);
- }
}
}
EXPORT_SYMBOL(mdiobus_unregister);
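
The kfree() removed from mdiobus_unregister() pairs with the phy_device.c hunk below: once dev.release is set to phy_device_release(), the final put_device() triggered by device_unregister() frees the structure, and an explicit kfree() would be a double free. The general shape of the pattern, with names following the patch:

    static void phy_device_release(struct device *dev)
    {
            kfree(to_phy_device(dev));      /* runs on the last put_device() */
    }

    /* registration: dev->dev.release = phy_device_release;
     * teardown:     device_unregister(&phydev->dev);
     *               -- no kfree() by hand afterwards
     */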
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index f6e484812a9..5b9e1751e1b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -44,6 +44,16 @@ static struct phy_driver genphy_driver;
extern int mdio_bus_init(void);
extern void mdio_bus_exit(void);
+void phy_device_free(struct phy_device *phydev)
+{
+ kfree(phydev);
+}
+
+static void phy_device_release(struct device *dev)
+{
+ phy_device_free(to_phy_device(dev));
+}
+
struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
{
struct phy_device *dev;
@@ -54,6 +64,8 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
if (NULL == dev)
return (struct phy_device*) PTR_ERR((void*)-ENOMEM);
+ dev->dev.release = phy_device_release;
+
dev->speed = 0;
dev->duplex = -1;
dev->pause = dev->asym_pause = 0;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 3d1dfc94840..6197afb3ed8 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2906,16 +2906,14 @@ static void sky2_restart(struct work_struct *work)
int i, err;
rtnl_lock();
- sky2_write32(hw, B0_IMSK, 0);
- sky2_read32(hw, B0_IMSK);
- napi_disable(&hw->napi);
-
for (i = 0; i < hw->ports; i++) {
dev = hw->dev[i];
if (netif_running(dev))
sky2_down(dev);
}
+ napi_disable(&hw->napi);
+ sky2_write32(hw, B0_IMSK, 0);
sky2_reset(hw);
sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
napi_enable(&hw->napi);
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 1a3d80bfe9e..76cc1d3adf7 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1299,9 +1299,9 @@ smc911x_rx_dma_irq(int dma, void *data)
PRINT_PKT(skb->data, skb->len);
dev->last_rx = jiffies;
skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
+ netif_rx(skb);
spin_lock_irqsave(&lp->lock, flags);
pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16;
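
The smc911x reorder matters because netif_rx() hands the skb to the network stack, which may free or recycle it at any point afterwards; the byte counter has to read skb->len first:

    /* read skb fields for accounting first... */
    dev->stats.rx_packets++;
    dev->stats.rx_bytes += skb->len;
    /* ...then hand the skb off; the stack may free it immediately */
    netif_rx(skb);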
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a4f56e95cf9..f1e00ff54ce 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -293,7 +293,7 @@ int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task)
return -EINVAL;
/* Cannot register while the char dev is in use */
- if (test_and_set_bit(RTC_DEV_BUSY, &rtc->flags))
+ if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
return -EBUSY;
spin_lock_irq(&rtc->irq_task_lock);
@@ -303,7 +303,7 @@ int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task)
}
spin_unlock_irq(&rtc->irq_task_lock);
- clear_bit(RTC_DEV_BUSY, &rtc->flags);
+ clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
return retval;
}
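
test_and_set_bit_lock()/clear_bit_unlock() are the bit-flag analogue of lock acquire/release: they carry the acquire and release memory-ordering guarantees that plain test_and_set_bit()/clear_bit() do not promise, which is what RTC_DEV_BUSY needs when it guards the char-dev state here and in rtc-dev.c below. The usage shape:

    if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
            return -EBUSY;          /* someone else holds the "lock" */

    /* ... critical section over rtc state ... */

    clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);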
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index ae1bf177d62..025c60a17a4 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -26,7 +26,7 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
struct rtc_device, char_dev);
const struct rtc_class_ops *ops = rtc->ops;
- if (test_and_set_bit(RTC_DEV_BUSY, &rtc->flags))
+ if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
return -EBUSY;
file->private_data = rtc;
@@ -41,7 +41,7 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
}
/* something has gone wrong */
- clear_bit(RTC_DEV_BUSY, &rtc->flags);
+ clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
return err;
}
@@ -402,7 +402,7 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
if (rtc->ops->release)
rtc->ops->release(rtc->dev.parent);
- clear_bit(RTC_DEV_BUSY, &rtc->flags);
+ clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
return 0;
}
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 3e183cfee10..1f956dc5d56 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -89,13 +89,9 @@ static int max6902_get_reg(struct device *dev, unsigned char address,
/* do the i/o */
status = spi_sync(spi, &message);
- if (status == 0)
- status = message.status;
- else
- return status;
-
- *data = chip->rx_buf[1];
+ if (status == 0)
+ *data = chip->rx_buf[1];
return status;
}
@@ -125,9 +121,7 @@ static int max6902_get_datetime(struct device *dev, struct rtc_time *dt)
/* do the i/o */
status = spi_sync(spi, &message);
- if (status == 0)
- status = message.status;
- else
+ if (status)
return status;
/* The chip sends data in this order:
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 5e083d1f57e..15a5789b773 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -472,11 +472,11 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto unregister_dev;
- add_disk(dev_info->gd);
-
blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
+ add_disk(dev_info->gd);
+
switch (dev_info->segment_type) {
case SEG_TYPE_SR:
case SEG_TYPE_ER:
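
The dcssblk reorder closes a small race: add_disk() makes the disk visible to userspace immediately, so the request queue's make_request function and hardsect size must be in place before the disk appears, not after. The shape of the fix:

    blk_queue_make_request(queue, dcssblk_make_request);
    blk_queue_hardsect_size(queue, 4096);
    add_disk(gd);   /* only publish the disk once the queue is configured */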
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 6db31089d2d..c3df2cd009a 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -451,6 +451,7 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
break;
case -ENXIO:
case -ENOMEM:
+ case -EIO:
/* These should abort looping */
break;
default:
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 2f6bf462425..156f3f9786b 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -113,6 +113,7 @@ __ccw_device_sense_id_start(struct ccw_device *cdev)
{
struct subchannel *sch;
struct ccw1 *ccw;
+ int ret;
sch = to_subchannel(cdev->dev.parent);
/* Setup sense channel program. */
@@ -124,9 +125,25 @@ __ccw_device_sense_id_start(struct ccw_device *cdev)
/* Reset device status. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
- cdev->private->flags.intretry = 0;
- return cio_start(sch, ccw, LPM_ANYPATH);
+ /* Try on every path. */
+ ret = -ENODEV;
+ while (cdev->private->imask != 0) {
+ if ((sch->opm & cdev->private->imask) != 0 &&
+ cdev->private->iretry > 0) {
+ cdev->private->iretry--;
+ /* Reset internal retry indication. */
+ cdev->private->flags.intretry = 0;
+ ret = cio_start (sch, cdev->private->iccws,
+ cdev->private->imask);
+ /* ret is 0, -EBUSY, -EACCES or -ENODEV */
+ if (ret != -EACCES)
+ return ret;
+ }
+ cdev->private->imask >>= 1;
+ cdev->private->iretry = 5;
+ }
+ return ret;
}
void
@@ -136,7 +153,8 @@ ccw_device_sense_id_start(struct ccw_device *cdev)
memset (&cdev->private->senseid, 0, sizeof (struct senseid));
cdev->private->senseid.cu_type = 0xFFFF;
- cdev->private->iretry = 3;
+ cdev->private->imask = 0x80;
+ cdev->private->iretry = 5;
ret = __ccw_device_sense_id_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_id_done(cdev, ret);
@@ -252,13 +270,14 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
ccw_device_sense_id_done(cdev, ret);
break;
case -EACCES: /* channel is not operational. */
+ sch->lpm &= ~cdev->private->imask;
+ cdev->private->imask >>= 1;
+ cdev->private->iretry = 5;
+ /* fall through. */
case -EAGAIN: /* try again. */
- cdev->private->iretry--;
- if (cdev->private->iretry > 0) {
- ret = __ccw_device_sense_id_start(cdev);
- if (ret == 0 || ret == -EBUSY)
- break;
- }
+ ret = __ccw_device_sense_id_start(cdev);
+ if (ret == 0 || ret == -EBUSY)
+ break;
/* fall through. */
default: /* Sense ID failed. Try asking VM. */
if (MACHINE_IS_VM) {
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index b3b6f654365..97adc701a81 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -2802,7 +2802,6 @@ void ctc_init_netdevice(struct net_device * dev)
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 100;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- SET_MODULE_OWNER(dev);
}
diff --git a/drivers/spi/at25.c b/drivers/spi/at25.c
index e007833cca5..290dbe99647 100644
--- a/drivers/spi/at25.c
+++ b/drivers/spi/at25.c
@@ -21,6 +21,13 @@
#include <linux/spi/eeprom.h>
+/*
+ * NOTE: this is an *EEPROM* driver. The vagaries of product naming
+ * mean that some AT25 products are EEPROMs, and others are FLASH.
+ * Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver,
+ * not this one!
+ */
+
struct at25_data {
struct spi_device *spi;
struct mutex lock;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b31f4431849..93e9de46977 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -541,10 +541,7 @@ static void spi_complete(void *arg)
* Also, the caller is guaranteeing that the memory associated with the
* message will not be freed before this call returns.
*
- * The return value is a negative error code if the message could not be
- * submitted, else zero. When the value is zero, then message->status is
- * also defined; it's the completion code for the transfer, either zero
- * or a negative error code from the controller driver.
+ * It returns zero on success, else a negative error code.
*/
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
@@ -554,8 +551,10 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
message->complete = spi_complete;
message->context = &done;
status = spi_async(spi, message);
- if (status == 0)
+ if (status == 0) {
wait_for_completion(&done);
+ status = message->status;
+ }
message->context = NULL;
return status;
}
@@ -589,7 +588,7 @@ int spi_write_then_read(struct spi_device *spi,
const u8 *txbuf, unsigned n_tx,
u8 *rxbuf, unsigned n_rx)
{
- static DECLARE_MUTEX(lock);
+ static DEFINE_MUTEX(lock);
int status;
struct spi_message message;
@@ -615,7 +614,7 @@ int spi_write_then_read(struct spi_device *spi,
}
/* ... unless someone else is using the pre-allocated buffer */
- if (down_trylock(&lock)) {
+ if (!mutex_trylock(&lock)) {
local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
if (!local_buf)
return -ENOMEM;
@@ -628,13 +627,11 @@ int spi_write_then_read(struct spi_device *spi,
/* do the i/o */
status = spi_sync(spi, &message);
- if (status == 0) {
+ if (status == 0)
memcpy(rxbuf, x[1].rx_buf, n_rx);
- status = message.status;
- }
if (x[0].tx_buf == buf)
- up(&lock);
+ mutex_unlock(&lock);
else
kfree(local_buf);
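
Besides folding message->status into spi_sync()'s return value, the spi.c hunks retire a semaphore-as-mutex: DECLARE_MUTEX() defined a counting semaphore, and such users were being moved to the real mutex API. Note the trylock polarity flips, since down_trylock() returns nonzero on failure while mutex_trylock() returns nonzero on success:

    static DEFINE_MUTEX(lock);

    if (!mutex_trylock(&lock)) {    /* contended: fall back to a kmalloc'd buffer */
            local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
            if (!local_buf)
                    return -ENOMEM;
    }
    /* ... do the transfer ... */
    mutex_unlock(&lock);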
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 2ef11bb70b2..22697b81220 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1,17 +1,22 @@
/*
- * File: drivers/spi/bfin5xx_spi.c
- * Based on: N/A
- * Author: Luke Yang (Analog Devices Inc.)
+ * File: drivers/spi/bfin5xx_spi.c
+ * Maintainer:
+ * Bryan Wu <bryan.wu@analog.com>
+ * Original Author:
+ * Luke Yang (Analog Devices Inc.)
*
- * Created: March. 10th 2006
- * Description: SPI controller driver for Blackfin 5xx
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * Created: March. 10th 2006
+ * Description: SPI controller driver for Blackfin BF5xx
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* Modified:
* March 10, 2006 bfin5xx_spi.c Created. (Luke Yang)
* August 7, 2006 added full duplex mode (Axel Weiss & Luke Yang)
+ * July 17, 2007 add support for BF54x SPI0 controller (Bryan Wu)
+ * July 30, 2007 add platform_resource interface to support multi-port
+ * SPI controller (Bryan Wu)
*
- * Copyright 2004-2006 Analog Devices Inc.
+ * Copyright 2004-2007 Analog Devices Inc.
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,50 +36,39 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/delay.h>
#include <asm/dma.h>
-
+#include <asm/portmux.h>
#include <asm/bfin5xx_spi.h>
-MODULE_AUTHOR("Luke Yang");
-MODULE_DESCRIPTION("Blackfin 5xx SPI Contoller");
-MODULE_LICENSE("GPL");
+#define DRV_NAME "bfin-spi"
+#define DRV_AUTHOR "Bryan Wu, Luke Yang"
+#define DRV_DESC "Blackfin BF5xx on-chip SPI Controller Driver"
+#define DRV_VERSION "1.0"
-#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
-#define DEFINE_SPI_REG(reg, off) \
-static inline u16 read_##reg(void) \
- { return *(volatile unsigned short*)(SPI0_REGBASE + off); } \
-static inline void write_##reg(u16 v) \
- {*(volatile unsigned short*)(SPI0_REGBASE + off) = v;\
- SSYNC();}
+#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07) == 0)
-DEFINE_SPI_REG(CTRL, 0x00)
-DEFINE_SPI_REG(FLAG, 0x04)
-DEFINE_SPI_REG(STAT, 0x08)
-DEFINE_SPI_REG(TDBR, 0x0C)
-DEFINE_SPI_REG(RDBR, 0x10)
-DEFINE_SPI_REG(BAUD, 0x14)
-DEFINE_SPI_REG(SHAW, 0x18)
-#define START_STATE ((void*)0)
-#define RUNNING_STATE ((void*)1)
-#define DONE_STATE ((void*)2)
-#define ERROR_STATE ((void*)-1)
-#define QUEUE_RUNNING 0
-#define QUEUE_STOPPED 1
-int dma_requested;
+#define START_STATE ((void *)0)
+#define RUNNING_STATE ((void *)1)
+#define DONE_STATE ((void *)2)
+#define ERROR_STATE ((void *)-1)
+#define QUEUE_RUNNING 0
+#define QUEUE_STOPPED 1
struct driver_data {
/* Driver model hookup */
@@ -83,6 +77,12 @@ struct driver_data {
/* SPI framework hookup */
struct spi_master *master;
+ /* Regs base of SPI controller */
+ void __iomem *regs_base;
+
+ /* Pin request list */
+ u16 *pin_req;
+
/* BFIN hookup */
struct bfin5xx_spi_master *master_info;
@@ -107,12 +107,18 @@ struct driver_data {
void *tx_end;
void *rx;
void *rx_end;
+
+ /* DMA stuffs */
+ int dma_channel;
int dma_mapped;
+ int dma_requested;
dma_addr_t rx_dma;
dma_addr_t tx_dma;
+
size_t rx_map_len;
size_t tx_map_len;
u8 n_bytes;
+ int cs_change;
void (*write) (struct driver_data *);
void (*read) (struct driver_data *);
void (*duplex) (struct driver_data *);
@@ -129,28 +135,40 @@ struct chip_data {
u8 enable_dma;
u8 bits_per_word; /* 8 or 16 */
u8 cs_change_per_word;
- u8 cs_chg_udelay;
+ u16 cs_chg_udelay; /* Some devices require > 255usec delay */
void (*write) (struct driver_data *);
void (*read) (struct driver_data *);
void (*duplex) (struct driver_data *);
};
+#define DEFINE_SPI_REG(reg, off) \
+static inline u16 read_##reg(struct driver_data *drv_data) \
+ { return bfin_read16(drv_data->regs_base + off); } \
+static inline void write_##reg(struct driver_data *drv_data, u16 v) \
+ { bfin_write16(drv_data->regs_base + off, v); }
+
+DEFINE_SPI_REG(CTRL, 0x00)
+DEFINE_SPI_REG(FLAG, 0x04)
+DEFINE_SPI_REG(STAT, 0x08)
+DEFINE_SPI_REG(TDBR, 0x0C)
+DEFINE_SPI_REG(RDBR, 0x10)
+DEFINE_SPI_REG(BAUD, 0x14)
+DEFINE_SPI_REG(SHAW, 0x18)
+
static void bfin_spi_enable(struct driver_data *drv_data)
{
u16 cr;
- cr = read_CTRL();
- write_CTRL(cr | BIT_CTL_ENABLE);
- SSYNC();
+ cr = read_CTRL(drv_data);
+ write_CTRL(drv_data, (cr | BIT_CTL_ENABLE));
}
static void bfin_spi_disable(struct driver_data *drv_data)
{
u16 cr;
- cr = read_CTRL();
- write_CTRL(cr & (~BIT_CTL_ENABLE));
- SSYNC();
+ cr = read_CTRL(drv_data);
+ write_CTRL(drv_data, (cr & (~BIT_CTL_ENABLE)));
}
/* Calculate the SPI_BAUD register value based on input HZ */
@@ -170,83 +188,71 @@ static int flush(struct driver_data *drv_data)
unsigned long limit = loops_per_jiffy << 1;
/* wait for stop and clear stat */
- while (!(read_STAT() & BIT_STAT_SPIF) && limit--)
- continue;
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && limit--)
+ cpu_relax();
- write_STAT(BIT_STAT_CLR);
+ write_STAT(drv_data, BIT_STAT_CLR);
return limit;
}
+/* Chip select operation functions for cs_change flag */
+static void cs_active(struct driver_data *drv_data, struct chip_data *chip)
+{
+ u16 flag = read_FLAG(drv_data);
+
+ flag |= chip->flag;
+ flag &= ~(chip->flag << 8);
+
+ write_FLAG(drv_data, flag);
+}
+
+static void cs_deactive(struct driver_data *drv_data, struct chip_data *chip)
+{
+ u16 flag = read_FLAG(drv_data);
+
+ flag |= (chip->flag << 8);
+
+ write_FLAG(drv_data, flag);
+
+ /* Move delay here for consistency */
+ if (chip->cs_chg_udelay)
+ udelay(chip->cs_chg_udelay);
+}
+
+#define MAX_SPI_SSEL 7
+
/* stop controller and re-config current chip*/
-static void restore_state(struct driver_data *drv_data)
+static int restore_state(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
+ int ret = 0;
/* Clear status and disable clock */
- write_STAT(BIT_STAT_CLR);
+ write_STAT(drv_data, BIT_STAT_CLR);
bfin_spi_disable(drv_data);
dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");
-#if defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537)
- dev_dbg(&drv_data->pdev->dev,
- "chip select number is %d\n", chip->chip_select_num);
-
- switch (chip->chip_select_num) {
- case 1:
- bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3c00);
- SSYNC();
- break;
-
- case 2:
- case 3:
- bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJSE_SPI);
- SSYNC();
- bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800);
- SSYNC();
- break;
-
- case 4:
- bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS4E_SPI);
- SSYNC();
- bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3840);
- SSYNC();
- break;
+ /* Load the registers */
+ write_CTRL(drv_data, chip->ctl_reg);
+ write_BAUD(drv_data, chip->baud);
- case 5:
- bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS5E_SPI);
- SSYNC();
- bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3820);
- SSYNC();
- break;
+ bfin_spi_enable(drv_data);
+ cs_active(drv_data, chip);
- case 6:
- bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS6E_SPI);
- SSYNC();
- bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3810);
- SSYNC();
- break;
+ if (ret)
+ dev_dbg(&drv_data->pdev->dev,
+ ": request chip select number %d failed\n",
+ chip->chip_select_num);
- case 7:
- bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJCE_SPI);
- SSYNC();
- bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800);
- SSYNC();
- break;
- }
-#endif
-
- /* Load the registers */
- write_CTRL(chip->ctl_reg);
- write_BAUD(chip->baud);
- write_FLAG(chip->flag);
+ return ret;
}
/* used to kick off transfer in rx mode */
-static unsigned short dummy_read(void)
+static unsigned short dummy_read(struct driver_data *drv_data)
{
unsigned short tmp;
- tmp = read_RDBR();
+ tmp = read_RDBR(drv_data);
return tmp;
}
@@ -255,9 +261,9 @@ static void null_writer(struct driver_data *drv_data)
u8 n_bytes = drv_data->n_bytes;
while (drv_data->tx < drv_data->tx_end) {
- write_TDBR(0);
- while ((read_STAT() & BIT_STAT_TXS))
- continue;
+ write_TDBR(drv_data, 0);
+ while ((read_STAT(drv_data) & BIT_STAT_TXS))
+ cpu_relax();
drv_data->tx += n_bytes;
}
}
@@ -265,75 +271,78 @@ static void null_writer(struct driver_data *drv_data)
static void null_reader(struct driver_data *drv_data)
{
u8 n_bytes = drv_data->n_bytes;
- dummy_read();
+ dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
- while (!(read_STAT() & BIT_STAT_RXS))
- continue;
- dummy_read();
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ dummy_read(drv_data);
drv_data->rx += n_bytes;
}
}
static void u8_writer(struct driver_data *drv_data)
{
- dev_dbg(&drv_data->pdev->dev,
- "cr8-s is 0x%x\n", read_STAT());
+ dev_dbg(&drv_data->pdev->dev,
+ "cr8-s is 0x%x\n", read_STAT(drv_data));
+
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
+
while (drv_data->tx < drv_data->tx_end) {
- write_TDBR(*(u8 *) (drv_data->tx));
- while (read_STAT() & BIT_STAT_TXS)
- continue;
+ write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
+ while (read_STAT(drv_data) & BIT_STAT_TXS)
+ cpu_relax();
++drv_data->tx;
}
-
- /* poll for SPI completion before returning */
- while (!(read_STAT() & BIT_STAT_SPIF))
- continue;
}
static void u8_cs_chg_writer(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
+
while (drv_data->tx < drv_data->tx_end) {
- write_FLAG(chip->flag);
- SSYNC();
-
- write_TDBR(*(u8 *) (drv_data->tx));
- while (read_STAT() & BIT_STAT_TXS)
- continue;
- while (!(read_STAT() & BIT_STAT_SPIF))
- continue;
- write_FLAG(0xFF00 | chip->flag);
- SSYNC();
- if (chip->cs_chg_udelay)
- udelay(chip->cs_chg_udelay);
+ cs_active(drv_data, chip);
+
+ write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
+ while (read_STAT(drv_data) & BIT_STAT_TXS)
+ cpu_relax();
+
+ cs_deactive(drv_data, chip);
+
++drv_data->tx;
}
- write_FLAG(0xFF00);
- SSYNC();
}
static void u8_reader(struct driver_data *drv_data)
{
- dev_dbg(&drv_data->pdev->dev,
- "cr-8 is 0x%x\n", read_STAT());
+ dev_dbg(&drv_data->pdev->dev,
+ "cr-8 is 0x%x\n", read_STAT(drv_data));
+
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
/* clear TDBR buffer before read (else it will be shifted out) */
- write_TDBR(0xFFFF);
+ write_TDBR(drv_data, 0xFFFF);
- dummy_read();
+ dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end - 1) {
- while (!(read_STAT() & BIT_STAT_RXS))
- continue;
- *(u8 *) (drv_data->rx) = read_RDBR();
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
++drv_data->rx;
}
- while (!(read_STAT() & BIT_STAT_RXS))
- continue;
- *(u8 *) (drv_data->rx) = read_SHAW();
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u8 *) (drv_data->rx) = read_SHAW(drv_data);
++drv_data->rx;
}
@@ -341,36 +350,47 @@ static void u8_cs_chg_reader(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- while (drv_data->rx < drv_data->rx_end) {
- write_FLAG(chip->flag);
- SSYNC();
-
- read_RDBR(); /* kick off */
- while (!(read_STAT() & BIT_STAT_RXS))
- continue;
- while (!(read_STAT() & BIT_STAT_SPIF))
- continue;
- *(u8 *) (drv_data->rx) = read_SHAW();
- write_FLAG(0xFF00 | chip->flag);
- SSYNC();
- if (chip->cs_chg_udelay)
- udelay(chip->cs_chg_udelay);
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
+
+ /* clear TDBR buffer before read (else it will be shifted out) */
+ write_TDBR(drv_data, 0xFFFF);
+
+ cs_active(drv_data, chip);
+ dummy_read(drv_data);
+
+ while (drv_data->rx < drv_data->rx_end - 1) {
+ cs_deactive(drv_data, chip);
+
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ cs_active(drv_data, chip);
+ *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
++drv_data->rx;
}
- write_FLAG(0xFF00);
- SSYNC();
+ cs_deactive(drv_data, chip);
+
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u8 *) (drv_data->rx) = read_SHAW(drv_data);
+ ++drv_data->rx;
}
static void u8_duplex(struct driver_data *drv_data)
{
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
+
/* in duplex mode, clk is triggered by the write to TDBR */
while (drv_data->rx < drv_data->rx_end) {
- write_TDBR(*(u8 *) (drv_data->tx));
- while (!(read_STAT() & BIT_STAT_SPIF))
- continue;
- while (!(read_STAT() & BIT_STAT_RXS))
- continue;
- *(u8 *) (drv_data->rx) = read_RDBR();
+ write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
+ while (read_STAT(drv_data) & BIT_STAT_TXS)
+ cpu_relax();
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
++drv_data->rx;
++drv_data->tx;
}
@@ -380,83 +400,89 @@ static void u8_cs_chg_duplex(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
+
while (drv_data->rx < drv_data->rx_end) {
- write_FLAG(chip->flag);
- SSYNC();
-
- write_TDBR(*(u8 *) (drv_data->tx));
- while (!(read_STAT() & BIT_STAT_SPIF))
- continue;
- while (!(read_STAT() & BIT_STAT_RXS))
- continue;
- *(u8 *) (drv_data->rx) = read_RDBR();
- write_FLAG(0xFF00 | chip->flag);
- SSYNC();
- if (chip->cs_chg_udelay)
- udelay(chip->cs_chg_udelay);
+ cs_active(drv_data, chip);
+
+ write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
+ while (read_STAT(drv_data) & BIT_STAT_TXS)
+ cpu_relax();
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
+
+ cs_deactive(drv_data, chip);
+
++drv_data->rx;
++drv_data->tx;
}
- write_FLAG(0xFF00);
- SSYNC();
}
static void u16_writer(struct driver_data *drv_data)
{
- dev_dbg(&drv_data->pdev->dev,
- "cr16 is 0x%x\n", read_STAT());
+ dev_dbg(&drv_data->pdev->dev,
+ "cr16 is 0x%x\n", read_STAT(drv_data));
+
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
while (drv_data->tx < drv_data->tx_end) {
- write_TDBR(*(u16 *) (drv_data->tx));
- while ((read_STAT() & BIT_STAT_TXS))
- continue;
+ write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
+ while ((read_STAT(drv_data) & BIT_STAT_TXS))
+ cpu_relax();
drv_data->tx += 2;
}
-
- /* poll for SPI completion before returning */
- while (!(read_STAT() & BIT_STAT_SPIF))
- continue;
}
static void u16_cs_chg_writer(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
+
while (drv_data->tx < drv_data->tx_end) {
- write_FLAG(chip->flag);
- SSYNC();
-
- write_TDBR(*(u16 *) (drv_data->tx));
- while ((read_STAT() & BIT_STAT_TXS))
- continue;
- while (!(read_STAT() & BIT_STAT_SPIF))
- continue;
- write_FLAG(0xFF00 | chip->flag);
- SSYNC();
- if (chip->cs_chg_udelay)
- udelay(chip->cs_chg_udelay);
+ cs_active(drv_data, chip);
+
+ write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
+ while ((read_STAT(drv_data) & BIT_STAT_TXS))
+ cpu_relax();
+
+ cs_deactive(drv_data, chip);
+
drv_data->tx += 2;
}
- write_FLAG(0xFF00);
- SSYNC();
}
static void u16_reader(struct driver_data *drv_data)
{
dev_dbg(&drv_data->pdev->dev,
- "cr-16 is 0x%x\n", read_STAT());
- dummy_read();
+ "cr-16 is 0x%x\n", read_STAT(drv_data));
+
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
+
+ /* clear TDBR buffer before read (else it will be shifted out) */
+ write_TDBR(drv_data, 0xFFFF);
+
+ dummy_read(drv_data);
while (drv_data->rx < (drv_data->rx_end - 2)) {
- while (!(read_STAT() & BIT_STAT_RXS))
- continue;
- *(u16 *) (drv_data->rx) = read_RDBR();
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
drv_data->rx += 2;
}
- while (!(read_STAT() & BIT_STAT_RXS))
- continue;
- *(u16 *) (drv_data->rx) = read_SHAW();
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u16 *) (drv_data->rx) = read_SHAW(drv_data);
drv_data->rx += 2;
}
@@ -464,36 +490,47 @@ static void u16_cs_chg_reader(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- while (drv_data->rx < drv_data->rx_end) {
- write_FLAG(chip->flag);
- SSYNC();
-
- read_RDBR(); /* kick off */
- while (!(read_STAT() & BIT_STAT_RXS))
- continue;
- while (!(read_STAT() & BIT_STAT_SPIF))
- continue;
- *(u16 *) (drv_data->rx) = read_SHAW();
- write_FLAG(0xFF00 | chip->flag);
- SSYNC();
- if (chip->cs_chg_udelay)
- udelay(chip->cs_chg_udelay);
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
+
+ /* clear TDBR buffer before read (else it will be shifted out) */
+ write_TDBR(drv_data, 0xFFFF);
+
+ cs_active(drv_data, chip);
+ dummy_read(drv_data);
+
+ while (drv_data->rx < drv_data->rx_end - 2) {
+ cs_deactive(drv_data, chip);
+
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ cs_active(drv_data, chip);
+ *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
drv_data->rx += 2;
}
- write_FLAG(0xFF00);
- SSYNC();
+ cs_deactive(drv_data, chip);
+
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u16 *) (drv_data->rx) = read_SHAW(drv_data);
+ drv_data->rx += 2;
}
static void u16_duplex(struct driver_data *drv_data)
{
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
+
/* in duplex mode, clk is triggered by the write to TDBR */
while (drv_data->tx < drv_data->tx_end) {
- write_TDBR(*(u16 *) (drv_data->tx));
- while (!(read_STAT() & BIT_STAT_SPIF))
- continue;
- while (!(read_STAT() & BIT_STAT_RXS))
- continue;
- *(u16 *) (drv_data->rx) = read_RDBR();
+ write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
+ while (read_STAT(drv_data) & BIT_STAT_TXS)
+ cpu_relax();
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
drv_data->rx += 2;
drv_data->tx += 2;
}
@@ -503,25 +540,25 @@ static void u16_cs_chg_duplex(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
+
while (drv_data->tx < drv_data->tx_end) {
- write_FLAG(chip->flag);
- SSYNC();
-
- write_TDBR(*(u16 *) (drv_data->tx));
- while (!(read_STAT() & BIT_STAT_SPIF))
- continue;
- while (!(read_STAT() & BIT_STAT_RXS))
- continue;
- *(u16 *) (drv_data->rx) = read_RDBR();
- write_FLAG(0xFF00 | chip->flag);
- SSYNC();
- if (chip->cs_chg_udelay)
- udelay(chip->cs_chg_udelay);
+ cs_active(drv_data, chip);
+
+ write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
+ while (read_STAT(drv_data) & BIT_STAT_TXS)
+ cpu_relax();
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
+
+ cs_deactive(drv_data, chip);
+
drv_data->rx += 2;
drv_data->tx += 2;
}
- write_FLAG(0xFF00);
- SSYNC();
}
/* test if there is more transfer to be done */
@@ -546,6 +583,7 @@ static void *next_transfer(struct driver_data *drv_data)
*/
static void giveback(struct driver_data *drv_data)
{
+ struct chip_data *chip = drv_data->cur_chip;
struct spi_transfer *last_transfer;
unsigned long flags;
struct spi_message *msg;
@@ -565,10 +603,13 @@ static void giveback(struct driver_data *drv_data)
/* disable the chip select signal, but do not stop spi in autobuffer mode */
if (drv_data->tx_dma != 0xFFFF) {
- write_FLAG(0xFF00);
+ cs_deactive(drv_data, chip);
bfin_spi_disable(drv_data);
}
+ if (!drv_data->cs_change)
+ cs_deactive(drv_data, chip);
+
if (msg->complete)
msg->complete(msg->context);
}
@@ -576,14 +617,15 @@ static void giveback(struct driver_data *drv_data)
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
struct driver_data *drv_data = (struct driver_data *)dev_id;
+ struct chip_data *chip = drv_data->cur_chip;
struct spi_message *msg = drv_data->cur_msg;
dev_dbg(&drv_data->pdev->dev, "in dma_irq_handler\n");
- clear_dma_irqstat(CH_SPI);
+ clear_dma_irqstat(drv_data->dma_channel);
/* Wait for DMA to complete */
- while (get_dma_curr_irqstat(CH_SPI) & DMA_RUN)
- continue;
+ while (get_dma_curr_irqstat(drv_data->dma_channel) & DMA_RUN)
+ cpu_relax();
/*
* wait for the last transaction to be shifted out. The HRM states:
@@ -592,18 +634,19 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
* register until it goes low for 2 successive reads
*/
if (drv_data->tx != NULL) {
- while ((bfin_read_SPI_STAT() & TXS) ||
- (bfin_read_SPI_STAT() & TXS))
- continue;
+ while ((read_STAT(drv_data) & TXS) ||
+ (read_STAT(drv_data) & TXS))
+ cpu_relax();
}
- while (!(bfin_read_SPI_STAT() & SPIF))
- continue;
-
- bfin_spi_disable(drv_data);
+ while (!(read_STAT(drv_data) & SPIF))
+ cpu_relax();
msg->actual_length += drv_data->len_in_bytes;
+ if (drv_data->cs_change)
+ cs_deactive(drv_data, chip);
+
/* Move to next transfer */
msg->state = next_transfer(drv_data);
@@ -613,8 +656,8 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
/* free the irq handler before next transfer */
dev_dbg(&drv_data->pdev->dev,
"disable dma channel irq%d\n",
- CH_SPI);
- dma_disable_irq(CH_SPI);
+ drv_data->dma_channel);
+ dma_disable_irq(drv_data->dma_channel);
return IRQ_HANDLED;
}
@@ -690,31 +733,67 @@ static void pump_transfers(unsigned long data)
drv_data->rx_dma = transfer->rx_dma;
drv_data->tx_dma = transfer->tx_dma;
drv_data->len_in_bytes = transfer->len;
+ drv_data->cs_change = transfer->cs_change;
+
+ /* Bits per word setup */
+ switch (transfer->bits_per_word) {
+ case 8:
+ drv_data->n_bytes = 1;
+ width = CFG_SPI_WORDSIZE8;
+ drv_data->read = chip->cs_change_per_word ?
+ u8_cs_chg_reader : u8_reader;
+ drv_data->write = chip->cs_change_per_word ?
+ u8_cs_chg_writer : u8_writer;
+ drv_data->duplex = chip->cs_change_per_word ?
+ u8_cs_chg_duplex : u8_duplex;
+ break;
+
+ case 16:
+ drv_data->n_bytes = 2;
+ width = CFG_SPI_WORDSIZE16;
+ drv_data->read = chip->cs_change_per_word ?
+ u16_cs_chg_reader : u16_reader;
+ drv_data->write = chip->cs_change_per_word ?
+ u16_cs_chg_writer : u16_writer;
+ drv_data->duplex = chip->cs_change_per_word ?
+ u16_cs_chg_duplex : u16_duplex;
+ break;
+
+ default:
+ /* No change, the same as default setting */
+ drv_data->n_bytes = chip->n_bytes;
+ width = chip->width;
+ drv_data->write = drv_data->tx ? chip->write : null_writer;
+ drv_data->read = drv_data->rx ? chip->read : null_reader;
+ drv_data->duplex = chip->duplex ? chip->duplex : null_writer;
+ break;
+ }
+ cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD));
+ cr |= (width << 8);
+ write_CTRL(drv_data, cr);
- width = chip->width;
if (width == CFG_SPI_WORDSIZE16) {
drv_data->len = (transfer->len) >> 1;
} else {
drv_data->len = transfer->len;
}
- drv_data->write = drv_data->tx ? chip->write : null_writer;
- drv_data->read = drv_data->rx ? chip->read : null_reader;
- drv_data->duplex = chip->duplex ? chip->duplex : null_writer;
- dev_dbg(&drv_data->pdev->dev,
- "transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n",
- drv_data->write, chip->write, null_writer);
+ dev_dbg(&drv_data->pdev->dev, "transfer: ",
+ "drv_data->write is %p, chip->write is %p, null_wr is %p\n",
+ drv_data->write, chip->write, null_writer);
/* speed and width have been set on a per-message basis */
message->state = RUNNING_STATE;
dma_config = 0;
- /* restore spi status for each spi transfer */
- if (transfer->speed_hz) {
- write_BAUD(hz_to_spi_baud(transfer->speed_hz));
- } else {
- write_BAUD(chip->baud);
- }
- write_FLAG(chip->flag);
+ /* Speed setup (the value has already been checked, so it is valid) */
+ if (transfer->speed_hz)
+ write_BAUD(drv_data, hz_to_spi_baud(transfer->speed_hz));
+ else
+ write_BAUD(drv_data, chip->baud);
+
+ write_STAT(drv_data, BIT_STAT_CLR);
+ cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD));
+ cs_active(drv_data, chip);
dev_dbg(&drv_data->pdev->dev,
"now pumping a transfer: width is %d, len is %d\n",
@@ -727,25 +806,25 @@ static void pump_transfers(unsigned long data)
*/
if (drv_data->cur_chip->enable_dma && drv_data->len > 6) {
- write_STAT(BIT_STAT_CLR);
- disable_dma(CH_SPI);
- clear_dma_irqstat(CH_SPI);
+ disable_dma(drv_data->dma_channel);
+ clear_dma_irqstat(drv_data->dma_channel);
bfin_spi_disable(drv_data);
/* config dma channel */
dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n");
if (width == CFG_SPI_WORDSIZE16) {
- set_dma_x_count(CH_SPI, drv_data->len);
- set_dma_x_modify(CH_SPI, 2);
+ set_dma_x_count(drv_data->dma_channel, drv_data->len);
+ set_dma_x_modify(drv_data->dma_channel, 2);
dma_width = WDSIZE_16;
} else {
- set_dma_x_count(CH_SPI, drv_data->len);
- set_dma_x_modify(CH_SPI, 1);
+ set_dma_x_count(drv_data->dma_channel, drv_data->len);
+ set_dma_x_modify(drv_data->dma_channel, 1);
dma_width = WDSIZE_8;
}
- /* set transfer width,direction. And enable spi */
- cr = (read_CTRL() & (~BIT_CTL_TIMOD));
+ /* poll for SPI completion before start */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
/* dirty hack for autobuffer DMA mode */
if (drv_data->tx_dma == 0xFFFF) {
@@ -755,13 +834,18 @@ static void pump_transfers(unsigned long data)
/* no irq in autobuffer mode */
dma_config =
(DMAFLOW_AUTO | RESTART | dma_width | DI_EN);
- set_dma_config(CH_SPI, dma_config);
- set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx);
- enable_dma(CH_SPI);
- write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) |
- (CFG_SPI_ENABLE << 14));
-
- /* just return here, there can only be one transfer in this mode */
+ set_dma_config(drv_data->dma_channel, dma_config);
+ set_dma_start_addr(drv_data->dma_channel,
+ (unsigned long)drv_data->tx);
+ enable_dma(drv_data->dma_channel);
+
+ /* start SPI transfer */
+ write_CTRL(drv_data,
+ (cr | CFG_SPI_DMAWRITE | BIT_CTL_ENABLE));
+
+ /* just return here, there can only be one transfer
+ * in this mode
+ */
message->status = 0;
giveback(drv_data);
return;
@@ -772,58 +856,51 @@ static void pump_transfers(unsigned long data)
/* set transfer mode, and enable SPI */
dev_dbg(&drv_data->pdev->dev, "doing DMA in.\n");
- /* disable SPI before write to TDBR */
- write_CTRL(cr & ~BIT_CTL_ENABLE);
-
/* clear tx reg so former data is not shifted out */
- write_TDBR(0xFF);
+ write_TDBR(drv_data, 0xFFFF);
- set_dma_x_count(CH_SPI, drv_data->len);
+ set_dma_x_count(drv_data->dma_channel, drv_data->len);
/* start dma */
- dma_enable_irq(CH_SPI);
+ dma_enable_irq(drv_data->dma_channel);
dma_config = (WNR | RESTART | dma_width | DI_EN);
- set_dma_config(CH_SPI, dma_config);
- set_dma_start_addr(CH_SPI, (unsigned long)drv_data->rx);
- enable_dma(CH_SPI);
+ set_dma_config(drv_data->dma_channel, dma_config);
+ set_dma_start_addr(drv_data->dma_channel,
+ (unsigned long)drv_data->rx);
+ enable_dma(drv_data->dma_channel);
+
+ /* start SPI transfer */
+ write_CTRL(drv_data,
+ (cr | CFG_SPI_DMAREAD | BIT_CTL_ENABLE));
- cr |=
- CFG_SPI_DMAREAD | (width << 8) | (CFG_SPI_ENABLE <<
- 14);
- /* set transfer mode, and enable SPI */
- write_CTRL(cr);
} else if (drv_data->tx != NULL) {
dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n");
/* start dma */
- dma_enable_irq(CH_SPI);
+ dma_enable_irq(drv_data->dma_channel);
dma_config = (RESTART | dma_width | DI_EN);
- set_dma_config(CH_SPI, dma_config);
- set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx);
- enable_dma(CH_SPI);
-
- write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) |
- (CFG_SPI_ENABLE << 14));
-
+ set_dma_config(drv_data->dma_channel, dma_config);
+ set_dma_start_addr(drv_data->dma_channel,
+ (unsigned long)drv_data->tx);
+ enable_dma(drv_data->dma_channel);
+
+ /* start SPI transfer */
+ write_CTRL(drv_data,
+ (cr | CFG_SPI_DMAWRITE | BIT_CTL_ENABLE));
}
} else {
/* IO mode write then read */
dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
- write_STAT(BIT_STAT_CLR);
-
if (drv_data->tx != NULL && drv_data->rx != NULL) {
/* full duplex mode */
BUG_ON((drv_data->tx_end - drv_data->tx) !=
(drv_data->rx_end - drv_data->rx));
- cr = (read_CTRL() & (~BIT_CTL_TIMOD));
- cr |= CFG_SPI_WRITE | (width << 8) |
- (CFG_SPI_ENABLE << 14);
dev_dbg(&drv_data->pdev->dev,
"IO duplex: cr is 0x%x\n", cr);
- write_CTRL(cr);
- SSYNC();
+ /* set SPI transfer mode */
+ write_CTRL(drv_data, (cr | CFG_SPI_WRITE));
drv_data->duplex(drv_data);
@@ -831,14 +908,11 @@ static void pump_transfers(unsigned long data)
tranf_success = 0;
} else if (drv_data->tx != NULL) {
/* write only half duplex */
- cr = (read_CTRL() & (~BIT_CTL_TIMOD));
- cr |= CFG_SPI_WRITE | (width << 8) |
- (CFG_SPI_ENABLE << 14);
- dev_dbg(&drv_data->pdev->dev,
+ dev_dbg(&drv_data->pdev->dev,
"IO write: cr is 0x%x\n", cr);
- write_CTRL(cr);
- SSYNC();
+ /* set SPI transfer mode */
+ write_CTRL(drv_data, (cr | CFG_SPI_WRITE));
drv_data->write(drv_data);
@@ -846,14 +920,11 @@ static void pump_transfers(unsigned long data)
tranf_success = 0;
} else if (drv_data->rx != NULL) {
/* read only half duplex */
- cr = (read_CTRL() & (~BIT_CTL_TIMOD));
- cr |= CFG_SPI_READ | (width << 8) |
- (CFG_SPI_ENABLE << 14);
- dev_dbg(&drv_data->pdev->dev,
+ dev_dbg(&drv_data->pdev->dev,
"IO read: cr is 0x%x\n", cr);
- write_CTRL(cr);
- SSYNC();
+ /* set SPI transfer mode */
+ write_CTRL(drv_data, (cr | CFG_SPI_READ));
drv_data->read(drv_data);
if (drv_data->rx != drv_data->rx_end)
@@ -861,7 +932,7 @@ static void pump_transfers(unsigned long data)
}
if (!tranf_success) {
- dev_dbg(&drv_data->pdev->dev,
+ dev_dbg(&drv_data->pdev->dev,
"IO write error!\n");
message->state = ERROR_STATE;
} else {
@@ -881,9 +952,11 @@ static void pump_transfers(unsigned long data)
/* pop a msg from queue and kick off real transfer */
static void pump_messages(struct work_struct *work)
{
- struct driver_data *drv_data = container_of(work, struct driver_data, pump_messages);
+ struct driver_data *drv_data;
unsigned long flags;
+ drv_data = container_of(work, struct driver_data, pump_messages);
+
/* Lock queue and check for queue work */
spin_lock_irqsave(&drv_data->lock, flags);
if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
@@ -902,6 +975,14 @@ static void pump_messages(struct work_struct *work)
/* Extract head of queue */
drv_data->cur_msg = list_entry(drv_data->queue.next,
struct spi_message, queue);
+
+ /* Set up the SPI port using the per-chip configuration */
+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+ if (restore_state(drv_data)) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
list_del_init(&drv_data->cur_msg->queue);
/* Initial message state */
@@ -909,15 +990,12 @@ static void pump_messages(struct work_struct *work)
drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
struct spi_transfer, transfer_list);
- /* Setup the SSP using the per chip configuration */
- drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
- restore_state(drv_data);
+ dev_dbg(&drv_data->pdev->dev, "got a message to pump, "
+ "state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
+ drv_data->cur_chip->baud, drv_data->cur_chip->flag,
+ drv_data->cur_chip->ctl_reg);
+
dev_dbg(&drv_data->pdev->dev,
- "got a message to pump, state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
- drv_data->cur_chip->baud, drv_data->cur_chip->flag,
- drv_data->cur_chip->ctl_reg);
-
- dev_dbg(&drv_data->pdev->dev,
"the first transfer len is %d\n",
drv_data->cur_transfer->len);
@@ -959,6 +1037,22 @@ static int transfer(struct spi_device *spi, struct spi_message *msg)
return 0;
}
+#define MAX_SPI_SSEL 7
+
+static u16 ssel[3][MAX_SPI_SSEL] = {
+ {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3,
+ P_SPI0_SSEL4, P_SPI0_SSEL5,
+ P_SPI0_SSEL6, P_SPI0_SSEL7},
+
+ {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3,
+ P_SPI1_SSEL4, P_SPI1_SSEL5,
+ P_SPI1_SSEL6, P_SPI1_SSEL7},
+
+ {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3,
+ P_SPI2_SSEL4, P_SPI2_SSEL5,
+ P_SPI2_SSEL6, P_SPI2_SSEL7},
+};
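The table is indexed as ssel[bus_num][chip_select_num - 1], which is exactly how setup() and cleanup() below request and free the pin. A toy model of the lookup, with numeric stand-ins for the P_SPIx_SSELy constants:

#include <stdio.h>

#define MAX_SPI_SSEL 7

/* stand-in pin ids; the real table holds P_SPIx_SSELy constants */
static unsigned short ssel[3][MAX_SPI_SSEL] = {
	{101, 102, 103, 104, 105, 106, 107},
	{201, 202, 203, 204, 205, 206, 207},
	{301, 302, 303, 304, 305, 306, 307},
};

int main(void)
{
	int bus_num = 1, chip_select_num = 3;	/* hypothetical slave */

	/* row = SPI bus, column = SSEL number - 1, as in setup() below */
	printf("pin id: %u\n", ssel[bus_num][chip_select_num - 1]);	/* 203 */
	return 0;
}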
+
/* first setup for new devices */
static int setup(struct spi_device *spi)
{
@@ -993,6 +1087,18 @@ static int setup(struct spi_device *spi)
/* chip_info isn't always needed */
if (chip_info) {
+ /* Make sure people stop trying to set fields via ctl_reg
+ * when they should actually be using the common SPI framework.
+ * Currently we let through: WOM EMISO PSSE GM SZ TIMOD.
+ * Not sure if a user actually needs/uses any of these,
+ * but let's assume (for now) they do.
+ */
+ if (chip_info->ctl_reg & (SPE|MSTR|CPOL|CPHA|LSBF|SIZE)) {
+ dev_err(&spi->dev, "do not set bits in ctl_reg "
+ "that the SPI framework manages\n");
+ return -EINVAL;
+ }
+
chip->enable_dma = chip_info->enable_dma != 0
&& drv_data->master_info->enable_dma;
chip->ctl_reg = chip_info->ctl_reg;
@@ -1015,20 +1121,20 @@ static int setup(struct spi_device *spi)
* if any one SPI chip is registered and wants DMA, request the
* DMA channel for it
*/
- if (chip->enable_dma && !dma_requested) {
+ if (chip->enable_dma && !drv_data->dma_requested) {
/* register dma irq handler */
- if (request_dma(CH_SPI, "BF53x_SPI_DMA") < 0) {
+ if (request_dma(drv_data->dma_channel, "BF53x_SPI_DMA") < 0) {
dev_dbg(&spi->dev,
"Unable to request BlackFin SPI DMA channel\n");
return -ENODEV;
}
- if (set_dma_callback(CH_SPI, (void *)dma_irq_handler, drv_data)
- < 0) {
+ if (set_dma_callback(drv_data->dma_channel,
+ (void *)dma_irq_handler, drv_data) < 0) {
dev_dbg(&spi->dev, "Unable to set dma callback\n");
return -EPERM;
}
- dma_disable_irq(CH_SPI);
- dma_requested = 1;
+ dma_disable_irq(drv_data->dma_channel);
+ drv_data->dma_requested = 1;
}
/*
@@ -1077,6 +1183,14 @@ static int setup(struct spi_device *spi)
spi_set_ctldata(spi, chip);
+ dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num);
+ if ((chip->chip_select_num > 0)
+ && (chip->chip_select_num <= spi->master->num_chipselect))
+ peripheral_request(ssel[spi->master->bus_num]
+ [chip->chip_select_num-1], DRV_NAME);
+
+ cs_deactive(drv_data, chip);
+
return 0;
}
@@ -1088,6 +1202,11 @@ static void cleanup(struct spi_device *spi)
{
struct chip_data *chip = spi_get_ctldata(spi);
+ if ((chip->chip_select_num > 0)
+ && (chip->chip_select_num <= spi->master->num_chipselect))
+ peripheral_free(ssel[spi->master->bus_num]
+ [chip->chip_select_num-1]);
+
kfree(chip);
}
@@ -1183,6 +1302,7 @@ static int __init bfin5xx_spi_probe(struct platform_device *pdev)
struct bfin5xx_spi_master *platform_info;
struct spi_master *master;
struct driver_data *drv_data = 0;
+ struct resource *res;
int status = 0;
platform_info = dev->platform_data;
@@ -1193,10 +1313,12 @@ static int __init bfin5xx_spi_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "can not alloc spi_master\n");
return -ENOMEM;
}
+
drv_data = spi_master_get_devdata(master);
drv_data->master = master;
drv_data->master_info = platform_info;
drv_data->pdev = pdev;
+ drv_data->pin_req = platform_info->pin_req;
master->bus_num = pdev->id;
master->num_chipselect = platform_info->num_chipselect;
@@ -1204,15 +1326,38 @@ static int __init bfin5xx_spi_probe(struct platform_device *pdev)
master->setup = setup;
master->transfer = transfer;
+ /* Find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "Cannot get IORESOURCE_MEM\n");
+ status = -ENOENT;
+ goto out_error_get_res;
+ }
+
+ drv_data->regs_base = ioremap(res->start, (res->end - res->start + 1));
+ if (drv_data->regs_base == NULL) {
+ dev_err(dev, "Cannot map IO\n");
+ status = -ENXIO;
+ goto out_error_ioremap;
+ }
+
+ drv_data->dma_channel = platform_get_irq(pdev, 0);
+ if (drv_data->dma_channel < 0) {
+ dev_err(dev, "No DMA channel specified\n");
+ status = -ENOENT;
+ goto out_error_no_dma_ch;
+ }
+
/* Initialize and start the queue */
status = init_queue(drv_data);
if (status != 0) {
- dev_err(&pdev->dev, "problem initializing queue\n");
+ dev_err(dev, "problem initializing queue\n");
goto out_error_queue_alloc;
}
+
status = start_queue(drv_data);
if (status != 0) {
- dev_err(&pdev->dev, "problem starting queue\n");
+ dev_err(dev, "problem starting queue\n");
goto out_error_queue_alloc;
}
@@ -1220,15 +1365,30 @@ static int __init bfin5xx_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drv_data);
status = spi_register_master(master);
if (status != 0) {
- dev_err(&pdev->dev, "problem registering spi master\n");
+ dev_err(dev, "problem registering spi master\n");
goto out_error_queue_alloc;
}
- dev_dbg(&pdev->dev, "controller probe successfully\n");
+
+ status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
+ if (status != 0) {
+ dev_err(&pdev->dev, ": Requesting Peripherals failed\n");
+ goto out_error;
+ }
+
+ dev_info(dev, "%s, Version %s, regs_base@%p, dma channel@%d\n",
+ DRV_DESC, DRV_VERSION, drv_data->regs_base,
+ drv_data->dma_channel);
return status;
- out_error_queue_alloc:
+out_error_queue_alloc:
destroy_queue(drv_data);
+out_error_no_dma_ch:
+ iounmap((void *) drv_data->regs_base);
+out_error_ioremap:
+out_error_get_res:
+out_error:
spi_master_put(master);
+
return status;
}
@@ -1251,13 +1411,15 @@ static int __devexit bfin5xx_spi_remove(struct platform_device *pdev)
/* Release DMA */
if (drv_data->master_info->enable_dma) {
- if (dma_channel_active(CH_SPI))
- free_dma(CH_SPI);
+ if (dma_channel_active(drv_data->dma_channel))
+ free_dma(drv_data->dma_channel);
}
/* Disconnect from the SPI framework */
spi_unregister_master(drv_data->master);
+ peripheral_free_list(drv_data->pin_req);
+
/* Prevent double remove */
platform_set_drvdata(pdev, NULL);
@@ -1305,7 +1467,7 @@ static int bfin5xx_spi_resume(struct platform_device *pdev)
MODULE_ALIAS("bfin-spi-master"); /* for platform bus hotplug */
static struct platform_driver bfin5xx_spi_driver = {
.driver = {
- .name = "bfin-spi-master",
+ .name = DRV_NAME,
.owner = THIS_MODULE,
},
.suspend = bfin5xx_spi_suspend,
diff --git a/fs/aio.c b/fs/aio.c
index f12db415c0f..9dec7d2d546 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1161,7 +1161,12 @@ retry:
ret = 0;
if (to.timed_out) /* Only check after read evt */
break;
- io_schedule();
+ /* Try to only show up in io wait if there are ops
+ * in flight */
+ if (ctx->reqs_active)
+ io_schedule();
+ else
+ schedule();
if (signal_pending(tsk)) {
ret = -EINTR;
break;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 294c41baef6..a64a71d444f 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -178,7 +178,8 @@ static void bfs_delete_inode(struct inode *inode)
brelse(bh);
if (bi->i_dsk_ino) {
- info->si_freeb += BFS_FILEBLOCKS(bi);
+ if (bi->i_sblock)
+ info->si_freeb += bi->i_eblock + 1 - bi->i_sblock;
info->si_freei++;
clear_bit(ino, info->si_imap);
dump_imap("delete_inode", s);
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index f02fdef463a..c312adcba4f 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -134,9 +134,10 @@ int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
pmode is the existing mode (we only want to overwrite part of this);
bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
*/
-static void access_flags_to_mode(__u32 ace_flags, int type, umode_t *pmode,
+static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
umode_t *pbits_to_set)
{
+ __u32 flags = le32_to_cpu(ace_flags);
/* the order of ACEs is important. The canonical order is to begin with
DENY entries followed by ALLOW, otherwise an allow entry could be
encountered first, making the subsequent deny entry like "dead code"
@@ -146,17 +147,17 @@ static void access_flags_to_mode(__u32 ace_flags, int type, umode_t *pmode,
/* For deny ACEs we change the mask so that subsequent allow access
control entries do not turn on the bits we are denying */
if (type == ACCESS_DENIED) {
- if (ace_flags & GENERIC_ALL) {
+ if (flags & GENERIC_ALL) {
*pbits_to_set &= ~S_IRWXUGO;
}
- if ((ace_flags & GENERIC_WRITE) ||
- ((ace_flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
+ if ((flags & GENERIC_WRITE) ||
+ ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
*pbits_to_set &= ~S_IWUGO;
- if ((ace_flags & GENERIC_READ) ||
- ((ace_flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
+ if ((flags & GENERIC_READ) ||
+ ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
*pbits_to_set &= ~S_IRUGO;
- if ((ace_flags & GENERIC_EXECUTE) ||
- ((ace_flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
+ if ((flags & GENERIC_EXECUTE) ||
+ ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
*pbits_to_set &= ~S_IXUGO;
return;
} else if (type != ACCESS_ALLOWED) {
@@ -165,25 +166,25 @@ static void access_flags_to_mode(__u32 ace_flags, int type, umode_t *pmode,
}
/* else ACCESS_ALLOWED type */
- if (ace_flags & GENERIC_ALL) {
+ if (flags & GENERIC_ALL) {
*pmode |= (S_IRWXUGO & (*pbits_to_set));
#ifdef CONFIG_CIFS_DEBUG2
cFYI(1, ("all perms"));
#endif
return;
}
- if ((ace_flags & GENERIC_WRITE) ||
- ((ace_flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
+ if ((flags & GENERIC_WRITE) ||
+ ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
*pmode |= (S_IWUGO & (*pbits_to_set));
- if ((ace_flags & GENERIC_READ) ||
- ((ace_flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
+ if ((flags & GENERIC_READ) ||
+ ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
*pmode |= (S_IRUGO & (*pbits_to_set));
- if ((ace_flags & GENERIC_EXECUTE) ||
- ((ace_flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
+ if ((flags & GENERIC_EXECUTE) ||
+ ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
*pmode |= (S_IXUGO & (*pbits_to_set));
#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("access flags 0x%x mode now 0x%x", ace_flags, *pmode));
+ cFYI(1, ("access flags 0x%x mode now 0x%x", flags, *pmode));
#endif
return;
}
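The point of switching the parameter to __le32 and converting up front is that ACE flags arrive little-endian on the wire, so bit tests against host-order masks are only correct after le32_to_cpu(). A portable sketch of that conversion (le32_to_host is our stand-in for the kernel helper; the flag bytes are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* assemble a host-order value from little-endian wire bytes */
static uint32_t le32_to_host(const uint8_t b[4])
{
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	const uint8_t wire[4] = { 0x01, 0x00, 0x00, 0x40 };	/* example ACE flags */

	/* on a big-endian host, testing the raw word against host-order
	 * masks would misfire; converting first is correct everywhere */
	printf("flags: 0x%08x\n", le32_to_host(wire));	/* 0x40000001 */
	return 0;
}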
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 47552d4a632..0f69c416eeb 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -602,15 +602,15 @@ int __journal_remove_checkpoint(struct journal_head *jh)
/*
* There is one special case to worry about: if we have just pulled the
- * buffer off a committing transaction's forget list, then even if the
- * checkpoint list is empty, the transaction obviously cannot be
- * dropped!
+ * buffer off a running or committing transaction's checkpoint list,
+ * then even if the checkpoint list is empty, the transaction obviously
+ * cannot be dropped!
*
- * The locking here around j_committing_transaction is a bit sleazy.
+ * The locking here around t_state is a bit sleazy.
* See the comment at the end of journal_commit_transaction().
*/
- if (transaction == journal->j_committing_transaction) {
- JBUFFER_TRACE(jh, "belongs to committing transaction");
+ if (transaction->t_state != T_FINISHED) {
+ JBUFFER_TRACE(jh, "belongs to running/committing transaction");
goto out;
}
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 8f1f2aa5fb3..610264b99a8 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -858,10 +858,10 @@ restart_loop:
}
spin_unlock(&journal->j_list_lock);
/*
- * This is a bit sleazy. We borrow j_list_lock to protect
- * journal->j_committing_transaction in __journal_remove_checkpoint.
- * Really, __journal_remove_checkpoint should be using j_state_lock but
- * it's a bit hassle to hold that across __journal_remove_checkpoint
+ * This is a bit sleazy. We use j_list_lock to protect transition
+ * of a transaction into T_FINISHED state and calling
+ * __journal_drop_transaction(). Otherwise we could race with
+ * other checkpointing code processing the transaction...
*/
spin_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index d84bd155997..ee50c9610e7 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -72,14 +72,6 @@
#include "tcp_internal.h"
-/*
- * The linux network stack isn't sparse endian clean.. It has macros like
- * ntohs() which perform the endian checks and structs like sockaddr_in
- * which aren't annotated. So __force is found here to get the build
- * clean. When they emerge from the dark ages and annotate the code
- * we can remove these.
- */
-
#define SC_NODEF_FMT "node %s (num %u) at %u.%u.%u.%u:%u"
#define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num, \
NIPQUAD(sc->sc_node->nd_ipv4_address), \
@@ -1500,7 +1492,7 @@ static void o2net_start_connect(struct work_struct *work)
myaddr.sin_family = AF_INET;
myaddr.sin_addr.s_addr = mynode->nd_ipv4_address;
- myaddr.sin_port = (__force u16)htons(0); /* any port */
+ myaddr.sin_port = htons(0); /* any port */
ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
sizeof(myaddr));
@@ -1701,11 +1693,11 @@ static int o2net_accept_one(struct socket *sock)
if (ret < 0)
goto out;
- node = o2nm_get_node_by_ip((__force __be32)sin.sin_addr.s_addr);
+ node = o2nm_get_node_by_ip(sin.sin_addr.s_addr);
if (node == NULL) {
mlog(ML_NOTICE, "attempt to connect from unknown node at "
"%u.%u.%u.%u:%d\n", NIPQUAD(sin.sin_addr.s_addr),
- ntohs((__force __be16)sin.sin_port));
+ ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
@@ -1714,7 +1706,7 @@ static int o2net_accept_one(struct socket *sock)
mlog(ML_NOTICE, "unexpected connect attempted from a lower "
"numbered node '%s' at " "%u.%u.%u.%u:%d with num %u\n",
node->nd_name, NIPQUAD(sin.sin_addr.s_addr),
- ntohs((__force __be16)sin.sin_port), node->nd_num);
+ ntohs(sin.sin_port), node->nd_num);
ret = -EINVAL;
goto out;
}
@@ -1725,7 +1717,7 @@ static int o2net_accept_one(struct socket *sock)
mlog(ML_CONN, "attempt to connect from node '%s' at "
"%u.%u.%u.%u:%d but it isn't heartbeating\n",
node->nd_name, NIPQUAD(sin.sin_addr.s_addr),
- ntohs((__force __be16)sin.sin_port));
+ ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
@@ -1742,7 +1734,7 @@ static int o2net_accept_one(struct socket *sock)
mlog(ML_NOTICE, "attempt to connect from node '%s' at "
"%u.%u.%u.%u:%d but it already has an open connection\n",
node->nd_name, NIPQUAD(sin.sin_addr.s_addr),
- ntohs((__force __be16)sin.sin_port));
+ ntohs(sin.sin_port));
goto out;
}
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 5fccfe222a6..8d49838e555 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -595,6 +595,7 @@ static struct proc_dir_entry *proc_create(struct proc_dir_entry **parent,
ent->namelen = len;
ent->mode = mode;
ent->nlink = nlink;
+ atomic_set(&ent->count, 1);
ent->pde_users = 0;
spin_lock_init(&ent->pde_unload_lock);
ent->pde_unload_completion = NULL;
@@ -692,7 +693,6 @@ void free_proc_entry(struct proc_dir_entry *de)
/*
* Remove a /proc entry and free it if it's not currently in use.
- * If it is in use, we set the 'deleted' flag.
*/
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
@@ -741,13 +741,8 @@ continue_removing:
parent->nlink--;
de->nlink = 0;
WARN_ON(de->subdir);
- if (!atomic_read(&de->count))
+ if (atomic_dec_and_test(&de->count))
free_proc_entry(de);
- else {
- de->deleted = 1;
- printk("remove_proc_entry: %s/%s busy, count=%d\n",
- parent->name, de->name, atomic_read(&de->count));
- }
break;
}
spin_unlock(&proc_subdir_lock);
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index abe6a3f0436..1a551d92e1d 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -43,13 +43,8 @@ void de_put(struct proc_dir_entry *de)
return;
}
- if (atomic_dec_and_test(&de->count)) {
- if (de->deleted) {
- printk("de_put: deferred delete of %s\n",
- de->name);
- free_proc_entry(de);
- }
- }
+ if (atomic_dec_and_test(&de->count))
+ free_proc_entry(de);
unlock_kernel();
}
}
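Taken together, the generic.c and inode.c hunks replace the old 'deleted' flag with plain reference counting: an entry is born with a count of one, remove_proc_entry() drops that creation reference, and whichever de_put() brings the count to zero frees the entry. A userspace sketch of the scheme (the atomic stand-ins are simplified and not thread-safe):

#include <stdio.h>

/* simplified, non-atomic stand-ins for the kernel primitives */
typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(n) { (n) }
static int atomic_dec_and_test(atomic_t *a) { return --a->counter == 0; }

struct pde { atomic_t count; const char *name; };

static void free_proc_entry(struct pde *de) { printf("freed %s\n", de->name); }

int main(void)
{
	struct pde de = { ATOMIC_INIT(1), "example" };	/* creation reference */

	de.count.counter++;			/* an open file takes a reference */
	if (atomic_dec_and_test(&de.count))	/* remove_proc_entry() */
		free_proc_entry(&de);		/* not reached: file still open */
	if (atomic_dec_and_test(&de.count))	/* final de_put() */
		free_proc_entry(&de);		/* frees here */
	return 0;
}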
diff --git a/fs/proc/root.c b/fs/proc/root.c
index ec9cb3b6c93..81f99e691f9 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -207,6 +207,7 @@ struct proc_dir_entry proc_root = {
.name = "/proc",
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
.nlink = 2,
+ .count = ATOMIC_INIT(1),
.proc_iops = &proc_root_inode_operations,
.proc_fops = &proc_root_operations,
.parent = &proc_root,
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 9aa7a06e093..00114462167 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -420,12 +420,6 @@ static void *r_start(struct seq_file *m, loff_t * pos)
return NULL;
up_write(&s->s_umount);
-
- if (de->deleted) {
- deactivate_super(s);
- return NULL;
- }
-
return s;
}
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 30f8c2bb0c3..aaf2878305c 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -179,7 +179,7 @@ bad_entry:
goto fail;
Eend:
p = (struct ufs_dir_entry *)(kaddr + offs);
- ufs_error (sb, "ext2_check_page",
+ ufs_error(sb, __FUNCTION__,
"entry in directory #%lu spans the page boundary"
"offset=%lu",
dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index c78c04fd993..0072cb33ebe 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -755,13 +755,13 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
break;
case UFS_MOUNT_UFSTYPE_NEXTSTEP:
- /*TODO: check may be we need set special dir block size?*/
UFSD("ufstype=nextstep\n");
uspi->s_fsize = block_size = 1024;
uspi->s_fmask = ~(1024 - 1);
uspi->s_fshift = 10;
uspi->s_sbsize = super_block_size = 2048;
uspi->s_sbbase = 0;
+ uspi->s_dirblksize = 1024;
flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
if (!(sb->s_flags & MS_RDONLY)) {
if (!silent)
@@ -771,13 +771,13 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
break;
case UFS_MOUNT_UFSTYPE_NEXTSTEP_CD:
- /*TODO: check may be we need set special dir block size?*/
UFSD("ufstype=nextstep-cd\n");
uspi->s_fsize = block_size = 2048;
uspi->s_fmask = ~(2048 - 1);
uspi->s_fshift = 11;
uspi->s_sbsize = super_block_size = 2048;
uspi->s_sbbase = 0;
+ uspi->s_dirblksize = 1024;
flags |= UFS_DE_OLD | UFS_UID_OLD | UFS_ST_OLD | UFS_CG_OLD;
if (!(sb->s_flags & MS_RDONLY)) {
if (!silent)
diff --git a/include/asm-blackfin/bfin5xx_spi.h b/include/asm-blackfin/bfin5xx_spi.h
index f617d876545..1a0b57f6a3d 100644
--- a/include/asm-blackfin/bfin5xx_spi.h
+++ b/include/asm-blackfin/bfin5xx_spi.h
@@ -152,6 +152,7 @@
struct bfin5xx_spi_master {
u16 num_chipselect;
u8 enable_dma;
+ u16 pin_req[4];
};
/* spi_board_info.controller_data for SPI slave devices,
@@ -162,7 +163,7 @@ struct bfin5xx_spi_chip {
u8 enable_dma;
u8 bits_per_word;
u8 cs_change_per_word;
- u8 cs_chg_udelay;
+ u16 cs_chg_udelay; /* Some devices require 16-bit delays */
};
#endif /* _SPI_CHANNEL_H_ */
diff --git a/include/asm-blackfin/mach-bf533/portmux.h b/include/asm-blackfin/mach-bf533/portmux.h
index b88d7a03ee3..137f4884acf 100644
--- a/include/asm-blackfin/mach-bf533/portmux.h
+++ b/include/asm-blackfin/mach-bf533/portmux.h
@@ -42,7 +42,7 @@
#define P_SPORT0_DRPRI (P_DONTCARE)
#define P_SPI0_MOSI (P_DONTCARE)
-#define P_SPI0_MIS0 (P_DONTCARE)
+#define P_SPI0_MISO (P_DONTCARE)
#define P_SPI0_SCK (P_DONTCARE)
#define P_SPI0_SSEL7 (P_DEFINED | P_IDENT(GPIO_PF7))
#define P_SPI0_SSEL6 (P_DEFINED | P_IDENT(GPIO_PF6))
diff --git a/include/asm-blackfin/mach-bf548/defBF54x_base.h b/include/asm-blackfin/mach-bf548/defBF54x_base.h
index da979cb62f7..319a48590c9 100644
--- a/include/asm-blackfin/mach-bf548/defBF54x_base.h
+++ b/include/asm-blackfin/mach-bf548/defBF54x_base.h
@@ -1644,8 +1644,25 @@
#define RESTART 0x20 /* Work Unit Transitions */
#define DI_SEL 0x40 /* Data Interrupt Timing Select */
#define DI_EN 0x80 /* Data Interrupt Enable */
+
#define NDSIZE 0xf00 /* Flex Descriptor Size */
+#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */
+#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */
+#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */
+#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */
+#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */
+#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */
+#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */
+#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */
+#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */
+#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */
+
#define DMAFLOW 0xf000 /* Next Operation */
+#define DMAFLOW_STOP 0x0000 /* Stop Mode */
+#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */
+#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */
+#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */
+#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */
/* Bit masks for DMAx_IRQ_STATUS, MDMA_Sx_IRQ_STATUS, MDMA_Dx_IRQ_STATUS */
diff --git a/include/asm-mips/mach-au1x00/au1000.h b/include/asm-mips/mach-au1x00/au1000.h
index 3bdce9126f1..bf7701243d7 100644
--- a/include/asm-mips/mach-au1x00/au1000.h
+++ b/include/asm-mips/mach-au1x00/au1000.h
@@ -526,7 +526,7 @@ extern struct au1xxx_irqmap au1xxx_irq_map[];
/* Au1000 */
#ifdef CONFIG_SOC_AU1000
enum soc_au1000_ints {
- AU1000_FIRST_INT = MIPS_CPU_IRQ_BASE,
+ AU1000_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
AU1000_UART0_INT = AU1000_FIRST_INT,
AU1000_UART1_INT, /* au1000 */
AU1000_UART2_INT, /* au1000 */
@@ -605,7 +605,7 @@ enum soc_au1000_ints {
/* Au1500 */
#ifdef CONFIG_SOC_AU1500
enum soc_au1500_ints {
- AU1500_FIRST_INT = MIPS_CPU_IRQ_BASE,
+ AU1500_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
AU1500_UART0_INT = AU1500_FIRST_INT,
AU1000_PCI_INTA, /* au1500 */
AU1000_PCI_INTB, /* au1500 */
@@ -686,7 +686,7 @@ enum soc_au1500_ints {
/* Au1100 */
#ifdef CONFIG_SOC_AU1100
enum soc_au1100_ints {
- AU1100_FIRST_INT = MIPS_CPU_IRQ_BASE,
+ AU1100_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
AU1100_UART0_INT,
AU1100_UART1_INT,
AU1100_SD_INT,
@@ -761,7 +761,7 @@ enum soc_au1100_ints {
#ifdef CONFIG_SOC_AU1550
enum soc_au1550_ints {
- AU1550_FIRST_INT = MIPS_CPU_IRQ_BASE,
+ AU1550_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
AU1550_UART0_INT = AU1550_FIRST_INT,
AU1550_PCI_INTA,
AU1550_PCI_INTB,
@@ -851,7 +851,7 @@ enum soc_au1550_ints {
#ifdef CONFIG_SOC_AU1200
enum soc_au1200_ints {
- AU1200_FIRST_INT = MIPS_CPU_IRQ_BASE,
+ AU1200_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
AU1200_UART0_INT = AU1200_FIRST_INT,
AU1200_SWT_INT,
AU1200_SD_INT,
@@ -948,11 +948,12 @@ enum soc_au1200_ints {
#endif /* CONFIG_SOC_AU1200 */
-#define AU1000_INTC0_INT_BASE (MIPS_CPU_IRQ_BASE + 0)
-#define AU1000_INTC0_INT_LAST (MIPS_CPU_IRQ_BASE + 31)
-#define AU1000_INTC1_INT_BASE (MIPS_CPU_IRQ_BASE + 32)
-#define AU1000_INTC1_INT_LAST (MIPS_CPU_IRQ_BASE + 63)
-#define AU1000_MAX_INTR (MIPS_CPU_IRQ_BASE + 63)
+#define AU1000_INTC0_INT_BASE (MIPS_CPU_IRQ_BASE + 8)
+#define AU1000_INTC0_INT_LAST (AU1000_INTC0_INT_BASE + 31)
+#define AU1000_INTC1_INT_BASE (AU1000_INTC0_INT_BASE + 32)
+#define AU1000_INTC1_INT_LAST (AU1000_INTC1_INT_BASE + 31)
+
+#define AU1000_MAX_INTR AU1000_INTC1_INT_LAST
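The net effect of the '+ 8' remapping: the eight MIPS CPU interrupts keep the numbers below the controller range, and the two 32-input controllers follow back to back. A worked layout, assuming a MIPS_CPU_IRQ_BASE of 0 for illustration:

#include <stdio.h>

#define MIPS_CPU_IRQ_BASE	0	/* assumed for this example */
#define AU1000_INTC0_INT_BASE	(MIPS_CPU_IRQ_BASE + 8)
#define AU1000_INTC0_INT_LAST	(AU1000_INTC0_INT_BASE + 31)
#define AU1000_INTC1_INT_BASE	(AU1000_INTC0_INT_BASE + 32)
#define AU1000_INTC1_INT_LAST	(AU1000_INTC1_INT_BASE + 31)

int main(void)
{
	printf("CPU IRQs: %d..%d\n", MIPS_CPU_IRQ_BASE, MIPS_CPU_IRQ_BASE + 7);	/* 0..7 */
	printf("INTC0:    %d..%d\n", AU1000_INTC0_INT_BASE, AU1000_INTC0_INT_LAST);	/* 8..39 */
	printf("INTC1:    %d..%d\n", AU1000_INTC1_INT_BASE, AU1000_INTC1_INT_LAST);	/* 40..71 */
	return 0;
}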
#define INTX 0xFF /* not valid */
/* Programmable Counters 0 and 1 */
diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h
index 1246d46abbc..80335b7d77c 100644
--- a/include/linux/inet_lro.h
+++ b/include/linux/inet_lro.h
@@ -91,6 +91,9 @@ struct net_lro_mgr {
int max_desc; /* Max number of LRO descriptors */
int max_aggr; /* Max number of LRO packets to be aggregated */
+ int frag_align_pad; /* Padding required to properly align layer 3
+ * headers in generated skb when using frags */
+
struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
/*
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 16e7ed855a1..d9ecd13393b 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -439,6 +439,8 @@ struct transaction_s
/*
* Transaction's current state
* [no locking - only kjournald alters this]
+ * [j_list_lock] guards transition of a transaction into T_FINISHED
+ * state and subsequent call of __journal_drop_transaction()
* FIXME: needs barriers
* KLUDGE: [use j_state_lock]
*/
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 520238cbae5..1b7b95c67ac 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -12,6 +12,7 @@
#include <linux/prio_tree.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
+#include <linux/security.h>
struct mempolicy;
struct anon_vma;
@@ -513,6 +514,21 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
}
/*
+ * If a hint addr is less than mmap_min_addr, change the hint to be as
+ * low as possible while still greater than mmap_min_addr
+ */
+static inline unsigned long round_hint_to_min(unsigned long hint)
+{
+#ifdef CONFIG_SECURITY
+ hint &= PAGE_MASK;
+ if (((void *)hint != NULL) &&
+ (hint < mmap_min_addr))
+ return PAGE_ALIGN(mmap_min_addr);
+#endif
+ return hint;
+}
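A standalone demonstration of the rounding rule, with an assumed 4 KiB page size and a hypothetical mmap_min_addr of 64 KiB: hints below the floor are bumped up to the first page at or above it, while NULL hints and hints above the floor pass through unchanged.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

static unsigned long mmap_min_addr = 0x10000;	/* hypothetical 64 KiB floor */

static unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (hint != 0 && hint < mmap_min_addr)
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}

int main(void)
{
	printf("0x1234  -> 0x%lx\n", round_hint_to_min(0x1234));	/* 0x10000 */
	printf("0x20000 -> 0x%lx\n", round_hint_to_min(0x20000));	/* 0x20000 */
	printf("0x0     -> 0x%lx\n", round_hint_to_min(0x0));	/* 0x0: NULL hint kept */
	return 0;
}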
+
+/*
* Some inline functions in vmstat.h depend on page_zone()
*/
#include <linux/vmstat.h>
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e10763d7918..554836edd91 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -403,6 +403,7 @@ int phy_mii_ioctl(struct phy_device *phydev,
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id);
+void phy_device_free(struct phy_device *phydev);
extern struct bus_type mdio_bus_type;
#endif /* __PHY_H */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 523528d237b..a5316829215 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -77,7 +77,6 @@ struct proc_dir_entry {
read_proc_t *read_proc;
write_proc_t *write_proc;
atomic_t count; /* use count */
- int deleted; /* delete flag */
int pde_users; /* number of callers into module in progress */
spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
struct completion *pde_unload_completion;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 1c4eb41dbd8..9c4ad755d7e 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -7,12 +7,25 @@
#ifndef _LINUX_THREAD_INFO_H
#define _LINUX_THREAD_INFO_H
+#include <linux/types.h>
+
/*
- * System call restart block.
+ * System call restart block.
*/
struct restart_block {
long (*fn)(struct restart_block *);
- unsigned long arg0, arg1, arg2, arg3;
+ union {
+ struct {
+ unsigned long arg0, arg1, arg2, arg3;
+ };
+ /* For futex_wait */
+ struct {
+ u32 *uaddr;
+ u32 val;
+ u32 flags;
+ u64 time;
+ } futex;
+ };
};
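The anonymous union is what lets futex carry typed fields without breaking other restart_block users: futex.* simply overlays the legacy arg0..arg3 words in the same storage. A compilable sketch (C11 anonymous members; the demo values are arbitrary):

#include <stdint.h>
#include <stdio.h>

struct restart_block {
	long (*fn)(struct restart_block *);
	union {
		struct {
			unsigned long arg0, arg1, arg2, arg3;
		};
		struct {
			uint32_t *uaddr;
			uint32_t val;
			uint32_t flags;
			uint64_t time;
		} futex;
	};
};

int main(void)
{
	struct restart_block rb = { 0 };

	rb.futex.val = 42;
	rb.futex.flags = 1;	/* e.g. FLAGS_SHARED in the futex patch below */
	printf("val=%u flags=%u size=%zu\n",
	       rb.futex.val, rb.futex.flags, sizeof(rb));
	return 0;
}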
extern long do_no_restart_syscall(struct restart_block *parm);
diff --git a/kernel/Kconfig.instrumentation b/kernel/Kconfig.instrumentation
index 2ea1e347df4..468f47ad750 100644
--- a/kernel/Kconfig.instrumentation
+++ b/kernel/Kconfig.instrumentation
@@ -20,8 +20,8 @@ config PROFILING
config OPROFILE
tristate "OProfile system profiling (EXPERIMENTAL)"
- depends on PROFILING
- depends on (ALPHA || ARM || BLACKFIN || X86_32 || IA64 || M32R || MIPS || PARISC || PPC || S390 || SUPERH || SPARC || X86_64) && !UML
+ depends on PROFILING && !UML
+ depends on ARCH_SUPPORTS_OPROFILE || ALPHA || ARM || BLACKFIN || IA64 || M32R || PARISC || PPC || S390 || SUPERH || SPARC
help
OProfile is a profiling system capable of profiling the
whole system, including the kernel, kernel modules, libraries,
@@ -31,8 +31,8 @@ config OPROFILE
config KPROBES
bool "Kprobes"
- depends on KALLSYMS && MODULES
- depends on (X86_32 || IA64 || PPC || S390 || SPARC64 || X86_64 || AVR32) && !UML
+ depends on KALLSYMS && MODULES && !UML
+ depends on X86_32 || IA64 || PPC || S390 || SPARC64 || X86_64 || AVR32
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
diff --git a/kernel/fork.c b/kernel/fork.c
index 8ca1a14cdc8..8dd8ff28100 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1292,23 +1292,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
__ptrace_link(p, current->parent);
if (thread_group_leader(p)) {
- if (clone_flags & CLONE_NEWPID) {
+ if (clone_flags & CLONE_NEWPID)
p->nsproxy->pid_ns->child_reaper = p;
- p->signal->tty = NULL;
- set_task_pgrp(p, p->pid);
- set_task_session(p, p->pid);
- attach_pid(p, PIDTYPE_PGID, pid);
- attach_pid(p, PIDTYPE_SID, pid);
- } else {
- p->signal->tty = current->signal->tty;
- set_task_pgrp(p, task_pgrp_nr(current));
- set_task_session(p, task_session_nr(current));
- attach_pid(p, PIDTYPE_PGID,
- task_pgrp(current));
- attach_pid(p, PIDTYPE_SID,
- task_session(current));
- }
+ p->signal->tty = current->signal->tty;
+ set_task_pgrp(p, task_pgrp_nr(current));
+ set_task_session(p, task_session_nr(current));
+ attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
+ attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
diff --git a/kernel/futex.c b/kernel/futex.c
index 9dc591ab681..172a1aeeafd 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -658,7 +658,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
if (curval == -EFAULT)
ret = -EFAULT;
- if (curval != uval)
+ else if (curval != uval)
ret = -EINVAL;
if (ret) {
spin_unlock(&pi_state->pi_mutex.wait_lock);
@@ -1149,9 +1149,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
/*
* In case we must use restart_block to restart a futex_wait,
- * we encode in the 'arg3' shared capability
+ * we encode in the 'flags' shared capability
*/
-#define ARG3_SHARED 1
+#define FLAGS_SHARED 1
static long futex_wait_restart(struct restart_block *restart);
@@ -1290,12 +1290,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
struct restart_block *restart;
restart = &current_thread_info()->restart_block;
restart->fn = futex_wait_restart;
- restart->arg0 = (unsigned long)uaddr;
- restart->arg1 = (unsigned long)val;
- restart->arg2 = (unsigned long)abs_time;
- restart->arg3 = 0;
+ restart->futex.uaddr = (u32 *)uaddr;
+ restart->futex.val = val;
+ restart->futex.time = abs_time->tv64;
+ restart->futex.flags = 0;
+
if (fshared)
- restart->arg3 |= ARG3_SHARED;
+ restart->futex.flags |= FLAGS_SHARED;
return -ERESTART_RESTARTBLOCK;
}
@@ -1310,15 +1311,15 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
static long futex_wait_restart(struct restart_block *restart)
{
- u32 __user *uaddr = (u32 __user *)restart->arg0;
- u32 val = (u32)restart->arg1;
- ktime_t *abs_time = (ktime_t *)restart->arg2;
+ u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
struct rw_semaphore *fshared = NULL;
+ ktime_t t;
+ t.tv64 = restart->futex.time;
restart->fn = do_no_restart_syscall;
- if (restart->arg3 & ARG3_SHARED)
+ if (restart->futex.flags & FLAGS_SHARED)
fshared = &current->mm->mmap_sem;
- return (long)futex_wait(uaddr, fshared, val, abs_time);
+ return (long)futex_wait(uaddr, fshared, restart->futex.val, &t);
}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ed38bbfc48a..0f389621bb6 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3054,11 +3054,6 @@ void __init lockdep_info(void)
#endif
}
-static inline int in_range(const void *start, const void *addr, const void *end)
-{
- return addr >= start && addr <= end;
-}
-
static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
const void *mem_to, struct held_lock *hlock)
@@ -3080,6 +3075,13 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
dump_stack();
}
+static inline int not_in_range(const void *mem_from, unsigned long mem_len,
+ const void *lock_from, unsigned long lock_len)
+{
+ return lock_from + lock_len <= mem_from ||
+ mem_from + mem_len <= lock_from;
+}
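The old in_range() pair tested only the lock object's endpoints, so a freed region falling entirely inside a lock object slipped through. The new predicate is the standard interval-disjointness test: two half-open ranges miss each other iff one ends at or before the other begins. A small standalone check (char pointers so the arithmetic is plain standard C):

#include <stdio.h>

static int not_in_range(const char *mem_from, unsigned long mem_len,
			const char *lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
	       mem_from + mem_len <= lock_from;
}

int main(void)
{
	static char heap[64];

	/* lock object inside the freed region: overlap, prints 0 */
	printf("%d\n", not_in_range(heap, 64, heap + 16, 8));
	/* freed region entirely below the lock object: disjoint, prints 1 */
	printf("%d\n", not_in_range(heap, 16, heap + 32, 8));
	return 0;
}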
+
/*
* Called when kernel memory is freed (or unmapped), or if a lock
* is destroyed or reinitialized - this code checks whether there is
@@ -3087,7 +3089,6 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
*/
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
- const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
struct task_struct *curr = current;
struct held_lock *hlock;
unsigned long flags;
@@ -3100,14 +3101,11 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
- lock_from = (void *)hlock->instance;
- lock_to = (void *)(hlock->instance + 1);
-
- if (!in_range(mem_from, lock_from, mem_to) &&
- !in_range(mem_from, lock_to, mem_to))
+ if (not_in_range(mem_from, mem_len, hlock->instance,
+ sizeof(*hlock->instance)))
continue;
- print_freed_lock_bug(curr, mem_from, mem_to, hlock);
+ print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
break;
}
local_irq_restore(flags);
@@ -3173,6 +3171,13 @@ retry:
printk(" locked it.\n");
do_each_thread(g, p) {
+ /*
+ * It's not reliable to print a task's held locks
+ * if it's not sleeping (or if it's not the current
+ * task):
+ */
+ if (p->state == TASK_RUNNING && p != current)
+ continue;
if (p->lockdep_depth)
lockdep_print_held_locks(p);
if (!unlock)
diff --git a/kernel/sched.c b/kernel/sched.c
index 59ff6b140ed..67d9d1799d8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -209,9 +209,8 @@ static inline struct task_group *task_group(struct task_struct *p)
tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
struct task_group, css);
#else
- tg = &init_task_group;
+ tg = &init_task_group;
#endif
-
return tg;
}
@@ -249,15 +248,16 @@ struct cfs_rq {
#ifdef CONFIG_FAIR_GROUP_SCHED
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
- /* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+ /*
+ * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
* a hierarchy). Non-leaf lrqs hold other higher schedulable entities
* (like users, containers etc.)
*
* leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
* list is used during load balance.
*/
- struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
- struct task_group *tg; /* group that "owns" this runqueue */
+ struct list_head leaf_cfs_rq_list;
+ struct task_group *tg; /* group that "owns" this runqueue */
#endif
};
@@ -300,7 +300,7 @@ struct rq {
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
#endif
- struct rt_rq rt;
+ struct rt_rq rt;
/*
* This is part of a global counter where only the total sum
@@ -457,8 +457,8 @@ enum {
SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
SCHED_FEAT_WAKEUP_PREEMPT = 2,
SCHED_FEAT_START_DEBIT = 4,
- SCHED_FEAT_TREE_AVG = 8,
- SCHED_FEAT_APPROX_AVG = 16,
+ SCHED_FEAT_TREE_AVG = 8,
+ SCHED_FEAT_APPROX_AVG = 16,
};
const_debug unsigned int sysctl_sched_features =
@@ -591,7 +591,7 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
/*
* task_rq_lock - lock the runqueue a given task resides on and disable
- * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * interrupts. Note the ordering: we can safely lookup the task_rq without
* explicitly disabling preemption.
*/
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
@@ -779,7 +779,7 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
* To aid in avoiding the subversion of "niceness" due to uneven distribution
* of tasks with abnormal "nice" values across CPUs the contribution that
* each task makes to its run queue's load is weighted according to its
- * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
+ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
* scaled version of the new time slice allocation that they receive on time
* slice expiry etc.
*/
@@ -1854,7 +1854,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
* and do any other architecture-specific cleanup actions.
*
* Note that we may have delayed dropping an mm in context_switch(). If
- * so, we finish that here outside of the runqueue lock. (Doing it
+ * so, we finish that here outside of the runqueue lock. (Doing it
* with the lock held can cause deadlocks; see schedule() for
* details.)
*/
@@ -2136,7 +2136,7 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
/*
* If dest_cpu is allowed for this process, migrate the task to it.
* This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
+ * allow dest_cpu, which will force the cpu onto dest_cpu. Then
* the cpu_allowed mask is restored.
*/
static void sched_migrate_task(struct task_struct *p, int dest_cpu)
@@ -2581,7 +2581,7 @@ group_next:
* tasks around. Thus we look for the minimum possible imbalance.
* Negative imbalances (*we* are more loaded than anyone else) will
* be counted as no imbalance for these purposes -- we can't fix that
- * by pulling tasks to us. Be careful of negative numbers as they'll
+ * by pulling tasks to us. Be careful of negative numbers as they'll
* appear as very large values with unsigned longs.
*/
if (max_load <= busiest_load_per_task)
@@ -3016,7 +3016,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
/*
* This condition is "impossible", if it occurs
- * we need to fix it. Originally reported by
+ * we need to fix it. Originally reported by
* Bjorn Helgaas on a 128-cpu setup.
*/
BUG_ON(busiest_rq == target_rq);
@@ -3048,7 +3048,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
#ifdef CONFIG_NO_HZ
static struct {
atomic_t load_balancer;
- cpumask_t cpu_mask;
+ cpumask_t cpu_mask;
} nohz ____cacheline_aligned = {
.load_balancer = ATOMIC_INIT(-1),
.cpu_mask = CPU_MASK_NONE,
@@ -3552,7 +3552,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
static inline void schedule_debug(struct task_struct *prev)
{
/*
- * Test if we are atomic. Since do_exit() needs to call into
+ * Test if we are atomic. Since do_exit() needs to call into
* schedule() atomically, we ignore that path for now.
* Otherwise, whine if we are scheduling when we should not be.
*/
@@ -3674,7 +3674,7 @@ EXPORT_SYMBOL(schedule);
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
- * off of preempt_enable. Kernel preemptions off return from interrupt
+ * off of preempt_enable. Kernel preemptions off return from interrupt
* occur there and call schedule directly.
*/
asmlinkage void __sched preempt_schedule(void)
@@ -3686,7 +3686,7 @@ asmlinkage void __sched preempt_schedule(void)
#endif
/*
* If there is a non-zero preempt_count or interrupts are disabled,
- * we do not want to preempt the current task. Just return..
+ * we do not want to preempt the current task. Just return..
*/
if (likely(ti->preempt_count || irqs_disabled()))
return;
@@ -3772,12 +3772,12 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
EXPORT_SYMBOL(default_wake_function);
/*
- * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
- * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
+ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
+ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
* number) then we wake all the non-exclusive tasks and one exclusive task.
*
* There are circumstances in which we can try to wake a task which has already
- * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
+ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
@@ -4390,8 +4390,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
* @policy: new policy.
* @param: structure containing the new RT priority.
*/
-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
- struct sched_param __user *param)
+asmlinkage long
+sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
/* negative values for policy are not valid */
if (policy < 0)
@@ -4491,7 +4491,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
/*
* It is not safe to call set_cpus_allowed with the
- * tasklist_lock held. We will bump the task_struct's
+ * tasklist_lock held. We will bump the task_struct's
* usage count and then drop tasklist_lock.
*/
get_task_struct(p);
@@ -4687,7 +4687,7 @@ EXPORT_SYMBOL(cond_resched);
* cond_resched_lock() - if a reschedule is pending, drop the given lock,
* call schedule, and on return reacquire the lock.
*
- * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
+ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
* operations here to prevent schedule() from being called twice (once via
* spin_unlock(), once by hand).
*/
@@ -4741,7 +4741,7 @@ void __sched yield(void)
EXPORT_SYMBOL(yield);
/*
- * This task is about to go to sleep on IO. Increment rq->nr_iowait so
+ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
* that process accounting knows that this is a task in IO wait state.
*
* But don't do that if it is a deliberate, throttling IO wait (this task
@@ -4850,17 +4850,21 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
if (retval)
goto out_unlock;
- if (p->policy == SCHED_FIFO)
- time_slice = 0;
- else if (p->policy == SCHED_RR)
+ /*
+ * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
+ * tasks that are on an otherwise idle runqueue:
+ */
+ time_slice = 0;
+ if (p->policy == SCHED_RR) {
time_slice = DEF_TIMESLICE;
- else {
+ } else {
struct sched_entity *se = &p->se;
unsigned long flags;
struct rq *rq;
rq = task_rq_lock(p, &flags);
- time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+ if (rq->cfs.load.weight)
+ time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
task_rq_unlock(rq, &flags);
}
read_unlock(&tasklist_lock);
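
A rough model of the reporting policy the new comment states, with a placeholder DEF_TIMESLICE (the real value is config-dependent) and the runqueue lookup collapsed into two parameters:

#include <stdio.h>

#define DEF_TIMESLICE 100       /* placeholder for the sketch */

enum policy { SCHED_OTHER, SCHED_FIFO, SCHED_RR };

static unsigned long report_timeslice(enum policy p,
                                      unsigned long cfs_weight,
                                      unsigned long cfs_slice)
{
        unsigned long time_slice = 0;   /* FIFO, or fair task on an idle rq */

        if (p == SCHED_RR)
                time_slice = DEF_TIMESLICE;
        else if (p != SCHED_FIFO && cfs_weight)
                time_slice = cfs_slice; /* CFS-derived estimate */

        return time_slice;
}

int main(void)
{
        printf("%lu\n", report_timeslice(SCHED_FIFO, 0, 0));   /* 0 */
        printf("%lu\n", report_timeslice(SCHED_RR, 0, 0));     /* 100 */
        printf("%lu\n", report_timeslice(SCHED_OTHER, 0, 42)); /* 0: idle rq */
        printf("%lu\n", report_timeslice(SCHED_OTHER, 8, 42)); /* 42 */
        return 0;
}
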
@@ -5046,7 +5050,7 @@ static inline void sched_init_granularity(void)
* is removed from the allowed bitmask.
*
* NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely. The
+ * task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
@@ -5083,7 +5087,7 @@ out:
EXPORT_SYMBOL_GPL(set_cpus_allowed);
/*
- * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
* this because either it can't run here any more (set_cpus_allowed()
* away from this CPU, or CPU going down), or because we're
* attempting to rebalance this task on exec (sched_exec).
@@ -5228,7 +5232,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
* Try to stay on the same cpuset, where the
* current cpuset may be a subset of all cpus.
* The cpuset_cpus_allowed_locked() variant of
- * cpuset_cpus_allowed() will not block. It must be
+ * cpuset_cpus_allowed() will not block. It must be
* called within calls to cpuset_lock/cpuset_unlock.
*/
rq = task_rq_lock(p, &flags);
@@ -5241,10 +5245,11 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
* kernel threads (both mm NULL), since they never
* leave kernel.
*/
- if (p->mm && printk_ratelimit())
+ if (p->mm && printk_ratelimit()) {
printk(KERN_INFO "process %d (%s) no "
"longer affine to cpu%d\n",
- task_pid_nr(p), p->comm, dead_cpu);
+ task_pid_nr(p), p->comm, dead_cpu);
+ }
}
} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
}
@@ -5346,7 +5351,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
/*
* Drop lock around migration; if someone else moves it,
- * that's OK. No task can be added to this CPU, so iteration is
+ * that's OK. No task can be added to this CPU, so iteration is
* fine.
*/
spin_unlock_irq(&rq->lock);
@@ -5410,7 +5415,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
/*
* In the intermediate directories, both the child directory and
* procname are dynamically allocated and could fail but the mode
- * will always be set. In the lowest directory the names are
+ * will always be set. In the lowest directory the names are
* static strings and all have proc handlers.
*/
for (entry = *tablep; entry->mode; entry++) {
@@ -5581,7 +5586,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_UP_CANCELED_FROZEN:
if (!cpu_rq(cpu)->migration_thread)
break;
- /* Unbind it from offline cpu so it can run. Fall thru. */
+ /* Unbind it from offline cpu so it can run. Fall thru. */
kthread_bind(cpu_rq(cpu)->migration_thread,
any_online_cpu(cpu_online_map));
kthread_stop(cpu_rq(cpu)->migration_thread);
@@ -5608,9 +5613,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
- /* No need to migrate the tasks: it was best-effort if
- * they didn't take sched_hotcpu_mutex. Just wake up
- * the requestors. */
+ /*
+ * No need to migrate the tasks: it was best-effort if
+ * they didn't take sched_hotcpu_mutex. Just wake up
+ * the requestors.
+ */
spin_lock_irq(&rq->lock);
while (!list_empty(&rq->migration_queue)) {
struct migration_req *req;
@@ -5918,7 +5925,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
* @node: node whose sched_domain we're building
* @used_nodes: nodes already in the sched_domain
*
- * Find the next node to include in a given scheduling domain. Simply
+ * Find the next node to include in a given scheduling domain. Simply
* finds the closest node not already in the @used_nodes map.
*
* Should use nodemask_t.
@@ -5958,7 +5965,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
* @node: node whose cpumask we're constructing
* @size: number of nodes to include in this span
*
- * Given a node, construct a good cpumask for its sched_domain to span. It
+ * Given a node, construct a good cpumask for its sched_domain to span. It
* should be one that prevents unnecessary balancing, but also spreads tasks
* out optimally.
*/
@@ -5995,8 +6002,8 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
-static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+static int
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
if (sg)
*sg = &per_cpu(sched_group_cpus, cpu);
@@ -6013,8 +6020,8 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
#endif
#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
int group;
cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
@@ -6025,8 +6032,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
return group;
}
#elif defined(CONFIG_SCHED_MC)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
if (sg)
*sg = &per_cpu(sched_group_core, cpu);
@@ -6037,8 +6044,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
-static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+static int
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
int group;
#ifdef CONFIG_SCHED_MC
@@ -6218,7 +6225,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
* Allocate the per-node list of sched groups
*/
sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!sched_group_nodes) {
printk(KERN_WARNING "Can not alloc sched group node list\n");
return -ENOMEM;
@@ -6465,7 +6472,7 @@ static int ndoms_cur; /* number of sched domains in 'doms_cur' */
static cpumask_t fallback_doms;
/*
- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
+ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
* For now this just excludes isolated cpus, but could be used to
* exclude other special cases in the future.
*/
@@ -6507,19 +6514,19 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
/*
* Partition sched domains as specified by the 'ndoms_new'
- * cpumasks in the array doms_new[] of cpumasks. This compares
+ * cpumasks in the array doms_new[] of cpumasks. This compares
* doms_new[] to the current sched domain partitioning, doms_cur[].
* It destroys each deleted domain and builds each new domain.
*
* 'doms_new' is an array of cpumask_t's of length 'ndoms_new'.
- * The masks don't intersect (don't overlap.) We should setup one
- * sched domain for each mask. CPUs not in any of the cpumasks will
- * not be load balanced. If the same cpumask appears both in the
+ * The masks don't intersect (don't overlap.) We should setup one
+ * sched domain for each mask. CPUs not in any of the cpumasks will
+ * not be load balanced. If the same cpumask appears both in the
* current 'doms_cur' domains and in the new 'doms_new', we can leave
* it as it is.
*
- * The passed in 'doms_new' should be kmalloc'd. This routine takes
- * ownership of it and will kfree it when done with it. If the caller
+ * The passed in 'doms_new' should be kmalloc'd. This routine takes
+ * ownership of it and will kfree it when done with it. If the caller
* failed the kmalloc call, then it can pass in doms_new == NULL,
* and partition_sched_domains() will fallback to the single partition
* 'fallback_doms'.
@@ -6649,7 +6656,7 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
#endif
/*
- * Force a reinitialization of the sched domains hierarchy. The domains
+ * Force a reinitialization of the sched domains hierarchy. The domains
* and groups cannot be updated in place without racing with the balancing
* code, so we temporarily attach all running cpus to the NULL domain
* which will prevent rebalancing while the sched domains are recalculated.
@@ -6939,8 +6946,8 @@ struct task_struct *curr_task(int cpu)
* @p: the task pointer to set.
*
* Description: This function must only be used when non-maskable interrupts
- * are serviced on a separate stack. It allows the architecture to switch the
- * notion of the current task on a cpu in a non-blocking manner. This function
+ * are serviced on a separate stack. It allows the architecture to switch the
+ * notion of the current task on a cpu in a non-blocking manner. This function
* must be called with all CPU's synchronized, and interrupts disabled, the
* and caller must save the original value of the current task (see
* curr_task() above) and restore that value before reenabling interrupts and
@@ -7189,16 +7196,17 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
return &tg->css;
}
-static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
- struct cgroup *cgrp)
+static void
+cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct task_group *tg = cgroup_tg(cgrp);
sched_destroy_group(tg);
}
-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
- struct cgroup *cgrp, struct task_struct *tsk)
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct task_struct *tsk)
{
/* We don't support RT-tasks being in separate groups */
if (tsk->sched_class != &fair_sched_class)
@@ -7304,8 +7312,8 @@ static struct cgroup_subsys_state *cpuacct_create(
}
/* destroy an existing cpu accounting group */
-static void cpuacct_destroy(struct cgroup_subsys *ss,
- struct cgroup *cont)
+static void
+cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
struct cpuacct *ca = cgroup_ca(cont);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 37bb265598d..c33f0ceb3de 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -799,8 +799,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
*/
static void yield_task_fair(struct rq *rq)
{
- struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
- struct sched_entity *rightmost, *se = &rq->curr->se;
+ struct task_struct *curr = rq->curr;
+ struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+ struct sched_entity *rightmost, *se = &curr->se;
/*
* Are we the only task in the tree?
@@ -808,7 +809,7 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(cfs_rq->nr_running == 1))
return;
- if (likely(!sysctl_sched_compat_yield)) {
+ if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
__update_rq_clock(rq);
/*
* Update run-time statistics of the 'current'.
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0deed82a615..8ac51714b08 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1588,6 +1588,10 @@ struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
void unregister_sysctl_table(struct ctl_table_header * header)
{
might_sleep();
+
+ if (header == NULL)
+ return;
+
spin_lock(&sysctl_lock);
start_unregistering(header);
spin_unlock(&sysctl_lock);
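
This makes unregister_sysctl_table() tolerate a NULL header the way kfree(NULL) is tolerated, so callers can unwind unconditionally; the ip_vs error paths later in this patch rely on that, since register_sysctl_table() can hand them back NULL on failure. A sketch of the convention with a stand-in header type:

#include <stdio.h>
#include <stdlib.h>

struct ctl_header { int id; };

static void unregister_header(struct ctl_header *h)
{
        if (h == NULL)
                return;         /* registration never happened: no-op */
        printf("unregistering %d\n", h->id);
        free(h);
}

int main(void)
{
        struct ctl_header *h = malloc(sizeof(*h));

        if (h)
                h->id = 1;
        unregister_header(h);    /* normal teardown */
        unregister_header(NULL); /* failed-registration path: harmless */
        return 0;
}
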
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 6972f26c65f..bed939f82c3 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -96,7 +96,7 @@ static struct trans_ctl_table trans_kern_table[] = {
{ KERN_PTY, "pty", trans_pty_table },
{ KERN_NGROUPS_MAX, "ngroups_max" },
- { KERN_SPARC_SCONS_PWROFF, "scons_poweroff" },
+ { KERN_SPARC_SCONS_PWROFF, "scons-poweroff" },
{ KERN_HZ_TIMER, "hz_timer" },
{ KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" },
{ KERN_BOOTLOADER_TYPE, "bootloader_type" },
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index b0ceb29da4c..e8644b1e552 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -7,7 +7,7 @@
int bdi_init(struct backing_dev_info *bdi)
{
- int i, j;
+ int i;
int err;
for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
@@ -21,7 +21,7 @@ int bdi_init(struct backing_dev_info *bdi)
if (err) {
err:
- for (j = 0; j < i; j++)
+ while (i--)
percpu_counter_destroy(&bdi->bdi_stat[i]);
}
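
Besides dropping the unused j, this fixes the unwind itself: the old loop destroyed bdi_stat[i] (the entry whose init failed) i times over, never touching the counters that had actually been initialized. while (i--) walks back over exactly those entries, each once. A self-contained illustration with a fake counter whose third init fails:

#include <stdio.h>

#define NR_ITEMS 4

static int init_counter(int idx)
{
        return idx == 2 ? -1 : 0;       /* pretend the third init fails */
}

static void destroy_counter(int idx)
{
        printf("destroy %d\n", idx);
}

int main(void)
{
        int i, err = 0;

        for (i = 0; i < NR_ITEMS; i++) {
                err = init_counter(i);
                if (err)
                        break;
        }
        if (err) {
                while (i--)             /* unwinds 1, then 0 */
                        destroy_counter(i);
        }
        return err ? 1 : 0;
}
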
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 32132f3cd64..e233fff61b4 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -314,7 +314,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
fault_in_pages_readable(buf, bytes);
kaddr = kmap_atomic(page, KM_USER0);
copied = bytes -
- __copy_from_user_inatomic_nocache(kaddr, buf, bytes);
+ __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
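
The one-character fix matters: the destination must be kaddr + offset, the position within the page where the write begins, or the copied bytes clobber the head of the page while the intended range keeps its old contents. The arithmetic, reduced to a plain memcpy on a fake page:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

int main(void)
{
        char page[PAGE_SIZE];
        const char *buf = "new data";
        size_t offset = 100;            /* write position within the page */
        size_t bytes = strlen(buf);

        memset(page, 'A', sizeof(page));
        memcpy(page + offset, buf, bytes);      /* fixed form: base + offset */

        printf("%.4s ... %.8s\n", page, page + offset); /* AAAA ... new data */
        return 0;
}
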
diff --git a/mm/mmap.c b/mm/mmap.c
index facc1a75bd4..15678aa6ec7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -912,6 +912,9 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
if (!len)
return -EINVAL;
+ if (!(flags & MAP_FIXED))
+ addr = round_hint_to_min(addr);
+
error = arch_mmap_check(addr, len, flags);
if (error)
return error;
@@ -1615,6 +1618,12 @@ static inline int expand_downwards(struct vm_area_struct *vma,
*/
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
+
+ address &= PAGE_MASK;
+ error = security_file_mmap(0, 0, 0, 0, address, 1);
+ if (error)
+ return error;
+
anon_vma_lock(vma);
/*
@@ -1622,8 +1631,6 @@ static inline int expand_downwards(struct vm_area_struct *vma,
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
- address &= PAGE_MASK;
- error = 0;
/* Somebody else might have raced and expanded it already */
if (address < vma->vm_start) {
@@ -1934,6 +1941,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
if (is_hugepage_only_range(mm, addr, len))
return -EINVAL;
+ error = security_file_mmap(0, 0, 0, 0, addr, 1);
+ if (error)
+ return error;
+
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
error = arch_mmap_check(addr, len, flags);
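
round_hint_to_min() itself is outside the visible hunks; presumably it lifts a non-NULL placement hint that falls below mmap_min_addr up to the first permitted address, so a hint-only mmap can no longer steer the mapping into the protected low pages. A plausible user-space sketch under that assumption (the mmap_min_addr value is invented):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static unsigned long mmap_min_addr = 0x10000;   /* invented setting */

static unsigned long round_hint_to_min(unsigned long hint)
{
        hint &= PAGE_MASK;
        if (hint != 0 && hint < mmap_min_addr)
                return PAGE_ALIGN(mmap_min_addr);
        return hint;
}

int main(void)
{
        printf("%#lx\n", round_hint_to_min(0x1000));  /* 0x10000: lifted */
        printf("%#lx\n", round_hint_to_min(0));       /* 0: kernel chooses */
        printf("%#lx\n", round_hint_to_min(0x20000)); /* unchanged */
        return 0;
}
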
diff --git a/mm/nommu.c b/mm/nommu.c
index 35622c59092..b989cb928a7 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -829,6 +829,9 @@ unsigned long do_mmap_pgoff(struct file *file,
void *result;
int ret;
+ if (!(flags & MAP_FIXED))
+ addr = round_hint_to_min(addr);
+
/* decide whether we should attempt the mapping, and if so what sort of
* mapping */
ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
diff --git a/mm/slab.c b/mm/slab.c
index 202465a193c..2e338a5f7b1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4475,3 +4475,4 @@ size_t ksize(const void *objp)
return obj_size(virt_to_cache(objp));
}
+EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
index 08a9bd91a1a..ee2ef8af0d4 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -495,6 +495,7 @@ size_t ksize(const void *block)
else
return sp->page.private;
}
+EXPORT_SYMBOL(ksize);
struct kmem_cache {
unsigned int size, align;
diff --git a/mm/slub.c b/mm/slub.c
index 9acb413858a..b9f37cb0f2e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2558,8 +2558,12 @@ size_t ksize(const void *object)
if (unlikely(object == ZERO_SIZE_PTR))
return 0;
- page = get_object_page(object);
+ page = virt_to_head_page(object);
BUG_ON(!page);
+
+ if (unlikely(!PageSlab(page)))
+ return PAGE_SIZE << compound_order(page);
+
s = page->slab;
BUG_ON(!s);
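
With this, ksize() on SLUB copes with kmalloc allocations too large for any slab: those come straight from the page allocator as compound pages, and their usable size is the whole compound allocation. The fallback arithmetic in isolation:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long compound_size(unsigned int order)
{
        return PAGE_SIZE << order;      /* 2^order contiguous pages */
}

int main(void)
{
        printf("%lu\n", compound_size(0)); /* 4096: single page */
        printf("%lu\n", compound_size(3)); /* 32768: 8-page compound */
        return 0;
}
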
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index ac3b1d3dba2..9a96c277393 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -401,10 +401,11 @@ static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
int data_len = len;
int hdr_len = min(len, hlen);
- skb = netdev_alloc_skb(lro_mgr->dev, hlen);
+ skb = netdev_alloc_skb(lro_mgr->dev, hlen + lro_mgr->frag_align_pad);
if (!skb)
return NULL;
+ skb_reserve(skb, lro_mgr->frag_align_pad);
skb->len = len;
skb->data_len = len - hdr_len;
skb->truesize += true_size;
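
The allocation now includes the driver-requested padding, and skb_reserve() consumes it so the aggregated packet's payload starts at the alignment the driver asked for (frag_align_pad plays the role NET_IP_ALIGN usually does). A toy model of the reserve idiom with a fake skb:

#include <stdio.h>
#include <stdlib.h>

struct fake_skb {
        unsigned char *head;    /* start of the buffer */
        unsigned char *data;    /* start of the payload */
};

static struct fake_skb *alloc_fake_skb(size_t len)
{
        struct fake_skb *skb = malloc(sizeof(*skb));

        if (!skb)
                return NULL;
        skb->head = malloc(len);
        skb->data = skb->head;
        return skb;
}

static void reserve_headroom(struct fake_skb *skb, size_t pad)
{
        skb->data += pad;       /* payload now starts pad bytes in */
}

int main(void)
{
        size_t hlen = 128, frag_align_pad = 2;  /* invented values */
        struct fake_skb *skb = alloc_fake_skb(hlen + frag_align_pad);

        if (!skb)
                return 1;
        if (!skb->head) {
                free(skb);
                return 1;
        }
        reserve_headroom(skb, frag_align_pad);
        printf("headroom: %td\n", skb->data - skb->head); /* 2 */
        free(skb->head);
        free(skb);
        return 0;
}
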
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index b843a11d7cf..ad89644ef5d 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -580,9 +580,14 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
static int __init ip_vs_lblc_init(void)
{
+ int ret;
+
INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
sysctl_header = register_sysctl_table(lblc_root_table);
- return register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
+ ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
+ if (ret)
+ unregister_sysctl_table(sysctl_header);
+ return ret;
}
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index e5b323a6b2f..2a5ed85a335 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -769,9 +769,14 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
static int __init ip_vs_lblcr_init(void)
{
+ int ret;
+
INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
sysctl_header = register_sysctl_table(lblcr_root_table);
- return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+ ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+ if (ret)
+ unregister_sysctl_table(sysctl_header);
+ return ret;
}
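
Both ip_vs schedulers get the same init-time rollback: if registering the scheduler fails, the sysctl table registered a moment earlier is torn down before the error is returned, instead of being leaked on a failed module load. The pattern in miniature:

#include <stdio.h>

static int register_table(void)     { printf("sysctl table up\n"); return 0; }
static void unregister_table(void)  { printf("sysctl table rolled back\n"); }
static int register_sched(int fail) { return fail ? -1 : 0; }

static int sched_module_init(int sched_fails)
{
        int ret;

        ret = register_table();
        if (ret)
                return ret;

        ret = register_sched(sched_fails);
        if (ret)
                unregister_table();     /* undo step 1 before bailing out */
        return ret;
}

int main(void)
{
        sched_module_init(1);   /* registers, fails, rolls back */
        return 0;
}
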
diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c
index 1602304abbf..43223586190 100644
--- a/net/ipv4/ipvs/ip_vs_sched.c
+++ b/net/ipv4/ipvs/ip_vs_sched.c
@@ -183,19 +183,6 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
/* increase the module use count */
ip_vs_use_count_inc();
- /*
- * Make sure that the scheduler with this name doesn't exist
- * in the scheduler list.
- */
- sched = ip_vs_sched_getbyname(scheduler->name);
- if (sched) {
- ip_vs_scheduler_put(sched);
- ip_vs_use_count_dec();
- IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
- "already existed in the system\n", scheduler->name);
- return -EINVAL;
- }
-
write_lock_bh(&__ip_vs_sched_lock);
if (scheduler->n_list.next != &scheduler->n_list) {
@@ -207,6 +194,20 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
}
/*
+ * Make sure that the scheduler with this name doesn't exist
+ * in the scheduler list.
+ */
+ list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
+ if (strcmp(scheduler->name, sched->name) == 0) {
+ write_unlock_bh(&__ip_vs_sched_lock);
+ ip_vs_use_count_dec();
+ IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
+ "already existed in the system\n",
+ scheduler->name);
+ return -EINVAL;
+ }
+ }
+ /*
* Add it into the d-linked scheduler list
*/
list_add(&scheduler->n_list, &ip_vs_schedulers);
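
Moving the duplicate-name scan inside the write lock closes a race: with the check done lock-free up front, two concurrent registrations of the same name could both pass it and both be added. Lookup and insert now happen atomically under __ip_vs_sched_lock. A small user-space analogue where a mutex stands in for the write lock:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_SCHEDS 8

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *sched_names[MAX_SCHEDS];
static int sched_count;

static int register_name(const char *name)
{
        int i, ret = 0;

        pthread_mutex_lock(&sched_lock);
        for (i = 0; i < sched_count; i++) {     /* scan under the lock */
                if (strcmp(sched_names[i], name) == 0) {
                        ret = -1;               /* already registered */
                        break;
                }
        }
        if (ret == 0 && sched_count < MAX_SCHEDS)
                sched_names[sched_count++] = name;  /* insert atomically */
        pthread_mutex_unlock(&sched_lock);
        return ret;
}

int main(void)
{
        printf("%d\n", register_name("lblc")); /* 0 */
        printf("%d\n", register_name("lblc")); /* -1: duplicate */
        return 0;
}
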
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0f0c1c9829a..b9e429d2d1d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3003,17 +3003,13 @@ static int tcp_process_frto(struct sock *sk, int flag)
}
if (tp->frto_counter == 1) {
- /* Sending of the next skb must be allowed or no F-RTO */
- if (!tcp_send_head(sk) ||
- after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
- tp->snd_una + tp->snd_wnd)) {
- tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3),
- flag);
- return 1;
- }
-
+ /* tcp_may_send_now needs to see updated state */
tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
tp->frto_counter = 2;
+
+ if (!tcp_may_send_now(sk))
+ tcp_enter_frto_loss(sk, 2, flag);
+
return 1;
} else {
switch (sysctl_tcp_frto_response) {
@@ -3069,6 +3065,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
}
prior_fackets = tp->fackets_out;
+ prior_in_flight = tcp_packets_in_flight(tp);
if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
/* Window is constant, pure forward advance.
@@ -3108,8 +3105,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
if (!prior_packets)
goto no_queue;
- prior_in_flight = tcp_packets_in_flight(tp);
-
/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e5130a7fe18..f4c1eef89af 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1162,8 +1162,7 @@ int tcp_may_send_now(struct sock *sk)
return (skb &&
tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
(tcp_skb_is_last(sk, skb) ?
- TCP_NAGLE_PUSH :
- tp->nonagle)));
+ tp->nonagle : TCP_NAGLE_PUSH)));
}
/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
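
The swapped ternary arms were a logic inversion: the old code forced out the last queued segment and applied the socket's Nagle setting to segments with more data behind them, exactly backwards. Only the final (possibly partial) segment should be subject to Nagle; anything with queued successors may be pushed. A sketch of the corrected selection (flag values chosen for the sketch):

#include <stdio.h>

#define TCP_NAGLE_OFF   1
#define TCP_NAGLE_PUSH  4

static int nagle_flags(int skb_is_last, int sock_nonagle)
{
        /* last segment: honor the socket's Nagle setting;
         * more data queued behind it: push immediately */
        return skb_is_last ? sock_nonagle : TCP_NAGLE_PUSH;
}

int main(void)
{
        printf("%d\n", nagle_flags(1, 0));             /* 0: Nagle applies */
        printf("%d\n", nagle_flags(0, 0));             /* 4: push */
        printf("%d\n", nagle_flags(1, TCP_NAGLE_OFF)); /* 1: Nagle disabled */
        return 0;
}
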
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 1120b150e21..be627e1f04d 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -1245,6 +1245,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap,
self->flow = cmd;
}
+#ifdef CONFIG_PROC_FS
static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
{
int ret=0;
@@ -1354,7 +1355,6 @@ static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf)
*
*
*/
-#ifdef CONFIG_PROC_FS
static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len,
int *eof, void *unused)
{
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 1b6741f1d74..12cfcf09556 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -55,13 +55,13 @@ static int rose_header(struct sk_buff *skb, struct net_device *dev,
static int rose_rebuild_header(struct sk_buff *skb)
{
+#ifdef CONFIG_INET
struct net_device *dev = skb->dev;
struct net_device_stats *stats = netdev_priv(dev);
unsigned char *bp = (unsigned char *)skb->data;
struct sk_buff *skbn;
unsigned int len;
-#ifdef CONFIG_INET
if (arp_find(bp + 7, skb)) {
return 1;
}
diff --git a/security/dummy.c b/security/dummy.c
index 6d895ade73d..3ccfbbe973b 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -426,7 +426,7 @@ static int dummy_file_mmap (struct file *file, unsigned long reqprot,
unsigned long addr,
unsigned long addr_only)
{
- if (addr < mmap_min_addr)
+ if ((addr < mmap_min_addr) && !capable(CAP_SYS_RAWIO))
return -EACCES;
return 0;
}
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index f5f3e6da5da..2fa483f2611 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -65,6 +65,7 @@ static DEFINE_MUTEX(sel_mutex);
/* global data for booleans */
static struct dentry *bool_dir = NULL;
static int bool_num = 0;
+static char **bool_pending_names;
static int *bool_pending_values = NULL;
/* global data for classes */
@@ -832,15 +833,16 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
ssize_t length;
ssize_t ret;
int cur_enforcing;
- struct inode *inode;
+ struct inode *inode = filep->f_path.dentry->d_inode;
+ unsigned index = inode->i_ino & SEL_INO_MASK;
+ const char *name = filep->f_path.dentry->d_name.name;
mutex_lock(&sel_mutex);
- ret = -EFAULT;
-
- /* check to see if this file has been deleted */
- if (!filep->f_op)
+ if (index >= bool_num || strcmp(name, bool_pending_names[index])) {
+ ret = -EINVAL;
goto out;
+ }
if (count > PAGE_SIZE) {
ret = -EINVAL;
@@ -851,15 +853,13 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
goto out;
}
- inode = filep->f_path.dentry->d_inode;
- cur_enforcing = security_get_bool_value(inode->i_ino&SEL_INO_MASK);
+ cur_enforcing = security_get_bool_value(index);
if (cur_enforcing < 0) {
ret = cur_enforcing;
goto out;
}
-
length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
- bool_pending_values[inode->i_ino&SEL_INO_MASK]);
+ bool_pending_values[index]);
ret = simple_read_from_buffer(buf, count, ppos, page, length);
out:
mutex_unlock(&sel_mutex);
@@ -872,9 +872,11 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
size_t count, loff_t *ppos)
{
char *page = NULL;
- ssize_t length = -EFAULT;
+ ssize_t length;
int new_value;
- struct inode *inode;
+ struct inode *inode = filep->f_path.dentry->d_inode;
+ unsigned index = inode->i_ino & SEL_INO_MASK;
+ const char *name = filep->f_path.dentry->d_name.name;
mutex_lock(&sel_mutex);
@@ -882,16 +884,19 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
if (length)
goto out;
- /* check to see if this file has been deleted */
- if (!filep->f_op)
+ if (index >= bool_num || strcmp(name, bool_pending_names[index])) {
+ length = -EINVAL;
goto out;
+ }
if (count >= PAGE_SIZE) {
length = -ENOMEM;
goto out;
}
+
if (*ppos != 0) {
/* No partial writes. */
+ length = -EINVAL;
goto out;
}
page = (char*)get_zeroed_page(GFP_KERNEL);
@@ -900,6 +905,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
goto out;
}
+ length = -EFAULT;
if (copy_from_user(page, buf, count))
goto out;
@@ -910,8 +916,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
if (new_value)
new_value = 1;
- inode = filep->f_path.dentry->d_inode;
- bool_pending_values[inode->i_ino&SEL_INO_MASK] = new_value;
+ bool_pending_values[index] = new_value;
length = count;
out:
@@ -931,7 +936,7 @@ static ssize_t sel_commit_bools_write(struct file *filep,
size_t count, loff_t *ppos)
{
char *page = NULL;
- ssize_t length = -EFAULT;
+ ssize_t length;
int new_value;
mutex_lock(&sel_mutex);
@@ -940,10 +945,6 @@ static ssize_t sel_commit_bools_write(struct file *filep,
if (length)
goto out;
- /* check to see if this file has been deleted */
- if (!filep->f_op)
- goto out;
-
if (count >= PAGE_SIZE) {
length = -ENOMEM;
goto out;
@@ -958,6 +959,7 @@ static ssize_t sel_commit_bools_write(struct file *filep,
goto out;
}
+ length = -EFAULT;
if (copy_from_user(page, buf, count))
goto out;
@@ -982,11 +984,9 @@ static const struct file_operations sel_commit_bools_ops = {
.write = sel_commit_bools_write,
};
-/* partial revoke() from fs/proc/generic.c proc_kill_inodes */
static void sel_remove_entries(struct dentry *de)
{
- struct list_head *p, *node;
- struct super_block *sb = de->d_sb;
+ struct list_head *node;
spin_lock(&dcache_lock);
node = de->d_subdirs.next;
@@ -1006,18 +1006,6 @@ static void sel_remove_entries(struct dentry *de)
}
spin_unlock(&dcache_lock);
-
- file_list_lock();
- list_for_each(p, &sb->s_files) {
- struct file * filp = list_entry(p, struct file, f_u.fu_list);
- struct dentry * dentry = filp->f_path.dentry;
-
- if (dentry->d_parent != de) {
- continue;
- }
- filp->f_op = NULL;
- }
- file_list_unlock();
}
#define BOOL_DIR_NAME "booleans"
@@ -1036,7 +1024,9 @@ static int sel_make_bools(void)
u32 sid;
/* remove any existing files */
+ kfree(bool_pending_names);
kfree(bool_pending_values);
+ bool_pending_names = NULL;
bool_pending_values = NULL;
sel_remove_entries(dir);
@@ -1078,16 +1068,17 @@ static int sel_make_bools(void)
d_add(dentry, inode);
}
bool_num = num;
+ bool_pending_names = names;
bool_pending_values = values;
out:
free_page((unsigned long)page);
+ return ret;
+err:
if (names) {
for (i = 0; i < num; i++)
kfree(names[i]);
kfree(names);
}
- return ret;
-err:
kfree(values);
sel_remove_entries(dir);
ret = -ENOMEM;
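
Taken together, the selinuxfs changes drop the proc_kill_inodes-style revoke (nulling f_op on open files) in favor of validating a boolean file on each read or write: the inode's index must still fall within the currently loaded policy, and the name recorded for that slot must match the dentry. A sketch of that check, with example boolean names:

#include <stdio.h>
#include <string.h>

static const char *bool_names[] = { "bool_a", "bool_b" };  /* example names */
static const unsigned bool_num = 2;

static int bool_file_is_stale(unsigned index, const char *dentry_name)
{
        return index >= bool_num ||
               strcmp(dentry_name, bool_names[index]) != 0;
}

int main(void)
{
        printf("%d\n", bool_file_is_stale(0, "bool_a")); /* 0: still valid */
        printf("%d\n", bool_file_is_stale(1, "bool_a")); /* 1: name mismatch */
        printf("%d\n", bool_file_is_stale(7, "bool_a")); /* 1: beyond policy */
        return 0;
}
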