Diffstat (limited to 'arch/mips')
-rw-r--r-- arch/mips/Kconfig | 35
-rw-r--r-- arch/mips/au1000/common/gpio.c | 6
-rw-r--r-- arch/mips/emma2rh/markeins/setup.c | 16
-rw-r--r-- arch/mips/jazz/setup.c | 13
-rw-r--r-- arch/mips/kernel/.gitignore | 1
-rw-r--r-- arch/mips/kernel/Makefile | 1
-rw-r--r-- arch/mips/kernel/cevt-r4k.c | 173
-rw-r--r-- arch/mips/kernel/cevt-smtc.c | 321
-rw-r--r-- arch/mips/kernel/cpu-probe.c | 26
-rw-r--r-- arch/mips/kernel/entry.S | 10
-rw-r--r-- arch/mips/kernel/genex.S | 41
-rw-r--r-- arch/mips/kernel/kgdb.c | 10
-rw-r--r-- arch/mips/kernel/mips-mt-fpaff.c | 2
-rw-r--r-- arch/mips/kernel/process.c | 19
-rw-r--r-- arch/mips/kernel/ptrace.c | 2
-rw-r--r-- arch/mips/kernel/scall32-o32.S | 6
-rw-r--r-- arch/mips/kernel/scall64-64.S | 6
-rw-r--r-- arch/mips/kernel/scall64-n32.S | 6
-rw-r--r-- arch/mips/kernel/scall64-o32.S | 6
-rw-r--r-- arch/mips/kernel/setup.c | 75
-rw-r--r-- arch/mips/kernel/smtc.c | 260
-rw-r--r-- arch/mips/kernel/traps.c | 46
-rw-r--r-- arch/mips/kernel/vmlinux.lds.S | 1
-rw-r--r-- arch/mips/lib/csum_partial.S | 21
-rw-r--r-- arch/mips/mm/c-r3k.c | 1
-rw-r--r-- arch/mips/mm/c-r4k.c | 18
-rw-r--r-- arch/mips/mm/c-tx39.c | 2
-rw-r--r-- arch/mips/mm/cache.c | 1
-rw-r--r-- arch/mips/mm/tlbex.c | 6
-rw-r--r-- arch/mips/mti-malta/Makefile | 2
-rw-r--r-- arch/mips/mti-malta/malta-smtc.c | 9
-rw-r--r-- arch/mips/pci/Makefile | 1
-rw-r--r-- arch/mips/pci/pci-bcm47xx.c | 60
-rw-r--r-- arch/mips/pci/pci-ip27.c | 41
-rw-r--r-- arch/mips/rb532/devices.c | 16
-rw-r--r-- arch/mips/sgi-ip22/ip22-platform.c | 2
-rw-r--r-- arch/mips/txx9/generic/setup.c | 4
-rw-r--r-- arch/mips/vr41xx/common/irq.c | 6
38 files changed, 838 insertions, 434 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4da736e2533..c930b8ceb41 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1403,7 +1403,6 @@ config MIPS_MT_SMTC
depends on CPU_MIPS32_R2
#depends on CPU_MIPS64_R2 # once there is hardware ...
depends on SYS_SUPPORTS_MULTITHREADING
- select GENERIC_CLOCKEVENTS_BROADCAST
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select MIPS_MT
@@ -1451,32 +1450,17 @@ config MIPS_VPE_LOADER
Includes a loader for loading an elf relocatable object
onto another VPE and running it.
-config MIPS_MT_SMTC_INSTANT_REPLAY
- bool "Low-latency Dispatch of Deferred SMTC IPIs"
- depends on MIPS_MT_SMTC && !PREEMPT
- default y
- help
- SMTC pseudo-interrupts between TCs are deferred and queued
- if the target TC is interrupt-inhibited (IXMT). In the first
- SMTC prototypes, these queued IPIs were serviced on return
- to user mode, or on entry into the kernel idle loop. The
- INSTANT_REPLAY option dispatches them as part of local_irq_restore()
- processing, which adds runtime overhead (hence the option to turn
- it off), but ensures that IPIs are handled promptly even under
- heavy I/O interrupt load.
-
config MIPS_MT_SMTC_IM_BACKSTOP
bool "Use per-TC register bits as backstop for inhibited IM bits"
depends on MIPS_MT_SMTC
- default y
+ default n
help
To support multiple TC microthreads acting as "CPUs" within
a VPE, VPE-wide interrupt mask bits must be specially manipulated
during interrupt handling. To support legacy drivers and interrupt
controller management code, SMTC has a "backstop" to track and
if necessary restore the interrupt mask. This has some performance
- impact on interrupt service overhead. Disable it only if you know
- what you are doing.
+ impact on interrupt service overhead.
config MIPS_MT_SMTC_IRQAFF
bool "Support IRQ affinity API"
@@ -1486,10 +1470,8 @@ config MIPS_MT_SMTC_IRQAFF
Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
for SMTC Linux kernel. Requires platform support, of which
an example can be found in the MIPS kernel i8259 and Malta
- platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
- be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to
- interrupt dispatch, and should be used only if you know what
- you are doing.
+ platform code. Adds some overhead to interrupt dispatch, and
+ should be used only if you know what you are doing.
config MIPS_VPE_LOADER_TOM
bool "Load VPE program into memory hidden from linux"
@@ -1886,6 +1868,15 @@ config STACKTRACE_SUPPORT
source "init/Kconfig"
+config PROBE_INITRD_HEADER
+ bool "Probe initrd header created by addinitrd"
+ depends on BLK_DEV_INITRD
+ help
+	  Probe the initrd header at the last page of the kernel image.
+	  Say Y here if you are using arch/mips/boot/addinitrd.c to
+	  add an initrd or initramfs image to the kernel image.
+	  Otherwise, say N.
+
menu "Bus options (PCI, PCMCIA, EISA, ISA, TC)"
config HW_HAS_EISA
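A note on the PROBE_INITRD_HEADER option above and the matching init_initrd() hunk in arch/mips/kernel/setup.c further down: the header being probed is two 32-bit words at the end of the last page of the kernel image, the magic 0x494E5244 ("INRD" in ASCII) followed by the initrd size in bytes, with the initrd proper starting on the next page boundary. A minimal host-side sketch of producing such an image, assuming hypothetical file names and omitting the page-boundary padding and byte-order handling that the real arch/mips/boot/addinitrd.c performs:

    /* Sketch only: append an initrd with the 8-byte "INRD" header that
     * CONFIG_PROBE_INITRD_HEADER looks for. File names are made up,
     * host byte order is assumed, and page padding is omitted. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    int main(void)
    {
        FILE *in = fopen("initrd.img", "rb");    /* hypothetical input */
        FILE *out = fopen("vmlinux.bin", "ab");  /* hypothetical kernel image */
        uint32_t header[2] = { 0x494E5244, 0 };  /* magic "INRD", then size */
        uint8_t buf[4096];
        size_t n;

        if (!in || !out)
            return EXIT_FAILURE;
        fseek(in, 0, SEEK_END);
        header[1] = (uint32_t)ftell(in);         /* second word: initrd size */
        rewind(in);
        fwrite(header, sizeof(header), 1, out);  /* 8-byte header first */
        while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
            fwrite(buf, 1, n, out);              /* then the initrd payload */
        fclose(in);
        fclose(out);
        return 0;
    }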
diff --git a/arch/mips/au1000/common/gpio.c b/arch/mips/au1000/common/gpio.c
index b485d94ce8a..e660ddd611c 100644
--- a/arch/mips/au1000/common/gpio.c
+++ b/arch/mips/au1000/common/gpio.c
@@ -48,7 +48,7 @@ static void au1xxx_gpio2_write(unsigned gpio, int value)
{
gpio -= AU1XXX_GPIO_BASE;
- gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | (value << gpio);
+ gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio);
}
static int au1xxx_gpio2_direction_input(unsigned gpio)
@@ -61,7 +61,8 @@ static int au1xxx_gpio2_direction_input(unsigned gpio)
static int au1xxx_gpio2_direction_output(unsigned gpio, int value)
{
gpio -= AU1XXX_GPIO_BASE;
- gpio2->dir = (0x01 << gpio) | (value << gpio);
+ gpio2->dir |= 0x01 << gpio;
+ gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio);
return 0;
}
@@ -90,6 +91,7 @@ static int au1xxx_gpio1_direction_input(unsigned gpio)
static int au1xxx_gpio1_direction_output(unsigned gpio, int value)
{
gpio1->trioutclr = (0x01 & gpio);
+ au1xxx_gpio1_write(gpio, value);
return 0;
}
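The gpio.c fixes above hinge on the layout of the GPIO2 output register: an enable bit in the upper half selects which pin is written and the data bit lands in the lower half, so the value must be squashed to exactly 0 or 1 before shifting. A standalone sketch (the mask value here is assumed for illustration):

    /* Sketch of why the patch adds (!!value): callers may pass any
     * non-zero int to mean "drive high", and a raw (value << gpio)
     * would spill bits onto unrelated pins. */
    #include <assert.h>
    #include <stdint.h>

    #define GPIO2_OUTPUT_ENABLE_MASK 0x10000u  /* assumed for the sketch */

    static uint32_t gpio2_output_word(unsigned gpio, int value)
    {
        return (GPIO2_OUTPUT_ENABLE_MASK << gpio) |
               ((uint32_t)!!value << gpio);    /* normalize to 0 or 1 */
    }

    int main(void)
    {
        /* value 0x40 means "high"; without !! it would set pin 9, not pin 3 */
        assert(gpio2_output_word(3, 0x40) == ((0x10000u << 3) | (1u << 3)));
        assert(gpio2_output_word(3, 0) == (0x10000u << 3));
        return 0;
    }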
diff --git a/arch/mips/emma2rh/markeins/setup.c b/arch/mips/emma2rh/markeins/setup.c
index 822a20e21fa..b6a23ad539f 100644
--- a/arch/mips/emma2rh/markeins/setup.c
+++ b/arch/mips/emma2rh/markeins/setup.c
@@ -25,23 +25,9 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/initrd.h>
-#include <linux/irq.h>
-#include <linux/ioport.h>
-#include <linux/param.h> /* for HZ */
-#include <linux/root_dev.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/addrspace.h>
+
#include <asm/time.h>
-#include <asm/bcache.h>
-#include <asm/irq.h>
#include <asm/reboot.h>
-#include <asm/traps.h>
-#include <asm/debug.h>
#include <asm/emma2rh/emma2rh.h>
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index b59ba6b93cd..7043f6b9ff3 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -5,33 +5,22 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 1997, 1998, 2001, 07 by Ralf Baechle
+ * Copyright (C) 1996, 1997, 1998, 2001, 07, 08 by Ralf Baechle
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2007 by Thomas Bogendoerfer
*/
#include <linux/eisa.h>
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
#include <linux/console.h>
-#include <linux/fb.h>
-#include <linux/pm.h>
#include <linux/screen_info.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
#include <asm/reboot.h>
-#include <asm/io.h>
#include <asm/pgtable.h>
-#include <asm/time.h>
-#include <asm/traps.h>
-#include <asm/mc146818-time.h>
extern asmlinkage void jazz_handle_int(void);
diff --git a/arch/mips/kernel/.gitignore b/arch/mips/kernel/.gitignore
new file mode 100644
index 00000000000..c5f676c3c22
--- /dev/null
+++ b/arch/mips/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 706f9397479..25775cb5400 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
+obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0..4a4c59f2737 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,6 +12,14 @@
#include <asm/smtc_ipi.h>
#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * The SMTC Kernel for the 34K, 1004K, et al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+
+#ifndef CONFIG_MIPS_MT_SMTC
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
unsigned int cnt;
int res;
-#ifdef CONFIG_MIPS_MT_SMTC
- {
- unsigned long flags, vpflags;
- local_irq_save(flags);
- vpflags = dvpe();
-#endif
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
- evpe(vpflags);
- local_irq_restore(flags);
- }
-#endif
return res;
}
-static void mips_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
{
/* Nothing to do ... */
}
-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
- write_c0_compare(read_c0_compare());
-}
+#ifndef CONFIG_MIPS_MT_SMTC
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
- /*
- * The performance counter overflow interrupt may be shared with the
- * timer interrupt (cp0_perfcount_irq < 0). If it is and a
- * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
- * and we can't reliably determine if a counter interrupt has also
- * happened (!r2) then don't check for a timer interrupt.
- */
- return (cp0_perfcount_irq < 0) &&
- perf_irq() == IRQ_HANDLED &&
- !r2;
-}
-
-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
const int r2 = cpu_has_mips_r2;
struct clock_event_device *cd;
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
* interrupt. Being the paranoiacs we are we check anyway.
*/
if (!r2 || (read_c0_cause() & (1 << 30))) {
- c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
- if (cpu_data[cpu].vpe_id)
- goto out;
- cpu = 0;
-#endif
+ /* Clear Count/Compare Interrupt */
+ write_c0_compare(read_c0_compare());
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
}
@@ -107,65 +78,16 @@ out:
return IRQ_HANDLED;
}
-static struct irqaction c0_compare_irqaction = {
+#endif /* Not CONFIG_MIPS_MT_SMTC */
+
+struct irqaction c0_compare_irqaction = {
.handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
- .flags = IRQF_DISABLED,
-#else
.flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
.name = "timer",
};
-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
-}
-
-static void mips_broadcast(cpumask_t mask)
-{
- unsigned int cpu;
-
- for_each_cpu_mask(cpu, mask)
- smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
- //uint64_t mips_freq = mips_hpt_^frequency;
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *cd;
- cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
- cd->name = "SMTC";
- cd->features = CLOCK_EVT_FEAT_DUMMY;
-
- /* Calculate the min / max delta */
- cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
- cd->shift = 0; //32;
- cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
- cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
-
- cd->rating = 200;
- cd->irq = 17; //-1;
-// if (cpu)
-// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-// else
- cd->cpumask = cpumask_of_cpu(cpu);
-
- cd->set_mode = smtc_set_mode;
-
- cd->broadcast = mips_broadcast;
-
- clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
+void mips_event_handler(struct clock_event_device *dev)
{
}
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
return (read_c0_cause() >> cp0_compare_irq) & 0x100;
}
-static int c0_compare_int_usable(void)
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register. 4 times the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
+ */
+
+#define compare_change_hazard() \
+ do { \
+ irq_disable_hazard(); \
+ irq_disable_hazard(); \
+ irq_disable_hazard(); \
+ irq_disable_hazard(); \
+ } while (0)
+
+int c0_compare_int_usable(void)
{
unsigned int delta;
unsigned int cnt;
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
*/
if (c0_compare_int_pending()) {
write_c0_compare(read_c0_count());
- irq_disable_hazard();
+ compare_change_hazard();
if (c0_compare_int_pending())
return 0;
}
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
- irq_disable_hazard();
+ compare_change_hazard();
if ((int)(read_c0_count() - cnt) < 0)
break;
/* increase delta if the timer was already expired */
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
while ((int)(read_c0_count() - cnt) <= 0)
; /* Wait for expiry */
+ compare_change_hazard();
if (!c0_compare_int_pending())
return 0;
write_c0_compare(read_c0_count());
- irq_disable_hazard();
+ compare_change_hazard();
if (c0_compare_int_pending())
return 0;
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
return 1;
}
+#ifndef CONFIG_MIPS_MT_SMTC
+
int __cpuinit mips_clockevent_init(void)
{
uint64_t mips_freq = mips_hpt_frequency;
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
-#ifdef CONFIG_MIPS_MT_SMTC
- setup_smtc_dummy_clockevent_device();
-
- /*
- * On SMTC we only register VPE0's compare interrupt as clockevent
- * device.
- */
- if (cpu)
- return 0;
-#endif
-
if (!c0_compare_int_usable())
return -ENXIO;
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
cd->rating = 300;
cd->irq = irq;
-#ifdef CONFIG_MIPS_MT_SMTC
- cd->cpumask = CPU_MASK_ALL;
-#else
cd->cpumask = cpumask_of_cpu(cpu);
-#endif
cd->set_next_event = mips_next_event;
- cd->set_mode = mips_set_mode;
+ cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
clockevents_register_device(cd);
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)
cp0_timer_irq_installed = 1;
-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
- setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
setup_irq(irq, &c0_compare_irqaction);
-#endif
return 0;
}
+
+#endif /* Not CONFIG_MIPS_MT_SMTC */
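The mips_next_event() and c0_compare_int_usable() logic above leans on one idiom worth spelling out: Count is a free-running 32-bit counter, so "has Count already passed Compare?" is answered by the sign of the unsigned difference, never by a direct less-than. A small sketch:

    /* Sketch of the wrap-safe expiry test behind
     * "res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0". */
    #include <assert.h>
    #include <stdint.h>

    static int compare_missed(uint32_t count_now, uint32_t compare)
    {
        /* True iff count_now has moved past compare, even across the
         * 2^32 wrap, where "count_now > compare" gives the wrong answer. */
        return (int32_t)(count_now - compare) > 0;
    }

    int main(void)
    {
        assert(compare_missed(100, 50));          /* already expired */
        assert(!compare_missed(50, 100));         /* still pending */
        /* Count wrapped past a Compare set near the top of the range: */
        assert(compare_missed(5, 0xFFFFFFF0u));
        /* Compare a few ticks ahead, on the far side of the wrap: */
        assert(!compare_missed(0xFFFFFFF0u, 5));
        return 0;
    }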
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
new file mode 100644
index 00000000000..5162fe4b595
--- /dev/null
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -0,0 +1,321 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+
+#include <asm/smtc_ipi.h>
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * Variant clock event timer support for SMTC on MIPS 34K, 1004K
+ * or other MIPS MT cores.
+ *
+ * Notes on SMTC Support:
+ *
+ * SMTC has multiple microthread TCs pretending to be Linux CPUs.
+ * But there's only one Count/Compare pair per VPE, and Compare
+ * interrupts are taken opportunistically by available TCs
+ * bound to the VPE with the Count register. The new timer
+ * framework provides for global broadcasts, but we really
+ * want VPE-level multicasts for best behavior. So instead
+ * of invoking the high-level clock-event broadcast code,
+ * this version of SMTC support uses the historical SMTC
+ * multicast mechanisms "under the hood", appearing to the
+ * generic clock layer as if the interrupts are per-CPU.
+ *
+ * The approach taken here is to maintain a set of NR_CPUS
+ * virtual timers, and track which "CPU" needs to be alerted
+ * at each event.
+ *
+ * It's unlikely that we'll see a MIPS MT core with more than
+ * 2 VPEs, but we *know* that we won't need to handle more
+ * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements
+ * is always going to be overkill, but always going to be enough.
+ */
+
+unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
+static int smtc_nextinvpe[NR_CPUS];
+
+/*
+ * Timestamps stored are absolute values to be programmed
+ * into Count register. Valid timestamps will never be zero.
+ * If a Zero Count value is actually calculated, it is converted
+ * to be a 1, which will introduce 1 or two CPU cycles of error
+ * roughly once every four billion events, which at 1000 HZ means
+ * about once every 50 days. If that's actually a problem, one
+ * could alternate squashing 0 to 1 and to -1.
+ */
+
+#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
+#define ISVALID(x) ((x) != 0L)
+
+/*
+ * Time comparison is subtle, as it's really truncated
+ * modular arithmetic.
+ */
+
+#define IS_SOONER(a, b, reference) \
+ (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
+
+/*
+ * CATCHUP_INCREMENT, used when the function falls behind the counter.
+ * It could be an increasing function instead of a constant.
+ */
+
+#define CATCHUP_INCREMENT 64
+
+static int mips_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned long flags;
+ unsigned int mtflags;
+ unsigned long timestamp, reference, previous;
+ unsigned long nextcomp = 0L;
+ int vpe = current_cpu_data.vpe_id;
+ int cpu = smp_processor_id();
+ local_irq_save(flags);
+ mtflags = dmt();
+
+ /*
+ * Maintain the per-TC virtual timer
+ * and program the per-VPE shared Count register
+ * as appropriate here...
+ */
+ reference = (unsigned long)read_c0_count();
+ timestamp = MAKEVALID(reference + delta);
+ /*
+ * To really model the clock, we have to catch the case
+ * where the current next-in-VPE timestamp is the old
+ * timestamp for the calling CPU, but the new value is
+ * in fact later. In that case, we have to do a full
+ * scan and discover the new next-in-VPE CPU id and
+ * timestamp.
+ */
+ previous = smtc_nexttime[vpe][cpu];
+ if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
+ && IS_SOONER(previous, timestamp, reference)) {
+ int i;
+ int soonest = cpu;
+
+ /*
+ * Update timestamp array here, so that new
+ * value gets considered along with those of
+ * other virtual CPUs on the VPE.
+ */
+ smtc_nexttime[vpe][cpu] = timestamp;
+ for_each_online_cpu(i) {
+ if (ISVALID(smtc_nexttime[vpe][i])
+ && IS_SOONER(smtc_nexttime[vpe][i],
+ smtc_nexttime[vpe][soonest], reference)) {
+ soonest = i;
+ }
+ }
+ smtc_nextinvpe[vpe] = soonest;
+ nextcomp = smtc_nexttime[vpe][soonest];
+ /*
+ * Otherwise, we don't have to process the whole array rank,
+ * we just have to see if the event horizon has gotten closer.
+ */
+ } else {
+ if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
+ IS_SOONER(timestamp,
+ smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
+ smtc_nextinvpe[vpe] = cpu;
+ nextcomp = timestamp;
+ }
+ /*
+ * Since next-in-VPE may be the same as the executing
+ * virtual CPU, we update the array *after* checking
+ * its value.
+ */
+ smtc_nexttime[vpe][cpu] = timestamp;
+ }
+
+ /*
+ * It may be that, in fact, we don't need to update Compare,
+ * but if we do, we want to make sure we didn't fall into
+ * a crack just behind Count.
+ */
+ if (ISVALID(nextcomp)) {
+ write_c0_compare(nextcomp);
+ ehb();
+ /*
+ * We never return an error, we just make sure
+ * that we trigger the handlers as quickly as
+ * we can if we fell behind.
+ */
+ while ((nextcomp - (unsigned long)read_c0_count())
+ > (unsigned long)LONG_MAX) {
+ nextcomp += CATCHUP_INCREMENT;
+ write_c0_compare(nextcomp);
+ ehb();
+ }
+ }
+ emt(mtflags);
+ local_irq_restore(flags);
+ return 0;
+}
+
+
+void smtc_distribute_timer(int vpe)
+{
+ unsigned long flags;
+ unsigned int mtflags;
+ int cpu;
+ struct clock_event_device *cd;
+ unsigned long nextstamp = 0L;
+ unsigned long reference;
+
+
+repeat:
+ for_each_online_cpu(cpu) {
+ /*
+ * Find virtual CPUs within the current VPE who have
+ * unserviced timer requests whose time is now past.
+ */
+ local_irq_save(flags);
+ mtflags = dmt();
+ if (cpu_data[cpu].vpe_id == vpe &&
+ ISVALID(smtc_nexttime[vpe][cpu])) {
+ reference = (unsigned long)read_c0_count();
+ if ((smtc_nexttime[vpe][cpu] - reference)
+ > (unsigned long)LONG_MAX) {
+ smtc_nexttime[vpe][cpu] = 0L;
+ emt(mtflags);
+ local_irq_restore(flags);
+ /*
+ * We don't send IPIs to ourself.
+ */
+ if (cpu != smp_processor_id()) {
+ smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+ } else {
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
+ }
+ } else {
+ /* Local to VPE but Valid Time not yet reached. */
+ if (!ISVALID(nextstamp) ||
+ IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
+ reference)) {
+ smtc_nextinvpe[vpe] = cpu;
+ nextstamp = smtc_nexttime[vpe][cpu];
+ }
+ emt(mtflags);
+ local_irq_restore(flags);
+ }
+ } else {
+ emt(mtflags);
+ local_irq_restore(flags);
+
+ }
+ }
+ /* Reprogram for interrupt at next soonest timestamp for VPE */
+ if (ISVALID(nextstamp)) {
+ write_c0_compare(nextstamp);
+ ehb();
+ if ((nextstamp - (unsigned long)read_c0_count())
+ > (unsigned long)LONG_MAX)
+ goto repeat;
+ }
+}
+
+
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+ int cpu = smp_processor_id();
+
+ /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
+ handle_perf_irq(1);
+
+ if (read_c0_cause() & (1 << 30)) {
+ /* Clear Count/Compare Interrupt */
+ write_c0_compare(read_c0_compare());
+ smtc_distribute_timer(cpu_data[cpu].vpe_id);
+ }
+ return IRQ_HANDLED;
+}
+
+
+int __cpuinit mips_clockevent_init(void)
+{
+ uint64_t mips_freq = mips_hpt_frequency;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq;
+ int i;
+ int j;
+
+ if (!cpu_has_counter || !mips_hpt_frequency)
+ return -ENXIO;
+ if (cpu == 0) {
+ for (i = 0; i < num_possible_cpus(); i++) {
+ smtc_nextinvpe[i] = 0;
+ for (j = 0; j < num_possible_cpus(); j++)
+ smtc_nexttime[i][j] = 0L;
+ }
+ /*
+ * SMTC also can't have the usability test
+ * run by secondary TCs once Compare is in use.
+ */
+ if (!c0_compare_int_usable())
+ return -ENXIO;
+ }
+
+ /*
+ * With vectored interrupts things are getting platform specific.
+ * get_c0_compare_int is a hook to allow a platform to return the
+ * interrupt number of its liking.
+ */
+ irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+ if (get_c0_compare_int)
+ irq = get_c0_compare_int();
+
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ cd->name = "MIPS";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+
+ /* Calculate the min / max delta */
+ cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+ cd->shift = 32;
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+
+ cd->rating = 300;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of_cpu(cpu);
+ cd->set_next_event = mips_next_event;
+ cd->set_mode = mips_set_clock_mode;
+ cd->event_handler = mips_event_handler;
+
+ clockevents_register_device(cd);
+
+ /*
+ * On SMTC we only want to do the data structure
+ * initialization and IRQ setup once.
+ */
+ if (cpu)
+ return 0;
+ /*
+ * And we need the hwmask associated with the c0_compare
+ * vector to be initialized.
+ */
+ irq_hwmask[irq] = (0x100 << cp0_compare_irq);
+ if (cp0_timer_irq_installed)
+ return 0;
+
+ cp0_timer_irq_installed = 1;
+
+ setup_irq(irq, &c0_compare_irqaction);
+
+ return 0;
+}
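The MAKEVALID()/ISVALID()/IS_SOONER() machinery at the top of cevt-smtc.c deserves a concrete illustration: zero is reserved to mean "no event pending", and two absolute Count timestamps can only be ordered by their unsigned distance from a reference ("now"), since the counter wraps. A sketch, assuming 32-bit timestamps:

    /* Sketch of the wrap-safe ordering used by the SMTC virtual timers. */
    #include <assert.h>
    #include <stdint.h>

    #define MAKEVALID(x) (((x) == 0u) ? 1u : (x))  /* 0 means "no event" */
    #define ISVALID(x)   ((x) != 0u)

    /* a fires before b, as seen from reference: the smaller unsigned
     * distance ahead of "now" wins, regardless of counter wrap. */
    static int is_sooner(uint32_t a, uint32_t b, uint32_t reference)
    {
        return (a - reference) < (b - reference);
    }

    int main(void)
    {
        assert(is_sooner(200, 300, 100));   /* plain case */
        /* now = 0xFFFFFFF0: 0xFFFFFFF8 is 8 ticks away, while 10 is
         * 26 ticks away on the far side of the wrap. */
        assert(is_sooner(0xFFFFFFF8u, 10, 0xFFFFFFF0u));
        assert(MAKEVALID(0u) == 1u && ISVALID(MAKEVALID(0u)));
        return 0;
    }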
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 335a6ae3d59..e621fda8ab3 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -45,18 +45,7 @@ static void r39xx_wait(void)
local_irq_enable();
}
-/*
- * There is a race when WAIT instruction executed with interrupt
- * enabled.
- * But it is implementation-dependent wheter the pipelie restarts when
- * a non-enabled interrupt is requested.
- */
-static void r4k_wait(void)
-{
- __asm__(" .set mips3 \n"
- " wait \n"
- " .set mips0 \n");
-}
+extern void r4k_wait(void);
/*
* This variant is preferable as it allows testing need_resched and going to
@@ -65,14 +54,18 @@ static void r4k_wait(void)
* interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
* using this version a gamble.
*/
-static void r4k_wait_irqoff(void)
+void r4k_wait_irqoff(void)
{
local_irq_disable();
if (!need_resched())
- __asm__(" .set mips3 \n"
+ __asm__(" .set push \n"
+ " .set mips3 \n"
" wait \n"
- " .set mips0 \n");
+ " .set pop \n");
local_irq_enable();
+ __asm__(" .globl __pastwait \n"
+ "__pastwait: \n");
+ return;
}
/*
@@ -128,7 +121,7 @@ static int __init wait_disable(char *s)
__setup("nowait", wait_disable);
-static inline void check_wait(void)
+void __init check_wait(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -242,7 +235,6 @@ static inline void check_errata(void)
void __init check_bugs32(void)
{
- check_wait();
check_errata();
}
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e29598ae939..ffa331029e0 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -79,11 +79,6 @@ FEXPORT(syscall_exit)
FEXPORT(restore_all) # restore full frame
#ifdef CONFIG_MIPS_MT_SMTC
-/* Detect and execute deferred IPI "interrupts" */
- LONG_L s0, TI_REGS($28)
- LONG_S sp, TI_REGS($28)
- jal deferred_smtc_ipi
- LONG_S s0, TI_REGS($28)
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
/* Re-arm any temporarily masked interrupts not explicitly "acked" */
mfc0 v0, CP0_TCSTATUS
@@ -112,6 +107,11 @@ FEXPORT(restore_all) # restore full frame
xor t0, t0, t3
mtc0 t0, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
+/* Detect and execute deferred IPI "interrupts" */
+ LONG_L s0, TI_REGS($28)
+ LONG_S sp, TI_REGS($28)
+ jal deferred_smtc_ipi
+ LONG_S s0, TI_REGS($28)
#endif /* CONFIG_MIPS_MT_SMTC */
.set noat
RESTORE_TEMP
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index c6ada98ee04..01dcbe38fa0 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -20,6 +20,7 @@
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/page.h>
+#include <asm/thread_info.h>
#define PANIC_PIC(msg) \
.set push; \
@@ -126,7 +127,42 @@ handle_vcei:
__FINIT
+ .align 5 /* 32 byte rollback region */
+LEAF(r4k_wait)
+ .set push
+ .set noreorder
+ /* start of rollback region */
+ LONG_L t0, TI_FLAGS($28)
+ nop
+ andi t0, _TIF_NEED_RESCHED
+ bnez t0, 1f
+ nop
+ nop
+ nop
+ .set mips3
+ wait
+ /* end of rollback region (the region size must be power of two) */
+ .set pop
+1:
+ jr ra
+ END(r4k_wait)
+
+ .macro BUILD_ROLLBACK_PROLOGUE handler
+ FEXPORT(rollback_\handler)
+ .set push
+ .set noat
+ MFC0 k0, CP0_EPC
+ PTR_LA k1, r4k_wait
+ ori k0, 0x1f /* 32 byte rollback region */
+ xori k0, 0x1f
+ bne k0, k1, 9f
+ MTC0 k0, CP0_EPC
+9:
+ .set pop
+ .endm
+
.align 5
+BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
/*
@@ -201,6 +237,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
* This prototype is copied to ebase + n*IntCtl.VS and patched
* to invoke the handler
*/
+BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
SAVE_SOME
SAVE_AT
@@ -245,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp)
and t0, a0, t1
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
mfc0 t2, CP0_TCCONTEXT
- or t0, t0, t2
- mtc0 t0, CP0_TCCONTEXT
+ or t2, t0, t2
+ mtc0 t2, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
xor t1, t1, t0
mtc0 t1, CP0_STATUS
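The BUILD_ROLLBACK_PROLOGUE macro added above works because r4k_wait is aligned to, and fits within, a 32-byte region: masking the faulting EPC down to a 32-byte boundary and comparing it against the symbol tells the handler whether the interrupt hit inside the wait sequence, in which case EPC is rolled back so the need_resched() test reruns before the wait. The same check in C, with addresses as plain integers:

    /* Sketch of the EPC rollback test; r4k_wait_addr stands in for
     * the r4k_wait symbol, which is 32-byte aligned. */
    #include <assert.h>
    #include <stdint.h>

    static uintptr_t rollback_epc(uintptr_t epc, uintptr_t r4k_wait_addr)
    {
        /* "ori k0, 0x1f; xori k0, 0x1f" == round down to 32 bytes */
        uintptr_t region = (epc | 0x1f) ^ 0x1f;

        /* Interrupt taken inside the rollback region: restart the
         * region so need_resched() is re-checked before the wait. */
        return (region == r4k_wait_addr) ? region : epc;
    }

    int main(void)
    {
        uintptr_t wait_sym = 0x80100020;  /* hypothetical, 32-byte aligned */
        assert(rollback_epc(wait_sym + 0x0c, wait_sym) == wait_sym);
        assert(rollback_epc(wait_sym + 0x40, wait_sym) == wait_sym + 0x40);
        return 0;
    }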
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index c5a8b2d21ca..6e152c80cd4 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -62,13 +62,13 @@ void arch_kgdb_breakpoint(void)
static void kgdb_call_nmi_hook(void *ignored)
{
- kgdb_nmicallback(raw_smp_processor_id(), (void *)0);
+ kgdb_nmicallback(raw_smp_processor_id(), NULL);
}
void kgdb_roundup_cpus(unsigned long flags)
{
local_irq_enable();
- smp_call_function(kgdb_call_nmi_hook, NULL, NULL);
+ smp_call_function(kgdb_call_nmi_hook, NULL, 0);
local_irq_disable();
}
@@ -190,9 +190,6 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
- if (fixup_exception(regs))
- return NOTIFY_DONE;
-
/* Userpace events, ignore. */
if (user_mode(regs))
return NOTIFY_DONE;
@@ -239,8 +236,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
atomic_set(&kgdb_cpu_doing_single_step, -1);
if (remcom_in_buffer[0] == 's')
- if (kgdb_contthread)
- atomic_set(&kgdb_cpu_doing_single_step, cpu);
+ atomic_set(&kgdb_cpu_doing_single_step, cpu);
return 0;
}
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index df4d3f2f740..dc9eb72ed9d 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh);
/*
* FPU Use Factor empirically derived from experiments on 34K
*/
-#define FPUSEFACTOR 333
+#define FPUSEFACTOR 2000
static __init int mt_fp_affinity_init(void)
{
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index b16facd9ea8..22fc19bbe87 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -55,7 +55,7 @@ void __noreturn cpu_idle(void)
while (1) {
tick_nohz_stop_sched_tick(1);
while (!need_resched()) {
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_MIPS_MT_SMTC
extern void smtc_idle_loop_hook(void);
smtc_idle_loop_hook();
@@ -145,17 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
*/
p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
- clear_tsk_thread_flag(p, TIF_USEDFPU);
-#ifdef CONFIG_MIPS_MT_FPAFF
+#ifdef CONFIG_MIPS_MT_SMTC
/*
- * FPU affinity support is cleaner if we track the
- * user-visible CPU affinity from the very beginning.
- * The generic cpus_allowed mask will already have
- * been copied from the parent before copy_thread
- * is invoked.
+ * SMTC restores TCStatus after Status, and the CU bits
+ * are aliased there.
*/
- p->thread.user_cpus_allowed = p->cpus_allowed;
+ childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
+#endif
+ clear_tsk_thread_flag(p, TIF_USEDFPU);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */
if (clone_flags & CLONE_SETTLS)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 35234b92b9a..96ffc9c6d19 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case FPC_EIR: { /* implementation / version register */
unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
- unsigned int irqflags;
+ unsigned long irqflags;
unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index fc4fd4d705e..5e75a316f6b 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -647,6 +647,12 @@ einval: li v0, -EINVAL
sys sys_timerfd_create 2
sys sys_timerfd_gettime 2
sys sys_timerfd_settime 4
+ sys sys_signalfd4 4
+ sys sys_eventfd2 2 /* 4325 */
+ sys sys_epoll_create1 1
+ sys sys_dup3 3
+ sys sys_pipe2 2
+ sys sys_inotify_init1 1
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 2b73fd1e452..3d58204c9d4 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -481,4 +481,10 @@ sys_call_table:
PTR sys_timerfd_create /* 5280 */
PTR sys_timerfd_gettime
PTR sys_timerfd_settime
+ PTR sys_signalfd4
+ PTR sys_eventfd2
+ PTR sys_epoll_create1 /* 5285 */
+ PTR sys_dup3
+ PTR sys_pipe2
+ PTR sys_inotify_init1
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2654e75d2fe..da7f1b6ea0f 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -407,4 +407,10 @@ EXPORT(sysn32_call_table)
PTR sys_timerfd_create
PTR sys_timerfd_gettime /* 5285 */
PTR sys_timerfd_settime
+ PTR sys_signalfd4
+ PTR sys_eventfd2
+ PTR sys_epoll_create1
+ PTR sys_dup3 /* 5290 */
+ PTR sys_pipe2
+ PTR sys_inotify_init1
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 76167bea5a7..d7cd1aac9ad 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -529,4 +529,10 @@ sys_call_table:
PTR sys_timerfd_create
PTR sys_timerfd_gettime
PTR sys_timerfd_settime
+ PTR compat_sys_signalfd4
+ PTR sys_eventfd2 /* 4325 */
+ PTR sys_epoll_create1
+ PTR sys_dup3
+ PTR sys_pipe2
+ PTR sys_inotify_init1
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8af84867e74..16f8edfe5cd 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -78,7 +78,7 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
/* Sanity check */
if (start + size < start) {
- printk("Trying to add an invalid memory region, skipped\n");
+ pr_warning("Trying to add an invalid memory region, skipped\n");
return;
}
@@ -92,7 +92,7 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
}
if (x == BOOT_MEM_MAP_MAX) {
- printk("Ooops! Too many entries in the memory map!\n");
+ pr_err("Ooops! Too many entries in the memory map!\n");
return;
}
@@ -108,22 +108,22 @@ static void __init print_memory_map(void)
const int field = 2 * sizeof(unsigned long);
for (i = 0; i < boot_mem_map.nr_map; i++) {
- printk(" memory: %0*Lx @ %0*Lx ",
+ printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
field, (unsigned long long) boot_mem_map.map[i].size,
field, (unsigned long long) boot_mem_map.map[i].addr);
switch (boot_mem_map.map[i].type) {
case BOOT_MEM_RAM:
- printk("(usable)\n");
+ printk(KERN_CONT "(usable)\n");
break;
case BOOT_MEM_ROM_DATA:
- printk("(ROM data)\n");
+ printk(KERN_CONT "(ROM data)\n");
break;
case BOOT_MEM_RESERVED:
- printk("(reserved)\n");
+ printk(KERN_CONT "(reserved)\n");
break;
default:
- printk("type %lu\n", boot_mem_map.map[i].type);
+ printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
break;
}
}
@@ -160,36 +160,39 @@ early_param("rd_size", rd_size_early);
static unsigned long __init init_initrd(void)
{
unsigned long end;
- u32 *initrd_header;
/*
* Board specific code or command line parser should have
* already set up initrd_start and initrd_end. In these cases
* perform sanity checks and use them if all looks good.
*/
- if (initrd_start && initrd_end > initrd_start)
- goto sanitize;
+ if (!initrd_start || initrd_end <= initrd_start) {
+#ifdef CONFIG_PROBE_INITRD_HEADER
+ u32 *initrd_header;
- /*
- * See if initrd has been added to the kernel image by
- * arch/mips/boot/addinitrd.c. In that case a header is
- * prepended to initrd and is made up by 8 bytes. The fisrt
- * word is a magic number and the second one is the size of
- * initrd. Initrd start must be page aligned in any cases.
- */
- initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
- if (initrd_header[0] != 0x494E5244)
+ /*
+ * See if initrd has been added to the kernel image by
+ * arch/mips/boot/addinitrd.c. In that case a header is
+ * prepended to initrd and is made up of 8 bytes. The first
+ * word is a magic number and the second one is the size of
+ * initrd. Initrd start must be page aligned in any case.
+ */
+ initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
+ if (initrd_header[0] != 0x494E5244)
+ goto disable;
+ initrd_start = (unsigned long)(initrd_header + 2);
+ initrd_end = initrd_start + initrd_header[1];
+#else
goto disable;
- initrd_start = (unsigned long)(initrd_header + 2);
- initrd_end = initrd_start + initrd_header[1];
+#endif
+ }
-sanitize:
if (initrd_start & ~PAGE_MASK) {
- printk(KERN_ERR "initrd start must be page aligned\n");
+ pr_err("initrd start must be page aligned\n");
goto disable;
}
if (initrd_start < PAGE_OFFSET) {
- printk(KERN_ERR "initrd start < PAGE_OFFSET\n");
+ pr_err("initrd start < PAGE_OFFSET\n");
goto disable;
}
@@ -221,18 +224,18 @@ static void __init finalize_initrd(void)
goto disable;
}
if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
- printk("Initrd extends beyond end of memory");
+ printk(KERN_ERR "Initrd extends beyond end of memory");
goto disable;
}
reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
initrd_below_start_ok = 1;
- printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
- initrd_start, size);
+ pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
+ initrd_start, size);
return;
disable:
- printk(" - disabling initrd\n");
+ printk(KERN_CONT " - disabling initrd\n");
initrd_start = 0;
initrd_end = 0;
}
@@ -310,14 +313,12 @@ static void __init bootmem_init(void)
if (min_low_pfn >= max_low_pfn)
panic("Incorrect memory mapping !!!");
if (min_low_pfn > ARCH_PFN_OFFSET) {
- printk(KERN_INFO
- "Wasting %lu bytes for tracking %lu unused pages\n",
- (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
- min_low_pfn - ARCH_PFN_OFFSET);
+ pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
+ (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
+ min_low_pfn - ARCH_PFN_OFFSET);
} else if (min_low_pfn < ARCH_PFN_OFFSET) {
- printk(KERN_INFO
- "%lu free pages won't be used\n",
- ARCH_PFN_OFFSET - min_low_pfn);
+ pr_info("%lu free pages won't be used\n",
+ ARCH_PFN_OFFSET - min_low_pfn);
}
min_low_pfn = ARCH_PFN_OFFSET;
@@ -471,7 +472,7 @@ static void __init arch_mem_init(char **cmdline_p)
/* call board setup routine */
plat_mem_setup();
- printk("Determined physical RAM map:\n");
+ pr_info("Determined physical RAM map:\n");
print_memory_map();
strlcpy(command_line, arcs_cmdline, sizeof(command_line));
@@ -482,7 +483,7 @@ static void __init arch_mem_init(char **cmdline_p)
parse_early_param();
if (usermem) {
- printk("User-defined physical RAM map:\n");
+ pr_info("User-defined physical RAM map:\n");
print_memory_map();
}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a516286532a..897fb2b4751 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,4 +1,21 @@
-/* Copyright (C) 2004 Mips Technologies, Inc */
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2004 Mips Technologies, Inc
+ * Copyright (C) 2008 Kevin D. Kissell
+ */
#include <linux/clockchips.h>
#include <linux/kernel.h>
@@ -21,7 +38,6 @@
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
-#include <asm/smtc_ipi.h>
#include <asm/smtc_proc.h>
/*
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS];
asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
-/*
- * Clock interrupt "latch" buffers, per "CPU"
- */
-
-static atomic_t ipi_timer_latch[NR_CPUS];
/*
* Number of InterProcessor Interrupt (IPI) message buffers to allocate
@@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS];
#define IPIBUF_PER_CPU 4
-static struct smtc_ipi_q IPIQ[NR_CPUS];
+struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;
@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void)
* phys_cpu_present_map and the logical/physical mappings.
*/
-int __init mipsmt_build_cpu_map(int start_cpu_slot)
+int __init smtc_build_cpu_map(int start_cpu_slot)
{
int i, ntcs;
@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
write_tc_c0_tcstatus((read_tc_c0_tcstatus()
& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
| TCSTATUS_A);
- write_tc_c0_tccontext(0);
+ /*
+ * TCContext gets an offset from the base of the IPIQ array
+ * to be used in low-level code to detect the presence of
+ * an active IPI queue
+ */
+ write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
/* Bind tc to vpe */
write_tc_c0_tcbind(vpe);
/* In general, all TCs should have the same cpu_data indications */
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
cpu_data[cpu].options &= ~MIPS_CPU_FPU;
cpu_data[cpu].vpe_id = vpe;
cpu_data[cpu].tc_id = tc;
+ /* Multi-core SMTC hasn't been tested, but be prepared */
+ cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
}
+/*
+ * Tweak to get Count registers in as close a sync as possible.
+ * Value seems good for 34K-class cores.
+ */
+
+#define CP0_SKEW 8
-void mipsmt_prepare_cpus(void)
+void smtc_prepare_cpus(int cpus)
{
int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
unsigned long flags;
@@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void)
IPIQ[i].head = IPIQ[i].tail = NULL;
spin_lock_init(&IPIQ[i].lock);
IPIQ[i].depth = 0;
- atomic_set(&ipi_timer_latch[i], 0);
}
/* cpu_data index starts at zero */
cpu = 0;
cpu_data[cpu].vpe_id = 0;
cpu_data[cpu].tc_id = 0;
+ cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
cpu++;
/* Report on boot-time options */
@@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void)
write_vpe_c0_compare(0);
/* Propagate Config7 */
write_vpe_c0_config7(read_c0_config7());
- write_vpe_c0_count(read_c0_count());
+ write_vpe_c0_count(read_c0_count() + CP0_SKEW);
+ ehb();
}
/* enable multi-threading within VPE */
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
@@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void)
void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
{
extern u32 kernelsp[NR_CPUS];
- long flags;
+ unsigned long flags;
int mtflags;
LOCK_MT_PRA();
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
void smtc_init_secondary(void)
{
- /*
- * Start timer on secondary VPEs if necessary.
- * plat_timer_setup has already have been invoked by init/main
- * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that
- * SMTC init code assigns TCs consdecutively and in ascending order
- * to across available VPEs.
- */
- if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
- ((read_c0_tcbind() & TCBIND_CURVPE)
- != cpu_data[smp_processor_id() - 1].vpe_id)){
- write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
- }
-
local_irq_enable();
}
void smtc_smp_finish(void)
{
+ int cpu = smp_processor_id();
+
+ /*
+ * Lowest-numbered CPU per VPE starts a clock tick.
+ * Like per_cpu_trap_init() hack, this assumes that
+ * SMTC init code assigns TCs consecutively and
+ * in ascending order across available VPEs.
+ */
+ if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
+ write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
+
printk("TC %d going on-line as CPU %d\n",
cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}
@@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
{
int tcstatus;
struct smtc_ipi *pipi;
- long flags;
+ unsigned long flags;
int mtflags;
+ unsigned long tcrestart;
+ extern void r4k_wait_irqoff(void), __pastwait(void);
if (cpu == smp_processor_id()) {
printk("Cannot Send IPI to self!\n");
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
pipi->arg = (void *)action;
pipi->dest = cpu;
if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
- if (type == SMTC_CLOCK_TICK)
- atomic_inc(&ipi_timer_latch[cpu]);
/* If not on same VPE, enqueue and send cross-VPE interrupt */
smtc_ipi_nq(&IPIQ[cpu], pipi);
LOCK_CORE_PRA();
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
if ((tcstatus & TCSTATUS_IXMT) != 0) {
/*
- * Spin-waiting here can deadlock,
- * so we queue the message for the target TC.
+ * If we're in the irq-off version of the wait
+ * loop, we need to force exit from the wait and
+ * do a direct post of the IPI.
+ */
+ if (cpu_wait == r4k_wait_irqoff) {
+ tcrestart = read_tc_c0_tcrestart();
+ if (tcrestart >= (unsigned long)r4k_wait_irqoff
+ && tcrestart < (unsigned long)__pastwait) {
+ write_tc_c0_tcrestart(__pastwait);
+ tcstatus &= ~TCSTATUS_IXMT;
+ write_tc_c0_tcstatus(tcstatus);
+ goto postdirect;
+ }
+ }
+ /*
+ * Otherwise we queue the message for the target TC
+ * to pick up when it does a local_irq_restore()
*/
write_tc_c0_tchalt(0);
UNLOCK_CORE_PRA();
- /* Try to reduce redundant timer interrupt messages */
- if (type == SMTC_CLOCK_TICK) {
- if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
- smtc_ipi_nq(&freeIPIq, pipi);
- return;
- }
- }
smtc_ipi_nq(&IPIQ[cpu], pipi);
} else {
- if (type == SMTC_CLOCK_TICK)
- atomic_inc(&ipi_timer_latch[cpu]);
+postdirect:
post_direct_ipi(cpu, pipi);
write_tc_c0_tchalt(0);
UNLOCK_CORE_PRA();
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void)
smp_call_function_interrupt();
}
-DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
void ipi_decode(struct smtc_ipi *pipi)
{
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
struct clock_event_device *cd;
void *arg_copy = pipi->arg;
int type_copy = pipi->type;
- int ticks;
-
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
case SMTC_CLOCK_TICK:
irq_enter();
kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
- cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
- ticks = atomic_read(&ipi_timer_latch[cpu]);
- atomic_sub(ticks, &ipi_timer_latch[cpu]);
- while (ticks) {
- cd->event_handler(cd);
- ticks--;
- }
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
irq_exit();
break;
@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi)
}
}
+/*
+ * Similar to smtc_ipi_replay(), but invoked from context restore,
+ * so it reuses the current exception frame rather than set up a
+ * new one with self_ipi.
+ */
+
void deferred_smtc_ipi(void)
{
- struct smtc_ipi *pipi;
- unsigned long flags;
-/* DEBUG */
- int q = smp_processor_id();
+ int cpu = smp_processor_id();
/*
* Test is not atomic, but much faster than a dequeue,
* and the vast majority of invocations will have a null queue.
+ * If irqs were disabled when this was called, then any IPIs queued
+ * after we test last will be taken on the next irq_enable/restore.
+ * If interrupts were enabled, then any IPIs added after the
+ * last test will be taken directly.
*/
- if (IPIQ[q].head != NULL) {
- while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
- /* ipi_decode() should be called with interrupts off */
- local_irq_save(flags);
+
+ while (IPIQ[cpu].head != NULL) {
+ struct smtc_ipi_q *q = &IPIQ[cpu];
+ struct smtc_ipi *pipi;
+ unsigned long flags;
+
+ /*
+ * It may be possible we'll come in with interrupts
+ * already enabled.
+ */
+ local_irq_save(flags);
+
+ spin_lock(&q->lock);
+ pipi = __smtc_ipi_dq(q);
+ spin_unlock(&q->lock);
+ if (pipi != NULL)
ipi_decode(pipi);
- local_irq_restore(flags);
- }
+ /*
+ * The use of the __raw_local restore isn't
+ * as obviously necessary here as in smtc_ipi_replay(),
+ * but it's more efficient, given that we're already
+ * running down the IPI queue.
+ */
+ __raw_local_irq_restore(flags);
}
}
@@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
struct smtc_ipi *pipi;
unsigned long tcstatus;
int sent;
- long flags;
+ unsigned long flags;
unsigned int mtflags;
unsigned int vpflags;
@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
/*
* SMTC-specific hacks invoked from elsewhere in the kernel.
- *
- * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
- * called with interrupts disabled. We do rely on interrupts being disabled
- * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
- * result in a recursive call to raw_local_irq_restore().
*/
-static void __smtc_ipi_replay(void)
+ /*
+ * smtc_ipi_replay is called from raw_local_irq_restore
+ */
+
+void smtc_ipi_replay(void)
{
unsigned int cpu = smp_processor_id();
/*
* To the extent that we've ever turned interrupts off,
* we may have accumulated deferred IPIs. This is subtle.
- * If we use the smtc_ipi_qdepth() macro, we'll get an
- * exact number - but we'll also disable interrupts
- * and create a window of failure where a new IPI gets
- * queued after we test the depth but before we re-enable
- * interrupts. So long as IXMT never gets set, however,
* we should be OK: If we pick up something and dispatch
* it here, that's great. If we see nothing, but concurrent
* with this operation, another TC sends us an IPI, IXMT
* is clear, and we'll handle it as a real pseudo-interrupt
- * and not a pseudo-pseudo interrupt.
+ * and not a pseudo-pseudo interrupt. The important thing
+ * is to do the last check for queued message *after* the
+ * re-enabling of interrupts.
*/
- if (IPIQ[cpu].depth > 0) {
- while (1) {
- struct smtc_ipi_q *q = &IPIQ[cpu];
- struct smtc_ipi *pipi;
- extern void self_ipi(struct smtc_ipi *);
-
- spin_lock(&q->lock);
- pipi = __smtc_ipi_dq(q);
- spin_unlock(&q->lock);
- if (!pipi)
- break;
+ while (IPIQ[cpu].head != NULL) {
+ struct smtc_ipi_q *q = &IPIQ[cpu];
+ struct smtc_ipi *pipi;
+ unsigned long flags;
+
+ /*
+ * It's just possible we'll come in with interrupts
+ * already enabled.
+ */
+ local_irq_save(flags);
+
+ spin_lock(&q->lock);
+ pipi = __smtc_ipi_dq(q);
+ spin_unlock(&q->lock);
+ /*
+ * But use a raw restore here to avoid recursion.
+ */
+ __raw_local_irq_restore(flags);
+ if (pipi) {
self_ipi(pipi);
smtc_cpu_stats[cpu].selfipis++;
}
}
}
-void smtc_ipi_replay(void)
-{
- raw_local_irq_disable();
- __smtc_ipi_replay();
-}
-
EXPORT_SYMBOL(smtc_ipi_replay);
void smtc_idle_loop_hook(void)
@@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void)
}
}
- /*
- * Now that we limit outstanding timer IPIs, check for hung TC
- */
- for (tc = 0; tc < NR_CPUS; tc++) {
- /* Don't check ourself - we'll dequeue IPIs just below */
- if ((tc != smp_processor_id()) &&
- atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
- if (clock_hang_reported[tc] == 0) {
- pdb_msg += sprintf(pdb_msg,
- "TC %d looks hung with timer latch at %d\n",
- tc, atomic_read(&ipi_timer_latch[tc]));
- clock_hang_reported[tc]++;
- }
- }
- }
emt(mtflags);
local_irq_restore(flags);
if (pdb_msg != &id_ho_db_msg[0])
printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
- /*
- * Replay any accumulated deferred IPIs. If "Instant Replay"
- * is in use, there should never be any.
- */
-#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
- {
- unsigned long flags;
-
- local_irq_save(flags);
- __smtc_ipi_replay();
- local_irq_restore(flags);
- }
-#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
+ smtc_ipi_replay();
}
void smtc_soft_dump(void)
@@ -1242,10 +1260,6 @@ void smtc_soft_dump(void)
printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
}
smtc_ipi_qdump();
- printk("Timer IPI Backlogs:\n");
- for (i=0; i < NR_CPUS; i++) {
- printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
- }
printk("%d Recoveries of \"stolen\" FPU\n",
atomic_read(&smtc_fpu_recoveries));
}
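One subtlety in the smtc_send_ipi() hunk above: a TC that is interrupt-inhibited (IXMT set) but whose restart PC lies between r4k_wait_irqoff and the __pastwait label is sleeping, or about to sleep, with interrupts off; the sender rescues it by moving the restart PC past the wait and clearing IXMT so the IPI can be posted directly. The range check, sketched in C:

    /* Sketch: is a halted TC parked inside the irq-off wait loop?
     * Addresses are plain integers standing in for the symbols. */
    #include <assert.h>
    #include <stdint.h>

    static int parked_in_wait(uintptr_t tcrestart,
                              uintptr_t wait_start,  /* r4k_wait_irqoff */
                              uintptr_t pastwait)    /* __pastwait label */
    {
        /* Half-open range: from entry up to, but not including, the
         * first instruction after the wait. */
        return tcrestart >= wait_start && tcrestart < pastwait;
    }

    int main(void)
    {
        uintptr_t start = 0x80105000, past = 0x80105040;  /* hypothetical */
        assert(parked_in_wait(start + 8, start, past));   /* needs rescue */
        assert(!parked_in_wait(past, start, past));       /* already past */
        return 0;
    }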
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 426cced1e9d..b602ac6eb47 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -46,6 +46,9 @@
#include <asm/types.h>
#include <asm/stacktrace.h>
+extern void check_wait(void);
+extern asmlinkage void r4k_wait(void);
+extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
@@ -373,8 +376,8 @@ void __noreturn die(const char * str, const struct pt_regs * regs)
do_exit(SIGSEGV);
}
-extern const struct exception_table_entry __start___dbe_table[];
-extern const struct exception_table_entry __stop___dbe_table[];
+extern struct exception_table_entry __start___dbe_table[];
+extern struct exception_table_entry __stop___dbe_table[];
__asm__(
" .section __dbe_table, \"a\"\n"
@@ -822,8 +825,10 @@ static void mt_ase_fp_affinity(void)
if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
cpumask_t tmask;
- cpus_and(tmask, current->thread.user_cpus_allowed,
- mt_fpu_cpumask);
+ current->thread.user_cpus_allowed
+ = current->cpus_allowed;
+ cpus_and(tmask, current->cpus_allowed,
+ mt_fpu_cpumask);
set_cpus_allowed(current, tmask);
set_thread_flag(TIF_FPUBOUND);
}
@@ -1200,7 +1205,7 @@ void *set_except_vector(int n, void *addr)
if (n == 0 && cpu_has_divec) {
*(u32 *)(ebase + 0x200) = 0x08000000 |
(0x03ffffff & (handler >> 2));
- flush_icache_range(ebase + 0x200, ebase + 0x204);
+ local_flush_icache_range(ebase + 0x200, ebase + 0x204);
}
return (void *)old_handler;
}
@@ -1251,6 +1256,9 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
extern char except_vec_vi, except_vec_vi_lui;
extern char except_vec_vi_ori, except_vec_vi_end;
+ extern char rollback_except_vec_vi;
+ char *vec_start = (cpu_wait == r4k_wait) ?
+ &rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* We need to provide the SMTC vectored interrupt handler
@@ -1258,11 +1266,11 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
* Status.IM bit to be masked before going there.
*/
extern char except_vec_vi_mori;
- const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
+ const int mori_offset = &except_vec_vi_mori - vec_start;
#endif /* CONFIG_MIPS_MT_SMTC */
- const int handler_len = &except_vec_vi_end - &except_vec_vi;
- const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
- const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
+ const int handler_len = &except_vec_vi_end - vec_start;
+ const int lui_offset = &except_vec_vi_lui - vec_start;
+ const int ori_offset = &except_vec_vi_ori - vec_start;
if (handler_len > VECTORSPACING) {
/*
@@ -1272,7 +1280,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
panic("VECTORSPACING too small");
}
- memcpy(b, &except_vec_vi, handler_len);
+ memcpy(b, vec_start, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
@@ -1283,7 +1291,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
w = (u32 *)(b + ori_offset);
*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
- flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
+ local_flush_icache_range((unsigned long)b,
+ (unsigned long)(b+handler_len));
}
else {
/*
@@ -1295,7 +1304,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
w = (u32 *)b;
*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
*w = 0;
- flush_icache_range((unsigned long)b, (unsigned long)(b+8));
+ local_flush_icache_range((unsigned long)b,
+ (unsigned long)(b+8));
}
return (void *)old_handler;
@@ -1515,7 +1525,7 @@ void __cpuinit per_cpu_trap_init(void)
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
memcpy((void *)(ebase + offset), addr, size);
- flush_icache_range(ebase + offset, ebase + offset + size);
+ local_flush_icache_range(ebase + offset, ebase + offset + size);
}
static char panic_null_cerr[] __cpuinitdata =
@@ -1552,6 +1562,10 @@ void __init trap_init(void)
extern char except_vec3_generic, except_vec3_r4000;
extern char except_vec4;
unsigned long i;
+ int rollback;
+
+ check_wait();
+ rollback = (cpu_wait == r4k_wait);
#if defined(CONFIG_KGDB)
if (kgdb_early_setup)
@@ -1616,7 +1630,7 @@ void __init trap_init(void)
if (board_be_init)
board_be_init();
- set_except_vector(0, handle_int);
+ set_except_vector(0, rollback ? rollback_handle_int : handle_int);
set_except_vector(1, handle_tlbm);
set_except_vector(2, handle_tlbl);
set_except_vector(3, handle_tlbs);
@@ -1680,6 +1694,8 @@ void __init trap_init(void)
signal32_init();
#endif
- flush_icache_range(ebase, ebase + 0x400);
+ local_flush_icache_range(ebase, ebase + 0x400);
flush_tlb_handlers();
+
+ sort_extable(__start___dbe_table, __stop___dbe_table);
}
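The traps.c hunks above replace flush_icache_range() with local_flush_icache_range() in paths that patch code for the current CPU only (exception vectors are installed per CPU, and trap_init() runs before the secondary CPUs are started), so no IPI broadcast flush is needed. trap_init() also calls check_wait() early: when the CPU idles via r4k_wait, the rollback variants of the interrupt entry points are installed to close the race between an interrupt and entry into wait. The branch planted at ebase + 0x200 is a MIPS j instruction; a minimal illustrative sketch of that encoding (not part of the patch):

#include <stdint.h>

/*
 * A MIPS "j" is opcode 000010 followed by a 26-bit word index, so
 * the handler address is shifted right by two and masked to 26 bits.
 */
static uint32_t mips_j_insn(uint32_t handler)
{
	return 0x08000000 | ((handler >> 2) & 0x03ffffff);
}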
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index b5470ceb418..afb119f3568 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -36,6 +36,7 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
+ *(.text.*)
*(.fixup)
*(.gnu.warning)
} :text = 0
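The new *(.text.*) input rule collects compiler-generated text subsections into the main .text output section instead of leaving them as linker orphans. A hypothetical example of code that lands in such a subsection (the section name here is invented):

/*
 * Placed in its own ".text.coldpath" input section; without the
 * *(.text.*) rule above, the linker script would not merge it
 * into .text.
 */
static void __attribute__((section(".text.coldpath"))) coldpath(void)
{
	/* rarely executed code */
}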
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 8d7784122c1..edac9892c51 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -39,12 +39,14 @@
#ifdef USE_DOUBLE
#define LOAD ld
+#define LOAD32 lwu	/* unsigned load, zero-extends to 64 bits */
#define ADD daddu
#define NBYTES 8
#else
#define LOAD lw
+#define LOAD32 lw
#define ADD addu
#define NBYTES 4
@@ -60,6 +62,14 @@
ADD sum, v1; \
.set pop
+#define ADDC32(sum,reg) \
+ .set push; \
+ .set noat; \
+ addu sum, reg; \
+ sltu v1, sum, reg; \
+ addu sum, v1; \
+ .set pop
+
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
LOAD _t0, (offset + UNIT(0))(src); \
LOAD _t1, (offset + UNIT(1))(src); \
@@ -132,7 +142,7 @@ LEAF(csum_partial)
beqz t8, .Lqword_align
andi t8, src, 0x8
- lw t0, 0x00(src)
+ LOAD32 t0, 0x00(src)
LONG_SUBU a1, a1, 0x4
ADDC(sum, t0)
PTR_ADDU src, src, 0x4
@@ -211,7 +221,7 @@ LEAF(csum_partial)
LONG_SRL t8, t8, 0x2
.Lend_words:
- lw t0, (src)
+ LOAD32 t0, (src)
LONG_SUBU t8, t8, 0x1
ADDC(sum, t0)
.set reorder /* DADDI_WAR */
@@ -230,6 +240,9 @@ LEAF(csum_partial)
/* Still a full word to go */
ulw t1, (src)
PTR_ADDIU src, 4
+#ifdef USE_DOUBLE
+ dsll t1, t1, 32 /* clear the lower 32 bits */
+#endif
ADDC(sum, t1)
1: move t1, zero
@@ -280,7 +293,7 @@ LEAF(csum_partial)
1:
.set reorder
/* Add the passed partial csum. */
- ADDC(sum, a2)
+ ADDC32(sum, a2)
jr ra
.set noreorder
END(csum_partial)
@@ -681,7 +694,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
.set pop
1:
.set reorder
- ADDC(sum, psum)
+ ADDC32(sum, psum)
jr ra
.set noreorder
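The csum_partial.S changes keep 64-bit kernels from polluting the 32-bit checksum: the partial sum passed in from C may arrive sign-extended in a 64-bit register, so folding it in with the 64-bit ADD of plain ADDC would corrupt the result. ADDC32 therefore always uses 32-bit addu with an explicit end-around carry, while LOAD32 and the dsll zero-extend or shift loaded words so stray sign bits never reach the accumulator. A rough C model of the addu/sltu pair (illustrative only):

#include <stdint.h>

/* 32-bit one's-complement accumulate: add, then fold the carry back in. */
static uint32_t csum_add32(uint32_t sum, uint32_t addend)
{
	sum += addend;
	return sum + (sum < addend);
}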
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 27a5b466c85..5500c20c79a 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -320,6 +320,7 @@ void __cpuinit r3k_cache_init(void)
flush_cache_range = r3k_flush_cache_range;
flush_cache_page = r3k_flush_cache_page;
flush_icache_range = r3k_flush_icache_range;
+ local_flush_icache_range = r3k_flush_icache_range;
flush_cache_sigtramp = r3k_flush_cache_sigtramp;
local_flush_data_cache_page = local_r3k_flush_data_cache_page;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 71df3390c07..6e99665ae86 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -543,12 +543,8 @@ struct flush_icache_range_args {
unsigned long end;
};
-static inline void local_r4k_flush_icache_range(void *args)
+static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
- struct flush_icache_range_args *fir_args = args;
- unsigned long start = fir_args->start;
- unsigned long end = fir_args->end;
-
if (!cpu_has_ic_fills_f_dc) {
if (end - start >= dcache_size) {
r4k_blast_dcache();
@@ -564,6 +560,15 @@ static inline void local_r4k_flush_icache_range(void *args)
protected_blast_icache_range(start, end);
}
+static inline void local_r4k_flush_icache_range_ipi(void *args)
+{
+ struct flush_icache_range_args *fir_args = args;
+ unsigned long start = fir_args->start;
+ unsigned long end = fir_args->end;
+
+ local_r4k_flush_icache_range(start, end);
+}
+
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
struct flush_icache_range_args args;
@@ -571,7 +576,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
args.start = start;
args.end = end;
- r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1);
+ r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
instruction_hazard();
}
@@ -1375,6 +1380,7 @@ void __cpuinit r4k_cache_init(void)
local_flush_data_cache_page = local_r4k_flush_data_cache_page;
flush_data_cache_page = r4k_flush_data_cache_page;
flush_icache_range = r4k_flush_icache_range;
+ local_flush_icache_range = local_r4k_flush_icache_range;
#if defined(CONFIG_DMA_NONCOHERENT)
if (coherentio) {
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index a9f7f1f5e9b..f7c8f9ce39c 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -362,6 +362,7 @@ void __cpuinit tx39_cache_init(void)
flush_cache_range = (void *) tx39h_flush_icache_all;
flush_cache_page = (void *) tx39h_flush_icache_all;
flush_icache_range = (void *) tx39h_flush_icache_all;
+ local_flush_icache_range = (void *) tx39h_flush_icache_all;
flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
@@ -390,6 +391,7 @@ void __cpuinit tx39_cache_init(void)
flush_cache_range = tx39_flush_cache_range;
flush_cache_page = tx39_flush_cache_page;
flush_icache_range = tx39_flush_icache_range;
+ local_flush_icache_range = tx39_flush_icache_range;
flush_cache_sigtramp = tx39_flush_cache_sigtramp;
local_flush_data_cache_page = local_tx39_flush_data_cache_page;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 034e8506f6e..1eb7c71e3d6 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -29,6 +29,7 @@ void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*local_flush_icache_range)(unsigned long start, unsigned long end);
void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);
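cache.c gains a second indirection, local_flush_icache_range, next to the SMP-safe flush_icache_range; the r3k, r4k and tx39 back ends each fill in both pointers. Callers that patch code executed only by the current CPU can then flush without smp_call_function(), which is also the only safe option before the secondary CPUs are online. A hypothetical caller-side sketch (install_handler is invented):

#include <stddef.h>
#include <string.h>

extern void (*local_flush_icache_range)(unsigned long start,
					unsigned long end);

static void install_handler(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	/* Only the calling CPU will execute this code: no IPI needed. */
	local_flush_icache_range((unsigned long)dst,
				 (unsigned long)dst + len);
}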
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 76da73a5ab3..979cf919728 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1273,10 +1273,10 @@ void __cpuinit build_tlb_refill_handler(void)
void __cpuinit flush_tlb_handlers(void)
{
- flush_icache_range((unsigned long)handle_tlbl,
+ local_flush_icache_range((unsigned long)handle_tlbl,
(unsigned long)handle_tlbl + sizeof(handle_tlbl));
- flush_icache_range((unsigned long)handle_tlbs,
+ local_flush_icache_range((unsigned long)handle_tlbs,
(unsigned long)handle_tlbs + sizeof(handle_tlbs));
- flush_icache_range((unsigned long)handle_tlbm,
+ local_flush_icache_range((unsigned long)handle_tlbm,
(unsigned long)handle_tlbm + sizeof(handle_tlbm));
}
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 3b7dd722c32..cef2db8d222 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += malta-console.o
obj-$(CONFIG_PCI) += malta-pci.o
# FIXME FIXME FIXME
-obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o
+obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o
EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 5ea705e4945..f84a46a8ae6 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -84,12 +84,17 @@ static void msmtc_cpus_done(void)
static void __init msmtc_smp_setup(void)
{
- mipsmt_build_cpu_map(0);
+ /*
+ * We won't know the definitive value until smtc_prepare_cpus()
+ * runs later, but we need an upper bound now.
+ */
+ smp_num_siblings = smtc_build_cpu_map(0);
}
static void __init msmtc_prepare_cpus(unsigned int max_cpus)
{
- mipsmt_prepare_cpus();
+ smtc_prepare_cpus(max_cpus);
}
struct plat_smp_ops msmtc_smp_ops = {
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 15e01aec37f..c8c32f417b6 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SOC_TX3927) += ops-tx3927.o
obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o
obj-$(CONFIG_MARKEINS) += ops-emma2rh.o pci-emma2rh.o fixup-emma2rh.o
obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o
+obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o
#
# These are still pretty much in the old state, watch, go blind.
diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c
new file mode 100644
index 00000000000..bea9b6cdfdb
--- /dev/null
+++ b/arch/mips/pci/pci-bcm47xx.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2008 Aurelien Jarno <aurelien@aurel32.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/ssb/ssb.h>
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return 0;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ int res;
+ u8 slot, pin;
+
+ res = ssb_pcibios_plat_dev_init(dev);
+ if (res < 0) {
+ printk(KERN_ALERT "PCI: Failed to init device %s\n",
+ pci_name(dev));
+ return res;
+ }
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ slot = PCI_SLOT(dev->devfn);
+ res = ssb_pcibios_map_irq(dev, slot, pin);
+
+ /* IRQ-0 and IRQ-1 are software interrupts. */
+ if (res < 2) {
+ printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n",
+ pci_name(dev));
+ return res;
+ }
+
+ dev->irq = res;
+ return 0;
+}
+
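On BCM47xx the PCI core hangs off the SSB bus, so interrupt routing is resolved per device by ssb_pcibios_map_irq(); pcibios_map_irq() just returns 0 and the real assignment happens at pci_enable_device() time via pcibios_plat_dev_init(). Drivers are unaffected, as in this hypothetical probe (example_isr and the "example" name are invented):

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct pci_dev *pdev)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;
	/* pdev->irq was filled in by pcibios_plat_dev_init(). */
	return request_irq(pdev->irq, example_isr, IRQF_SHARED,
			   "example", pdev);
}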
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index ce92f82b16d..f97ab146101 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -143,25 +143,47 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
*/
int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
+ return 0;
+}
+
+/* Most MIPS systems have straightforward swizzling needs. */
+static inline u8 bridge_swizzle(u8 pin, u8 slot)
+{
+ return (((pin - 1) + slot) % 4) + 1;
+}
+
+static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev)
+{
+ while (dev->bus->parent) {
+ /* Move up the chain of bridges. */
+ dev = dev->bus->self;
+ }
+
+ return dev;
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
- int irq = bc->pci_int[slot];
+ struct pci_dev *rdev = bridge_root_dev(dev);
+ int slot = PCI_SLOT(rdev->devfn);
+ int irq;
+ irq = bc->pci_int[slot];
if (irq == -1) {
- irq = bc->pci_int[slot] = request_bridge_irq(bc);
+ irq = request_bridge_irq(bc);
if (irq < 0)
- panic("Can't allocate interrupt for PCI device %s\n",
- pci_name(dev));
+ return irq;
+
+ bc->pci_int[slot] = irq;
}
irq_to_bridge[irq] = bc;
irq_to_slot[irq] = slot;
- return irq;
-}
+ dev->irq = irq;
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
return 0;
}
@@ -205,6 +227,7 @@ int pcibus_to_node(struct pci_bus *bus)
return bc->nasid;
}
+EXPORT_SYMBOL(pcibus_to_node);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
pci_fixup_ioc3);
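IP27 likewise moves bridge interrupt allocation to pci_enable_device() time, so a failing request_bridge_irq() can be returned as an error instead of triggering a panic; bridge_root_dev() walks up to the device directly behind the Bridge ASIC so pci_int[] is indexed by the root slot. The bridge_swizzle() helper encodes the standard INTx rotation; a sketch of how such a helper is conventionally combined with the walk (illustrative, not what the patch itself does):

#include <linux/pci.h>

static u8 swizzle_to_root(struct pci_dev *dev, u8 pin)
{
	while (dev->bus->parent) {
		pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
		dev = dev->bus->self;	/* up one bridge */
	}
	return pin;
}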
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 44fb0a62877..82ab395efa3 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -49,8 +49,6 @@
#define GPIO_ALE (1 << 0x0a)
#define GPIO_CLE (1 << 0x0b)
-extern char *board_type;
-
static struct resource korina_dev0_res[] = {
{
.name = "korina_regs",
@@ -265,14 +263,6 @@ static void __init parse_mac_addr(char *macstr)
}
-/* DEVICE CONTROLLER 1 */
-#define CFG_DC_DEV1 ((void *)0xb8010010)
-#define CFG_DC_DEV2 ((void *)0xb8010020)
-#define CFG_DC_DEVBASE 0x0
-#define CFG_DC_DEVMASK 0x4
-#define CFG_DC_DEVC 0x8
-#define CFG_DC_DEVTC 0xC
-
/* NAND definitions */
#define NAND_CHIP_DELAY 25
@@ -301,16 +291,16 @@ static void __init rb532_nand_setup(void)
static int __init plat_setup_devices(void)
{
/* Look for the CF card reader */
- if (!readl(CFG_DC_DEV1 + CFG_DC_DEVMASK))
+ if (!readl(IDT434_REG_BASE + DEV1MASK))
rb532_devs[1] = NULL;
else {
cf_slot0_res[0].start =
- readl(CFG_DC_DEV1 + CFG_DC_DEVBASE);
+ readl(IDT434_REG_BASE + DEV1BASE);
cf_slot0_res[0].end = cf_slot0_res[0].start + 0x1000;
}
/* Read the NAND resources from the device controller */
- nand_slot0_res[0].start = readl(CFG_DC_DEV2 + CFG_DC_DEVBASE);
+ nand_slot0_res[0].start = readl(IDT434_REG_BASE + DEV2BASE);
nand_slot0_res[0].end = nand_slot0_res[0].start + 0x1000;
/* Initialise the NAND device */
diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c
index 60141235ec4..52486c4d2b0 100644
--- a/arch/mips/sgi-ip22/ip22-platform.c
+++ b/arch/mips/sgi-ip22/ip22-platform.c
@@ -150,7 +150,7 @@ static int __init sgiseeq_devinit(void)
return res;
/* Second HPC is missing? */
- if (!ip22_is_fullhouse() ||
+ if (ip22_is_fullhouse() ||
get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1]))
return 0;
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 1bc57d0f4c5..fe6bee09cec 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -53,6 +53,7 @@ txx9_reg_res_init(unsigned int pcode, unsigned long base, unsigned long size)
txx9_ce_res[i].name = txx9_ce_res_name[i];
}
+ txx9_pcode = pcode;
sprintf(txx9_pcode_str, "TX%x", pcode);
if (base) {
txx9_reg_res.start = base & 0xfffffffffULL;
@@ -328,6 +329,9 @@ void __init arch_init_irq(void)
void __init plat_time_init(void)
{
+#ifdef CONFIG_CPU_TX49XX
+ mips_hpt_frequency = txx9_cpu_clock / 2;
+#endif
txx9_board_vec->time_init();
}
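The plat_time_init() hunk relies on the TX49 convention that the CP0 Count register advances once every two pipeline cycles, so the high-precision timer ticks at half the CPU clock (txx9_cpu_clock is assumed to hold that clock in Hz):

static inline unsigned int tx49_hpt_frequency(unsigned int cpu_hz)
{
	/* CP0 Count increments every other cycle on TX49 cores. */
	return cpu_hz / 2;
}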
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index cba36a247e3..92dd1a0ca35 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -72,6 +72,7 @@ static void irq_dispatch(unsigned int irq)
cascade = irq_cascade + irq;
if (cascade->get_irq != NULL) {
unsigned int source_irq = irq;
+ int ret;
desc = irq_desc + source_irq;
if (desc->chip->mask_ack)
desc->chip->mask_ack(source_irq);
@@ -79,8 +80,9 @@ static void irq_dispatch(unsigned int irq)
desc->chip->mask(source_irq);
desc->chip->ack(source_irq);
}
- irq = cascade->get_irq(irq);
- if (irq < 0)
+ ret = cascade->get_irq(irq);
+ irq = ret;
+ if (ret < 0)
atomic_inc(&irq_err_count);
else
irq_dispatch(irq);
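The irq_dispatch() fix addresses a signedness bug: irq is an unsigned int, so the old test "if (irq < 0)" could never be true and a negative return from get_irq() was dispatched as a huge IRQ number. Latching the return value into a signed int first makes the error path reachable. A standalone demonstration of the pitfall:

#include <stdio.h>

int main(void)
{
	unsigned int irq = -22;	/* an -EINVAL-style error code */

	if (irq < 0)		/* always false: irq is unsigned */
		puts("error path (never reached)");
	else
		printf("bogus irq %u would be dispatched\n", irq);
	return 0;
}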