author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2008-10-14 22:24:51 +0100
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2008-10-14 22:24:51 +0100
commit | 7e69a8c4d06b7ecb874f571e82b715a9f79bc3c4 (patch)
tree | 0248fb8f7a3e445cc3c744252abeecabb9205c05 /arch/mips
parent | b6825d2df55aa7d7341c715b577b73a6a03dc944 (diff)
parent | d5120ae72a066b18f98e0c45ce73262f58030851 (diff)
Merge branch 's3c-move' into devel
Conflicts:
arch/arm/mach-versatile/core.c
Diffstat (limited to 'arch/mips')
-rw-r--r-- | arch/mips/Kconfig | 53
-rw-r--r-- | arch/mips/au1000/common/gpio.c | 6
-rw-r--r-- | arch/mips/kernel/Makefile | 1
-rw-r--r-- | arch/mips/kernel/cevt-r4k.c | 173
-rw-r--r-- | arch/mips/kernel/cevt-smtc.c | 321
-rw-r--r-- | arch/mips/kernel/cpu-probe.c | 26
-rw-r--r-- | arch/mips/kernel/entry.S | 10
-rw-r--r-- | arch/mips/kernel/genex.S | 41
-rw-r--r-- | arch/mips/kernel/head.S | 1
-rw-r--r-- | arch/mips/kernel/kgdb.c | 3
-rw-r--r-- | arch/mips/kernel/mips-mt-fpaff.c | 2
-rw-r--r-- | arch/mips/kernel/process.c | 19
-rw-r--r-- | arch/mips/kernel/ptrace.c | 2
-rw-r--r-- | arch/mips/kernel/smtc.c | 260
-rw-r--r-- | arch/mips/kernel/traps.c | 28
-rw-r--r-- | arch/mips/kernel/vmlinux.lds.S | 1
-rw-r--r-- | arch/mips/lib/csum_partial.S | 21
-rw-r--r-- | arch/mips/mti-malta/Makefile | 2
-rw-r--r-- | arch/mips/mti-malta/malta-smtc.c | 9
-rw-r--r-- | arch/mips/pci/Makefile | 1
-rw-r--r-- | arch/mips/pci/pci-bcm47xx.c | 60
-rw-r--r-- | arch/mips/pci/pci-ip27.c | 40
-rw-r--r-- | arch/mips/sibyte/swarm/Makefile | 3
-rw-r--r-- | arch/mips/sibyte/swarm/platform.c | 81
24 files changed, 819 insertions, 345 deletions
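One idiom worth noting before the diff itself: the new arch/mips/kernel/cevt-smtc.c below schedules timer events with truncated modular arithmetic on the 32-bit Count register (the IS_SOONER() macro). A minimal stand-alone C sketch of that idiom — is_sooner() here is a hypothetical stand-in for the macro, not kernel code — shows why subtracting a common reference makes the comparison safe across Count wraparound:

```c
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "which timestamp expires first" test, mirroring the
 * IS_SOONER() macro in cevt-smtc.c below. Unsigned subtraction of a
 * common reference turns each absolute timestamp into the distance it
 * still has to travel, which stays correct when Count wraps. */
static int is_sooner(uint32_t a, uint32_t b, uint32_t reference)
{
	return (uint32_t)(a - reference) < (uint32_t)(b - reference);
}

int main(void)
{
	uint32_t now = 0xfffffff0u;	/* Count is about to wrap     */
	uint32_t t1  = 0xfffffffeu;	/* 14 ticks away              */
	uint32_t t2  = 0x00000010u;	/* 32 ticks away, post-wrap   */

	/* A naive t1 < t2 picks t2; the reference-relative form
	 * correctly reports that t1 expires first. */
	printf("naive: %d  wrap-safe: %d\n",
	       t1 < t2, is_sooner(t1, t2, now));
	return 0;
}
```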
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 49896a2a1d7..1e06d233fa8 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -211,6 +211,7 @@ config MIPS_MALTA select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN + select SYS_SUPPORTS_MIPS_CMP if BROKEN # because SYNC_R4K is broken select SYS_SUPPORTS_MULTITHREADING select SYS_SUPPORTS_SMARTMIPS help @@ -1403,7 +1404,6 @@ config MIPS_MT_SMTC depends on CPU_MIPS32_R2 #depends on CPU_MIPS64_R2 # once there is hardware ... depends on SYS_SUPPORTS_MULTITHREADING - select GENERIC_CLOCKEVENTS_BROADCAST select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI select MIPS_MT @@ -1451,32 +1451,17 @@ config MIPS_VPE_LOADER Includes a loader for loading an elf relocatable object onto another VPE and running it. -config MIPS_MT_SMTC_INSTANT_REPLAY - bool "Low-latency Dispatch of Deferred SMTC IPIs" - depends on MIPS_MT_SMTC && !PREEMPT - default y - help - SMTC pseudo-interrupts between TCs are deferred and queued - if the target TC is interrupt-inhibited (IXMT). In the first - SMTC prototypes, these queued IPIs were serviced on return - to user mode, or on entry into the kernel idle loop. The - INSTANT_REPLAY option dispatches them as part of local_irq_restore() - processing, which adds runtime overhead (hence the option to turn - it off), but ensures that IPIs are handled promptly even under - heavy I/O interrupt load. - config MIPS_MT_SMTC_IM_BACKSTOP bool "Use per-TC register bits as backstop for inhibited IM bits" depends on MIPS_MT_SMTC - default y + default n help To support multiple TC microthreads acting as "CPUs" within a VPE, VPE-wide interrupt mask bits must be specially manipulated during interrupt handling. To support legacy drivers and interrupt controller management code, SMTC has a "backstop" to track and if necessary restore the interrupt mask. This has some performance - impact on interrupt service overhead. Disable it only if you know - what you are doing. + impact on interrupt service overhead. config MIPS_MT_SMTC_IRQAFF bool "Support IRQ affinity API" @@ -1486,10 +1471,8 @@ config MIPS_MT_SMTC_IRQAFF Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.) for SMTC Linux kernel. Requires platform support, of which an example can be found in the MIPS kernel i8259 and Malta - platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY - be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to - interrupt dispatch, and should be used only if you know what - you are doing. + platform code. Adds some overhead to interrupt dispatch, and + should be used only if you know what you are doing. config MIPS_VPE_LOADER_TOM bool "Load VPE program into memory hidden from linux" @@ -1517,6 +1500,18 @@ config MIPS_APSP_KSPD "exit" syscall notifying other kernel modules the SP program is exiting. You probably want to say yes here. +config MIPS_CMP + bool "MIPS CMP framework support" + depends on SYS_SUPPORTS_MIPS_CMP + select SYNC_R4K if BROKEN + select SYS_SUPPORTS_SMP + select SYS_SUPPORTS_SCHED_SMT if SMP + select WEAK_ORDERING + default n + help + This is a placeholder option for the GCMP work. It will need to + be handled differently... + config SB1_PASS_1_WORKAROUNDS bool depends on CPU_SB1_PASS_1 @@ -1693,6 +1688,9 @@ config SMP config SMP_UP bool +config SYS_SUPPORTS_MIPS_CMP + bool + config SYS_SUPPORTS_SMP bool @@ -1740,17 +1738,6 @@ config NR_CPUS performance should round up your number of processors to the next power of two. 
-config MIPS_CMP - bool "MIPS CMP framework support" - depends on SMP - select SYNC_R4K - select SYS_SUPPORTS_SCHED_SMT - select WEAK_ORDERING - default n - help - This is a placeholder option for the GCMP work. It will need to - be handled differently... - source "kernel/time/Kconfig" # diff --git a/arch/mips/au1000/common/gpio.c b/arch/mips/au1000/common/gpio.c index b485d94ce8a..e660ddd611c 100644 --- a/arch/mips/au1000/common/gpio.c +++ b/arch/mips/au1000/common/gpio.c @@ -48,7 +48,7 @@ static void au1xxx_gpio2_write(unsigned gpio, int value) { gpio -= AU1XXX_GPIO_BASE; - gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | (value << gpio); + gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio); } static int au1xxx_gpio2_direction_input(unsigned gpio) @@ -61,7 +61,8 @@ static int au1xxx_gpio2_direction_input(unsigned gpio) static int au1xxx_gpio2_direction_output(unsigned gpio, int value) { gpio -= AU1XXX_GPIO_BASE; - gpio2->dir = (0x01 << gpio) | (value << gpio); + gpio2->dir |= 0x01 << gpio; + gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio); return 0; } @@ -90,6 +91,7 @@ static int au1xxx_gpio1_direction_input(unsigned gpio) static int au1xxx_gpio1_direction_output(unsigned gpio, int value) { gpio1->trioutclr = (0x01 & gpio); + au1xxx_gpio1_write(gpio, value); return 0; } diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 706f9397479..25775cb5400 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o +obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 24a2d907aa0..4a4c59f2737 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -12,6 +12,14 @@ #include <asm/smtc_ipi.h> #include <asm/time.h> +#include <asm/cevt-r4k.h> + +/* + * The SMTC Kernel for the 34K, 1004K, et. al. replaces several + * of these routines with SMTC-specific variants. + */ + +#ifndef CONFIG_MIPS_MT_SMTC static int mips_next_event(unsigned long delta, struct clock_event_device *evt) @@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta, unsigned int cnt; int res; -#ifdef CONFIG_MIPS_MT_SMTC - { - unsigned long flags, vpflags; - local_irq_save(flags); - vpflags = dvpe(); -#endif cnt = read_c0_count(); cnt += delta; write_c0_compare(cnt); res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; -#ifdef CONFIG_MIPS_MT_SMTC - evpe(vpflags); - local_irq_restore(flags); - } -#endif return res; } -static void mips_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) +#endif /* CONFIG_MIPS_MT_SMTC */ + +void mips_set_clock_mode(enum clock_event_mode mode, + struct clock_event_device *evt) { /* Nothing to do ... */ } -static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); -static int cp0_timer_irq_installed; +DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); +int cp0_timer_irq_installed; -/* - * Timer ack for an R4k-compatible timer of a known frequency. - */ -static void c0_timer_ack(void) -{ - write_c0_compare(read_c0_compare()); -} +#ifndef CONFIG_MIPS_MT_SMTC -/* - * Possibly handle a performance counter interrupt. 
- * Return true if the timer interrupt should not be checked - */ -static inline int handle_perf_irq(int r2) -{ - /* - * The performance counter overflow interrupt may be shared with the - * timer interrupt (cp0_perfcount_irq < 0). If it is and a - * performance counter has overflowed (perf_irq() == IRQ_HANDLED) - * and we can't reliably determine if a counter interrupt has also - * happened (!r2) then don't check for a timer interrupt. - */ - return (cp0_perfcount_irq < 0) && - perf_irq() == IRQ_HANDLED && - !r2; -} - -static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) +irqreturn_t c0_compare_interrupt(int irq, void *dev_id) { const int r2 = cpu_has_mips_r2; struct clock_event_device *cd; @@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) * interrupt. Being the paranoiacs we are we check anyway. */ if (!r2 || (read_c0_cause() & (1 << 30))) { - c0_timer_ack(); -#ifdef CONFIG_MIPS_MT_SMTC - if (cpu_data[cpu].vpe_id) - goto out; - cpu = 0; -#endif + /* Clear Count/Compare Interrupt */ + write_c0_compare(read_c0_compare()); cd = &per_cpu(mips_clockevent_device, cpu); cd->event_handler(cd); } @@ -107,65 +78,16 @@ out: return IRQ_HANDLED; } -static struct irqaction c0_compare_irqaction = { +#endif /* Not CONFIG_MIPS_MT_SMTC */ + +struct irqaction c0_compare_irqaction = { .handler = c0_compare_interrupt, -#ifdef CONFIG_MIPS_MT_SMTC - .flags = IRQF_DISABLED, -#else .flags = IRQF_DISABLED | IRQF_PERCPU, -#endif .name = "timer", }; -#ifdef CONFIG_MIPS_MT_SMTC -DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); - -static void smtc_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ -} - -static void mips_broadcast(cpumask_t mask) -{ - unsigned int cpu; - - for_each_cpu_mask(cpu, mask) - smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); -} - -static void setup_smtc_dummy_clockevent_device(void) -{ - //uint64_t mips_freq = mips_hpt_^frequency; - unsigned int cpu = smp_processor_id(); - struct clock_event_device *cd; - cd = &per_cpu(smtc_dummy_clockevent_device, cpu); - - cd->name = "SMTC"; - cd->features = CLOCK_EVT_FEAT_DUMMY; - - /* Calculate the min / max delta */ - cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); - cd->shift = 0; //32; - cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd); - cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd); - - cd->rating = 200; - cd->irq = 17; //-1; -// if (cpu) -// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu); -// else - cd->cpumask = cpumask_of_cpu(cpu); - - cd->set_mode = smtc_set_mode; - - cd->broadcast = mips_broadcast; - - clockevents_register_device(cd); -} -#endif - -static void mips_event_handler(struct clock_event_device *dev) +void mips_event_handler(struct clock_event_device *dev) { } @@ -177,7 +99,23 @@ static int c0_compare_int_pending(void) return (read_c0_cause() >> cp0_compare_irq) & 0x100; } -static int c0_compare_int_usable(void) +/* + * Compare interrupt can be routed and latched outside the core, + * so a single execution hazard barrier may not be enough to give + * it time to clear as seen in the Cause register. 4 time the + * pipeline depth seems reasonably conservative, and empirically + * works better in configurations with high CPU/bus clock ratios. 
+ */ + +#define compare_change_hazard() \ + do { \ + irq_disable_hazard(); \ + irq_disable_hazard(); \ + irq_disable_hazard(); \ + irq_disable_hazard(); \ + } while (0) + +int c0_compare_int_usable(void) { unsigned int delta; unsigned int cnt; @@ -187,7 +125,7 @@ static int c0_compare_int_usable(void) */ if (c0_compare_int_pending()) { write_c0_compare(read_c0_count()); - irq_disable_hazard(); + compare_change_hazard(); if (c0_compare_int_pending()) return 0; } @@ -196,7 +134,7 @@ static int c0_compare_int_usable(void) cnt = read_c0_count(); cnt += delta; write_c0_compare(cnt); - irq_disable_hazard(); + compare_change_hazard(); if ((int)(read_c0_count() - cnt) < 0) break; /* increase delta if the timer was already expired */ @@ -205,11 +143,12 @@ static int c0_compare_int_usable(void) while ((int)(read_c0_count() - cnt) <= 0) ; /* Wait for expiry */ + compare_change_hazard(); if (!c0_compare_int_pending()) return 0; write_c0_compare(read_c0_count()); - irq_disable_hazard(); + compare_change_hazard(); if (c0_compare_int_pending()) return 0; @@ -219,6 +158,8 @@ static int c0_compare_int_usable(void) return 1; } +#ifndef CONFIG_MIPS_MT_SMTC + int __cpuinit mips_clockevent_init(void) { uint64_t mips_freq = mips_hpt_frequency; @@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void) if (!cpu_has_counter || !mips_hpt_frequency) return -ENXIO; -#ifdef CONFIG_MIPS_MT_SMTC - setup_smtc_dummy_clockevent_device(); - - /* - * On SMTC we only register VPE0's compare interrupt as clockevent - * device. - */ - if (cpu) - return 0; -#endif - if (!c0_compare_int_usable()) return -ENXIO; @@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void) cd->rating = 300; cd->irq = irq; -#ifdef CONFIG_MIPS_MT_SMTC - cd->cpumask = CPU_MASK_ALL; -#else cd->cpumask = cpumask_of_cpu(cpu); -#endif cd->set_next_event = mips_next_event; - cd->set_mode = mips_set_mode; + cd->set_mode = mips_set_clock_mode; cd->event_handler = mips_event_handler; clockevents_register_device(cd); @@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void) cp0_timer_irq_installed = 1; -#ifdef CONFIG_MIPS_MT_SMTC -#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq) - setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT); -#else setup_irq(irq, &c0_compare_irqaction); -#endif return 0; } + +#endif /* Not CONFIG_MIPS_MT_SMTC */ diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c new file mode 100644 index 00000000000..5162fe4b595 --- /dev/null +++ b/arch/mips/kernel/cevt-smtc.c @@ -0,0 +1,321 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007 MIPS Technologies, Inc. + * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> + * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl + */ +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/percpu.h> + +#include <asm/smtc_ipi.h> +#include <asm/time.h> +#include <asm/cevt-r4k.h> + +/* + * Variant clock event timer support for SMTC on MIPS 34K, 1004K + * or other MIPS MT cores. + * + * Notes on SMTC Support: + * + * SMTC has multiple microthread TCs pretending to be Linux CPUs. + * But there's only one Count/Compare pair per VPE, and Compare + * interrupts are taken opportunisitically by available TCs + * bound to the VPE with the Count register. The new timer + * framework provides for global broadcasts, but we really + * want VPE-level multicasts for best behavior. 
So instead + * of invoking the high-level clock-event broadcast code, + * this version of SMTC support uses the historical SMTC + * multicast mechanisms "under the hood", appearing to the + * generic clock layer as if the interrupts are per-CPU. + * + * The approach taken here is to maintain a set of NR_CPUS + * virtual timers, and track which "CPU" needs to be alerted + * at each event. + * + * It's unlikely that we'll see a MIPS MT core with more than + * 2 VPEs, but we *know* that we won't need to handle more + * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements + * is always going to be overkill, but always going to be enough. + */ + +unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; +static int smtc_nextinvpe[NR_CPUS]; + +/* + * Timestamps stored are absolute values to be programmed + * into Count register. Valid timestamps will never be zero. + * If a Zero Count value is actually calculated, it is converted + * to be a 1, which will introduce 1 or two CPU cycles of error + * roughly once every four billion events, which at 1000 HZ means + * about once every 50 days. If that's actually a problem, one + * could alternate squashing 0 to 1 and to -1. + */ + +#define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) +#define ISVALID(x) ((x) != 0L) + +/* + * Time comparison is subtle, as it's really truncated + * modular arithmetic. + */ + +#define IS_SOONER(a, b, reference) \ + (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) + +/* + * CATCHUP_INCREMENT, used when the function falls behind the counter. + * Could be an increasing function instead of a constant; + */ + +#define CATCHUP_INCREMENT 64 + +static int mips_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + unsigned long flags; + unsigned int mtflags; + unsigned long timestamp, reference, previous; + unsigned long nextcomp = 0L; + int vpe = current_cpu_data.vpe_id; + int cpu = smp_processor_id(); + local_irq_save(flags); + mtflags = dmt(); + + /* + * Maintain the per-TC virtual timer + * and program the per-VPE shared Count register + * as appropriate here... + */ + reference = (unsigned long)read_c0_count(); + timestamp = MAKEVALID(reference + delta); + /* + * To really model the clock, we have to catch the case + * where the current next-in-VPE timestamp is the old + * timestamp for the calling CPE, but the new value is + * in fact later. In that case, we have to do a full + * scan and discover the new next-in-VPE CPU id and + * timestamp. + */ + previous = smtc_nexttime[vpe][cpu]; + if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) + && IS_SOONER(previous, timestamp, reference)) { + int i; + int soonest = cpu; + + /* + * Update timestamp array here, so that new + * value gets considered along with those of + * other virtual CPUs on the VPE. + */ + smtc_nexttime[vpe][cpu] = timestamp; + for_each_online_cpu(i) { + if (ISVALID(smtc_nexttime[vpe][i]) + && IS_SOONER(smtc_nexttime[vpe][i], + smtc_nexttime[vpe][soonest], reference)) { + soonest = i; + } + } + smtc_nextinvpe[vpe] = soonest; + nextcomp = smtc_nexttime[vpe][soonest]; + /* + * Otherwise, we don't have to process the whole array rank, + * we just have to see if the event horizon has gotten closer. + */ + } else { + if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || + IS_SOONER(timestamp, + smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { + smtc_nextinvpe[vpe] = cpu; + nextcomp = timestamp; + } + /* + * Since next-in-VPE may me the same as the executing + * virtual CPU, we update the array *after* checking + * its value. 
+ */ + smtc_nexttime[vpe][cpu] = timestamp; + } + + /* + * It may be that, in fact, we don't need to update Compare, + * but if we do, we want to make sure we didn't fall into + * a crack just behind Count. + */ + if (ISVALID(nextcomp)) { + write_c0_compare(nextcomp); + ehb(); + /* + * We never return an error, we just make sure + * that we trigger the handlers as quickly as + * we can if we fell behind. + */ + while ((nextcomp - (unsigned long)read_c0_count()) + > (unsigned long)LONG_MAX) { + nextcomp += CATCHUP_INCREMENT; + write_c0_compare(nextcomp); + ehb(); + } + } + emt(mtflags); + local_irq_restore(flags); + return 0; +} + + +void smtc_distribute_timer(int vpe) +{ + unsigned long flags; + unsigned int mtflags; + int cpu; + struct clock_event_device *cd; + unsigned long nextstamp = 0L; + unsigned long reference; + + +repeat: + for_each_online_cpu(cpu) { + /* + * Find virtual CPUs within the current VPE who have + * unserviced timer requests whose time is now past. + */ + local_irq_save(flags); + mtflags = dmt(); + if (cpu_data[cpu].vpe_id == vpe && + ISVALID(smtc_nexttime[vpe][cpu])) { + reference = (unsigned long)read_c0_count(); + if ((smtc_nexttime[vpe][cpu] - reference) + > (unsigned long)LONG_MAX) { + smtc_nexttime[vpe][cpu] = 0L; + emt(mtflags); + local_irq_restore(flags); + /* + * We don't send IPIs to ourself. + */ + if (cpu != smp_processor_id()) { + smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); + } else { + cd = &per_cpu(mips_clockevent_device, cpu); + cd->event_handler(cd); + } + } else { + /* Local to VPE but Valid Time not yet reached. */ + if (!ISVALID(nextstamp) || + IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, + reference)) { + smtc_nextinvpe[vpe] = cpu; + nextstamp = smtc_nexttime[vpe][cpu]; + } + emt(mtflags); + local_irq_restore(flags); + } + } else { + emt(mtflags); + local_irq_restore(flags); + + } + } + /* Reprogram for interrupt at next soonest timestamp for VPE */ + if (ISVALID(nextstamp)) { + write_c0_compare(nextstamp); + ehb(); + if ((nextstamp - (unsigned long)read_c0_count()) + > (unsigned long)LONG_MAX) + goto repeat; + } +} + + +irqreturn_t c0_compare_interrupt(int irq, void *dev_id) +{ + int cpu = smp_processor_id(); + + /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ + handle_perf_irq(1); + + if (read_c0_cause() & (1 << 30)) { + /* Clear Count/Compare Interrupt */ + write_c0_compare(read_c0_compare()); + smtc_distribute_timer(cpu_data[cpu].vpe_id); + } + return IRQ_HANDLED; +} + + +int __cpuinit mips_clockevent_init(void) +{ + uint64_t mips_freq = mips_hpt_frequency; + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd; + unsigned int irq; + int i; + int j; + + if (!cpu_has_counter || !mips_hpt_frequency) + return -ENXIO; + if (cpu == 0) { + for (i = 0; i < num_possible_cpus(); i++) { + smtc_nextinvpe[i] = 0; + for (j = 0; j < num_possible_cpus(); j++) + smtc_nexttime[i][j] = 0L; + } + /* + * SMTC also can't have the usablility test + * run by secondary TCs once Compare is in use. + */ + if (!c0_compare_int_usable()) + return -ENXIO; + } + + /* + * With vectored interrupts things are getting platform specific. + * get_c0_compare_int is a hook to allow a platform to return the + * interrupt number of it's liking. 
+ */ + irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; + if (get_c0_compare_int) + irq = get_c0_compare_int(); + + cd = &per_cpu(mips_clockevent_device, cpu); + + cd->name = "MIPS"; + cd->features = CLOCK_EVT_FEAT_ONESHOT; + + /* Calculate the min / max delta */ + cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); + cd->shift = 32; + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + + cd->rating = 300; + cd->irq = irq; + cd->cpumask = cpumask_of_cpu(cpu); + cd->set_next_event = mips_next_event; + cd->set_mode = mips_set_clock_mode; + cd->event_handler = mips_event_handler; + + clockevents_register_device(cd); + + /* + * On SMTC we only want to do the data structure + * initialization and IRQ setup once. + */ + if (cpu) + return 0; + /* + * And we need the hwmask associated with the c0_compare + * vector to be initialized. + */ + irq_hwmask[irq] = (0x100 << cp0_compare_irq); + if (cp0_timer_irq_installed) + return 0; + + cp0_timer_irq_installed = 1; + + setup_irq(irq, &c0_compare_irqaction); + + return 0; +} diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 335a6ae3d59..e621fda8ab3 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -45,18 +45,7 @@ static void r39xx_wait(void) local_irq_enable(); } -/* - * There is a race when WAIT instruction executed with interrupt - * enabled. - * But it is implementation-dependent wheter the pipelie restarts when - * a non-enabled interrupt is requested. - */ -static void r4k_wait(void) -{ - __asm__(" .set mips3 \n" - " wait \n" - " .set mips0 \n"); -} +extern void r4k_wait(void); /* * This variant is preferable as it allows testing need_resched and going to @@ -65,14 +54,18 @@ static void r4k_wait(void) * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes * using this version a gamble. 
*/ -static void r4k_wait_irqoff(void) +void r4k_wait_irqoff(void) { local_irq_disable(); if (!need_resched()) - __asm__(" .set mips3 \n" + __asm__(" .set push \n" + " .set mips3 \n" " wait \n" - " .set mips0 \n"); + " .set pop \n"); local_irq_enable(); + __asm__(" .globl __pastwait \n" + "__pastwait: \n"); + return; } /* @@ -128,7 +121,7 @@ static int __init wait_disable(char *s) __setup("nowait", wait_disable); -static inline void check_wait(void) +void __init check_wait(void) { struct cpuinfo_mips *c = ¤t_cpu_data; @@ -242,7 +235,6 @@ static inline void check_errata(void) void __init check_bugs32(void) { - check_wait(); check_errata(); } diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index e29598ae939..ffa331029e0 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -79,11 +79,6 @@ FEXPORT(syscall_exit) FEXPORT(restore_all) # restore full frame #ifdef CONFIG_MIPS_MT_SMTC -/* Detect and execute deferred IPI "interrupts" */ - LONG_L s0, TI_REGS($28) - LONG_S sp, TI_REGS($28) - jal deferred_smtc_ipi - LONG_S s0, TI_REGS($28) #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP /* Re-arm any temporarily masked interrupts not explicitly "acked" */ mfc0 v0, CP0_TCSTATUS @@ -112,6 +107,11 @@ FEXPORT(restore_all) # restore full frame xor t0, t0, t3 mtc0 t0, CP0_TCCONTEXT #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ +/* Detect and execute deferred IPI "interrupts" */ + LONG_L s0, TI_REGS($28) + LONG_S sp, TI_REGS($28) + jal deferred_smtc_ipi + LONG_S s0, TI_REGS($28) #endif /* CONFIG_MIPS_MT_SMTC */ .set noat RESTORE_TEMP diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index c6ada98ee04..01dcbe38fa0 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -20,6 +20,7 @@ #include <asm/stackframe.h> #include <asm/war.h> #include <asm/page.h> +#include <asm/thread_info.h> #define PANIC_PIC(msg) \ .set push; \ @@ -126,7 +127,42 @@ handle_vcei: __FINIT + .align 5 /* 32 byte rollback region */ +LEAF(r4k_wait) + .set push + .set noreorder + /* start of rollback region */ + LONG_L t0, TI_FLAGS($28) + nop + andi t0, _TIF_NEED_RESCHED + bnez t0, 1f + nop + nop + nop + .set mips3 + wait + /* end of rollback region (the region size must be power of two) */ + .set pop +1: + jr ra + END(r4k_wait) + + .macro BUILD_ROLLBACK_PROLOGUE handler + FEXPORT(rollback_\handler) + .set push + .set noat + MFC0 k0, CP0_EPC + PTR_LA k1, r4k_wait + ori k0, 0x1f /* 32 byte rollback region */ + xori k0, 0x1f + bne k0, k1, 9f + MTC0 k0, CP0_EPC +9: + .set pop + .endm + .align 5 +BUILD_ROLLBACK_PROLOGUE handle_int NESTED(handle_int, PT_SIZE, sp) #ifdef CONFIG_TRACE_IRQFLAGS /* @@ -201,6 +237,7 @@ NESTED(except_vec_ejtag_debug, 0, sp) * This prototype is copied to ebase + n*IntCtl.VS and patched * to invoke the handler */ +BUILD_ROLLBACK_PROLOGUE except_vec_vi NESTED(except_vec_vi, 0, sp) SAVE_SOME SAVE_AT @@ -245,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp) and t0, a0, t1 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP mfc0 t2, CP0_TCCONTEXT - or t0, t0, t2 - mtc0 t0, CP0_TCCONTEXT + or t2, t0, t2 + mtc0 t2, CP0_TCCONTEXT #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ xor t1, t1, t0 mtc0 t1, CP0_STATUS diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index 361364501d3..492a0a8d70f 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -22,6 +22,7 @@ #include <asm/irqflags.h> #include <asm/regdef.h> #include <asm/page.h> +#include <asm/pgtable-bits.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> diff --git a/arch/mips/kernel/kgdb.c 
b/arch/mips/kernel/kgdb.c index 8f6d58ede33..6e152c80cd4 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -236,8 +236,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, atomic_set(&kgdb_cpu_doing_single_step, -1); if (remcom_in_buffer[0] == 's') - if (kgdb_contthread) - atomic_set(&kgdb_cpu_doing_single_step, cpu); + atomic_set(&kgdb_cpu_doing_single_step, cpu); return 0; } diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index df4d3f2f740..dc9eb72ed9d 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh); /* * FPU Use Factor empirically derived from experiments on 34K */ -#define FPUSEFACTOR 333 +#define FPUSEFACTOR 2000 static __init int mt_fp_affinity_init(void) { diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index b16facd9ea8..22fc19bbe87 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -55,7 +55,7 @@ void __noreturn cpu_idle(void) while (1) { tick_nohz_stop_sched_tick(1); while (!need_resched()) { -#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG +#ifdef CONFIG_MIPS_MT_SMTC extern void smtc_idle_loop_hook(void); smtc_idle_loop_hook(); @@ -145,17 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, */ p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); - clear_tsk_thread_flag(p, TIF_USEDFPU); -#ifdef CONFIG_MIPS_MT_FPAFF +#ifdef CONFIG_MIPS_MT_SMTC /* - * FPU affinity support is cleaner if we track the - * user-visible CPU affinity from the very beginning. - * The generic cpus_allowed mask will already have - * been copied from the parent before copy_thread - * is invoked. + * SMTC restores TCStatus after Status, and the CU bits + * are aliased there. */ - p->thread.user_cpus_allowed = p->cpus_allowed; + childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); +#endif + clear_tsk_thread_flag(p, TIF_USEDFPU); + +#ifdef CONFIG_MIPS_MT_FPAFF + clear_tsk_thread_flag(p, TIF_FPUBOUND); #endif /* CONFIG_MIPS_MT_FPAFF */ if (clone_flags & CLONE_SETTLS) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 35234b92b9a..96ffc9c6d19 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) case FPC_EIR: { /* implementation / version register */ unsigned int flags; #ifdef CONFIG_MIPS_MT_SMTC - unsigned int irqflags; + unsigned long irqflags; unsigned int mtflags; #endif /* CONFIG_MIPS_MT_SMTC */ diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index a516286532a..897fb2b4751 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -1,4 +1,21 @@ -/* Copyright (C) 2004 Mips Technologies, Inc */ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2004 Mips Technologies, Inc + * Copyright (C) 2008 Kevin D. Kissell + */ #include <linux/clockchips.h> #include <linux/kernel.h> @@ -21,7 +38,6 @@ #include <asm/time.h> #include <asm/addrspace.h> #include <asm/smtc.h> -#include <asm/smtc_ipi.h> #include <asm/smtc_proc.h> /* @@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS]; asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; -/* - * Clock interrupt "latch" buffers, per "CPU" - */ - -static atomic_t ipi_timer_latch[NR_CPUS]; /* * Number of InterProcessor Interrupt (IPI) message buffers to allocate @@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS]; #define IPIBUF_PER_CPU 4 -static struct smtc_ipi_q IPIQ[NR_CPUS]; +struct smtc_ipi_q IPIQ[NR_CPUS]; static struct smtc_ipi_q freeIPIq; @@ -282,7 +293,7 @@ static void smtc_configure_tlb(void) * phys_cpu_present_map and the logical/physical mappings. */ -int __init mipsmt_build_cpu_map(int start_cpu_slot) +int __init smtc_build_cpu_map(int start_cpu_slot) { int i, ntcs; @@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A); - write_tc_c0_tccontext(0); + /* + * TCContext gets an offset from the base of the IPIQ array + * to be used in low-level code to detect the presence of + * an active IPI queue + */ + write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); /* Bind tc to vpe */ write_tc_c0_tcbind(vpe); /* In general, all TCs should have the same cpu_data indications */ @@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) cpu_data[cpu].options &= ~MIPS_CPU_FPU; cpu_data[cpu].vpe_id = vpe; cpu_data[cpu].tc_id = tc; + /* Multi-core SMTC hasn't been tested, but be prepared */ + cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; } +/* + * Tweak to get Count registes in as close a sync as possible. + * Value seems good for 34K-class cores. + */ + +#define CP0_SKEW 8 -void mipsmt_prepare_cpus(void) +void smtc_prepare_cpus(int cpus) { int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; unsigned long flags; @@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void) IPIQ[i].head = IPIQ[i].tail = NULL; spin_lock_init(&IPIQ[i].lock); IPIQ[i].depth = 0; - atomic_set(&ipi_timer_latch[i], 0); } /* cpu_data index starts at zero */ cpu = 0; cpu_data[cpu].vpe_id = 0; cpu_data[cpu].tc_id = 0; + cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; cpu++; /* Report on boot-time options */ @@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void) write_vpe_c0_compare(0); /* Propagate Config7 */ write_vpe_c0_config7(read_c0_config7()); - write_vpe_c0_count(read_c0_count()); + write_vpe_c0_count(read_c0_count() + CP0_SKEW); + ehb(); } /* enable multi-threading within VPE */ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); @@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void) void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) { extern u32 kernelsp[NR_CPUS]; - long flags; + unsigned long flags; int mtflags; LOCK_MT_PRA(); @@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) void smtc_init_secondary(void) { - /* - * Start timer on secondary VPEs if necessary. - * plat_timer_setup has already have been invoked by init/main - * on "boot" TC. 
Like per_cpu_trap_init() hack, this assumes that - * SMTC init code assigns TCs consdecutively and in ascending order - * to across available VPEs. - */ - if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && - ((read_c0_tcbind() & TCBIND_CURVPE) - != cpu_data[smp_processor_id() - 1].vpe_id)){ - write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); - } - local_irq_enable(); } void smtc_smp_finish(void) { + int cpu = smp_processor_id(); + + /* + * Lowest-numbered CPU per VPE starts a clock tick. + * Like per_cpu_trap_init() hack, this assumes that + * SMTC init code assigns TCs consdecutively and + * in ascending order across available VPEs. + */ + if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) + write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); + printk("TC %d going on-line as CPU %d\n", cpu_data[smp_processor_id()].tc_id, smp_processor_id()); } @@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) { int tcstatus; struct smtc_ipi *pipi; - long flags; + unsigned long flags; int mtflags; + unsigned long tcrestart; + extern void r4k_wait_irqoff(void), __pastwait(void); if (cpu == smp_processor_id()) { printk("Cannot Send IPI to self!\n"); @@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) pipi->arg = (void *)action; pipi->dest = cpu; if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { - if (type == SMTC_CLOCK_TICK) - atomic_inc(&ipi_timer_latch[cpu]); /* If not on same VPE, enqueue and send cross-VPE interrupt */ smtc_ipi_nq(&IPIQ[cpu], pipi); LOCK_CORE_PRA(); @@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) if ((tcstatus & TCSTATUS_IXMT) != 0) { /* - * Spin-waiting here can deadlock, - * so we queue the message for the target TC. + * If we're in the the irq-off version of the wait + * loop, we need to force exit from the wait and + * do a direct post of the IPI. 
+ */ + if (cpu_wait == r4k_wait_irqoff) { + tcrestart = read_tc_c0_tcrestart(); + if (tcrestart >= (unsigned long)r4k_wait_irqoff + && tcrestart < (unsigned long)__pastwait) { + write_tc_c0_tcrestart(__pastwait); + tcstatus &= ~TCSTATUS_IXMT; + write_tc_c0_tcstatus(tcstatus); + goto postdirect; + } + } + /* + * Otherwise we queue the message for the target TC + * to pick up when he does a local_irq_restore() */ write_tc_c0_tchalt(0); UNLOCK_CORE_PRA(); - /* Try to reduce redundant timer interrupt messages */ - if (type == SMTC_CLOCK_TICK) { - if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){ - smtc_ipi_nq(&freeIPIq, pipi); - return; - } - } smtc_ipi_nq(&IPIQ[cpu], pipi); } else { - if (type == SMTC_CLOCK_TICK) - atomic_inc(&ipi_timer_latch[cpu]); +postdirect: post_direct_ipi(cpu, pipi); write_tc_c0_tchalt(0); UNLOCK_CORE_PRA(); @@ -883,7 +913,7 @@ static void ipi_call_interrupt(void) smp_call_function_interrupt(); } -DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); +DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); void ipi_decode(struct smtc_ipi *pipi) { @@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi) struct clock_event_device *cd; void *arg_copy = pipi->arg; int type_copy = pipi->type; - int ticks; - smtc_ipi_nq(&freeIPIq, pipi); switch (type_copy) { case SMTC_CLOCK_TICK: irq_enter(); kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; - cd = &per_cpu(smtc_dummy_clockevent_device, cpu); - ticks = atomic_read(&ipi_timer_latch[cpu]); - atomic_sub(ticks, &ipi_timer_latch[cpu]); - while (ticks) { - cd->event_handler(cd); - ticks--; - } + cd = &per_cpu(mips_clockevent_device, cpu); + cd->event_handler(cd); irq_exit(); break; @@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi) } } +/* + * Similar to smtc_ipi_replay(), but invoked from context restore, + * so it reuses the current exception frame rather than set up a + * new one with self_ipi. + */ + void deferred_smtc_ipi(void) { - struct smtc_ipi *pipi; - unsigned long flags; -/* DEBUG */ - int q = smp_processor_id(); + int cpu = smp_processor_id(); /* * Test is not atomic, but much faster than a dequeue, * and the vast majority of invocations will have a null queue. + * If irq_disabled when this was called, then any IPIs queued + * after we test last will be taken on the next irq_enable/restore. + * If interrupts were enabled, then any IPIs added after the + * last test will be taken directly. */ - if (IPIQ[q].head != NULL) { - while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) { - /* ipi_decode() should be called with interrupts off */ - local_irq_save(flags); + + while (IPIQ[cpu].head != NULL) { + struct smtc_ipi_q *q = &IPIQ[cpu]; + struct smtc_ipi *pipi; + unsigned long flags; + + /* + * It may be possible we'll come in with interrupts + * already enabled. + */ + local_irq_save(flags); + + spin_lock(&q->lock); + pipi = __smtc_ipi_dq(q); + spin_unlock(&q->lock); + if (pipi != NULL) ipi_decode(pipi); - local_irq_restore(flags); - } + /* + * The use of the __raw_local restore isn't + * as obviously necessary here as in smtc_ipi_replay(), + * but it's more efficient, given that we're already + * running down the IPI queue. 
+ */ + __raw_local_irq_restore(flags); } } @@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm) struct smtc_ipi *pipi; unsigned long tcstatus; int sent; - long flags; + unsigned long flags; unsigned int mtflags; unsigned int vpflags; @@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe) /* * SMTC-specific hacks invoked from elsewhere in the kernel. - * - * smtc_ipi_replay is called from raw_local_irq_restore which is only ever - * called with interrupts disabled. We do rely on interrupts being disabled - * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would - * result in a recursive call to raw_local_irq_restore(). */ -static void __smtc_ipi_replay(void) + /* + * smtc_ipi_replay is called from raw_local_irq_restore + */ + +void smtc_ipi_replay(void) { unsigned int cpu = smp_processor_id(); /* * To the extent that we've ever turned interrupts off, * we may have accumulated deferred IPIs. This is subtle. - * If we use the smtc_ipi_qdepth() macro, we'll get an - * exact number - but we'll also disable interrupts - * and create a window of failure where a new IPI gets - * queued after we test the depth but before we re-enable - * interrupts. So long as IXMT never gets set, however, * we should be OK: If we pick up something and dispatch * it here, that's great. If we see nothing, but concurrent * with this operation, another TC sends us an IPI, IXMT * is clear, and we'll handle it as a real pseudo-interrupt - * and not a pseudo-pseudo interrupt. + * and not a pseudo-pseudo interrupt. The important thing + * is to do the last check for queued message *after* the + * re-enabling of interrupts. */ - if (IPIQ[cpu].depth > 0) { - while (1) { - struct smtc_ipi_q *q = &IPIQ[cpu]; - struct smtc_ipi *pipi; - extern void self_ipi(struct smtc_ipi *); - - spin_lock(&q->lock); - pipi = __smtc_ipi_dq(q); - spin_unlock(&q->lock); - if (!pipi) - break; + while (IPIQ[cpu].head != NULL) { + struct smtc_ipi_q *q = &IPIQ[cpu]; + struct smtc_ipi *pipi; + unsigned long flags; + + /* + * It's just possible we'll come in with interrupts + * already enabled. + */ + local_irq_save(flags); + + spin_lock(&q->lock); + pipi = __smtc_ipi_dq(q); + spin_unlock(&q->lock); + /* + ** But use a raw restore here to avoid recursion. + */ + __raw_local_irq_restore(flags); + if (pipi) { self_ipi(pipi); smtc_cpu_stats[cpu].selfipis++; } } } -void smtc_ipi_replay(void) -{ - raw_local_irq_disable(); - __smtc_ipi_replay(); -} - EXPORT_SYMBOL(smtc_ipi_replay); void smtc_idle_loop_hook(void) @@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void) } } - /* - * Now that we limit outstanding timer IPIs, check for hung TC - */ - for (tc = 0; tc < NR_CPUS; tc++) { - /* Don't check ourself - we'll dequeue IPIs just below */ - if ((tc != smp_processor_id()) && - atomic_read(&ipi_timer_latch[tc]) > timerq_limit) { - if (clock_hang_reported[tc] == 0) { - pdb_msg += sprintf(pdb_msg, - "TC %d looks hung with timer latch at %d\n", - tc, atomic_read(&ipi_timer_latch[tc])); - clock_hang_reported[tc]++; - } - } - } emt(mtflags); local_irq_restore(flags); if (pdb_msg != &id_ho_db_msg[0]) printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ - /* - * Replay any accumulated deferred IPIs. If "Instant Replay" - * is in use, there should never be any. 
- */ -#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY - { - unsigned long flags; - - local_irq_save(flags); - __smtc_ipi_replay(); - local_irq_restore(flags); - } -#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */ + smtc_ipi_replay(); } void smtc_soft_dump(void) @@ -1242,10 +1260,6 @@ void smtc_soft_dump(void) printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); } smtc_ipi_qdump(); - printk("Timer IPI Backlogs:\n"); - for (i=0; i < NR_CPUS; i++) { - printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i])); - } printk("%d Recoveries of \"stolen\" FPU\n", atomic_read(&smtc_fpu_recoveries)); } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 6bee29097a5..b602ac6eb47 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -46,6 +46,9 @@ #include <asm/types.h> #include <asm/stacktrace.h> +extern void check_wait(void); +extern asmlinkage void r4k_wait(void); +extern asmlinkage void rollback_handle_int(void); extern asmlinkage void handle_int(void); extern asmlinkage void handle_tlbm(void); extern asmlinkage void handle_tlbl(void); @@ -822,8 +825,10 @@ static void mt_ase_fp_affinity(void) if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { cpumask_t tmask; - cpus_and(tmask, current->thread.user_cpus_allowed, - mt_fpu_cpumask); + current->thread.user_cpus_allowed + = current->cpus_allowed; + cpus_and(tmask, current->cpus_allowed, + mt_fpu_cpumask); set_cpus_allowed(current, tmask); set_thread_flag(TIF_FPUBOUND); } @@ -1251,6 +1256,9 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) extern char except_vec_vi, except_vec_vi_lui; extern char except_vec_vi_ori, except_vec_vi_end; + extern char rollback_except_vec_vi; + char *vec_start = (cpu_wait == r4k_wait) ? + &rollback_except_vec_vi : &except_vec_vi; #ifdef CONFIG_MIPS_MT_SMTC /* * We need to provide the SMTC vectored interrupt handler @@ -1258,11 +1266,11 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) * Status.IM bit to be masked before going there. */ extern char except_vec_vi_mori; - const int mori_offset = &except_vec_vi_mori - &except_vec_vi; + const int mori_offset = &except_vec_vi_mori - vec_start; #endif /* CONFIG_MIPS_MT_SMTC */ - const int handler_len = &except_vec_vi_end - &except_vec_vi; - const int lui_offset = &except_vec_vi_lui - &except_vec_vi; - const int ori_offset = &except_vec_vi_ori - &except_vec_vi; + const int handler_len = &except_vec_vi_end - vec_start; + const int lui_offset = &except_vec_vi_lui - vec_start; + const int ori_offset = &except_vec_vi_ori - vec_start; if (handler_len > VECTORSPACING) { /* @@ -1272,7 +1280,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) panic("VECTORSPACING too small"); } - memcpy(b, &except_vec_vi, handler_len); + memcpy(b, vec_start, handler_len); #ifdef CONFIG_MIPS_MT_SMTC BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ @@ -1554,6 +1562,10 @@ void __init trap_init(void) extern char except_vec3_generic, except_vec3_r4000; extern char except_vec4; unsigned long i; + int rollback; + + check_wait(); + rollback = (cpu_wait == r4k_wait); #if defined(CONFIG_KGDB) if (kgdb_early_setup) @@ -1618,7 +1630,7 @@ void __init trap_init(void) if (board_be_init) board_be_init(); - set_except_vector(0, handle_int); + set_except_vector(0, rollback ? 
rollback_handle_int : handle_int); set_except_vector(1, handle_tlbm); set_except_vector(2, handle_tlbl); set_except_vector(3, handle_tlbs); diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index b5470ceb418..afb119f3568 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -36,6 +36,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT KPROBES_TEXT + *(.text.*) *(.fixup) *(.gnu.warning) } :text = 0 diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 8d7784122c1..edac9892c51 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S @@ -39,12 +39,14 @@ #ifdef USE_DOUBLE #define LOAD ld +#define LOAD32 lwu #define ADD daddu #define NBYTES 8 #else #define LOAD lw +#define LOAD32 lw #define ADD addu #define NBYTES 4 @@ -60,6 +62,14 @@ ADD sum, v1; \ .set pop +#define ADDC32(sum,reg) \ + .set push; \ + .set noat; \ + addu sum, reg; \ + sltu v1, sum, reg; \ + addu sum, v1; \ + .set pop + #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ LOAD _t0, (offset + UNIT(0))(src); \ LOAD _t1, (offset + UNIT(1))(src); \ @@ -132,7 +142,7 @@ LEAF(csum_partial) beqz t8, .Lqword_align andi t8, src, 0x8 - lw t0, 0x00(src) + LOAD32 t0, 0x00(src) LONG_SUBU a1, a1, 0x4 ADDC(sum, t0) PTR_ADDU src, src, 0x4 @@ -211,7 +221,7 @@ LEAF(csum_partial) LONG_SRL t8, t8, 0x2 .Lend_words: - lw t0, (src) + LOAD32 t0, (src) LONG_SUBU t8, t8, 0x1 ADDC(sum, t0) .set reorder /* DADDI_WAR */ @@ -230,6 +240,9 @@ LEAF(csum_partial) /* Still a full word to go */ ulw t1, (src) PTR_ADDIU src, 4 +#ifdef USE_DOUBLE + dsll t1, t1, 32 /* clear lower 32bit */ +#endif ADDC(sum, t1) 1: move t1, zero @@ -280,7 +293,7 @@ LEAF(csum_partial) 1: .set reorder /* Add the passed partial csum. */ - ADDC(sum, a2) + ADDC32(sum, a2) jr ra .set noreorder END(csum_partial) @@ -681,7 +694,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc) .set pop 1: .set reorder - ADDC(sum, psum) + ADDC32(sum, psum) jr ra .set noreorder diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile index 3b7dd722c32..cef2db8d222 100644 --- a/arch/mips/mti-malta/Makefile +++ b/arch/mips/mti-malta/Makefile @@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += malta-console.o obj-$(CONFIG_PCI) += malta-pci.o # FIXME FIXME FIXME -obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o +obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o EXTRA_CFLAGS += -Werror diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c index 5ea705e4945..f84a46a8ae6 100644 --- a/arch/mips/mti-malta/malta-smtc.c +++ b/arch/mips/mti-malta/malta-smtc.c @@ -84,12 +84,17 @@ static void msmtc_cpus_done(void) static void __init msmtc_smp_setup(void) { - mipsmt_build_cpu_map(0); + /* + * we won't get the definitive value until + * we've run smtc_prepare_cpus later, but + * we would appear to need an upper bound now. + */ + smp_num_siblings = smtc_build_cpu_map(0); } static void __init msmtc_prepare_cpus(unsigned int max_cpus) { - mipsmt_prepare_cpus(); + smtc_prepare_cpus(max_cpus); } struct plat_smp_ops msmtc_smp_ops = { diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index 15e01aec37f..c8c32f417b6 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_SOC_TX3927) += ops-tx3927.o obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o obj-$(CONFIG_MARKEINS) += ops-emma2rh.o pci-emma2rh.o fixup-emma2rh.o obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o +obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o # # These are still pretty much in the old state, watch, go blind. 
diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c new file mode 100644 index 00000000000..bea9b6cdfdb --- /dev/null +++ b/arch/mips/pci/pci-bcm47xx.c @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2008 Aurelien Jarno <aurelien@aurel32.net> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/ssb/ssb.h> + +int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return 0; +} + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + int res; + u8 slot, pin; + + res = ssb_pcibios_plat_dev_init(dev); + if (res < 0) { + printk(KERN_ALERT "PCI: Failed to init device %s\n", + pci_name(dev)); + return res; + } + + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + slot = PCI_SLOT(dev->devfn); + res = ssb_pcibios_map_irq(dev, slot, pin); + + /* IRQ-0 and IRQ-1 are software interrupts. */ + if (res < 2) { + printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n", + pci_name(dev)); + return res; + } + + dev->irq = res; + return 0; +} + diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c index bd78368c82b..f97ab146101 100644 --- a/arch/mips/pci/pci-ip27.c +++ b/arch/mips/pci/pci-ip27.c @@ -143,25 +143,47 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid) */ int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { + return 0; +} + +/* Most MIPS systems have straight-forward swizzling needs. */ +static inline u8 bridge_swizzle(u8 pin, u8 slot) +{ + return (((pin - 1) + slot) % 4) + 1; +} + +static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev) +{ + while (dev->bus->parent) { + /* Move up the chain of bridges. 
*/ + dev = dev->bus->self; + } + + return dev; +} + +/* Do platform specific device initialization at pci_enable_device() time */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); - int irq = bc->pci_int[slot]; + struct pci_dev *rdev = bridge_root_dev(dev); + int slot = PCI_SLOT(rdev->devfn); + int irq; + irq = bc->pci_int[slot]; if (irq == -1) { - irq = bc->pci_int[slot] = request_bridge_irq(bc); + irq = request_bridge_irq(bc); if (irq < 0) - panic("Can't allocate interrupt for PCI device %s\n", - pci_name(dev)); + return irq; + + bc->pci_int[slot] = irq; } irq_to_bridge[irq] = bc; irq_to_slot[irq] = slot; - return irq; -} + dev->irq = irq; -/* Do platform specific device initialization at pci_enable_device() time */ -int pcibios_plat_dev_init(struct pci_dev *dev) -{ return 0; } diff --git a/arch/mips/sibyte/swarm/Makefile b/arch/mips/sibyte/swarm/Makefile index f18ba9201bb..7b45f199d92 100644 --- a/arch/mips/sibyte/swarm/Makefile +++ b/arch/mips/sibyte/swarm/Makefile @@ -1,3 +1,4 @@ -obj-y := setup.o rtc_xicor1241.o rtc_m41t81.o +obj-y := platform.o setup.o rtc_xicor1241.o \ + rtc_m41t81.o obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c new file mode 100644 index 00000000000..dd0e5b9b64e --- /dev/null +++ b/arch/mips/sibyte/swarm/platform.c @@ -0,0 +1,81 @@ +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/ata_platform.h> + +#include <asm/sibyte/board.h> +#include <asm/sibyte/sb1250_genbus.h> +#include <asm/sibyte/sb1250_regs.h> + +#define DRV_NAME "pata-swarm" + +#define SWARM_IDE_SHIFT 5 +#define SWARM_IDE_BASE 0x1f0 +#define SWARM_IDE_CTRL 0x3f6 + +static struct resource swarm_pata_resource[] = { + { + .name = "Swarm GenBus IDE", + .flags = IORESOURCE_MEM, + }, { + .name = "Swarm GenBus IDE", + .flags = IORESOURCE_MEM, + }, { + .name = "Swarm GenBus IDE", + .flags = IORESOURCE_IRQ, + .start = K_INT_GB_IDE, + .end = K_INT_GB_IDE, + }, +}; + +static struct pata_platform_info pata_platform_data = { + .ioport_shift = SWARM_IDE_SHIFT, +}; + +static struct platform_device swarm_pata_device = { + .name = "pata_platform", + .id = -1, + .resource = swarm_pata_resource, + .num_resources = ARRAY_SIZE(swarm_pata_resource), + .dev = { + .platform_data = &pata_platform_data, + .coherent_dma_mask = ~0, /* grumble */ + }, +}; + +static int __init swarm_pata_init(void) +{ + u8 __iomem *base; + phys_t offset, size; + struct resource *r; + + if (!SIBYTE_HAVE_IDE) + return -ENODEV; + + base = ioremap(A_IO_EXT_BASE, 0x800); + offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS)); + size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS)); + iounmap(base); + + offset = G_IO_START_ADDR(offset) << S_IO_ADDRBASE; + size = (G_IO_MULT_SIZE(size) + 1) << S_IO_REGSIZE; + if (offset < A_PHYS_GENBUS || offset >= A_PHYS_GENBUS_END) { + pr_info(DRV_NAME ": PATA interface at GenBus disabled\n"); + + return -EBUSY; + } + + pr_info(DRV_NAME ": PATA interface at GenBus slot %i\n", IDE_CS); + + r = swarm_pata_resource; + r[0].start = offset + (SWARM_IDE_BASE << SWARM_IDE_SHIFT); + r[0].end = offset + ((SWARM_IDE_BASE + 8) << SWARM_IDE_SHIFT) - 1; + r[1].start = offset + (SWARM_IDE_CTRL << SWARM_IDE_SHIFT); + r[1].end = offset + ((SWARM_IDE_CTRL + 1) << SWARM_IDE_SHIFT) - 1; + + return platform_device_register(&swarm_pata_device); +} + 
+device_initcall(swarm_pata_init);
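For reference, the BUILD_ROLLBACK_PROLOGUE macro added to genex.S above can be read as the following C-level sketch; rollback_epc() and its parameter names are illustrative, not kernel API. Because r4k_wait is placed at a 32-byte boundary by ".align 5" and the rollback region is exactly 32 bytes, rounding the interrupted EPC down (the ori/xori 0x1f pair) and comparing it against the r4k_wait address asks whether the CPU was anywhere inside the wait sequence:

```c
/* Hypothetical C rendering of the assembly rollback check. If the
 * exception hit inside the 32-byte, 32-byte-aligned rollback region,
 * EPC is rewound to the start of r4k_wait, so the _TIF_NEED_RESCHED
 * test is re-run before the WAIT instruction can be reached again. */
static unsigned long rollback_epc(unsigned long epc,
				  unsigned long r4k_wait_addr)
{
	unsigned long region = epc & ~0x1fUL;	/* ori/xori 0x1f pair */

	if (region == r4k_wait_addr)
		return r4k_wait_addr;	/* restart the wait sequence */
	return epc;			/* elsewhere: leave EPC alone */
}
```

This is why the comment in genex.S insists the region size be a power of two: the round-down must be a simple mask for the comparison to identify the region with two instructions.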