Diffstat (limited to 'include/asm-mips')
-rw-r--r--  include/asm-mips/cacheflush.h  |  1
-rw-r--r--  include/asm-mips/cevt-r4k.h    | 46
-rw-r--r--  include/asm-mips/irqflags.h    | 26
-rw-r--r--  include/asm-mips/kexec.h       |  2
-rw-r--r--  include/asm-mips/mipsregs.h    |  6
-rw-r--r--  include/asm-mips/pgtable-32.h  |  2
-rw-r--r--  include/asm-mips/smtc.h        |  8
-rw-r--r--  include/asm-mips/stackframe.h  | 72
-rw-r--r--  include/asm-mips/unistd.h      | 30
9 files changed, 165 insertions(+), 28 deletions(-)
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h
index d5c0f2fda51..03b1d69b142 100644
--- a/include/asm-mips/cacheflush.h
+++ b/include/asm-mips/cacheflush.h
@@ -63,6 +63,7 @@ static inline void flush_icache_page(struct vm_area_struct *vma,
}
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
+extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_cache_vmap)(void);
diff --git a/include/asm-mips/cevt-r4k.h b/include/asm-mips/cevt-r4k.h
new file mode 100644
index 00000000000..fa4328f9124
--- /dev/null
+++ b/include/asm-mips/cevt-r4k.h
@@ -0,0 +1,46 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Kevin D. Kissell
+ */
+
+/*
+ * Definitions used for common event timer implementation
+ * for MIPS 4K-type processors and their MIPS MT variants.
+ * Avoids unsightly extern declarations in C files.
+ */
+#ifndef __ASM_CEVT_R4K_H
+#define __ASM_CEVT_R4K_H
+
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+
+void mips_event_handler(struct clock_event_device *dev);
+int c0_compare_int_usable(void);
+void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *);
+irqreturn_t c0_compare_interrupt(int, void *);
+
+extern struct irqaction c0_compare_irqaction;
+extern int cp0_timer_irq_installed;
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked.
+ */
+
+static inline int handle_perf_irq(int r2)
+{
+ /*
+ * The performance counter overflow interrupt may be shared with the
+ * timer interrupt (cp0_perfcount_irq < 0). If it is and a
+ * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+ * and we can't reliably determine if a counter interrupt has also
+ * happened (!r2) then don't check for a timer interrupt.
+ */
+ return (cp0_perfcount_irq < 0) &&
+ perf_irq() == IRQ_HANDLED &&
+ !r2;
+}
+
+#endif /* __ASM_CEVT_R4K_H */
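The handle_perf_irq() helper above gates the timer check in the shared
timer/perf-counter interrupt path. A minimal sketch of a caller, assuming the
usual CP0 accessors (read_c0_cause(), write_c0_compare(), CAUSEF_TI) and the
per-CPU clockevent declared in this header; this is illustrative, not the
exact cevt-r4k.c body:

	irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
	{
		/* Cause.TI exists only on MIPS32/64 R2 cores; without it we
		 * cannot tell a timer tick from a perf-counter overflow. */
		const int r2 = cpu_has_mips_r2;
		struct clock_event_device *cd;

		/* Shared perf/timer IRQ with no reliable way to tell them
		 * apart: the perf handler consumed it, so skip the timer. */
		if (handle_perf_irq(r2))
			return IRQ_HANDLED;

		/* On R2, trust Cause.TI; pre-R2, assume it was the timer. */
		if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
			write_c0_compare(read_c0_compare());	/* ack the tick */
			cd = &per_cpu(mips_clockevent_device, smp_processor_id());
			cd->event_handler(cd);
		}
		return IRQ_HANDLED;
	}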
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h
index 881e8866501..701ec0ba8fa 100644
--- a/include/asm-mips/irqflags.h
+++ b/include/asm-mips/irqflags.h
@@ -38,8 +38,17 @@ __asm__(
" .set pop \n"
" .endm");
+extern void smtc_ipi_replay(void);
+
static inline void raw_local_irq_enable(void)
{
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of call overhead on each local_irq_enable()
+ */
+ smtc_ipi_replay();
+#endif
__asm__ __volatile__(
"raw_local_irq_enable"
: /* no outputs */
@@ -47,6 +56,7 @@ static inline void raw_local_irq_enable(void)
: "memory");
}
+
/*
* For cli() we have to insert nops to make sure that the new value
* has actually arrived in the status register before the end of this
@@ -185,15 +195,14 @@ __asm__(
" .set pop \n"
" .endm \n");
-extern void smtc_ipi_replay(void);
static inline void raw_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
-#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+#ifdef CONFIG_MIPS_MT_SMTC
/*
- * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
+ * SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of branch and call overhead on each
* local_irq_restore()
*/
@@ -208,6 +217,17 @@ static inline void raw_local_irq_restore(unsigned long flags)
: "memory");
}
+static inline void __raw_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+ __asm__ __volatile__(
+ "raw_local_irq_restore\t%0"
+ : "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+}
+
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
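The new __raw_local_irq_restore() is raw_local_irq_restore() minus the SMTC
replay hook. A plausible use, sketched under assumption (the replay loop
itself must restore flags without recursing back into the replay; IPIQ is
referenced elsewhere in this patch, but its element type and the dispatch
step here are assumed):

	void smtc_ipi_replay(void)
	{
		unsigned int cpu = smp_processor_id();
		unsigned long flags;

		while (IPIQ[cpu].head != NULL) {
			local_irq_save(flags);
			/* ... dequeue one IPI and dispatch its handler ... */
			__raw_local_irq_restore(flags);	/* no replay re-entry */
		}
	}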
diff --git a/include/asm-mips/kexec.h b/include/asm-mips/kexec.h
index cdbab43b7d3..4314892aaeb 100644
--- a/include/asm-mips/kexec.h
+++ b/include/asm-mips/kexec.h
@@ -16,7 +16,7 @@
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
-#define KEXEC_CONTROL_CODE_SIZE 4096
+#define KEXEC_CONTROL_PAGE_SIZE 4096
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_MIPS
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index a46f8e258e6..979866000da 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -1462,7 +1462,7 @@ set_c0_##name(unsigned int set) \
{ \
unsigned int res; \
unsigned int omt; \
- unsigned int flags; \
+ unsigned long flags; \
\
local_irq_save(flags); \
omt = __dmt(); \
@@ -1480,7 +1480,7 @@ clear_c0_##name(unsigned int clear) \
{ \
unsigned int res; \
unsigned int omt; \
- unsigned int flags; \
+ unsigned long flags; \
\
local_irq_save(flags); \
omt = __dmt(); \
@@ -1498,7 +1498,7 @@ change_c0_##name(unsigned int change, unsigned int new) \
{ \
unsigned int res; \
unsigned int omt; \
- unsigned int flags; \
+ unsigned long flags; \
\
local_irq_save(flags); \
\
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index 4396e9ffd41..55813d6150c 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -57,7 +57,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
#define PMD_ORDER 1
#define PTE_ORDER 0
-#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2)
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
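The new PTRS_PER_PGD derives the PGD entry count from the 2 GB user/kernel
address split rather than from the PGD page-allocation size. Illustrative
arithmetic, assuming 4 KB pages and 32-bit PTEs (so PGDIR_SIZE works out to
4 MB here):

	/* USER_PTRS_PER_PGD = 0x80000000 / 0x400000 = 512  entries (low 2 GB)
	 * PTRS_PER_PGD      = 512 * 2              = 1024 entries (full 4 GB)
	 */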
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h
index 3639b28f80d..ea60bf08dcb 100644
--- a/include/asm-mips/smtc.h
+++ b/include/asm-mips/smtc.h
@@ -6,6 +6,7 @@
*/
#include <asm/mips_mt.h>
+#include <asm/smtc_ipi.h>
/*
* System-wide SMTC status information
@@ -38,14 +39,15 @@ struct mm_struct;
struct task_struct;
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
-
+void self_ipi(struct smtc_ipi *);
void smtc_flush_tlb_asid(unsigned long asid);
-extern int mipsmt_build_cpu_map(int startslot);
-extern void mipsmt_prepare_cpus(void);
+extern int smtc_build_cpu_map(int startslot);
+extern void smtc_prepare_cpus(int cpus);
extern void smtc_smp_finish(void);
extern void smtc_boot_secondary(int cpu, struct task_struct *t);
extern void smtc_cpus_done(void);
+
/*
* Sharing the TLB between multiple VPEs means that the
* "random" index selection function is not allowed to
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index 051e1af0bb9..4c37c4e5f72 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -297,14 +297,31 @@
#ifdef CONFIG_MIPS_MT_SMTC
.set mips32r2
/*
- * This may not really be necessary if ints are already
- * inhibited here.
+ * We need to make sure the read-modify-write
+ * of Status below isn't perturbed by an interrupt
+ * or cross-TC access, so we need to do at least a DMT,
+ * protected by an interrupt-inhibit. But setting IXMT
+ * also creates a few-cycle window where an IPI could
+ * be queued and not be detected before potentially
+ * returning to a WAIT or user-mode loop. It must be
+ * replayed.
+ *
+ * We're in the middle of a context switch, and
+ * we can't dispatch it directly without trashing
+ * some registers, so we'll try to detect this unlikely
+ * case and program a software interrupt in the VPE,
+ * as would be done for a cross-VPE IPI. To accommodate
+ * the handling of that case, we're doing a DVPE instead
+ * of just a DMT here to protect against other threads.
+ * This is a lot of cruft to cover a tiny window.
+ * If you can find a better design, implement it!
+ *
*/
mfc0 v0, CP0_TCSTATUS
ori v0, TCSTATUS_IXMT
mtc0 v0, CP0_TCSTATUS
_ehb
- DMT 5 # dmt a1
+ DVPE 5 # dvpe a1
jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 a0, CP0_STATUS
@@ -325,17 +342,50 @@
*/
LONG_L v1, PT_TCSTATUS(sp)
_ehb
- mfc0 v0, CP0_TCSTATUS
+ mfc0 a0, CP0_TCSTATUS
andi v1, TCSTATUS_IXMT
- /* We know that TCStatua.IXMT should be set from above */
- xori v0, v0, TCSTATUS_IXMT
- or v0, v0, v1
- mtc0 v0, CP0_TCSTATUS
- _ehb
- andi a1, a1, VPECONTROL_TE
+ bnez v1, 0f
+
+/*
+ * We'd like to detect any IPIs queued in the tiny window
+ * above and request an software interrupt to service them
+ * when we ERET.
+ *
+ * Computing the offset into the IPIQ array of the executing
+ * TC's IPI queue in-line would be tedious. We use part of
+ * the TCContext register to hold 16 bits of offset that we
+ * can add in-line to find the queue head.
+ */
+ mfc0 v0, CP0_TCCONTEXT
+ la a2, IPIQ
+ srl v0, v0, 16
+ addu a2, a2, v0
+ LONG_L v0, 0(a2)
+ beqz v0, 0f
+/*
+ * If we have a queue, provoke dispatch within the VPE by setting C_SW1
+ */
+ mfc0 v0, CP0_CAUSE
+ ori v0, v0, C_SW1
+ mtc0 v0, CP0_CAUSE
+0:
+ /*
+ * This test should really never branch but
+ * let's be prudent here. Having atomized
+ * the shared register modifications, we can
+ * now EVPE, and must do so before interrupts
+ * are potentially re-enabled.
+ */
+ andi a1, a1, MVPCONTROL_EVP
beqz a1, 1f
- emt
+ evpe
1:
+	/* We know that TCStatus.IXMT should be set from above */
+ xori a0, a0, TCSTATUS_IXMT
+ or a0, a0, v1
+ mtc0 a0, CP0_TCSTATUS
+ _ehb
+
.set mips0
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_L v1, PT_EPC(sp)
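The restore path above can locate the executing TC's queue head with just a
shift and an add because the byte offset of that queue inside IPIQ[] is
pre-staged in the upper 16 bits of the TC's TCContext register. A sketch of
the setup side, with names assumed from the SMTC code (struct smtc_ipi_q,
write_tc_c0_tccontext(); the store itself is not part of this diff):

	/* Assumed per-TC initialization: put this CPU's byte offset into
	 * the IPIQ[] array in TCContext bits 31..16, so the assembly can
	 * later compute &IPIQ[cpu] as IPIQ + (TCContext >> 16). */
	write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);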
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
index 4964c82f85f..a73e1531e15 100644
--- a/include/asm-mips/unistd.h
+++ b/include/asm-mips/unistd.h
@@ -344,16 +344,22 @@
#define __NR_timerfd_create (__NR_Linux + 321)
#define __NR_timerfd_gettime (__NR_Linux + 322)
#define __NR_timerfd_settime (__NR_Linux + 323)
+#define __NR_signalfd4 (__NR_Linux + 324)
+#define __NR_eventfd2 (__NR_Linux + 325)
+#define __NR_epoll_create1 (__NR_Linux + 326)
+#define __NR_dup3 (__NR_Linux + 327)
+#define __NR_pipe2 (__NR_Linux + 328)
+#define __NR_inotify_init1 (__NR_Linux + 329)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 323
+#define __NR_Linux_syscalls 329
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 323
+#define __NR_O32_Linux_syscalls 329
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -644,16 +650,22 @@
#define __NR_timerfd_create (__NR_Linux + 280)
#define __NR_timerfd_gettime (__NR_Linux + 281)
#define __NR_timerfd_settime (__NR_Linux + 282)
+#define __NR_signalfd4 (__NR_Linux + 283)
+#define __NR_eventfd2 (__NR_Linux + 284)
+#define __NR_epoll_create1 (__NR_Linux + 285)
+#define __NR_dup3 (__NR_Linux + 286)
+#define __NR_pipe2 (__NR_Linux + 287)
+#define __NR_inotify_init1 (__NR_Linux + 288)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 282
+#define __NR_Linux_syscalls 288
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 282
+#define __NR_64_Linux_syscalls 288
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -948,16 +960,22 @@
#define __NR_timerfd_create (__NR_Linux + 284)
#define __NR_timerfd_gettime (__NR_Linux + 285)
#define __NR_timerfd_settime (__NR_Linux + 286)
+#define __NR_signalfd4 (__NR_Linux + 287)
+#define __NR_eventfd2 (__NR_Linux + 288)
+#define __NR_epoll_create1 (__NR_Linux + 289)
+#define __NR_dup3 (__NR_Linux + 290)
+#define __NR_pipe2 (__NR_Linux + 291)
+#define __NR_inotify_init1 (__NR_Linux + 292)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 286
+#define __NR_Linux_syscalls 292
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 286
+#define __NR_N32_Linux_syscalls 292
#ifdef __KERNEL__
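For reference, the absolute number of each new syscall follows from the
per-ABI base already defined in this header (__NR_O32_Linux is 4000,
__NR_64_Linux is 5000, __NR_N32_Linux is 6000). Worked example for dup3:

	/* o32: __NR_dup3 = 4000 + 327 = 4327
	 * n64: __NR_dup3 = 5000 + 286 = 5286
	 * n32: __NR_dup3 = 6000 + 290 = 6290
	 */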