Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/mmu_context.h | 6
-rw-r--r--  include/asm-alpha/processor.h | 13
-rw-r--r--  include/asm-alpha/ptrace.h | 6
-rw-r--r--  include/asm-alpha/system.h | 18
-rw-r--r--  include/asm-alpha/thread_info.h | 2
-rw-r--r--  include/asm-arm/processor.h | 8
-rw-r--r--  include/asm-arm/system.h | 12
-rw-r--r--  include/asm-arm/thread_info.h | 7
-rw-r--r--  include/asm-arm26/system.h | 12
-rw-r--r--  include/asm-arm26/thread_info.h | 9
-rw-r--r--  include/asm-cris/arch-v10/processor.h | 2
-rw-r--r--  include/asm-cris/arch-v32/processor.h | 2
-rw-r--r--  include/asm-cris/processor.h | 3
-rw-r--r--  include/asm-cris/thread_info.h | 2
-rw-r--r--  include/asm-frv/thread_info.h | 2
-rw-r--r--  include/asm-h8300/thread_info.h | 2
-rw-r--r--  include/asm-i386/i387.h | 8
-rw-r--r--  include/asm-i386/processor.h | 12
-rw-r--r--  include/asm-i386/system.h | 9
-rw-r--r--  include/asm-i386/thread_info.h | 2
-rw-r--r--  include/asm-i386/topology.h | 1
-rw-r--r--  include/asm-ia64/compat.h | 2
-rw-r--r--  include/asm-ia64/processor.h | 2
-rw-r--r--  include/asm-ia64/ptrace.h | 4
-rw-r--r--  include/asm-ia64/system.h | 9
-rw-r--r--  include/asm-ia64/thread_info.h | 9
-rw-r--r--  include/asm-ia64/topology.h | 2
-rw-r--r--  include/asm-m32r/ptrace.h | 3
-rw-r--r--  include/asm-m32r/system.h | 10
-rw-r--r--  include/asm-m32r/thread_info.h | 2
-rw-r--r--  include/asm-m68k/amigahw.h | 12
-rw-r--r--  include/asm-m68k/amigaints.h | 2
-rw-r--r--  include/asm-m68k/checksum.h | 2
-rw-r--r--  include/asm-m68k/dsp56k.h | 2
-rw-r--r--  include/asm-m68k/floppy.h | 2
-rw-r--r--  include/asm-m68k/hardirq.h | 9
-rw-r--r--  include/asm-m68k/io.h | 49
-rw-r--r--  include/asm-m68k/irq.h | 9
-rw-r--r--  include/asm-m68k/machdep.h | 1
-rw-r--r--  include/asm-m68k/raw_io.h | 40
-rw-r--r--  include/asm-m68k/signal.h | 2
-rw-r--r--  include/asm-m68k/sun3_pgtable.h | 2
-rw-r--r--  include/asm-m68k/sun3ints.h | 1
-rw-r--r--  include/asm-m68k/sun3xflop.h | 4
-rw-r--r--  include/asm-m68k/thread_info.h | 1
-rw-r--r--  include/asm-m68k/uaccess.h | 20
-rw-r--r--  include/asm-m68k/zorro.h | 8
-rw-r--r--  include/asm-m68knommu/machdep.h | 1
-rw-r--r--  include/asm-m68knommu/thread_info.h | 2
-rw-r--r--  include/asm-mips/mach-ip27/topology.h | 1
-rw-r--r--  include/asm-mips/processor.h | 10
-rw-r--r--  include/asm-mips/system.h | 12
-rw-r--r--  include/asm-mips/thread_info.h | 2
-rw-r--r--  include/asm-parisc/system.h | 9
-rw-r--r--  include/asm-parisc/thread_info.h | 3
-rw-r--r--  include/asm-powerpc/system.h | 10
-rw-r--r--  include/asm-powerpc/thread_info.h | 3
-rw-r--r--  include/asm-powerpc/topology.h | 1
-rw-r--r--  include/asm-ppc/system.h | 10
-rw-r--r--  include/asm-s390/elf.h | 2
-rw-r--r--  include/asm-s390/processor.h | 8
-rw-r--r--  include/asm-s390/system.h | 10
-rw-r--r--  include/asm-s390/thread_info.h | 2
-rw-r--r--  include/asm-sh/ptrace.h | 10
-rw-r--r--  include/asm-sh/system.h | 10
-rw-r--r--  include/asm-sh/thread_info.h | 2
-rw-r--r--  include/asm-sh64/thread_info.h | 2
-rw-r--r--  include/asm-sparc/system.h | 12
-rw-r--r--  include/asm-sparc/thread_info.h | 3
-rw-r--r--  include/asm-sparc64/elf.h | 2
-rw-r--r--  include/asm-sparc64/mmu_context.h | 2
-rw-r--r--  include/asm-sparc64/processor.h | 5
-rw-r--r--  include/asm-sparc64/system.h | 14
-rw-r--r--  include/asm-um/thread_info.h | 3
-rw-r--r--  include/asm-v850/processor.h | 8
-rw-r--r--  include/asm-v850/thread_info.h | 2
-rw-r--r--  include/asm-x86_64/compat.h | 2
-rw-r--r--  include/asm-x86_64/i387.h | 10
-rw-r--r--  include/asm-x86_64/processor.h | 4
-rw-r--r--  include/asm-x86_64/system.h | 9
-rw-r--r--  include/asm-x86_64/thread_info.h | 2
-rw-r--r--  include/asm-x86_64/topology.h | 1
-rw-r--r--  include/asm-xtensa/processor.h | 6
-rw-r--r--  include/asm-xtensa/ptrace.h | 4
-rw-r--r--  include/asm-xtensa/thread_info.h | 2
-rw-r--r--  include/linux/sched.h | 15
-rw-r--r--  include/linux/topology.h | 2
87 files changed, 343 insertions, 222 deletions
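
In brief, the series below converts direct (tsk)->thread_info dereferences to the task_thread_info(), task_stack_page() and per-architecture task_pt_regs() accessors, drops the get_thread_info()/put_thread_info() pairs, removes the hard-coded cache_hot_time values from the topology defaults, and adds a sched_cacheflush() hook for the scheduler's migration-cost autodetection. The stand-alone sketch below shows only the accessor pattern being converted to; the struct layouts are mock-ups, and the two macros are assumed to mirror the generic fallbacks in linux/sched.h, so treat it as an illustration rather than the kernel's actual definitions.

/* Minimal stand-alone sketch of the accessor pattern this series converts to.
 * Mock struct layouts; the macros are assumed to mirror the generic
 * (non-__HAVE_THREAD_FUNCTIONS) fallbacks. */
#include <stdio.h>

struct thread_info { int cpu; struct task_struct *task; };
struct task_struct { struct thread_info *thread_info; int pid; };

#define task_thread_info(task) ((task)->thread_info)
#define task_stack_page(task)  ((void *)(task)->thread_info)

int main(void)
{
        struct thread_info ti  = { .cpu = 0 };
        struct task_struct tsk = { .thread_info = &ti, .pid = 42 };
        ti.task = &tsk;

        /* old spelling: tsk->thread_info->cpu
         * new spelling: task_thread_info(&tsk)->cpu
         * stack base:   task_stack_page(&tsk)          */
        printf("cpu=%d stack=%p\n", task_thread_info(&tsk)->cpu,
               task_stack_page(&tsk));
        return 0;
}

Architectures that define __HAVE_THREAD_FUNCTIONS (ia64 and m68k in this series) supply their own versions of these macros, as their hunks below show.
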
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index a714d0cdc20..6f92482cc96 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -156,7 +156,7 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
/* Always update the PCB ASN. Another thread may have allocated
a new mm->context (via flush_tlb_mm) without the ASN serial
number wrapping. We have no way to detect when this is needed. */
- next->thread_info->pcb.asn = mmc & HARDWARE_ASN_MASK;
+ task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}
__EXTERN_INLINE void
@@ -235,7 +235,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
if (cpu_online(i))
mm->context[i] = 0;
if (tsk != current)
- tsk->thread_info->pcb.ptbr
+ task_thread_info(tsk)->pcb.ptbr
= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
return 0;
}
@@ -249,7 +249,7 @@ destroy_context(struct mm_struct *mm)
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
- tsk->thread_info->pcb.ptbr
+ task_thread_info(tsk)->pcb.ptbr
= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
index bb1a7a3abb8..425b7b6d28c 100644
--- a/include/asm-alpha/processor.h
+++ b/include/asm-alpha/processor.h
@@ -52,19 +52,10 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
unsigned long get_wchan(struct task_struct *p);
-/* See arch/alpha/kernel/ptrace.c for details. */
-#define PT_REG(reg) \
- (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
-
-#define SW_REG(reg) \
- (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
- + offsetof(struct switch_stack, reg))
-
-#define KSTK_EIP(tsk) \
- (*(unsigned long *)(PT_REG(pc) + (unsigned long) ((tsk)->thread_info)))
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) \
- ((tsk) == current ? rdusp() : (tsk)->thread_info->pcb.usp)
+ ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
#define cpu_relax() barrier()
diff --git a/include/asm-alpha/ptrace.h b/include/asm-alpha/ptrace.h
index 072375c135b..9933b8b3612 100644
--- a/include/asm-alpha/ptrace.h
+++ b/include/asm-alpha/ptrace.h
@@ -75,10 +75,10 @@ struct switch_stack {
#define profile_pc(regs) instruction_pointer(regs)
extern void show_regs(struct pt_regs *);
-#define alpha_task_regs(task) \
- ((struct pt_regs *) ((long) (task)->thread_info + 2*PAGE_SIZE) - 1)
+#define task_pt_regs(task) \
+ ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
-#define force_successful_syscall_return() (alpha_task_regs(current)->r0 = 0)
+#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0)
#endif
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 050e86d1289..cc9c7e8cced 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -131,15 +131,25 @@ struct el_common_EV6_mcheck {
extern void halt(void) __attribute__((noreturn));
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
-#define switch_to(P,N,L) \
- do { \
- (L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P)); \
- check_mmu_context(); \
+#define switch_to(P,N,L) \
+ do { \
+ (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \
+ check_mmu_context(); \
} while (0)
struct task_struct;
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h
index d51491ed00b..69ffd93f8e2 100644
--- a/include/asm-alpha/thread_info.h
+++ b/include/asm-alpha/thread_info.h
@@ -54,8 +54,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define alloc_thread_info(tsk) \
((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-arm/processor.h b/include/asm-arm/processor.h
index 7d4118e0905..31290694648 100644
--- a/include/asm-arm/processor.h
+++ b/include/asm-arm/processor.h
@@ -85,9 +85,11 @@ unsigned long get_wchan(struct task_struct *p);
*/
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-#define KSTK_REGS(tsk) (((struct pt_regs *)(THREAD_START_SP + (unsigned long)(tsk)->thread_info)) - 1)
-#define KSTK_EIP(tsk) KSTK_REGS(tsk)->ARM_pc
-#define KSTK_ESP(tsk) KSTK_REGS(tsk)->ARM_sp
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
+
+#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc
+#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp
/*
* Prefetching support - only ARMv5.
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 5621d61ebc0..eb2de8c1051 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -168,10 +168,20 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
- last = __switch_to(prev,prev->thread_info,next->thread_info); \
+ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
* CPU interrupt mask handling.
*/
#if __LINUX_ARM_ARCH__ >= 6
diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h
index 7c98557b717..33a33cbb632 100644
--- a/include/asm-arm/thread_info.h
+++ b/include/asm-arm/thread_info.h
@@ -96,13 +96,10 @@ static inline struct thread_info *current_thread_info(void)
extern struct thread_info *alloc_thread_info(struct task_struct *task);
extern void free_thread_info(struct thread_info *);
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
#define thread_saved_pc(tsk) \
- ((unsigned long)(pc_pointer((tsk)->thread_info->cpu_context.pc)))
+ ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
#define thread_saved_fp(tsk) \
- ((unsigned long)((tsk)->thread_info->cpu_context.fp))
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
extern void iwmmxt_task_disable(struct thread_info *);
extern void iwmmxt_task_copy(struct thread_info *, void *);
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index f23fac1938f..ca4ccfc4b57 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -111,10 +111,20 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
- last = __switch_to(prev,prev->thread_info,next->thread_info); \
+ last = __switch_to(prev,task_thread_info(prev),task_thread_info(next)); \
} while (0)
/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
* Save the current interrupt enable state & disable IRQs
*/
#define local_irq_save(x) \
diff --git a/include/asm-arm26/thread_info.h b/include/asm-arm26/thread_info.h
index aff3e5699c6..a65e58a0a76 100644
--- a/include/asm-arm26/thread_info.h
+++ b/include/asm-arm26/thread_info.h
@@ -82,18 +82,15 @@ static inline struct thread_info *current_thread_info(void)
/* FIXME - PAGE_SIZE < 32K */
#define THREAD_SIZE (8*32768) // FIXME - this needs attention (see kernel/fork.c which gets a nice div by zero if this is lower than 8*32768
-#define __get_user_regs(x) (((struct pt_regs *)((unsigned long)(x) + THREAD_SIZE - 8)) - 1)
+#define task_pt_regs(task) ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE - 8) - 1)
extern struct thread_info *alloc_thread_info(struct task_struct *task);
extern void free_thread_info(struct thread_info *);
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
#define thread_saved_pc(tsk) \
- ((unsigned long)(pc_pointer((tsk)->thread_info->cpu_context.pc)))
+ ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
#define thread_saved_fp(tsk) \
- ((unsigned long)((tsk)->thread_info->cpu_context.fp))
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
#else /* !__ASSEMBLY__ */
diff --git a/include/asm-cris/arch-v10/processor.h b/include/asm-cris/arch-v10/processor.h
index e23df8dc96e..cc692c7a066 100644
--- a/include/asm-cris/arch-v10/processor.h
+++ b/include/asm-cris/arch-v10/processor.h
@@ -40,7 +40,7 @@ struct thread_struct {
#define KSTK_EIP(tsk) \
({ \
unsigned long eip = 0; \
- unsigned long regs = (unsigned long)user_regs(tsk); \
+ unsigned long regs = (unsigned long)task_pt_regs(tsk); \
if (regs > PAGE_SIZE && \
virt_addr_valid(regs)) \
eip = ((struct pt_regs *)regs)->irp; \
diff --git a/include/asm-cris/arch-v32/processor.h b/include/asm-cris/arch-v32/processor.h
index 8c939bf2798..32bf2e538ce 100644
--- a/include/asm-cris/arch-v32/processor.h
+++ b/include/asm-cris/arch-v32/processor.h
@@ -36,7 +36,7 @@ struct thread_struct {
#define KSTK_EIP(tsk) \
({ \
unsigned long eip = 0; \
- unsigned long regs = (unsigned long)user_regs(tsk); \
+ unsigned long regs = (unsigned long)task_pt_regs(tsk); \
if (regs > PAGE_SIZE && virt_addr_valid(regs)) \
eip = ((struct pt_regs *)regs)->erp; \
eip; \
diff --git a/include/asm-cris/processor.h b/include/asm-cris/processor.h
index dce41009eeb..961e2bceadb 100644
--- a/include/asm-cris/processor.h
+++ b/include/asm-cris/processor.h
@@ -45,7 +45,8 @@ struct task_struct;
* Dito but for the currently running task
*/
-#define current_regs() user_regs(current->thread_info)
+#define task_pt_regs(task) user_regs(task_thread_info(task))
+#define current_regs() task_pt_regs(current)
static inline void prepare_to_copy(struct task_struct *tsk)
{
diff --git a/include/asm-cris/thread_info.h b/include/asm-cris/thread_info.h
index cef0140fc10..7ad853c3f74 100644
--- a/include/asm-cris/thread_info.h
+++ b/include/asm-cris/thread_info.h
@@ -69,8 +69,6 @@ struct thread_info {
/* thread information allocation */
#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-frv/thread_info.h b/include/asm-frv/thread_info.h
index 60f6b2aee76..a5576e02dd1 100644
--- a/include/asm-frv/thread_info.h
+++ b/include/asm-frv/thread_info.h
@@ -110,8 +110,6 @@ register struct thread_info *__current_thread_info asm("gr15");
#endif
#define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#else /* !__ASSEMBLY__ */
diff --git a/include/asm-h8300/thread_info.h b/include/asm-h8300/thread_info.h
index bfcc755c3bb..45f09dc9caf 100644
--- a/include/asm-h8300/thread_info.h
+++ b/include/asm-h8300/thread_info.h
@@ -69,8 +69,6 @@ static inline struct thread_info *current_thread_info(void)
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
/*
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index 6747006743f..152d0baa576 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -49,19 +49,19 @@ static inline void __save_init_fpu( struct task_struct *tsk )
X86_FEATURE_FXSR,
"m" (tsk->thread.i387.fxsave)
:"memory");
- tsk->thread_info->status &= ~TS_USEDFPU;
+ task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
#define __unlazy_fpu( tsk ) do { \
- if ((tsk)->thread_info->status & TS_USEDFPU) \
+ if (task_thread_info(tsk)->status & TS_USEDFPU) \
save_init_fpu( tsk ); \
} while (0)
#define __clear_fpu( tsk ) \
do { \
- if ((tsk)->thread_info->status & TS_USEDFPU) { \
+ if (task_thread_info(tsk)->status & TS_USEDFPU) { \
asm volatile("fnclex ; fwait"); \
- (tsk)->thread_info->status &= ~TS_USEDFPU; \
+ task_thread_info(tsk)->status &= ~TS_USEDFPU; \
stts(); \
} \
} while (0)
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 13ecf66b098..feca5d961e2 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -561,10 +561,20 @@ unsigned long get_wchan(struct task_struct *p);
(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})
+/*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessable even if the CPU haven't stored the SS/ESP registers
+ * on the stack (interrupt gate does not save these registers
+ * when switching to the same priv ring).
+ * Therefore beware: accessing the xss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain the
+ * completely wrong values.
+ */
#define task_pt_regs(task) \
({ \
struct pt_regs *__regs__; \
- __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
+ __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
__regs__ - 1; \
})
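
The comment just added to task_pt_regs() above is easiest to see as arithmetic. The stand-alone sketch below (hypothetical, not kernel code) mirrors the macro: reserve 8 bytes at the top of the ring-0 stack, then place one pt_regs frame immediately below that, so even when the CPU never pushed SS/ESP the esp/xss slots still land inside the stack and reading them cannot fault -- they may merely hold stale values, as the comment warns. The simplified pt_regs layout, the two-page stack size and the int32_t field widths are assumptions made so the sketch builds anywhere.

/* Stand-alone sketch of the task_pt_regs() arithmetic; not kernel code. */
#include <stdint.h>
#include <stdio.h>

struct fake_pt_regs {                     /* simplified i386 pt_regs, 4-byte fields */
        int32_t ebx, ecx, edx, esi, edi, ebp, eax;
        int32_t xds, xes, orig_eax, eip, xcs, eflags;
        int32_t esp, xss;                 /* pushed by the CPU only on a ring change */
};

int main(void)
{
        static char stack[2 * 4096];      /* assumed THREAD_SIZE of two pages */
        char *top = stack + sizeof(stack);                /* KSTK_TOP       */
        struct fake_pt_regs *regs =
                (struct fake_pt_regs *)(top - 8) - 1;     /* task_pt_regs() */

        printf("stack top   : %p\n", (void *)top);
        printf("&regs->esp  : %p\n", (void *)&regs->esp);
        printf("&regs->xss  : %p\n", (void *)&regs->xss);
        printf("slack above : %td bytes\n", top - (char *)(regs + 1));
        return 0;
}
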
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 9c0593b7a94..36a92ed6a9d 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -548,6 +548,15 @@ void enable_hlt(void);
extern int es7000_plat;
void cpu_idle_wait(void);
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+ wbinvd();
+}
+
extern unsigned long arch_align_stack(unsigned long sp);
#endif
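
The i386 hook above actually flushes -- wbinvd writes back and invalidates all CPU caches -- while most of the other architectures in this series leave sched_cacheflush() as an empty TODO stub. To see why the migration-cost measurement wants such a hook, here is a hypothetical user-space sketch of a cold-cache timing loop; it is not the scheduler's code, it substitutes a buffer sweep for the privileged wbinvd instruction, and the buffer sizes are assumptions.

/* Hypothetical illustration of a timing loop that needs a cache flush
 * between runs; not the scheduler's autodetection code. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static unsigned char scratch[16 * 1024 * 1024];   /* assumed larger than LLC  */
static unsigned char buf[4 * 1024 * 1024];        /* assumed measurement size */

static void fake_cacheflush(void)
{
        /* user space cannot execute wbinvd, so approximate a flush by
         * streaming through a buffer presumed bigger than the caches */
        for (size_t i = 0; i < sizeof(scratch); i += 64)
                scratch[i]++;
}

static uint64_t time_touch(volatile unsigned char *p, size_t size)
{
        struct timespec t0, t1;
        clock_gettime(CLOCK_MONOTONIC, &t0);
        for (size_t i = 0; i < size; i += 64)     /* one access per cache line */
                p[i]++;
        clock_gettime(CLOCK_MONOTONIC, &t1);
        return (uint64_t)(t1.tv_sec - t0.tv_sec) * 1000000000ull
             + (uint64_t)(t1.tv_nsec - t0.tv_nsec);
}

int main(void)
{
        for (int run = 0; run < 3; run++) {
                fake_cacheflush();                /* cold-cache start each run */
                printf("run %d: %llu ns\n", run,
                       (unsigned long long)time_touch(buf, sizeof(buf)));
        }
        return 0;
}
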
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 8fbf791651b..2493e77e8c3 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -111,8 +111,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
#endif
#define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#else /* !__ASSEMBLY__ */
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 0ec27c9e8e4..d7e19eb344b 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -72,7 +72,6 @@ static inline int node_to_first_cpu(int node)
.max_interval = 32, \
.busy_factor = 32, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
.cache_nice_tries = 1, \
.busy_idx = 3, \
.idle_idx = 1, \
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
index aaf11f4e916..c0b19106665 100644
--- a/include/asm-ia64/compat.h
+++ b/include/asm-ia64/compat.h
@@ -192,7 +192,7 @@ compat_ptr (compat_uptr_t uptr)
static __inline__ void __user *
compat_alloc_user_space (long len)
{
- struct pt_regs *regs = ia64_task_regs(current);
+ struct pt_regs *regs = task_pt_regs(current);
return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
}
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 94e07e72739..8c648bf72bb 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -352,7 +352,7 @@ extern unsigned long get_wchan (struct task_struct *p);
/* Return instruction pointer of blocked task TSK. */
#define KSTK_EIP(tsk) \
({ \
- struct pt_regs *_regs = ia64_task_regs(tsk); \
+ struct pt_regs *_regs = task_pt_regs(tsk); \
_regs->cr_iip + ia64_psr(_regs)->ri; \
})
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index 2c703d6e0c8..9471cdc3f4c 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -248,7 +248,7 @@ struct switch_stack {
})
/* given a pointer to a task_struct, return the user's pt_regs */
-# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
@@ -271,7 +271,7 @@ struct switch_stack {
*
* On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
*/
-# define force_successful_syscall_return() (ia64_task_regs(current)->r8 = 0)
+# define force_successful_syscall_return() (task_pt_regs(current)->r8 = 0)
struct task_struct; /* forward decl */
struct unw_frame_info; /* forward decl */
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 635235fa1e3..80c5a234e25 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -219,14 +219,14 @@ extern void ia64_load_extra (struct task_struct *task);
#define IA64_HAS_EXTRA_STATE(t) \
((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
- || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
+ || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
#define __switch_to(prev,next,last) do { \
if (IA64_HAS_EXTRA_STATE(prev)) \
ia64_save_extra(prev); \
if (IA64_HAS_EXTRA_STATE(next)) \
ia64_load_extra(next); \
- ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
+ ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
(last) = ia64_switch_to((next)); \
} while (0)
@@ -238,8 +238,8 @@ extern void ia64_load_extra (struct task_struct *task);
* the latest fph state from another CPU. In other words: eager save, lazy restore.
*/
# define switch_to(prev,next,last) do { \
- if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
- ia64_psr(ia64_task_regs(prev))->mfh = 0; \
+ if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
+ ia64_psr(task_pt_regs(prev))->mfh = 0; \
(prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
__ia64_save_fpu((prev)->thread.fph); \
} \
@@ -279,6 +279,7 @@ extern void ia64_load_extra (struct task_struct *task);
#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
void cpu_idle_wait(void);
+void sched_cacheflush(void);
#define arch_align_stack(x) (x)
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 171b2207bde..653bb7f9a75 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -57,11 +57,20 @@ struct thread_info {
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
#define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#else
#define current_thread_info() ((struct thread_info *) 0)
#define alloc_thread_info(tsk) ((struct thread_info *) 0)
+#define task_thread_info(tsk) ((struct thread_info *) 0)
#endif
#define free_thread_info(ti) /* nothing */
+#define task_stack_page(tsk) ((void *)(tsk))
+
+#define __HAVE_THREAD_FUNCTIONS
+#define setup_thread_stack(p, org) \
+ *task_thread_info(p) = *task_thread_info(org); \
+ task_thread_info(p)->task = (p);
+#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER))
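
ia64 is one of the two architectures here that define __HAVE_THREAD_FUNCTIONS and supply their own accessors: the task_struct, its thread_info, the RSE register backing store and the memory stack share a single allocation, which is why task_stack_page() is simply the task pointer itself, thread_info sits at IA64_TASK_SIZE, the backing store grows up from IA64_RBS_OFFSET, and pt_regs sits just below IA64_STK_OFFSET (see the ptrace.h hunk earlier). The stand-alone sketch below mirrors that carving-up; the offsets are made-up placeholders, not the real IA64_* values.

/* Stand-alone sketch of the ia64 combined task/stack layout; offsets are
 * placeholders, not the real IA64_TASK_SIZE / IA64_RBS_OFFSET / IA64_STK_OFFSET. */
#include <stdio.h>

#define FAKE_TASK_SIZE   0x1000   /* stands in for IA64_TASK_SIZE  */
#define FAKE_RBS_OFFSET  0x2000   /* stands in for IA64_RBS_OFFSET */
#define FAKE_STK_OFFSET  0x8000   /* stands in for IA64_STK_OFFSET */

struct thread_info { int cpu; void *task; };
struct pt_regs     { long cr_iip, cr_ipsr, r12; };

#define task_stack_page(tsk)  ((void *)(tsk))
#define task_thread_info(tsk) ((struct thread_info *)((char *)(tsk) + FAKE_TASK_SIZE))
#define end_of_stack(tsk)     ((unsigned long *)((char *)(tsk) + FAKE_RBS_OFFSET))
#define task_pt_regs(tsk)     (((struct pt_regs *)((char *)(tsk) + FAKE_STK_OFFSET)) - 1)

int main(void)
{
        static char alloc[FAKE_STK_OFFSET];   /* one combined task+stack block  */
        void *tsk = alloc;                    /* task_struct sits at the bottom */

        printf("task_struct at %p\n", task_stack_page(tsk));
        printf("thread_info at %p\n", (void *)task_thread_info(tsk));
        printf("RBS starts  at %p (grows up)\n", (void *)end_of_stack(tsk));
        printf("pt_regs ends at %p (memory stack grows down below it)\n",
               (void *)(task_pt_regs(tsk) + 1));
        return 0;
}
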
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index f7c330467e7..d8aae4da397 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -55,7 +55,6 @@ void build_cpu_to_node_map(void);
.max_interval = 4, \
.busy_factor = 64, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
.per_cpu_gain = 100, \
.cache_nice_tries = 2, \
.busy_idx = 2, \
@@ -81,7 +80,6 @@ void build_cpu_to_node_map(void);
.max_interval = 8*(min(num_online_cpus(), 32)), \
.busy_factor = 64, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
.cache_nice_tries = 2, \
.busy_idx = 3, \
.idle_idx = 2, \
diff --git a/include/asm-m32r/ptrace.h b/include/asm-m32r/ptrace.h
index 55cd7ecfde4..0d058b2d844 100644
--- a/include/asm-m32r/ptrace.h
+++ b/include/asm-m32r/ptrace.h
@@ -163,6 +163,9 @@ extern void show_regs(struct pt_regs *);
extern void withdraw_debug_trap(struct pt_regs *regs);
+#define task_pt_regs(task) \
+ ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)
+
#endif /* __KERNEL */
#endif /* _ASM_M32R_PTRACE_H */
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index dcf619a0a0b..06c12a037cb 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -68,6 +68,16 @@
last = __last; \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
#define local_irq_enable() \
diff --git a/include/asm-m32r/thread_info.h b/include/asm-m32r/thread_info.h
index 0f589363f61..22aff3222d2 100644
--- a/include/asm-m32r/thread_info.h
+++ b/include/asm-m32r/thread_info.h
@@ -110,8 +110,6 @@ static inline struct thread_info *current_thread_info(void)
#endif
#define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#define TI_FLAG_FAULT_CODE_SHIFT 28
diff --git a/include/asm-m68k/amigahw.h b/include/asm-m68k/amigahw.h
index 3ae5d8d55ba..a16fe4e5a28 100644
--- a/include/asm-m68k/amigahw.h
+++ b/include/asm-m68k/amigahw.h
@@ -274,7 +274,7 @@ struct CIA {
#define ZTWO_VADDR(x) (((unsigned long)(x))+zTwoBase)
#define CUSTOM_PHYSADDR (0xdff000)
-#define custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR)))
+#define amiga_custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR)))
#define CIAA_PHYSADDR (0xbfe001)
#define CIAB_PHYSADDR (0xbfd000)
@@ -294,12 +294,12 @@ static inline void amifb_video_off(void)
{
if (amiga_chipset == CS_ECS || amiga_chipset == CS_AGA) {
/* program Denise/Lisa for a higher maximum play rate */
- custom.htotal = 113; /* 31 kHz */
- custom.vtotal = 223; /* 70 Hz */
- custom.beamcon0 = 0x4390; /* HARDDIS, VAR{BEAM,VSY,HSY,CSY}EN */
+ amiga_custom.htotal = 113; /* 31 kHz */
+ amiga_custom.vtotal = 223; /* 70 Hz */
+ amiga_custom.beamcon0 = 0x4390; /* HARDDIS, VAR{BEAM,VSY,HSY,CSY}EN */
/* suspend the monitor */
- custom.hsstrt = custom.hsstop = 116;
- custom.vsstrt = custom.vsstop = 226;
+ amiga_custom.hsstrt = amiga_custom.hsstop = 116;
+ amiga_custom.vsstrt = amiga_custom.vsstop = 226;
amiga_audio_min_period = 57;
}
}
diff --git a/include/asm-m68k/amigaints.h b/include/asm-m68k/amigaints.h
index 2aff4cfbf7b..aa968d014bb 100644
--- a/include/asm-m68k/amigaints.h
+++ b/include/asm-m68k/amigaints.h
@@ -109,8 +109,6 @@
extern void amiga_do_irq(int irq, struct pt_regs *fp);
extern void amiga_do_irq_list(int irq, struct pt_regs *fp);
-extern unsigned short amiga_intena_vals[];
-
/* CIA interrupt control register bits */
#define CIA_ICR_TA 0x01
diff --git a/include/asm-m68k/checksum.h b/include/asm-m68k/checksum.h
index 78860c20db0..17280ef719f 100644
--- a/include/asm-m68k/checksum.h
+++ b/include/asm-m68k/checksum.h
@@ -25,7 +25,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
* better 64-bit) boundary
*/
-extern unsigned int csum_partial_copy_from_user(const unsigned char *src,
+extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
unsigned char *dst,
int len, int sum,
int *csum_err);
diff --git a/include/asm-m68k/dsp56k.h b/include/asm-m68k/dsp56k.h
index ab3dd33e23a..2d8c0c9f794 100644
--- a/include/asm-m68k/dsp56k.h
+++ b/include/asm-m68k/dsp56k.h
@@ -13,7 +13,7 @@
/* Used for uploading DSP binary code */
struct dsp56k_upload {
int len;
- char *bin;
+ char __user *bin;
};
/* For the DSP host flags */
diff --git a/include/asm-m68k/floppy.h b/include/asm-m68k/floppy.h
index c6e708dd9f6..63a05ed95c1 100644
--- a/include/asm-m68k/floppy.h
+++ b/include/asm-m68k/floppy.h
@@ -46,7 +46,7 @@ asmlinkage irqreturn_t floppy_hardint(int irq, void *dev_id,
static int virtual_dma_count=0;
static int virtual_dma_residue=0;
-static char *virtual_dma_addr=0;
+static char *virtual_dma_addr=NULL;
static int virtual_dma_mode=0;
static int doing_pdma=0;
diff --git a/include/asm-m68k/hardirq.h b/include/asm-m68k/hardirq.h
index 728318bf7f0..5e1c5826c83 100644
--- a/include/asm-m68k/hardirq.h
+++ b/include/asm-m68k/hardirq.h
@@ -14,13 +14,4 @@ typedef struct {
#define HARDIRQ_BITS 8
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
#endif
diff --git a/include/asm-m68k/io.h b/include/asm-m68k/io.h
index 6bb8b0d8f99..dcfaa352d34 100644
--- a/include/asm-m68k/io.h
+++ b/include/asm-m68k/io.h
@@ -24,6 +24,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
+#include <linux/compiler.h>
#include <asm/raw_io.h>
#include <asm/virtconvert.h>
@@ -120,68 +121,68 @@ extern int isa_sex;
* be compiled in so the case statement will be optimised away
*/
-static inline u8 *isa_itb(unsigned long addr)
+static inline u8 __iomem *isa_itb(unsigned long addr)
{
switch(ISA_TYPE)
{
#ifdef CONFIG_Q40
- case Q40_ISA: return (u8 *)Q40_ISA_IO_B(addr);
+ case Q40_ISA: return (u8 __iomem *)Q40_ISA_IO_B(addr);
#endif
#ifdef CONFIG_GG2
- case GG2_ISA: return (u8 *)GG2_ISA_IO_B(addr);
+ case GG2_ISA: return (u8 __iomem *)GG2_ISA_IO_B(addr);
#endif
#ifdef CONFIG_AMIGA_PCMCIA
- case AG_ISA: return (u8 *)AG_ISA_IO_B(addr);
+ case AG_ISA: return (u8 __iomem *)AG_ISA_IO_B(addr);
#endif
- default: return 0; /* avoid warnings, just in case */
+ default: return NULL; /* avoid warnings, just in case */
}
}
-static inline u16 *isa_itw(unsigned long addr)
+static inline u16 __iomem *isa_itw(unsigned long addr)
{
switch(ISA_TYPE)
{
#ifdef CONFIG_Q40
- case Q40_ISA: return (u16 *)Q40_ISA_IO_W(addr);
+ case Q40_ISA: return (u16 __iomem *)Q40_ISA_IO_W(addr);
#endif
#ifdef CONFIG_GG2
- case GG2_ISA: return (u16 *)GG2_ISA_IO_W(addr);
+ case GG2_ISA: return (u16 __iomem *)GG2_ISA_IO_W(addr);
#endif
#ifdef CONFIG_AMIGA_PCMCIA
- case AG_ISA: return (u16 *)AG_ISA_IO_W(addr);
+ case AG_ISA: return (u16 __iomem *)AG_ISA_IO_W(addr);
#endif
- default: return 0; /* avoid warnings, just in case */
+ default: return NULL; /* avoid warnings, just in case */
}
}
-static inline u8 *isa_mtb(unsigned long addr)
+static inline u8 __iomem *isa_mtb(unsigned long addr)
{
switch(ISA_TYPE)
{
#ifdef CONFIG_Q40
- case Q40_ISA: return (u8 *)Q40_ISA_MEM_B(addr);
+ case Q40_ISA: return (u8 __iomem *)Q40_ISA_MEM_B(addr);
#endif
#ifdef CONFIG_GG2
- case GG2_ISA: return (u8 *)GG2_ISA_MEM_B(addr);
+ case GG2_ISA: return (u8 __iomem *)GG2_ISA_MEM_B(addr);
#endif
#ifdef CONFIG_AMIGA_PCMCIA
- case AG_ISA: return (u8 *)addr;
+ case AG_ISA: return (u8 __iomem *)addr;
#endif
- default: return 0; /* avoid warnings, just in case */
+ default: return NULL; /* avoid warnings, just in case */
}
}
-static inline u16 *isa_mtw(unsigned long addr)
+static inline u16 __iomem *isa_mtw(unsigned long addr)
{
switch(ISA_TYPE)
{
#ifdef CONFIG_Q40
- case Q40_ISA: return (u16 *)Q40_ISA_MEM_W(addr);
+ case Q40_ISA: return (u16 __iomem *)Q40_ISA_MEM_W(addr);
#endif
#ifdef CONFIG_GG2
- case GG2_ISA: return (u16 *)GG2_ISA_MEM_W(addr);
+ case GG2_ISA: return (u16 __iomem *)GG2_ISA_MEM_W(addr);
#endif
#ifdef CONFIG_AMIGA_PCMCIA
- case AG_ISA: return (u16 *)addr;
+ case AG_ISA: return (u16 __iomem *)addr;
#endif
- default: return 0; /* avoid warnings, just in case */
+ default: return NULL; /* avoid warnings, just in case */
}
}
@@ -326,20 +327,20 @@ static inline void isa_delay(void)
#define mmiowb()
-static inline void *ioremap(unsigned long physaddr, unsigned long size)
+static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
-static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
+static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
-static inline void *ioremap_writethrough(unsigned long physaddr,
+static inline void __iomem *ioremap_writethrough(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
}
-static inline void *ioremap_fullcache(unsigned long physaddr,
+static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
diff --git a/include/asm-m68k/irq.h b/include/asm-m68k/irq.h
index 127ad190cf2..325c86f8512 100644
--- a/include/asm-m68k/irq.h
+++ b/include/asm-m68k/irq.h
@@ -23,6 +23,15 @@
#endif
/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+/*
* Interrupt source definitions
* General interrupt sources are the level 1-7.
* Adding an interrupt service routine for one of these sources
diff --git a/include/asm-m68k/machdep.h b/include/asm-m68k/machdep.h
index a0dd5c47002..7d3fee34236 100644
--- a/include/asm-m68k/machdep.h
+++ b/include/asm-m68k/machdep.h
@@ -34,7 +34,6 @@ extern void (*mach_power_off)( void );
extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
extern void (*mach_hd_setup)(char *, int *);
extern long mach_max_dma_address;
-extern void (*mach_floppy_setup)(char *, int *);
extern void (*mach_heartbeat) (int);
extern void (*mach_l2_flush) (int);
extern void (*mach_beep) (unsigned int, unsigned int);
diff --git a/include/asm-m68k/raw_io.h b/include/asm-m68k/raw_io.h
index 041f0a87b25..5439bcaa57c 100644
--- a/include/asm-m68k/raw_io.h
+++ b/include/asm-m68k/raw_io.h
@@ -19,9 +19,9 @@
#define IOMAP_NOCACHE_NONSER 2
#define IOMAP_WRITETHROUGH 3
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);
-extern void *__ioremap(unsigned long physaddr, unsigned long size,
+extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
int cacheflag);
extern void __iounmap(void *addr, unsigned long size);
@@ -30,21 +30,21 @@ extern void __iounmap(void *addr, unsigned long size);
* two accesses to memory, which may be undesirable for some devices.
*/
#define in_8(addr) \
- ({ u8 __v = (*(volatile u8 *) (addr)); __v; })
+ ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
#define in_be16(addr) \
- ({ u16 __v = (*(volatile u16 *) (addr)); __v; })
+ ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
#define in_be32(addr) \
- ({ u32 __v = (*(volatile u32 *) (addr)); __v; })
+ ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
#define in_le16(addr) \
- ({ u16 __v = le16_to_cpu(*(volatile u16 *) (addr)); __v; })
+ ({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; })
#define in_le32(addr) \
- ({ u32 __v = le32_to_cpu(*(volatile u32 *) (addr)); __v; })
+ ({ u32 __v = le32_to_cpu(*(__force volatile u32 *) (addr)); __v; })
-#define out_8(addr,b) (void)((*(volatile u8 *) (addr)) = (b))
-#define out_be16(addr,w) (void)((*(volatile u16 *) (addr)) = (w))
-#define out_be32(addr,l) (void)((*(volatile u32 *) (addr)) = (l))
-#define out_le16(addr,w) (void)((*(volatile u16 *) (addr)) = cpu_to_le16(w))
-#define out_le32(addr,l) (void)((*(volatile u32 *) (addr)) = cpu_to_le32(l))
+#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
+#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
+#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
+#define out_le16(addr,w) (void)((*(__force volatile u16 *) (addr)) = cpu_to_le16(w))
+#define out_le32(addr,l) (void)((*(__force volatile u32 *) (addr)) = cpu_to_le32(l))
#define raw_inb in_8
#define raw_inw in_be16
@@ -54,7 +54,7 @@ extern void __iounmap(void *addr, unsigned long size);
#define raw_outw(val,port) out_be16((port),(val))
#define raw_outl(val,port) out_be32((port),(val))
-static inline void raw_insb(volatile u8 *port, u8 *buf, unsigned int len)
+static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
{
unsigned int i;
@@ -62,7 +62,7 @@ static inline void raw_insb(volatile u8 *port, u8 *buf, unsigned int len)
*buf++ = in_8(port);
}
-static inline void raw_outsb(volatile u8 *port, const u8 *buf,
+static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf,
unsigned int len)
{
unsigned int i;
@@ -71,7 +71,7 @@ static inline void raw_outsb(volatile u8 *port, const u8 *buf,
out_8(port, *buf++);
}
-static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
+static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
{
unsigned int tmp;
@@ -110,7 +110,7 @@ static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
}
}
-static inline void raw_outsw(volatile u16 *port, const u16 *buf,
+static inline void raw_outsw(volatile u16 __iomem *port, const u16 *buf,
unsigned int nr)
{
unsigned int tmp;
@@ -150,7 +150,7 @@ static inline void raw_outsw(volatile u16 *port, const u16 *buf,
}
}
-static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
+static inline void raw_insl(volatile u32 __iomem *port, u32 *buf, unsigned int nr)
{
unsigned int tmp;
@@ -189,7 +189,7 @@ static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
}
}
-static inline void raw_outsl(volatile u32 *port, const u32 *buf,
+static inline void raw_outsl(volatile u32 __iomem *port, const u32 *buf,
unsigned int nr)
{
unsigned int tmp;
@@ -230,7 +230,7 @@ static inline void raw_outsl(volatile u32 *port, const u32 *buf,
}
-static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
+static inline void raw_insw_swapw(volatile u16 __iomem *port, u16 *buf,
unsigned int nr)
{
if ((nr) % 8)
@@ -283,7 +283,7 @@ static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
: "d0", "a0", "a1", "d6");
}
-static inline void raw_outsw_swapw(volatile u16 *port, const u16 *buf,
+static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
unsigned int nr)
{
if ((nr) % 8)
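
The __iomem and __force annotations being added throughout raw_io.h (like the __user ones in uaccess.h further down) are only meaningful to sparse; to the compiler they expand to nothing. Below is a stand-alone sketch of the mechanism, assuming the usual compiler.h-style definitions, with an in_be16-like accessor reading an ordinary variable that stands in for a device register.

/* Stand-alone sketch of the __iomem/__force annotation mechanism; the
 * attribute definitions are assumed to follow the usual compiler.h pattern. */
#include <stdint.h>
#include <stdio.h>

#ifdef __CHECKER__                        /* defined only when sparse runs */
# define __iomem __attribute__((noderef, address_space(2)))
# define __force __attribute__((force))
#else
# define __iomem
# define __force
#endif

/* modelled on in_be16(): on big-endian m68k this is just a plain load; the
 * __force cast is what allows dereferencing an __iomem pointer */
static inline uint16_t my_in_be16(const volatile uint16_t __iomem *addr)
{
        return *(__force const volatile uint16_t *)addr;
}

int main(void)
{
        uint16_t fake_reg = 0xBEEF;       /* stands in for a device register */
        const volatile uint16_t __iomem *port =
                (__force const volatile uint16_t __iomem *)&fake_reg;

        printf("0x%04x\n", my_in_be16(port));
        return 0;
}

Passing a plain pointer into such an accessor without the cast is exactly what sparse would then flag, which is the point of converting these headers.
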
diff --git a/include/asm-m68k/signal.h b/include/asm-m68k/signal.h
index a0cdf908237..b7b7ea20caa 100644
--- a/include/asm-m68k/signal.h
+++ b/include/asm-m68k/signal.h
@@ -144,7 +144,7 @@ struct sigaction {
#endif /* __KERNEL__ */
typedef struct sigaltstack {
- void *ss_sp;
+ void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
diff --git a/include/asm-m68k/sun3_pgtable.h b/include/asm-m68k/sun3_pgtable.h
index e974bb07204..5156a28a18d 100644
--- a/include/asm-m68k/sun3_pgtable.h
+++ b/include/asm-m68k/sun3_pgtable.h
@@ -211,7 +211,7 @@ static inline unsigned long pte_to_pgoff(pte_t pte)
return pte.pte & SUN3_PAGE_PGNUM_MASK;
}
-static inline pte_t pgoff_to_pte(inline unsigned off)
+static inline pte_t pgoff_to_pte(unsigned off)
{
pte_t pte = { off + SUN3_PAGE_ACCESSED };
return pte;
diff --git a/include/asm-m68k/sun3ints.h b/include/asm-m68k/sun3ints.h
index fd838eb1421..bd038fccb64 100644
--- a/include/asm-m68k/sun3ints.h
+++ b/include/asm-m68k/sun3ints.h
@@ -31,7 +31,6 @@ int sun3_request_irq(unsigned int irq,
);
extern void sun3_init_IRQ (void);
extern irqreturn_t (*sun3_default_handler[]) (int, void *, struct pt_regs *);
-extern irqreturn_t (*sun3_inthandler[]) (int, void *, struct pt_regs *);
extern void sun3_free_irq (unsigned int irq, void *dev_id);
extern void sun3_enable_interrupts (void);
extern void sun3_disable_interrupts (void);
diff --git a/include/asm-m68k/sun3xflop.h b/include/asm-m68k/sun3xflop.h
index fda1eccf10a..98a9f79dab2 100644
--- a/include/asm-m68k/sun3xflop.h
+++ b/include/asm-m68k/sun3xflop.h
@@ -208,7 +208,7 @@ static int sun3xflop_request_irq(void)
if(!once) {
once = 1;
- error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, SA_INTERRUPT, "floppy", 0);
+ error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, SA_INTERRUPT, "floppy", NULL);
return ((error == 0) ? 0 : -1);
} else return 0;
}
@@ -238,7 +238,7 @@ static int sun3xflop_init(void)
*sun3x_fdc.fcr_r = 0;
/* Success... */
- floppy_set_flags(0, 1, FD_BROKEN_DCL); // I don't know how to detect this.
+ floppy_set_flags(NULL, 1, FD_BROKEN_DCL); // I don't know how to detect this.
allowed_drive_mask = 0x01;
return (int) SUN3X_FDC;
}
diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h
index 9532ca3c45c..c4d622a57df 100644
--- a/include/asm-m68k/thread_info.h
+++ b/include/asm-m68k/thread_info.h
@@ -37,6 +37,7 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
#define task_thread_info(tsk) (&(tsk)->thread.info)
+#define task_stack_page(tsk) ((void *)(tsk)->thread_info)
#define current_thread_info() task_thread_info(current)
#define __HAVE_THREAD_FUNCTIONS
diff --git a/include/asm-m68k/uaccess.h b/include/asm-m68k/uaccess.h
index f5cedf19cf6..2ffd87b0a76 100644
--- a/include/asm-m68k/uaccess.h
+++ b/include/asm-m68k/uaccess.h
@@ -42,6 +42,7 @@ struct exception_table_entry
({ \
int __pu_err; \
typeof(*(ptr)) __pu_val = (x); \
+ __chk_user_ptr(ptr); \
switch (sizeof (*(ptr))) { \
case 1: \
__put_user_asm(__pu_err, __pu_val, ptr, b); \
@@ -91,6 +92,7 @@ __asm__ __volatile__ \
({ \
int __gu_err; \
typeof(*(ptr)) __gu_val; \
+ __chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_asm(__gu_err, __gu_val, ptr, b, "=d"); \
@@ -105,7 +107,7 @@ __asm__ __volatile__ \
__gu_err = __constant_copy_from_user(&__gu_val, ptr, 8); \
break; \
default: \
- __gu_val = 0; \
+ __gu_val = (typeof(*(ptr)))0; \
__gu_err = __get_user_bad(); \
break; \
} \
@@ -134,7 +136,7 @@ __asm__ __volatile__ \
: "m"(*(ptr)), "i" (-EFAULT), "0"(0))
static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
+__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long tmp;
__asm__ __volatile__
@@ -189,7 +191,7 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
}
static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
+__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
{
unsigned long tmp;
__asm__ __volatile__
@@ -264,7 +266,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n)
: "d0", "memory")
static inline unsigned long
-__constant_copy_from_user(void *to, const void *from, unsigned long n)
+__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
switch (n) {
case 0:
@@ -520,7 +522,7 @@ __constant_copy_from_user(void *to, const void *from, unsigned long n)
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long
-__constant_copy_to_user(void *to, const void *from, unsigned long n)
+__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
switch (n) {
case 0:
@@ -766,7 +768,7 @@ __constant_copy_to_user(void *to, const void *from, unsigned long n)
*/
static inline long
-strncpy_from_user(char *dst, const char *src, long count)
+strncpy_from_user(char *dst, const char __user *src, long count)
{
long res;
if (count == 0) return count;
@@ -799,11 +801,11 @@ strncpy_from_user(char *dst, const char *src, long count)
*
* Return 0 on exception, a value greater than N if too long
*/
-static inline long strnlen_user(const char *src, long n)
+static inline long strnlen_user(const char __user *src, long n)
{
long res;
- res = -(long)src;
+ res = -(unsigned long)src;
__asm__ __volatile__
("1:\n"
" tstl %2\n"
@@ -842,7 +844,7 @@ static inline long strnlen_user(const char *src, long n)
*/
static inline unsigned long
-clear_user(void *to, unsigned long n)
+clear_user(void __user *to, unsigned long n)
{
__asm__ __volatile__
(" tstl %1\n"
diff --git a/include/asm-m68k/zorro.h b/include/asm-m68k/zorro.h
index cf816588bed..5ce97c22b58 100644
--- a/include/asm-m68k/zorro.h
+++ b/include/asm-m68k/zorro.h
@@ -15,24 +15,24 @@
#define z_memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
#define z_memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
-static inline void *z_remap_nocache_ser(unsigned long physaddr,
+static inline void __iomem *z_remap_nocache_ser(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
-static inline void *z_remap_nocache_nonser(unsigned long physaddr,
+static inline void __iomem *z_remap_nocache_nonser(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_NONSER);
}
-static inline void *z_remap_writethrough(unsigned long physaddr,
+static inline void __iomem *z_remap_writethrough(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
}
-static inline void *z_remap_fullcache(unsigned long physaddr,
+static inline void __iomem *z_remap_fullcache(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
diff --git a/include/asm-m68knommu/machdep.h b/include/asm-m68knommu/machdep.h
index 5a9f9c297f7..27c90afd333 100644
--- a/include/asm-m68knommu/machdep.h
+++ b/include/asm-m68knommu/machdep.h
@@ -38,7 +38,6 @@ extern void (*mach_power_off)( void );
extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
extern void (*mach_hd_setup)(char *, int *);
extern long mach_max_dma_address;
-extern void (*mach_floppy_setup)(char *, int *);
extern void (*mach_floppy_eject)(void);
extern void (*mach_heartbeat) (int);
extern void (*mach_l2_flush) (int);
diff --git a/include/asm-m68knommu/thread_info.h b/include/asm-m68knommu/thread_info.h
index 7b9a3fa3af5..b8f009edf2b 100644
--- a/include/asm-m68knommu/thread_info.h
+++ b/include/asm-m68knommu/thread_info.h
@@ -75,8 +75,6 @@ static inline struct thread_info *current_thread_info(void)
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_SIZE_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x4000000
diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h
index 82141c711c3..59d26b52ba3 100644
--- a/include/asm-mips/mach-ip27/topology.h
+++ b/include/asm-mips/mach-ip27/topology.h
@@ -27,7 +27,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
.max_interval = 32, \
.busy_factor = 32, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000), \
.cache_nice_tries = 1, \
.per_cpu_gain = 100, \
.flags = SD_LOAD_BALANCE \
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h
index de53055a62a..39d2bd50fec 100644
--- a/include/asm-mips/processor.h
+++ b/include/asm-mips/processor.h
@@ -200,11 +200,11 @@ extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long
unsigned long get_wchan(struct task_struct *p);
-#define __PT_REG(reg) ((long)&((struct pt_regs *)0)->reg - sizeof(struct pt_regs))
-#define __KSTK_TOS(tsk) ((unsigned long)(tsk->thread_info) + THREAD_SIZE - 32)
-#define KSTK_EIP(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(cp0_epc)))
-#define KSTK_ESP(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(regs[29])))
-#define KSTK_STATUS(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(cp0_status)))
+#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
+#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk) - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
+#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
#define cpu_relax() barrier()
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 330c4e497af..e8e5d414337 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -159,11 +159,21 @@ struct task_struct;
do { \
if (cpu_has_dsp) \
__save_dsp(prev); \
- (last) = resume(prev, next, next->thread_info); \
+ (last) = resume(prev, next, task_thread_info(next)); \
if (cpu_has_dsp) \
__restore_dsp(current); \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
__u32 retval;
diff --git a/include/asm-mips/thread_info.h b/include/asm-mips/thread_info.h
index e6c24472e03..1612b3fe108 100644
--- a/include/asm-mips/thread_info.h
+++ b/include/asm-mips/thread_info.h
@@ -97,8 +97,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#endif
#define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index f3928d3a80c..a5a973c0c07 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -49,6 +49,15 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
(last) = _switch_to(prev, next); \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
/* interrupt control */
diff --git a/include/asm-parisc/thread_info.h b/include/asm-parisc/thread_info.h
index 57bbb76cb6c..ac32f140b83 100644
--- a/include/asm-parisc/thread_info.h
+++ b/include/asm-parisc/thread_info.h
@@ -43,9 +43,6 @@ struct thread_info {
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *)mfctl(30))
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 4c888303e85..9b822afa7d0 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -183,6 +183,16 @@ struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
extern unsigned int rtas_data;
extern int mem_init_done; /* set on boot once kmalloc can be called */
extern unsigned long memory_limit;
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index ac1e80e6033..7e09d7cda93 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -89,9 +89,6 @@ struct thread_info {
#endif /* THREAD_SHIFT < PAGE_SHIFT */
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 9f3d4da261c..1e19cd00af2 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -39,7 +39,6 @@ static inline int node_to_first_cpu(int node)
.max_interval = 32, \
.busy_factor = 32, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
.cache_nice_tries = 1, \
.per_cpu_gain = 100, \
.busy_idx = 3, \
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index b9703734827..fb49c0c49ea 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -131,6 +131,16 @@ extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h
index 372d51cccd5..710646e64f7 100644
--- a/include/asm-s390/elf.h
+++ b/include/asm-s390/elf.h
@@ -163,7 +163,7 @@ static inline int dump_regs(struct pt_regs *ptregs, elf_gregset_t *regs)
static inline int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
- struct pt_regs *ptregs = __KSTK_PTREGS(tsk);
+ struct pt_regs *ptregs = task_pt_regs(tsk);
memcpy(&regs->psw, &ptregs->psw, sizeof(regs->psw)+sizeof(regs->gprs));
memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
regs->orig_gpr2 = ptregs->orig_gpr2;
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 4ec652ebb3b..c5cbc4bd841 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -191,10 +191,10 @@ extern void show_registers(struct pt_regs *regs);
extern void show_trace(struct task_struct *task, unsigned long *sp);
unsigned long get_wchan(struct task_struct *p);
-#define __KSTK_PTREGS(tsk) ((struct pt_regs *) \
- ((unsigned long) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs)))
-#define KSTK_EIP(tsk) (__KSTK_PTREGS(tsk)->psw.addr)
-#define KSTK_ESP(tsk) (__KSTK_PTREGS(tsk)->gprs[15])
+#define task_pt_regs(tsk) ((struct pt_regs *) \
+ (task_stack_page(tsk) + THREAD_SIZE) - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
/*
* Give up the time slice of the virtual PU.
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 864cae7e1fd..c7c3a9ad593 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -104,6 +104,16 @@ static inline void restore_access_regs(unsigned int *acrs)
prev = __switch_to(prev,next); \
} while (0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_user_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h
index 6c18a3f2431..f3797a52c4e 100644
--- a/include/asm-s390/thread_info.h
+++ b/include/asm-s390/thread_info.h
@@ -81,8 +81,6 @@ static inline struct thread_info *current_thread_info(void)
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL,THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti),THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif
diff --git a/include/asm-sh/ptrace.h b/include/asm-sh/ptrace.h
index 0f75e16a741..792fc35bd62 100644
--- a/include/asm-sh/ptrace.h
+++ b/include/asm-sh/ptrace.h
@@ -91,6 +91,16 @@ struct pt_dspregs {
#define instruction_pointer(regs) ((regs)->pc)
extern void show_regs(struct pt_regs *);
+#ifdef CONFIG_SH_DSP
+#define task_pt_regs(task) \
+ ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
+ - sizeof(struct pt_dspregs) - sizeof(unsigned long)) - 1)
+#else
+#define task_pt_regs(task) \
+ ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
+ - sizeof(unsigned long)) - 1)
+#endif
+
static inline unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 28a3c2d8bcd..bb0330499bd 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -57,6 +57,16 @@
last = __last; \
} while (0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
#define nop() __asm__ __volatile__ ("nop")
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 46080cefaff..85f0c11b431 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -60,8 +60,6 @@ static inline struct thread_info *current_thread_info(void)
#define THREAD_SIZE (2*PAGE_SIZE)
#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#else /* !__ASSEMBLY__ */
diff --git a/include/asm-sh64/thread_info.h b/include/asm-sh64/thread_info.h
index 10f024c6a2e..1f825cb163c 100644
--- a/include/asm-sh64/thread_info.h
+++ b/include/asm-sh64/thread_info.h
@@ -66,8 +66,6 @@ static inline struct thread_info *current_thread_info(void)
#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 1f6b71f9e1b..58dd162927b 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -155,7 +155,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
"here:\n" \
: "=&r" (last) \
: "r" (&(current_set[hard_smp_processor_id()])), \
- "r" ((next)->thread_info), \
+ "r" (task_thread_info(next)), \
"i" (TI_KPSR), \
"i" (TI_KSP), \
"i" (TI_TASK) \
@@ -166,6 +166,16 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
} while(0)
/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
* Changing the IRQ level on the Sparc.
*/
extern void local_irq_restore(unsigned long);
diff --git a/include/asm-sparc/thread_info.h b/include/asm-sparc/thread_info.h
index ff6ccb3d24c..65f060b040a 100644
--- a/include/asm-sparc/thread_info.h
+++ b/include/asm-sparc/thread_info.h
@@ -92,9 +92,6 @@ BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
#endif /* __ASSEMBLY__ */
/*
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 91458118277..69539a8ab83 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -119,7 +119,7 @@ typedef struct {
#endif
#define ELF_CORE_COPY_TASK_REGS(__tsk, __elf_regs) \
- ({ ELF_CORE_COPY_REGS((*(__elf_regs)), (__tsk)->thread_info->kregs); 1; })
+ ({ ELF_CORE_COPY_REGS((*(__elf_regs)), task_pt_regs(__tsk)); 1; })
/*
* This is used to ensure we don't load something for the wrong architecture.
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 08ba72d7722..57ee7b30618 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -60,7 +60,7 @@ do { \
register unsigned long pgd_cache asm("o4"); \
paddr = __pa((__mm)->pgd); \
pgd_cache = 0UL; \
- if ((__tsk)->thread_info->flags & _TIF_32BIT) \
+ if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
pgd_cache = get_pgd_cache((__mm)->pgd); \
__asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
"mov %3, %%g4\n\t" \
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index 3169f3e2237..cd8d9b4c865 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -186,8 +186,9 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern unsigned long get_wchan(struct task_struct *task);
-#define KSTK_EIP(tsk) ((tsk)->thread_info->kregs->tpc)
-#define KSTK_ESP(tsk) ((tsk)->thread_info->kregs->u_regs[UREG_FP])
+#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
#define cpu_relax() barrier()
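sparc64 is the one architecture in this series that does not derive pt_regs from the stack geometry: thread_info already caches a kregs pointer, so task_pt_regs() is just an accessor for it, and KSTK_EIP()/KSTK_ESP() read the user pc and frame pointer through that. A short contrast of the two styles, with the kind of caller the KSTK macros exist for sketched underneath (procfs-style status code, not the real fs/proc source):

	/* derived from the stack geometry (s390, sh, v850, xtensa, ... above) */
	#define task_pt_regs(tsk) \
		((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE) - 1)

	/* looked up from a cached pointer (sparc64) */
	#define task_pt_regs(tsk)	(task_thread_info(tsk)->kregs)

	/* either way, callers stay identical */
	unsigned long eip = KSTK_EIP(tsk);	/* user program counter     */
	unsigned long esp = KSTK_ESP(tsk);	/* user stack/frame pointer */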
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 309f1466b6f..af254e58183 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -208,7 +208,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
__asm__ __volatile__("wr %%g0, %0, %%asi" \
- : : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
+ : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
__asm__ __volatile__( \
"mov %%g4, %%g7\n\t" \
"wrpr %%g0, 0x95, %%pstate\n\t" \
@@ -238,7 +238,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
"b,a ret_from_syscall\n\t" \
"1:\n\t" \
: "=&r" (last) \
- : "0" (next->thread_info), \
+ : "0" (task_thread_info(next)), \
"i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
"i" (TI_CWP), "i" (TI_TASK) \
: "cc", \
@@ -253,6 +253,16 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
} \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
unsigned long tmp1, tmp2;
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index 97267f059ef..705c71972c3 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -56,9 +56,6 @@ static inline struct thread_info *current_thread_info(void)
((struct thread_info *) kmalloc(THREAD_SIZE, GFP_KERNEL))
#define free_thread_info(ti) kfree(ti)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
#endif
#define PREEMPT_ACTIVE 0x10000000
diff --git a/include/asm-v850/processor.h b/include/asm-v850/processor.h
index 98f929427d3..2d31308935a 100644
--- a/include/asm-v850/processor.h
+++ b/include/asm-v850/processor.h
@@ -98,10 +98,10 @@ unsigned long get_wchan (struct task_struct *p);
/* Return some info about the user process TASK. */
-#define task_tos(task) ((unsigned long)(task)->thread_info + THREAD_SIZE)
-#define task_regs(task) ((struct pt_regs *)task_tos (task) - 1)
-#define task_sp(task) (task_regs (task)->gpr[GPR_SP])
-#define task_pc(task) (task_regs (task)->pc)
+#define task_tos(task) ((unsigned long)task_stack_page(task) + THREAD_SIZE)
+#define task_pt_regs(task) ((struct pt_regs *)task_tos (task) - 1)
+#define task_sp(task) (task_pt_regs (task)->gpr[GPR_SP])
+#define task_pc(task) (task_pt_regs (task)->pc)
/* Grotty old names for some. */
#define KSTK_EIP(task) task_pc (task)
#define KSTK_ESP(task) task_sp (task)
diff --git a/include/asm-v850/thread_info.h b/include/asm-v850/thread_info.h
index e4cfad94a55..82b8f284620 100644
--- a/include/asm-v850/thread_info.h
+++ b/include/asm-v850/thread_info.h
@@ -58,8 +58,6 @@ struct thread_info {
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-x86_64/compat.h b/include/asm-x86_64/compat.h
index 3863a7da372..b37ab8218ef 100644
--- a/include/asm-x86_64/compat.h
+++ b/include/asm-x86_64/compat.h
@@ -198,7 +198,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
static __inline__ void __user *compat_alloc_user_space(long len)
{
- struct pt_regs *regs = (void *)current->thread.rsp0 - sizeof(struct pt_regs);
+ struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->rsp - len;
}
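Given the x86-64 definition a little further down in this patch, task_pt_regs(tsk) is (struct pt_regs *)(tsk)->thread.rsp0 - 1, so the new body resolves to exactly the old thread.rsp0 - sizeof(struct pt_regs) address; compat_alloc_user_space() then hands back len bytes just below the user stack pointer saved in those registers. A hedged usage sketch; struct compat_foo, do_foo() and arg32 are made-up names, not real kernel interfaces:

	/* 32-bit emulation path rebuilding a native-layout argument in user space */
	struct compat_foo __user *u = compat_alloc_user_space(sizeof(*u));

	if (put_user(arg32, &u->arg))
		return -EFAULT;
	return do_foo(fd, u);		/* hand the user-space copy to the native code */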
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
index 57f7e143384..876eb9a2fe7 100644
--- a/include/asm-x86_64/i387.h
+++ b/include/asm-x86_64/i387.h
@@ -30,7 +30,7 @@ extern int save_i387(struct _fpstate __user *buf);
*/
#define unlazy_fpu(tsk) do { \
- if ((tsk)->thread_info->status & TS_USEDFPU) \
+ if (task_thread_info(tsk)->status & TS_USEDFPU) \
save_init_fpu(tsk); \
} while (0)
@@ -46,9 +46,9 @@ static inline void tolerant_fwait(void)
}
#define clear_fpu(tsk) do { \
- if ((tsk)->thread_info->status & TS_USEDFPU) { \
+ if (task_thread_info(tsk)->status & TS_USEDFPU) { \
tolerant_fwait(); \
- (tsk)->thread_info->status &= ~TS_USEDFPU; \
+ task_thread_info(tsk)->status &= ~TS_USEDFPU; \
stts(); \
} \
} while (0)
@@ -170,10 +170,10 @@ static inline void kernel_fpu_end(void)
preempt_enable();
}
-static inline void save_init_fpu( struct task_struct *tsk )
+static inline void save_init_fpu(struct task_struct *tsk)
{
__fxsave_clear(tsk);
- tsk->thread_info->status &= ~TS_USEDFPU;
+ task_thread_info(tsk)->status &= ~TS_USEDFPU;
stts();
}
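These macros are the lazy-FPU bookkeeping, now reached through task_thread_info() instead of tsk->thread_info: TS_USEDFPU records that the task owns live FPU state, unlazy_fpu() writes that state back before anyone else looks at it, and clear_fpu() discards it and sets CR0.TS via stts() so the next FPU instruction traps for a lazy reload. A condensed sketch of the hand-off around a context switch; this is not the real __switch_to(), just the protocol the macros imply:

	if (task_thread_info(prev)->status & TS_USEDFPU)
		save_init_fpu(prev);	/* fxsave, clear TS_USEDFPU, stts() */
	/*
	 * 'next' now runs with CR0.TS set: its first FPU instruction raises
	 * device-not-available, and that trap handler restores next's FPU
	 * state on demand.
	 */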
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 394dd729752..87a282b1043 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -321,8 +321,8 @@ extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
extern unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) \
- (((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 38c1e8a69c9..0eacbefb7dd 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -193,6 +193,15 @@ static inline void write_cr4(unsigned long val)
#define wbinvd() \
__asm__ __volatile__ ("wbinvd": : :"memory");
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+ wbinvd();
+}
+
#endif /* __KERNEL__ */
#define nop() __asm__ __volatile__ ("nop")
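x86-64 can implement the hook for real: wbinvd writes every dirty line back to memory and invalidates all levels of the CPU's caches. It is privileged and very expensive, which is acceptable for a boot-time calibration path but nothing hotter. A sketch of how a calibration loop might bracket its runs with it; measure_cost(), ITERATIONS and size are stand-in names, not the scheduler's actual code:

	unsigned long long best = ~0ULL;
	int i;

	for (i = 0; i < ITERATIONS; i++) {
		sched_cacheflush();		/* make every run start cache-cold */
		best = min(best, measure_cost(size));
	}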
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index eb7c5fda187..4ac0e0a3693 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -76,8 +76,6 @@ static inline struct thread_info *stack_thread_info(void)
#define alloc_thread_info(tsk) \
((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#else /* !__ASSEMBLY__ */
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 7d82bc56b9f..2fa7f27381b 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -39,7 +39,6 @@ extern int __node_distance(int, int);
.max_interval = 32, \
.busy_factor = 32, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
.cache_nice_tries = 2, \
.busy_idx = 3, \
.idle_idx = 2, \
diff --git a/include/asm-xtensa/processor.h b/include/asm-xtensa/processor.h
index 9cab5e4298b..d1d72ad36f0 100644
--- a/include/asm-xtensa/processor.h
+++ b/include/asm-xtensa/processor.h
@@ -184,12 +184,12 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define release_segments(mm) do { } while(0)
#define forget_segments() do { } while (0)
-#define thread_saved_pc(tsk) (xtensa_pt_regs(tsk)->pc)
+#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
extern unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) (xtensa_pt_regs(tsk)->pc)
-#define KSTK_ESP(tsk) (xtensa_pt_regs(tsk)->areg[1])
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
#define cpu_relax() do { } while (0)
diff --git a/include/asm-xtensa/ptrace.h b/include/asm-xtensa/ptrace.h
index aa4fd7fb3ce..a5ac71a5205 100644
--- a/include/asm-xtensa/ptrace.h
+++ b/include/asm-xtensa/ptrace.h
@@ -113,8 +113,8 @@ struct pt_regs {
};
#ifdef __KERNEL__
-# define xtensa_pt_regs(tsk) ((struct pt_regs*) \
- (((long)(tsk)->thread_info + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4)) - 1)
+# define task_pt_regs(tsk) ((struct pt_regs*) \
+ (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
# define instruction_pointer(regs) ((regs)->pc)
extern void show_regs(struct pt_regs *);
diff --git a/include/asm-xtensa/thread_info.h b/include/asm-xtensa/thread_info.h
index af208d41fd8..5ae34ab7159 100644
--- a/include/asm-xtensa/thread_info.h
+++ b/include/asm-xtensa/thread_info.h
@@ -93,8 +93,6 @@ static inline struct thread_info *current_thread_info(void)
/* thread information allocation */
#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#else /* !__ASSEMBLY__ */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3b74c4bf293..a72e1713542 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -631,7 +631,14 @@ struct sched_domain {
extern void partition_sched_domains(cpumask_t *partition1,
cpumask_t *partition2);
-#endif /* CONFIG_SMP */
+
+/*
+ * Maximum cache size the migration-costs auto-tuning code will
+ * search from:
+ */
+extern unsigned int max_cache_size;
+
+#endif /* CONFIG_SMP */
struct io_context; /* See blkdev.h */
@@ -689,9 +696,12 @@ struct task_struct {
int lock_depth; /* BKL lock depth */
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+ int last_waker_cpu; /* CPU that last woke this task up */
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
int oncpu;
#endif
+#endif
int prio, static_prio;
struct list_head run_list;
prio_array_t *array;
@@ -1230,6 +1240,7 @@ static inline void task_unlock(struct task_struct *p)
#ifndef __HAVE_THREAD_FUNCTIONS
#define task_thread_info(task) (task)->thread_info
+#define task_stack_page(task) ((void*)((task)->thread_info))
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
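task_thread_info() and task_stack_page() are two views of the same allocation: thread_info sits at the bottom of the task's kernel stack and the stack grows down toward it, which is why every task_pt_regs() above is phrased as stack page plus THREAD_SIZE, minus one pt_regs. A self-contained user-space model of that layout; THREAD_SIZE and the struct contents are placeholders, and the helpers are re-created as local stand-ins:

	#include <assert.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define THREAD_SIZE (8 * 1024)				/* placeholder */

	struct task_struct;					/* forward decl */
	struct thread_info { struct task_struct *task; unsigned long flags; };
	struct pt_regs     { unsigned long pc, sp; };
	struct task_struct { struct thread_info *thread_info; };

	/* stand-ins for the generic helpers added to <linux/sched.h> */
	#define task_thread_info(task)	((task)->thread_info)
	#define task_stack_page(task)	((void *)((task)->thread_info))
	/* the per-arch pattern built on top of them */
	#define task_pt_regs(task) \
		((struct pt_regs *)((char *)task_stack_page(task) + THREAD_SIZE) - 1)

	int main(void)
	{
		struct task_struct tsk;

		/* thread_info lives at the bottom of the stack allocation */
		tsk.thread_info = calloc(1, THREAD_SIZE);
		tsk.thread_info->task = &tsk;

		assert((void *)task_thread_info(&tsk) == task_stack_page(&tsk));
		assert((char *)task_pt_regs(&tsk) + sizeof(struct pt_regs) ==
		       (char *)task_stack_page(&tsk) + THREAD_SIZE);

		printf("usable stack between %p and %p\n",
		       (void *)(task_thread_info(&tsk) + 1),
		       (void *)task_pt_regs(&tsk));
		free(tsk.thread_info);
		return 0;
	}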
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 3df1d474e5c..315a5163d6a 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -86,7 +86,6 @@
.max_interval = 2, \
.busy_factor = 8, \
.imbalance_pct = 110, \
- .cache_hot_time = 0, \
.cache_nice_tries = 0, \
.per_cpu_gain = 25, \
.busy_idx = 0, \
@@ -117,7 +116,6 @@
.max_interval = 4, \
.busy_factor = 64, \
.imbalance_pct = 125, \
- .cache_hot_time = (5*1000000/2), \
.cache_nice_tries = 1, \
.per_cpu_gain = 100, \
.busy_idx = 2, \
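The cache_hot_time constants deleted here and above (0, 2.5 ms and 10 ms across the sibling, CPU and NUMA-node domain templates) are exactly what the max_cache_size-driven auto-tuning replaces: how long a task stays cache-hot depends on cache size and memory bandwidth, not on the domain level alone. A back-of-envelope illustration of why one constant cannot fit all machines; the cache sizes and the 3.2 GB/s bandwidth are assumed round numbers, not measurements:

	#include <stdio.h>

	int main(void)
	{
		/* time to repopulate a cache of 'size' bytes at 'bw' bytes/sec */
		const double bw = 3.2e9;			/* ~3.2 GB/s, assumed  */
		const double sizes[] = { 256e3, 2e6, 16e6 };	/* 256 KB, 2 MB, 16 MB */
		int i;

		for (i = 0; i < 3; i++)
			printf("%5.0f KB cache -> ~%.2f ms to repopulate\n",
			       sizes[i] / 1e3, sizes[i] / bw * 1e3);
		return 0;
	}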