| author | Greg Ungerer <gerg@uclinux.org> | 2008-07-31 14:38:07 +1000 |
|---|---|---|
| committer | Greg Ungerer <gerg@uclinux.org> | 2008-08-07 15:36:16 +1000 |
| commit | 58750139001bae11a1f9b074f3a9c774fecf5ba8 (patch) | |
| tree | ecdafd4d8c3d2ef2cee7e512b7310552863a617c /arch/m68knommu/include/asm/processor.h | |
| parent | 685d87f7ccc649ab92b55e18e507a65d0e694eb9 (diff) | |
Move all of include/asm-m68knommu to arch/m68knommu/include/asm.
With the current kbuild infrastructure in place no other changes
are required for this to work.
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Diffstat (limited to 'arch/m68knommu/include/asm/processor.h')
-rw-r--r-- | arch/m68knommu/include/asm/processor.h | 143 |
1 file changed, 143 insertions, 0 deletions
diff --git a/arch/m68knommu/include/asm/processor.h b/arch/m68knommu/include/asm/processor.h
new file mode 100644
index 00000000000..91cba18acdd
--- /dev/null
+++ b/arch/m68knommu/include/asm/processor.h
@@ -0,0 +1,143 @@
+/*
+ * include/asm-m68knommu/processor.h
+ *
+ * Copyright (C) 1995 Hamish Macdonald
+ */
+
+#ifndef __ASM_M68K_PROCESSOR_H
+#define __ASM_M68K_PROCESSOR_H
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+#include <linux/compiler.h>
+#include <linux/threads.h>
+#include <asm/types.h>
+#include <asm/segment.h>
+#include <asm/fpu.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+
+static inline unsigned long rdusp(void)
+{
+#ifdef CONFIG_COLDFIRE
+        extern unsigned int sw_usp;
+        return(sw_usp);
+#else
+        unsigned long usp;
+        __asm__ __volatile__("move %/usp,%0" : "=a" (usp));
+        return usp;
+#endif
+}
+
+static inline void wrusp(unsigned long usp)
+{
+#ifdef CONFIG_COLDFIRE
+        extern unsigned int sw_usp;
+        sw_usp = usp;
+#else
+        __asm__ __volatile__("move %0,%/usp" : : "a" (usp));
+#endif
+}
+
+/*
+ * User space process size: 3.75GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+#define TASK_SIZE       (0xF0000000UL)
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's. We won't be using it
+ */
+#define TASK_UNMAPPED_BASE      0
+
+/*
+ * if you change this structure, you must change the code and offsets
+ * in m68k/machasm.S
+ */
+
+struct thread_struct {
+        unsigned long  ksp;             /* kernel stack pointer */
+        unsigned long  usp;             /* user stack pointer */
+        unsigned short sr;              /* saved status register */
+        unsigned short fs;              /* saved fs (sfc, dfc) */
+        unsigned long  crp[2];          /* cpu root pointer */
+        unsigned long  esp0;            /* points to SR of stack frame */
+        unsigned long  fp[8*3];
+        unsigned long  fpcntl[3];       /* fp control regs */
+        unsigned char  fpstate[FPSTATESIZE];  /* floating point state */
+};
+
+#define INIT_THREAD  { \
+        sizeof(init_stack) + (unsigned long) init_stack, 0, \
+        PS_S, __KERNEL_DS, \
+        {0, 0}, 0, {0,}, {0, 0, 0}, {0,}, \
+}
+
+/*
+ * Coldfire stacks need to be re-aligned on trap exit, conventional
+ * 68k can handle this case cleanly.
+ */
+#if defined(CONFIG_COLDFIRE)
+#define reformat(_regs)         do { (_regs)->format = 0x4; } while(0)
+#else
+#define reformat(_regs)         do { } while (0)
+#endif
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * pass the data segment into user programs if it exists,
+ * it can't hurt anything as far as I can tell
+ */
+#define start_thread(_regs, _pc, _usp)                  \
+do {                                                    \
+        set_fs(USER_DS); /* reads from user space */    \
+        (_regs)->pc = (_pc);                            \
+        ((struct switch_stack *)(_regs))[-1].a6 = 0;    \
+        reformat(_regs);                                \
+        if (current->mm)                                \
+                (_regs)->d5 = current->mm->start_data;  \
+        (_regs)->sr &= ~0x2000;                         \
+        wrusp(_usp);                                    \
+} while(0)
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk)    do { } while (0)
+
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/*
+ * Free current thread data structures etc..
+ */
+static inline void exit_thread(void)
+{
+}
+
+unsigned long thread_saved_pc(struct task_struct *tsk);
+unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) \
+    ({ \
+        unsigned long eip = 0;   \
+        if ((tsk)->thread.esp0 > PAGE_SIZE && \
+            (virt_addr_valid((tsk)->thread.esp0))) \
+              eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
+        eip; })
+#define KSTK_ESP(tsk)   ((tsk) == current ? rdusp() : (tsk)->thread.usp)
+
+#define cpu_relax()    barrier()
+
+#endif
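The most unusual construct in this header is the current_text_addr() macro, which combines two GCC extensions (statement expressions and labels-as-values) so that it evaluates to the address of the point in the code where it is expanded. The following user-space sketch is hypothetical and not part of this commit; it only demonstrates the same trick under GCC:

```c
#include <stdio.h>

/* Same trick as the kernel macro above: declare a local label, place it
 * here, and take its address with GCC's && operator. The surrounding
 * ({ ... }) statement expression makes that address the macro's value. */
#define current_text_addr() ({ __label__ _l; _l: &&_l; })

int main(void)
{
        void *here = current_text_addr();  /* roughly the current program counter */

        printf("current_text_addr() returned %p\n", here);
        return 0;
}
```

Because the label is emitted at the point of expansion, each use of the macro yields an address close to the instruction that evaluates it, which is why it serves as a cheap approximation of the program counter.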