Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--   include/asm-powerpc/bitops.h         |   5
-rw-r--r--   include/asm-powerpc/futex.h          |   2
-rw-r--r--   include/asm-powerpc/hugetlb.h        |  79
-rw-r--r--   include/asm-powerpc/irq.h            |   2
-rw-r--r--   include/asm-powerpc/kvm.h            |  53
-rw-r--r--   include/asm-powerpc/kvm_asm.h        |  55
-rw-r--r--   include/asm-powerpc/kvm_host.h       | 152
-rw-r--r--   include/asm-powerpc/kvm_para.h       |  37
-rw-r--r--   include/asm-powerpc/kvm_ppc.h        |  88
-rw-r--r--   include/asm-powerpc/mmu-44x.h        |   2
-rw-r--r--   include/asm-powerpc/page_64.h        |   7
-rw-r--r--   include/asm-powerpc/pgtable-ppc32.h  |   3
-rw-r--r--   include/asm-powerpc/pgtable-ppc64.h  |   3
-rw-r--r--   include/asm-powerpc/processor.h      |   6
-rw-r--r--   include/asm-powerpc/rio.h            |  18
-rw-r--r--   include/asm-powerpc/system.h         |  24
-rw-r--r--   include/asm-powerpc/unaligned.h      |  11
17 files changed, 516 insertions(+), 31 deletions(-)
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index a99a7492947..897eade3afb 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -313,6 +313,11 @@ static __inline__ int fls(unsigned int x)
 	return 32 - lz;
 }
 
+static __inline__ unsigned long __fls(unsigned long x)
+{
+	return __ilog2(x);
+}
+
 /*
  * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
  * instruction; for 32-bit we use the generic version, which does two
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index 3f3673fd3ff..6d406c5c5de 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -4,9 +4,9 @@
 #ifdef __KERNEL__
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/errno.h>
 #include <asm/synch.h>
-#include <asm/uaccess.h>
 #include <asm/asm-compat.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h
new file mode 100644
index 00000000000..649c6c3b87b
--- /dev/null
+++ b/include/asm-powerpc/hugetlb.h
@@ -0,0 +1,79 @@
+#ifndef _ASM_POWERPC_HUGETLB_H
+#define _ASM_POWERPC_HUGETLB_H
+
+#include <asm/page.h>
+
+
+int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
+			   unsigned long len);
+
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+			    unsigned long end, unsigned long floor,
+			    unsigned long ceiling);
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t pte);
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep);
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+	return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_POWERPC_HUGETLB_H */
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index b5c03127a9b..5089deb8fec 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -619,8 +619,6 @@ struct pt_regs;
 
 #define __ARCH_HAS_DO_SOFTIRQ
 
-extern void __do_softirq(void);
-
 #ifdef CONFIG_IRQSTACKS
 /*
  * Per-cpu stacks for handling hard and soft interrupts.
diff --git a/include/asm-powerpc/kvm.h b/include/asm-powerpc/kvm.h
index d1b530fbf8d..f993e4198d5 100644
--- a/include/asm-powerpc/kvm.h
+++ b/include/asm-powerpc/kvm.h
@@ -1,6 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
 #ifndef __LINUX_KVM_POWERPC_H
 #define __LINUX_KVM_POWERPC_H
 
-/* powerpc does not support KVM */
+#include <asm/types.h>
+
+struct kvm_regs {
+	__u64 pc;
+	__u64 cr;
+	__u64 ctr;
+	__u64 lr;
+	__u64 xer;
+	__u64 msr;
+	__u64 srr0;
+	__u64 srr1;
+	__u64 pid;
+
+	__u64 sprg0;
+	__u64 sprg1;
+	__u64 sprg2;
+	__u64 sprg3;
+	__u64 sprg4;
+	__u64 sprg5;
+	__u64 sprg6;
+	__u64 sprg7;
+
+	__u64 gpr[32];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+	__u64 fpr[32];
+};
 
-#endif
+#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/include/asm-powerpc/kvm_asm.h b/include/asm-powerpc/kvm_asm.h
new file mode 100644
index 00000000000..2197764796d
--- /dev/null
+++ b/include/asm-powerpc/kvm_asm.h
@@ -0,0 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_ASM_H__
+#define __POWERPC_KVM_ASM_H__
+
+/* IVPR must be 64KiB-aligned. */
+#define VCPU_SIZE_ORDER 4
+#define VCPU_SIZE_LOG   (VCPU_SIZE_ORDER + 12)
+#define VCPU_TLB_PGSZ   PPC44x_TLB_64K
+#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
+
+#define BOOKE_INTERRUPT_CRITICAL 0
+#define BOOKE_INTERRUPT_MACHINE_CHECK 1
+#define BOOKE_INTERRUPT_DATA_STORAGE 2
+#define BOOKE_INTERRUPT_INST_STORAGE 3
+#define BOOKE_INTERRUPT_EXTERNAL 4
+#define BOOKE_INTERRUPT_ALIGNMENT 5
+#define BOOKE_INTERRUPT_PROGRAM 6
+#define BOOKE_INTERRUPT_FP_UNAVAIL 7
+#define BOOKE_INTERRUPT_SYSCALL 8
+#define BOOKE_INTERRUPT_AP_UNAVAIL 9
+#define BOOKE_INTERRUPT_DECREMENTER 10
+#define BOOKE_INTERRUPT_FIT 11
+#define BOOKE_INTERRUPT_WATCHDOG 12
+#define BOOKE_INTERRUPT_DTLB_MISS 13
+#define BOOKE_INTERRUPT_ITLB_MISS 14
+#define BOOKE_INTERRUPT_DEBUG 15
+#define BOOKE_MAX_INTERRUPT 15
+
+#define RESUME_FLAG_NV          (1<<0)  /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_GUEST_NV         RESUME_FLAG_NV
+#define RESUME_HOST             RESUME_FLAG_HOST
+#define RESUME_HOST_NV          (RESUME_FLAG_HOST|RESUME_FLAG_NV)
+
+#endif /* __POWERPC_KVM_ASM_H__ */
diff --git a/include/asm-powerpc/kvm_host.h b/include/asm-powerpc/kvm_host.h
new file mode 100644
index 00000000000..04ffbb8e0a3
--- /dev/null
+++ b/include/asm-powerpc/kvm_host.h
@@ -0,0 +1,152 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_HOST_H__
+#define __POWERPC_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <asm/kvm_asm.h>
+
+#define KVM_MAX_VCPUS 1
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that does not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+/* We don't currently support large pages. */
+#define KVM_PAGES_PER_HPAGE (1<<31)
+
+struct kvm;
+struct kvm_run;
+struct kvm_vcpu;
+
+struct kvm_vm_stat {
+	u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+	u32 sum_exits;
+	u32 mmio_exits;
+	u32 dcr_exits;
+	u32 signal_exits;
+	u32 light_exits;
+	/* Account for special types of light exits: */
+	u32 itlb_real_miss_exits;
+	u32 itlb_virt_miss_exits;
+	u32 dtlb_real_miss_exits;
+	u32 dtlb_virt_miss_exits;
+	u32 syscall_exits;
+	u32 isi_exits;
+	u32 dsi_exits;
+	u32 emulated_inst_exits;
+	u32 dec_exits;
+	u32 ext_intr_exits;
+};
+
+struct tlbe {
+	u32 tid; /* Only the low 8 bits are used. */
+	u32 word0;
+	u32 word1;
+	u32 word2;
+};
+
+struct kvm_arch {
+};
+
+struct kvm_vcpu_arch {
+	/* Unmodified copy of the guest's TLB. */
+	struct tlbe guest_tlb[PPC44x_TLB_SIZE];
+	/* TLB that's actually used when the guest is running. */
+	struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
+	/* Pages which are referenced in the shadow TLB. */
+	struct page *shadow_pages[PPC44x_TLB_SIZE];
+	/* Copy of the host's TLB. */
+	struct tlbe host_tlb[PPC44x_TLB_SIZE];
+
+	u32 host_stack;
+	u32 host_pid;
+
+	u64 fpr[32];
+	u32 gpr[32];
+
+	u32 pc;
+	u32 cr;
+	u32 ctr;
+	u32 lr;
+	u32 xer;
+
+	u32 msr;
+	u32 mmucr;
+	u32 sprg0;
+	u32 sprg1;
+	u32 sprg2;
+	u32 sprg3;
+	u32 sprg4;
+	u32 sprg5;
+	u32 sprg6;
+	u32 sprg7;
+	u32 srr0;
+	u32 srr1;
+	u32 csrr0;
+	u32 csrr1;
+	u32 dsrr0;
+	u32 dsrr1;
+	u32 dear;
+	u32 esr;
+	u32 dec;
+	u32 decar;
+	u32 tbl;
+	u32 tbu;
+	u32 tcr;
+	u32 tsr;
+	u32 ivor[16];
+	u32 ivpr;
+	u32 pir;
+	u32 pid;
+	u32 pvr;
+	u32 ccr0;
+	u32 ccr1;
+	u32 dbcr0;
+	u32 dbcr1;
+
+	u32 last_inst;
+	u32 fault_dear;
+	u32 fault_esr;
+	gpa_t paddr_accessed;
+
+	u8 io_gpr; /* GPR used as IO source/target */
+	u8 mmio_is_bigendian;
+	u8 dcr_needed;
+	u8 dcr_is_write;
+
+	u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
+
+	struct timer_list dec_timer;
+	unsigned long pending_exceptions;
+};
+
+struct kvm_guest_debug {
+	int enabled;
+	unsigned long bp[4];
+	int singlestep;
+};
+
+#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/include/asm-powerpc/kvm_para.h b/include/asm-powerpc/kvm_para.h
new file mode 100644
index 00000000000..2d48f6a63d0
--- /dev/null
+++ b/include/asm-powerpc/kvm_para.h
@@ -0,0 +1,37 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PARA_H__
+#define __POWERPC_KVM_PARA_H__
+
+#ifdef __KERNEL__
+
+static inline int kvm_para_available(void)
+{
+	return 0;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+	return 0;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h
new file mode 100644
index 00000000000..7ac820308a7
--- /dev/null
+++ b/include/asm-powerpc/kvm_ppc.h
@@ -0,0 +1,88 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PPC_H__
+#define __POWERPC_KVM_PPC_H__
+
+/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
+ * dependencies. */
+
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+
+struct kvm_tlb {
+	struct tlbe guest_tlb[PPC44x_TLB_SIZE];
+	struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
+};
+
+enum emulation_result {
+	EMULATE_DONE,         /* no further processing */
+	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
+	EMULATE_DO_DCR,       /* kvm_run filled with DCR request */
+	EMULATE_FAIL,         /* can't emulate this instruction */
+};
+
+extern const unsigned char exception_priority[];
+extern const unsigned char priority_exception[];
+
+extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern char kvmppc_handlers_start[];
+extern unsigned long kvmppc_handler_len;
+
+extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                              unsigned int rt, unsigned int bytes,
+                              int is_bigendian);
+extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                               u32 val, unsigned int bytes, int is_bigendian);
+
+extern int kvmppc_emulate_instruction(struct kvm_run *run,
+                                      struct kvm_vcpu *vcpu);
+
+extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
+                           u64 asid, u32 flags);
+extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid);
+extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
+
+extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
+
+static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception)
+{
+	unsigned int priority = exception_priority[exception];
+	set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
+{
+	unsigned int priority = exception_priority[exception];
+	clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+	if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
+		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+
+	vcpu->arch.msr = new_msr;
+}
+
+#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/include/asm-powerpc/mmu-44x.h b/include/asm-powerpc/mmu-44x.h
index c8b02d97f75..a825524c981 100644
--- a/include/asm-powerpc/mmu-44x.h
+++ b/include/asm-powerpc/mmu-44x.h
@@ -53,6 +53,8 @@
 
 #ifndef __ASSEMBLY__
 
+extern unsigned int tlb_44x_hwater;
+
 typedef struct {
 	unsigned long id;
 	unsigned long vdso_base;
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 67834eae570..25af4fc8daf 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -128,11 +128,6 @@ extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
 extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
 #define slice_mm_new_context(mm)	((mm)->context.id == 0)
 
-#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-extern int is_hugepage_only_range(struct mm_struct *m,
-				  unsigned long addr,
-				  unsigned long len);
-
 #endif /* __ASSEMBLY__ */
 #else
 #define slice_init()
@@ -146,8 +141,6 @@ do {			\
 
 #ifdef CONFIG_HUGETLB_PAGE
 
-#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 
 #endif /* !CONFIG_HUGETLB_PAGE */
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index daea7692d07..7c97b5a08d0 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -504,6 +504,7 @@ static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte)	{ return 0; }
 
 static inline void pte_uncache(pte_t pte)       { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)         { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -521,6 +522,8 @@ static inline pte_t pte_mkdirty(pte_t pte) {
 	pte_val(pte) |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+	return pte; }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index dd4c26dc57d..27f18695f7d 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -239,6 +239,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
+static inline int pte_special(pte_t pte) { return 0; }
 
 static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)  { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -257,6 +258,8 @@ static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkhuge(pte_t pte) {
 	return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+	return pte; }
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index fd98ca998b4..cf83f2d7e2a 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -138,6 +138,8 @@ typedef struct {
 
 struct thread_struct {
 	unsigned long	ksp;		/* Kernel stack pointer */
+	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
+
 #ifdef CONFIG_PPC64
 	unsigned long	ksp_vsid;
 #endif
@@ -182,11 +184,14 @@ struct thread_struct {
 #define ARCH_MIN_TASKALIGN 16
 
 #define INIT_SP		(sizeof(init_stack) + (unsigned long) &init_stack)
+#define INIT_SP_LIMIT \
+	(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
 
 #ifdef CONFIG_PPC32
 #define INIT_THREAD { \
 	.ksp = INIT_SP, \
+	.ksp_limit = INIT_SP_LIMIT, \
 	.fs = KERNEL_DS, \
 	.pgdir = swapper_pg_dir, \
 	.fpexc_mode = MSR_FE0 | MSR_FE1, \
@@ -194,6 +199,7 @@ struct thread_struct {
 #else
 #define INIT_THREAD  { \
 	.ksp = INIT_SP, \
+	.ksp_limit = INIT_SP_LIMIT, \
 	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
 	.fs = KERNEL_DS, \
 	.fpr = {0}, \
diff --git a/include/asm-powerpc/rio.h b/include/asm-powerpc/rio.h
new file mode 100644
index 00000000000..0018bf80cb2
--- /dev/null
+++ b/include/asm-powerpc/rio.h
@@ -0,0 +1,18 @@
+/*
+ * RapidIO architecture support
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ASM_PPC_RIO_H
+#define ASM_PPC_RIO_H
+
+extern void platform_rio_init(void);
+
+#endif				/* ASM_PPC_RIO_H */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index fab1674b31b..2b6559a6d11 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -204,7 +204,7 @@ extern int powersave_nap;	/* set if nap mode can be used in idle loop */
  * Changes the memory location '*ptr' to be val and returns
  * the previous value stored there.
  */
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_u32(volatile void *p, unsigned long val)
 {
 	unsigned long prev;
@@ -229,7 +229,7 @@ __xchg_u32(volatile void *p, unsigned long val)
  * Changes the memory location '*ptr' to be val and returns
  * the previous value stored there.
  */
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_u32_local(volatile void *p, unsigned long val)
 {
 	unsigned long prev;
@@ -247,7 +247,7 @@ __xchg_u32_local(volatile void *p, unsigned long val)
 }
 
 #ifdef CONFIG_PPC64
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_u64(volatile void *p, unsigned long val)
 {
 	unsigned long prev;
@@ -266,7 +266,7 @@ __xchg_u64(volatile void *p, unsigned long val)
 	return prev;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_u64_local(volatile void *p, unsigned long val)
 {
 	unsigned long prev;
@@ -290,7 +290,7 @@ __xchg_u64_local(volatile void *p, unsigned long val)
  */
 extern void __xchg_called_with_bad_pointer(void);
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg(volatile void *ptr, unsigned long x, unsigned int size)
 {
 	switch (size) {
@@ -305,7 +305,7 @@ __xchg(volatile void *ptr, unsigned long x, unsigned int size)
 	return x;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
 {
 	switch (size) {
@@ -338,7 +338,7 @@ __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
  */
 #define __HAVE_ARCH_CMPXCHG	1
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 {
 	unsigned int prev;
@@ -361,7 +361,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 	return prev;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
 			unsigned long new)
 {
@@ -384,7 +384,7 @@ __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
 }
 
 #ifdef CONFIG_PPC64
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 {
 	unsigned long prev;
@@ -406,7 +406,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 	return prev;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
 			unsigned long new)
 {
@@ -432,7 +432,7 @@ __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
    if something tries to do an invalid cmpxchg().  */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
 	  unsigned int size)
 {
@@ -448,7 +448,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
 	return old;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
 	  unsigned int size)
 {
diff --git a/include/asm-powerpc/unaligned.h b/include/asm-powerpc/unaligned.h
index 6c95dfa2652..5f1b1e3c213 100644
--- a/include/asm-powerpc/unaligned.h
+++ b/include/asm-powerpc/unaligned.h
@@ -5,15 +5,12 @@
 
 /*
  * The PowerPC can do unaligned accesses itself in big endian mode.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
-
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
 
 #endif	/* __KERNEL__ */
 #endif	/* _ASM_POWERPC_UNALIGNED_H */
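
The unaligned.h hunk above replaces the old plain-dereference macros with the kernel's generic big-endian accessors. As a rough usage sketch (the helper below is hypothetical and not part of this commit), callers keep the same get_unaligned(ptr) interface; the new __get_unaligned_be macro dispatches on the pointee size, so a u32 pointer resolves to get_unaligned_be32():

	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Hypothetical caller: fetch a 32-bit field that sits at a
	 * deliberately misaligned offset inside a packet buffer. */
	static u32 example_read_be32(const u8 *pkt)
	{
		const u32 *field = (const u32 *)(pkt + 2);	/* not 4-byte aligned */

		return get_unaligned(field);	/* -> get_unaligned_be32(field) */
	}

Since big-endian accessors match native byte order on this platform, these helpers compile down to plain loads and stores; as the header comment notes, the CPU performs the misaligned access itself, which is why the old macros could be swapped out directly.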