Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/40x_mmu.c (renamed from arch/powerpc/mm/4xx_mmu.c) |  4
-rw-r--r--  arch/powerpc/mm/Makefile                                           | 15
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c                                    |  7
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c                                      |  3
-rw-r--r--  arch/powerpc/mm/init_32.c                                          | 38
-rw-r--r--  arch/powerpc/mm/init_64.c                                          |  2
-rw-r--r--  arch/powerpc/mm/mem.c                                              |  1
-rw-r--r--  arch/powerpc/mm/mmu_context_64.c                                   | 11
-rw-r--r--  arch/powerpc/mm/pgtable_64.c                                       |  2
-rw-r--r--  arch/powerpc/mm/slb.c                                              |  6
-rw-r--r--  arch/powerpc/mm/slice.c                                            |  1
11 files changed, 57 insertions, 33 deletions
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 7ff2609b64d..e067df836be 100644
--- a/arch/powerpc/mm/4xx_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -108,7 +108,7 @@ unsigned long __init mmu_mapin_ram(void)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
-		pmdp = pmd_offset(pgd_offset_k(v), v);
+		pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
 		pmd_val(*pmdp++) = val;
 		pmd_val(*pmdp++) = val;
 		pmd_val(*pmdp++) = val;
@@ -123,7 +123,7 @@ unsigned long __init mmu_mapin_ram(void)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
-		pmdp = pmd_offset(pgd_offset_k(v), v);
+		pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
 		pmd_val(*pmdp) = val;
 
 		v += LARGE_PAGE_SIZE_4M;
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 7e4d27ad3de..20629ae95c5 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -6,14 +6,17 @@ ifeq ($(CONFIG_PPC64),y)
 EXTRA_CFLAGS	+= -mno-minimal-toc
 endif
 
-obj-y				:= fault.o mem.o lmb.o
-obj-$(CONFIG_PPC32)		+= init_32.o pgtable_32.o mmu_context_32.o
+obj-y				:= fault.o mem.o lmb.o \
+				   init_$(CONFIG_WORD_SIZE).o \
+				   pgtable_$(CONFIG_WORD_SIZE).o \
+				   mmu_context_$(CONFIG_WORD_SIZE).o
 hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
-obj-$(CONFIG_PPC64)		+= init_64.o pgtable_64.o mmu_context_64.o \
-				   hash_utils_64.o hash_low_64.o tlb_64.o \
+obj-$(CONFIG_PPC64)		+= hash_utils_64.o \
 				   slb_low.o slb.o stab.o mmap.o $(hash-y)
-obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o hash_low_32.o tlb_32.o
-obj-$(CONFIG_40x)		+= 4xx_mmu.o
+obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o
+obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
+				   tlb_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_40x)		+= 40x_mmu.o
 obj-$(CONFIG_44x)		+= 44x_mmu.o
 obj-$(CONFIG_FSL_BOOKE)	+= fsl_booke_mmu.o
 obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a47151e806c..d525f2eba31 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -49,7 +49,6 @@
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
 #include <asm/cputable.h>
-#include <asm/abs_addr.h>
 #include <asm/sections.h>
 #include <asm/spu.h>
 
@@ -602,13 +601,7 @@ static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
 {
 	if (mm->context.user_psize == MMU_PAGE_4K)
 		return;
-#ifdef CONFIG_PPC_MM_SLICES
 	slice_set_user_psize(mm, MMU_PAGE_4K);
-#else /* CONFIG_PPC_MM_SLICES */
-	mm->context.user_psize = MMU_PAGE_4K;
-	mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
-#endif /* CONFIG_PPC_MM_SLICES */
-
 #ifdef CONFIG_SPU_BASE
 	spu_flush_all_slbs(mm);
 #endif
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 4835f73af30..ba5f12a6046 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -22,11 +22,8 @@
 #include <asm/mmu_context.h>
 #include <asm/machdep.h>
 #include <asm/cputable.h>
-#include <asm/tlb.h>
 #include <asm/spu.h>
 
-#include <linux/sysctl.h>
-
 #define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
 #define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
 
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index e1f5ded851f..27c234fb511 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -41,7 +41,6 @@
 #include <asm/machdep.h>
 #include <asm/btext.h>
 #include <asm/tlb.h>
-#include <asm/prom.h>
 #include <asm/lmb.h>
 #include <asm/sections.h>
 
@@ -256,3 +255,40 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	}
 }
 #endif
+
+#ifdef CONFIG_PROC_KCORE
+static struct kcore_list kcore_vmem;
+
+static int __init setup_kcore(void)
+{
+	int i;
+
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		unsigned long base;
+		unsigned long size;
+		struct kcore_list *kcore_mem;
+
+		base = lmb.memory.region[i].base;
+		size = lmb.memory.region[i].size;
+
+		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
+		if (!kcore_mem)
+			panic("%s: kmalloc failed\n", __FUNCTION__);
+
+		/* must stay under 32 bits */
+		if ( 0xfffffffful - (unsigned long)__va(base) < size) {
+			size = 0xfffffffful - (unsigned long)(__va(base));
+			printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n",
+				size);
+		}
+
+		kclist_add(kcore_mem, __va(base), size);
+	}
+
+	kclist_add(&kcore_vmem, (void *)VMALLOC_START,
+		VMALLOC_END-VMALLOC_START);
+
+	return 0;
+}
+module_init(setup_kcore);
+#endif
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9f27bb56a61..fa90f6561b9 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -113,6 +113,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
+#ifdef CONFIG_PROC_KCORE
 static struct kcore_list kcore_vmem;
 
 static int __init setup_kcore(void)
@@ -139,6 +140,7 @@ static int __init setup_kcore(void)
 	return 0;
 }
 module_init(setup_kcore);
+#endif
 
 static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f0e7eedb1ba..32dcfc9b008 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -42,7 +42,6 @@
 #include <asm/machdep.h>
 #include <asm/btext.h>
 #include <asm/tlb.h>
-#include <asm/prom.h>
 #include <asm/lmb.h>
 #include <asm/sections.h>
 #include <asm/vdso.h>
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
index 7a78cdc0515..1db38ba1f54 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -28,7 +28,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int index;
 	int err;
-	int new_context = (mm->context.id == 0);
 
 again:
 	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
@@ -50,19 +49,13 @@ again:
 		return -ENOMEM;
 	}
 
-	mm->context.id = index;
-#ifdef CONFIG_PPC_MM_SLICES
 	/* The old code would re-promote on fork, we don't do that
 	 * when using slices as it could cause problem promoting slices
 	 * that have been forced down to 4K
 	 */
-	if (new_context)
+	if (slice_mm_new_context(mm))
 		slice_set_user_psize(mm, mmu_virtual_psize);
-#else
-	mm->context.user_psize = mmu_virtual_psize;
-	mm->context.sllp = SLB_VSID_USER |
-		mmu_psize_defs[mmu_virtual_psize].sllp;
-#endif
+	mm->context.id = index;
 
 	return 0;
 }
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3dfd10db931..60fd52cd270 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -228,5 +228,7 @@ void iounmap(volatile void __iomem *token)
 EXPORT_SYMBOL(ioremap);
 EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__ioremap_at);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(__iounmap_at);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index ff1811ac6c8..4bee1cfa9de 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -59,14 +59,12 @@ static inline void slb_shadow_update(unsigned long ea,
 {
 	/*
 	 * Clear the ESID first so the entry is not valid while we are
-	 * updating it.
+	 * updating it.  No write barriers are needed here, provided
+	 * we only update the current CPU's SLB shadow buffer.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	smp_wmb();
 	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
-	smp_wmb();
 	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
-	smp_wmb();
 }
 
 static inline void slb_shadow_clear(unsigned long entry)
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index d5fd3909d13..319826ef164 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -551,6 +551,7 @@ EXPORT_SYMBOL_GPL(get_slice_psize);
  *
  * This is also called in init_new_context() to change back the user
  * psize from whatever the parent context had it set to
+ * N.B. This may be called before mm->context.id has been set.
  *
  * This function will only change the content of the {low,high)_slice_psize
  * masks, it will not flush SLBs as this shall be handled lazily by the
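
Side note (not part of the commit above): the two 40x_mmu.c hunks switch the kernel page-table walk to go through pud_offset() because pmd_offset() in the generic four-level scheme takes a pud_t *, not a pgd_t *. A minimal sketch of the resulting walk is shown below, assuming the standard pgd_offset_k()/pud_offset()/pmd_offset() helpers; the kernel_pmd_for() wrapper name is illustrative only and does not exist in the tree.

	/*
	 * Illustrative sketch: the pgd -> pud -> pmd walk used by the hunks.
	 * On configurations where the pud level is folded, pud_offset()
	 * simply passes the pgd entry through, so this is equivalent to
	 * the old direct pgd-to-pmd walk while matching the generic API.
	 */
	static pmd_t *kernel_pmd_for(unsigned long v)
	{
		pgd_t *pgdp = pgd_offset_k(v);		/* kernel pgd entry covering v */
		pud_t *pudp = pud_offset(pgdp, v);	/* folded pud level */

		return pmd_offset(pudp, v);		/* pmd entry for address v */
	}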