author | Paul Mackerras <paulus@samba.org> | 2008-05-16 23:13:42 +1000
---|---|---
committer | Paul Mackerras <paulus@samba.org> | 2008-05-16 23:13:42 +1000
commit | fcff474ea5cb17ff015aa40e92ed86fede41f1e2 (patch) |
tree | a99c0e14daaf31cb078812fb2fbc6abadfcd738f /arch/powerpc/mm |
parent | 541b2755c2ef7dd2242ac606c115daa11e43ef69 (diff) |
parent | f26a3988917913b3d11b2bd741601a2c64ab9204 (diff) |
Merge branch 'linux-2.6' into powerpc-next
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r-- | arch/powerpc/mm/hash_utils_64.c | 28
-rw-r--r-- | arch/powerpc/mm/init_64.c | 10
-rw-r--r-- | arch/powerpc/mm/slb.c | 16
-rw-r--r-- | arch/powerpc/mm/slb_low.S | 16
4 files changed, 59 insertions, 11 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 05f955f8df3..bf5b6d7ed30 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -94,6 +94,9 @@ unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
 int mmu_vmalloc_psize = MMU_PAGE_4K;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
 int mmu_io_psize = MMU_PAGE_4K;
 int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 int mmu_highuser_ssize = MMU_SEGSIZE_256M;
@@ -387,11 +390,32 @@ static void __init htab_init_page_sizes(void)
 	}
 #endif /* CONFIG_PPC_64K_PAGES */
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	/* We try to use 16M pages for vmemmap if that is supported
+	 * and we have at least 1G of RAM at boot
+	 */
+	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
+	    lmb_phys_mem_size() >= 0x40000000)
+		mmu_vmemmap_psize = MMU_PAGE_16M;
+	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
+		mmu_vmemmap_psize = MMU_PAGE_64K;
+	else
+		mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
 	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
-	       "virtual = %d, io = %d\n",
+	       "virtual = %d, io = %d"
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	       ", vmemmap = %d"
+#endif
+	       "\n",
 	       mmu_psize_defs[mmu_linear_psize].shift,
 	       mmu_psize_defs[mmu_virtual_psize].shift,
-	       mmu_psize_defs[mmu_io_psize].shift);
+	       mmu_psize_defs[mmu_io_psize].shift
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
+#endif
+	       );
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/* Init large page size. Currently, we pick 16M or 1M depending
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 1e52e97d740..6ef63caca68 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -19,6 +19,8 @@
  *
  */
 
+#undef DEBUG
+
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -208,12 +210,12 @@ static int __meminit vmemmap_populated(unsigned long start, int page_size)
 }
 
 int __meminit vmemmap_populate(struct page *start_page,
-					unsigned long nr_pages, int node)
+			       unsigned long nr_pages, int node)
 {
 	unsigned long mode_rw;
 	unsigned long start = (unsigned long)start_page;
 	unsigned long end = (unsigned long)(start_page + nr_pages);
-	unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
 	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
 
@@ -235,11 +237,11 @@ int __meminit vmemmap_populate(struct page *start_page,
 			start, p, __pa(p));
 
 		mapped = htab_bolt_mapping(start, start + page_size,
-					   __pa(p), mode_rw, mmu_linear_psize,
+					   __pa(p), mode_rw, mmu_vmemmap_psize,
 					   mmu_kernel_ssize);
 		BUG_ON(mapped < 0);
 	}
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index cf8705e32d6..89497fb0428 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -28,7 +28,7 @@
 #include <asm/udbg.h>
 
 #ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) printk(fmt)
 #else
 #define DBG pr_debug
 #endif
@@ -263,13 +263,19 @@ void slb_initialize(void)
 	extern unsigned int *slb_miss_kernel_load_linear;
 	extern unsigned int *slb_miss_kernel_load_io;
 	extern unsigned int *slb_compare_rr_to_size;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	extern unsigned int *slb_miss_kernel_load_vmemmap;
+	unsigned long vmemmap_llp;
+#endif
 
 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
 	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
-
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
+#endif
 	if (!slb_encoding_inited) {
 		slb_encoding_inited = 1;
 		patch_slb_encoding(slb_miss_kernel_load_linear,
@@ -281,6 +287,12 @@ void slb_initialize(void)
 
 		DBG("SLB: linear  LLP = %04lx\n", linear_llp);
 		DBG("SLB: io      LLP = %04lx\n", io_llp);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
+				   SLB_VSID_KERNEL | vmemmap_llp);
+		DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+#endif
 	}
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED;
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 657f6b37e9d..bc44dc4b5c6 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -47,8 +47,7 @@ _GLOBAL(slb_allocate_realmode)
 	 * it to VSID 0, which is reserved as a bad VSID - one which
 	 * will never have any pages in it.  */
 
-	/* Check if hitting the linear mapping of the vmalloc/ioremap
-	 * kernel space
+	/* Check if hitting the linear mapping or some other kernel space
 	*/
 	bne	cr7,1f
 
@@ -62,7 +61,18 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
-1:	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
+1:
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	/* Check virtual memmap region. To be patched at kernel boot */
+	cmpldi	cr0,r9,0xf
+	bne	1f
+_GLOBAL(slb_miss_kernel_load_vmemmap)
+	li	r11,0
+	b	6f
+1:
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
 	 * will be patched by the kernel at boot
 	 */
 BEGIN_FTR_SECTION
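The most consequential hunk above is the boot-time selection of mmu_vmemmap_psize in htab_init_page_sizes(). As a quick illustration, here is a minimal, standalone C sketch of that fallback chain (not kernel code); the page-size table contents and the memory size here are hypothetical stand-ins for the kernel's mmu_psize_defs[] and lmb_phys_mem_size():

```c
#include <stdio.h>

enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M, MMU_PAGE_COUNT };

/* shift == 0 marks a page size the hypothetical machine lacks */
struct psize_def { unsigned int shift; };

int main(void)
{
	/* Hypothetical machine: 4K and 16M pages supported, no 64K, 2G RAM */
	struct psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
		[MMU_PAGE_4K]  = { .shift = 12 },
		[MMU_PAGE_16M] = { .shift = 24 },
	};
	unsigned long long phys_mem_size = 0x80000000ULL; /* 2G */
	int mmu_vmemmap_psize;

	/* Same fallback chain as the patched htab_init_page_sizes():
	 * prefer 16M when supported and at least 1G of RAM is present,
	 * else fall back to 64K, and finally to 4K. */
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    phys_mem_size >= 0x40000000)
		mmu_vmemmap_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
		mmu_vmemmap_psize = MMU_PAGE_64K;
	else
		mmu_vmemmap_psize = MMU_PAGE_4K;

	printf("vmemmap page shift = %u\n",
	       mmu_psize_defs[mmu_vmemmap_psize].shift);
	return 0;
}
```

With these stand-in values the sketch selects MMU_PAGE_16M (shift 24); a machine with less than 1G of RAM, or without 16M pages, degrades to 64K and then 4K, which is the same ordering the slb.c and slb_low.S hunks rely on when patching the SLB miss handler's encoding for the vmemmap region.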