-rw-r--r--  arch/powerpc/mm/hash_utils_64.c      28
-rw-r--r--  arch/powerpc/mm/init_64.c            10
-rw-r--r--  arch/powerpc/mm/slb.c                16
-rw-r--r--  arch/powerpc/mm/slb_low.S            16
-rw-r--r--  include/asm-powerpc/mmu-hash64.h      1
-rw-r--r--  include/asm-powerpc/pgtable-ppc64.h  10
6 files changed, 65 insertions, 16 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2b5a399f6fa..0f2d239d94c 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -94,6 +94,9 @@ unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
@@ -387,11 +390,32 @@ static void __init htab_init_page_sizes(void)
}
#endif /* CONFIG_PPC_64K_PAGES */
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ /* We try to use 16M pages for vmemmap if that is supported
+ * and we have at least 1G of RAM at boot
+ */
+ if (mmu_psize_defs[MMU_PAGE_16M].shift &&
+ lmb_phys_mem_size() >= 0x40000000)
+ mmu_vmemmap_psize = MMU_PAGE_16M;
+ else if (mmu_psize_defs[MMU_PAGE_64K].shift)
+ mmu_vmemmap_psize = MMU_PAGE_64K;
+ else
+ mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
printk(KERN_DEBUG "Page orders: linear mapping = %d, "
- "virtual = %d, io = %d\n",
+ "virtual = %d, io = %d"
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ ", vmemmap = %d"
+#endif
+ "\n",
mmu_psize_defs[mmu_linear_psize].shift,
mmu_psize_defs[mmu_virtual_psize].shift,
- mmu_psize_defs[mmu_io_psize].shift);
+ mmu_psize_defs[mmu_io_psize].shift
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ ,mmu_psize_defs[mmu_vmemmap_psize].shift
+#endif
+ );
#ifdef CONFIG_HUGETLB_PAGE
/* Init large page size. Currently, we pick 16M or 1M depending
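The hunk above implements a simple fallback policy for the vmemmap page size. Below is a minimal standalone sketch of that policy, with mmu_psize_defs[] stubbed and lmb_phys_mem_size() replaced by a plain parameter; both stubs are assumptions for illustration, only the decision logic is taken from the patch.

#include <stdio.h>

#define MMU_PAGE_4K	0
#define MMU_PAGE_64K	1
#define MMU_PAGE_16M	2

/* Stub of the kernel's mmu_psize_defs[]: shift == 0 means the
 * page size is not supported by this CPU/MMU. */
struct mmu_psize_def { unsigned int shift; };

static struct mmu_psize_def mmu_psize_defs[] = {
	[MMU_PAGE_4K]	= { .shift = 12 },
	[MMU_PAGE_64K]	= { .shift = 16 },
	[MMU_PAGE_16M]	= { .shift = 24 },
};

/* Prefer 16M pages for the vmemmap when the MMU supports them and
 * at least 1G of RAM is present at boot; fall back to 64K, then 4K. */
static int pick_vmemmap_psize(unsigned long phys_mem_size)
{
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    phys_mem_size >= 0x40000000UL)
		return MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
		return MMU_PAGE_64K;
	return MMU_PAGE_4K;
}

int main(void)
{
	/* 2G of RAM: expect MMU_PAGE_16M (2) */
	printf("vmemmap psize = %d\n", pick_vmemmap_psize(0x80000000UL));
	return 0;
}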
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c5ac532a016..6aa65375abf 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -19,6 +19,8 @@
*
*/
+#undef DEBUG
+
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -208,12 +210,12 @@ int __meminit vmemmap_populated(unsigned long start, int page_size)
}
int __meminit vmemmap_populate(struct page *start_page,
- unsigned long nr_pages, int node)
+ unsigned long nr_pages, int node)
{
unsigned long mode_rw;
unsigned long start = (unsigned long)start_page;
unsigned long end = (unsigned long)(start_page + nr_pages);
- unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+ unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
@@ -235,11 +237,11 @@ int __meminit vmemmap_populate(struct page *start_page,
start, p, __pa(p));
mapped = htab_bolt_mapping(start, start + page_size,
- __pa(p), mode_rw, mmu_linear_psize,
+ __pa(p), mode_rw, mmu_vmemmap_psize,
mmu_kernel_ssize);
BUG_ON(mapped < 0);
}
return 0;
}
-#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
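The change to vmemmap_populate() is small, but its effect is easiest to see in the shape of the whole loop: mappings are now bolted at the vmemmap page size rather than the linear-mapping size. A standalone sketch follows, with the hash insertion and populated-check reduced to stubs; the stub signatures, the alignment step, and the pa value are assumptions, only the fragments shown in the hunks come from the patch.

#include <stdio.h>

static unsigned int mmu_vmemmap_shift = 24;	/* e.g. 16M pages */

/* Stub: in the kernel, vmemmap_populated() checks whether the
 * range already has backing; here nothing is mapped yet. */
static int vmemmap_populated(unsigned long start, unsigned long size)
{
	(void)start; (void)size;
	return 0;
}

/* Stub for htab_bolt_mapping(): just record what would be bolted. */
static int htab_bolt_mapping(unsigned long start, unsigned long end,
			     unsigned long pa)
{
	printf("bolt va %#lx..%#lx -> pa %#lx\n", start, end, pa);
	return 0;
}

/* Walk [start, end) in vmemmap-page-sized steps, skipping ranges
 * that are already populated and bolting the rest. */
static int vmemmap_populate_sketch(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1UL << mmu_vmemmap_shift;

	start &= ~(page_size - 1);		/* align down */
	for (; start < end; start += page_size) {
		if (vmemmap_populated(start, page_size))
			continue;
		/* in the kernel: p = vmemmap_alloc_block(page_size, node) */
		if (htab_bolt_mapping(start, start + page_size, start) < 0)
			return -1;
	}
	return 0;
}

int main(void)
{
	/* populate two 16M chunks of the vmemmap region */
	return vmemmap_populate_sketch(0xf000000000000000UL,
				       0xf000000002000000UL);
}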
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index cf8705e32d6..89497fb0428 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -28,7 +28,7 @@
#include <asm/udbg.h>
#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) printk(fmt)
#else
#define DBG pr_debug
#endif
@@ -263,13 +263,19 @@ void slb_initialize(void)
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io;
extern unsigned int *slb_compare_rr_to_size;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ extern unsigned int *slb_miss_kernel_load_vmemmap;
+ unsigned long vmemmap_llp;
+#endif
/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
io_llp = mmu_psize_defs[mmu_io_psize].sllp;
vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
-
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
+#endif
if (!slb_encoding_inited) {
slb_encoding_inited = 1;
patch_slb_encoding(slb_miss_kernel_load_linear,
@@ -281,6 +287,12 @@ void slb_initialize(void)
DBG("SLB: linear LLP = %04lx\n", linear_llp);
DBG("SLB: io LLP = %04lx\n", io_llp);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ patch_slb_encoding(slb_miss_kernel_load_vmemmap,
+ SLB_VSID_KERNEL | vmemmap_llp);
+ DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+#endif
}
get_paca()->stab_rr = SLB_NUM_BOLTED;
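slb_miss_kernel_load_vmemmap labels a "li r11,0" placeholder in slb_low.S whose immediate field patch_slb_encoding() rewrites at boot with the vmemmap SLB encoding. A userspace sketch of that mechanism follows; the "li r11,0" instruction encoding is real, but the flag value is a made-up stand-in and the icache flush the kernel would need after the store is omitted.

#include <stdio.h>
#include <stdint.h>

/* "li rD,IMM" keeps the opcode and destination register in the top
 * 16 bits; the placeholder's zero immediate occupies the low 16
 * bits, so the SLB encoding can simply be OR-ed in. */
static void patch_slb_encoding(uint32_t *insn_addr, uint32_t immed)
{
	*insn_addr = (*insn_addr & 0xffff0000u) | (immed & 0xffffu);
}

int main(void)
{
	uint32_t insn = 0x39600000;	/* "li r11,0" */
	uint32_t vmemmap_llp = 0x0110;	/* hypothetical SLB_VSID_KERNEL | llp */

	patch_slb_encoding(&insn, vmemmap_llp);
	printf("patched: %#010x\n", insn);	/* 0x39600110 */
	return 0;
}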
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 657f6b37e9d..bc44dc4b5c6 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -47,8 +47,7 @@ _GLOBAL(slb_allocate_realmode)
* it to VSID 0, which is reserved as a bad VSID - one which
* will never have any pages in it. */
- /* Check if hitting the linear mapping of the vmalloc/ioremap
- * kernel space
+ /* Check if hitting the linear mapping or some other kernel space
*/
bne cr7,1f
@@ -62,7 +61,18 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
b slb_finish_load_1T
-1: /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
+1:
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ /* Check virtual memmap region. To be patched at kernel boot */
+ cmpldi cr0,r9,0xf
+ bne 1f
+_GLOBAL(slb_miss_kernel_load_vmemmap)
+ li r11,0
+ b 6f
+1:
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+ /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
* will be patched by the kernel at boot
*/
BEGIN_FTR_SECTION
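The new compare works because on ppc64 the top four bits of an effective address name its region, and r9 already holds that region ID when the check runs; 0xf now selects the vmemmap encoding instead of falling through to the vmalloc/ioremap path. A C sketch of the dispatch, where the 0xc/0xd values are the conventional PAGE_OFFSET/VMALLOC_START regions and should be treated as assumptions here:

#include <stdio.h>

#define REGION_SHIFT		60
#define VMEMMAP_REGION_ID	0xfUL

/* Mirror of the dispatch in slb_allocate_realmode: the region ID
 * selects which boot-patched SLB encoding gets loaded. */
static const char *slb_encoding_for(unsigned long ea)
{
	switch (ea >> REGION_SHIFT) {
	case 0xc:			/* linear mapping */
		return "slb_miss_kernel_load_linear";
	case VMEMMAP_REGION_ID:		/* virtual memmap */
		return "slb_miss_kernel_load_vmemmap";
	case 0xd:			/* vmalloc/ioremap */
		return "vmalloc/io encoding";
	default:
		return "user or bad address";
	}
}

int main(void)
{
	printf("%s\n", slb_encoding_for(0xf000000000000000UL));
	return 0;
}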
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 0dff7677604..39c5c5f62bf 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -177,6 +177,7 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
+extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 27f18695f7d..cc6a43ba41d 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -65,15 +65,15 @@
#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
+#define VMEMMAP_REGION_ID (0xfUL)
#define USER_REGION_ID (0UL)
/*
- * Defines the address of the vmemap area, in the top 16th of the
- * kernel region.
+ * Defines the address of the vmemmap area, in its own region
*/
-#define VMEMMAP_BASE (ASM_CONST(CONFIG_KERNEL_START) + \
- (0xfUL << (REGION_SHIFT - 4)))
-#define vmemmap ((struct page *)VMEMMAP_BASE)
+#define VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT)
+#define vmemmap ((struct page *)VMEMMAP_BASE)
+
/*
* Common bits in a linux-style PTE. These match the bits in the
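With REGION_SHIFT at 60, the new define places the vmemmap in its own sixteenth of the address space, away from the linear and vmalloc regions. A short check of the arithmetic; REGION_SHIFT's value is assumed from the surrounding header.

#include <stdio.h>

#define REGION_SHIFT		60
#define VMEMMAP_REGION_ID	0xfUL
#define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)

int main(void)
{
	/* expect 0xf000000000000000 */
	printf("VMEMMAP_BASE = %#lx\n", (unsigned long)VMEMMAP_BASE);
	return 0;
}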