author		Ilya Yanok <yanok@emcraft.com>		2008-12-11 04:55:41 +0300
committer	Paul Mackerras <paulus@samba.org>	2008-12-29 09:53:25 +1100
commit		ca9153a3a2a7556d091dfe080e42b0e67881fff6 (patch)
tree		35b5ce24f190690cf7a726cbb97980da51704855 /arch/powerpc/kernel
parent		6ca4f7494bde078b2b730e28e4ea1dc36a772f70 (diff)
powerpc/44x: Support 16K/64K base page sizes on 44x
This adds support for 16k and 64k page sizes on PowerPC 44x processors.

The PGDIR table is much smaller than a page when using 16k or 64k pages
(512 and 32 bytes respectively) so we allocate the PGDIR with kzalloc()
instead of __get_free_pages().

One PTE table covers rather a large memory area when using 16k or 64k
pages (32MB or 512MB respectively), so we can easily put FIXMAP and
PKMAP in the area covered by one PTE table.

Signed-off-by: Yuri Tikhonov <yur@emcraft.com>
Signed-off-by: Vladimir Panfilov <pvr@emcraft.com>
Signed-off-by: Ilya Yanok <yanok@emcraft.com>
Acked-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
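The kzalloc() switch itself lands in the 32-bit page-table allocation code, which is outside the files shown here. A minimal sketch of the idea, with PGD_TABLE_SIZE and PGDIR_ORDER as stand-ins for whatever the real headers define:

#include <linux/gfp.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>

/*
 * Sketch only, not the literal patch: with 16K/64K base pages the
 * PGDIR (512 or 32 bytes) is far smaller than a page, so grabbing
 * whole pages would waste almost all of them; a kzalloc() of the
 * exact table size is enough.  PGD_TABLE_SIZE and PGDIR_ORDER are
 * placeholders for whatever the real headers provide.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
#ifndef CONFIG_PPC_4K_PAGES
	return kzalloc(PGD_TABLE_SIZE, GFP_KERNEL);
#else
	return (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 PGDIR_ORDER);
#endif
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifndef CONFIG_PPC_4K_PAGES
	kfree(pgd);
#else
	free_pages((unsigned long)pgd, PGDIR_ORDER);
#endif
}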
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	4
-rw-r--r--	arch/powerpc/kernel/head_44x.S		23
-rw-r--r--	arch/powerpc/kernel/misc_32.S		12
3 files changed, 24 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c05ab1d3e62..661d07d2146 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -380,6 +380,10 @@ int main(void)
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
#endif
+#ifdef CONFIG_44x
+ DEFINE(PGD_T_LOG2, PGD_T_LOG2);
+ DEFINE(PTE_T_LOG2, PTE_T_LOG2);
+#endif
return 0;
}
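These two DEFINE()s export the constants to assembly via asm-offsets.h, so head_44x.S can use them without pulling in C page-table headers. The constants themselves are expected to come from the 32-bit page headers, presumably along these lines (not part of this diff):

/* Assumed definitions (asm/page.h, not shown here): log2 of the size
 * of a pgd/pte entry, so assembly can turn a table index into a byte
 * offset for any base page size.
 */
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

With 4-byte pgd entries and 8-byte PTEs (36-bit physical addressing) on 44x, these work out to 2 and 3.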
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index bd4fe9e7278..b56fecc93a1 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -402,12 +402,14 @@ interrupt_base:
rlwimi r13,r12,10,30,30
/* Load the PTE */
- rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
+ /* Compute pgdir/pmd offset */
+ rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
lwzx r11, r12, r11 /* Get pgd/pmd entry */
rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
beq 2f /* Bail if no table */
- rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
+ /* Compute pte address */
+ rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
lwz r11, 0(r12) /* Get high word of pte entry */
lwz r12, 4(r12) /* Get low word of pte entry */
@@ -496,12 +498,14 @@ tlb_44x_patch_hwater_D:
/* Make up the required permissions */
li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC
- rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
+ /* Compute pgdir/pmd offset */
+ rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
lwzx r11, r12, r11 /* Get pgd/pmd entry */
rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
beq 2f /* Bail if no table */
- rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
+ /* Compute pte address */
+ rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
lwz r11, 0(r12) /* Get high word of pte entry */
lwz r12, 4(r12) /* Get low word of pte entry */
@@ -565,15 +569,16 @@ tlb_44x_patch_hwater_I:
*/
finish_tlb_load:
/* Combine RPN & ERPN and write WS 0 */
- rlwimi r11,r12,0,0,19
+ rlwimi r11,r12,0,0,31-PAGE_SHIFT
tlbwe r11,r13,PPC44x_TLB_XLAT
/*
* Create WS1. This is the faulting address (EPN),
* page size, and valid flag.
*/
- li r11,PPC44x_TLB_VALID | PPC44x_TLB_4K
- rlwimi r10,r11,0,20,31 /* Insert valid and page size*/
+ li r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
+ /* Insert valid and page size */
+ rlwimi r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */
/* And WS 2 */
@@ -645,12 +650,12 @@ _GLOBAL(set_context)
* goes at the beginning of the data segment, which is page-aligned.
*/
.data
- .align 12
+ .align PAGE_SHIFT
.globl sdata
sdata:
.globl empty_zero_page
empty_zero_page:
- .space 4096
+ .space PAGE_SIZE
/*
* To support >32-bit physical addresses, we use an 8KB pgdir.
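The symbolic shift and mask-bit values used in the TLB-miss hunks above are presumably defined alongside the other TLB constants in asm/mmu-44x.h, outside this diff. A sketch that reduces to the numbers the old code hard-wired for 4K pages (PGDIR_SHIFT = 21, PTE_SHIFT = 9, PGD_T_LOG2 = 2, PTE_T_LOG2 = 3):

/* Assumed definitions, not part of this diff. */
#define PPC44x_PGD_OFF_SHIFT	(32 - PGDIR_SHIFT + PGD_T_LOG2)			/* 13 with 4K pages */
#define PPC44x_PGD_OFF_MASK_BIT	(PGDIR_SHIFT - PGD_T_LOG2)			/* 19 with 4K pages */
#define PPC44x_PTE_ADD_SHIFT	(32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)	/* 23 with 4K pages */
#define PPC44x_PTE_ADD_MASK_BIT	(32 - PTE_T_LOG2 - PTE_SHIFT)			/* 20 with 4K pages */

PPC44x_TLBE_SIZE would likewise select the PPC44x_TLB_4K, PPC44x_TLB_16K or PPC44x_TLB_64K encoding that matches the configured base page size.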
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index ae0d084b6a2..15f28e0de78 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -426,8 +426,8 @@ _GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
- rlwinm r3,r3,0,0,19 /* Get page base address */
- li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
+ rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
+ li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */
mtctr r4
mr r6,r3
0: dcbst 0,r3 /* Write line to ram */
@@ -467,8 +467,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
rlwinm r0,r10,0,28,26 /* clear DR */
mtmsr r0
isync
- rlwinm r3,r3,0,0,19 /* Get page base address */
- li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
+ rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
+ li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */
mtctr r4
mr r6,r3
0: dcbst 0,r3 /* Write line to ram */
@@ -492,7 +492,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
* void clear_pages(void *page, int order) ;
*/
_GLOBAL(clear_pages)
- li r0,4096/L1_CACHE_BYTES
+ li r0,PAGE_SIZE/L1_CACHE_BYTES
slw r0,r0,r4
mtctr r0
#ifdef CONFIG_8xx
@@ -550,7 +550,7 @@ _GLOBAL(copy_page)
dcbt r5,r4
li r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
- li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
+ li r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
crclr 4*cr0+eq
2:
mtctr r0
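As a quick sanity check of the counts these misc_32.S hunks now load into CTR, a standalone sketch assuming the 440's 32-byte L1 cache line (the kernel itself takes this from L1_CACHE_BYTES):

#include <stdio.h>

int main(void)
{
	const unsigned int l1_cache_bytes = 32;	/* 440 L1 line size, assumed for illustration */
	const unsigned int page_sizes[] = { 4096, 16384, 65536 };

	/* PAGE_SIZE / L1_CACHE_BYTES cache lines per page, as used by
	 * __flush_dcache_icache(), clear_pages() and copy_page(). */
	for (int i = 0; i < 3; i++)
		printf("PAGE_SIZE %6u -> %4u cache lines\n",
		       page_sizes[i], page_sizes[i] / l1_cache_bytes);
	return 0;
}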