path: root/arch/powerpc/include
author     Ingo Molnar <mingo@elte.hu>  2009-03-30 23:53:32 +0200
committer  Ingo Molnar <mingo@elte.hu>  2009-03-30 23:53:32 +0200
commit     65fb0d23fcddd8697c871047b700c78817bdaa43 (patch)
tree       119e6e5f276622c4c862f6c9b6d795264ba1603a /arch/powerpc/include
parent     8c083f081d0014057901c68a0a3e0f8ca7ac8d23 (diff)
parent     dfbbe89e197a77f2c8046a51c74e33e35f878080 (diff)
Merge branch 'linus' into cpumask-for-linus
Conflicts: arch/x86/kernel/cpu/common.c
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/code-patching.h  4
-rw-r--r--  arch/powerpc/include/asm/cputable.h  8
-rw-r--r--  arch/powerpc/include/asm/dbell.h  43
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h  12
-rw-r--r--  arch/powerpc/include/asm/elf.h  12
-rw-r--r--  arch/powerpc/include/asm/fixmap.h  2
-rw-r--r--  arch/powerpc/include/asm/ftrace.h  39
-rw-r--r--  arch/powerpc/include/asm/highmem.h  12
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h  2
-rw-r--r--  arch/powerpc/include/asm/io.h  6
-rw-r--r--  arch/powerpc/include/asm/kvm.h  7
-rw-r--r--  arch/powerpc/include/asm/kvm_44x.h  7
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h  7
-rw-r--r--  arch/powerpc/include/asm/kvm_e500.h  67
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h  21
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h  15
-rw-r--r--  arch/powerpc/include/asm/lppaca.h  8
-rw-r--r--  arch/powerpc/include/asm/machdep.h  4
-rw-r--r--  arch/powerpc/include/asm/mmu-44x.h  2
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h (renamed from arch/powerpc/include/asm/mmu-fsl-booke.h)  68
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h  2
-rw-r--r--  arch/powerpc/include/asm/mmu.h  20
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h  2
-rw-r--r--  arch/powerpc/include/asm/mpc52xx.h  1
-rw-r--r--  arch/powerpc/include/asm/page.h  6
-rw-r--r--  arch/powerpc/include/asm/page_32.h  4
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h  557
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64-4k.h (renamed from arch/powerpc/include/asm/pgtable-4k.h)  55
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64-64k.h  42
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h  196
-rw-r--r--  arch/powerpc/include/asm/pgtable.h  134
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h  73
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h  6
-rw-r--r--  arch/powerpc/include/asm/processor.h  19
-rw-r--r--  arch/powerpc/include/asm/ps3av.h  2
-rw-r--r--  arch/powerpc/include/asm/ps3fb.h  1
-rw-r--r--  arch/powerpc/include/asm/pte-40x.h  64
-rw-r--r--  arch/powerpc/include/asm/pte-44x.h  102
-rw-r--r--  arch/powerpc/include/asm/pte-8xx.h  67
-rw-r--r--  arch/powerpc/include/asm/pte-common.h  180
-rw-r--r--  arch/powerpc/include/asm/pte-fsl-booke.h  48
-rw-r--r--  arch/powerpc/include/asm/pte-hash32.h  48
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-4k.h  17
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h (renamed from arch/powerpc/include/asm/pgtable-64k.h)  132
-rw-r--r--  arch/powerpc/include/asm/pte-hash64.h  54
-rw-r--r--  arch/powerpc/include/asm/reg.h  2
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h  2
-rw-r--r--  arch/powerpc/include/asm/socket.h  3
-rw-r--r--  arch/powerpc/include/asm/systbl.h  2
-rw-r--r--  arch/powerpc/include/asm/system.h  2
-rw-r--r--  arch/powerpc/include/asm/thread_info.h  11
-rw-r--r--  arch/powerpc/include/asm/udbg.h  1
52 files changed, 1282 insertions, 919 deletions
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 107d9b915e3..37c32aba79b 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -11,9 +11,7 @@
*/
#include <asm/types.h>
-
-#define PPC_NOP_INSTR 0x60000000
-#define PPC_LWSYNC_INSTR 0x7c2004ac
+#include <asm/ppc-opcode.h>
/* Flags for create_branch:
* "b" == create_branch(addr, target, 0);
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 4911104791c..80f315e8a42 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -145,6 +145,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
#define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080)
#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
+#define CPU_FTR_DBELL ASM_CONST(0x0000000000000200)
#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
@@ -241,9 +242,11 @@ extern const char *powerpc_base_platform;
/* We need to mark all pages as being coherent if we're SMP or we have a
* 74[45]x and an MPC107 host bridge. Also 83xx and PowerQUICC II
* require it for PCI "streaming/prefetch" to work properly.
+ * This is also required by 52xx family.
*/
#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \
- || defined(CONFIG_PPC_83xx) || defined(CONFIG_8260)
+ || defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) \
+ || defined(CONFIG_PPC_MPC52xx)
#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
#else
#define CPU_FTR_COMMON 0
@@ -373,7 +376,8 @@ extern const char *powerpc_base_platform;
CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
- CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE)
+ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DBELL)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
new file mode 100644
index 00000000000..501189a543d
--- /dev/null
+++ b/arch/powerpc/include/asm/dbell.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * provides masks and opcode images for use by code generation, emulation
+ * and for instructions that older assemblers might not know about
+ */
+#ifndef _ASM_POWERPC_DBELL_H
+#define _ASM_POWERPC_DBELL_H
+
+#include <linux/smp.h>
+#include <linux/threads.h>
+
+#include <asm/ppc-opcode.h>
+
+#define PPC_DBELL_MSG_BRDCAST (0x04000000)
+#define PPC_DBELL_TYPE(x) (((x) & 0xf) << 28)
+enum ppc_dbell {
+ PPC_DBELL = 0, /* doorbell */
+ PPC_DBELL_CRIT = 1, /* critical doorbell */
+ PPC_G_DBELL = 2, /* guest doorbell */
+ PPC_G_DBELL_CRIT = 3, /* guest critical doorbell */
+ PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */
+};
+
+#ifdef CONFIG_SMP
+extern unsigned long dbell_smp_message[NR_CPUS];
+extern void smp_dbell_message_pass(int target, int msg);
+#endif
+
+static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
+{
+ u32 msg = PPC_DBELL_TYPE(type) | (flags & PPC_DBELL_MSG_BRDCAST) |
+ (tag & 0x07ffffff);
+
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+}
+
+#endif /* _ASM_POWERPC_DBELL_H */
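A hedged usage sketch (not part of the patch): one way platform SMP code could drive the new ppc_msgsnd() helper to send either a directed or a broadcast doorbell. The function name and the convention of using the target CPU number as the message tag are assumptions for illustration; the real smp_dbell_message_pass() declared above is not shown by this diff.

static void example_doorbell_ipi(int target_cpu, int broadcast)
{
        if (broadcast)
                /* a single msgsnd reaches every processor accepting doorbells */
                ppc_msgsnd(PPC_DBELL, PPC_DBELL_MSG_BRDCAST, 0);
        else
                /* directed message: the tag is matched against the receiver (assumed PIR) */
                ppc_msgsnd(PPC_DBELL, 0, target_cpu);
}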
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 86cef7ddc8d..c69f2b5f0cc 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -109,18 +109,8 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
* only ISA DMA device we support is the floppy and we have a hack
* in the floppy driver directly to get a device for us.
*/
-
- if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
-#ifdef CONFIG_PPC64
+ if (unlikely(dev == NULL))
return NULL;
-#else
- /* Use default on 32-bit if dma_ops is not set up */
- /* TODO: Long term, we should fix drivers so that dev and
- * archdata dma_ops are set up for all buses.
- */
- return &dma_direct_ops;
-#endif
- }
return dev->archdata.dma_ops;
}
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index b5600ce6055..1a856b15226 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -8,6 +8,7 @@
#endif
#include <linux/types.h>
+
#include <asm/ptrace.h>
#include <asm/cputable.h>
#include <asm/auxvec.h>
@@ -178,7 +179,8 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
-#define ELF_ET_DYN_BASE (0x20000000)
+extern unsigned long randomize_et_dyn(unsigned long base);
+#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
/*
* Our registers are always unsigned longs, whether we're a 32 bit
@@ -270,6 +272,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
+/* 1GB for 64bit, 8MB for 32bit */
+#define STACK_RND_MASK (is_32bit_task() ? \
+ (0x7ff >> (PAGE_SHIFT - 12)) : \
+ (0x3ffff >> (PAGE_SHIFT - 12)))
+
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+#define arch_randomize_brk arch_randomize_brk
+
#endif /* __KERNEL__ */
/*
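A hedged arithmetic check of the new STACK_RND_MASK (illustration only, compiled as a stand-alone user-space program): with 4K pages the mask yields exactly the ranges quoted in the "1GB for 64bit, 8MB for 32bit" comment, and larger page sizes shift the mask down so the byte range stays constant.

#include <stdio.h>

int main(void)
{
        unsigned long page_shift = 12;                          /* 4K pages assumed */
        unsigned long mask64 = 0x3ffff >> (page_shift - 12);
        unsigned long mask32 = 0x7ff >> (page_shift - 12);

        /* the randomisation range is (mask + 1) pages */
        printf("64-bit: %lu MiB\n", ((mask64 + 1) << page_shift) >> 20);  /* 1024 MiB = 1GB */
        printf("32-bit: %lu MiB\n", ((mask32 + 1) << page_shift) >> 20);  /* 8 MiB */
        return 0;
}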
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 8428b38a3d3..d60fd18f428 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -61,7 +61,7 @@ extern void __set_fixmap (enum fixed_addresses idx,
* Some hardware wants to get fixmapped without caching.
*/
#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+ __set_fixmap(idx, phys, PAGE_KERNEL_NCG)
#define clear_fixmap(idx) \
__set_fixmap(idx, 0, __pgprot(0))
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index e5f2ae8362f..dde1296b8b4 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -5,7 +5,44 @@
#define MCOUNT_ADDR ((long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
-#ifndef __ASSEMBLY__
+#ifdef __ASSEMBLY__
+
+/* Based off of objdump output from glibc */
+
+#define MCOUNT_SAVE_FRAME \
+ stwu r1,-48(r1); \
+ stw r3, 12(r1); \
+ stw r4, 16(r1); \
+ stw r5, 20(r1); \
+ stw r6, 24(r1); \
+ mflr r3; \
+ lwz r4, 52(r1); \
+ mfcr r5; \
+ stw r7, 28(r1); \
+ stw r8, 32(r1); \
+ stw r9, 36(r1); \
+ stw r10,40(r1); \
+ stw r3, 44(r1); \
+ stw r5, 8(r1)
+
+#define MCOUNT_RESTORE_FRAME \
+ lwz r6, 8(r1); \
+ lwz r0, 44(r1); \
+ lwz r3, 12(r1); \
+ mtctr r0; \
+ lwz r4, 16(r1); \
+ mtcr r6; \
+ lwz r5, 20(r1); \
+ lwz r6, 24(r1); \
+ lwz r0, 52(r1); \
+ lwz r7, 28(r1); \
+ lwz r8, 32(r1); \
+ mtlr r0; \
+ lwz r9, 36(r1); \
+ lwz r10,40(r1); \
+ addi r1, r1, 48
+
+#else /* !__ASSEMBLY__ */
extern void _mcount(void);
#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 04e4a620952..545028f8648 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -39,15 +39,15 @@ extern pte_t *pkmap_page_table;
* chunk of RAM.
*/
/*
- * We use one full pte table with 4K pages. And with 16K/64K pages pte
- * table covers enough memory (32MB and 512MB resp.) that both FIXMAP
- * and PKMAP can be placed in single pte table. We use 1024 pages for
- * PKMAP in case of 16K/64K pages.
+ * We use one full pte table with 4K pages. And with 16K/64K/256K pages pte
+ * table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP
+ * and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP
+ * in case of 16K/64K/256K page sizes.
*/
#ifdef CONFIG_PPC_4K_PAGES
#define PKMAP_ORDER PTE_SHIFT
#else
-#define PKMAP_ORDER 10
+#define PKMAP_ORDER 9
#endif
#define LAST_PKMAP (1 << PKMAP_ORDER)
#ifndef CONFIG_PPC_4K_PAGES
@@ -99,7 +99,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
- __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+ __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
local_flush_tlb_page(NULL, vaddr);
return (void*) vaddr;
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index f75a5fc64d2..b7e034b0a6d 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -129,7 +129,7 @@ static inline int irqs_disabled_flags(unsigned long flags)
* interrupt-retrigger: should we handle this via lost interrupts and IPIs
* or should we not care like we do now ? --BenH.
*/
-struct hw_interrupt_type;
+struct irq_chip;
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 494cd8b0a27..001f2f11c19 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -632,6 +632,9 @@ static inline void iosync(void)
* ioremap_flags and cannot be hooked (but can be used by a hook on one
* of the previous ones)
*
+ * * __ioremap_caller is the same as above but takes an explicit caller
+ * reference rather than using __builtin_return_address(0)
+ *
* * __iounmap, is the low level implementation used by iounmap and cannot
* be hooked (but can be used by a hook on iounmap)
*
@@ -646,6 +649,9 @@ extern void iounmap(volatile void __iomem *addr);
extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
unsigned long flags);
+extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
+ unsigned long flags, void *caller);
+
extern void __iounmap(volatile void __iomem *addr);
extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
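A hedged sketch (an assumption, not taken from this patch) of the relationship the comment above describes: the non-hooked __ioremap() entry point forwarding its own call site to __ioremap_caller() via __builtin_return_address(0).

void __iomem *__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
        /* sketch: record who called the low-level remap */
        return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}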
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 4e0cf65f7f5..bb2de6aa5ce 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -52,4 +52,11 @@ struct kvm_fpu {
__u64 fpr[32];
};
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
index f49031b632c..d22d39942a9 100644
--- a/arch/powerpc/include/asm/kvm_44x.h
+++ b/arch/powerpc/include/asm/kvm_44x.h
@@ -28,6 +28,13 @@
* need to find some way of advertising it. */
#define KVM44x_GUEST_TLB_SIZE 64
+struct kvmppc_44x_tlbe {
+ u32 tid; /* Only the low 8 bits are used. */
+ u32 word0;
+ u32 word1;
+ u32 word2;
+};
+
struct kvmppc_44x_shadow_ref {
struct page *page;
u16 gtlb_index;
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 2197764796d..56bfae59837 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -42,7 +42,12 @@
#define BOOKE_INTERRUPT_DTLB_MISS 13
#define BOOKE_INTERRUPT_ITLB_MISS 14
#define BOOKE_INTERRUPT_DEBUG 15
-#define BOOKE_MAX_INTERRUPT 15
+
+/* E500 */
+#define BOOKE_INTERRUPT_SPE_UNAVAIL 32
+#define BOOKE_INTERRUPT_SPE_FP_DATA 33
+#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
+#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
new file mode 100644
index 00000000000..9d497ce4972
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Yu Liu, <yu.liu@freescale.com>
+ *
+ * Description:
+ * This file is derived from arch/powerpc/include/asm/kvm_44x.h,
+ * by Hollis Blanchard <hollisb@us.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_KVM_E500_H__
+#define __ASM_KVM_E500_H__
+
+#include <linux/kvm_host.h>
+
+#define BOOKE_INTERRUPT_SIZE 36
+
+#define E500_PID_NUM 3
+#define E500_TLB_NUM 2
+
+struct tlbe{
+ u32 mas1;
+ u32 mas2;
+ u32 mas3;
+ u32 mas7;
+};
+
+struct kvmppc_vcpu_e500 {
+ /* Unmodified copy of the guest's TLB. */
+ struct tlbe *guest_tlb[E500_TLB_NUM];
+ /* TLB that's actually used when the guest is running. */
+ struct tlbe *shadow_tlb[E500_TLB_NUM];
+ /* Pages which are referenced in the shadow TLB. */
+ struct page **shadow_pages[E500_TLB_NUM];
+
+ unsigned int guest_tlb_size[E500_TLB_NUM];
+ unsigned int shadow_tlb_size[E500_TLB_NUM];
+ unsigned int guest_tlb_nv[E500_TLB_NUM];
+
+ u32 host_pid[E500_PID_NUM];
+ u32 pid[E500_PID_NUM];
+
+ u32 mas0;
+ u32 mas1;
+ u32 mas2;
+ u32 mas3;
+ u32 mas4;
+ u32 mas5;
+ u32 mas6;
+ u32 mas7;
+ u32 l1csr1;
+ u32 hid0;
+ u32 hid1;
+
+ struct kvm_vcpu vcpu;
+};
+
+static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
+{
+ return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
+}
+
+#endif /* __ASM_KVM_E500_H__ */
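A hedged usage sketch (hypothetical function, illustration only): core e500 KVM code recovering its container structure from the embedded struct kvm_vcpu with the new to_e500() helper.

static void example_set_guest_pid0(struct kvm_vcpu *vcpu, u32 pid)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        /* pid[] shadows the guest's PID0..PID2 registers in this layout */
        vcpu_e500->pid[0] = pid;
}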
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index c1e436fe773..dfdf13c9fef 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -64,13 +64,6 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-struct kvmppc_44x_tlbe {
- u32 tid; /* Only the low 8 bits are used. */
- u32 word0;
- u32 word1;
- u32 word2;
-};
-
enum kvm_exit_types {
MMIO_EXITS,
DCR_EXITS,
@@ -118,11 +111,6 @@ struct kvm_arch {
struct kvm_vcpu_arch {
u32 host_stack;
u32 host_pid;
- u32 host_dbcr0;
- u32 host_dbcr1;
- u32 host_dbcr2;
- u32 host_iac[4];
- u32 host_msr;
u64 fpr[32];
ulong gpr[32];
@@ -157,7 +145,7 @@ struct kvm_vcpu_arch {
u32 tbu;
u32 tcr;
u32 tsr;
- u32 ivor[16];
+ u32 ivor[64];
ulong ivpr;
u32 pir;
@@ -170,6 +158,7 @@ struct kvm_vcpu_arch {
u32 ccr1;
u32 dbcr0;
u32 dbcr1;
+ u32 dbsr;
#ifdef CONFIG_KVM_EXIT_TIMING
struct kvmppc_exit_timing timing_exit;
@@ -200,10 +189,4 @@ struct kvm_vcpu_arch {
unsigned long pending_exceptions;
};
-struct kvm_guest_debug {
- int enabled;
- unsigned long bp[4];
- int singlestep;
-};
-
#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 36d2a50a848..2c6ee349df5 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -52,13 +52,19 @@ extern int kvmppc_emulate_instruction(struct kvm_run *run,
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
+/* Core-specific hooks */
+
extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
- u64 asid, u32 flags, u32 max_bytes,
unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
-
-/* Core-specific hooks */
+extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
+ gva_t eaddr);
+extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
unsigned int id);
@@ -71,9 +77,6 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu);
-
extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 25aaa97facd..68235f7e4a8 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -97,7 +97,7 @@ struct lppaca {
u64 saved_gpr4; // Saved GPR4 x28-x2F
u64 saved_gpr5; // Saved GPR5 x30-x37
- u8 reserved4; // Reserved x38-x38
+ u8 dtl_enable_mask; // Dispatch Trace Log mask x38-x38
u8 donate_dedicated_cpu; // Donate dedicated CPU cycles x39-x39
u8 fpregs_in_use; // FP regs in use x3A-x3A
u8 pmcregs_in_use; // PMC regs in use x3B-x3B
@@ -133,8 +133,10 @@ struct lppaca {
//=============================================================================
// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
//=============================================================================
- u32 page_ins; // CMO Hint - # page ins by OS x00-x04
- u8 pmc_save_area[252]; // PMC interrupt Area x04-xFF
+ u32 page_ins; // CMO Hint - # page ins by OS x00-x03
+ u8 reserved8[148]; // Reserved x04-x97
+ volatile u64 dtl_idx; // Dispatch Trace Log head idx x98-x9F
+ u8 reserved9[96]; // Reserved xA0-xFF
} __attribute__((__aligned__(0x400)));
extern struct lppaca lppaca[];
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 2740c44ff71..0efdb1dfdc5 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -90,7 +90,7 @@ struct machdep_calls {
void (*tce_flush)(struct iommu_table *tbl);
void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
- unsigned long flags);
+ unsigned long flags, void *caller);
void (*iounmap)(volatile void __iomem *token);
#ifdef CONFIG_PM
@@ -327,8 +327,6 @@ extern void __devinit smp_generic_take_timebase(void);
*/
/* Print a boot progress message. */
void ppc64_boot_msg(unsigned int src, const char *msg);
-/* Print a termination message (print only -- does not stop the kernel) */
-void ppc64_terminate_msg(unsigned int src, const char *msg);
static inline void log_error(char *buf, unsigned int err_type, int fatal)
{
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index 27cc6fdcd3b..3c86576bfef 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -83,6 +83,8 @@ typedef struct {
#define PPC44x_TLBE_SIZE PPC44x_TLB_16K
#elif (PAGE_SHIFT == 16)
#define PPC44x_TLBE_SIZE PPC44x_TLB_64K
+#elif (PAGE_SHIFT == 18)
+#define PPC44x_TLBE_SIZE PPC44x_TLB_256K
#else
#error "Unsupported PAGE_SIZE"
#endif
diff --git a/arch/powerpc/include/asm/mmu-fsl-booke.h b/arch/powerpc/include/asm/mmu-book3e.h
index 3f941c0f7e8..7e74cff81d8 100644
--- a/arch/powerpc/include/asm/mmu-fsl-booke.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -1,26 +1,42 @@
-#ifndef _ASM_POWERPC_MMU_FSL_BOOKE_H_
-#define _ASM_POWERPC_MMU_FSL_BOOKE_H_
+#ifndef _ASM_POWERPC_MMU_BOOK3E_H_
+#define _ASM_POWERPC_MMU_BOOK3E_H_
/*
- * Freescale Book-E MMU support
+ * Freescale Book-E/Book-3e (ISA 2.06+) MMU support
*/
-/* Book-E defined page sizes */
-#define BOOKE_PAGESZ_1K 0
-#define BOOKE_PAGESZ_4K 1
-#define BOOKE_PAGESZ_16K 2
-#define BOOKE_PAGESZ_64K 3
-#define BOOKE_PAGESZ_256K 4
-#define BOOKE_PAGESZ_1M 5
-#define BOOKE_PAGESZ_4M 6
-#define BOOKE_PAGESZ_16M 7
-#define BOOKE_PAGESZ_64M 8
-#define BOOKE_PAGESZ_256M 9
-#define BOOKE_PAGESZ_1GB 10
-#define BOOKE_PAGESZ_4GB 11
-#define BOOKE_PAGESZ_16GB 12
-#define BOOKE_PAGESZ_64GB 13
-#define BOOKE_PAGESZ_256GB 14
-#define BOOKE_PAGESZ_1TB 15
+/* Book-3e defined page sizes */
+#define BOOK3E_PAGESZ_1K 0
+#define BOOK3E_PAGESZ_2K 1
+#define BOOK3E_PAGESZ_4K 2
+#define BOOK3E_PAGESZ_8K 3
+#define BOOK3E_PAGESZ_16K 4
+#define BOOK3E_PAGESZ_32K 5
+#define BOOK3E_PAGESZ_64K 6
+#define BOOK3E_PAGESZ_128K 7
+#define BOOK3E_PAGESZ_256K 8
+#define BOOK3E_PAGESZ_512K 9
+#define BOOK3E_PAGESZ_1M 10
+#define BOOK3E_PAGESZ_2M 11
+#define BOOK3E_PAGESZ_4M 12
+#define BOOK3E_PAGESZ_8M 13
+#define BOOK3E_PAGESZ_16M 14
+#define BOOK3E_PAGESZ_32M 15
+#define BOOK3E_PAGESZ_64M 16
+#define BOOK3E_PAGESZ_128M 17
+#define BOOK3E_PAGESZ_256M 18
+#define BOOK3E_PAGESZ_512M 19
+#define BOOK3E_PAGESZ_1GB 20
+#define BOOK3E_PAGESZ_2GB 21
+#define BOOK3E_PAGESZ_4GB 22
+#define BOOK3E_PAGESZ_8GB 23
+#define BOOK3E_PAGESZ_16GB 24
+#define BOOK3E_PAGESZ_32GB 25
+#define BOOK3E_PAGESZ_64GB 26
+#define BOOK3E_PAGESZ_128GB 27
+#define BOOK3E_PAGESZ_256GB 28
+#define BOOK3E_PAGESZ_512GB 29
+#define BOOK3E_PAGESZ_1TB 30
+#define BOOK3E_PAGESZ_2TB 31
#define MAS0_TLBSEL(x) ((x << 28) & 0x30000000)
#define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000)
@@ -29,8 +45,9 @@
#define MAS1_VALID 0x80000000
#define MAS1_IPROT 0x40000000
#define MAS1_TID(x) ((x << 16) & 0x3FFF0000)
+#define MAS1_IND 0x00002000
#define MAS1_TS 0x00001000
-#define MAS1_TSIZE(x) ((x << 8) & 0x00000F00)
+#define MAS1_TSIZE(x) ((x << 7) & 0x00000F80)
#define MAS2_EPN 0xFFFFF000
#define MAS2_X0 0x00000040
@@ -40,7 +57,7 @@
#define MAS2_M 0x00000004
#define MAS2_G 0x00000002
#define MAS2_E 0x00000001
-#define MAS2_EPN_MASK(size) (~0 << (2*(size) + 10))
+#define MAS2_EPN_MASK(size) (~0 << (size + 10))
#define MAS2_VAL(addr, size, flags) ((addr) & MAS2_EPN_MASK(size) | (flags))
#define MAS3_RPN 0xFFFFF000
@@ -56,7 +73,7 @@
#define MAS3_SR 0x00000001
#define MAS4_TLBSELD(x) MAS0_TLBSEL(x)
-#define MAS4_TIDDSEL 0x000F0000
+#define MAS4_INDD 0x00008000
#define MAS4_TSIZED(x) MAS1_TSIZE(x)
#define MAS4_X0D 0x00000040
#define MAS4_X1D 0x00000020
@@ -68,6 +85,7 @@
#define MAS6_SPID0 0x3FFF0000
#define MAS6_SPID1 0x00007FFE
+#define MAS6_ISIZE(x) MAS1_TSIZE(x)
#define MAS6_SAS 0x00000001
#define MAS6_SPID MAS6_SPID0
@@ -75,6 +93,8 @@
#ifndef __ASSEMBLY__
+extern unsigned int tlbcam_index;
+
typedef struct {
unsigned int id;
unsigned int active;
@@ -82,4 +102,4 @@ typedef struct {
} mm_context_t;
#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_POWERPC_MMU_FSL_BOOKE_H_ */
+#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
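A hedged illustration (addresses, ESEL choice and variable names are assumptions): composing MAS register values for a 16M mapping with the renamed Book-3e macros, including the new 5-bit TSIZE encoding at bit 7.

/* illustration only: a valid, protected, 16M translation at 0xe0000000 */
u32 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
u32 mas1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(BOOK3E_PAGESZ_16M);
u32 mas2 = MAS2_VAL(0xe0000000, BOOK3E_PAGESZ_16M, MAS2_M | MAS2_G);   /* EPN masked to 16M, coherent + guarded */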
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 68b75262680..98c104a0996 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -284,8 +284,6 @@ extern void add_gpage(unsigned long addr, unsigned long page_size,
unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
-extern void htab_initialize(void);
-extern void htab_initialize_secondary(void);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 6e763991131..cbf15438709 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -36,9 +36,9 @@
*/
#define MMU_FTR_USE_TLBIVAX_BCAST ASM_CONST(0x00040000)
-/* Enable use of tlbilx invalidate-by-PID variant.
+/* Enable use of tlbilx invalidate instructions.
*/
-#define MMU_FTR_USE_TLBILX_PID ASM_CONST(0x00080000)
+#define MMU_FTR_USE_TLBILX ASM_CONST(0x00080000)
/* This indicates that the processor cannot handle multiple outstanding
* broadcast tlbivax or tlbsync. This makes the code use a spinlock
@@ -46,6 +46,12 @@
*/
#define MMU_FTR_LOCK_BCAST_INVAL ASM_CONST(0x00100000)
+/* This indicates that the processor doesn't handle way selection
+ * properly and needs SW to track and update the LRU state. This
+ * is specific to an errata on e300c2/c3/c4 class parts
+ */
+#define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000)
+
#ifndef __ASSEMBLY__
#include <asm/cputable.h>
@@ -56,6 +62,10 @@ static inline int mmu_has_feature(unsigned long feature)
extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
+/* MMU initialization (64-bit only for now) */
+extern void early_init_mmu(void);
+extern void early_init_mmu_secondary(void);
+
#endif /* !__ASSEMBLY__ */
@@ -71,9 +81,9 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
#elif defined(CONFIG_44x)
/* 44x-style software loaded TLB */
# include <asm/mmu-44x.h>
-#elif defined(CONFIG_FSL_BOOKE)
-/* Freescale Book-E software loaded TLB */
-# include <asm/mmu-fsl-booke.h>
+#elif defined(CONFIG_PPC_BOOK3E_MMU)
+/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
+# include <asm/mmu-book3e.h>
#elif defined (CONFIG_PPC_8xx)
/* Motorola/Freescale 8xx software loaded TLB */
# include <asm/mmu-8xx.h>
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index ab4f19263c4..b7063669f97 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -31,7 +31,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
/* Mark this context has been used on the new CPU */
- cpu_set(smp_processor_id(), next->cpu_vm_mask);
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 81a23932a16..52e049cd9e6 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -273,6 +273,7 @@ extern void mpc5200_setup_xlb_arbiter(void);
extern void mpc52xx_declare_of_platform_devices(void);
extern void mpc52xx_map_common_devices(void);
extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
+extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node);
extern void mpc52xx_restart(char *cmd);
/* mpc52xx_pic.c */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 197d569f5bd..32cbf16f10e 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -19,12 +19,14 @@
#include <asm/kdump.h>
/*
- * On regular PPC32 page size is 4K (but we support 4K/16K/64K pages
+ * On regular PPC32 page size is 4K (but we support 4K/16K/64K/256K pages
* on PPC44x). For PPC64 we support either 4K or 64K software
* page size. When using 64K pages however, whether we are really supporting
* 64K pages in HW or not is irrelevant to those definitions.
*/
-#if defined(CONFIG_PPC_64K_PAGES)
+#if defined(CONFIG_PPC_256K_PAGES)
+#define PAGE_SHIFT 18
+#elif defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT 16
#elif defined(CONFIG_PPC_16K_PAGES)
#define PAGE_SHIFT 14
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index 1458d950038..a0e3f6e6b4e 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -19,7 +19,11 @@
#define PTE_FLAGS_OFFSET 0
#endif
+#ifdef CONFIG_PPC_256K_PAGES
+#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2) /* 1/4 of a page */
+#else
#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */
+#endif
#ifndef __ASSEMBLY__
/*
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 820b5f0a35c..ba45c997830 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -19,55 +19,6 @@ extern int icache_44x_need_flush;
#endif /* __ASSEMBLY__ */
/*
- * The PowerPC MMU uses a hash table containing PTEs, together with
- * a set of 16 segment registers (on 32-bit implementations), to define
- * the virtual to physical address mapping.
- *
- * We use the hash table as an extended TLB, i.e. a cache of currently
- * active mappings. We maintain a two-level page table tree, much
- * like that used by the i386, for the sake of the Linux memory
- * management code. Low-level assembler code in hashtable.S
- * (procedure hash_page) is responsible for extracting ptes from the
- * tree and putting them into the hash table when necessary, and
- * updating the accessed and modified bits in the page table tree.
- */
-
-/*
- * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
- * We also use the two level tables, but we can put the real bits in them
- * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
- * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
- * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
- * based upon user/super access. The TLB does not have accessed nor write
- * protect. We assume that if the TLB get loaded with an entry it is
- * accessed, and overload the changed bit for write protect. We use
- * two bits in the software pte that are supposed to be set to zero in
- * the TLB entry (24 and 25) for these indicators. Although the level 1
- * descriptor contains the guarded and writethrough/copyback bits, we can
- * set these at the page level since they get copied from the Mx_TWC
- * register when the TLB entry is loaded. We will use bit 27 for guard, since
- * that is where it exists in the MD_TWC, and bit 26 for writethrough.
- * These will get masked from the level 2 descriptor at TLB load time, and
- * copied to the MD_TWC before it gets loaded.
- * Large page sizes added. We currently support two sizes, 4K and 8M.
- * This also allows a TLB hander optimization because we can directly
- * load the PMD into MD_TWC. The 8M pages are only used for kernel
- * mapping of well known areas. The PMD (PGD) entries contain control
- * flags in addition to the address, so care must be taken that the
- * software no longer assumes these are only pointers.
- */
-
-/*
- * At present, all PowerPC 400-class processors share a similar TLB
- * architecture. The instruction and data sides share a unified,
- * 64-entry, fully-associative TLB which is maintained totally under
- * software control. In addition, the instruction side has a
- * hardware-managed, 4-entry, fully-associative TLB which serves as a
- * first level to the shared TLB. These two TLBs are known as the UTLB
- * and ITLB, respectively (see "mmu.h" for definitions).
- */
-
-/*
* The normal case is that PTEs are 32-bits and we have a 1-page
* 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
*
@@ -135,409 +86,22 @@ extern int icache_44x_need_flush;
*/
#if defined(CONFIG_40x)
-
-/* There are several potential gotchas here. The 40x hardware TLBLO
- field looks like this:
-
- 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
- RPN..................... 0 0 EX WR ZSEL....... W I M G
-
- Where possible we make the Linux PTE bits match up with this
-
- - bits 20 and 21 must be cleared, because we use 4k pages (40x can
- support down to 1k pages), this is done in the TLBMiss exception
- handler.
- - We use only zones 0 (for kernel pages) and 1 (for user pages)
- of the 16 available. Bit 24-26 of the TLB are cleared in the TLB
- miss handler. Bit 27 is PAGE_USER, thus selecting the correct
- zone.
- - PRESENT *must* be in the bottom two bits because swap cache
- entries use the top 30 bits. Because 40x doesn't support SMP
- anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
- is cleared in the TLB miss handler before the TLB entry is loaded.
- - All other bits of the PTE are loaded into TLBLO without
- modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
- software PTE bits. We actually use use bits 21, 24, 25, and
- 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
- PRESENT.
-*/
-
-/* Definitions for 40x embedded chips. */
-#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
-#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
-#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
-#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
-#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
-#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
-#define _PAGE_RW 0x040 /* software: Writes permitted */
-#define _PAGE_DIRTY 0x080 /* software: dirty page */
-#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
-#define _PAGE_HWEXEC 0x200 /* hardware: EX permission */
-#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
-
-#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
-#define _PMD_BAD 0x802
-#define _PMD_SIZE 0x0e0 /* size field, != 0 for large-page PMD entry */
-#define _PMD_SIZE_4M 0x0c0
-#define _PMD_SIZE_16M 0x0e0
-#define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4))
-
-/* Until my rework is finished, 40x still needs atomic PTE updates */
-#define PTE_ATOMIC_UPDATES 1
-
+#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
-/*
- * Definitions for PPC440
- *
- * Because of the 3 word TLB entries to support 36-bit addressing,
- * the attribute are difficult to map in such a fashion that they
- * are easily loaded during exception processing. I decided to
- * organize the entry so the ERPN is the only portion in the
- * upper word of the PTE and the attribute bits below are packed
- * in as sensibly as they can be in the area below a 4KB page size
- * oriented RPN. This at least makes it easy to load the RPN and
- * ERPN fields in the TLB. -Matt
- *
- * Note that these bits preclude future use of a page size
- * less than 4KB.
- *
- *
- * PPC 440 core has following TLB attribute fields;
- *
- * TLB1:
- * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
- * RPN................................. - - - - - - ERPN.......
- *
- * TLB2:
- * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
- * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR
- *
- * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
- * TLB2 storage attibute fields. Those are:
- *
- * TLB2:
- * 0...10 11 12 13 14 15 16...31
- * no change WL1 IL1I IL1D IL2I IL2D no change
- *
- * There are some constrains and options, to decide mapping software bits
- * into TLB entry.
- *
- * - PRESENT *must* be in the bottom three bits because swap cache
- * entries use the top 29 bits for TLB2.
- *
- * - FILE *must* be in the bottom three bits because swap cache
- * entries use the top 29 bits for TLB2.
- *
- * - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
- * because it doesn't support SMP. However, some later 460 variants
- * have -some- form of SMP support and so I keep the bit there for
- * future use
- *
- * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
- * for memory protection related functions (see PTE structure in
- * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the
- * above bits. Note that the bit values are CPU specific, not architecture
- * specific.
- *
- * The kernel PTE entry holds an arch-dependent swp_entry structure under
- * certain situations. In other words, in such situations some portion of
- * the PTE bits are used as a swp_entry. In the PPC implementation, the
- * 3-24th LSB are shared with swp_entry, however the 0-2nd three LSB still
- * hold protection values. That means the three protection bits are
- * reserved for both PTE and SWAP entry at the most significant three
- * LSBs.
- *
- * There are three protection bits available for SWAP entry:
- * _PAGE_PRESENT
- * _PAGE_FILE
- * _PAGE_HASHPTE (if HW has)
- *
- * So those three bits have to be inside of 0-2nd LSB of PTE.
- *
- */
-
-#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
-#define _PAGE_RW 0x00000002 /* S: Write permission */
-#define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */
-#define _PAGE_HWEXEC 0x00000004 /* H: Execute permission */
-#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
-#define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
-#define _PAGE_SPECIAL 0x00000020 /* S: Special page */
-#define _PAGE_USER 0x00000040 /* S: User page */
-#define _PAGE_ENDIAN 0x00000080 /* H: E bit */
-#define _PAGE_GUARDED 0x00000100 /* H: G bit */
-#define _PAGE_COHERENT 0x00000200 /* H: M bit */
-#define _PAGE_NO_CACHE 0x00000400 /* H: I bit */
-#define _PAGE_WRITETHRU 0x00000800 /* H: W bit */
-
-/* TODO: Add large page lowmem mapping support */
-#define _PMD_PRESENT 0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD (~PAGE_MASK)
-
-/* ERPN in a PTE never gets cleared, ignore it */
-#define _PTE_NONE_MASK 0xffffffff00000000ULL
-
-#define __HAVE_ARCH_PTE_SPECIAL
-
+#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE)
-/*
- MMU Assist Register 3:
-
- 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
- RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR
-
- - PRESENT *must* be in the bottom three bits because swap cache
- entries use the top 29 bits.
-
- - FILE *must* be in the bottom three bits because swap cache
- entries use the top 29 bits.
-*/
-
-/* Definitions for FSL Book-E Cores */
-#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
-#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
-#define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */
-#define _PAGE_RW 0x00004 /* S: Write permission (SW) */
-#define _PAGE_DIRTY 0x00008 /* S: Page dirty */
-#define _PAGE_HWEXEC 0x00010 /* H: SX permission */
-#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */
-
-#define _PAGE_ENDIAN 0x00040 /* H: E bit */
-#define _PAGE_GUARDED 0x00080 /* H: G bit */
-#define _PAGE_COHERENT 0x00100 /* H: M bit */
-#define _PAGE_NO_CACHE 0x00200 /* H: I bit */
-#define _PAGE_WRITETHRU 0x00400 /* H: W bit */
-#define _PAGE_SPECIAL 0x00800 /* S: Special page */
-
-#ifdef CONFIG_PTE_64BIT
-/* ERPN in a PTE never gets cleared, ignore it */
-#define _PTE_NONE_MASK 0xffffffffffff0000ULL
-#endif
-
-#define _PMD_PRESENT 0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD (~PAGE_MASK)
-
-#define __HAVE_ARCH_PTE_SPECIAL
-
+#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
-/* Definitions for 8xx embedded chips. */
-#define _PAGE_PRESENT 0x0001 /* Page is valid */
-#define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */
-#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
-#define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
-
-/* These five software bits must be masked out when the entry is loaded
- * into the TLB.
- */
-#define _PAGE_EXEC 0x0008 /* software: i-cache coherency required */
-#define _PAGE_GUARDED 0x0010 /* software: guarded access */
-#define _PAGE_DIRTY 0x0020 /* software: page changed */
-#define _PAGE_RW 0x0040 /* software: user write access allowed */
-#define _PAGE_ACCESSED 0x0080 /* software: page referenced */
-
-/* Setting any bits in the nibble with the follow two controls will
- * require a TLB exception handler change. It is assumed unused bits
- * are always zero.
- */
-#define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */
-#define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */
-
-#define _PMD_PRESENT 0x0001
-#define _PMD_BAD 0x0ff0
-#define _PMD_PAGE_MASK 0x000c
-#define _PMD_PAGE_8M 0x000c
-
-#define _PTE_NONE_MASK _PAGE_ACCESSED
-
-/* Until my rework is finished, 8xx still needs atomic PTE updates */
-#define PTE_ATOMIC_UPDATES 1
-
+#include <asm/pte-8xx.h>
#else /* CONFIG_6xx */
-/* Definitions for 60x, 740/750, etc. */
-#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
-#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
-#define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
-#define _PAGE_USER 0x004 /* usermode access allowed */
-#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
-#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
-#define _PAGE_DIRTY 0x080 /* C: page changed */
-#define _PAGE_ACCESSED 0x100 /* R: page referenced */
-#define _PAGE_EXEC 0x200 /* software: i-cache coherency required */
-#define _PAGE_RW 0x400 /* software: user write access allowed */
-#define _PAGE_SPECIAL 0x800 /* software: Special page */
-
-#ifdef CONFIG_PTE_64BIT
-/* We never clear the high word of the pte */
-#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
-#else
-#define _PTE_NONE_MASK _PAGE_HASHPTE
+#include <asm/pte-hash32.h>
#endif
-#define _PMD_PRESENT 0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD (~PAGE_MASK)
-
-/* Hash table based platforms need atomic updates of the linux PTE */
-#define PTE_ATOMIC_UPDATES 1
-
-#define __HAVE_ARCH_PTE_SPECIAL
-
-#endif
-
-/*
- * Some bits are only used on some cpu families...
- */
-#ifndef _PAGE_HASHPTE
-#define _PAGE_HASHPTE 0
-#endif
-#ifndef _PTE_NONE_MASK
-#define _PTE_NONE_MASK 0
-#endif
-#ifndef _PAGE_SHARED
-#define _PAGE_SHARED 0
-#endif
-#ifndef _PAGE_HWWRITE
-#define _PAGE_HWWRITE 0
-#endif
-#ifndef _PAGE_HWEXEC
-#define _PAGE_HWEXEC 0
-#endif
-#ifndef _PAGE_EXEC
-#define _PAGE_EXEC 0
-#endif
-#ifndef _PAGE_ENDIAN
-#define _PAGE_ENDIAN 0
-#endif
-#ifndef _PAGE_COHERENT
-#define _PAGE_COHERENT 0
-#endif
-#ifndef _PAGE_WRITETHRU
-#define _PAGE_WRITETHRU 0
-#endif
-#ifndef _PAGE_SPECIAL
-#define _PAGE_SPECIAL 0
-#endif
-#ifndef _PMD_PRESENT_MASK
-#define _PMD_PRESENT_MASK _PMD_PRESENT
-#endif
-#ifndef _PMD_SIZE
-#define _PMD_SIZE 0
-#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
-#endif
-
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SPECIAL)
-
-
-#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
- _PAGE_WRITETHRU | _PAGE_ENDIAN | \
- _PAGE_USER | _PAGE_ACCESSED | \
- _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
- _PAGE_EXEC | _PAGE_HWEXEC)
-
-/*
- * We define 2 sets of base prot bits, one for basic pages (ie,
- * cacheable kernel and user pages) and one for non cacheable
- * pages. We always set _PAGE_COHERENT when SMP is enabled or
- * the processor might need it for DMA coherency.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
-#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
-#else
-#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
-#endif
-#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)
-
-#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
-#define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
-#define _PAGE_KERNEL_NC (_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)
-
-#ifdef CONFIG_PPC_STD_MMU
-/* On standard PPC MMU, no user access implies kernel read/write access,
- * so to write-protect kernel memory we must turn on user access */
-#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
-#else
-#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED)
-#endif
-
-#define _PAGE_IO (_PAGE_KERNEL_NC | _PAGE_GUARDED)
-#define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC)
-
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
- defined(CONFIG_KPROBES)
-/* We want the debuggers to be able to set breakpoints anywhere, so
- * don't write protect the kernel text */
-#define _PAGE_RAM_TEXT _PAGE_RAM
-#else
-#define _PAGE_RAM_TEXT (_PAGE_KERNEL_RO | _PAGE_HWEXEC)
-#endif
-
-#define PAGE_NONE __pgprot(_PAGE_BASE)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-
-#define PAGE_KERNEL __pgprot(_PAGE_RAM)
-#define PAGE_KERNEL_NOCACHE __pgprot(_PAGE_IO)
-
-/*
- * The PowerPC can only do execute protection on a segment (256MB) basis,
- * not on a page basis. So we consider execute permission the same as read.
- * Also, write permissions imply read permissions.
- * This is the closest we can get..
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY_X
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY_X
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY_X
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED_X
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED_X
+/* And here we include common definitions */
+#include <asm/pte-common.h>
#ifndef __ASSEMBLY__
-/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
- * kernel without large page PMD support */
-extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
-
-/*
- * Conversions between PTE values and page frame numbers.
- */
-
-/* in some case we want to additionaly adjust where the pfn is in the pte to
- * allow room for more flags */
-#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
-#define PFN_SHIFT_OFFSET (PAGE_SHIFT + 8)
-#else
-#define PFN_SHIFT_OFFSET (PAGE_SHIFT)
-#endif
-#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
-#define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
- pgprot_val(prot))
-#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#endif /* __ASSEMBLY__ */
-
-#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
-#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
@@ -546,43 +110,6 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
-#ifndef __ASSEMBLY__
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
-
-static inline pte_t pte_wrprotect(pte_t pte) {
- pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) {
- pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
-static inline pte_t pte_mkold(pte_t pte) {
- pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-
-static inline pte_t pte_mkwrite(pte_t pte) {
- pte_val(pte) |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) {
- pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) {
- pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) {
- pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
- return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
-}
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
- pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
- return pte;
-}
-
/*
* When flushing the tlb entry for a page, we also need to flush the hash
* table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
@@ -599,11 +126,19 @@ extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
unsigned long address);
/*
- * Atomic PTE updates.
+ * PTE updates. This function is called whenever an existing
+ * valid PTE is updated. This does -not- include set_pte_at()
+ * which nowadays only sets a new PTE.
*
- * pte_update clears and sets bit atomically, and returns
- * the old pte value. In the 64-bit PTE case we lock around the
- * low PTE word since we expect ALL flag bits to be there
+ * Depending on the type of MMU, we may need to use atomic updates
+ * and the PTE may be either 32 or 64 bit wide. In the latter case,
+ * when using atomic updates, only the low part of the PTE is
+ * accessed atomically.
+ *
+ * In addition, on 44x, we also maintain a global flag indicating
+ * that an executable user mapping was modified, which is needed
+ * to properly flush the virtually tagged instruction cache of
+ * those implementations.
*/
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
@@ -668,44 +203,6 @@ static inline unsigned long long pte_update(pte_t *p,
#endif /* CONFIG_PTE_64BIT */
/*
- * set_pte stores a linux PTE into the linux page table.
- * On machines which use an MMU hash table we avoid changing the
- * _PAGE_HASHPTE bit.
- */
-
-static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
-#if (_PAGE_HASHPTE != 0) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
- pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
-#elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
-#if _PAGE_HASHPTE != 0
- if (pte_val(*ptep) & _PAGE_HASHPTE)
- flush_hash_entry(mm, ptep, addr);
-#endif
- __asm__ __volatile__("\
- stw%U0%X0 %2,%0\n\
- eieio\n\
- stw%U0%X0 %L2,%1"
- : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
- : "r" (pte) : "memory");
-#else
- *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
- | (pte_val(pte) & ~_PAGE_HASHPTE));
-#endif
-}
-
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
-#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM)
- WARN_ON(pte_present(*ptep));
-#endif
- __set_pte_at(mm, addr, ptep, pte);
-}
-
-/*
* 2.6 calls this without flushing the TLB entry; this is wrong
* for our hash-based implementation, we fix that up here.
*/
@@ -745,24 +242,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
}
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
unsigned long bits = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
+ _PAGE_HWEXEC | _PAGE_EXEC);
pte_update(ptep, 0, bits);
}
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({ \
- int __changed = !pte_same(*(__ptep), __entry); \
- if (__changed) { \
- __ptep_set_access_flags(__ptep, __entry, __dirty); \
- flush_tlb_page_nohash(__vma, __address); \
- } \
- __changed; \
-})
-
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
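A hedged sketch of the kind of caller the PTE-update comment above describes, clearing bits on an existing valid PTE; the wrapper name is hypothetical, though this mirrors what ptep_set_wrprotect() does on 32-bit powerpc.

static inline void example_wrprotect(pte_t *ptep)
{
        /* atomically (where the MMU type requires it) clear the write-permission bit */
        pte_update(ptep, _PAGE_RW, 0);
}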
diff --git a/arch/powerpc/include/asm/pgtable-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
index 1dbca4e7de6..6eefdcffa35 100644
--- a/arch/powerpc/include/asm/pgtable-4k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_PGTABLE_4K_H
-#define _ASM_POWERPC_PGTABLE_4K_H
+#ifndef _ASM_POWERPC_PGTABLE_PPC64_4K_H
+#define _ASM_POWERPC_PGTABLE_PPC64_4K_H
/*
* Entries per page directory level. The PTE level must use a 64b record
* for each page table entry. The PMD and PGD level use a 32b record for
@@ -40,28 +40,6 @@
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-/* PTE bits */
-#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */
-#define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */
-#define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */
-#define _PAGE_F_SECOND _PAGE_SECONDARY
-#define _PAGE_F_GIX _PAGE_GROUP_IX
-#define _PAGE_SPECIAL 0x10000 /* software: special page */
-#define __HAVE_ARCH_PTE_SPECIAL
-
-/* PTE flags to conserve for HPTE identification */
-#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
- _PAGE_SECONDARY | _PAGE_GROUP_IX)
-
-/* There is no 4K PFN hack on 4K pages */
-#define _PAGE_4K_PFN 0
-
-/* PAGE_MASK gives the right answer below, but only by accident */
-/* It should be preserving the high 48 bits and then specifically */
-/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_HPTEFLAGS | _PAGE_SPECIAL)
-
/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS 0
/* Bits to mask out from a PUD to get to the PMD page */
@@ -69,30 +47,6 @@
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS 0
-/* shift to put page number into pte */
-#define PTE_RPN_SHIFT (17)
-
-#ifdef STRICT_MM_TYPECHECKS
-#define __real_pte(e,p) ((real_pte_t){(e)})
-#define __rpte_to_pte(r) ((r).pte)
-#else
-#define __real_pte(e,p) (e)
-#define __rpte_to_pte(r) (__pte(r))
-#endif
-#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12)
-
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
- do { \
- index = 0; \
- shift = mmu_psize_defs[psize].shift; \
-
-#define pte_iterate_hashed_end() } while(0)
-
-#ifdef CONFIG_PPC_HAS_HASH_64K
-#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr)
-#else
-#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
-#endif
/*
* 4-level page tables related bits
@@ -112,6 +66,9 @@
#define pud_ERROR(e) \
printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+/*
+ * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */
#define remap_4k_pfn(vma, addr, pfn, prot) \
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
-#endif /* _ASM_POWERPC_PGTABLE_4K_H */
+
+#endif /* _ASM_POWERPC_PGTABLE_PPC64_4K_H */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
new file mode 100644
index 00000000000..6cc085b945a
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_POWERPC_PGTABLE_PPC64_64K_H
+#define _ASM_POWERPC_PGTABLE_PPC64_64K_H
+
+#include <asm-generic/pgtable-nopud.h>
+
+
+#define PTE_INDEX_SIZE 12
+#define PMD_INDEX_SIZE 12
+#define PUD_INDEX_SIZE 0
+#define PGD_INDEX_SIZE 4
+
+#ifndef __ASSEMBLY__
+
+#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+
+/* With 4k base page size, hugepage PTEs go at the PMD level */
+#define MIN_HUGEPTE_SHIFT PAGE_SHIFT
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#endif /* __ASSEMBLY__ */
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS 0x1ff
+/* Bits to mask out from a PGD/PUD to get to the PMD page */
+#define PUD_MASKED_BITS 0x1ff
+
+#endif /* _ASM_POWERPC_PGTABLE_PPC64_64K_H */
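
For orientation, with the usual 64K base page (PAGE_SHIFT = 16) the index sizes above work out to:

    PMD_SHIFT   = 16 + 12 = 28  ->  each PMD entry maps 2^28 = 256MB
    PGDIR_SHIFT = 28 + 12 = 40  ->  each PGD entry maps 2^40 = 1TB
    PGDIR_SHIFT + PGD_INDEX_SIZE = 44 address bits, i.e. 16TB covered by the page tables
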
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index b0f18be81d9..c40db05f21e 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -11,9 +11,9 @@
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_PPC_64K_PAGES
-#include <asm/pgtable-64k.h>
+#include <asm/pgtable-ppc64-64k.h>
#else
-#include <asm/pgtable-4k.h>
+#include <asm/pgtable-ppc64-4k.h>
#endif
#define FIRST_USER_ADDRESS 0
@@ -25,6 +25,8 @@
PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
+
+/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif
@@ -33,7 +35,6 @@
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
-
/*
* Define the address range of the vmalloc VM area.
*/
@@ -76,83 +77,12 @@
/*
- * Common bits in a linux-style PTE. These match the bits in the
- * (hardware-defined) PowerPC PTE as closely as possible. Additional
- * bits may be defined in pgtable-*.h
+ * Include the PTE bits definitions
*/
-#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
-#define _PAGE_USER 0x0002 /* matches one of the PP bits */
-#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
-#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
-#define _PAGE_GUARDED 0x0008
-#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
-#define _PAGE_DIRTY 0x0080 /* C: page changed */
-#define _PAGE_ACCESSED 0x0100 /* R: page referenced */
-#define _PAGE_RW 0x0200 /* software: user write access allowed */
-#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
-
-/* Strong Access Ordering */
-#define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
-
-#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
-
-#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
-
-/* __pgprot defined in arch/powerpc/include/asm/page.h */
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
-
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE)
-#define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
- _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)
-
-#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
-#define HAVE_PAGE_AGP
-
-#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | \
- _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
- _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
- _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
-/* PTEIDX nibble */
-#define _PTEIDX_SECONDARY 0x8
-#define _PTEIDX_GROUP_IX 0x7
+#include <asm/pte-hash64.h>
+#include <asm/pte-common.h>
-/*
- * POWER4 and newer have per page execute protection, older chips can only
- * do this on a segment (256MB) basis.
- *
- * Also, write permissions imply read permissions.
- * This is the closest we can get..
- *
- * Note due to the way vm flags are laid out, the bits are XWR
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_X
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY_X
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_X
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED_X
-#define __S111 PAGE_SHARED_X
-
#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
@@ -161,32 +91,38 @@
#ifndef __ASSEMBLY__
/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * mk_pte takes a (struct page *) as input
+ * This is the default implementation of various PTE accessors; it's
+ * used in all cases except Book3S with 64K pages where we have a
+ * concept of sub-pages
*/
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+#ifndef __real_pte
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
-{
- pte_t pte;
+#ifdef STRICT_MM_TYPECHECKS
+#define __real_pte(e,p) ((real_pte_t){(e)})
+#define __rpte_to_pte(r) ((r).pte)
+#else
+#define __real_pte(e,p) (e)
+#define __rpte_to_pte(r) (__pte(r))
+#endif
+#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12)
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
+ do { \
+ index = 0; \
+ shift = mmu_psize_defs[psize].shift; \
- pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
- return pte;
-}
+#define pte_iterate_hashed_end() } while(0)
-#define pte_modify(_pte, newprot) \
- (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
+#ifdef CONFIG_PPC_HAS_HASH_64K
+#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr)
+#else
+#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
+#endif
-#define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
-#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+#endif /* __real_pte */
-/* pte_clear moved to later in this file */
-#define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
-#define pte_page(x) pfn_to_page(pte_pfn(x))
+/* pte_clear moved to later in this file */
#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
@@ -235,36 +171,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
-static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
-
-static inline pte_t pte_wrprotect(pte_t pte) {
- pte_val(pte) &= ~(_PAGE_RW); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) {
- pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
-static inline pte_t pte_mkold(pte_t pte) {
- pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) {
- pte_val(pte) |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) {
- pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) {
- pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) {
- return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) {
- pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
- return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
-}
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
@@ -272,6 +178,7 @@ static inline unsigned long pte_update(struct mm_struct *mm,
pte_t *ptep, unsigned long clr,
int huge)
{
+#ifdef PTE_ATOMIC_UPDATES
unsigned long old, tmp;
__asm__ __volatile__(
@@ -284,6 +191,13 @@ static inline unsigned long pte_update(struct mm_struct *mm,
: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
: "cc" );
+#else
+ unsigned long old = pte_val(*ptep);
+ *ptep = __pte(old & ~clr);
+#endif
+ /* huge pages use the old page table lock */
+ if (!huge)
+ assert_pte_locked(mm, addr);
if (old & _PAGE_HASHPTE)
hpte_need_flush(mm, addr, ptep, old, huge);
@@ -359,26 +273,17 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_update(mm, addr, ptep, ~0UL, 0);
}
-/*
- * set_pte stores a linux PTE into the linux page table.
- */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- if (pte_present(*ptep))
- pte_clear(mm, addr, ptep);
- pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
- *ptep = pte;
-}
/* Set the dirty and/or accessed bits atomically in a linux PTE, this
* function doesn't need to flush the hash entry
*/
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
unsigned long bits = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
+ _PAGE_EXEC | _PAGE_HWEXEC);
+
+#ifdef PTE_ATOMIC_UPDATES
unsigned long old, tmp;
__asm__ __volatile__(
@@ -391,16 +296,11 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
:"cc");
+#else
+ unsigned long old = pte_val(*ptep);
+ *ptep = __pte(old | bits);
+#endif
}
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({ \
- int __changed = !pte_same(*(__ptep), __entry); \
- if (__changed) { \
- __ptep_set_access_flags(__ptep, __entry, __dirty); \
- flush_tlb_page_nohash(__vma, __address); \
- } \
- __changed; \
-})
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
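
A hedged sketch of how the generic (non-subpage) iterator above is typically consumed; flush_one_slot() is a made-up stand-in for the real HPTE invalidation, everything else exists in these headers. The two macros open and close a do { } while (0) block, so the caller's body sits between them.

    /* Illustrative only: visit the hash slot recorded for a PTE */
    static void flush_pte_slots(real_pte_t rpte, int psize, unsigned long va)
    {
            unsigned long index, shift;

            pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
                    flush_one_slot(va, __rpte_to_hidx(rpte, index), shift);
            } pte_iterate_hashed_end();
    }
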
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 07f55e60169..eb17da78112 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -6,7 +6,17 @@
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
+
struct mm_struct;
+
+#ifdef CONFIG_DEBUG_VM
+extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
+#else /* CONFIG_DEBUG_VM */
+static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+}
+#endif /* !CONFIG_DEBUG_VM */
+
#endif /* !__ASSEMBLY__ */
#if defined(CONFIG_PPC64)
@@ -17,6 +27,130 @@ struct mm_struct;
#ifndef __ASSEMBLY__
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
+static inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
+static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
+ return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+ pgprot_val(pgprot)); }
+static inline unsigned long pte_pfn(pte_t pte) {
+ return pte_val(pte) >> PTE_RPN_SHIFT; }
+
+/* Keep these as macros to avoid include dependency mess */
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte) {
+ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) {
+ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) {
+ pte_val(pte) |= _PAGE_RW; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) {
+ pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) {
+ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) {
+ return pte; }
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ return pte;
+}
+
+
+/* Insert a PTE: the top-level function is out of line. It uses an inline
+ * low level function in the respective pgtable-* files
+ */
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ pte_t pte);
+
+/* This low level function performs the actual PTE insertion.
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * a horrible mess that I'm not going to try to clean up now, but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, int percpu)
+{
+#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+ /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
+ * helper pte_update() which does an atomic update. We need to do that
+ * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+ * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
+ * the hash bits instead (ie, same as the non-SMP case)
+ */
+ if (percpu)
+ *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+ | (pte_val(pte) & ~_PAGE_HASHPTE));
+ else
+ pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+ /* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
+ * can just store as long as we do the two halves in the right order
+ * with a barrier in between. This is possible because we take care,
+ * in the hash code, to pre-invalidate if the PTE was already hashed,
+ * which synchronizes us with any concurrent invalidation.
+ * In the percpu case, we also fall back to the simple update preserving
+ * the hash bits
+ */
+ if (percpu) {
+ *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+ | (pte_val(pte) & ~_PAGE_HASHPTE));
+ return;
+ }
+#if _PAGE_HASHPTE != 0
+ if (pte_val(*ptep) & _PAGE_HASHPTE)
+ flush_hash_entry(mm, ptep, addr);
+#endif
+ __asm__ __volatile__("\
+ stw%U0%X0 %2,%0\n\
+ eieio\n\
+ stw%U0%X0 %L2,%1"
+ : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+ : "r" (pte) : "memory");
+
+#elif defined(CONFIG_PPC_STD_MMU_32)
+ /* Third case is 32-bit hash table in UP mode; we need to preserve
+ * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+ * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
+ * so we need to keep track that this PTE needs invalidating
+ */
+ *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+ | (pte_val(pte) & ~_PAGE_HASHPTE));
+
+#else
+ /* Anything else just stores the PTE normally. That covers all 64-bit
+ * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+ */
+ *ptep = pte;
+#endif
+}
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, pte_t entry, int dirty);
+
/*
* Macro to mark a page protection value as "uncacheable".
*/
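
As a usage note on the generic accessors and modifiers above: they operate on the PTE by value and compose freely, with set_pte_at() as the single out-of-line entry point for installing the result. A purely illustrative sketch, not code from this patch:

    /* Illustrative only: build a young, dirty kernel PTE for a pfn and install it */
    static void install_example_pte(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep, unsigned long pfn)
    {
            pte_t pte = pfn_pte(pfn, PAGE_KERNEL);

            pte = pte_mkdirty(pte_mkyoung(pte));
            set_pte_at(mm, addr, ptep, pte);
    }
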
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
new file mode 100644
index 00000000000..f4a4db8d555
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * provides masks and opcode images for use by code generation, emulation
+ * and for instructions that older assemblers might not know about
+ */
+#ifndef _ASM_POWERPC_PPC_OPCODE_H
+#define _ASM_POWERPC_PPC_OPCODE_H
+
+#include <linux/stringify.h>
+#include <asm/asm-compat.h>
+
+/* sorted alphabetically */
+#define PPC_INST_DCBA 0x7c0005ec
+#define PPC_INST_DCBA_MASK 0xfc0007fe
+#define PPC_INST_DCBAL 0x7c2005ec
+#define PPC_INST_DCBZL 0x7c2007ec
+#define PPC_INST_ISEL 0x7c00001e
+#define PPC_INST_ISEL_MASK 0xfc00003e
+#define PPC_INST_LSWI 0x7c0004aa
+#define PPC_INST_LSWX 0x7c00042a
+#define PPC_INST_LWSYNC 0x7c2004ac
+#define PPC_INST_MCRXR 0x7c000400
+#define PPC_INST_MCRXR_MASK 0xfc0007fe
+#define PPC_INST_MFSPR_PVR 0x7c1f42a6
+#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
+#define PPC_INST_MSGSND 0x7c00019c
+#define PPC_INST_NOP 0x60000000
+#define PPC_INST_POPCNTB 0x7c0000f4
+#define PPC_INST_POPCNTB_MASK 0xfc0007fe
+#define PPC_INST_RFCI 0x4c000066
+#define PPC_INST_RFDI 0x4c00004e
+#define PPC_INST_RFMCI 0x4c00004c
+
+#define PPC_INST_STRING 0x7c00042a
+#define PPC_INST_STRING_MASK 0xfc0007fe
+#define PPC_INST_STRING_GEN_MASK 0xfc00067e
+
+#define PPC_INST_STSWI 0x7c0005aa
+#define PPC_INST_STSWX 0x7c00052a
+#define PPC_INST_TLBILX 0x7c000626
+#define PPC_INST_WAIT 0x7c00007c
+
+/* macros to insert fields into opcodes */
+#define __PPC_RA(a) ((a & 0x1f) << 16)
+#define __PPC_RB(b) ((b & 0x1f) << 11)
+#define __PPC_T_TLB(t) ((t & 0x3) << 21)
+#define __PPC_WC(w) ((w & 0x3) << 21)
+
+/* Deal with instructions that older assemblers aren't aware of */
+#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \
+ __PPC_RA(a) | __PPC_RB(b))
+#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \
+ __PPC_RA(a) | __PPC_RB(b))
+#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
+ __PPC_RB(b))
+#define PPC_RFCI stringify_in_c(.long PPC_INST_RFCI)
+#define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI)
+#define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI)
+#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_INST_TLBILX | \
+ __PPC_T_TLB(t) | __PPC_RA(a) | __PPC_RB(b))
+#define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b)
+#define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b)
+#define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b)
+#define PPC_WAIT(w) stringify_in_c(.long PPC_INST_WAIT | \
+ __PPC_WC(w))
+
+#endif /* _ASM_POWERPC_PPC_OPCODE_H */
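
The stringify_in_c() wrappers let C code emit these opcode images through inline assembly even when the assembler lacks the mnemonic; the register operand is substituted into the .long expression. A hedged sketch along the lines of what the new doorbell support is expected to do (the function name is made up):

    /* Illustrative only: issue msgsnd via its opcode image */
    static inline void example_msgsnd(u32 msg)
    {
            __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
    }
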
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 1a0d628eb11..f59a66684ae 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -7,6 +7,7 @@
#include <linux/stringify.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
+#include <asm/ppc-opcode.h>
#ifndef __ASSEMBLY__
#error __FILE__ should only be used in assembler files
@@ -167,11 +168,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
#define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority
#define HMT_HIGH or 3,3,3
-/* handle instructions that older assemblers may not know */
-#define RFCI .long 0x4c000066 /* rfci instruction */
-#define RFDI .long 0x4c00004e /* rfdi instruction */
-#define RFMCI .long 0x4c00004c /* rfmci instruction */
-
#ifdef __KERNEL__
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index d3466490104..9eed29eee60 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -313,6 +313,25 @@ static inline void prefetchw(const void *x)
#define HAVE_ARCH_PICK_MMAP_LAYOUT
#endif
+#ifdef CONFIG_PPC64
+static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+{
+ unsigned long sp;
+
+ if (is_32)
+ sp = regs->gpr[1] & 0x0ffffffffUL;
+ else
+ sp = regs->gpr[1];
+
+ return sp;
+}
+#else
+static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+{
+ return regs->gpr[1];
+}
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */
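
A hedged illustration of why get_clean_sp() exists: signal delivery wants the user stack pointer with the upper 32 bits ignored for 32-bit tasks. The caller and FRAME_SIZE below are hypothetical; is_32bit_task() is the helper added to thread_info.h later in this patch.

    /* Illustrative only: pick a 16-byte aligned frame below the cleaned SP */
    static unsigned long example_frame_base(struct pt_regs *regs)
    {
            unsigned long sp = get_clean_sp(regs, is_32bit_task());

            return (sp - FRAME_SIZE) & ~15UL;   /* FRAME_SIZE is an assumed constant */
    }
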
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
index cd24ac16660..0427b0b53d2 100644
--- a/arch/powerpc/include/asm/ps3av.h
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -730,7 +730,7 @@ extern int ps3av_cmd_av_get_hw_conf(struct ps3av_pkt_av_get_hw_conf *);
extern int ps3av_cmd_video_get_monitor_info(struct ps3av_pkt_av_get_monitor_info *,
u32);
-extern int ps3av_set_video_mode(u32);
+extern int ps3av_set_video_mode(int);
extern int ps3av_set_audio_mode(u32, u32, u32, u32, u32);
extern int ps3av_get_auto_mode(void);
extern int ps3av_get_mode(void);
diff --git a/arch/powerpc/include/asm/ps3fb.h b/arch/powerpc/include/asm/ps3fb.h
index e7233a84968..90dbefb8cfc 100644
--- a/arch/powerpc/include/asm/ps3fb.h
+++ b/arch/powerpc/include/asm/ps3fb.h
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/ioctl.h>
+#include <linux/types.h>
/* ioctl */
#define PS3FB_IOCTL_SETMODE _IOW('r', 1, int) /* set video mode */
diff --git a/arch/powerpc/include/asm/pte-40x.h b/arch/powerpc/include/asm/pte-40x.h
new file mode 100644
index 00000000000..07630faae02
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-40x.h
@@ -0,0 +1,64 @@
+#ifndef _ASM_POWERPC_PTE_40x_H
+#define _ASM_POWERPC_PTE_40x_H
+#ifdef __KERNEL__
+
+/*
+ * At present, all PowerPC 400-class processors share a similar TLB
+ * architecture. The instruction and data sides share a unified,
+ * 64-entry, fully-associative TLB which is maintained totally under
+ * software control. In addition, the instruction side has a
+ * hardware-managed, 4-entry, fully-associative TLB which serves as a
+ * first level to the shared TLB. These two TLBs are known as the UTLB
+ * and ITLB, respectively (see "mmu.h" for definitions).
+ *
+ * There are several potential gotchas here. The 40x hardware TLBLO
+ * field looks like this:
+ *
+ * 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * RPN..................... 0 0 EX WR ZSEL....... W I M G
+ *
+ * Where possible we make the Linux PTE bits match up with this
+ *
+ * - bits 20 and 21 must be cleared, because we use 4k pages (40x can
+ * support down to 1k pages), this is done in the TLBMiss exception
+ * handler.
+ * - We use only zones 0 (for kernel pages) and 1 (for user pages)
+ * of the 16 available. Bit 24-26 of the TLB are cleared in the TLB
+ * miss handler. Bit 27 is PAGE_USER, thus selecting the correct
+ * zone.
+ * - PRESENT *must* be in the bottom two bits because swap cache
+ * entries use the top 30 bits. Because 40x doesn't support SMP
+ * anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
+ * is cleared in the TLB miss handler before the TLB entry is loaded.
+ * - All other bits of the PTE are loaded into TLBLO without
+ * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
+ *   software PTE bits. We actually use bits 21, 24, 25, and
+ * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
+ * PRESENT.
+ */
+
+#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
+#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
+#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
+#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
+#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
+#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
+#define _PAGE_RW 0x040 /* software: Writes permitted */
+#define _PAGE_DIRTY 0x080 /* software: dirty page */
+#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
+#define _PAGE_HWEXEC 0x200 /* hardware: EX permission */
+#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
+
+#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
+#define _PMD_BAD 0x802
+#define _PMD_SIZE 0x0e0 /* size field, != 0 for large-page PMD entry */
+#define _PMD_SIZE_4M 0x0c0
+#define _PMD_SIZE_16M 0x0e0
+
+#define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4))
+
+/* Until my rework is finished, 40x still needs atomic PTE updates */
+#define PTE_ATOMIC_UPDATES 1
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PTE_40x_H */
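
The PMD_PAGE_SIZE() encoding above worked out for the two supported large-page sizes:

    PMD_PAGE_SIZE(pmdval) = 1024 << ((pmdval & _PMD_SIZE) >> 4)

    _PMD_SIZE_4M  = 0x0c0:  0x0c0 >> 4 = 12  ->  1024 << 12 = 4MB
    _PMD_SIZE_16M = 0x0e0:  0x0e0 >> 4 = 14  ->  1024 << 14 = 16MB
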
diff --git a/arch/powerpc/include/asm/pte-44x.h b/arch/powerpc/include/asm/pte-44x.h
new file mode 100644
index 00000000000..37e98bcf83e
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-44x.h
@@ -0,0 +1,102 @@
+#ifndef _ASM_POWERPC_PTE_44x_H
+#define _ASM_POWERPC_PTE_44x_H
+#ifdef __KERNEL__
+
+/*
+ * Definitions for PPC440
+ *
+ * Because of the 3 word TLB entries to support 36-bit addressing,
+ * the attributes are difficult to map in such a fashion that they
+ * are easily loaded during exception processing. I decided to
+ * organize the entry so the ERPN is the only portion in the
+ * upper word of the PTE and the attribute bits below are packed
+ * in as sensibly as they can be in the area below a 4KB page size
+ * oriented RPN. This at least makes it easy to load the RPN and
+ * ERPN fields in the TLB. -Matt
+ *
+ * This isn't entirely true anymore, at least some bits are now
+ * easier to move into the TLB from the PTE. -BenH.
+ *
+ * Note that these bits preclude future use of a page size
+ * less than 4KB.
+ *
+ *
+ * PPC 440 core has following TLB attribute fields;
+ *
+ * TLB1:
+ * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * RPN................................. - - - - - - ERPN.......
+ *
+ * TLB2:
+ * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR
+ *
+ * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
+ * TLB2 storage attribute fields. Those are:
+ *
+ * TLB2:
+ * 0...10 11 12 13 14 15 16...31
+ * no change WL1 IL1I IL1D IL2I IL2D no change
+ *
+ * There are some constraints and options when deciding how to map the
+ * software bits into the TLB entry.
+ *
+ * - PRESENT *must* be in the bottom three bits because swap cache
+ * entries use the top 29 bits for TLB2.
+ *
+ * - FILE *must* be in the bottom three bits because swap cache
+ * entries use the top 29 bits for TLB2.
+ *
+ * - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
+ * because it doesn't support SMP. However, some later 460 variants
+ * have -some- form of SMP support and so I keep the bit there for
+ * future use
+ *
+ * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
+ * for memory protection related functions (see PTE structure in
+ * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the
+ * above bits. Note that the bit values are CPU specific, not architecture
+ * specific.
+ *
+ * The kernel PTE entry holds an arch-dependent swp_entry structure under
+ * certain situations. In other words, in such situations some portion of
+ * the PTE bits are used as a swp_entry. In the PPC implementation, the
+ * 3rd-24th LSBs are shared with swp_entry, however the bottom three LSBs
+ * still hold protection values. That means the three protection bits are
+ * reserved for both the PTE and the SWAP entry in the three least
+ * significant bits.
+ *
+ * There are three protection bits available for SWAP entry:
+ * _PAGE_PRESENT
+ * _PAGE_FILE
+ * _PAGE_HASHPTE (if HW has)
+ *
+ * So those three bits have to be inside of 0-2nd LSB of PTE.
+ *
+ */
+
+#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
+#define _PAGE_RW 0x00000002 /* S: Write permission */
+#define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */
+#define _PAGE_HWEXEC 0x00000004 /* H: Execute permission */
+#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
+#define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
+#define _PAGE_SPECIAL 0x00000020 /* S: Special page */
+#define _PAGE_USER 0x00000040 /* S: User page */
+#define _PAGE_ENDIAN 0x00000080 /* H: E bit */
+#define _PAGE_GUARDED 0x00000100 /* H: G bit */
+#define _PAGE_COHERENT 0x00000200 /* H: M bit */
+#define _PAGE_NO_CACHE 0x00000400 /* H: I bit */
+#define _PAGE_WRITETHRU 0x00000800 /* H: W bit */
+
+/* TODO: Add large page lowmem mapping support */
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+
+/* ERPN in a PTE never gets cleared, ignore it */
+#define _PTE_NONE_MASK 0xffffffff00000000ULL
+
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PTE_44x_H */
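
To connect _PTE_NONE_MASK with the generic pte_none() added to pgtable.h in this patch, which tests (pte_val(pte) & ~_PTE_NONE_MASK) == 0, a worked example with a made-up PTE value:

    pte_val(pte)                   = 0x0000000500000000ULL   /* only stale ERPN bits set */
    ~_PTE_NONE_MASK                = 0x00000000ffffffffULL
    pte_val(pte) & ~_PTE_NONE_MASK = 0                       ->  pte_none() is true

so leftover ERPN bits in the high word never make an otherwise empty PTE look populated.
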
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
new file mode 100644
index 00000000000..8c6e3125103
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -0,0 +1,67 @@
+#ifndef _ASM_POWERPC_PTE_8xx_H
+#define _ASM_POWERPC_PTE_8xx_H
+#ifdef __KERNEL__
+
+/*
+ * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
+ * We also use the two level tables, but we can put the real bits in them
+ * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
+ * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
+ * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
+ * based upon user/super access. The TLB does not have accessed nor write
+ * protect. We assume that if the TLB get loaded with an entry it is
+ * accessed, and overload the changed bit for write protect. We use
+ * two bits in the software pte that are supposed to be set to zero in
+ * the TLB entry (24 and 25) for these indicators. Although the level 1
+ * descriptor contains the guarded and writethrough/copyback bits, we can
+ * set these at the page level since they get copied from the Mx_TWC
+ * register when the TLB entry is loaded. We will use bit 27 for guard, since
+ * that is where it exists in the MD_TWC, and bit 26 for writethrough.
+ * These will get masked from the level 2 descriptor at TLB load time, and
+ * copied to the MD_TWC before it gets loaded.
+ * Large page sizes added. We currently support two sizes, 4K and 8M.
+ * This also allows a TLB handler optimization because we can directly
+ * load the PMD into MD_TWC. The 8M pages are only used for kernel
+ * mapping of well known areas. The PMD (PGD) entries contain control
+ * flags in addition to the address, so care must be taken that the
+ * software no longer assumes these are only pointers.
+ */
+
+/* Definitions for 8xx embedded chips. */
+#define _PAGE_PRESENT 0x0001 /* Page is valid */
+#define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */
+#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
+#define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
+
+/* These five software bits must be masked out when the entry is loaded
+ * into the TLB.
+ */
+#define _PAGE_EXEC 0x0008 /* software: i-cache coherency required */
+#define _PAGE_GUARDED 0x0010 /* software: guarded access */
+#define _PAGE_DIRTY 0x0020 /* software: page changed */
+#define _PAGE_RW 0x0040 /* software: user write access allowed */
+#define _PAGE_ACCESSED 0x0080 /* software: page referenced */
+
+/* Setting any bits in the nibble with the following two controls will
+ * require a TLB exception handler change. It is assumed unused bits
+ * are always zero.
+ */
+#define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */
+#define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */
+
+#define _PMD_PRESENT 0x0001
+#define _PMD_BAD 0x0ff0
+#define _PMD_PAGE_MASK 0x000c
+#define _PMD_PAGE_8M 0x000c
+
+#define _PTE_NONE_MASK _PAGE_ACCESSED
+
+/* Until my rework is finished, 8xx still needs atomic PTE updates */
+#define PTE_ATOMIC_UPDATES 1
+
+/* We need to add _PAGE_SHARED to kernel pages */
+#define _PAGE_KERNEL_RO (_PAGE_SHARED)
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PTE_8xx_H */
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
new file mode 100644
index 00000000000..d9740e88680
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -0,0 +1,180 @@
+/* Included from asm/pgtable-*.h only ! */
+
+/*
+ * Some bits are only used on some cpu families... Make sure that all
+ * the undefined ones get a sensible default
+ */
+#ifndef _PAGE_HASHPTE
+#define _PAGE_HASHPTE 0
+#endif
+#ifndef _PAGE_SHARED
+#define _PAGE_SHARED 0
+#endif
+#ifndef _PAGE_HWWRITE
+#define _PAGE_HWWRITE 0
+#endif
+#ifndef _PAGE_HWEXEC
+#define _PAGE_HWEXEC 0
+#endif
+#ifndef _PAGE_EXEC
+#define _PAGE_EXEC 0
+#endif
+#ifndef _PAGE_ENDIAN
+#define _PAGE_ENDIAN 0
+#endif
+#ifndef _PAGE_COHERENT
+#define _PAGE_COHERENT 0
+#endif
+#ifndef _PAGE_WRITETHRU
+#define _PAGE_WRITETHRU 0
+#endif
+#ifndef _PAGE_SPECIAL
+#define _PAGE_SPECIAL 0
+#endif
+#ifndef _PAGE_4K_PFN
+#define _PAGE_4K_PFN 0
+#endif
+#ifndef _PAGE_PSIZE
+#define _PAGE_PSIZE 0
+#endif
+#ifndef _PMD_PRESENT_MASK
+#define _PMD_PRESENT_MASK _PMD_PRESENT
+#endif
+#ifndef _PMD_SIZE
+#define _PMD_SIZE 0
+#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
+#endif
+#ifndef _PAGE_KERNEL_RO
+#define _PAGE_KERNEL_RO 0
+#endif
+#ifndef _PAGE_KERNEL_RW
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#endif
+#ifndef _PAGE_HPTEFLAGS
+#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
+#endif
+#ifndef _PTE_NONE_MASK
+#define _PTE_NONE_MASK _PAGE_HPTEFLAGS
+#endif
+
+/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
+ * kernel without large page PMD support
+ */
+#ifndef __ASSEMBLY__
+extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
+#endif /* __ASSEMBLY__ */
+
+/* Location of the PFN in the PTE. Most 32-bit platforms use the same
+ * value as PAGE_SHIFT here (ie, naturally aligned).
+ * Platforms that differ simply pre-define the value, so we don't override it here
+ */
+#ifndef PTE_RPN_SHIFT
+#define PTE_RPN_SHIFT (PAGE_SHIFT)
+#endif
+
+/* The mask covered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs
+ */
+#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#define PTE_RPN_MAX (1ULL << (64 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK (~((1ULL<<PTE_RPN_SHIFT)-1))
+#else
+#define PTE_RPN_MAX (1UL << (32 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1))
+#endif
+
+/* _PAGE_CHG_MASK masks the bits that are to be preserved across
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+ _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+/* Mask of bits returned by pte_pgprot() */
+#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+ _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
+ _PAGE_USER | _PAGE_ACCESSED | \
+ _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
+ _PAGE_EXEC | _PAGE_HWEXEC)
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
+#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
+#else
+#define _PAGE_BASE (_PAGE_BASE_NC)
+#endif
+
+/* Permission masks used to generate the __P and __S tables.
+ *
+ * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now (we could make write-only
+ * pages on BookE but we don't bother for now). Execute permission control is
+ * possible on platforms that define _PAGE_EXEC
+ *
+ * Note due to the way vm flags are laid out, the bits are XWR
+ */
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_X
+#define __P101 PAGE_READONLY_X
+#define __P110 PAGE_COPY_X
+#define __P111 PAGE_COPY_X
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_X
+#define __S101 PAGE_READONLY_X
+#define __S110 PAGE_SHARED_X
+#define __S111 PAGE_SHARED_X
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+ _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+ _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
+
+/* Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
+ defined(CONFIG_KPROBES)
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
+#endif
+
+/* Make module code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP (PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
+/* Advertise support for _PAGE_SPECIAL */
+#ifdef _PAGE_SPECIAL
+#define __HAVE_ARCH_PTE_SPECIAL
+#endif
+
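
A worked reading of _PAGE_CHG_MASK as pte_modify() (added to pgtable.h in this patch) uses it:

    pte_modify(pte, newprot) = __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))

    kept from the old PTE : PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL
    taken from newprot    : everything else (USER, RW, EXEC, cache attributes, ...)

so a protection change can never lose the page frame number, the hash bookkeeping bits, or the referenced/dirty/special state.
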
diff --git a/arch/powerpc/include/asm/pte-fsl-booke.h b/arch/powerpc/include/asm/pte-fsl-booke.h
new file mode 100644
index 00000000000..10820f58acf
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-fsl-booke.h
@@ -0,0 +1,48 @@
+#ifndef _ASM_POWERPC_PTE_FSL_BOOKE_H
+#define _ASM_POWERPC_PTE_FSL_BOOKE_H
+#ifdef __KERNEL__
+
+/* PTE bit definitions for Freescale BookE SW loaded TLB MMU based
+ * processors
+ *
+ * MMU Assist Register 3:
+ *
+ *   32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
+ *   RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR
+ *
+ * - PRESENT *must* be in the bottom three bits because swap cache
+ *   entries use the top 29 bits.
+ *
+ * - FILE *must* be in the bottom three bits because swap cache
+ *   entries use the top 29 bits.
+ */
+
+/* Definitions for FSL Book-E Cores */
+#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
+#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
+#define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */
+#define _PAGE_RW 0x00004 /* S: Write permission (SW) */
+#define _PAGE_DIRTY 0x00008 /* S: Page dirty */
+#define _PAGE_HWEXEC 0x00010 /* H: SX permission */
+#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */
+
+#define _PAGE_ENDIAN 0x00040 /* H: E bit */
+#define _PAGE_GUARDED 0x00080 /* H: G bit */
+#define _PAGE_COHERENT 0x00100 /* H: M bit */
+#define _PAGE_NO_CACHE 0x00200 /* H: I bit */
+#define _PAGE_WRITETHRU 0x00400 /* H: W bit */
+#define _PAGE_SPECIAL 0x00800 /* S: Special page */
+
+#ifdef CONFIG_PTE_64BIT
+/* ERPN in a PTE never gets cleared, ignore it */
+#define _PTE_NONE_MASK 0xffffffffffff0000ULL
+/* We extend the size of the PTE flags area when using 64-bit PTEs */
+#define PTE_RPN_SHIFT (PAGE_SHIFT + 8)
+#endif
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PTE_FSL_BOOKE_H */
diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
new file mode 100644
index 00000000000..16e571c7f9e
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-hash32.h
@@ -0,0 +1,48 @@
+#ifndef _ASM_POWERPC_PTE_HASH32_H
+#define _ASM_POWERPC_PTE_HASH32_H
+#ifdef __KERNEL__
+
+/*
+ * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
+ * table containing PTEs, together with a set of 16 segment registers,
+ * to define the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings. We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code. Low-level assembler code in hash_low_32.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
+#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+#define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
+#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
+#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x080 /* C: page changed */
+#define _PAGE_ACCESSED 0x100 /* R: page referenced */
+#define _PAGE_EXEC 0x200 /* software: i-cache coherency required */
+#define _PAGE_RW 0x400 /* software: user write access allowed */
+#define _PAGE_SPECIAL 0x800 /* software: Special page */
+
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
+#define _PTE_NONE_MASK _PAGE_HASHPTE
+#endif
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+
+/* Hash table based platforms need atomic updates of the linux PTE */
+#define PTE_ATOMIC_UPDATES 1
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PTE_HASH32_H */
diff --git a/arch/powerpc/include/asm/pte-hash64-4k.h b/arch/powerpc/include/asm/pte-hash64-4k.h
new file mode 100644
index 00000000000..c134e809aac
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-hash64-4k.h
@@ -0,0 +1,17 @@
+/* To be included by pgtable-hash64.h only */
+
+/* PTE bits */
+#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */
+#define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */
+#define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */
+#define _PAGE_F_SECOND _PAGE_SECONDARY
+#define _PAGE_F_GIX _PAGE_GROUP_IX
+#define _PAGE_SPECIAL 0x10000 /* software: special page */
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
+ _PAGE_SECONDARY | _PAGE_GROUP_IX)
+
+/* shift to put page number into pte */
+#define PTE_RPN_SHIFT (17)
+
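
These bit positions are what make the generic __rpte_to_hidx() in pgtable-ppc64.h (pte_val >> 12) line up with the _PTEIDX_* nibble defined in pte-hash64.h:

    _PAGE_GROUP_IX  = 0x7000  ->  0x7000 >> 12 = 0x7 = _PTEIDX_GROUP_IX
    _PAGE_SECONDARY = 0x8000  ->  0x8000 >> 12 = 0x8 = _PTEIDX_SECONDARY
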
diff --git a/arch/powerpc/include/asm/pgtable-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 7389003349a..e05d26fa372 100644
--- a/arch/powerpc/include/asm/pgtable-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -1,76 +1,6 @@
-#ifndef _ASM_POWERPC_PGTABLE_64K_H
-#define _ASM_POWERPC_PGTABLE_64K_H
-
-#include <asm-generic/pgtable-nopud.h>
-
-
-#define PTE_INDEX_SIZE 12
-#define PMD_INDEX_SIZE 12
-#define PUD_INDEX_SIZE 0
-#define PGD_INDEX_SIZE 4
-
-#ifndef __ASSEMBLY__
-#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
-#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
-#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
-
-#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
-#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
-
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-/*
- * For the sub-page protection option, we extend the PGD with one of
- * these. Basically we have a 3-level tree, with the top level being
- * the protptrs array. To optimize speed and memory consumption when
- * only addresses < 4GB are being protected, pointers to the first
- * four pages of sub-page protection words are stored in the low_prot
- * array.
- * Each page of sub-page protection words protects 1GB (4 bytes
- * protects 64k). For the 3-level tree, each page of pointers then
- * protects 8TB.
- */
-struct subpage_prot_table {
- unsigned long maxaddr; /* only addresses < this are protected */
- unsigned int **protptrs[2];
- unsigned int *low_prot[4];
-};
-
-#undef PGD_TABLE_SIZE
-#define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
- sizeof(struct subpage_prot_table))
-
-#define SBP_L1_BITS (PAGE_SHIFT - 2)
-#define SBP_L2_BITS (PAGE_SHIFT - 3)
-#define SBP_L1_COUNT (1 << SBP_L1_BITS)
-#define SBP_L2_COUNT (1 << SBP_L2_BITS)
-#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
-#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
-
-extern void subpage_prot_free(pgd_t *pgd);
-
-static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
-{
- return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
-}
-#endif /* CONFIG_PPC_SUBPAGE_PROT */
-#endif /* __ASSEMBLY__ */
-
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT PAGE_SHIFT
-
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
+/* To be included by pgtable-hash64.h only */
/* Additional PTE bits (don't change without checking asm in hash_low.S) */
-#define __HAVE_ARCH_PTE_SPECIAL
#define _PAGE_SPECIAL 0x00000400 /* software: special page */
#define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */
#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */
@@ -107,21 +37,15 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
* of addressable physical space, or 46 bits for the special 4k PFNs.
*/
#define PTE_RPN_SHIFT (30)
-#define PTE_RPN_MAX (1UL << (64 - PTE_RPN_SHIFT))
-#define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1))
-
-/* _PAGE_CHG_MASK masks of bits that are to be preserved accross
- * pgprot changes
- */
-#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
- _PAGE_ACCESSED | _PAGE_SPECIAL)
-/* Bits to mask out from a PMD to get to the PTE page */
-#define PMD_MASKED_BITS 0x1ff
-/* Bits to mask out from a PGD/PUD to get to the PMD page */
-#define PUD_MASKED_BITS 0x1ff
+#ifndef __ASSEMBLY__
-/* Manipulate "rpte" values */
+/*
+ * With 64K pages on hash table, we have a special PTE format that
+ * uses a second "half" of the page table to encode sub-page information
+ * in order to deal with 64K made of 4K HW pages. Thus we override the
+ * generic accessors and iterators here
+ */
#define __real_pte(e,p) ((real_pte_t) { \
(e), pte_val(*((p) + PTRS_PER_PTE)) })
#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
@@ -130,7 +54,6 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
#define __rpte_sub_valid(rpte, index) \
(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
-
/* Trick: we set __end to va + 64k, which happens to work for
* a 16M page as well, since we want only one iteration
*/
@@ -152,4 +75,41 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \
__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
-#endif /* _ASM_POWERPC_PGTABLE_64K_H */
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+/*
+ * For the sub-page protection option, we extend the PGD with one of
+ * these. Basically we have a 3-level tree, with the top level being
+ * the protptrs array. To optimize speed and memory consumption when
+ * only addresses < 4GB are being protected, pointers to the first
+ * four pages of sub-page protection words are stored in the low_prot
+ * array.
+ * Each page of sub-page protection words protects 1GB (4 bytes
+ * protects 64k). For the 3-level tree, each page of pointers then
+ * protects 8TB.
+ */
+struct subpage_prot_table {
+ unsigned long maxaddr; /* only addresses < this are protected */
+ unsigned int **protptrs[2];
+ unsigned int *low_prot[4];
+};
+
+#undef PGD_TABLE_SIZE
+#define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
+ sizeof(struct subpage_prot_table))
+
+#define SBP_L1_BITS (PAGE_SHIFT - 2)
+#define SBP_L2_BITS (PAGE_SHIFT - 3)
+#define SBP_L1_COUNT (1 << SBP_L1_BITS)
+#define SBP_L2_COUNT (1 << SBP_L2_BITS)
+#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
+#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
+
+extern void subpage_prot_free(pgd_t *pgd);
+
+static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
+{
+ return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
+}
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
+#endif /* __ASSEMBLY__ */
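
The sizing described in the comment above works out as follows with 64K pages (PAGE_SHIFT = 16):

    one 4-byte protection word covers one 64K page
    a 64K page of words    : 2^(16-2) = 16384 words    ->  16384 * 64K = 1GB   (the SBP_L1_* constants)
    a 64K page of pointers : 2^(16-3) =  8192 pointers ->   8192 * 1GB = 8TB   (the SBP_L2_* constants)
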
diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h
new file mode 100644
index 00000000000..0419eeb5327
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-hash64.h
@@ -0,0 +1,54 @@
+#ifndef _ASM_POWERPC_PTE_HASH64_H
+#define _ASM_POWERPC_PTE_HASH64_H
+#ifdef __KERNEL__
+
+/*
+ * Common bits between 4K and 64K pages in a linux-style PTE.
+ * These match the bits in the (hardware-defined) PowerPC PTE as closely
+ * as possible. Additional bits may be defined in pgtable-hash64-*.h
+ *
+ * Note: We only support user read/write permissions. The supervisor always
+ * has full read/write access to pages above PAGE_OFFSET (pages below that
+ * always use the user access permissions).
+ *
+ * We could create a separate kernel read-only mode if we used the 3 PP bit
+ * combinations that newer processors provide, but we currently don't.
+ */
+#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
+#define _PAGE_USER 0x0002 /* matches one of the PP bits */
+#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
+#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
+#define _PAGE_GUARDED 0x0008
+#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x0080 /* C: page changed */
+#define _PAGE_ACCESSED 0x0100 /* R: page referenced */
+#define _PAGE_RW 0x0200 /* software: user write access allowed */
+#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
+
+/* No separate kernel read-only */
+#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
+#define _PAGE_KERNEL_RO _PAGE_KERNEL_RW
+
+/* Strong Access Ordering */
+#define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
+
+/* No page size encoding in the linux PTE */
+#define _PAGE_PSIZE 0
+
+/* PTEIDX nibble */
+#define _PTEIDX_SECONDARY 0x8
+#define _PTEIDX_GROUP_IX 0x7
+
+/* Hash table based platforms need atomic updates of the linux PTE */
+#define PTE_ATOMIC_UPDATES 1
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/pte-hash64-64k.h>
+#else
+#include <asm/pte-hash64-4k.h>
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PTE_HASH64_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index f484a343efb..c9ff1ec9747 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -155,6 +155,8 @@
#define CTRL_RUNLATCH 0x1
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
#define DABR_TRANSLATION (1UL << 2)
+#define DABR_DATA_WRITE (1UL << 1)
+#define DABR_DATA_READ (1UL << 0)
#define SPRN_DABR2 0x13D /* e300 */
#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
#define DABRX_USER (1UL << 0)
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 67453766bff..a56f4d61aa7 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -10,6 +10,7 @@
#define __ASM_POWERPC_REG_BOOKE_H__
/* Machine State Register (MSR) Fields */
+#define MSR_GS (1<<28) /* Guest state */
#define MSR_UCLE (1<<26) /* User-mode cache lock enable */
#define MSR_SPE (1<<25) /* Enable SPE */
#define MSR_DWE (1<<10) /* Debug Wait Enable */
@@ -110,6 +111,7 @@
#define SPRN_L1CSR0 0x3F2 /* L1 Cache Control and Status Register 0 */
#define SPRN_L1CSR1 0x3F3 /* L1 Cache Control and Status Register 1 */
#define SPRN_MMUCSR0 0x3F4 /* MMU Control and Status Register 0 */
+#define SPRN_MMUCFG 0x3F7 /* MMU Configuration Register */
#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
#define SPRN_BUCSR 0x3F5 /* Branch Unit Control and Status */
#define SPRN_L2CSR0 0x3F9 /* L2 Data Cache Control and Status Register 0 */
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h
index f5a4e168e49..1e5cfad0e3f 100644
--- a/arch/powerpc/include/asm/socket.h
+++ b/arch/powerpc/include/asm/socket.h
@@ -61,4 +61,7 @@
#define SO_MARK 36
+#define SO_TIMESTAMPING 37
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 72353f6070a..fe166491e9d 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -65,7 +65,7 @@ SYSCALL(ni_syscall)
SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
COMPAT_SYS_SPU(umask)
SYSCALL_SPU(chroot)
-SYSCALL(ustat)
+COMPAT_SYS(ustat)
SYSCALL_SPU(dup2)
SYSCALL_SPU(getppid)
SYSCALL_SPU(getpgrp)
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index 2a4be19a92c..f612798e1c9 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
-#define arch_align_stack(x) (x)
+extern unsigned long arch_align_stack(unsigned long sp);
/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 9665a26a253..9aba5a38a7c 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -12,8 +12,10 @@
/* We have 8k stacks on ppc32 and 16k on ppc64 */
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64)
#define THREAD_SHIFT 14
+#elif defined(CONFIG_PPC_256K_PAGES)
+#define THREAD_SHIFT 15
#else
#define THREAD_SHIFT 13
#endif
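
Worked out, the three cases give the following stack sizes (the pre-existing "8k stacks on ppc32 and 16k on ppc64" comment no longer covers the new 256K-pages case):

    CONFIG_PPC64          : THREAD_SHIFT = 14  ->  2^14 = 16K stacks
    CONFIG_PPC_256K_PAGES : THREAD_SHIFT = 15  ->  2^15 = 32K stacks
    other ppc32           : THREAD_SHIFT = 13  ->  2^13 =  8K stacks
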
@@ -154,6 +156,13 @@ static inline void set_restore_sigmask(void)
ti->local_flags |= _TLF_RESTORE_SIGMASK;
set_bit(TIF_SIGPENDING, &ti->flags);
}
+
+#ifdef CONFIG_PPC64
+#define is_32bit_task() (test_thread_flag(TIF_32BIT))
+#else
+#define is_32bit_task() (1)
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 6418ceea44b..cd21e5e6b04 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -15,6 +15,7 @@
#include <linux/init.h>
extern void (*udbg_putc)(char c);
+extern void (*udbg_flush)(void);
extern int (*udbg_getc)(void);
extern int (*udbg_getc_poll)(void);