Diffstat (limited to 'arch/ppc64/mm')
-rw-r--r--  arch/ppc64/mm/fault.c        37
-rw-r--r--  arch/ppc64/mm/hash_low.S      2
-rw-r--r--  arch/ppc64/mm/hash_native.c   6
-rw-r--r--  arch/ppc64/mm/hugetlbpage.c   7
-rw-r--r--  arch/ppc64/mm/init.c          7
-rw-r--r--  arch/ppc64/mm/slb_low.S       2
-rw-r--r--  arch/ppc64/mm/tlb.c           4
7 files changed, 44 insertions, 21 deletions
diff --git a/arch/ppc64/mm/fault.c b/arch/ppc64/mm/fault.c
index 20b0f37e8bf..be3f25cf3e9 100644
--- a/arch/ppc64/mm/fault.c
+++ b/arch/ppc64/mm/fault.c
@@ -29,6 +29,7 @@
 #include <linux/interrupt.h>
 #include <linux/smp_lock.h>
 #include <linux/module.h>
+#include <linux/kprobes.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -37,6 +38,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/kdebug.h>
+#include <asm/siginfo.h>
 
 /*
  * Check whether the instruction at regs->nip is a store using
@@ -76,6 +78,28 @@ static int store_updates_sp(struct pt_regs *regs)
         return 0;
 }
 
+static void do_dabr(struct pt_regs *regs, unsigned long error_code)
+{
+        siginfo_t info;
+
+        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
+                        11, SIGSEGV) == NOTIFY_STOP)
+                return;
+
+        if (debugger_dabr_match(regs))
+                return;
+
+        /* Clear the DABR */
+        set_dabr(0);
+
+        /* Deliver the signal to userspace */
+        info.si_signo = SIGTRAP;
+        info.si_errno = 0;
+        info.si_code = TRAP_HWBKPT;
+        info.si_addr = (void __user *)regs->nip;
+        force_sig_info(SIGTRAP, &info, current);
+}
+
 /*
  * The error_code parameter is
  * - DSISR for a non-SLB data access fault,
@@ -84,8 +108,8 @@ static int store_updates_sp(struct pt_regs *regs)
  * The return value is 0 if the fault was handled, or the signal
  * number if this is a kernel fault that can't be handled here.
  */
-int do_page_fault(struct pt_regs *regs, unsigned long address,
-                  unsigned long error_code)
+int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+                            unsigned long error_code)
 {
         struct vm_area_struct * vma;
         struct mm_struct *mm = current->mm;
@@ -110,12 +134,9 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
         if (!user_mode(regs) && (address >= TASK_SIZE))
                 return SIGSEGV;
 
-        if (error_code & DSISR_DABRMATCH) {
-                if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
-                                        11, SIGSEGV) == NOTIFY_STOP)
-                        return 0;
-                if (debugger_dabr_match(regs))
-                        return 0;
+        if (error_code & DSISR_DABRMATCH) {
+                do_dabr(regs, error_code);
+                return 0;
         }
 
         if (in_atomic() || mm == NULL) {
diff --git a/arch/ppc64/mm/hash_low.S b/arch/ppc64/mm/hash_low.S
index 35eb49e1b89..ee5a5d36bfa 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/ppc64/mm/hash_low.S
@@ -16,7 +16,7 @@
 #include <asm/page.h>
 #include <asm/types.h>
 #include <asm/ppc_asm.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 #include <asm/cputable.h>
 
         .text
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 7626bb59954..bfd385b7713 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -343,9 +343,7 @@ static void native_flush_hash_range(unsigned long context,
         hpte_t *hptep;
         unsigned long hpte_v;
         struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
-
-        /* XXX fix for large ptes */
-        unsigned long large = 0;
+        unsigned long large = batch->large;
 
         local_irq_save(flags);
@@ -407,7 +405,7 @@ static void native_flush_hash_range(unsigned long context,
         asm volatile("ptesync":::"memory");
 
         for (i = 0; i < j; i++)
-                __tlbie(batch->vaddr[i], 0);
+                __tlbie(batch->vaddr[i], large);
 
         asm volatile("eieio; tlbsync; ptesync":::"memory");
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index 338771ec70d..0ea0994ed97 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -710,10 +710,13 @@ repeat:
                         hpte_group = ((~hash & htab_hash_mask) *
                                       HPTES_PER_GROUP) & ~0x7UL;
                         slot = ppc_md.hpte_insert(hpte_group, va, prpn,
-                                                  HPTE_V_LARGE, rflags);
+                                                  HPTE_V_LARGE |
+                                                  HPTE_V_SECONDARY,
+                                                  rflags);
                         if (slot == -1) {
                                 if (mftb() & 0x1)
-                                        hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+                                        hpte_group = ((hash & htab_hash_mask) *
+                                                      HPTES_PER_GROUP)&~0x7UL;
 
                                 ppc_md.hpte_remove(hpte_group);
                                 goto repeat;
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index a14ab87df49..be64b157afc 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -554,12 +554,12 @@ void __init do_init_bootmem(void)
          * present.
          */
         for (i=0; i < lmb.memory.cnt; i++)
-                free_bootmem(lmb_start_pfn(&lmb.memory, i),
+                free_bootmem(lmb.memory.region[i].base,
                              lmb_size_bytes(&lmb.memory, i));
 
         /* reserve the sections we're already using */
         for (i=0; i < lmb.reserved.cnt; i++)
-                reserve_bootmem(lmb_start_pfn(&lmb.reserved, i),
+                reserve_bootmem(lmb.reserved.region[i].base,
                                 lmb_size_bytes(&lmb.reserved, i));
 
         for (i=0; i < lmb.memory.cnt; i++)
@@ -799,8 +799,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
         if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
                 local = 1;
 
-        __hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
-                    0x300, local);
+        __hash_page(ea, 0, vsid, ptep, 0x300, local);
         local_irq_restore(flags);
 }
diff --git a/arch/ppc64/mm/slb_low.S b/arch/ppc64/mm/slb_low.S
index 698d6b9ed6d..a3a03da503b 100644
--- a/arch/ppc64/mm/slb_low.S
+++ b/arch/ppc64/mm/slb_low.S
@@ -21,7 +21,7 @@
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 #include <asm/cputable.h>
 
 /* void slb_allocate(unsigned long ea);
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index d8a6593a13f..21fbffb23a4 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -143,7 +143,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
          * up scanning and resetting referenced bits then our batch context
          * will change mid stream.
          */
-        if (unlikely(i != 0 && context != batch->context)) {
+        if (i != 0 && (context != batch->context ||
+                       batch->large != pte_huge(pte))) {
                 flush_tlb_pending();
                 i = 0;
         }
@@ -151,6 +152,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
         if (i == 0) {
                 batch->context = context;
                 batch->mm = mm;
+                batch->large = pte_huge(pte);
         }
         batch->pte[i] = __pte(pte);
         batch->addr[i] = addr;