author     Andy Whitcroft <apw@shadowen.org>    2008-06-26 19:55:58 +1000
committer  Paul Mackerras <paulus@samba.org>    2008-07-01 11:28:56 +1000
commit     016b33c4958681c24056abed8ec95844a0da80a3 (patch)
tree       67ba81cb6bc980119fab4eee15f09488e5777012 /include/asm-powerpc
parent     03d70617b8a789c3721afaafde06fcbba7c7ebf1 (diff)
powerpc: Add 64 bit version of huge_ptep_set_wrprotect
The implementation of huge_ptep_set_wrprotect() directly calls
ptep_set_wrprotect() to mark a hugepte write protected.  However this
call is not appropriate on ppc64 kernels, where ptep_set_wrprotect()
is a small-page-only implementation.  This can lead to the hash not
being flushed correctly when a mapping is converted to COW, allowing
processes to continue using the original copy.

Currently huge_ptep_set_wrprotect() unconditionally calls
ptep_set_wrprotect().  This is fine on ppc32 kernels, where that call
is generic.  On 64-bit it is implemented as:

	pte_update(mm, addr, ptep, _PAGE_RW, 0);

On ppc64 this last parameter indicates the page size and is passed
directly on to hpte_need_flush():

	hpte_need_flush(mm, addr, ptep, old, huge);

and this in turn determines the page size we pass to flush_hash_page():

	flush_hash_page(vaddr, rpte, psize, ssize, 0);

As this changes the way the hash is calculated, we flush the wrong
pages, potentially leaving live hashes to the original page.

Move the definition of huge_ptep_set_wrprotect() to the 32/64 bit
specific headers.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
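[Editor's illustration, not part of the patch: a minimal, self-contained
toy model of why that last "huge" argument matters.  The page shifts and
the hash function below are made-up stand-ins, not the real ppc64 HPTE
hash computation; the point is only that the slot chosen for
invalidation depends on the page size, so flushing a hugepage mapping
with the small-page size targets the wrong entry.]

#include <stdio.h>

/*
 * Toy model only -- NOT the real ppc64 hash function.  The entry to
 * invalidate is derived from the virtual address and the page size;
 * using the small-page size for a hugepage mapping therefore flushes
 * the wrong slot and leaves the live hash for the original page.
 */
#define SMALL_SHIFT 12	/* 4K base pages (assumed for illustration) */
#define HUGE_SHIFT  24	/* 16M hugepages (assumed for illustration) */

static unsigned long long toy_hash(unsigned long long vaddr, int shift)
{
	return vaddr >> shift;	/* stand-in for the real hash */
}

int main(void)
{
	unsigned long long vaddr = 0x3f001234000ULL;

	/* huge flag 0: flush target computed as for a 4K page */
	printf("flush target (huge=0): %llx\n", toy_hash(vaddr, SMALL_SHIFT));
	/* huge flag 1: flush target computed as for the hugepage */
	printf("flush target (huge=1): %llx\n", toy_hash(vaddr, HUGE_SHIFT));
	return 0;
}

[In the real code both paths go through pte_update(); the
ppc64-specific huge_ptep_set_wrprotect() added below differs from
ptep_set_wrprotect() only in passing 1 rather than 0 as that final
argument.]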
Diffstat (limited to 'include/asm-powerpc')
 include/asm-powerpc/hugetlb.h       | 6 ------
 include/asm-powerpc/pgtable-ppc32.h | 6 ++++++
 include/asm-powerpc/pgtable-ppc64.h | 9 +++++++++
 3 files changed, 15 insertions(+), 6 deletions(-)
diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h
index 649c6c3b87b..be32ff02f4a 100644
--- a/include/asm-powerpc/hugetlb.h
+++ b/include/asm-powerpc/hugetlb.h
@@ -49,12 +49,6 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
 	return pte_wrprotect(pte);
 }
 
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
-					   unsigned long addr, pte_t *ptep)
-{
-	ptep_set_wrprotect(mm, addr, ptep);
-}
-
 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     unsigned long addr, pte_t *ptep,
 					     pte_t pte, int dirty)
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index dde466bd2ce..e1d2bb57f1d 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -652,6 +652,12 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 {
 	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
 }
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
 
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 2e48be841cc..b2754d46be4 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -312,6 +312,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 		return;
 	old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
 }
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	unsigned long old;
+
+	if ((pte_val(*ptep) & _PAGE_RW) == 0)
+		return;
+	old = pte_update(mm, addr, ptep, _PAGE_RW, 1);
+}
 
 /*
  * We currently remove entries from the hashtable regardless of whether