author		Christian Borntraeger <borntraeger@de.ibm.com>	2008-10-28 11:10:15 +0100
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-10-28 11:12:03 +0100
commit		250cf776f74b5932a1977d0489cae9206e2351dd (patch)
tree		71bb6778e65e8efbdd0cac09a64e4e1194b98c3f /arch/s390/include/asm
parent		2c78091405d6f54748b1fac78c45f2a799e3073a (diff)
[S390] pgtables: Fix race in enable_sie vs. page table ops
The current enable_sie code sets the mm->context.pgstes bit to tell
dup_mm that the new mm should have extended page tables. This bit is also
used by the s390-specific page table primitives to decide on the page
table layout, which means context.pgstes has two meanings. This can cause
all kinds of bugs; for example, shrink_zone can call ptep_clear_flush_young
while enable_sie is running. ptep_clear_flush_young will test
context.pgstes. Since enable_sie changed that value on the old struct mm
without changing the page table layout, ptep_clear_flush_young will do the
wrong thing.
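
To make the race concrete, here is an illustrative C sketch of the old
single-bit scheme. The field name mirrors the real mm_context_t member, but
the structs and functions below are simplified stand-ins, not the actual
kernel code:

/* Illustrative sketch of the OLD single-bit scheme, not actual kernel code;
 * struct and function bodies are reduced to what matters for the race. */
struct mm_context { int pgstes; };		/* one flag, two meanings */
struct mm_struct  { struct mm_context context; };

/* enable_sie (old): flip pgstes on the existing mm so that dup_mm will
 * allocate extended page tables for the copy... */
static void enable_sie_old(struct mm_struct *mm)
{
	mm->context.pgstes = 1;	/* ...but this mm's page tables still
				 * have the normal layout */
}

/* page table primitive (old): trusts pgstes to describe the layout of
 * the page tables it is about to touch */
static int ptep_clear_flush_young_old(struct mm_struct *mm)
{
	if (mm->context.pgstes)
		return 1;	/* assumes pgstes behind the ptes - wrong if
				 * enable_sie_old() just ran on this mm */
	return 0;
}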
The solution is to split pgstes into two bits (see the sketch after this list):
- one for the allocation
- one for the current state
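
Under the new scheme the two meanings live in separate fields. A minimal
sketch of the intended flow, assuming an enable_sie-style caller; the helper
names below are hypothetical, only the header changes in the diff are part
of this commit:

/* Sketch of the NEW scheme; the field names match the patch below, the
 * functions are simplified illustrations, not the real kernel routines. */
struct mm_context { int has_pgste; int alloc_pgste; };
struct mm_struct  { struct mm_context context; };

/* enable_sie-style caller: only request extended page tables for the
 * NEXT context; the current mm keeps its layout and has_pgste stays 0. */
static void request_pgste(struct mm_struct *old_mm)
{
	old_mm->context.alloc_pgste = 1;
	/* ...then create a fresh mm (e.g. via dup_mm); init_new_context
	 * sees alloc_pgste and sets has_pgste = 1 on the new mm only. */
}

/* page table primitives only consult has_pgste, which now always
 * matches the actual layout of the page tables they operate on. */
static int uses_extended_layout(struct mm_struct *mm)
{
	return mm->context.has_pgste;
}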
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--	arch/s390/include/asm/mmu.h		|  3
-rw-r--r--	arch/s390/include/asm/mmu_context.h	| 19
-rw-r--r--	arch/s390/include/asm/pgtable.h		|  8
3 files changed, 22 insertions(+), 8 deletions(-)
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 5dd5e7b3476..d2b4ff83147 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -7,7 +7,8 @@ typedef struct {
 	unsigned long asce_bits;
 	unsigned long asce_limit;
 	int noexec;
-	int pgstes;
+	int has_pgste;	 /* The mmu context has extended page tables */
+	int alloc_pgste; /* cloned contexts will have extended page tables */
 } mm_context_t;
 
 #endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 4c2fbf48c9c..28ec870655a 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -20,12 +20,25 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_64BIT
 	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
-	if (current->mm->context.pgstes) {
+	if (current->mm->context.alloc_pgste) {
+		/*
+		 * alloc_pgste indicates, that any NEW context will be created
+		 * with extended page tables. The old context is unchanged. The
+		 * page table allocation and the page table operations will
+		 * look at has_pgste to distinguish normal and extended page
+		 * tables. The only way to create extended page tables is to
+		 * set alloc_pgste and then create a new context (e.g. dup_mm).
+		 * The page table allocation is called after init_new_context
+		 * and if has_pgste is set, it will create extended page
+		 * tables.
+		 */
 		mm->context.noexec = 0;
-		mm->context.pgstes = 1;
+		mm->context.has_pgste = 1;
+		mm->context.alloc_pgste = 1;
 	} else {
 		mm->context.noexec = s390_noexec;
-		mm->context.pgstes = 0;
+		mm->context.has_pgste = 0;
+		mm->context.alloc_pgste = 0;
 	}
 	mm->context.asce_limit = STACK_TOP_MAX;
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 1a928f84afd..7fc76133b3e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -679,7 +679,7 @@ static inline void pmd_clear(pmd_t *pmd)
 static inline void pte_clear(struct mm_struct *mm,
 			     unsigned long addr, pte_t *ptep)
 {
-	if (mm->context.pgstes)
+	if (mm->context.has_pgste)
 		ptep_rcp_copy(ptep);
 	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
 	if (mm->context.noexec)
@@ -763,7 +763,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
 	struct page *page;
 	unsigned int skey;
 
-	if (!mm->context.pgstes)
+	if (!mm->context.has_pgste)
 		return -EINVAL;
 	rcp_lock(ptep);
 	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
@@ -794,7 +794,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 	int young;
 	unsigned long *pgste;
 
-	if (!vma->vm_mm->context.pgstes)
+	if (!vma->vm_mm->context.has_pgste)
 		return 0;
 	physpage = pte_val(*ptep) & PAGE_MASK;
 	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
@@ -844,7 +844,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 static inline void ptep_invalidate(struct mm_struct *mm,
 				   unsigned long address, pte_t *ptep)
 {
-	if (mm->context.pgstes) {
+	if (mm->context.has_pgste) {
 		rcp_lock(ptep);
 		__ptep_ipte(address, ptep);
 		ptep_rcp_copy(ptep);