-rw-r--r-- | arch/ppc64/kernel/pSeries_lpar.c |  5
-rw-r--r-- | arch/ppc64/mm/hash_native.c      | 13
-rw-r--r-- | arch/ppc64/mm/hash_utils.c       | 21
-rw-r--r-- | arch/ppc64/mm/tlb.c              | 25
-rw-r--r-- | include/asm-ppc64/machdep.h      |  5
-rw-r--r-- | include/asm-ppc64/tlbflush.h     |  7
6 files changed, 28 insertions, 48 deletions
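In short, this change drops the context argument from the hash flush paths: hpte_update() now computes the hash-table virtual address up front, while the mm is still in hand, and stores it in the per-CPU batch's vaddr[] array, so flush_hash_page(), flush_hash_range(), and the platform hooks all consume a ready-made VA. Below is a minimal sketch of the VA computation being hoisted; make_hpte_va() is a hypothetical name, while KERNELBASE, get_vsid(), and get_kernel_vsid() mirror identifiers that appear in the diff.

/* Sketch (not kernel code) of the VA formation this patch moves into
 * hpte_update(). On ppc64, a VSID selects a 256MB segment and the low
 * 28 bits of the effective address give the offset within it. */
#define KERNELBASE	0xC000000000000000UL	/* ppc64 linear mapping base */

/* Declared only so the sketch is self-contained; the real lookups
 * live in the kernel's SLB/segment code. */
extern unsigned long get_vsid(unsigned long context, unsigned long ea);
extern unsigned long get_kernel_vsid(unsigned long ea);

static unsigned long make_hpte_va(unsigned long context, unsigned long ea)
{
	unsigned long vsid;

	if (ea < KERNELBASE)	/* user address: VSID depends on the mm's context */
		vsid = get_vsid(context, ea);
	else			/* kernel address: VSID is fixed by the address */
		vsid = get_kernel_vsid(ea);

	return (vsid << 28) | (ea & 0x0fffffff);
}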
diff --git a/arch/ppc64/kernel/pSeries_lpar.c b/arch/ppc64/kernel/pSeries_lpar.c
index a6de83f2078..268d8362dde 100644
--- a/arch/ppc64/kernel/pSeries_lpar.c
+++ b/arch/ppc64/kernel/pSeries_lpar.c
@@ -486,8 +486,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
  * lock.
  */
-void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
-				   int local)
+void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 {
 	int i;
 	unsigned long flags = 0;
@@ -498,7 +497,7 @@ void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
 		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
 
 	for (i = 0; i < number; i++)
-		flush_hash_page(context, batch->addr[i], batch->pte[i], local);
+		flush_hash_page(batch->vaddr[i], batch->pte[i], local);
 
 	if (lock_tlbie)
 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 7626bb59954..29b074505d3 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -335,10 +335,9 @@ static void native_hpte_clear(void)
 	local_irq_restore(flags);
 }
 
-static void native_flush_hash_range(unsigned long context,
-				    unsigned long number, int local)
+static void native_flush_hash_range(unsigned long number, int local)
 {
-	unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
+	unsigned long va, vpn, hash, secondary, slot, flags, avpn;
 	int i, j;
 	hpte_t *hptep;
 	unsigned long hpte_v;
@@ -351,13 +350,7 @@ static void native_flush_hash_range(unsigned long context,
 
 	j = 0;
 	for (i = 0; i < number; i++) {
-		if (batch->addr[i] < KERNELBASE)
-			vsid = get_vsid(context, batch->addr[i]);
-		else
-			vsid = get_kernel_vsid(batch->addr[i]);
-
-		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
-		batch->vaddr[j] = va;
+		va = batch->vaddr[j];
 		if (large)
 			vpn = va >> HPAGE_SHIFT;
 		else
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index 09475c8edf7..36cf474b3d3 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -355,18 +355,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	return ret;
 }
 
-void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
-		     int local)
+void flush_hash_page(unsigned long va, pte_t pte, int local)
 {
-	unsigned long vsid, vpn, va, hash, secondary, slot;
+	unsigned long vpn, hash, secondary, slot;
 	unsigned long huge = pte_huge(pte);
 
-	if (ea < KERNELBASE)
-		vsid = get_vsid(context, ea);
-	else
-		vsid = get_kernel_vsid(ea);
-
-	va = (vsid << 28) | (ea & 0x0fffffff);
 	if (huge)
 		vpn = va >> HPAGE_SHIFT;
 	else
@@ -381,17 +374,17 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
 	ppc_md.hpte_invalidate(slot, va, huge, local);
 }
 
-void flush_hash_range(unsigned long context, unsigned long number, int local)
+void flush_hash_range(unsigned long number, int local)
 {
 	if (ppc_md.flush_hash_range) {
-		ppc_md.flush_hash_range(context, number, local);
+		ppc_md.flush_hash_range(number, local);
 	} else {
 		int i;
-		struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+		struct ppc64_tlb_batch *batch =
+			&__get_cpu_var(ppc64_tlb_batch);
 
 		for (i = 0; i < number; i++)
-			flush_hash_page(context, batch->addr[i], batch->pte[i],
-					local);
+			flush_hash_page(batch->vaddr[i], batch->pte[i], local);
 	}
 }
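The hash_utils.c hunk above keeps the existing dispatch shape: a platform that supplies ppc_md.flush_hash_range (as pSeries LPAR does) flushes the whole batch itself, and everything else falls back to the per-page loop. A rough standalone sketch of that ops-table fallback pattern follows; struct machdep and generic_flush_range() are hypothetical stand-ins for the kernel's machdep_calls plumbing.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's machdep_calls ops table. */
struct machdep {
	void (*flush_hash_range)(unsigned long number, int local);
};

static struct machdep ppc_md;	/* a platform may set this hook at boot */

/* Generic fallback: flush entries one at a time; the kernel calls
 * flush_hash_page(batch->vaddr[i], batch->pte[i], local) here. */
static void generic_flush_range(unsigned long number, int local)
{
	unsigned long i;

	for (i = 0; i < number; i++)
		printf("flush entry %lu (local=%d)\n", i, local);
}

void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)	/* platform override, e.g. LPAR tlbie batching */
		ppc_md.flush_hash_range(number, local);
	else
		generic_flush_range(number, local);
}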
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index d8a6593a13f..31afd95bf87 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -128,12 +128,10 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 void hpte_update(struct mm_struct *mm, unsigned long addr,
 		 unsigned long pte, int wrprot)
 {
-	int i;
-	unsigned long context = 0;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	unsigned long vsid;
+	int i;
 
-	if (REGION_ID(addr) == USER_REGION_ID)
-		context = mm->context.id;
 	i = batch->index;
 
 	/*
@@ -143,17 +141,19 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 	 * up scanning and resetting referenced bits then our batch context
 	 * will change mid stream.
	 */
-	if (unlikely(i != 0 && context != batch->context)) {
+	if (unlikely(i != 0 && mm != batch->mm)) {
 		flush_tlb_pending();
 		i = 0;
 	}
-
-	if (i == 0) {
-		batch->context = context;
+	if (i == 0)
 		batch->mm = mm;
-	}
+	if (addr < KERNELBASE) {
+		vsid = get_vsid(mm->context.id, addr);
+		WARN_ON(vsid == 0);
+	} else
+		vsid = get_kernel_vsid(addr);
+	batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff);
 	batch->pte[i] = __pte(pte);
-	batch->addr[i] = addr;
 	batch->index = ++i;
 	if (i >= PPC64_TLB_BATCH_NR)
 		flush_tlb_pending();
@@ -175,10 +175,9 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 		local = 1;
 
 	if (i == 1)
-		flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
-				local);
+		flush_hash_page(batch->vaddr[0], batch->pte[0], local);
 	else
-		flush_hash_range(batch->context, i, local);
+		flush_hash_range(i, local);
 	batch->index = 0;
 	put_cpu();
 }
diff --git a/include/asm-ppc64/machdep.h b/include/asm-ppc64/machdep.h
index 8027160ec96..d35d9d3e44c 100644
--- a/include/asm-ppc64/machdep.h
+++ b/include/asm-ppc64/machdep.h
@@ -56,9 +56,8 @@ struct machdep_calls {
 					       unsigned long vflags,
 					       unsigned long rflags);
 	long		(*hpte_remove)(unsigned long hpte_group);
-	void		(*flush_hash_range)(unsigned long context,
-					    unsigned long number,
-					    int local);
+	void		(*flush_hash_range)(unsigned long number, int local);
+
 	/* special for kexec, to be called in real mode, linar mapping is
 	 * destroyed as well */
 	void		(*hpte_clear_all)(void);
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h
index 45411a67e08..800bc0010cf 100644
--- a/include/asm-ppc64/tlbflush.h
+++ b/include/asm-ppc64/tlbflush.h
@@ -20,10 +20,8 @@ struct mm_struct;
 
 struct ppc64_tlb_batch {
 	unsigned long index;
-	unsigned long context;
 	struct mm_struct *mm;
 	pte_t pte[PPC64_TLB_BATCH_NR];
-	unsigned long addr[PPC64_TLB_BATCH_NR];
 	unsigned long vaddr[PPC64_TLB_BATCH_NR];
 };
 
 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
@@ -47,8 +45,7 @@ static inline void flush_tlb_pending(void)
 #define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
 #define flush_tlb_pgtables(mm, start, end)	do { } while (0)
 
-extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
-			    int local);
-void flush_hash_range(unsigned long context, unsigned long number, int local);
+extern void flush_hash_page(unsigned long va, pte_t pte, int local);
+void flush_hash_range(unsigned long number, int local);
 
 #endif /* _PPC64_TLBFLUSH_H */
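The header hunks capture the net effect: ppc64_tlb_batch loses context and addr[] and is keyed on mm, carrying only ready-to-use virtual addresses. Keying the batch on mm rather than a context id also removes the REGION_ID() check in hpte_update(), since the user/kernel distinction is now made once, at the point where the VA is formed. Reconstructed post-patch shape of the batch, shown standalone for clarity (pte_t comes from the kernel headers, and the PPC64_TLB_BATCH_NR value below is an assumption about this era's header, not part of the diff):

/* Post-patch per-CPU flush batch, per the tlbflush.h hunk above. */
#define PPC64_TLB_BATCH_NR 192	/* assumed value; see the kernel header */

struct ppc64_tlb_batch {
	unsigned long index;			/* entries queued so far */
	struct mm_struct *mm;			/* batch flushes early if mm changes */
	pte_t pte[PPC64_TLB_BATCH_NR];		/* old PTE values */
	unsigned long vaddr[PPC64_TLB_BATCH_NR];/* precomputed (vsid << 28) | offset */
};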