From d258e64ef595792d6f749518354b69583e9a97f4 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 28 Jun 2009 06:26:10 +0000
Subject: powerpc: Remove unnecessary semicolons

Signed-off-by: Joe Perches
Acked-by: Geoff Levand
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/tlb_hash64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1be1b5e5979..937eb90677d 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -72,7 +72,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 */
 	if (huge) {
 #ifdef CONFIG_HUGETLB_PAGE
-		psize = get_slice_psize(mm, addr);;
+		psize = get_slice_psize(mm, addr);
 #else
 		BUG();
 		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
--
cgit v1.2.3


From a1ac38ab98e8a79ce225347b725f3b9751c70f1e Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Wed, 17 Jun 2009 18:13:54 +0000
Subject: powerpc: Use pr_devel() in arch/powerpc/mm/mmu_context_nohash.c

pr_debug() can now result in code being generated even when DEBUG
is not defined. That's not really desirable in some places.

With CONFIG_DYNAMIC_DEBUG=y:

size before:
   text    data     bss     dec     hex filename
   1508      48      28    1584     630 powerpc/mm/mmu_context_nohash.o

size after:
   text    data     bss     dec     hex filename
   1088       0      28    1116     45c powerpc/mm/mmu_context_nohash.o

Signed-off-by: Michael Ellerman
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/mmu_context_nohash.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 8343986809c..92a197117d5 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -89,7 +89,7 @@ static unsigned int steal_context_smp(unsigned int id)
 			id = first_context;
 			continue;
 		}
-		pr_debug("[%d] steal context %d from mm @%p\n",
+		pr_devel("[%d] steal context %d from mm @%p\n",
 			 smp_processor_id(), id, mm);

 		/* Mark this mm has having no context anymore */
@@ -126,7 +126,7 @@ static unsigned int steal_context_up(unsigned int id)
 	/* Pick up the victim mm */
 	mm = context_mm[id];

-	pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+	pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);

 	/* Flush the TLB for that context */
 	local_flush_tlb_mm(mm);
@@ -180,7 +180,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	spin_lock(&context_lock);

 #ifndef DEBUG_STEAL_ONLY
-	pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
+	pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
 		 cpu, next, next->context.active, next->context.id);
 #endif
@@ -189,7 +189,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	next->context.active++;
 	if (prev) {
 #ifndef DEBUG_STEAL_ONLY
-		pr_debug(" old context %p active was: %d\n",
+		pr_devel(" old context %p active was: %d\n",
 			 prev, prev->context.active);
 #endif
 		WARN_ON(prev->context.active < 1);
@@ -236,7 +236,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	next->context.id = id;

 #ifndef DEBUG_STEAL_ONLY
-	pr_debug("[%d] picked up new id %d, nrf is now %d\n",
+	pr_devel("[%d] picked up new id %d, nrf is now %d\n",
 		 cpu, id, nr_free_contexts);
 #endif
@@ -247,7 +247,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	 * local TLB for it and unmark it before we use it
 	 */
 	if (test_bit(id, stale_map[cpu])) {
-		pr_debug("[%d] flushing stale context %d for mm @%p !\n",
+		pr_devel("[%d] flushing stale context %d for mm @%p !\n",
 			 cpu, id, next);
 		local_flush_tlb_mm(next);
@@ -314,13 +314,13 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
+		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
 		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
+		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
 		kfree(stale_map[cpu]);
 		stale_map[cpu] = NULL;
 		break;
--
cgit v1.2.3
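
For context on the pr_devel() conversions in this series: with CONFIG_DYNAMIC_DEBUG=y,
pr_debug() expands to a runtime-controllable call site, so it emits code and data even
when DEBUG is not defined. pr_devel(), by contrast, compiles away entirely unless DEBUG
is defined at build time. A rough sketch of its definition (approximate; see
include/linux/kernel.h of this era for the real one):

	/* Sketch only; the actual kernel.h definition may differ slightly. */
	#ifdef DEBUG
	#define pr_devel(fmt, ...) \
		printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
	#else
	#define pr_devel(fmt, ...) \
		({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
	#endif

The "if (0)" arm keeps the arguments type-checked while letting the compiler discard
the call completely, which is where the text/data savings in these patches come from.
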
pr_debug("[%d] flushing stale context %d for mm @%p !\n", + pr_devel("[%d] flushing stale context %d for mm @%p !\n", cpu, id, next); local_flush_tlb_mm(next); @@ -314,13 +314,13 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self, switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: - pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu); + pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu); stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_DEAD: case CPU_DEAD_FROZEN: - pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu); + pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu); kfree(stale_map[cpu]); stale_map[cpu] = NULL; break; -- cgit v1.2.3 From 651e2dd2a1c47bb078d446f123a9ae950ba7a1f0 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 17 Jun 2009 18:13:51 +0000 Subject: powerpc: Cleanup & use pr_devel() in arch/powerpc/mm/slb.c pr_debug() can now result in code being generated even when DEBUG is not defined. That's not really desirable in some places. With CONFIG_DYNAMIC_DEBUG=y: size before: text data bss dec hex filename 3261 416 4 3681 e61 arch/powerpc/mm/slb.o size after: text data bss dec hex filename 2861 248 4 3113 c29 arch/powerpc/mm/slb.o Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/slb.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 3b52c80e5e3..5b7038f248b 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -14,8 +14,6 @@ * 2 of the License, or (at your option) any later version. */ -#undef DEBUG - #include #include #include @@ -27,11 +25,6 @@ #include #include -#ifdef DEBUG -#define DBG(fmt...) printk(fmt) -#else -#define DBG pr_debug -#endif extern void slb_allocate_realmode(unsigned long ea); extern void slb_allocate_user(unsigned long ea); @@ -285,13 +278,13 @@ void slb_initialize(void) patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size); - DBG("SLB: linear LLP = %04lx\n", linear_llp); - DBG("SLB: io LLP = %04lx\n", io_llp); + pr_devel("SLB: linear LLP = %04lx\n", linear_llp); + pr_devel("SLB: io LLP = %04lx\n", io_llp); #ifdef CONFIG_SPARSEMEM_VMEMMAP patch_slb_encoding(slb_miss_kernel_load_vmemmap, SLB_VSID_KERNEL | vmemmap_llp); - DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp); + pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp); #endif } -- cgit v1.2.3 From 29e5fa59e5ebe06d6bd7e04e7752a47ead23f89e Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 17 Jun 2009 18:13:51 +0000 Subject: powerpc: Use pr_devel() in arch/powerpc/mm/gup.c pr_debug() can now result in code being generated even when DEBUG is not defined. That's not really desirable in some places. With CONFIG_DYNAMIC_DEBUG=y: size before: text data bss dec hex filename 3252 384 0 3636 e34 arch/powerpc/mm/gup.o size after: text data bss dec hex filename 2576 96 0 2672 a70 arch/powerpc/mm/gup.o Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/gup.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c index bc400c78c97..bc122a120bf 100644 --- a/arch/powerpc/mm/gup.c +++ b/arch/powerpc/mm/gup.c @@ -159,7 +159,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, int psize; #endif - pr_debug("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? 
"write" : "read"); + pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read"); start &= PAGE_MASK; addr = start; @@ -170,7 +170,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, start, len))) goto slow_irqon; - pr_debug(" aligned: %lx .. %lx\n", start, end); + pr_devel(" aligned: %lx .. %lx\n", start, end); #ifdef CONFIG_HUGETLB_PAGE /* We bail out on slice boundary crossing when hugetlb is @@ -234,7 +234,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, do { VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, a)].shift); ptep = huge_pte_offset(mm, a); - pr_debug(" %016lx: huge ptep %p\n", a, ptep); + pr_devel(" %016lx: huge ptep %p\n", a, ptep); if (!ptep || !gup_huge_pte(ptep, hstate, &a, end, write, pages, &nr)) goto slow; @@ -249,7 +249,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, #ifdef CONFIG_PPC64 VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift); #endif - pr_debug(" %016lx: normal pgd %p\n", addr, + pr_devel(" %016lx: normal pgd %p\n", addr, (void *)pgd_val(pgd)); next = pgd_addr_end(addr, end); if (pgd_none(pgd)) @@ -269,7 +269,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, slow: local_irq_enable(); slow_irqon: - pr_debug(" slow path ! nr = %d\n", nr); + pr_devel(" slow path ! nr = %d\n", nr); /* Try to get the remaining pages with get_user_pages */ start += nr << PAGE_SHIFT; -- cgit v1.2.3 From 30c5af435b2e3e5700b0e4a53ac37a39b3b3516e Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 17 Jun 2009 18:13:52 +0000 Subject: powerpc: Use pr_devel() in do_dcache_icache_coherency() pr_debug() can now result in code being generated even when DEBUG is not defined. That's not really desirable in some places. With CONFIG_DYNAMIC_DEBUG=y: size before: text data bss dec hex filename 2036 368 8 2412 96c arch/powerpc/mm/pgtable.o size after: text data bss dec hex filename 1677 248 8 1933 78d arch/powerpc/mm/pgtable.o Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/pgtable.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/mm') diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index ae1d67cc090..627767d6169 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -129,12 +129,12 @@ static pte_t do_dcache_icache_coherency(pte_t pte) page = pfn_to_page(pfn); if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) { - pr_debug("do_dcache_icache_coherency... flushing\n"); + pr_devel("do_dcache_icache_coherency... flushing\n"); flush_dcache_icache_page(page); set_bit(PG_arch_1, &page->flags); } else - pr_debug("do_dcache_icache_coherency... already clean\n"); + pr_devel("do_dcache_icache_coherency... already clean\n"); return __pte(pte_val(pte) | _PAGE_HWEXEC); } -- cgit v1.2.3 From 9e1b32caa525cb236e80e9c671e179bcecccc657 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 22 Jul 2009 15:44:28 +1000 Subject: mm: Pass virtual address to [__]p{te,ud,md}_free_tlb() mm: Pass virtual address to [__]p{te,ud,md}_free_tlb() Upcoming paches to support the new 64-bit "BookE" powerpc architecture will need to have the virtual address corresponding to PTE page when freeing it, due to the way the HW table walker works. 
From 5156ddce6c0a152ee7ccab2c976c6a8abc8a49b5 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Wed, 29 Jul 2009 23:04:25 -0500
Subject: powerpc/mm: Fix SMP issue with MMU context handling code

In switch_mmu_context() if we call steal_context_smp() to get a context
to use we shouldn't fall through and then call steal_context_up(). Doing
so can be problematic in that the 'mm' that steal_context_up() ends up
using will not get marked dirty in the stale_map[] for other CPUs that
might have used that mm. Thus we could end up with stale TLB entries in
the other CPUs that can cause all kinds of havoc.

Signed-off-by: Kumar Gala
---
 arch/powerpc/mm/mmu_context_nohash.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 92a197117d5..b1a727def15 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -217,6 +217,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 		id = steal_context_smp(id);
 		if (id == MMU_NO_CONTEXT)
 			goto again;
+		goto stolen;
 	}
 #endif /* CONFIG_SMP */
 	id = steal_context_up(id);
--
cgit v1.2.3
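
A simplified, paraphrased sketch of the allocation path in switch_mmu_context() (not
the exact kernel code) shows why the added goto matters: without it, a successfully
stolen SMP context still falls through into steal_context_up().

	again:
		if (nr_free_contexts == 0) {
	#ifdef CONFIG_SMP
			if (num_online_cpus() > 1) {
				id = steal_context_smp(id);
				if (id == MMU_NO_CONTEXT)
					goto again;	/* raced, retry */
				goto stolen;		/* the fix: skip the UP path */
			}
	#endif
			/* steal_context_up() only flushes the local TLB; it never
			 * marks its victim mm stale in stale_map[] for other CPUs,
			 * so reaching it after steal_context_smp() was unsafe. */
			id = steal_context_up(id);
		}
	stolen:
		/* ... install id into next->context.id, update context_mm[] ... */
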