From 5e10cf587a7b1aa34099cbfc244e6f04e96dc835 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Tue, 19 May 2009 00:52:20 -0500
Subject: powerpc/cpm1: Remove IMAP_ADDR

We no longer use IMAP_ADDR for anything, so kill it off.

Signed-off-by: Kumar Gala
---
 arch/powerpc/include/asm/cpm1.h | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 2ff798744c1..7685ffde882 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -598,8 +598,6 @@ typedef struct risc_timer_pram {
 #define CICR_IEN	((uint)0x00000080)	/* Int. enable */
 #define CICR_SPS	((uint)0x00000001)	/* SCC Spread */
 
-#define IMAP_ADDR	(get_immrbase())
-
 #define CPM_PIN_INPUT	0
 #define CPM_PIN_OUTPUT	1
 #define CPM_PIN_PRIMARY	0
--
cgit v1.2.3


From 194002b274e9169a04beb1b23dcc132159bb566c Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 22 Jun 2009 16:35:24 +0200
Subject: perf_counter, x86: Add mmap counter read support

Update the mmap control page with the information needed to use the
userspace RDPMC instruction for self-monitoring.

Signed-off-by: Peter Zijlstra
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 arch/powerpc/include/asm/perf_counter.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index 8ccd4e15576..0ea0639fcf7 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -61,6 +61,8 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
+#define PERF_COUNTER_INDEX_OFFSET	1
+
 /*
  * Only override the default definitions in include/linux/perf_counter.h
  * if we have hardware PMU support.
--
cgit v1.2.3


From 6f0b1c6094b3e8eeeb13f8f16c1b2ef452a6f519 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Mon, 22 Jun 2009 23:13:48 +0000
Subject: powerpc: Swiotlb breaks pseries

Turning on SWIOTLB selects or enables PPC_NEED_DMA_SYNC_OPS, which
means we get the non-empty versions of dma_sync_* in asm/dma-mapping.h.

On my pseries machine the dma_ops have no such routines and we die with
a null pointer. This patch gets it booting; is there a more elegant way
to do it?
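For illustration, a minimal sketch of the failure mode (not part of the
patch; names marked hypothetical are mine, and the ops layout is
abridged): the sync hooks in struct dma_mapping_ops are optional, and a
coherent-DMA platform such as pseries legitimately leaves them NULL.

	struct dma_mapping_ops pseries_like_ops = {
		.map_page	= iommu_map_page_fn,	/* hypothetical */
		.unmap_page	= iommu_unmap_page_fn,	/* hypothetical */
		/* .sync_* hooks intentionally absent (NULL): the
		 * hardware is cache-coherent, there is nothing to sync.
		 */
	};

	/* Old inline helper: with PPC_NEED_DMA_SYNC_OPS forced on by
	 * SWIOTLB, this dereferences the NULL hook and oopses:
	 */
	dma_ops->sync_single_range_for_cpu(dev, handle, 0, size, dir);

	/* Fixed form, as in the hunks below: call only if present. */
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(dev, handle, 0,
						   size, dir);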
Signed-off-by: Michael Ellerman
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/dma-mapping.h | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3d9e887c3c0..b44aaabdd1a 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -309,7 +309,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
 			size, direction);
 }
 
@@ -320,7 +322,9 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_device(dev, dma_handle,
+
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(dev, dma_handle,
 			0, size, direction);
 }
 
@@ -331,7 +335,9 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+
+	if (dma_ops->sync_sg_for_cpu)
+		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +347,9 @@ static inline void dma_sync_sg_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+
+	if (dma_ops->sync_sg_for_device)
+		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
 }
 
 static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -351,7 +359,9 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
 			offset, size, direction);
 }
 
@@ -362,7 +372,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
 			size, direction);
 }
 #else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
--
cgit v1.2.3


From 850f6ac316cf84bba63fdb775c897834eccbfaa3 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Thu, 18 Jun 2009 19:25:00 +0000
Subject: powerpc/mm: Make k(un)map_atomic out of line

Those functions are way too big to be inlined; besides, kmap_atomic()
wants to call debug_kmap_atomic(), which isn't exported to modules and
so causes module link failures.
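For context, a sketch of the kind of modular code that hit the link
failure (a hypothetical driver, not from this patch); with
kmap_atomic() fully inline in the header, every module like this pulled
in a direct call to the unexported debug_kmap_atomic():

	#include <linux/highmem.h>

	/* Briefly map a (possibly highmem) page and checksum it. */
	static u32 sum_page(struct page *page)
	{
		u32 *p = kmap_atomic(page, KM_USER0);
		u32 sum = 0;
		int i;

		for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
			sum += p[i];
		kunmap_atomic(p, KM_USER0);
		return sum;
	}

With the bodies moved out of line (and assuming the out-of-line
definitions are exported), such a module links against
kmap_atomic_prot()/kunmap_atomic() instead.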
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/highmem.h | 57 +++----------------------------------
 1 file changed, 4 insertions(+), 53 deletions(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 684a73f4324..a74c4ee6c02 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -22,9 +22,7 @@
 #ifdef __KERNEL__
 
-#include
 #include
-#include
 #include
 #include
 #include
@@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table;
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
+extern void *kmap_atomic_prot(struct page *page, enum km_type type,
+			      pgprot_t prot);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);
 
 static inline void *kmap(struct page *page)
 {
@@ -79,62 +80,11 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }
 
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-{
-	unsigned int idx;
-	unsigned long vaddr;
-
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
-	debug_kmap_atomic(type);
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
-	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
-	local_flush_tlb_page(NULL, vaddr);
-
-	return (void*) vaddr;
-}
-
 static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	return kmap_atomic_prot(page, type, kmap_prot);
 }
 
-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
-		pagefault_enable();
-		return;
-	}
-
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_page(NULL, vaddr);
-#endif
-	pagefault_enable();
-}
-
 static inline struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long) ptr;
@@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr)
 	return pte_page(*pte);
 }
 
+
 #define flush_cache_kmaps()	flush_cache_all()
 
 #endif /* __KERNEL__ */
--
cgit v1.2.3


From 5d38902c483881645ba16058cffaa478b81e5cfa Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 17 Jun 2009 17:43:59 +0000
Subject: powerpc: Add irqtrace support for 32-bit powerpc

Based on initial work from: Dale Farnsworth

Add the low-level irq tracing hooks for 32-bit powerpc needed to enable
full lockdep functionality.

The approach taken to deal with the code in entry_32.S is that we don't
trace all the transitions of MSR:EE when we just turn it off to peek at
TI_FLAGS without races; we trace only when we are calling into C code
or returning from exceptions with a state that has changed from what
lockdep thinks.
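For reference, the renames below follow the generic irqflags
convention: the architecture supplies the raw_* primitives, and with
CONFIG_TRACE_IRQFLAGS the generic layer wraps them with the lockdep
hooks, roughly like this (an abridged sketch in the spirit of
include/linux/irqflags.h, not a verbatim copy):

	#define local_irq_enable()			\
		do {					\
			trace_hardirqs_on();		\
			raw_local_irq_enable();		\
		} while (0)

	#define local_irq_disable()			\
		do {					\
			raw_local_irq_disable();	\
			trace_hardirqs_off();		\
		} while (0)

Note the ordering: the tracing hook always runs while interrupts are
still (or already) hard-disabled, so lockdep's view cannot race with an
incoming interrupt.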
There's a little bugger, though: if we take an exception that keeps
interrupts enabled (such as an alignment exception) while interrupts
are enabled, we will call trace_hardirqs_on() on the way back
spuriously. Not a big deal, but getting rid of it would require
remembering in pt_regs that the exception was one of the types that
kept interrupts enabled, which we don't know at this stage. (Well, we
could test all cases for regs->trap, but that sucks too much.)

Signed-off-by: Benjamin Herrenschmidt
Tested-by: Kumar Gala
---
 arch/powerpc/include/asm/hw_irq.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 867ab8ed69b..8b505eaaa38 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -68,13 +68,13 @@ static inline int irqs_disabled_flags(unsigned long flags)
 #if defined(CONFIG_BOOKE)
 #define SET_MSR_EE(x)	mtmsr(x)
-#define local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#define raw_local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
 #else
 #define SET_MSR_EE(x)	mtmsr(x)
-#define local_irq_restore(flags)	mtmsr(flags)
+#define raw_local_irq_restore(flags)	mtmsr(flags)
 #endif
 
-static inline void local_irq_disable(void)
+static inline void raw_local_irq_disable(void)
 {
 #ifdef CONFIG_BOOKE
 	__asm__ __volatile__("wrteei 0": : :"memory");
@@ -86,7 +86,7 @@ static inline void local_irq_disable(void)
 #endif
 }
 
-static inline void local_irq_enable(void)
+static inline void raw_local_irq_enable(void)
 {
 #ifdef CONFIG_BOOKE
 	__asm__ __volatile__("wrteei 1": : :"memory");
@@ -98,7 +98,7 @@ static inline void local_irq_enable(void)
 #endif
 }
 
-static inline void local_irq_save_ptr(unsigned long *flags)
+static inline void raw_local_irq_save_ptr(unsigned long *flags)
 {
 	unsigned long msr;
 	msr = mfmsr();
@@ -110,12 +110,12 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #endif
 }
 
-#define local_save_flags(flags)	((flags) = mfmsr())
-#define local_irq_save(flags)	local_irq_save_ptr(&flags)
-#define irqs_disabled()		((mfmsr() & MSR_EE) == 0)
+#define raw_local_save_flags(flags)	((flags) = mfmsr())
+#define raw_local_irq_save(flags)	raw_local_irq_save_ptr(&flags)
+#define raw_irqs_disabled()		((mfmsr() & MSR_EE) == 0)
+#define raw_irqs_disabled_flags(flags)	(((flags) & MSR_EE) == 0)
 
-#define hard_irq_enable()	local_irq_enable()
-#define hard_irq_disable()	local_irq_disable()
+#define hard_irq_disable()	raw_local_irq_disable()
 
 static inline int irqs_disabled_flags(unsigned long flags)
 {
--
cgit v1.2.3


From f97bb36f705da0a86b3ea77bfeee3415fee0b025 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 16 Jun 2009 16:42:49 +0000
Subject: powerpc/rtas: Turn rtas lock into a raw spinlock

RTAS currently uses a normal spinlock. However, it can be called from
contexts where this is not necessarily a good idea. For example, it can
be called while syncing timebases, with the core timebase being frozen.
Unfortunately, that will deadlock in case of lock contention when
spinlock debugging is enabled, as the spinlock debugging code will try
to use __delay(), which ... relies on the timebase being enabled.

Also, RTAS can be used in some low-level IRQ handling code paths, so it
may as well be a raw spinlock for -rt's sake.
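As a sketch of the intended locking pattern (the helper names are
illustrative, not necessarily what rtas.c ends up using): the raw lock
bypasses both the sleeping-lock machinery and the spinlock-debug code
that would call __delay().

	static unsigned long lock_rtas(void)
	{
		unsigned long flags;

		/* Raw locks don't track preemption, so do it by hand,
		 * and keep IRQs off for the duration of the call. */
		local_irq_save(flags);
		preempt_disable();
		__raw_spin_lock(&rtas.lock);

		return flags;
	}

	static void unlock_rtas(unsigned long flags)
	{
		__raw_spin_unlock(&rtas.lock);
		local_irq_restore(flags);
		preempt_enable();
	}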
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/rtas.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 01c12339b30..0af42d20b69 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
 	unsigned long entry;		/* physical address pointer */
 	unsigned long base;		/* physical address pointer */
 	unsigned long size;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct rtas_args args;
 	struct device_node *dev;	/* virtual address pointer */
 };
--
cgit v1.2.3


From c4007a2fbf5f82b7e694c22b5929c87e38415a56 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 16 Jun 2009 16:42:50 +0000
Subject: powerpc: Use one common impl. of RTAS timebase sync and use raw spinlock

Several platforms use their own copy of what is essentially the same
code, using RTAS to synchronize the timebases when bringing up new
CPUs. This moves it all into a single common implementation. It
additionally turns the spinlock into a raw spinlock, since with
spinlock debugging enabled an ordinary spinlock may rely on the
timebase, which is frozen at that point, and finally it masks
interrupts while the timebase is disabled.

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/rtas.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 0af42d20b69..168fce72620 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -245,5 +245,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
 		(devfn << 8) | (reg & 0xff);
 }
 
+extern void __cpuinit rtas_give_timebase(void);
+extern void __cpuinit rtas_take_timebase(void);
+
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_RTAS_H */
--
cgit v1.2.3


From 6c16a74d423f584ed80815ee7b944f5b578dd37a Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Mon, 15 Jun 2009 16:53:43 +0000
Subject: powerpc/mm: Fix potential access to freed pages when using hugetlbfs

When using 64k page sizes, our PTE pages are split in two halves, the
second half containing the "extension" used to keep track of individual
4k pages when not using HW 64k pages.

However, our page tables used for hugetlb have a slightly different
format and don't carry that "second half".

Our code that batches PTEs to be invalidated unconditionally reads the
"second half" (to put it into the batch), which means that when called
to invalidate hugetlb PTEs, it will access unrelated memory. It breaks
when CONFIG_DEBUG_PAGEALLOC is enabled.

This fixes it by only accessing the second half when the _PAGE_COMBO
bit is set in the first half, which indicates that we are dealing with
a "combo" page representing 16x4k subpages. Anything else shouldn't
have this bit set and thus doesn't require loading from the second
half.

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/pte-hash64-64k.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index e05d26fa372..82b72207c51 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,8 @@
  * generic accessors and iterators here
  */
 #define __real_pte(e,p)	((real_pte_t) { \
-	(e), pte_val(*((p) + PTRS_PER_PTE)) })
+	(e), ((e) & _PAGE_COMBO) ? \
+		(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
 #define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
 	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
 #define __rpte_to_pte(r)	((r).pte)
--
cgit v1.2.3


From ad9064d5e22a6a24f828dad63c4775c4d7280bd4 Mon Sep 17 00:00:00 2001
From: Grant Likely
Date: Mon, 29 Jun 2009 13:40:51 +0000
Subject: powerpc: Fix spin_event_timeout() to be robust over context switches

The current implementation of spin_event_timeout() can be interrupted
by an IRQ or a context switch after testing the condition but before
checking the timeout, which can cause the loop to report a timeout even
though the condition actually became true in the middle.

This patch adds one final check of the condition upon exit of the loop
if the last test of the condition was still false.

Signed-off-by: Grant Likely
Acked-by: Timur Tabi
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/delay.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h
index 1e2eb41fa05..52e4d54da2a 100644
--- a/arch/powerpc/include/asm/delay.h
+++ b/arch/powerpc/include/asm/delay.h
@@ -63,6 +63,8 @@ extern void udelay(unsigned long usecs);
 			udelay(delay);					\
 		else							\
 			cpu_relax();					\
+	if (!__ret)							\
+		__ret = (condition);					\
 	__ret;								\
 })
--
cgit v1.2.3


From c99e6efe1ba04561e7d93a81f0be07e37427e835 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 10 Jul 2009 14:57:56 +0200
Subject: sched: INIT_PREEMPT_COUNT

Pull the initial preempt_count value into a single definition site.

Maintainers for alpha, ia64 and m68k: please have a look; your arch
code is funny.

The header magic is a bit odd, but similar to the KERNEL_DS one: CPP
waits to expand these macros until the INIT_THREAD_INFO macro itself is
expanded, which happens in arch/*/kernel/init_task.c, where we've
already included sched.h, so we're good.

Cc: tony.luck@intel.com
Cc: rth@twiddle.net
Cc: geert@linux-m68k.org
Signed-off-by: Peter Zijlstra
Acked-by: Matt Mackall
Signed-off-by: Linus Torvalds
---
 arch/powerpc/include/asm/thread_info.h | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 9aba5a38a7c..c8b32925567 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -46,15 +46,13 @@ struct thread_info {
 
 /*
  * macros/functions for gaining access to the thread information structure
- *
- * preempt_count needs to be 1 initially, until the scheduler is functional.
  */
 #define INIT_THREAD_INFO(tsk)			\
 {						\
 	.task = &tsk,				\
 	.exec_domain = &default_exec_domain,	\
 	.cpu = 0,				\
-	.preempt_count = 1,			\
+	.preempt_count = INIT_PREEMPT_COUNT,	\
 	.restart_block = {			\
 		.fn = do_no_restart_syscall,	\
 	},					\
--
cgit v1.2.3


From 9e1b32caa525cb236e80e9c671e179bcecccc657 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 22 Jul 2009 15:44:28 +1000
Subject: mm: Pass virtual address to [__]p{te,ud,md}_free_tlb()

Upcoming patches to support the new 64-bit "BookE" powerpc architecture
will need to have the virtual address corresponding to a PTE page when
freeing it, due to the way the HW table walker works.
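Concretely, the interface change is just one extra argument on the
freeing hooks; a sketch of the generic shape (the bodies shown are the
trivial fallback form, and arch implementations vary):

	/* before: the hook only knew which PTE page was going away */
	#define __pte_free_tlb(tlb, ptepage)	pte_free((tlb)->mm, (ptepage))

	/* after: callers such as free_pte_range() also pass the virtual
	 * address the page table covered, so an implementation can shoot
	 * down any "indirect" TLB entry for that range before the page
	 * is freed.
	 */
	#define __pte_free_tlb(tlb, ptepage, address)	\
		pte_free((tlb)->mm, (ptepage))

The paragraphs below explain why the address is needed at all.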
Basically, the TLB can be loaded with "large" pages that cover the
whole virtual space (well, sort of: half of it, actually) represented
by a PTE page, and which contain an "indirect" bit indicating that this
TLB entry RPN points to an array of PTEs from which the TLB can then
create direct entries. Thus, in order to invalidate those when PTE
pages are deleted, we need the virtual address to pass to the tlbilx or
tlbivax instructions.

The old trick of sticking it somewhere in the PTE page struct page
sucks too much; the address is almost readily available in all call
sites, and almost everybody implements these as macros, so we may as
well add the argument everywhere. I added it to the pmd and pud
variants for consistency.

Signed-off-by: Benjamin Herrenschmidt
Acked-by: David Howells [MN10300 & FRV]
Acked-by: Nick Piggin
Acked-by: Martin Schwidefsky [s390]
Signed-off-by: Linus Torvalds
---
 arch/powerpc/include/asm/pgalloc-32.h | 2 +-
 arch/powerpc/include/asm/pgalloc-64.h | 4 ++--
 arch/powerpc/include/asm/pgalloc.h    | 6 +++---
 3 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 0815eb40aca..c9500d666a1 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -16,7 +16,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
  */
 /* #define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); }) */
 #define pmd_free(mm, x)			do { } while (0)
-#define __pmd_free_tlb(tlb,x)		do { } while (0)
+#define __pmd_free_tlb(tlb,x,a)		do { } while (0)
 /* #define pgd_populate(mm, pmd, pte)      BUG() */
 
 #ifndef CONFIG_BOOKE
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index afda2bdd860..e6f069c4f71 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -118,11 +118,11 @@ static inline void pgtable_free(pgtable_free_t pgf)
 	kmem_cache_free(pgtable_cache[cachenum], p);
 }
 
-#define __pmd_free_tlb(tlb, pmd)	\
+#define __pmd_free_tlb(tlb, pmd, addr)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
 		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
 #ifndef CONFIG_PPC_64K_PAGES
-#define __pud_free_tlb(tlb, pud)	\
+#define __pud_free_tlb(tlb, pud, addr)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
 		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
 #endif /* CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 5d8480265a7..1730e5e298d 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -38,14 +38,14 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
 extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
 
 #ifdef CONFIG_SMP
-#define __pte_free_tlb(tlb,ptepage)	\
+#define __pte_free_tlb(tlb,ptepage,address)	\
 do { \
 	pgtable_page_dtor(ptepage); \
 	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
+				PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
 } while (0)
 #else
-#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, (pte))
+#define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, (pte))
 #endif
--
cgit v1.2.3