 arch/powerpc/kernel/misc_32.S    | 23 ++++++++++++++++-------
 arch/powerpc/mm/fault.c          |  2 +-
 arch/powerpc/mm/mmu_decl.h       |  4 ++--
 arch/ppc/kernel/misc.S           | 22 +++++++++++++++-------
 arch/ppc/mm/fault.c              |  2 +-
 arch/ppc/mm/mmu_decl.h           |  4 ++--
 arch/ppc/platforms/4xx/ebony.c   |  2 +-
 arch/ppc/platforms/4xx/ocotea.c  |  2 +-
 arch/ppc/platforms/4xx/taishan.c |  2 +-
 include/asm-powerpc/tlbflush.h   | 12 ++++++------
 10 files changed, 46 insertions(+), 29 deletions(-)
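The change in one line: the software-TLB invalidation primitive used by the 4xx family grows a pid argument, so callers name the MMU context whose mapping is being flushed instead of relying on whatever PID the hardware register happens to hold. As a before/after prototype, taken from the include/asm-powerpc/tlbflush.h hunk at the end of this patch:

    /* before: tlbsx searches under whatever PID is live in the SPR */
    extern void _tlbie(unsigned long address);

    /* after: the caller supplies the PID; kernel mappings pass 0 */
    extern void _tlbie(unsigned long address, unsigned int pid);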
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8533de50347..0ed2c7eddc9 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -288,7 +288,16 @@ _GLOBAL(_tlbia)
*/
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
+ /* We run the search with interrupts disabled because we have to change
+ * the PID and I don't want to preempt when that happens.
+ */
+ mfmsr r5
+ mfspr r6,SPRN_PID
+ wrteei 0
+ mtspr SPRN_PID,r4
tlbsx. r3, 0, r3
+ mtspr SPRN_PID,r6
+ wrtee r5
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
@@ -297,23 +306,23 @@ _GLOBAL(_tlbie)
tlbwe r3, r3, TLB_TAG
isync
10:
+
#elif defined(CONFIG_44x)
- mfspr r4,SPRN_MMUCR
- mfspr r5,SPRN_PID /* Get PID */
- rlwimi r4,r5,0,24,31 /* Set TID */
+ mfspr r5,SPRN_MMUCR
+ rlwimi r5,r4,0,24,31 /* Set TID */

/* We have to run the search with interrupts disabled, even critical
* and debug interrupts (in fact the only critical exceptions we have
* are debug and machine check). Otherwise an interrupt which causes
* a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
- mfmsr r5
+ mfmsr r4
lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
- andc r6,r5,r6
+ andc r6,r4,r6
mtmsr r6
- mtspr SPRN_MMUCR,r4
+ mtspr SPRN_MMUCR,r5
tlbsx. r3, 0, r3
- mtmsr r5
+ mtmsr r4
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64,
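Read as C, the new 40x sequence above does the following. This is a descriptive sketch only, not kernel code: mfmsr()/mfspr()/mtspr()/wrteei()/wrtee() stand in for the corresponding instructions, and address/pid are the r3/r4 arguments.

    unsigned long msr = mfmsr();              /* mfmsr r5            */
    unsigned int saved_pid = mfspr(SPRN_PID); /* mfspr r6,SPRN_PID   */

    wrteei(0);                  /* external interrupts off, so nothing
                                 * runs while the PID register holds a
                                 * borrowed value                      */
    mtspr(SPRN_PID, pid);       /* search under the caller's context   */
    /* tlbsx. looks up 'address'; on a hit, tlbwe rewrites the entry's
     * tag with the valid bit clear, invalidating it */
    mtspr(SPRN_PID, saved_pid); /* restore the previous PID            */
    wrtee(msr);                 /* restore the saved EE state          */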
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a18fda361cc..8135da06e0a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -309,7 +309,7 @@ good_area:
set_bit(PG_arch_1, &page->flags);
}
pte_update(ptep, 0, _PAGE_HWEXEC);
- _tlbie(address);
+ _tlbie(address, mm->context.id);
pte_unmap_unlock(ptep, ptl);
up_read(&mm->mmap_sem);
return 0;
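The fault path passes mm->context.id, which is what these parts use as the hardware PID, so the flush hits the faulting task's mapping even though the generic fault code never mentions PIDs. A hypothetical wrapper (flush_user_page is illustrative, not part of the patch) states the contract:

    static inline void flush_user_page(struct mm_struct *mm,
                                       unsigned long address)
    {
            _tlbie(address, mm->context.id); /* user page: owner's PID */
    }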
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index c94a64fd3c0..eb3a732e91d 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -61,12 +61,12 @@ extern unsigned long total_lowmem;
#define mmu_mapin_ram() (0UL)

#elif defined(CONFIG_4xx)
-#define flush_HPTE(X, va, pg) _tlbie(va)
+#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);

#elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(X, va, pg) _tlbie(va)
+#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
extern void adjust_total_lowmem(void);
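flush_HPTE's first parameter was previously dead (note the old placeholder name 'X'); it is now forwarded, and the order flips between the macro (pid first) and _tlbie (pid last). A hypothetical call site, assuming the usual (context, address, page-table entry) argument convention of the existing callers:

    flush_HPTE(mm->context.id, address, pmd_val(*pmd));
    /* under CONFIG_4xx this expands to: _tlbie(address, mm->context.id) */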
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index a22e1f4d94c..2b81e71d6b2 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -224,7 +224,16 @@ _GLOBAL(_tlbia)
*/
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
+ /* We run the search with interrupts disabled because we have to change
+ * the PID and I don't want to preempt when that happens.
+ */
+ mfmsr r5
+ mfspr r6,SPRN_PID
+ wrteei 0
+ mtspr SPRN_PID,r4
tlbsx. r3, 0, r3
+ mtspr SPRN_PID,r6
+ wrtee r5
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
@@ -234,22 +243,21 @@ _GLOBAL(_tlbie)
isync
10:
#elif defined(CONFIG_44x)
- mfspr r4,SPRN_MMUCR
- mfspr r5,SPRN_PID /* Get PID */
- rlwimi r4,r5,0,24,31 /* Set TID */
+ mfspr r5,SPRN_MMUCR
+ rlwimi r5,r4,0,24,31 /* Set TID */

/* We have to run the search with interrupts disabled, even critical
* and debug interrupts (in fact the only critical exceptions we have
* are debug and machine check). Otherwise an interrupt which causes
* a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
- mfmsr r5
+ mfmsr r4
lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
- andc r6,r5,r6
+ andc r6,r4,r6
mtmsr r6
- mtspr SPRN_MMUCR,r4
+ mtspr SPRN_MMUCR,r5
tlbsx. r3, 0, r3
- mtmsr r5
+ mtmsr r4
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64,
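One gloss on the 44x hunks (both copies): the lis/addi pair materializes the 32-bit constant MSR_EE|MSR_CE|MSR_ME|MSR_DE via the usual @ha/@l split, and andc clears exactly those enable bits from the saved MSR. Sketched in C, descriptive only, with the MSR_* masks as in the kernel headers:

    unsigned long msr  = mfmsr();                   /* mfmsr r4 */
    unsigned long mask = MSR_EE | MSR_CE | MSR_ME | MSR_DE;

    mtmsr(msr & ~mask); /* andc + mtmsr: external, critical,
                         * machine-check and debug interrupts all off,
                         * so no TLB miss can clobber MMUCR between
                         * the mtspr and the tlbsx */
    /* mtspr(SPRN_MMUCR, mmucr); tlbsx.; ... */
    mtmsr(msr);         /* restore the original MSR */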
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index 254c23b755e..36c0e7529ed 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -227,7 +227,7 @@ good_area:
set_bit(PG_arch_1, &page->flags);
}
pte_update(ptep, 0, _PAGE_HWEXEC);
- _tlbie(address);
+ _tlbie(address, mm->context.id);
pte_unmap_unlock(ptep, ptl);
up_read(&mm->mmap_sem);
return 0;
diff --git a/arch/ppc/mm/mmu_decl.h b/arch/ppc/mm/mmu_decl.h
index 540f3292b22..f1d4f2109a9 100644
--- a/arch/ppc/mm/mmu_decl.h
+++ b/arch/ppc/mm/mmu_decl.h
@@ -54,12 +54,12 @@ extern unsigned int num_tlbcam_entries;
#define mmu_mapin_ram() (0UL)

#elif defined(CONFIG_4xx)
-#define flush_HPTE(X, va, pg) _tlbie(va)
+#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);

#elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(X, va, pg) _tlbie(va)
+#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
extern void adjust_total_lowmem(void);
diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c
index 05d7184d7e1..453643a0eee 100644
--- a/arch/ppc/platforms/4xx/ebony.c
+++ b/arch/ppc/platforms/4xx/ebony.c
@@ -236,7 +236,7 @@ ebony_early_serial_map(void)
gen550_init(0, &port);

/* Purge TLB entry added in head_44x.S for early serial access */
- _tlbie(UART0_IO_BASE);
+ _tlbie(UART0_IO_BASE, 0);
#endif

port.membase = ioremap64(PPC440GP_UART1_ADDR, 8);
diff --git a/arch/ppc/platforms/4xx/ocotea.c b/arch/ppc/platforms/4xx/ocotea.c
index fd0f971881d..28a712cd480 100644
--- a/arch/ppc/platforms/4xx/ocotea.c
+++ b/arch/ppc/platforms/4xx/ocotea.c
@@ -259,7 +259,7 @@ ocotea_early_serial_map(void)
gen550_init(0, &port);

/* Purge TLB entry added in head_44x.S for early serial access */
- _tlbie(UART0_IO_BASE);
+ _tlbie(UART0_IO_BASE, 0);
#endif

port.membase = ioremap64(PPC440GX_UART1_ADDR, 8);
diff --git a/arch/ppc/platforms/4xx/taishan.c b/arch/ppc/platforms/4xx/taishan.c
index 888c492b4a4..f6a0c6650f3 100644
--- a/arch/ppc/platforms/4xx/taishan.c
+++ b/arch/ppc/platforms/4xx/taishan.c
@@ -316,7 +316,7 @@ taishan_early_serial_map(void)
gen550_init(0, &port);

/* Purge TLB entry added in head_44x.S for early serial access */
- _tlbie(UART0_IO_BASE);
+ _tlbie(UART0_IO_BASE, 0);
#endif

port.membase = ioremap64(PPC440GX_UART1_ADDR, 8);
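All three board files pass 0 for the new argument: the early-serial TLB entry installed by head_44x.S is a pinned kernel mapping, not tied to any process context, so it is searched under PID 0. The call-site convention across the whole patch, in summary:

    _tlbie(vmaddr, vma->vm_mm->context.id); /* user page: owning mm's PID */
    _tlbie(UART0_IO_BASE, 0);               /* pinned kernel entry: PID 0 */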
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index b6b036ccee3..e7b4c0d298a 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H
+
/*
* TLB flushing:
*
@@ -16,9 +17,6 @@
*/
#ifdef __KERNEL__

-struct mm_struct;
-struct vm_area_struct;
-
#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
/*
* TLB flushing for software loaded TLB chips
@@ -28,7 +26,9 @@ struct vm_area_struct;
* specific tlbie's
*/

-extern void _tlbie(unsigned long address);
+#include <linux/mm.h>
+
+extern void _tlbie(unsigned long address, unsigned int pid);

#if defined(CONFIG_40x) || defined(CONFIG_8xx)
#define _tlbia() asm volatile ("tlbia; sync" : : : "memory")
@@ -44,13 +44,13 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
- _tlbie(vmaddr);
+ _tlbie(vmaddr, vma->vm_mm->context.id);
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
unsigned long vmaddr)
{
- _tlbie(vmaddr);
+ _tlbie(vmaddr, vma->vm_mm->context.id);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,