Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_native_64.c  |  7 ++++++-
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c   |  5 +++--
-rw-r--r--  arch/powerpc/mm/lmb.c             |  4 ++++
-rw-r--r--  arch/powerpc/mm/mem.c             | 14 ++------------
4 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index d96bcfe4c6f..33654d1b1b4 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -403,12 +403,17 @@ static void native_hpte_clear(void)
 		 */
 		hpte_v = hptep->v;
 
+		/*
+		 * Call __tlbie() here rather than tlbie() since we
+		 * already hold the native_tlbie_lock.
+		 */
 		if (hpte_v & HPTE_V_VALID) {
 			hptep->v = 0;
-			tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K, 0);
+			__tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K);
 		}
 	}
 
+	asm volatile("eieio; tlbsync; ptesync":::"memory");
 	spin_unlock(&native_tlbie_lock);
 	local_irq_restore(flags);
 }
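
The point of this hunk: native_hpte_clear() takes native_tlbie_lock itself, so it cannot call the locked tlbie() wrapper (kernel spinlocks are not recursive, so re-acquiring the lock would deadlock), and issuing a completion sync per hash-table entry is wasted work when a single one after the loop suffices. A rough sketch of the wrapper being bypassed, simplified and assuming the usual shape of this helper rather than quoting the file:

/* Sketch only: a simplified tlbie() wrapper, omitting the tlbiel
 * (local-invalidate) path the real helper also handles. */
static inline void tlbie(unsigned long va, int psize, int local)
{
	spin_lock(&native_tlbie_lock);	/* would self-deadlock when called
					   from native_hpte_clear() */
	__tlbie(va, psize);		/* issue the raw tlbie instruction */
	/* wait for the invalidation to be visible to all CPUs */
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	spin_unlock(&native_tlbie_lock);
}
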
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 149351a84b9..e9d589eefc1 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -88,6 +88,7 @@ static unsigned long _SDR1;
 
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 hpte_t *htab_address;
+unsigned long htab_size_bytes;
 unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
@@ -168,7 +169,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 #ifdef CONFIG_PPC_ISERIES
 		if (_machine == PLATFORM_ISERIES_LPAR)
 			ret = iSeries_hpte_insert(hpteg, va,
-					virt_to_abs(paddr),
+					__pa(vaddr),
 					tmp_mode,
 					HPTE_V_BOLTED,
 					psize);
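
The bolted HPTE wants the physical address of the page being mapped. Deriving it as __pa(vaddr) from the virtual address keeps the computation correct regardless of where the kernel itself sits in physical memory. As an illustration (the macro body and constant below are the conventional 64-bit values, stated as an assumption rather than quoted from this tree):

/* Illustration only: on ppc64 of this vintage the kernel linear
 * mapping starts at PAGE_OFFSET, and __pa() undoes it by subtraction. */
#define PAGE_OFFSET	0xC000000000000000UL
#define __pa(x)		((unsigned long)(x) - PAGE_OFFSET)

/* __pa(0xC000000002000000UL) == 0x02000000, i.e. the page backing
 * that virtual address lives at physical 32 MB. */
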
@@ -399,7 +400,7 @@ void create_section_mapping(unsigned long start, unsigned long end)
 
 void __init htab_initialize(void)
 {
-	unsigned long table, htab_size_bytes;
+	unsigned long table;
 	unsigned long pteg_count;
 	unsigned long mode_rw;
 	unsigned long base = 0, size = 0;
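
Promoting htab_size_bytes from a local in htab_initialize() to a file-scope global puts the hash table's size next to its address (htab_address), so code outside the init path can see the table's extent. A minimal sketch of a consumer, assuming the extern sits wherever htab_address is declared; overlaps_hash_table() is a hypothetical name, not a function in this patch:

extern hpte_t *htab_address;		/* existing global */
extern unsigned long htab_size_bytes;	/* newly visible with this patch */

/* Hypothetical helper: does [start, start + size) intersect the
 * hash table?  Useful e.g. when carving out memory regions. */
static int overlaps_hash_table(unsigned long start, unsigned long size)
{
	unsigned long htab_start = __pa(htab_address);

	return start < htab_start + htab_size_bytes &&
	       htab_start < start + size;
}
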
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index 9584608fd76..bbe3eac918e 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -197,6 +197,8 @@ long __init lmb_reserve(unsigned long base, unsigned long size)
 {
 	struct lmb_region *_rgn = &(lmb.reserved);
 
+	BUG_ON(0 == size);
+
 	return lmb_add_region(_rgn, base, size);
 }
 
@@ -227,6 +229,8 @@ unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
 	long i, j;
 	unsigned long base = 0;
 
+	BUG_ON(0 == size);
+
 #ifdef CONFIG_PPC32
 	/* On 32-bit, make sure we allocate lowmem */
 	if (max_addr == LMB_ALLOC_ANYWHERE)
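
Both new BUG_ON(0 == size) checks guard the same mistake: a zero-size reservation records a degenerate [base, base) region, and a zero-size allocation "succeeds" while setting no memory aside, so the damage only shows up much later. With the checks, a bad caller dies loudly at the call site. A sketch of the misuse they catch (addresses made up for illustration):

/* Both calls are bogus and now trip BUG_ON() immediately,
 * instead of quietly corrupting the LMB region lists. */
unsigned long addr;

lmb_reserve(0x2000000, 0);		/* "reserves" nothing at 32 MB */
addr = lmb_alloc_base(0, 16, 0x8000000);	/* returns a base backed
						   by no actual region */
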
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 15aac0d78df..550517c2dd4 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -435,17 +435,12 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
 	clear_page(page);
 
-	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-		return;
 	/*
 	 * We shouldn't have to do this, but some versions of glibc
 	 * require it (ld.so assumes zero filled pages are icache clean)
 	 * - Anton
 	 */
-
-	/* avoid an atomic op if possible */
-	if (test_bit(PG_arch_1, &pg->flags))
-		clear_bit(PG_arch_1, &pg->flags);
+	flush_dcache_page(pg);
 }
 EXPORT_SYMBOL(clear_user_page);
 
@@ -469,12 +464,7 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 		return;
 #endif
 
-	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-		return;
-
-	/* avoid an atomic op if possible */
-	if (test_bit(PG_arch_1, &pg->flags))
-		clear_bit(PG_arch_1, &pg->flags);
+	flush_dcache_page(pg);
 }
 
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
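
Both mem.c hunks delete the same open-coded sequence (skip on a coherent icache, otherwise clear PG_arch_1 with a cheap test before the atomic clear) and call flush_dcache_page() instead. The deleted lines mirror what that helper does on powerpc of this era, so the change reads as pure de-duplication; roughly (a sketch reconstructed from the deleted lines, not a quote of the helper):

void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
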