Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/mem.c	30
-rw-r--r--	arch/powerpc/mm/slb.c	27
2 files changed, 39 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d9e37f365b5..f67e118116f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -154,19 +154,35 @@ out:
/*
 * walk_memory_resource() needs to make sure there are no holes in a given
- * memory range. On PPC64, since this range comes from /sysfs, the range
- * is guaranteed to be valid, non-overlapping and cannot contain any
- * holes. By the time we get here (memory add or remove), /proc/device-tree
- * is updated and correct. The only reason we would need to check against
- * the device-tree is if we allowed user-land to specify a memory range
- * through a system call/ioctl instead of doing offline/online through /sysfs.
+ * memory range. PPC64 does not maintain the memory layout in /proc/iomem;
+ * instead it keeps it in lmb.memory structures. Walk the memory regions,
+ * skip any holes, and invoke the callback for each contiguous region.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
			int (*func)(unsigned long, unsigned long, void *))
{
-	return (*func)(start_pfn, nr_pages, arg);
+	struct lmb_property res;
+	unsigned long pfn, len;
+	u64 end;
+	int ret = -1;
+
+	res.base = (u64) start_pfn << PAGE_SHIFT;
+	res.size = (u64) nr_pages << PAGE_SHIFT;
+
+	end = res.base + res.size - 1;
+	while ((res.base < end) && (lmb_find(&res) >= 0)) {
+		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
+		len = (unsigned long)(res.size >> PAGE_SHIFT);
+		ret = (*func)(pfn, len, arg);
+		if (ret)
+			break;
+		res.base += (res.size + 1);
+		res.size = (end - res.base + 1);
+	}
+	return ret;
}
+EXPORT_SYMBOL_GPL(walk_memory_resource);
#endif /* CONFIG_MEMORY_HOTPLUG */
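
For reference, callers of the reworked interface supply a per-region callback: the walk invokes it once for each contiguous lmb-backed chunk (holes are skipped) and stops at the first non-zero return. A minimal in-kernel usage sketch; the callback and the page counting it does are hypothetical, not part of this patch:

/* Hypothetical callback: tally pages actually backed by an lmb region.
 * Invoked once per contiguous region with a start pfn and a length in
 * pages; returning non-zero stops the walk early. */
static int count_present_pages(unsigned long start_pfn,
			       unsigned long nr_pages, void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;
}

static unsigned long present_pages(unsigned long base_pfn, unsigned long npages)
{
	unsigned long total = 0;

	walk_memory_resource(base_pfn, npages, &total, count_present_pages);
	return total;
}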
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 906daeda59a..cf8705e32d6 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -30,7 +30,7 @@
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
-#define DBG(fmt...)
+#define DBG pr_debug
#endif
extern void slb_allocate_realmode(unsigned long ea);
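
The DBG change above means debug statements are no longer compiled away to nothing when DEBUG is unset: pr_debug() still type-checks its format arguments in every configuration (and can be enabled at runtime under CONFIG_DYNAMIC_DEBUG), which is what exposes the %04x/%04lx mismatch fixed in a later hunk. A standalone sketch of the same pattern, with fprintf standing in for the kernel's printk path:

#include <stdio.h>

/* Build with -DDEBUG for output; otherwise DBG() compiles to a no-op
 * that still type-checks its arguments, like the kernel's pr_debug(). */
#ifdef DEBUG
#define DBG(fmt, ...)	fprintf(stderr, fmt, ##__VA_ARGS__)
#else
#define DBG(fmt, ...)	do { if (0) fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)
#endif

int main(void)
{
	unsigned long linear_llp = 0x100;	/* made-up value */

	DBG("SLB: linear LLP = %04lx\n", linear_llp);
	return 0;
}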
@@ -44,13 +44,13 @@ static void slb_allocate(unsigned long ea)
	slb_allocate_realmode(ea);
}

+#define slb_esid_mask(ssize)	\
+	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)
+
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 unsigned long slot)
{
-	unsigned long mask;
-
-	mask = (ssize == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T;
-	return (ea & mask) | SLB_ESID_V | slot;
+	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}

#define slb_vsid_shift(ssize) \
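
To make the new helper concrete: slb_esid_mask() keeps the effective-address bits above the segment boundary, bit 28 for 256M segments and bit 40 for 1T segments, and mk_esid_data() ORs in the valid bit and the SLB slot index. A standalone sketch with the constants hard-coded to their usual 64-bit hash-MMU values; the example address and slot are made up:

#include <stdio.h>

/* Usual hash-MMU values: segments are 2^28 or 2^40 bytes. */
#define ESID_MASK	0xfffffffff0000000UL	/* 256M: clear bits 0-27 */
#define ESID_MASK_1T	0xffffff0000000000UL	/* 1T:   clear bits 0-39 */
#define SLB_ESID_V	0x0000000008000000UL	/* entry-valid bit */

#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

static unsigned long mk_esid_data(unsigned long ea, int ssize,
				  unsigned long slot)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}

int main(void)
{
	unsigned long ea = 0xc000000012345678UL;	/* made-up kernel EA */

	printf("256M: %016lx\n", mk_esid_data(ea, MMU_SEGSIZE_256M, 2));
	printf("  1T: %016lx\n", mk_esid_data(ea, MMU_SEGSIZE_1T, 2));
	return 0;
}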
@@ -279,8 +279,8 @@ void slb_initialize(void)
		patch_slb_encoding(slb_compare_rr_to_size,
				   mmu_slb_size);

-		DBG("SLB: linear LLP = %04x\n", linear_llp);
-		DBG("SLB: io LLP = %04x\n", io_llp);
+		DBG("SLB: linear LLP = %04lx\n", linear_llp);
+		DBG("SLB: io LLP = %04lx\n", io_llp);
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED;
@@ -301,11 +301,16 @@ void slb_initialize(void)
	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

+	/* For the boot cpu, we're running on the stack in init_thread_union,
+	 * which is in the first segment of the linear mapping, and also
+	 * get_paca()->kstack hasn't been initialized yet.
+	 * For secondary cpus, we need to bolt the kernel stack entry now.
+	 */
	slb_shadow_clear(2);
+	if (raw_smp_processor_id() != boot_cpuid &&
+	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
+		create_shadowed_slbe(get_paca()->kstack,
+				     mmu_kernel_ssize, lflags, 2);

-	/* We don't bolt the stack for the time being - we're in boot,
-	 * so the stack is in the bolted segment. By the time it goes
-	 * elsewhere, we'll call _switch() which will bolt in the new
-	 * one. */
	asm volatile("isync":::"memory");
}
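
The bolting condition deserves a note: earlier in slb_initialize(), entry 0 is bolted for the first linear-mapping segment at PAGE_OFFSET, which is where the boot cpu's init_thread_union stack lives, so only a stack whose segment base is strictly above PAGE_OFFSET needs its own bolted entry. A standalone sketch of the predicate, using the conventional ppc64 value of PAGE_OFFSET and the 256M mask; the sample stack addresses are made up:

#include <stdio.h>

#define PAGE_OFFSET	0xc000000000000000UL	/* base of the linear mapping */
#define ESID_MASK	0xfffffffff0000000UL	/* 256M segment mask */

/* Mirrors the patch's check: bolt a stack entry only when the stack's
 * segment is not the already-bolted first segment of the linear map. */
static int needs_bolted_stack_entry(unsigned long kstack)
{
	return (kstack & ESID_MASK) > PAGE_OFFSET;
}

int main(void)
{
	/* boot cpu: stack in init_thread_union, first 256M segment -> 0 */
	printf("%d\n", needs_bolted_stack_entry(0xc000000000f0c000UL));
	/* secondary cpu: kstack allocated higher in the linear map -> 1 */
	printf("%d\n", needs_bolted_stack_entry(0xc000000076543000UL));
	return 0;
}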