Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c   6
-rw-r--r--  arch/powerpc/mm/lmb.c            13
-rw-r--r--  arch/powerpc/mm/mem.c            21
3 files changed, 35 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 17139daeaff..c93a966b7e4 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -165,15 +165,15 @@ void invalidate_tlbcam_entry(int index)
 void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
 		unsigned long cam2)
 {
-	settlbcam(0, KERNELBASE, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
+	settlbcam(0, PAGE_OFFSET, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
 	tlbcam_index++;
 	if (cam1) {
 		tlbcam_index++;
-		settlbcam(1, KERNELBASE+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
+		settlbcam(1, PAGE_OFFSET+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
 	}
 	if (cam2) {
 		tlbcam_index++;
-		settlbcam(2, KERNELBASE+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
+		settlbcam(2, PAGE_OFFSET+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
 	}
 }
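For readers unfamiliar with the CAM setup, the following is a minimal sketch, not part of the patch, of the mapping the hunk above establishes after the change: each CAM entry pins a contiguous window of lowmem whose virtual side starts at PAGE_OFFSET and whose physical side starts at PPC_MEMSTART. The identifiers (settlbcam, tlbcam_index, PPC_MEMSTART, _PAGE_KERNEL) are the ones already used in fsl_booke_mmu.c; the function name is hypothetical.

/*
 * Minimal sketch (not the patch): roughly the same mapping expressed as a
 * loop over the three CAM sizes.
 */
static void __init cam_mapin_ram_sketch(unsigned long cam[3])
{
	unsigned long offset = 0;
	int i;

	for (i = 0; i < 3 && cam[i]; i++) {
		/* virtual window at PAGE_OFFSET + offset, backed by
		 * physical memory at PPC_MEMSTART + offset */
		settlbcam(i, PAGE_OFFSET + offset, PPC_MEMSTART + offset,
			  cam[i], _PAGE_KERNEL, 0);
		tlbcam_index++;
		offset += cam[i];
	}
}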
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index 8f4d2dc4caf..4ce23bcf8a5 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -342,3 +342,16 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
 		}
 	}
 }
+
+int __init lmb_is_reserved(unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		unsigned long upper = lmb.reserved.region[i].base +
+			lmb.reserved.region[i].size - 1;
+		if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
+			return 1;
+	}
+	return 0;
+}
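A quick note on the bounds in lmb_is_reserved(): the check is inclusive at both ends of each reserved region, so the last byte of a region still reports as reserved. A worked example with hypothetical values, chosen only for illustration:

/*
 * Hypothetical example: for a reserved region with base 0x01000000 and
 * size 0x00100000, upper = 0x010fffff, so:
 *
 *	lmb_is_reserved(0x01000000) == 1	first byte of the region
 *	lmb_is_reserved(0x010fffff) == 1	last byte of the region
 *	lmb_is_reserved(0x01100000) == 0	first byte past the region
 */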
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5402fb6b3aa..e8122447f01 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -213,15 +213,30 @@ void __init do_init_bootmem(void)
 	 */
 #ifdef CONFIG_HIGHMEM
 	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
+
+	/* reserve the sections we're already using */
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		unsigned long addr = lmb.reserved.region[i].base +
+				lmb_size_bytes(&lmb.reserved, i) - 1;
+		if (addr < total_lowmem)
+			reserve_bootmem(lmb.reserved.region[i].base,
+					lmb_size_bytes(&lmb.reserved, i));
+		else if (lmb.reserved.region[i].base < total_lowmem) {
+			unsigned long adjusted_size = total_lowmem -
+				lmb.reserved.region[i].base;
+			reserve_bootmem(lmb.reserved.region[i].base,
+					adjusted_size);
+		}
+	}
 #else
 	free_bootmem_with_active_regions(0, max_pfn);
-#endif
 	/* reserve the sections we're already using */
 	for (i = 0; i < lmb.reserved.cnt; i++)
 		reserve_bootmem(lmb.reserved.region[i].base,
 				lmb_size_bytes(&lmb.reserved, i));
+#endif
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
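The CONFIG_HIGHMEM branch added above clips each reserved region against the lowmem boundary before handing it to bootmem: regions entirely below total_lowmem are reserved in full, regions straddling the boundary are truncated to their lowmem portion, and regions entirely in highmem are skipped, since bootmem only manages lowmem. A condensed sketch of that rule, using the same identifiers as the hunk (an illustration, not the patch text):

/* Illustration only: the clipping rule used in the CONFIG_HIGHMEM branch. */
unsigned long i;

for (i = 0; i < lmb.reserved.cnt; i++) {
	unsigned long base = lmb.reserved.region[i].base;
	unsigned long size = lmb_size_bytes(&lmb.reserved, i);

	if (base >= total_lowmem)
		continue;			/* entirely in highmem: bootmem never sees it */
	if (base + size > total_lowmem)
		size = total_lowmem - base;	/* straddles the boundary: clip to lowmem */
	reserve_bootmem(base, size);
}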
@@ -334,11 +349,13 @@ void __init mem_init(void)
 		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
 		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
 			struct page *page = pfn_to_page(pfn);
-
+			if (lmb_is_reserved(pfn << PAGE_SHIFT))
+				continue;
 			ClearPageReserved(page);
 			init_page_count(page);
 			__free_page(page);
 			totalhigh_pages++;
+			reservedpages--;
 		}
 		totalram_pages += totalhigh_pages;
 		printk(KERN_DEBUG "High memory: %luk\n",
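For clarity, this is how the mem_init() highmem loop reads once the hunk above is applied (a reconstruction from the diff, not verbatim file contents): pages that fall inside a reserved LMB region are skipped, and reservedpages is decremented for each page that is actually freed.

for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
	struct page *page = pfn_to_page(pfn);

	if (lmb_is_reserved(pfn << PAGE_SHIFT))
		continue;	/* reserved region: keep the page out of the buddy allocator */
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
	reservedpages--;	/* freed pages are no longer counted as reserved */
}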