Diffstat (limited to 'arch/powerpc/mm/mem.c')
-rw-r--r-- | arch/powerpc/mm/mem.c | 76
1 file changed, 52 insertions, 24 deletions
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 16def4dcff6..f67e118116f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -45,6 +45,7 @@
 #include <asm/tlb.h>
 #include <asm/sections.h>
 #include <asm/vdso.h>
+#include <asm/fixmap.h>
 
 #include "mmu_decl.h"
 
@@ -57,6 +58,20 @@ int init_bootmem_done;
 int mem_init_done;
 unsigned long memory_limit;
 
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
+EXPORT_SYMBOL(kmap_prot);
+EXPORT_SYMBOL(kmap_pte);
+
+static inline pte_t *virt_to_kpte(unsigned long vaddr)
+{
+	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
+			vaddr), vaddr), vaddr);
+}
+#endif
+
 int page_is_ram(unsigned long pfn)
 {
 	unsigned long paddr = (pfn << PAGE_SHIFT);
@@ -95,15 +110,6 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 
-void online_page(struct page *page)
-{
-	ClearPageReserved(page);
-	init_page_count(page);
-	__free_page(page);
-	totalram_pages++;
-	num_physpages++;
-}
-
 #ifdef CONFIG_NUMA
 int memory_add_physaddr_to_nid(u64 start)
 {
@@ -148,19 +154,35 @@ out:
 /*
  * walk_memory_resource() needs to make sure there is no holes in a given
- * memory range. On PPC64, since this range comes from /sysfs, the range
- * is guaranteed to be valid, non-overlapping and can not contain any
- * holes. By the time we get here (memory add or remove), /proc/device-tree
- * is updated and correct. Only reason we need to check against device-tree
- * would be if we allow user-land to specify a memory range through a
- * system call/ioctl etc. instead of doing offline/online through /sysfs.
+ * memory range. PPC64 does not maintain the memory layout in /proc/iomem.
+ * Instead it maintains it in lmb.memory structures. Walk through the
+ * memory regions, find holes and callback for contiguous regions.
  */
 int walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
 			int (*func)(unsigned long, unsigned long, void *))
 {
-	return (*func)(start_pfn, nr_pages, arg);
+	struct lmb_property res;
+	unsigned long pfn, len;
+	u64 end;
+	int ret = -1;
+
+	res.base = (u64) start_pfn << PAGE_SHIFT;
+	res.size = (u64) nr_pages << PAGE_SHIFT;
+
+	end = res.base + res.size - 1;
+	while ((res.base < end) && (lmb_find(&res) >= 0)) {
+		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
+		len = (unsigned long)(res.size >> PAGE_SHIFT);
+		ret = (*func)(pfn, len, arg);
+		if (ret)
+			break;
+		res.base += (res.size + 1);
+		res.size = (end - res.base + 1);
+	}
+	return ret;
 }
+EXPORT_SYMBOL_GPL(walk_memory_resource);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
@@ -216,7 +238,7 @@ void __init do_init_bootmem(void)
 	unsigned long total_pages;
 	int boot_mapsize;
 
-	max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
 	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
 	total_pages = total_lowmem >> PAGE_SHIFT;
@@ -232,7 +254,8 @@ void __init do_init_bootmem(void)
 
 	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 
-	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
+	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
 	/* Add active regions with valid PFNs */
 	for (i = 0; i < lmb.memory.cnt; i++) {
@@ -310,14 +333,19 @@ void __init paging_init(void)
 	unsigned long top_of_ram = lmb_end_of_DRAM();
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
+#ifdef CONFIG_PPC32
+	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
+	unsigned long end = __fix_to_virt(FIX_HOLE);
+
+	for (; v < end; v += PAGE_SIZE)
+		map_page(v, 0, 0); /* XXX gross */
+#endif
+
 #ifdef CONFIG_HIGHMEM
 	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
-	pkmap_page_table = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
-			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
-	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
-	kmap_pte = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
-			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
-			KMAP_FIX_BEGIN);
+	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
+
+	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
 	kmap_prot = PAGE_KERNEL;
 #endif /* CONFIG_HIGHMEM */
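
The reworked walk_memory_resource() above walks the lmb.memory regions instead of trusting the caller's range to be hole-free, and invokes the callback once per contiguous region. A minimal sketch of a caller is shown below; only the walk_memory_resource() signature comes from the hunk above, and the callback and wrapper names are hypothetical, made up here for illustration in kernel context.

/* Hypothetical callback: counts pages that are actually backed by RAM. */
static int count_present_pages(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;	/* called once per contiguous lmb region */
	return 0;		/* returning non-zero stops the walk early */
}

/* Hypothetical wrapper: holes between lmb regions are skipped by the walk. */
static unsigned long present_pages_in_range(unsigned long start_pfn,
					    unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_memory_resource(start_pfn, nr_pages, &total, count_present_pages);
	return total;
}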
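
The kmap_pte/kmap_prot globals and the virt_to_kpte() helper cache the kernel page-table entry for the first FIX_KMAP_BEGIN fixmap slot set up in paging_init(). The sketch below shows roughly how an atomic kmap consumes them; it is illustrative only (pagefault disabling and debug checks are omitted) and is not code added by this patch.

/*
 * Illustrative only: map a highmem page into kmap fixmap slot 'idx'.
 * Fixmap virtual addresses grow downwards, so slot FIX_KMAP_BEGIN + idx
 * lives idx pages below __fix_to_virt(FIX_KMAP_BEGIN) and its pte is
 * kmap_pte - idx in the page table mapped by paging_init() above.
 */
static void *map_kmap_slot(struct page *page, unsigned int idx)
{
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

	set_pte_at(&init_mm, vaddr, kmap_pte - idx, mk_pte(page, kmap_prot));
	flush_tlb_page(NULL, vaddr);

	return (void *)vaddr;
}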