author     James Morris <jmorris@namei.org>   2008-12-04 17:16:36 +1100
committer  James Morris <jmorris@namei.org>   2008-12-04 17:16:36 +1100
commit     ec98ce480ada787f2cfbd696980ff3564415505b (patch)
tree       1a4d644b38f9f1e4b4e086fde0b195df4a92cf84 /arch/powerpc/mm
parent     3496f92beb9aa99ef21fccc154a36c7698e9c538 (diff)
parent     feaf3848a813a106f163013af6fcf6c4bfec92d9 (diff)
Merge branch 'master' into next
Conflicts:
fs/nfsd/nfs4recover.c
Manually fixed above to use new creds API functions, e.g.
nfs4_save_creds().
Signed-off-by: James Morris <jmorris@namei.org>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/40x_mmu.c      |  16
-rw-r--r--   arch/powerpc/mm/hugetlbpage.c  |   2
-rw-r--r--   arch/powerpc/mm/numa.c         | 122
3 files changed, 90 insertions, 50 deletions
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index cecbbc76f62..29954dc2894 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -93,7 +93,7 @@ void __init MMU_init_hw(void)
 
 unsigned long __init mmu_mapin_ram(void)
 {
-        unsigned long v, s;
+        unsigned long v, s, mapped;
         phys_addr_t p;
 
         v = KERNELBASE;
@@ -130,5 +130,17 @@ unsigned long __init mmu_mapin_ram(void)
                 s -= LARGE_PAGE_SIZE_4M;
         }
 
-        return total_lowmem - s;
+        mapped = total_lowmem - s;
+
+        /* If the size of RAM is not an exact power of two, we may not
+         * have covered RAM in its entirety with 16 and 4 MiB
+         * pages. Consequently, restrict the top end of RAM currently
+         * allocable so that calls to the LMB to allocate PTEs for "tail"
+         * coverage with normal-sized pages (or other reasons) do not
+         * attempt to allocate outside the allowed range.
+         */
+
+        __initial_memory_limit_addr = memstart_addr + mapped;
+
+        return mapped;
 }
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a117024ab8c..7bbf4e4ed43 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -677,7 +677,7 @@ repeat:
         return err;
 }
 
-void set_huge_psize(int psize)
+static void __init set_huge_psize(int psize)
 {
         /* Check that it is a page size supported by the hardware and
          * that it fits within pagetable limits. */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index eb505ad34a8..a8397bbad3d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -865,6 +865,67 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
         .priority = 1 /* Must run before sched domains notifier. */
 };
 
+static void mark_reserved_regions_for_nid(int nid)
+{
+        struct pglist_data *node = NODE_DATA(nid);
+        int i;
+
+        for (i = 0; i < lmb.reserved.cnt; i++) {
+                unsigned long physbase = lmb.reserved.region[i].base;
+                unsigned long size = lmb.reserved.region[i].size;
+                unsigned long start_pfn = physbase >> PAGE_SHIFT;
+                unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
+                struct node_active_region node_ar;
+                unsigned long node_end_pfn = node->node_start_pfn +
+                                             node->node_spanned_pages;
+
+                /*
+                 * Check to make sure that this lmb.reserved area is
+                 * within the bounds of the node that we care about.
+                 * Checking the nid of the start and end points is not
+                 * sufficient because the reserved area could span the
+                 * entire node.
+                 */
+                if (end_pfn <= node->node_start_pfn ||
+                    start_pfn >= node_end_pfn)
+                        continue;
+
+                get_node_active_region(start_pfn, &node_ar);
+                while (start_pfn < end_pfn &&
+                       node_ar.start_pfn < node_ar.end_pfn) {
+                        unsigned long reserve_size = size;
+                        /*
+                         * if reserved region extends past active region
+                         * then trim size to active region
+                         */
+                        if (end_pfn > node_ar.end_pfn)
+                                reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
+                                        - (start_pfn << PAGE_SHIFT);
+                        dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
+                                reserve_size, node_ar.nid);
+                        reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
+                                                reserve_size, BOOTMEM_DEFAULT);
+                        /*
+                         * if reserved region is contained in the active region
+                         * then done.
+                         */
+                        if (end_pfn <= node_ar.end_pfn)
+                                break;
+
+                        /*
+                         * reserved region extends past the active region
+                         *   get next active region that contains this
+                         *   reserved region
+                         */
+                        start_pfn = node_ar.end_pfn;
+                        physbase = start_pfn << PAGE_SHIFT;
+                        size = size - reserve_size;
+                        get_node_active_region(start_pfn, &node_ar);
+                }
+        }
+}
+
+
 void __init do_init_bootmem(void)
 {
         int nid;
@@ -890,7 +951,13 @@ void __init do_init_bootmem(void)
 
                 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 
-                /* Allocate the node structure node local if possible */
+                /*
+                 * Allocate the node structure node local if possible
+                 *
+                 * Be careful moving this around, as it relies on all
+                 * previous nodes' bootmem to be initialized and have
+                 * all reserved areas marked.
+                 */
                 NODE_DATA(nid) = careful_allocation(nid,
                                         sizeof(struct pglist_data),
                                         SMP_CACHE_BYTES, end_pfn);
@@ -922,53 +989,14 @@ void __init do_init_bootmem(void)
                                   start_pfn, end_pfn);
 
                 free_bootmem_with_active_regions(nid, end_pfn);
-        }
-
-        /* Mark reserved regions */
-        for (i = 0; i < lmb.reserved.cnt; i++) {
-                unsigned long physbase = lmb.reserved.region[i].base;
-                unsigned long size = lmb.reserved.region[i].size;
-                unsigned long start_pfn = physbase >> PAGE_SHIFT;
-                unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
-                struct node_active_region node_ar;
-
-                get_node_active_region(start_pfn, &node_ar);
-                while (start_pfn < end_pfn &&
-                       node_ar.start_pfn < node_ar.end_pfn) {
-                        unsigned long reserve_size = size;
-                        /*
-                         * if reserved region extends past active region
-                         * then trim size to active region
-                         */
-                        if (end_pfn > node_ar.end_pfn)
-                                reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
-                                        - (start_pfn << PAGE_SHIFT);
-                        dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
-                                reserve_size, node_ar.nid);
-                        reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
-                                reserve_size, BOOTMEM_DEFAULT);
-                        /*
-                         * if reserved region is contained in the active region
-                         * then done.
-                         */
-                        if (end_pfn <= node_ar.end_pfn)
-                                break;
-
-                        /*
-                         * reserved region extends past the active region
-                         *   get next active region that contains this
-                         *   reserved region
-                         */
-                        start_pfn = node_ar.end_pfn;
-                        physbase = start_pfn << PAGE_SHIFT;
-                        size = size - reserve_size;
-                        get_node_active_region(start_pfn, &node_ar);
-                }
-
-        }
-
-        for_each_online_node(nid)
+                /*
+                 * Be very careful about moving this around.  Future
+                 * calls to careful_allocation() depend on this getting
+                 * done correctly.
+                 */
+                mark_reserved_regions_for_nid(nid);
                 sparse_memory_present_with_active_regions(nid);
+        }
 }
 
 void __init paging_init(void)
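The 40x_mmu.c change above computes how much of lowmem was covered by 16 MiB and 4 MiB pinned entries and clamps early LMB allocations to that amount. Below is a minimal standalone C sketch of the size arithmetic only, not kernel code: MiB(), map_lowmem() and the 70 MiB figure are illustrative assumptions, not names from the patch.

#include <stdio.h>

#define MiB(x) ((unsigned long)(x) << 20)
#define LARGE_PAGE_SIZE_16M MiB(16)
#define LARGE_PAGE_SIZE_4M  MiB(4)

/* Mirror of the mmu_mapin_ram() size arithmetic: cover lowmem with
 * 16 MiB pages, then 4 MiB pages, and report how much was mapped. */
static unsigned long map_lowmem(unsigned long total_lowmem)
{
        unsigned long s = total_lowmem;

        while (s >= LARGE_PAGE_SIZE_16M)
                s -= LARGE_PAGE_SIZE_16M;
        while (s >= LARGE_PAGE_SIZE_4M)
                s -= LARGE_PAGE_SIZE_4M;

        return total_lowmem - s;        /* "mapped" in the patch */
}

int main(void)
{
        unsigned long lowmem = MiB(70); /* not a power of two */
        unsigned long mapped = map_lowmem(lowmem);

        /* The patch clamps early allocations at memstart_addr + mapped
         * so the unmapped tail is not handed out before it is covered
         * with normal-sized pages. Here: 68 MiB mapped, 2 MiB tail. */
        printf("lowmem %lu MiB, mapped %lu MiB, tail %lu MiB\n",
               lowmem >> 20, mapped >> 20, (lowmem - mapped) >> 20);
        return 0;
}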
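The new mark_reserved_regions_for_nid() walks one lmb reserved range across the active regions that contain it, trimming the reservation to each region in turn so a range crossing a node boundary is reserved in each node's bootmem. The following simplified, standalone sketch shows only that interval walk; find_active(), mark_reserved() and the pfn values are hypothetical stand-ins for get_node_active_region() and the reserve_bootmem_node() loop, not the kernel's code.

#include <stdio.h>

struct active_region { unsigned long start_pfn, end_pfn; int nid; };

/* Stand-in for get_node_active_region(): return the region containing
 * pfn, or an empty region (start == end) if none does. */
static struct active_region find_active(const struct active_region *map,
                                        int n, unsigned long pfn)
{
        for (int i = 0; i < n; i++)
                if (pfn >= map[i].start_pfn && pfn < map[i].end_pfn)
                        return map[i];
        return (struct active_region){ 0, 0, -1 };
}

static void mark_reserved(const struct active_region *map, int n,
                          unsigned long start_pfn, unsigned long end_pfn)
{
        struct active_region ar = find_active(map, n, start_pfn);

        while (start_pfn < end_pfn && ar.start_pfn < ar.end_pfn) {
                /* trim the reservation to the current active region */
                unsigned long chunk_end =
                        end_pfn < ar.end_pfn ? end_pfn : ar.end_pfn;

                /* kernel: reserve_bootmem_node() on NODE_DATA(ar.nid) */
                printf("reserve pfns [%lu, %lu) on nid %d\n",
                       start_pfn, chunk_end, ar.nid);

                if (end_pfn <= ar.end_pfn)
                        break;  /* reservation fully covered: done */

                /* continue with the next active region */
                start_pfn = ar.end_pfn;
                ar = find_active(map, n, start_pfn);
        }
}

int main(void)
{
        /* two nodes' memory, adjoining at pfn 100 */
        struct active_region map[] = { { 0, 100, 0 }, { 100, 300, 1 } };

        /* a reservation straddling the boundary is split per node:
         * [80, 100) on nid 0, then [100, 200) on nid 1 */
        mark_reserved(map, 2, 80, 200);
        return 0;
}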