author    | Rusty Russell <rusty@rustcorp.com.au> | 2009-03-31 13:33:50 +1030
committer | Rusty Russell <rusty@rustcorp.com.au> | 2009-03-31 13:33:50 +1030
commit    | 558f6ab9106e6be701acb0257e7171df1bbccf04 (patch)
tree      | 6e811633baeb676693c493f6c82bf785cab2771d /mm
parent    | 15f7176eb1cccec0a332541285ee752b935c1c85 (diff)
parent    | 65fb0d23fcddd8697c871047b700c78817bdaa43 (diff)
Merge branch 'cpumask-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
Conflicts:
arch/x86/include/asm/topology.h
drivers/oprofile/buffer_sync.c
(Both cases: changed in Linus' tree, removed in Ingo's).
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 6
-rw-r--r-- | mm/quicklist.c  | 2
-rw-r--r-- | mm/slab.c       | 2
-rw-r--r-- | mm/vmscan.c     | 6
4 files changed, 9 insertions, 7 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a3803ea8c27..f87e0d8df5a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2139,7 +2139,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
-	node_to_cpumask_ptr(tmp, 0);
+	const struct cpumask *tmp = cpumask_of_node(0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2160,8 +2160,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		node_to_cpumask_ptr_next(tmp, n);
-		if (!cpus_empty(*tmp))
+		tmp = cpumask_of_node(n);
+		if (!cpumask_empty(tmp))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 8dbb6805ef3..e66d07d1b4f 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -29,7 +29,7 @@ static unsigned long max_pages(unsigned long min_pages)
 	int node = numa_node_id();
 	struct zone *zones = NODE_DATA(node)->node_zones;
 	int num_cpus_on_node;
-	node_to_cpumask_ptr(cpumask_on_node, node);
+	const struct cpumask *cpumask_on_node = cpumask_of_node(node);
 
 	node_free_pages =
 #ifdef CONFIG_ZONE_DMA
diff --git a/mm/slab.c b/mm/slab.c
index 825c606f691..59839d7ee5b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1160,7 +1160,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
-	node_to_cpumask_ptr(mask, node);
+	const struct cpumask *mask = cpumask_of_node(node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
 		struct array_cache *nc;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 479e4671939..f74a61e522f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1963,7 +1963,7 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	node_to_cpumask_ptr(cpumask, pgdat->node_id);
+	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
 	lockdep_set_current_reclaim_state(GFP_KERNEL);
 
@@ -2200,7 +2200,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_node_state(nid, N_HIGH_MEMORY) {
 			pg_data_t *pgdat = NODE_DATA(nid);
-			node_to_cpumask_ptr(mask, pgdat->node_id);
+			const struct cpumask *mask;
+
+			mask = cpumask_of_node(pgdat->node_id);
 
 			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
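
For reference, every hunk above applies the same conversion: the old node_to_cpumask_ptr()/node_to_cpumask_ptr_next() macros, which could place a cpumask on the stack, are replaced by the const accessor cpumask_of_node() plus the cpumask_* operations that take pointers. A minimal sketch of the new-style usage is below; it is illustrative only and not part of this patch, and node_has_online_cpu() is a hypothetical helper name.

#include <linux/cpumask.h>
#include <linux/topology.h>

/*
 * Hypothetical helper: return true if any CPU belonging to @node is
 * currently online. cpumask_of_node() hands back a const struct
 * cpumask * owned by the topology code, so no NR_CPUS-sized mask is
 * copied onto the stack.
 */
static bool node_has_online_cpu(int node)
{
	const struct cpumask *mask = cpumask_of_node(node);

	/* Headless node: no CPUs attached at all. */
	if (cpumask_empty(mask))
		return false;

	/* Any online CPU in the node's mask? */
	return cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids;
}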