author    Ingo Molnar <mingo@elte.hu>  2008-07-08 11:59:23 +0200
committer Ingo Molnar <mingo@elte.hu>  2008-07-08 11:59:23 +0200
commit    2b4fa851b2f06fdb04cac808b57324f5e51e1578 (patch)
tree      97db3ad5adda7683923630982f68b8b52c86e790  /kernel/sched.c
parent    3de352bbd86f890dd0c5e1c09a6a1b0b29e0f8ce (diff)
parent    46f68e1c6b04a04772e828ff3bcd07ed708805c2 (diff)
Merge branch 'x86/numa' into x86/devel
Conflicts:

	arch/x86/Kconfig
	arch/x86/kernel/e820.c
	arch/x86/kernel/efi_64.c
	arch/x86/kernel/mpparse.c
	arch/x86/kernel/setup.c
	arch/x86/kernel/setup_32.c
	arch/x86/mm/init_64.c
	include/asm-x86/proto.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
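
Note (not part of the patch): every hunk below makes the same substitution, replacing the compile-time constant MAX_NUMNODES with the runtime value nr_node_ids so the scheduler's node loops only walk node ids that can actually exist on the running system. A minimal, self-contained userspace sketch of that pattern follows; MAX_NUMNODES_EXAMPLE, nr_node_ids_example, and cpus_on_node are illustrative stand-ins, not the kernel symbols.

/*
 * Sketch of the loop-bound change applied throughout this diff.
 * All names here are hypothetical stand-ins for the kernel's
 * MAX_NUMNODES / nr_node_ids / nr_cpus_node().
 */
#include <stdio.h>

#define MAX_NUMNODES_EXAMPLE 64          /* compile-time upper bound */

static int nr_node_ids_example = 2;      /* runtime count on a 2-node box */

/* Pretend per-node data: non-zero means the node has CPUs. */
static int cpus_on_node[MAX_NUMNODES_EXAMPLE] = { 4, 4 };

int main(void)
{
	int i, n;

	/* Old style: walks all 64 slots even on a 2-node machine. */
	for (i = 0; i < MAX_NUMNODES_EXAMPLE; i++)
		if (cpus_on_node[i])
			printf("old loop: node %d has cpus\n", i);

	/* New style: bounded by the runtime node count instead. */
	for (i = 0; i < nr_node_ids_example; i++) {
		/* wrap around like find_next_best_node() does */
		n = (0 + i) % nr_node_ids_example;
		if (cpus_on_node[n])
			printf("new loop: node %d has cpus\n", n);
	}
	return 0;
}
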
diff --git a/kernel/sched.c b/kernel/sched.c
index 94ead43eda6..bcc22b569ee 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6538,9 +6538,9 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
min_val = INT_MAX;
- for (i = 0; i < MAX_NUMNODES; i++) {
+ for (i = 0; i < nr_node_ids; i++) {
/* Start at @node */
- n = (node + i) % MAX_NUMNODES;
+ n = (node + i) % nr_node_ids;
if (!nr_cpus_node(n))
continue;
@@ -6734,7 +6734,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
if (!sched_group_nodes)
continue;
- for (i = 0; i < MAX_NUMNODES; i++) {
+ for (i = 0; i < nr_node_ids; i++) {
struct sched_group *oldsg, *sg = sched_group_nodes[i];
*nodemask = node_to_cpumask(i);
@@ -6927,7 +6927,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
/*
* Allocate the per-node list of sched groups
*/
- sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+ sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
GFP_KERNEL);
if (!sched_group_nodes) {
printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -7066,7 +7066,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
#endif
/* Set up physical groups */
- for (i = 0; i < MAX_NUMNODES; i++) {
+ for (i = 0; i < nr_node_ids; i++) {
SCHED_CPUMASK_VAR(nodemask, allmasks);
SCHED_CPUMASK_VAR(send_covered, allmasks);
@@ -7090,7 +7090,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
send_covered, tmpmask);
}
- for (i = 0; i < MAX_NUMNODES; i++) {
+ for (i = 0; i < nr_node_ids; i++) {
/* Set up node groups */
struct sched_group *sg, *prev;
SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7129,9 +7129,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
cpus_or(*covered, *covered, *nodemask);
prev = sg;
- for (j = 0; j < MAX_NUMNODES; j++) {
+ for (j = 0; j < nr_node_ids; j++) {
SCHED_CPUMASK_VAR(notcovered, allmasks);
- int n = (i + j) % MAX_NUMNODES;
+ int n = (i + j) % nr_node_ids;
node_to_cpumask_ptr(pnodemask, n);
cpus_complement(*notcovered, *covered);
@@ -7184,7 +7184,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
}
#ifdef CONFIG_NUMA
- for (i = 0; i < MAX_NUMNODES; i++)
+ for (i = 0; i < nr_node_ids; i++)
init_numa_sched_groups_power(sched_group_nodes[i]);
if (sd_allnodes) {