author    Andreas Herrmann <andreas.herrmann3@amd.com>  2009-08-18 12:54:06 +0200
committer Ingo Molnar <mingo@elte.hu>                   2009-08-18 18:35:40 +0200
commit    7f4588f3aa395632fec9ba2e15a1920f0682fda0
tree      b827a407ef4f509e80aa60bbc0b0eb0dcf8cf402 /kernel/sched.c
parent    2109b99ee192764b407dc7f52babb74740eea6f9
sched: Separate out build of NUMA sched domain from __build_sched_domains
... to further strip down __build_sched_domains().

Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105406.GD29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  57
1 file changed, 32 insertions, 25 deletions
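For orientation before reading the diff: the patch moves the NUMA-level domain setup (ALLNODES and NODE domains) out of __build_sched_domains() into a new helper, __build_numa_sched_domains(), which is called once per cpu and returns the lowest NUMA-level domain (or NULL when CONFIG_NUMA is not set). A simplified sketch of the resulting caller structure follows; the elided lower levels are an abbreviation, not the verbatim kernel source.

for_each_cpu(i, cpu_map) {
        struct sched_domain *sd, *p;

        /* restrict the node mask to the cpus we are building for */
        cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);

        /* build ALLNODES/NODE domains; returns NULL when !CONFIG_NUMA */
        sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
        p = sd;

        /* the CPU (physical) domain becomes a child of the NUMA level */
        sd = &per_cpu(phys_domains, i).sd;
        SD_INIT(sd, CPU);
        set_domain_attribute(sd, attr);
        /* ... MC/SMT levels and group setup continue as before ... */
}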
diff --git a/kernel/sched.c b/kernel/sched.c
index c5d1fee4236..dd95a470837 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8482,6 +8482,37 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
return sa_rootdomain;
}
+static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+{
+ struct sched_domain *sd = NULL;
+#ifdef CONFIG_NUMA
+ struct sched_domain *parent;
+
+ d->sd_allnodes = 0;
+ if (cpumask_weight(cpu_map) >
+ SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
+ sd = &per_cpu(allnodes_domains, i).sd;
+ SD_INIT(sd, ALLNODES);
+ set_domain_attribute(sd, attr);
+ cpumask_copy(sched_domain_span(sd), cpu_map);
+ cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
+ d->sd_allnodes = 1;
+ }
+ parent = sd;
+
+ sd = &per_cpu(node_domains, i).sd;
+ SD_INIT(sd, NODE);
+ set_domain_attribute(sd, attr);
+ sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+ sd->parent = parent;
+ if (parent)
+ parent->child = sd;
+ cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
+#endif
+ return sd;
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8510,31 +8541,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
cpu_map);
-#ifdef CONFIG_NUMA
- if (cpumask_weight(cpu_map) >
- SD_NODES_PER_DOMAIN*cpumask_weight(d.nodemask)) {
- sd = &per_cpu(allnodes_domains, i).sd;
- SD_INIT(sd, ALLNODES);
- set_domain_attribute(sd, attr);
- cpumask_copy(sched_domain_span(sd), cpu_map);
- cpu_to_allnodes_group(i, cpu_map, &sd->groups,
- d.tmpmask);
- p = sd;
- d.sd_allnodes = 1;
- } else
- p = NULL;
-
- sd = &per_cpu(node_domains, i).sd;
- SD_INIT(sd, NODE);
- set_domain_attribute(sd, attr);
- sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
- sd->parent = p;
- if (p)
- p->child = sd;
- cpumask_and(sched_domain_span(sd),
- sched_domain_span(sd), cpu_map);
-#endif
-
+ sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
p = sd;
sd = &per_cpu(phys_domains, i).sd;
SD_INIT(sd, CPU);