-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c  | 10
-rw-r--r--  arch/x86/kernel/cpu/proc.c                 |  3
-rw-r--r--  arch/x86/kernel/mce_amd_64.c               |  6
-rw-r--r--  arch/x86/kernel/setup_64.c                 |  3
-rw-r--r--  arch/x86/kernel/smpboot_32.c               | 34
-rw-r--r--  arch/x86/kernel/smpboot_64.c               | 24
-rw-r--r--  arch/x86/xen/smp.c                         | 14
-rw-r--r--  include/asm-x86/smp_32.h                   |  2
-rw-r--r--  include/asm-x86/smp_64.h                   |  7
-rw-r--r--  include/asm-x86/topology_32.h              |  2
-rw-r--r--  include/asm-x86/topology_64.h              |  2
12 files changed, 63 insertions(+), 46 deletions(-)
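
The change below converts cpu_core_map from a statically sized NR_CPUS
array into a per-CPU variable: each CPU's core-sibling mask now lives in
that CPU's per-CPU data area, and array indexing (var[cpu]) becomes the
per_cpu(var, cpu) accessor. A minimal sketch of the pattern, assuming the
2.6.23-era per-CPU API from <linux/percpu.h>; the helper function is
illustrative only, not part of the patch:

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	/* Before: one cpumask_t per possible CPU, statically sized. */
	/* cpumask_t cpu_core_map[NR_CPUS] __read_mostly; */

	/* After: the mask lives in each CPU's per-CPU data area. */
	DEFINE_PER_CPU(cpumask_t, cpu_core_map);
	EXPORT_PER_CPU_SYMBOL(cpu_core_map);

	static int example_core_siblings(int cpu)
	{
		/* per_cpu(var, cpu) replaces var[cpu] indexing. */
		return cpus_weight(per_cpu(cpu_core_map, cpu));
	}

Besides avoiding a large static array when NR_CPUS is big, this keeps
each CPU's mask in memory that is local to that CPU on NUMA systems.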
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ffd01e5dcb5..2ca43ba32bc 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -595,7 +595,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
dmi_check_system(sw_any_bug_dmi_table);
if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
- policy->cpus = cpu_core_map[cpu];
+ policy->cpus = per_cpu(cpu_core_map, cpu);
}
#endif
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index b273b69cfdd..c06ac680c9c 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -57,7 +57,7 @@ static struct powernow_k8_data *powernow_data[NR_CPUS];
static int cpu_family = CPU_OPTERON;
#ifndef CONFIG_SMP
-static cpumask_t cpu_core_map[1];
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
#endif
/* Return a frequency in MHz, given an input fid */
@@ -667,7 +667,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst,
dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
data->powernow_table = powernow_table;
- if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+ if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
print_basics(data);
for (j = 0; j < data->numps; j++)
@@ -821,7 +821,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
/* fill in data */
data->numps = data->acpi_data.state_count;
- if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+ if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
print_basics(data);
powernow_k8_acpi_pst_values(data, 0);
@@ -1214,7 +1214,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
if (cpu_family == CPU_HW_PSTATE)
pol->cpus = cpumask_of_cpu(pol->cpu);
else
- pol->cpus = cpu_core_map[pol->cpu];
+ pol->cpus = per_cpu(cpu_core_map, pol->cpu);
data->available_cores = &(pol->cpus);
/* Take a crude guess here.
@@ -1281,7 +1281,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
cpumask_t oldmask = current->cpus_allowed;
unsigned int khz = 0;
- data = powernow_data[first_cpu(cpu_core_map[cpu])];
+ data = powernow_data[first_cpu(per_cpu(cpu_core_map, cpu))];
if (!data)
return -EINVAL;
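
powernow-k8 previously carried a one-element dummy array so that UP
builds compiled; it now defines its own per-CPU instance instead, so the
same per_cpu(cpu_core_map, cpu) expression works in both configurations.
A hedged sketch of that fallback plus the "first core prints" idiom used
around print_basics(); the helper name is made up for illustration:

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	#ifndef CONFIG_SMP
	/* UP build: a local definition keeps per_cpu() usable. */
	DEFINE_PER_CPU(cpumask_t, cpu_core_map);
	#endif

	static int is_core_leader(int cpu)
	{
		/* Only the lowest-numbered CPU in the core map
		 * reports the P-state table, once per package. */
		return first_cpu(per_cpu(cpu_core_map, cpu)) == cpu;
	}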
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 1e31b6caffb..879a0f789b1 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -122,7 +122,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_X86_HT
if (c->x86_max_cores * smp_num_siblings > 1) {
seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
- seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
+ seq_printf(m, "siblings\t: %d\n",
+ cpus_weight(per_cpu(cpu_core_map, n)));
seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
}
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c
index 2f8a7f18b0f..805b62b1e0d 100644
--- a/arch/x86/kernel/mce_amd_64.c
+++ b/arch/x86/kernel/mce_amd_64.c
@@ -472,7 +472,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifdef CONFIG_SMP
if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */
- i = first_cpu(cpu_core_map[cpu]);
+ i = first_cpu(per_cpu(cpu_core_map, cpu));
/* first core not up yet */
if (cpu_data[i].cpu_core_id)
@@ -492,7 +492,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out;
- b->cpus = cpu_core_map[cpu];
+ b->cpus = per_cpu(cpu_core_map, cpu);
per_cpu(threshold_banks, cpu)[bank] = b;
goto out;
}
@@ -509,7 +509,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifndef CONFIG_SMP
b->cpus = CPU_MASK_ALL;
#else
- b->cpus = cpu_core_map[cpu];
+ b->cpus = per_cpu(cpu_core_map, cpu);
#endif
err = kobject_register(&b->kobj);
if (err)
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index b7da90e79c7..85b5b6310ac 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -1070,7 +1070,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (smp_num_siblings * c->x86_max_cores > 1) {
int cpu = c - cpu_data;
seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
- seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
+ seq_printf(m, "siblings\t: %d\n",
+ cpus_weight(per_cpu(cpu_core_map, cpu)));
seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
}
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index e4f61d1c624..4cbab48ba86 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -74,8 +74,8 @@ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
/* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
@@ -300,7 +300,7 @@ cpumask_t cpu_coregroup_map(int cpu)
* And for power savings, we return cpu_core_map
*/
if (sched_mc_power_savings || sched_smt_power_savings)
- return cpu_core_map[cpu];
+ return per_cpu(cpu_core_map, cpu);
else
return c->llc_shared_map;
}
@@ -321,8 +321,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
c[cpu].cpu_core_id == c[i].cpu_core_id) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
- cpu_set(i, cpu_core_map[cpu]);
- cpu_set(cpu, cpu_core_map[i]);
+ cpu_set(i, per_cpu(cpu_core_map, cpu));
+ cpu_set(cpu, per_cpu(cpu_core_map, i));
cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map);
}
@@ -334,7 +334,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
cpu_set(cpu, c[cpu].llc_shared_map);
if (current_cpu_data.x86_max_cores == 1) {
- cpu_core_map[cpu] = cpu_sibling_map[cpu];
+ per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
c[cpu].booted_cores = 1;
return;
}
@@ -346,8 +346,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
cpu_set(cpu, c[i].llc_shared_map);
}
if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
- cpu_set(i, cpu_core_map[cpu]);
- cpu_set(cpu, cpu_core_map[i]);
+ cpu_set(i, per_cpu(cpu_core_map, cpu));
+ cpu_set(cpu, per_cpu(cpu_core_map, i));
/*
* Does this new cpu bringup a new core?
*/
@@ -984,7 +984,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
" Using dummy APIC emulation.\n");
map_cpu_to_logical_apicid();
cpu_set(0, cpu_sibling_map[0]);
- cpu_set(0, cpu_core_map[0]);
+ cpu_set(0, per_cpu(cpu_core_map, 0));
return;
}
@@ -1009,7 +1009,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0);
cpu_set(0, cpu_sibling_map[0]);
- cpu_set(0, cpu_core_map[0]);
+ cpu_set(0, per_cpu(cpu_core_map, 0));
return;
}
@@ -1024,7 +1024,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0);
cpu_set(0, cpu_sibling_map[0]);
- cpu_set(0, cpu_core_map[0]);
+ cpu_set(0, per_cpu(cpu_core_map, 0));
return;
}
@@ -1107,11 +1107,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
*/
for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpus_clear(cpu_sibling_map[cpu]);
- cpus_clear(cpu_core_map[cpu]);
+ cpus_clear(per_cpu(cpu_core_map, cpu));
}
cpu_set(0, cpu_sibling_map[0]);
- cpu_set(0, cpu_core_map[0]);
+ cpu_set(0, per_cpu(cpu_core_map, 0));
smpboot_setup_io_apic();
@@ -1148,9 +1148,9 @@ void remove_siblinginfo(int cpu)
int sibling;
struct cpuinfo_x86 *c = cpu_data;
- for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
- cpu_clear(cpu, cpu_core_map[sibling]);
- for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
- cpu_clear(cpu, cpu_core_map[sibling]);
+ for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+ cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
/*
* last thread sibling in this cpu core going down
*/
if (cpus_weight(cpu_sibling_map[cpu]) == 1)
@@ -1160,7 +1160,7 @@ void remove_siblinginfo(int cpu)
for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
cpu_clear(cpu, cpu_sibling_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]);
- cpus_clear(cpu_core_map[cpu]);
+ cpus_clear(per_cpu(cpu_core_map, cpu));
c[cpu].phys_proc_id = 0;
c[cpu].cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map);
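
The teardown path walks a CPU's recorded core siblings and erases the
dying CPU from each sibling's own mask before emptying its own. A
sketch of that loop shape with the per-CPU accessor, assuming the old
cpumask macros (for_each_cpu_mask, cpu_clear, cpus_clear); the function
name is illustrative:

	static void drop_from_core_map(int cpu)
	{
		int sibling;

		/* Remove this CPU from every sibling's core map... */
		for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu))
			cpu_clear(cpu, per_cpu(cpu_core_map, sibling));

		/* ...then empty this CPU's own map. */
		cpus_clear(per_cpu(cpu_core_map, cpu));
	}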
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 720a7d1f886..6723c862282 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -95,8 +95,8 @@ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
/* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/*
* Trampoline 80x86 program as an array.
@@ -243,7 +243,7 @@ cpumask_t cpu_coregroup_map(int cpu)
* And for power savings, we return cpu_core_map
*/
if (sched_mc_power_savings || sched_smt_power_savings)
- return cpu_core_map[cpu];
+ return per_cpu(cpu_core_map, cpu);
else
return c->llc_shared_map;
}
@@ -264,8 +264,8 @@ static inline void set_cpu_sibling_map(int cpu)
c[cpu].cpu_core_id == c[i].cpu_core_id) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
- cpu_set(i, cpu_core_map[cpu]);
- cpu_set(cpu, cpu_core_map[i]);
+ cpu_set(i, per_cpu(cpu_core_map, cpu));
+ cpu_set(cpu, per_cpu(cpu_core_map, i));
cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map);
}
@@ -277,7 +277,7 @@ static inline void set_cpu_sibling_map(int cpu)
cpu_set(cpu, c[cpu].llc_shared_map);
if (current_cpu_data.x86_max_cores == 1) {
- cpu_core_map[cpu] = cpu_sibling_map[cpu];
+ per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
c[cpu].booted_cores = 1;
return;
}
@@ -289,8 +289,8 @@ static inline void set_cpu_sibling_map(int cpu)
cpu_set(cpu, c[i].llc_shared_map);
}
if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
- cpu_set(i, cpu_core_map[cpu]);
- cpu_set(cpu, cpu_core_map[i]);
+ cpu_set(i, per_cpu(cpu_core_map, cpu));
+ cpu_set(cpu, per_cpu(cpu_core_map, i));
/*
* Does this new cpu bringup a new core?
*/
@@ -736,7 +736,7 @@ static __init void disable_smp(void)
else
phys_cpu_present_map = physid_mask_of_physid(0);
cpu_set(0, cpu_sibling_map[0]);
- cpu_set(0, cpu_core_map[0]);
+ cpu_set(0, per_cpu(cpu_core_map, 0));
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -971,8 +971,8 @@ static void remove_siblinginfo(int cpu)
int sibling;
struct cpuinfo_x86 *c = cpu_data;
- for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
- cpu_clear(cpu, cpu_core_map[sibling]);
+ for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+ cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
/*
* last thread sibling in this cpu core going down
*/
@@ -983,7 +983,7 @@ static void remove_siblinginfo(int cpu)
for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
cpu_clear(cpu, cpu_sibling_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]);
- cpus_clear(cpu_core_map[cpu]);
+ cpus_clear(per_cpu(cpu_core_map, cpu));
c[cpu].phys_proc_id = 0;
c[cpu].cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 557b8e24706..539d42530fc 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -148,7 +148,12 @@ void __init xen_smp_prepare_boot_cpu(void)
for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpus_clear(cpu_sibling_map[cpu]);
- cpus_clear(cpu_core_map[cpu]);
+ /*
+ * cpu_core_map lives in a per cpu area that is cleared
+ * when the per cpu array is allocated.
+ *
+ * cpus_clear(per_cpu(cpu_core_map, cpu));
+ */
}
xen_setup_vcpu_info_placement();
@@ -160,7 +165,12 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpus_clear(cpu_sibling_map[cpu]);
- cpus_clear(cpu_core_map[cpu]);
+ /*
+ * cpu_core_map will be zeroed when the per
+ * cpu area is allocated.
+ *
+ * cpus_clear(per_cpu(cpu_core_map, cpu));
+ */
}
smp_store_cpu_info(0);
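
The Xen boot paths stop clearing cpu_core_map explicitly: static
per-CPU data is allocated zero-filled, and an all-zero cpumask_t is the
empty mask, so the clear would be a no-op. A sketch of an illustrative,
not-in-the-patch boot-time check expressing that assumption:

	static void __init check_core_map_empty(int cpu)
	{
		/* Freshly allocated per-CPU data is zeroed, which
		 * for a cpumask_t means "no CPUs set". */
		BUG_ON(!cpus_empty(per_cpu(cpu_core_map, cpu)));
	}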
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index 1f73bde165b..01ab31bb262 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -31,7 +31,7 @@ extern void smp_alloc_memory(void);
extern int pic_mode;
extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
-extern cpumask_t cpu_core_map[];
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
extern void (*mtrr_hook) (void);
extern void zap_low_mappings (void);
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index 3f303d2365e..65f84486441 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -39,7 +39,12 @@ extern int smp_num_siblings;
extern void smp_send_reschedule(int cpu);
extern cpumask_t cpu_sibling_map[NR_CPUS];
-extern cpumask_t cpu_core_map[NR_CPUS];
+/*
+ * cpu_core_map lives in a per cpu area
+ *
+ * extern cpumask_t cpu_core_map[NR_CPUS];
+ */
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
extern u8 cpu_llc_id[NR_CPUS];
#define SMP_TRAMPOLINE_BASE 0x6000
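
DECLARE_PER_CPU is the per-CPU analogue of an extern declaration: the
header advertises the variable, while exactly one .c file provides the
storage with DEFINE_PER_CPU and exports it for modules. The pairing,
sketched to match the hunks above:

	/* in a shared header (cf. smp_64.h / smp_32.h): */
	DECLARE_PER_CPU(cpumask_t, cpu_core_map);

	/* in one implementation file (cf. smpboot_64.c / smpboot_32.c): */
	DEFINE_PER_CPU(cpumask_t, cpu_core_map);
	EXPORT_PER_CPU_SYMBOL(cpu_core_map);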
diff --git a/include/asm-x86/topology_32.h b/include/asm-x86/topology_32.h
index 19b2dafd0c8..7b68dbcd0eb 100644
--- a/include/asm-x86/topology_32.h
+++ b/include/asm-x86/topology_32.h
@@ -30,7 +30,7 @@
#ifdef CONFIG_X86_HT
#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
+#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
#endif
diff --git a/include/asm-x86/topology_64.h b/include/asm-x86/topology_64.h
index 36e52fba796..b8590dff34c 100644
--- a/include/asm-x86/topology_64.h
+++ b/include/asm-x86/topology_64.h
@@ -58,7 +58,7 @@ extern int __node_distance(int, int);
#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
+#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
#define smt_capable() (smp_num_siblings > 1)