From c2d1cec1c77f7714672c1efeae075424c929e0d5 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sun, 4 Jan 2009 05:18:03 -0800 Subject: x86: cleanup remaining cpumask_t ops in smpboot code Impact: use new cpumask API to reduce memory and stack usage Allocate the following local cpumasks based on the number of cpus that are present. References will use new cpumask API. (Currently only modified for x86_64, x86_32 continues to use the *_map variants.) cpu_callin_mask cpu_callout_mask cpu_initialized_mask cpu_sibling_setup_mask Provide the following accessor functions: struct cpumask *cpu_sibling_mask(int cpu) struct cpumask *cpu_core_mask(int cpu) Other changes are when setting or clearing the cpu online, possible or present maps, use the accessor functions. Signed-off-by: Mike Travis Acked-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 26 +++++++-- arch/x86/kernel/setup_percpu.c | 25 +++++++- arch/x86/kernel/smp.c | 17 ++++-- arch/x86/kernel/smpboot.c | 128 +++++++++++++++++++++-------------------- 4 files changed, 123 insertions(+), 73 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3f95a40f718..83492b1f93b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -40,6 +40,26 @@ #include "cpu.h" +#ifdef CONFIG_X86_64 + +/* all of these masks are initialized in setup_cpu_local_masks() */ +cpumask_var_t cpu_callin_mask; +cpumask_var_t cpu_callout_mask; +cpumask_var_t cpu_initialized_mask; + +/* representing cpus for which sibling maps can be computed */ +cpumask_var_t cpu_sibling_setup_mask; + +#else /* CONFIG_X86_32 */ + +cpumask_t cpu_callin_map; +cpumask_t cpu_callout_map; +cpumask_t cpu_initialized; +cpumask_t cpu_sibling_setup_map; + +#endif /* CONFIG_X86_32 */ + + static struct cpu_dev *this_cpu __cpuinitdata; #ifdef CONFIG_X86_64 @@ -856,8 +876,6 @@ static __init int setup_disablecpuid(char *arg) } __setup("clearcpuid=", setup_disablecpuid); -cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; - #ifdef CONFIG_X86_64 struct x8664_pda **_cpu_pda __read_mostly; EXPORT_SYMBOL(_cpu_pda); @@ -976,7 +994,7 @@ void __cpuinit cpu_init(void) me = current; - if (cpu_test_and_set(cpu, cpu_initialized)) + if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) panic("CPU#%d already initialized!\n", cpu); printk(KERN_INFO "Initializing CPU#%d\n", cpu); @@ -1085,7 +1103,7 @@ void __cpuinit cpu_init(void) struct tss_struct *t = &per_cpu(init_tss, cpu); struct thread_struct *thread = &curr->thread; - if (cpu_test_and_set(cpu, cpu_initialized)) { + if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); for (;;) local_irq_enable(); } diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index a4b619c3310..aa55764602b 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -131,7 +131,27 @@ static void __init setup_cpu_pda_map(void) /* point to new pointer table */ _cpu_pda = new_cpu_pda; } -#endif + +#endif /* CONFIG_SMP && CONFIG_X86_64 */ + +#ifdef CONFIG_X86_64 + +/* correctly size the local cpu masks */ +static void setup_cpu_local_masks(void) +{ + alloc_bootmem_cpumask_var(&cpu_initialized_mask); + alloc_bootmem_cpumask_var(&cpu_callin_mask); + alloc_bootmem_cpumask_var(&cpu_callout_mask); + alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); +} + +#else /* CONFIG_X86_32 */ + +static inline void setup_cpu_local_masks(void) +{ +} + +#endif /* 
CONFIG_X86_32 */ /* * Great future plan: @@ -187,6 +207,9 @@ void __init setup_per_cpu_areas(void) /* Setup node to cpumask map */ setup_node_to_cpumask_map(); + + /* Setup cpu initialized, callin, callout masks */ + setup_cpu_local_masks(); } #endif diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index beea2649a24..182135ba1ea 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -128,16 +128,23 @@ void native_send_call_func_single_ipi(int cpu) void native_send_call_func_ipi(const struct cpumask *mask) { - cpumask_t allbutself; + cpumask_var_t allbutself; - allbutself = cpu_online_map; - cpu_clear(smp_processor_id(), allbutself); + if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) { + send_IPI_mask(mask, CALL_FUNCTION_VECTOR); + return; + } - if (cpus_equal(*mask, allbutself) && - cpus_equal(cpu_online_map, cpu_callout_map)) + cpumask_copy(allbutself, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), allbutself); + + if (cpumask_equal(mask, allbutself) && + cpumask_equal(cpu_online_mask, cpu_callout_mask)) send_IPI_allbutself(CALL_FUNCTION_VECTOR); else send_IPI_mask(mask, CALL_FUNCTION_VECTOR); + + free_cpumask_var(allbutself); } /* diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6bd4d9b7387..00e17e58948 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -102,9 +102,6 @@ EXPORT_SYMBOL(smp_num_siblings); /* Last level cache ID of each logical CPU */ DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; -cpumask_t cpu_callin_map; -cpumask_t cpu_callout_map; - /* representing HT siblings of each logical CPU */ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); @@ -120,9 +117,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); static atomic_t init_deasserted; -/* representing cpus for which sibling maps can be computed */ -static cpumask_t cpu_sibling_setup_map; - /* Set if we find a B stepping CPU */ static int __cpuinitdata smp_b_stepping; @@ -140,7 +134,7 @@ EXPORT_SYMBOL(cpu_to_node_map); static void map_cpu_to_node(int cpu, int node) { printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); - cpu_set(cpu, node_to_cpumask_map[node]); + cpumask_set_cpu(cpu, &node_to_cpumask_map[node]); cpu_to_node_map[cpu] = node; } @@ -151,7 +145,7 @@ static void unmap_cpu_to_node(int cpu) printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); for (node = 0; node < MAX_NUMNODES; node++) - cpu_clear(cpu, node_to_cpumask_map[node]); + cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]); cpu_to_node_map[cpu] = 0; } #else /* !(CONFIG_NUMA && CONFIG_X86_32) */ @@ -209,7 +203,7 @@ static void __cpuinit smp_callin(void) */ phys_id = read_apic_id(); cpuid = smp_processor_id(); - if (cpu_isset(cpuid, cpu_callin_map)) { + if (cpumask_test_cpu(cpuid, cpu_callin_mask)) { panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, phys_id, cpuid); } @@ -231,7 +225,7 @@ static void __cpuinit smp_callin(void) /* * Has the boot CPU finished it's STARTUP sequence? */ - if (cpu_isset(cpuid, cpu_callout_map)) + if (cpumask_test_cpu(cpuid, cpu_callout_mask)) break; cpu_relax(); } @@ -274,7 +268,7 @@ static void __cpuinit smp_callin(void) /* * Allow the master to continue. 
*/ - cpu_set(cpuid, cpu_callin_map); + cpumask_set_cpu(cpuid, cpu_callin_mask); } static int __cpuinitdata unsafe_smp; @@ -332,7 +326,7 @@ notrace static void __cpuinit start_secondary(void *unused) ipi_call_lock(); lock_vector_lock(); __setup_vector_irq(smp_processor_id()); - cpu_set(smp_processor_id(), cpu_online_map); + set_cpu_online(smp_processor_id(), true); unlock_vector_lock(); ipi_call_unlock(); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; @@ -438,50 +432,52 @@ void __cpuinit set_cpu_sibling_map(int cpu) int i; struct cpuinfo_x86 *c = &cpu_data(cpu); - cpu_set(cpu, cpu_sibling_setup_map); + cpumask_set_cpu(cpu, cpu_sibling_setup_mask); if (smp_num_siblings > 1) { - for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { - if (c->phys_proc_id == cpu_data(i).phys_proc_id && - c->cpu_core_id == cpu_data(i).cpu_core_id) { - cpu_set(i, per_cpu(cpu_sibling_map, cpu)); - cpu_set(cpu, per_cpu(cpu_sibling_map, i)); - cpu_set(i, per_cpu(cpu_core_map, cpu)); - cpu_set(cpu, per_cpu(cpu_core_map, i)); - cpu_set(i, c->llc_shared_map); - cpu_set(cpu, cpu_data(i).llc_shared_map); + for_each_cpu(i, cpu_sibling_setup_mask) { + struct cpuinfo_x86 *o = &cpu_data(i); + + if (c->phys_proc_id == o->phys_proc_id && + c->cpu_core_id == o->cpu_core_id) { + cpumask_set_cpu(i, cpu_sibling_mask(cpu)); + cpumask_set_cpu(cpu, cpu_sibling_mask(i)); + cpumask_set_cpu(i, cpu_core_mask(cpu)); + cpumask_set_cpu(cpu, cpu_core_mask(i)); + cpumask_set_cpu(i, &c->llc_shared_map); + cpumask_set_cpu(cpu, &o->llc_shared_map); } } } else { - cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); + cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); } - cpu_set(cpu, c->llc_shared_map); + cpumask_set_cpu(cpu, &c->llc_shared_map); if (current_cpu_data.x86_max_cores == 1) { - per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); + cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); c->booted_cores = 1; return; } - for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { + for_each_cpu(i, cpu_sibling_setup_mask) { if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { - cpu_set(i, c->llc_shared_map); - cpu_set(cpu, cpu_data(i).llc_shared_map); + cpumask_set_cpu(i, &c->llc_shared_map); + cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map); } if (c->phys_proc_id == cpu_data(i).phys_proc_id) { - cpu_set(i, per_cpu(cpu_core_map, cpu)); - cpu_set(cpu, per_cpu(cpu_core_map, i)); + cpumask_set_cpu(i, cpu_core_mask(cpu)); + cpumask_set_cpu(cpu, cpu_core_mask(i)); /* * Does this new cpu bringup a new core? */ - if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { + if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) { /* * for each core in package, increment * the booted_cores for this new cpu */ - if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) + if (cpumask_first(cpu_sibling_mask(i)) == i) c->booted_cores++; /* * increment the core count for all @@ -504,7 +500,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu) * And for power savings, we return cpu_core_map */ if (sched_mc_power_savings || sched_smt_power_savings) - return &per_cpu(cpu_core_map, cpu); + return cpu_core_mask(cpu); else return &c->llc_shared_map; } @@ -523,7 +519,7 @@ static void impress_friends(void) */ pr_debug("Before bogomips.\n"); for_each_possible_cpu(cpu) - if (cpu_isset(cpu, cpu_callout_map)) + if (cpumask_test_cpu(cpu, cpu_callout_mask)) bogosum += cpu_data(cpu).loops_per_jiffy; printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", @@ -904,19 +900,19 @@ do_rest: * allow APs to start initializing. 
*/ pr_debug("Before Callout %d.\n", cpu); - cpu_set(cpu, cpu_callout_map); + cpumask_set_cpu(cpu, cpu_callout_mask); pr_debug("After Callout %d.\n", cpu); /* * Wait 5s total for a response */ for (timeout = 0; timeout < 50000; timeout++) { - if (cpu_isset(cpu, cpu_callin_map)) + if (cpumask_test_cpu(cpu, cpu_callin_mask)) break; /* It has booted */ udelay(100); } - if (cpu_isset(cpu, cpu_callin_map)) { + if (cpumask_test_cpu(cpu, cpu_callin_mask)) { /* number CPUs logically, starting from 1 (BSP is 0) */ pr_debug("OK.\n"); printk(KERN_INFO "CPU%d: ", cpu); @@ -941,9 +937,14 @@ restore_state: if (boot_error) { /* Try to put things back the way they were before ... */ numa_remove_cpu(cpu); /* was set by numa_add_cpu */ - cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */ - cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ - cpu_clear(cpu, cpu_present_map); + + /* was set by do_boot_cpu() */ + cpumask_clear_cpu(cpu, cpu_callout_mask); + + /* was set by cpu_init() */ + cpumask_clear_cpu(cpu, cpu_initialized_mask); + + set_cpu_present(cpu, false); per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; } @@ -977,7 +978,7 @@ int __cpuinit native_cpu_up(unsigned int cpu) /* * Already booted CPU? */ - if (cpu_isset(cpu, cpu_callin_map)) { + if (cpumask_test_cpu(cpu, cpu_callin_mask)) { pr_debug("do_boot_cpu %d Already started\n", cpu); return -ENOSYS; } @@ -1032,8 +1033,9 @@ int __cpuinit native_cpu_up(unsigned int cpu) */ static __init void disable_smp(void) { - cpu_present_map = cpumask_of_cpu(0); - cpu_possible_map = cpumask_of_cpu(0); + /* use the read/write pointers to the present and possible maps */ + cpumask_copy(&cpu_present_map, cpumask_of(0)); + cpumask_copy(&cpu_possible_map, cpumask_of(0)); smpboot_clear_io_apic_irqs(); if (smp_found_config) @@ -1041,8 +1043,8 @@ static __init void disable_smp(void) else physid_set_mask_of_physid(0, &phys_cpu_present_map); map_cpu_to_logical_apicid(); - cpu_set(0, per_cpu(cpu_sibling_map, 0)); - cpu_set(0, per_cpu(cpu_core_map, 0)); + cpumask_set_cpu(0, cpu_sibling_mask(0)); + cpumask_set_cpu(0, cpu_core_mask(0)); } /* @@ -1064,14 +1066,14 @@ static int __init smp_sanity_check(unsigned max_cpus) nr = 0; for_each_present_cpu(cpu) { if (nr >= 8) - cpu_clear(cpu, cpu_present_map); + set_cpu_present(cpu, false); nr++; } nr = 0; for_each_possible_cpu(cpu) { if (nr >= 8) - cpu_clear(cpu, cpu_possible_map); + set_cpu_possible(cpu, false); nr++; } @@ -1167,7 +1169,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) preempt_disable(); smp_cpu_index_default(); current_cpu_data = boot_cpu_data; - cpu_callin_map = cpumask_of_cpu(0); + cpumask_copy(cpu_callin_mask, cpumask_of(0)); mb(); /* * Setup boot CPU information @@ -1242,8 +1244,8 @@ void __init native_smp_prepare_boot_cpu(void) init_gdt(me); #endif switch_to_new_gdt(); - /* already set me in cpu_online_map in boot_cpu_init() */ - cpu_set(me, cpu_callout_map); + /* already set me in cpu_online_mask in boot_cpu_init() */ + cpumask_set_cpu(me, cpu_callout_mask); per_cpu(cpu_state, me) = CPU_ONLINE; } @@ -1311,7 +1313,7 @@ __init void prefill_possible_map(void) possible, max_t(int, possible - num_processors, 0)); for (i = 0; i < possible; i++) - cpu_set(i, cpu_possible_map); + set_cpu_possible(i, true); nr_cpu_ids = possible; } @@ -1323,31 +1325,31 @@ static void remove_siblinginfo(int cpu) int sibling; struct cpuinfo_x86 *c = &cpu_data(cpu); - for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) { - cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); + for_each_cpu(sibling, 
cpu_core_mask(cpu)) { + cpumask_clear_cpu(cpu, cpu_core_mask(sibling)); /*/ * last thread sibling in this cpu core going down */ - if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) + if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) cpu_data(sibling).booted_cores--; } - for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu)) - cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); - cpus_clear(per_cpu(cpu_sibling_map, cpu)); - cpus_clear(per_cpu(cpu_core_map, cpu)); + for_each_cpu(sibling, cpu_sibling_mask(cpu)) + cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling)); + cpumask_clear(cpu_sibling_mask(cpu)); + cpumask_clear(cpu_core_mask(cpu)); c->phys_proc_id = 0; c->cpu_core_id = 0; - cpu_clear(cpu, cpu_sibling_setup_map); + cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); } static void __ref remove_cpu_from_maps(int cpu) { - cpu_clear(cpu, cpu_online_map); - cpu_clear(cpu, cpu_callout_map); - cpu_clear(cpu, cpu_callin_map); + set_cpu_online(cpu, false); + cpumask_clear_cpu(cpu, cpu_callout_mask); + cpumask_clear_cpu(cpu, cpu_callin_mask); /* was set by cpu_init() */ - cpu_clear(cpu, cpu_initialized); + cpumask_clear_cpu(cpu, cpu_initialized_mask); numa_remove_cpu(cpu); } -- cgit v1.2.3 From 5cb0535f1713b51610f2881b17d0fe3656114364 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sun, 4 Jan 2009 05:18:05 -0800 Subject: cpumask: replace CPUMASK_ALLOC etc with cpumask_var_t Impact: cleanup There's only one user, and it's a fairly easy conversion. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Acked-by: Dave Jones Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index f0ea6fa2f53..d2cc4991cba 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -458,11 +458,6 @@ static int centrino_verify (struct cpufreq_policy *policy) * * Sets a new CPUFreq policy. 
*/ -struct allmasks { - cpumask_t saved_mask; - cpumask_t covered_cpus; -}; - static int centrino_target (struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) @@ -472,12 +467,15 @@ static int centrino_target (struct cpufreq_policy *policy, struct cpufreq_freqs freqs; int retval = 0; unsigned int j, k, first_cpu, tmp; - CPUMASK_ALLOC(allmasks); - CPUMASK_PTR(saved_mask, allmasks); - CPUMASK_PTR(covered_cpus, allmasks); + cpumask_var_t saved_mask, covered_cpus; - if (unlikely(allmasks == NULL)) + if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL))) return -ENOMEM; + if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { + free_cpumask_var(saved_mask); + return -ENOMEM; + } + cpumask_copy(saved_mask, ¤t->cpus_allowed); if (unlikely(per_cpu(centrino_model, cpu) == NULL)) { retval = -ENODEV; @@ -493,9 +491,7 @@ static int centrino_target (struct cpufreq_policy *policy, goto out; } - *saved_mask = current->cpus_allowed; first_cpu = 1; - cpus_clear(*covered_cpus); for_each_cpu_mask_nr(j, policy->cpus) { const cpumask_t *mask; @@ -605,7 +601,8 @@ migrate_end: preempt_enable(); set_cpus_allowed_ptr(current, saved_mask); out: - CPUMASK_FREE(allmasks); + free_cpumask_var(saved_mask); + free_cpumask_var(covered_cpus); return retval; } -- cgit v1.2.3 From 835481d9bcd65720b473db6b38746a74a3964218 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sun, 4 Jan 2009 05:18:06 -0800 Subject: cpumask: convert struct cpufreq_policy to cpumask_var_t Impact: use new cpumask API to reduce memory usage This is part of an effort to reduce structure sizes for machines configured with large NR_CPUS. cpumask_t gets replaced by cpumask_var_t, which is either struct cpumask[1] (small NR_CPUS) or struct cpumask * (large NR_CPUS). Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Acked-by: Dave Jones Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 10 +++++----- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 8 ++++---- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 6 +++--- arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 2 +- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 14 +++++++------- arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 18 +++++++++--------- 6 files changed, 29 insertions(+), 29 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 28102ad1a36..0b31939862d 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -411,7 +411,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, #ifdef CONFIG_HOTPLUG_CPU /* cpufreq holds the hotplug lock, so we are safe from here on */ - cpus_and(online_policy_cpus, cpu_online_map, policy->cpus); + cpumask_and(&online_policy_cpus, cpu_online_mask, policy->cpus); #else online_policy_cpus = policy->cpus; #endif @@ -626,15 +626,15 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) */ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { - cpumask_copy(&policy->cpus, perf->shared_cpu_map); + cpumask_copy(policy->cpus, perf->shared_cpu_map); } - cpumask_copy(&policy->related_cpus, perf->shared_cpu_map); + cpumask_copy(policy->related_cpus, perf->shared_cpu_map); #ifdef CONFIG_SMP dmi_check_system(sw_any_bug_dmi_table); - if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) { + if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) { policy->shared_type = 
CPUFREQ_SHARED_TYPE_ALL; - policy->cpus = per_cpu(cpu_core_map, cpu); + cpumask_copy(policy->cpus, cpu_core_mask(cpu)); } #endif diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index beea4466b06..b585e04cbc9 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, return 0; /* notifiers */ - for_each_cpu_mask_nr(i, policy->cpus) { + for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software * Developer's Manual, Volume 3 */ - for_each_cpu_mask_nr(i, policy->cpus) + for_each_cpu(i, policy->cpus) cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); /* notifiers */ - for_each_cpu_mask_nr(i, policy->cpus) { + for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -203,7 +203,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) unsigned int i; #ifdef CONFIG_SMP - policy->cpus = per_cpu(cpu_sibling_map, policy->cpu); + cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); #endif /* Errata workaround */ diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index c3c9adbaa26..5c28b37dea1 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -1199,10 +1199,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) set_cpus_allowed_ptr(current, &oldmask); if (cpu_family == CPU_HW_PSTATE) - pol->cpus = cpumask_of_cpu(pol->cpu); + cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); else - pol->cpus = per_cpu(cpu_core_map, pol->cpu); - data->available_cores = &(pol->cpus); + cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu)); + data->available_cores = pol->cpus; /* Take a crude guess here. 
* That guess was in microseconds, so multiply with 1000 */ diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index 65cfb5d7f77..8ecc75b6c7c 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h @@ -53,7 +53,7 @@ struct powernow_k8_data { /* we need to keep track of associated cores, but let cpufreq * handle hotplug events - so just point at cpufreq pol->cpus * structure */ - cpumask_t *available_cores; + struct cpumask *available_cores; }; diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index d2cc4991cba..f08998278a3 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -492,8 +492,8 @@ static int centrino_target (struct cpufreq_policy *policy, } first_cpu = 1; - for_each_cpu_mask_nr(j, policy->cpus) { - const cpumask_t *mask; + for_each_cpu(j, policy->cpus) { + const struct cpumask *mask; /* cpufreq holds the hotplug lock, so we are safe here */ if (!cpu_online(j)) @@ -504,9 +504,9 @@ static int centrino_target (struct cpufreq_policy *policy, * Make sure we are running on CPU that wants to change freq */ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) - mask = &policy->cpus; + mask = policy->cpus; else - mask = &cpumask_of_cpu(j); + mask = cpumask_of(j); set_cpus_allowed_ptr(current, mask); preempt_disable(); @@ -538,7 +538,7 @@ static int centrino_target (struct cpufreq_policy *policy, dprintk("target=%dkHz old=%d new=%d msr=%04x\n", target_freq, freqs.old, freqs.new, msr); - for_each_cpu_mask_nr(k, policy->cpus) { + for_each_cpu(k, policy->cpus) { if (!cpu_online(k)) continue; freqs.cpu = k; @@ -563,7 +563,7 @@ static int centrino_target (struct cpufreq_policy *policy, preempt_enable(); } - for_each_cpu_mask_nr(k, policy->cpus) { + for_each_cpu(k, policy->cpus) { if (!cpu_online(k)) continue; freqs.cpu = k; @@ -586,7 +586,7 @@ static int centrino_target (struct cpufreq_policy *policy, tmp = freqs.new; freqs.new = freqs.old; freqs.old = tmp; - for_each_cpu_mask_nr(j, policy->cpus) { + for_each_cpu(j, policy->cpus) { if (!cpu_online(j)) continue; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 04d0376b64b..dedc1e98f16 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c @@ -229,7 +229,7 @@ static unsigned int speedstep_detect_chipset (void) return 0; } -static unsigned int _speedstep_get(const cpumask_t *cpus) +static unsigned int _speedstep_get(const struct cpumask *cpus) { unsigned int speed; cpumask_t cpus_allowed; @@ -244,7 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus) static unsigned int speedstep_get(unsigned int cpu) { - return _speedstep_get(&cpumask_of_cpu(cpu)); + return _speedstep_get(cpumask_of(cpu)); } /** @@ -267,7 +267,7 @@ static int speedstep_target (struct cpufreq_policy *policy, if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) return -EINVAL; - freqs.old = _speedstep_get(&policy->cpus); + freqs.old = _speedstep_get(policy->cpus); freqs.new = speedstep_freqs[newstate].frequency; freqs.cpu = policy->cpu; @@ -279,20 +279,20 @@ static int speedstep_target (struct cpufreq_policy *policy, cpus_allowed = current->cpus_allowed; - for_each_cpu_mask_nr(i, policy->cpus) { + for_each_cpu(i, policy->cpus) { freqs.cpu 
= i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } /* switch to physical CPU where state is to be changed */ - set_cpus_allowed_ptr(current, &policy->cpus); + set_cpus_allowed_ptr(current, policy->cpus); speedstep_set_state(newstate); /* allow to be run on all CPUs */ set_cpus_allowed_ptr(current, &cpus_allowed); - for_each_cpu_mask_nr(i, policy->cpus) { + for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -322,11 +322,11 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) /* only run on CPU to be set, or on its sibling */ #ifdef CONFIG_SMP - policy->cpus = per_cpu(cpu_sibling_map, policy->cpu); + cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); #endif cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, &policy->cpus); + set_cpus_allowed_ptr(current, policy->cpus); /* detect low and high frequency and transition latency */ result = speedstep_get_freqs(speedstep_processor, @@ -339,7 +339,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) return result; /* get current speed setting */ - speed = _speedstep_get(&policy->cpus); + speed = _speedstep_get(policy->cpus); if (!speed) return -EIO; -- cgit v1.2.3 From c74f31c035f46a095a0c72f80246a65b314205a5 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sun, 4 Jan 2009 05:18:07 -0800 Subject: cpumask: use work_on_cpu in acpi/cstate.c Impact: use new cpumask API to reduce stack usage Replace the saving of current->cpus_allowed and set_cpus_allowed_ptr() with a work_on_cpu function for the acpi_processor_ffh_cstate_probe() function. Basically splits acpi_processor_ffh_cstate_probe() into two functions, the other being acpi_processor_ffh_cstate_probe_cpu which is the work function run on the designated cpu. 
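The shape of that conversion, independent of the ACPI details, is roughly the following. This is a hedged sketch rather than the patch itself, and the example_* names are hypothetical:

    #include <linux/workqueue.h>	/* work_on_cpu() */

    /* work function: runs on the designated cpu with the caller's argument */
    static long example_probe_on_cpu(void *arg)
    {
    	/* per-cpu work (cpuid/rdmsr on the local cpu) goes here */
    	return 0;
    }

    static long example_probe(unsigned int cpu, void *arg)
    {
    	/*
    	 * Instead of saving current->cpus_allowed and migrating the caller
    	 * with set_cpus_allowed_ptr(), queue the helper on @cpu and wait
    	 * for its return value.
    	 */
    	return work_on_cpu(cpu, example_probe_on_cpu, arg);
    }
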
Signed-off-by: Mike Travis Acked-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/cstate.c | 70 +++++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 33 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index c2502eb9aa8..cf5ec586f22 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -66,35 +66,15 @@ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; #define NATIVE_CSTATE_BEYOND_HALT (2) -int acpi_processor_ffh_cstate_probe(unsigned int cpu, - struct acpi_processor_cx *cx, struct acpi_power_register *reg) +static long acpi_processor_ffh_cstate_probe_cpu(void *_cx) { - struct cstate_entry *percpu_entry; - struct cpuinfo_x86 *c = &cpu_data(cpu); - - cpumask_t saved_mask; - int retval; + struct acpi_processor_cx *cx = _cx; + long retval; unsigned int eax, ebx, ecx, edx; unsigned int edx_part; unsigned int cstate_type; /* C-state type and not ACPI C-state type */ unsigned int num_cstate_subtype; - if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF ) - return -1; - - if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT) - return -1; - - percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); - percpu_entry->states[cx->index].eax = 0; - percpu_entry->states[cx->index].ecx = 0; - - /* Make sure we are running on right CPU */ - saved_mask = current->cpus_allowed; - retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); - if (retval) - return -1; - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); /* Check whether this particular cx_type (in CST) is supported or not */ @@ -114,21 +94,45 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, retval = -1; goto out; } - percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; - - /* Use the hint in CST */ - percpu_entry->states[cx->index].eax = cx->address; if (!mwait_supported[cstate_type]) { mwait_supported[cstate_type] = 1; - printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d " - "state\n", cx->type); + printk(KERN_DEBUG + "Monitor-Mwait will be used to enter C-%d " + "state\n", cx->type); } - snprintf(cx->desc, ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x", - cx->address); - + snprintf(cx->desc, + ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x", + cx->address); out: - set_cpus_allowed_ptr(current, &saved_mask); + return retval; +} + +int acpi_processor_ffh_cstate_probe(unsigned int cpu, + struct acpi_processor_cx *cx, struct acpi_power_register *reg) +{ + struct cstate_entry *percpu_entry; + struct cpuinfo_x86 *c = &cpu_data(cpu); + long retval; + + if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF) + return -1; + + if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT) + return -1; + + percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); + percpu_entry->states[cx->index].eax = 0; + percpu_entry->states[cx->index].ecx = 0; + + /* Make sure we are running on right CPU */ + + retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx); + if (retval == 0) { + /* Use the hint in CST */ + percpu_entry->states[cx->index].eax = cx->address; + percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; + } return retval; } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); -- cgit v1.2.3 From 4d8bb5374924fc736ce275bfd1619b87a2106860 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sun, 4 Jan 2009 05:18:08 -0800 Subject: cpumask: use cpumask_var_t in acpi-cpufreq.c Impact: cleanup, reduce stack usage, use new cpumask API. 
Replace the cpumask_t in struct drv_cmd with a cpumask_var_t. Remove unneeded online_policy_cpus cpumask_t in acpi_cpufreq_target. Update refs to use new cpumask API. Signed-off-by: Mike Travis Acked-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 58 +++++++++++++++--------------- 1 file changed, 29 insertions(+), 29 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 0b31939862d..fb594170dc5 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -145,7 +145,7 @@ typedef union { struct drv_cmd { unsigned int type; - cpumask_t mask; + cpumask_var_t mask; drv_addr_union addr; u32 val; }; @@ -193,7 +193,7 @@ static void drv_read(struct drv_cmd *cmd) cpumask_t saved_mask = current->cpus_allowed; cmd->val = 0; - set_cpus_allowed_ptr(current, &cmd->mask); + set_cpus_allowed_ptr(current, cmd->mask); do_drv_read(cmd); set_cpus_allowed_ptr(current, &saved_mask); } @@ -203,8 +203,8 @@ static void drv_write(struct drv_cmd *cmd) cpumask_t saved_mask = current->cpus_allowed; unsigned int i; - for_each_cpu_mask_nr(i, cmd->mask) { - set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); + for_each_cpu(i, cmd->mask) { + set_cpus_allowed_ptr(current, cpumask_of(i)); do_drv_write(cmd); } @@ -212,22 +212,22 @@ static void drv_write(struct drv_cmd *cmd) return; } -static u32 get_cur_val(const cpumask_t *mask) +static u32 get_cur_val(const struct cpumask *mask) { struct acpi_processor_performance *perf; struct drv_cmd cmd; - if (unlikely(cpus_empty(*mask))) + if (unlikely(cpumask_empty(mask))) return 0; - switch (per_cpu(drv_data, first_cpu(*mask))->cpu_feature) { + switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; - perf = per_cpu(drv_data, first_cpu(*mask))->acpi_data; + perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data; cmd.addr.io.port = perf->control_register.address; cmd.addr.io.bit_width = perf->control_register.bit_width; break; @@ -235,7 +235,7 @@ static u32 get_cur_val(const cpumask_t *mask) return 0; } - cmd.mask = *mask; + cpumask_copy(cmd.mask, mask); drv_read(&cmd); @@ -386,7 +386,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); struct acpi_processor_performance *perf; struct cpufreq_freqs freqs; - cpumask_t online_policy_cpus; struct drv_cmd cmd; unsigned int next_state = 0; /* Index into freq_table */ unsigned int next_perf_state = 0; /* Index into perf table */ @@ -401,20 +400,18 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, return -ENODEV; } + if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL))) + return -ENOMEM; + perf = data->acpi_data; result = cpufreq_frequency_table_target(policy, data->freq_table, target_freq, relation, &next_state); - if (unlikely(result)) - return -ENODEV; - -#ifdef CONFIG_HOTPLUG_CPU - /* cpufreq holds the hotplug lock, so we are safe from here on */ - cpumask_and(&online_policy_cpus, cpu_online_mask, policy->cpus); -#else - online_policy_cpus = policy->cpus; -#endif + if (unlikely(result)) { + result = -ENODEV; + goto out; + } next_perf_state = data->freq_table[next_state].index; if (perf->state == next_perf_state) { @@ -425,7 +422,7 @@ static int acpi_cpufreq_target(struct 
cpufreq_policy *policy, } else { dprintk("Already at target state (P%d)\n", next_perf_state); - return 0; + goto out; } } @@ -444,19 +441,19 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, cmd.val = (u32) perf->states[next_perf_state].control; break; default: - return -ENODEV; + result = -ENODEV; + goto out; } - cpus_clear(cmd.mask); - + /* cpufreq holds the hotplug lock, so we are safe from here on */ if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) - cmd.mask = online_policy_cpus; + cpumask_and(cmd.mask, cpu_online_mask, policy->cpus); else - cpu_set(policy->cpu, cmd.mask); + cpumask_copy(cmd.mask, cpumask_of(policy->cpu)); freqs.old = perf->states[perf->state].core_frequency * 1000; freqs.new = data->freq_table[next_state].frequency; - for_each_cpu_mask_nr(i, cmd.mask) { + for_each_cpu(i, cmd.mask) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -464,19 +461,22 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, drv_write(&cmd); if (acpi_pstate_strict) { - if (!check_freqs(&cmd.mask, freqs.new, data)) { + if (!check_freqs(cmd.mask, freqs.new, data)) { dprintk("acpi_cpufreq_target failed (%d)\n", policy->cpu); - return -EAGAIN; + result = -EAGAIN; + goto out; } } - for_each_cpu_mask_nr(i, cmd.mask) { + for_each_cpu(i, cmd.mask) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } perf->state = next_perf_state; +out: + free_cpumask_var(cmd.mask); return result; } -- cgit v1.2.3 From 7503bfbae89eba07b46441a5d1594647f6b8ab7d Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sun, 4 Jan 2009 05:18:09 -0800 Subject: cpumask: use work_on_cpu in acpi-cpufreq.c for drv_read and drv_write Impact: use new cpumask API to reduce stack usage Replace the saving of current->cpus_allowed and set_cpus_allowed_ptr() with a work_on_cpu function for drv_read() and drv_write(). Basically converts do_drv_{read,write} into "work_on_cpu" functions that are now called by drv_read and drv_write. 
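As a rough illustration of the drv_write() side (a sketch under the same assumptions, not the patch; the example_* names are hypothetical), each cpu in the command mask gets the write handed to it via work_on_cpu():

    #include <linux/cpumask.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct example_cmd {
    	cpumask_var_t mask;	/* cpus the new value applies to */
    	u32 val;
    };

    static long example_do_write(void *_cmd)
    {
    	struct example_cmd *cmd = _cmd;

    	/* the MSR/IO-port write of cmd->val on the local cpu goes here */
    	return 0;
    }

    static void example_write(struct example_cmd *cmd)
    {
    	unsigned int i;

    	/* run the write on every cpu in the mask, one cpu at a time */
    	for_each_cpu(i, cmd->mask)
    		work_on_cpu(i, example_do_write, cmd);
    }
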
Signed-off-by: Mike Travis Acked-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index fb594170dc5..4e4f2b04dac 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -150,8 +150,9 @@ struct drv_cmd { u32 val; }; -static void do_drv_read(struct drv_cmd *cmd) +static long do_drv_read(void *_cmd) { + struct drv_cmd *cmd = _cmd; u32 h; switch (cmd->type) { @@ -166,10 +167,12 @@ static void do_drv_read(struct drv_cmd *cmd) default: break; } + return 0; } -static void do_drv_write(struct drv_cmd *cmd) +static long do_drv_write(void *_cmd) { + struct drv_cmd *cmd = _cmd; u32 lo, hi; switch (cmd->type) { @@ -186,30 +189,23 @@ static void do_drv_write(struct drv_cmd *cmd) default: break; } + return 0; } static void drv_read(struct drv_cmd *cmd) { - cpumask_t saved_mask = current->cpus_allowed; cmd->val = 0; - set_cpus_allowed_ptr(current, cmd->mask); - do_drv_read(cmd); - set_cpus_allowed_ptr(current, &saved_mask); + work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd); } static void drv_write(struct drv_cmd *cmd) { - cpumask_t saved_mask = current->cpus_allowed; unsigned int i; for_each_cpu(i, cmd->mask) { - set_cpus_allowed_ptr(current, cpumask_of(i)); - do_drv_write(cmd); + work_on_cpu(i, do_drv_write, cmd); } - - set_cpus_allowed_ptr(current, &saved_mask); - return; } static u32 get_cur_val(const struct cpumask *mask) @@ -235,10 +231,15 @@ static u32 get_cur_val(const struct cpumask *mask) return 0; } + if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL))) + return 0; + cpumask_copy(cmd.mask, mask); drv_read(&cmd); + free_cpumask_var(cmd.mask); + dprintk("get_cur_val = %u\n", cmd.val); return cmd.val; -- cgit v1.2.3 From e39ad415ac15116df213dfa2aa2a4f1b0857af9c Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sun, 4 Jan 2009 05:18:10 -0800 Subject: cpumask: use work_on_cpu in acpi-cpufreq.c for read_measured_perf_ctrs Impact: use new cpumask API to reduce stack usage Replace the saving of current->cpus_allowed and set_cpus_allowed_ptr() with a work_on_cpu function for read_measured_perf_ctrs(). Basically splits off the work function from get_measured_perf which is run on the designated cpu. Moves definition of struct perf_cur out of function local namespace, and is used as the work function argument. References in get_measured_perf use values in the perf_cur struct. 
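A hedged sketch of that split (the structure mirrors the diff below, but the example_* helpers and the simplified percentage math are illustrative only):

    #include <linux/math64.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>
    #include <asm/msr.h>	/* rdmsrl(), wrmsr(), MSR_IA32_APERF/MPERF */

    /* result structure at file scope, so it can be passed to the work function */
    struct example_perf {
    	u64 aperf;
    	u64 mperf;
    };

    /* work function: read and reset both counters on the designated cpu */
    static long example_read_perf_ctrs(void *_cur)
    {
    	struct example_perf *cur = _cur;

    	rdmsrl(MSR_IA32_APERF, cur->aperf);
    	rdmsrl(MSR_IA32_MPERF, cur->mperf);

    	wrmsr(MSR_IA32_APERF, 0, 0);
    	wrmsr(MSR_IA32_MPERF, 0, 0);

    	return 0;
    }

    static unsigned int example_get_measured_perf(unsigned int cpu)
    {
    	struct example_perf cur;

    	/* no cpus_allowed juggling: just run the helper on @cpu */
    	if (work_on_cpu(cpu, example_read_perf_ctrs, &cur))
    		return 0;

    	/*
    	 * Rough C0 percentage; the real code first scales both counters
    	 * down so the multiplication cannot overflow 64 bits.
    	 */
    	return cur.mperf ? div64_u64(cur.aperf * 100, cur.mperf) : 0;
    }
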
Signed-off-by: Mike Travis Acked-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 83 ++++++++++++++++-------------- 1 file changed, 43 insertions(+), 40 deletions(-) (limited to 'arch/x86/kernel') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 4e4f2b04dac..06fcd8f9323 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -245,6 +245,30 @@ static u32 get_cur_val(const struct cpumask *mask) return cmd.val; } +struct perf_cur { + union { + struct { + u32 lo; + u32 hi; + } split; + u64 whole; + } aperf_cur, mperf_cur; +}; + + +static long read_measured_perf_ctrs(void *_cur) +{ + struct perf_cur *cur = _cur; + + rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi); + rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi); + + wrmsr(MSR_IA32_APERF, 0, 0); + wrmsr(MSR_IA32_MPERF, 0, 0); + + return 0; +} + /* * Return the measured active (C0) frequency on this CPU since last call * to this function. @@ -261,31 +285,12 @@ static u32 get_cur_val(const struct cpumask *mask) static unsigned int get_measured_perf(struct cpufreq_policy *policy, unsigned int cpu) { - union { - struct { - u32 lo; - u32 hi; - } split; - u64 whole; - } aperf_cur, mperf_cur; - - cpumask_t saved_mask; + struct perf_cur cur; unsigned int perf_percent; unsigned int retval; - saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); - if (get_cpu() != cpu) { - /* We were not able to run on requested processor */ - put_cpu(); + if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur)) return 0; - } - - rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi); - rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi); - - wrmsr(MSR_IA32_APERF, 0,0); - wrmsr(MSR_IA32_MPERF, 0,0); #ifdef __i386__ /* @@ -293,37 +298,39 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy, * Get an approximate value. Return failure in case we cannot get * an approximate value. 
*/ - if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) { + if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) { int shift_count; u32 h; - h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi); + h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi); shift_count = fls(h); - aperf_cur.whole >>= shift_count; - mperf_cur.whole >>= shift_count; + cur.aperf_cur.whole >>= shift_count; + cur.mperf_cur.whole >>= shift_count; } - if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) { + if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) { int shift_count = 7; - aperf_cur.split.lo >>= shift_count; - mperf_cur.split.lo >>= shift_count; + cur.aperf_cur.split.lo >>= shift_count; + cur.mperf_cur.split.lo >>= shift_count; } - if (aperf_cur.split.lo && mperf_cur.split.lo) - perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo; + if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo) + perf_percent = (cur.aperf_cur.split.lo * 100) / + cur.mperf_cur.split.lo; else perf_percent = 0; #else - if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) { + if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) { int shift_count = 7; - aperf_cur.whole >>= shift_count; - mperf_cur.whole >>= shift_count; + cur.aperf_cur.whole >>= shift_count; + cur.mperf_cur.whole >>= shift_count; } - if (aperf_cur.whole && mperf_cur.whole) - perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole; + if (cur.aperf_cur.whole && cur.mperf_cur.whole) + perf_percent = (cur.aperf_cur.whole * 100) / + cur.mperf_cur.whole; else perf_percent = 0; @@ -331,10 +338,6 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy, retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100; - put_cpu(); - set_cpus_allowed_ptr(current, &saved_mask); - - dprintk("cpu %d: performance percent %d\n", cpu, perf_percent); return retval; } @@ -352,7 +355,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) } cached_freq = data->freq_table[data->acpi_data->state].frequency; - freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data); + freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); if (freq != cached_freq) { /* * The dreaded BIOS frequency change behind our back. -- cgit v1.2.3
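
Taken together, the series converts on-stack and static cpumask_t objects to the cpumask_var_t pattern sketched below (a hedged example, not taken from the patches; with CONFIG_CPUMASK_OFFSTACK=y the storage is heap-allocated, otherwise it lives in the variable itself):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static int example_walk_online(void)
    {
    	cpumask_var_t tmp;
    	unsigned int cpu;

    	/* allocates real storage only when CONFIG_CPUMASK_OFFSTACK=y */
    	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
    		return -ENOMEM;

    	cpumask_copy(tmp, cpu_online_mask);

    	for_each_cpu(cpu, tmp)
    		;	/* act on each online cpu */

    	free_cpumask_var(tmp);
    	return 0;
    }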