author     Linus Torvalds <torvalds@linux-foundation.org>  2009-07-08 09:36:46 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-07-08 09:36:46 -0700
commit     b4b21cac88caa4078f5755b0bd3770af5fe9c146
tree       fb5cccef61d0ed80adf4b0d1d03ddb3e80ff701d /drivers
parent     728b690fd5c185c639a5db0819bd6e0385b14188
parent     a2e1b4c31257c07f148a89eb7eea7ca959fd0642
Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] Powernow-k8: support family 0xf with 2 low p-states
[CPUFREQ] fix (utter) cpufreq_add_dev mess
[CPUFREQ] Cleanup locking in conservative governor
[CPUFREQ] Cleanup locking in ondemand governor
      [CPUFREQ] Mark policy_rwsem as going static in cpufreq.c, wont be exported
[CPUFREQ] Eliminate the recent lockdep warnings in cpufreq
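
The "fix (utter) cpufreq_add_dev mess" commit above replaces ad-hoc cleanup at each failure site with a single chain of labelled error exits, so every failure jumps to the label that undoes exactly what has been set up so far. Below is a minimal userspace sketch of that unwinding pattern; make_policy and the calloc calls are illustrative stand-ins for the kernel's allocation helpers, not the actual cpufreq code.

/*
 * Sketch of goto-unwind error handling: labels are ordered so that a
 * jump frees everything allocated before the failure, nothing more.
 * Build: cc -o unwind unwind.c
 */
#include <stdio.h>
#include <stdlib.h>

struct policy {
	unsigned long *cpus;		/* stand-in for alloc_cpumask_var() */
	unsigned long *related_cpus;	/* stand-in for zalloc_cpumask_var() */
};

static struct policy *make_policy(void)
{
	struct policy *p = calloc(1, sizeof(*p));

	if (!p)
		goto nomem_out;
	p->cpus = calloc(1, sizeof(*p->cpus));
	if (!p->cpus)
		goto err_free_policy;
	p->related_cpus = calloc(1, sizeof(*p->related_cpus));
	if (!p->related_cpus)
		goto err_free_cpumask;
	return p;

err_free_cpumask:	/* second allocation failed: free the first */
	free(p->cpus);
err_free_policy:	/* first allocation failed: free the struct */
	free(p);
nomem_out:
	return NULL;
}

int main(void)
{
	struct policy *p = make_policy();

	printf("policy %s\n", p ? "allocated" : "allocation failed");
	if (p) {
		free(p->related_cpus);
		free(p->cpus);
		free(p);
	}
	return 0;
}

Each new resource adds one label to the chain, which is why the patch can delete the duplicated kfree()/free_cpumask_var() calls scattered through the function.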
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cpufreq/cpufreq.c               |  69
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  |  49
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      |  77
3 files changed, 92 insertions(+), 103 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e2ec0b1894..c668ac855f7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -761,6 +761,10 @@ static struct kobj_type ktype_cpufreq = {
  * cpufreq_add_dev - add a CPU device
  *
  * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
  */
 static int cpufreq_add_dev(struct sys_device *sys_dev)
 {
@@ -804,15 +808,12 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 		goto nomem_out;
 	}
 	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
-		kfree(policy);
 		ret = -ENOMEM;
-		goto nomem_out;
+		goto err_free_policy;
 	}
 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
-		free_cpumask_var(policy->cpus);
-		kfree(policy);
 		ret = -ENOMEM;
-		goto nomem_out;
+		goto err_free_cpumask;
 	}
 
 	policy->cpu = cpu;
@@ -820,7 +821,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 	/* Initially set CPU itself as the policy_cpu */
 	per_cpu(policy_cpu, cpu) = cpu;
-	lock_policy_rwsem_write(cpu);
+	ret = (lock_policy_rwsem_write(cpu) < 0);
+	WARN_ON(ret);
 
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update);
@@ -833,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	ret = cpufreq_driver->init(policy);
 	if (ret) {
 		dprintk("initialization failed\n");
-		goto err_out;
+		goto err_unlock_policy;
 	}
 	policy->user_policy.min = policy->min;
 	policy->user_policy.max = policy->max;
@@ -858,15 +860,21 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 		/* Check for existing affected CPUs.
 		 * They may not be aware of it due to CPU Hotplug.
 		 */
-		managed_policy = cpufreq_cpu_get(j);	/* FIXME: Where is this released? What about error paths? */
+		managed_policy = cpufreq_cpu_get(j);
 		if (unlikely(managed_policy)) {
 
 			/* Set proper policy_cpu */
 			unlock_policy_rwsem_write(cpu);
 			per_cpu(policy_cpu, cpu) = managed_policy->cpu;
 
-			if (lock_policy_rwsem_write(cpu) < 0)
-				goto err_out_driver_exit;
+			if (lock_policy_rwsem_write(cpu) < 0) {
+				/* Should not go through policy unlock path */
+				if (cpufreq_driver->exit)
+					cpufreq_driver->exit(policy);
+				ret = -EBUSY;
+				cpufreq_cpu_put(managed_policy);
+				goto err_free_cpumask;
+			}
 
 			spin_lock_irqsave(&cpufreq_driver_lock, flags);
 			cpumask_copy(managed_policy->cpus, policy->cpus);
@@ -877,12 +885,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 			ret = sysfs_create_link(&sys_dev->kobj,
 						&managed_policy->kobj,
 						"cpufreq");
-			if (ret)
-				goto err_out_driver_exit;
-
-			cpufreq_debug_enable_ratelimit();
-			ret = 0;
-			goto err_out_driver_exit; /* call driver->exit() */
+			if (!ret)
+				cpufreq_cpu_put(managed_policy);
+			/*
+			 * Success. We only needed to be added to the mask.
+			 * Call driver->exit() because only the cpu parent of
+			 * the kobj needed to call init().
+			 */
+			goto out_driver_exit; /* call driver->exit() */
 		}
 	}
 #endif
@@ -892,25 +902,25 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
 				   "cpufreq");
 	if (ret)
-		goto err_out_driver_exit;
+		goto out_driver_exit;
 
 	/* set up files for this cpu device */
 	drv_attr = cpufreq_driver->attr;
 	while ((drv_attr) && (*drv_attr)) {
 		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
 		if (ret)
-			goto err_out_driver_exit;
+			goto err_out_kobj_put;
 		drv_attr++;
 	}
 	if (cpufreq_driver->get) {
 		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
 		if (ret)
-			goto err_out_driver_exit;
+			goto err_out_kobj_put;
 	}
 	if (cpufreq_driver->target) {
 		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
 		if (ret)
-			goto err_out_driver_exit;
+			goto err_out_kobj_put;
 	}
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -928,12 +938,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 			continue;
 
 		dprintk("CPU %u already managed, adding link\n", j);
-		cpufreq_cpu_get(cpu);
+		managed_policy = cpufreq_cpu_get(cpu);
 		cpu_sys_dev = get_cpu_sysdev(j);
 		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
 					"cpufreq");
-		if (ret)
+		if (ret) {
+			cpufreq_cpu_put(managed_policy);
 			goto err_out_unregister;
+		}
 	}
 
 	policy->governor = NULL; /* to assure that the starting sequence is
@@ -965,17 +977,20 @@ err_out_unregister:
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+err_out_kobj_put:
 	kobject_put(&policy->kobj);
 	wait_for_completion(&policy->kobj_unregister);
 
-err_out_driver_exit:
+out_driver_exit:
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 
-err_out:
+err_unlock_policy:
 	unlock_policy_rwsem_write(cpu);
+err_free_cpumask:
+	free_cpumask_var(policy->cpus);
+err_free_policy:
 	kfree(policy);
-
 nomem_out:
 	module_put(cpufreq_driver->owner);
 module_out:
@@ -1070,8 +1085,6 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-	unlock_policy_rwsem_write(cpu);
-
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
 
@@ -1088,6 +1101,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(data);
 
+	unlock_policy_rwsem_write(cpu);
+
 	free_cpumask_var(data->related_cpus);
 	free_cpumask_var(data->cpus);
 	kfree(data);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7fc58af748b..57490502b21 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -63,22 +63,20 @@ struct cpu_dbs_info_s {
 	unsigned int down_skip;
 	unsigned int requested_freq;
 	int cpu;
-	unsigned int enable:1;
+	/*
+	 * percpu mutex that serializes governor limit change with
+	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
+	 * when user is changing the governor or limits.
+	 */
+	struct mutex timer_mutex;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop.
 */
 static DEFINE_MUTEX(dbs_mutex);
@@ -143,9 +141,6 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	struct cpufreq_policy *policy;
 
-	if (!this_dbs_info->enable)
-		return 0;
-
 	policy = this_dbs_info->cur_policy;
 
 	/*
@@ -488,18 +483,12 @@ static void do_dbs_timer(struct work_struct *work)
 	delay -= jiffies % delay;
 
-	if (lock_policy_rwsem_write(cpu) < 0)
-		return;
-
-	if (!dbs_info->enable) {
-		unlock_policy_rwsem_write(cpu);
-		return;
-	}
+	mutex_lock(&dbs_info->timer_mutex);
 
 	dbs_check_cpu(dbs_info);
 
 	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
-	unlock_policy_rwsem_write(cpu);
+	mutex_unlock(&dbs_info->timer_mutex);
 }
@@ -508,7 +497,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;
 
-	dbs_info->enable = 1;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
 	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
 				delay);
@@ -516,7 +504,6 @@ static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-	dbs_info->enable = 0;
 	cancel_delayed_work_sync(&dbs_info->work);
 }
@@ -535,9 +522,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if ((!cpu_online(cpu)) || (!policy->cur))
 			return -EINVAL;
 
-		if (this_dbs_info->enable) /* Already enabled */
-			break;
-
 		mutex_lock(&dbs_mutex);
 
 		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -561,6 +545,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		this_dbs_info->down_skip = 0;
 		this_dbs_info->requested_freq = policy->cur;
 
+		mutex_init(&this_dbs_info->timer_mutex);
 		dbs_enable++;
 		/*
 		 * Start the timerschedule work, when this governor
@@ -590,17 +575,19 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					&dbs_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
 		}
-		dbs_timer_init(this_dbs_info);
-
 		mutex_unlock(&dbs_mutex);
 
+		dbs_timer_init(this_dbs_info);
+
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
+
+		mutex_lock(&dbs_mutex);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
+		mutex_destroy(&this_dbs_info->timer_mutex);
 
 		/*
 		 * Stop the timerschedule work, when this governor
@@ -616,7 +603,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		mutex_lock(&dbs_mutex);
+		mutex_lock(&this_dbs_info->timer_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
@@ -625,7 +612,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
-		mutex_unlock(&dbs_mutex);
+		mutex_unlock(&this_dbs_info->timer_mutex);
 		break;
 	}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 1911d172935..d6ba14276bb 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -70,23 +70,21 @@ struct cpu_dbs_info_s {
 	unsigned int freq_lo_jiffies;
 	unsigned int freq_hi_jiffies;
 	int cpu;
-	unsigned int enable:1,
-		sample_type:1;
+	unsigned int sample_type:1;
+	/*
+	 * percpu mutex that serializes governor limit change with
+	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
+	 * when user is changing the governor or limits.
+	 */
+	struct mutex timer_mutex;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop.
 */
 static DEFINE_MUTEX(dbs_mutex);
@@ -192,13 +190,18 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 	return freq_hi;
 }
 
+static void ondemand_powersave_bias_init_cpu(int cpu)
+{
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
+	dbs_info->freq_lo = 0;
+}
+
 static void ondemand_powersave_bias_init(void)
 {
 	int i;
 	for_each_online_cpu(i) {
-		struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
-		dbs_info->freq_table = cpufreq_frequency_get_table(i);
-		dbs_info->freq_lo = 0;
+		ondemand_powersave_bias_init_cpu(i);
 	}
 }
@@ -240,12 +243,10 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
+	if (ret != 1)
+		return -EINVAL;
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1) {
-		mutex_unlock(&dbs_mutex);
-		return -EINVAL;
-	}
 	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
 	mutex_unlock(&dbs_mutex);
@@ -259,13 +260,12 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
-		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.up_threshold = input;
 	mutex_unlock(&dbs_mutex);
@@ -363,9 +363,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	struct cpufreq_policy *policy;
 	unsigned int j;
 
-	if (!this_dbs_info->enable)
-		return;
-
 	this_dbs_info->freq_lo = 0;
 	policy = this_dbs_info->cur_policy;
@@ -493,14 +490,7 @@ static void do_dbs_timer(struct work_struct *work)
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;
-
-	if (lock_policy_rwsem_write(cpu) < 0)
-		return;
-
-	if (!dbs_info->enable) {
-		unlock_policy_rwsem_write(cpu);
-		return;
-	}
+	mutex_lock(&dbs_info->timer_mutex);
 
 	/* Common NORMAL_SAMPLE setup */
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
@@ -517,7 +507,7 @@ static void do_dbs_timer(struct work_struct *work)
 					dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	}
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
-	unlock_policy_rwsem_write(cpu);
+	mutex_unlock(&dbs_info->timer_mutex);
 }
@@ -526,8 +516,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;
 
-	dbs_info->enable = 1;
-	ondemand_powersave_bias_init();
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
 	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
@@ -536,7 +524,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-	dbs_info->enable = 0;
 	cancel_delayed_work_sync(&dbs_info->work);
 }
@@ -555,19 +542,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if ((!cpu_online(cpu)) || (!policy->cur))
 			return -EINVAL;
 
-		if (this_dbs_info->enable) /* Already enabled */
-			break;
-
 		mutex_lock(&dbs_mutex);
-		dbs_enable++;
 
 		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
 		if (rc) {
-			dbs_enable--;
 			mutex_unlock(&dbs_mutex);
 			return rc;
 		}
+		dbs_enable++;
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -581,6 +564,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			}
 		}
 		this_dbs_info->cpu = cpu;
+		ondemand_powersave_bias_init_cpu(cpu);
+		mutex_init(&this_dbs_info->timer_mutex);
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
@@ -598,29 +583,31 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				max(min_sampling_rate,
 				    latency * LATENCY_MULTIPLIER);
 		}
-		dbs_timer_init(this_dbs_info);
-
 		mutex_unlock(&dbs_mutex);
+
+		dbs_timer_init(this_dbs_info);
 
 		break;
 
	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
+
+		mutex_lock(&dbs_mutex);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+		mutex_destroy(&this_dbs_info->timer_mutex);
 		dbs_enable--;
 		mutex_unlock(&dbs_mutex);
 
 		break;
 
	case CPUFREQ_GOV_LIMITS:
-		mutex_lock(&dbs_mutex);
+		mutex_lock(&this_dbs_info->timer_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
 						policy->max, CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
 						policy->min, CPUFREQ_RELATION_L);
-		mutex_unlock(&dbs_mutex);
+		mutex_unlock(&this_dbs_info->timer_mutex);
 		break;
 	}
 	return 0;
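
Both governor patches above replace the policy rwsem and the enable:1 flag with a per-CPU timer_mutex that serializes do_dbs_timer() against GOV_LIMITS, and they reorder GOV_STOP so the work is cancelled before dbs_mutex is taken, which is why cancel_delayed_work_sync() can no longer deadlock against a worker blocked on that mutex. The following is a rough userspace analogue of that ordering using pthreads; do_dbs_timer, change_limits, and the freq variables are illustrative stand-ins, not the kernel APIs.

/*
 * Userspace analogue of the governor locking after this merge:
 * - timer_mutex serializes the periodic worker with limit changes;
 * - teardown stops the worker *before* taking the outer lock, so the
 *   join (the analogue of cancel_delayed_work_sync) cannot wait on a
 *   worker that is itself waiting for dbs_mutex.
 * Build: cc -pthread -o gov gov.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t dbs_mutex = PTHREAD_MUTEX_INITIALIZER;   /* tunables */
static pthread_mutex_t timer_mutex = PTHREAD_MUTEX_INITIALIZER; /* per-CPU in the kernel */
static atomic_bool stopping;
static int cur_freq = 1000, max_freq = 2000;

static void *do_dbs_timer(void *arg)	/* the self-rearming sampling work */
{
	(void)arg;
	while (!atomic_load(&stopping)) {
		pthread_mutex_lock(&timer_mutex);
		if (cur_freq < max_freq)	/* stand-in for dbs_check_cpu() */
			cur_freq += 100;
		pthread_mutex_unlock(&timer_mutex);
		usleep(10000);			/* stand-in for the work delay */
	}
	return NULL;
}

static void change_limits(int new_max)	/* analogue of CPUFREQ_GOV_LIMITS */
{
	pthread_mutex_lock(&timer_mutex);	/* note: not dbs_mutex */
	max_freq = new_max;
	if (cur_freq > max_freq)
		cur_freq = max_freq;
	pthread_mutex_unlock(&timer_mutex);
}

int main(void)
{
	pthread_t timer;

	pthread_create(&timer, NULL, do_dbs_timer, NULL);
	change_limits(1500);

	/* analogue of CPUFREQ_GOV_STOP: cancel the work first... */
	atomic_store(&stopping, true);
	pthread_join(timer, NULL);
	/* ...and only then take the outer lock for the bookkeeping */
	pthread_mutex_lock(&dbs_mutex);
	printf("final freq %d\n", cur_freq);
	pthread_mutex_unlock(&dbs_mutex);
	return 0;
}

The design point the patches make is that the fine-grained lock protects only what the timer touches, while dbs_mutex shrinks to guarding the tunables and the dbs_enable count, removing the lock-ordering entanglement the old DEADLOCK ALERT comments described.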