Commit 96bbbe4a authored by Viresh Kumar, committed by Rafael J. Wysocki

cpufreq: Remove unnecessary variable/parameter 'frozen'

We have used 'frozen' variable/function parameter at many places to
distinguish between CPU offline/online on suspend/resume vs sysfs
removals. We now have another variable cpufreq_suspended which can
be used in these cases, so we can get rid of all those variables or
function parameters.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Parent 979d86fa
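
The diff below applies this change throughout the cpufreq core. As a rough standalone sketch of the idea (hypothetical helper names, not the actual kernel code): instead of threading a 'frozen' boolean through every add/remove path, each helper samples the module-wide suspend flag itself.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the cpufreq_suspended flag, set on system suspend and cleared on resume. */
static bool cpufreq_suspended;

/* Before: every caller had to pass the flag down explicitly. */
static void add_dev_old(int cpu, bool frozen)
{
	printf("cpu%d: %s init\n", cpu, frozen ? "light-weight" : "full");
}

/* After: the helper reads the global state, sampled once into a local. */
static void add_dev_new(int cpu)
{
	bool recover_policy = cpufreq_suspended;	/* mirrors __cpufreq_add_dev() below */

	printf("cpu%d: %s init\n", cpu, recover_policy ? "light-weight" : "full");
}

int main(void)
{
	add_dev_old(0, false);		/* sysfs hot-add: full init */

	cpufreq_suspended = true;	/* as if the system were suspending */
	add_dev_new(0);			/* resume path: light-weight re-init */
	return 0;
}

Sampling the flag once into a local (recover_policy above) keeps the function's behaviour consistent even if the suspend state changes while it runs, which is also what __cpufreq_add_dev() does after this change.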
@@ -1041,13 +1041,13 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 			CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
 
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
-			     bool frozen)
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int j, cpu = dev->id;
 	int ret = -ENOMEM;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
+	bool recover_policy = cpufreq_suspended;
 #ifdef CONFIG_HOTPLUG_CPU
 	struct cpufreq_policy *tpolicy;
 #endif
@@ -1088,9 +1088,9 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 * Restore the saved policy when doing light-weight init and fall back
 	 * to the full init if that fails.
 	 */
-	policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
 	if (!policy) {
-		frozen = false;
+		recover_policy = false;
 		policy = cpufreq_policy_alloc();
 		if (!policy)
 			goto nomem_out;
@@ -1102,7 +1102,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 * the creation of a brand new one. So we need to perform this update
 	 * by invoking update_policy_cpu().
 	 */
-	if (frozen && cpu != policy->cpu)
+	if (recover_policy && cpu != policy->cpu)
 		update_policy_cpu(policy, cpu);
 	else
 		policy->cpu = cpu;
@@ -1130,7 +1130,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 */
 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
-	if (!frozen) {
+	if (!recover_policy) {
 		policy->user_policy.min = policy->min;
 		policy->user_policy.max = policy->max;
 	}
@@ -1192,7 +1192,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				     CPUFREQ_START, policy);
 
-	if (!frozen) {
+	if (!recover_policy) {
 		ret = cpufreq_add_dev_interface(policy, dev);
 		if (ret)
 			goto err_out_unregister;
@@ -1206,7 +1206,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 
 	cpufreq_init_policy(policy);
 
-	if (!frozen) {
+	if (!recover_policy) {
 		policy->user_policy.policy = policy->policy;
 		policy->user_policy.governor = policy->governor;
 	}
@@ -1229,7 +1229,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
-	if (frozen) {
+	if (recover_policy) {
 		/* Do not leave stale fallback data behind. */
 		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
 		cpufreq_policy_put_kobj(policy);
@@ -1253,7 +1253,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-	return __cpufreq_add_dev(dev, sif, false);
+	return __cpufreq_add_dev(dev, sif);
 }
 
 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
@@ -1284,8 +1284,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 }
 
 static int __cpufreq_remove_dev_prepare(struct device *dev,
-					struct subsys_interface *sif,
-					bool frozen)
+					struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, cpus;
 	int new_cpu, ret;
@@ -1299,7 +1298,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 	policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	/* Save the policy somewhere when doing a light-weight tear-down */
-	if (frozen)
+	if (cpufreq_suspended)
 		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
 
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1332,7 +1331,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 		if (new_cpu >= 0) {
 			update_policy_cpu(policy, new_cpu);
 
-			if (!frozen) {
+			if (!cpufreq_suspended) {
 				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
 					 __func__, new_cpu, cpu);
 			}
@@ -1343,8 +1342,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 }
 
 static int __cpufreq_remove_dev_finish(struct device *dev,
-				       struct subsys_interface *sif,
-				       bool frozen)
+				       struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, cpus;
 	int ret;
@@ -1379,7 +1377,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 		}
 	}
 
-	if (!frozen)
+	if (!cpufreq_suspended)
 		cpufreq_policy_put_kobj(policy);
 
 	/*
@@ -1395,7 +1393,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 		list_del(&policy->policy_list);
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-		if (!frozen)
+		if (!cpufreq_suspended)
 			cpufreq_policy_free(policy);
 	} else {
 		if (has_target()) {
@@ -1425,10 +1423,10 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	if (cpu_is_offline(cpu))
 		return 0;
 
-	ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+	ret = __cpufreq_remove_dev_prepare(dev, sif);
 
 	if (!ret)
-		ret = __cpufreq_remove_dev_finish(dev, sif, false);
+		ret = __cpufreq_remove_dev_finish(dev, sif);
 
 	return ret;
 }
@@ -2182,29 +2180,24 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct device *dev;
-	bool frozen = false;
 
 	dev = get_cpu_device(cpu);
 	if (dev) {
-
-		if (action & CPU_TASKS_FROZEN)
-			frozen = true;
-
 		switch (action & ~CPU_TASKS_FROZEN) {
 		case CPU_ONLINE:
-			__cpufreq_add_dev(dev, NULL, frozen);
+			__cpufreq_add_dev(dev, NULL);
 			break;
 
 		case CPU_DOWN_PREPARE:
-			__cpufreq_remove_dev_prepare(dev, NULL, frozen);
+			__cpufreq_remove_dev_prepare(dev, NULL);
 			break;
 
 		case CPU_POST_DEAD:
-			__cpufreq_remove_dev_finish(dev, NULL, frozen);
+			__cpufreq_remove_dev_finish(dev, NULL);
 			break;
 
 		case CPU_DOWN_FAILED:
-			__cpufreq_add_dev(dev, NULL, frozen);
+			__cpufreq_add_dev(dev, NULL);
 			break;
 		}
 	}