cpufreq: Initialize policy->kobj while allocating policy

policy->kobj is required to be initialized once in the lifetime of a
policy.  Currently we are initializing it from __cpufreq_add_dev() and
that doesn't seem like the best place for doing so, as we have to handle
special cases there (like: !recover_policy).

We can initialize it from a more obvious place cpufreq_policy_alloc()
and that will make the code look cleaner, especially the error handling part.

The error handling part of __cpufreq_add_dev() was doing almost the same
thing whether recover_policy was true or false. Fix that as well by always
calling cpufreq_policy_put_kobj(), with an additional parameter to skip
the notification part of it.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
This commit is contained in:
Viresh Kumar 2015-06-08 18:25:29 +05:30 committed by Rafael J. Wysocki
parent 87549141d5
commit 2fc3384dc7
1 changed file with 21 additions and 25 deletions

View File

@@ -1134,9 +1134,10 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
return policy; return policy;
} }
static struct cpufreq_policy *cpufreq_policy_alloc(int cpu) static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
{ {
struct cpufreq_policy *policy; struct cpufreq_policy *policy;
int ret;
policy = kzalloc(sizeof(*policy), GFP_KERNEL); policy = kzalloc(sizeof(*policy), GFP_KERNEL);
if (!policy) if (!policy)
@@ -1148,6 +1149,13 @@ static struct cpufreq_policy *cpufreq_policy_alloc(int cpu)
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
goto err_free_cpumask; goto err_free_cpumask;
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
"cpufreq");
if (ret) {
pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
goto err_free_rcpumask;
}
INIT_LIST_HEAD(&policy->policy_list); INIT_LIST_HEAD(&policy->policy_list);
init_rwsem(&policy->rwsem); init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock); spin_lock_init(&policy->transition_lock);
@@ -1155,13 +1163,15 @@ static struct cpufreq_policy *cpufreq_policy_alloc(int cpu)
init_completion(&policy->kobj_unregister); init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update); INIT_WORK(&policy->update, handle_update);
policy->cpu = cpu; policy->cpu = dev->id;
/* Set this once on allocation */ /* Set this once on allocation */
policy->kobj_cpu = cpu; policy->kobj_cpu = dev->id;
return policy; return policy;
err_free_rcpumask:
free_cpumask_var(policy->related_cpus);
err_free_cpumask: err_free_cpumask:
free_cpumask_var(policy->cpus); free_cpumask_var(policy->cpus);
err_free_policy: err_free_policy:
@@ -1170,11 +1180,12 @@ err_free_policy:
return NULL; return NULL;
} }
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{ {
struct kobject *kobj; struct kobject *kobj;
struct completion *cmp; struct completion *cmp;
if (notify)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list, blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy); CPUFREQ_REMOVE_POLICY, policy);
@@ -1270,7 +1281,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL; policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
if (!policy) { if (!policy) {
recover_policy = false; recover_policy = false;
policy = cpufreq_policy_alloc(cpu); policy = cpufreq_policy_alloc(dev);
if (!policy) if (!policy)
goto nomem_out; goto nomem_out;
} }
@@ -1310,15 +1321,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
policy->user_policy.min = policy->min; policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max; policy->user_policy.max = policy->max;
/* prepare interface data */
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
&dev->kobj, "cpufreq");
if (ret) {
pr_err("%s: failed to init policy->kobj: %d\n",
__func__, ret);
goto err_init_policy_kobj;
}
write_lock_irqsave(&cpufreq_driver_lock, flags); write_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->related_cpus) for_each_cpu(j, policy->related_cpus)
per_cpu(cpufreq_cpu_data, j) = policy; per_cpu(cpufreq_cpu_data, j) = policy;
@@ -1410,18 +1412,12 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
err_out_unregister: err_out_unregister:
err_get_freq: err_get_freq:
if (!recover_policy) {
kobject_put(&policy->kobj);
wait_for_completion(&policy->kobj_unregister);
}
err_init_policy_kobj:
up_write(&policy->rwsem); up_write(&policy->rwsem);
if (cpufreq_driver->exit) if (cpufreq_driver->exit)
cpufreq_driver->exit(policy); cpufreq_driver->exit(policy);
err_set_policy_cpu: err_set_policy_cpu:
if (recover_policy) cpufreq_policy_put_kobj(policy, recover_policy);
cpufreq_policy_put_kobj(policy);
cpufreq_policy_free(policy); cpufreq_policy_free(policy);
nomem_out: nomem_out:
@@ -1517,7 +1513,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
/* Free the policy kobjects only if the driver is getting removed. */ /* Free the policy kobjects only if the driver is getting removed. */
if (sif) if (sif)
cpufreq_policy_put_kobj(policy); cpufreq_policy_put_kobj(policy, true);
/* /*
* Perform the ->exit() even during light-weight tear-down, * Perform the ->exit() even during light-weight tear-down,
@@ -1567,7 +1563,7 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
return 0; return 0;
} }
cpufreq_policy_put_kobj(policy); cpufreq_policy_put_kobj(policy, true);
cpufreq_policy_free(policy); cpufreq_policy_free(policy);
return 0; return 0;
} }