Merge branches 'pm-domains', 'pm-sleep' and 'pm-cpufreq'
* pm-domains:
  PM / Domains: Fix initial default state of the need_restore flag
  PM / Domains: Change prototype for the attach and detach callbacks

* pm-sleep:
  PM / sleep: Fix entering suspend-to-IDLE if no freeze_ops is set

* pm-cpufreq:
  cpufreq: Avoid crash in resume on SMP without OPP
  cpufreq: cpufreq-dt: Fix arguments in clock failure error message
commit 31689497d9
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -361,9 +361,19 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 	struct device *dev = pdd->dev;
 	int ret = 0;
 
-	if (gpd_data->need_restore)
+	if (gpd_data->need_restore > 0)
 		return 0;
 
+	/*
+	 * If the value of the need_restore flag is still unknown at this point,
+	 * we trust that pm_genpd_poweroff() has verified that the device is
+	 * already runtime PM suspended.
+	 */
+	if (gpd_data->need_restore < 0) {
+		gpd_data->need_restore = 1;
+		return 0;
+	}
+
 	mutex_unlock(&genpd->lock);
 
 	genpd_start_dev(genpd, dev);
@@ -373,7 +383,7 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 	mutex_lock(&genpd->lock);
 
 	if (!ret)
-		gpd_data->need_restore = true;
+		gpd_data->need_restore = 1;
 
 	return ret;
 }
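With the flag widened from bool to int, three states can be encoded instead of two. The sketch below is an editor's illustration of the convention used by the two hunks above; the named constants are invented here for readability and do not exist in the kernel source, which uses the raw values -1, 0 and 1.

    /* Illustrative only -- the kernel uses bare -1/0/1, not these macros. */
    #define NEED_RESTORE_UNKNOWN   (-1)  /* device just added, no runtime PM callback has run yet */
    #define NEED_RESTORE_NO         0    /* device state is active, nothing has been saved */
    #define NEED_RESTORE_YES        1    /* state was saved (or trusted suspended), restore before next use */

The save path above then reads: a positive value means the state is already saved, a negative value is trusted to mean "already runtime suspended" and is promoted straight to 1, and only a zero value triggers an actual save.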
@@ -389,12 +399,17 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 {
 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 	struct device *dev = pdd->dev;
-	bool need_restore = gpd_data->need_restore;
+	int need_restore = gpd_data->need_restore;
 
-	gpd_data->need_restore = false;
+	gpd_data->need_restore = 0;
 	mutex_unlock(&genpd->lock);
 
 	genpd_start_dev(genpd, dev);
+
+	/*
+	 * Call genpd_restore_dev() for recently added devices too (need_restore
+	 * is negative then).
+	 */
 	if (need_restore)
 		genpd_restore_dev(genpd, dev);
 
@@ -603,6 +618,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 static int pm_genpd_runtime_suspend(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
+	struct generic_pm_domain_data *gpd_data;
 	bool (*stop_ok)(struct device *__dev);
 	int ret;
 
@@ -628,6 +644,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 		return 0;
 
 	mutex_lock(&genpd->lock);
+
+	/*
+	 * If we have an unknown state of the need_restore flag, it means none
+	 * of the runtime PM callbacks has been invoked yet. Let's update the
+	 * flag to reflect that the current state is active.
+	 */
+	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+	if (gpd_data->need_restore < 0)
+		gpd_data->need_restore = 0;
+
 	genpd->in_progress++;
 	pm_genpd_poweroff(genpd);
 	genpd->in_progress--;
@@ -1437,12 +1463,12 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	spin_unlock_irq(&dev->power.lock);
 
 	if (genpd->attach_dev)
-		genpd->attach_dev(dev);
+		genpd->attach_dev(genpd, dev);
 
 	mutex_lock(&gpd_data->lock);
 	gpd_data->base.dev = dev;
 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
-	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
+	gpd_data->need_restore = -1;
 	gpd_data->td.constraint_changed = true;
 	gpd_data->td.effective_constraint_ns = -1;
 	mutex_unlock(&gpd_data->lock);
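Putting the pieces together, a newly added device now starts out in the unknown state and is normalized by the first runtime PM transition. The outline below is an editor's summary inferred from the hunks in this patch, not code taken from it.

    /*
     * need_restore lifecycle as changed by this patch (summary, not kernel code):
     *
     *   __pm_genpd_add_device()      sets need_restore = -1   (state unknown)
     *   pm_genpd_runtime_suspend()   turns -1 into 0          (device known active)
     *   __pm_genpd_save_device()     turns 0 (or -1) into 1   (state saved / trusted suspended)
     *   __pm_genpd_restore_device()  restores when != 0, then resets the flag to 0
     */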
@@ -1499,7 +1525,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 	genpd->max_off_time_changed = true;
 
 	if (genpd->detach_dev)
-		genpd->detach_dev(dev);
+		genpd->detach_dev(genpd, dev);
 
 	spin_lock_irq(&dev->power.lock);
 
@@ -1546,7 +1572,7 @@ void pm_genpd_dev_need_restore(struct device *dev, bool val)
 
 	psd = dev_to_psd(dev);
 	if (psd && psd->domain_data)
-		to_gpd_data(psd->domain_data)->need_restore = val;
+		to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
 
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 }
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -166,8 +166,8 @@ try_again:
 		if (ret == -EPROBE_DEFER)
 			dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
 		else
-			dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret,
-				cpu);
+			dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
+				ret);
 	} else {
 		*cdev = cpu_dev;
 		*creg = cpu_reg;
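The cpufreq-dt change is purely an argument-order fix: the variadic arguments must appear in the same order as the %d conversions in the format string, so the CPU number comes first and the error code second. An illustration with made-up values (cpu = 1, ret = -2):

    /* Before: arguments swapped -- would print "failed to get cpu-2 clock: 1" */
    dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret, cpu);
    /* After: matches the format string -- prints "failed to get cpu1 clock: -2" */
    dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu, ret);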
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1022,7 +1022,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	policy->governor = NULL;
+	if (policy)
+		policy->governor = NULL;
 
 	return policy;
 }
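cpufreq_policy_restore() returns the policy that was saved when the CPU last went offline; on an SMP system where no policy was ever created (for example because the driver found no OPPs), the lookup yields NULL and the unconditional write to policy->governor crashed during resume. A minimal sketch of the guarded pattern, with a hypothetical lookup helper standing in for the real per-CPU lookup:

    /* Sketch only: lookup_saved_policy() is a stand-in for the real per-CPU lookup. */
    struct cpufreq_policy *policy = lookup_saved_policy(cpu);

    if (policy)                 /* nothing saved for this CPU -> leave it NULL */
            policy->governor = NULL;

    return policy;              /* callers are expected to treat NULL as "nothing to restore" */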
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -72,8 +72,10 @@ struct generic_pm_domain {
 	bool max_off_time_changed;
 	bool cached_power_down_ok;
 	struct gpd_cpuidle_data *cpuidle_data;
-	void (*attach_dev)(struct device *dev);
-	void (*detach_dev)(struct device *dev);
+	int (*attach_dev)(struct generic_pm_domain *domain,
+			  struct device *dev);
+	void (*detach_dev)(struct generic_pm_domain *domain,
+			   struct device *dev);
 };
 
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
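With the new prototypes a power-domain provider receives the domain itself in both callbacks, and attach_dev now returns an int so the provider can signal failure to the attach path. A hedged provider-side sketch; the callbacks, the domain and its name are invented for illustration, and the rest of the genpd setup is omitted:

    /* Hypothetical provider-side callbacks matching the new prototypes. */
    static int my_pd_attach_dev(struct generic_pm_domain *domain,
                                struct device *dev)
    {
            dev_dbg(dev, "attached to PM domain %s\n", domain->name);
            return 0;       /* the new prototype allows reporting failure here */
    }

    static void my_pd_detach_dev(struct generic_pm_domain *domain,
                                 struct device *dev)
    {
            dev_dbg(dev, "detached from PM domain %s\n", domain->name);
    }

    static struct generic_pm_domain my_pd = {
            .name           = "my_pd",      /* invented name */
            .attach_dev     = my_pd_attach_dev,
            .detach_dev     = my_pd_detach_dev,
            /* power_on/power_off and pm_genpd_init() omitted from this fragment */
    };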
@@ -104,7 +106,7 @@ struct generic_pm_domain_data {
 	struct notifier_block nb;
 	struct mutex lock;
 	unsigned int refcount;
-	bool need_restore;
+	int need_restore;
 };
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -146,7 +146,7 @@ static int platform_suspend_prepare(suspend_state_t state)
 
 static int platform_suspend_prepare_late(suspend_state_t state)
 {
-	return state == PM_SUSPEND_FREEZE && freeze_ops->prepare ?
+	return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
 		freeze_ops->prepare() : 0;
 }
 
@@ -164,7 +164,7 @@ static void platform_resume_noirq(suspend_state_t state)
 
 static void platform_resume_early(suspend_state_t state)
 {
-	if (state == PM_SUSPEND_FREEZE && freeze_ops->restore)
+	if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
 		freeze_ops->restore();
 }
 
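freeze_ops is only set when a platform registers suspend-to-idle ops; on systems that never register any it stays NULL, so both paths have to test the pointer itself before looking at its members. A minimal sketch of the resulting optional-ops pattern, assuming the struct platform_freeze_ops layout used by these hunks:

    /* Sketch: the whole ops structure is optional, and so is each hook inside it. */
    static int optional_prepare(const struct platform_freeze_ops *ops)
    {
            return ops && ops->prepare ? ops->prepare() : 0;    /* no ops -> nothing to do */
    }

    static void optional_restore(const struct platform_freeze_ops *ops)
    {
            if (ops && ops->restore)
                    ops->restore();
    }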