Merge branch 'pm-domains' into pm-for-linus
* pm-domains:
  PM / Domains: Split device PM domain data into base and need_restore
  ARM: mach-shmobile: sh7372 sleep warning fixes
  ARM: mach-shmobile: sh7372 A3SM support
  ARM: mach-shmobile: sh7372 generic suspend/resume support
  PM / Domains: Preliminary support for devices with power.irq_safe set
  PM: Move clock-related definitions and headers to separate file
  PM / Domains: Use power.sybsys_data to reduce overhead
  PM: Reference counting of power.subsys_data
  PM: Introduce struct pm_subsys_data
  ARM / shmobile: Make A3RV be a subdomain of A4LC on SH7372
  PM / Domains: Rename argument of pm_genpd_add_subdomain()
  PM / Domains: Rename GPD_STATE_WAIT_PARENT to GPD_STATE_WAIT_MASTER
  PM / Domains: Allow generic PM domains to have multiple masters
  PM / Domains: Add "wait for parent" status for generic PM domains
  PM / Domains: Make pm_genpd_poweron() always survive parent removal
  PM / Domains: Do not take parent locks to modify subdomain counters
  PM / Domains: Implement subdomain counters as atomic fields
commit c28b56b1d4
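The drivers/base/power changes in this merge replace the private struct pm_clk_data with a shared, reference-counted struct pm_subsys_data and split the old pm_clk_init() into pm_clk_create() plus a lighter pm_clk_init(). A minimal sketch of how a platform subsystem might drive the reworked clock helpers is shown below; the example_* names are hypothetical and only the pm_clk_*() calls come from the code in this diff.

#include <linux/device.h>
#include <linux/pm_clock.h>

/* Hypothetical attach path: set up a PM clock list for one device. */
static int example_attach_dev(struct device *dev)
{
	int ret;

	ret = pm_clk_create(dev);	/* allocates/refcounts power.subsys_data */
	if (ret)
		return ret;

	ret = pm_clk_add(dev, NULL);	/* track the device's default clock */
	if (ret)
		pm_clk_destroy(dev);	/* drops the subsys_data reference */

	return ret;
}

/* Runtime PM callbacks then simply gate the listed clocks. */
static int example_runtime_suspend(struct device *dev)
{
	return pm_clk_suspend(dev);
}

static int example_runtime_resume(struct device *dev)
{
	return pm_clk_resume(dev);
}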
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
 #include <linux/clk.h>

@@ -42,6 +42,7 @@
 #include <linux/leds.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/usb/r8a66597.h>
+#include <linux/pm_clock.h>

 #include <media/sh_mobile_ceu.h>
 #include <media/sh_mobile_csi2.h>

@@ -39,7 +39,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/smsc911x.h>
 #include <linux/sh_intc.h>
 #include <linux/tca6416_keypad.h>

@@ -35,8 +35,8 @@ extern void sh7372_add_standard_devices(void);
 extern void sh7372_clock_init(void);
 extern void sh7372_pinmux_init(void);
 extern void sh7372_pm_init(void);
-extern void sh7372_cpu_suspend(void);
-extern void sh7372_cpu_resume(void);
+extern void sh7372_resume_core_standby_a3sm(void);
+extern int sh7372_do_idle_a3sm(unsigned long unused);
 extern struct clk sh7372_extal1_clk;
 extern struct clk sh7372_extal2_clk;

@@ -498,9 +498,12 @@ extern struct sh7372_pm_domain sh7372_a3sg;
 extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd);
 extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
					struct platform_device *pdev);
+extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
+				    struct sh7372_pm_domain *sh7372_sd);
 #else
 #define sh7372_init_pm_domain(pd) do { } while(0)
 #define sh7372_add_device_to_domain(pd, pdev) do { } while(0)
+#define sh7372_pm_add_subdomain(pd, sd) do { } while(0)
 #endif /* CONFIG_PM */

 #endif /* __ASM_SH7372_H__ */

@@ -15,23 +15,60 @@
 #include <linux/list.h>
 #include <linux/err.h>
 #include <linux/slab.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/bitrev.h>
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
+#include <asm/suspend.h>
 #include <mach/common.h>
 #include <mach/sh7372.h>

-#define SMFRAM 0xe6a70000
-#define SYSTBCR 0xe6150024
-#define SBAR 0xe6180020
-#define APARMBAREA 0xe6f10020
+/* DBG */
+#define DBGREG1 0xe6100020
+#define DBGREG9 0xe6100040

+/* CPGA */
+#define SYSTBCR 0xe6150024
+#define MSTPSR0 0xe6150030
+#define MSTPSR1 0xe6150038
+#define MSTPSR2 0xe6150040
+#define MSTPSR3 0xe6150048
+#define MSTPSR4 0xe615004c
+#define PLLC01STPCR 0xe61500c8
+
+/* SYSC */
 #define SPDCR 0xe6180008
 #define SWUCR 0xe6180014
+#define SBAR 0xe6180020
+#define WUPSMSK 0xe618002c
+#define WUPSMSK2 0xe6180048
 #define PSTR 0xe6180080
+#define WUPSFAC 0xe6180098
+#define IRQCR 0xe618022c
+#define IRQCR2 0xe6180238
+#define IRQCR3 0xe6180244
+#define IRQCR4 0xe6180248
+#define PDNSEL 0xe6180254
+
+/* INTC */
+#define ICR1A 0xe6900000
+#define ICR2A 0xe6900004
+#define ICR3A 0xe6900008
+#define ICR4A 0xe690000c
+#define INTMSK00A 0xe6900040
+#define INTMSK10A 0xe6900044
+#define INTMSK20A 0xe6900048
+#define INTMSK30A 0xe690004c
+
+/* MFIS */
+#define SMFRAM 0xe6a70000
+
+/* AP-System Core */
+#define APARMBAREA 0xe6f10020

 #define PSTR_RETRIES 100
 #define PSTR_DELAY_US 10
@@ -91,35 +128,6 @@ static int pd_power_up(struct generic_pm_domain *genpd)
 	return ret;
 }

-static int pd_power_up_a3rv(struct generic_pm_domain *genpd)
-{
-	int ret = pd_power_up(genpd);
-
-	/* force A4LC on after A3RV has been requested on */
-	pm_genpd_poweron(&sh7372_a4lc.genpd);
-
-	return ret;
-}
-
-static int pd_power_down_a3rv(struct generic_pm_domain *genpd)
-{
-	int ret = pd_power_down(genpd);
-
-	/* try to power down A4LC after A3RV is requested off */
-	genpd_queue_power_off_work(&sh7372_a4lc.genpd);
-
-	return ret;
-}
-
-static int pd_power_down_a4lc(struct generic_pm_domain *genpd)
-{
-	/* only power down A4LC if A3RV is off */
-	if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift)))
-		return pd_power_down(genpd);
-
-	return -EBUSY;
-}
-
 static bool pd_active_wakeup(struct device *dev)
 {
 	return true;
@@ -132,18 +140,10 @@ void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
 	pm_genpd_init(genpd, NULL, false);
 	genpd->stop_device = pm_clk_suspend;
 	genpd->start_device = pm_clk_resume;
+	genpd->dev_irq_safe = true;
 	genpd->active_wakeup = pd_active_wakeup;
-
-	if (sh7372_pd == &sh7372_a4lc) {
-		genpd->power_off = pd_power_down_a4lc;
-		genpd->power_on = pd_power_up;
-	} else if (sh7372_pd == &sh7372_a3rv) {
-		genpd->power_off = pd_power_down_a3rv;
-		genpd->power_on = pd_power_up_a3rv;
-	} else {
-		genpd->power_off = pd_power_down;
-		genpd->power_on = pd_power_up;
-	}
+	genpd->power_off = pd_power_down;
+	genpd->power_on = pd_power_up;
 	genpd->power_on(&sh7372_pd->genpd);
 }

@@ -152,11 +152,15 @@ void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
 {
 	struct device *dev = &pdev->dev;

-	if (!dev->power.subsys_data) {
-		pm_clk_init(dev);
-		pm_clk_add(dev, NULL);
-	}
 	pm_genpd_add_device(&sh7372_pd->genpd, dev);
+	if (pm_clk_no_clocks(dev))
+		pm_clk_add(dev, NULL);
+}
+
+void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
+			     struct sh7372_pm_domain *sh7372_sd)
+{
+	pm_genpd_add_subdomain(&sh7372_pd->genpd, &sh7372_sd->genpd);
 }

 struct sh7372_pm_domain sh7372_a4lc = {

@ -185,33 +189,175 @@ struct sh7372_pm_domain sh7372_a3sg = {
|
||||||
|
|
||||||
#endif /* CONFIG_PM */
|
#endif /* CONFIG_PM */
|
||||||
|
|
||||||
static void sh7372_enter_core_standby(void)
|
#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
|
||||||
|
static int sh7372_do_idle_core_standby(unsigned long unused)
|
||||||
{
|
{
|
||||||
void __iomem *smfram = (void __iomem *)SMFRAM;
|
cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */
|
||||||
|
return 0;
|
||||||
__raw_writel(0, APARMBAREA); /* translate 4k */
|
|
||||||
__raw_writel(__pa(sh7372_cpu_resume), SBAR); /* set reset vector */
|
|
||||||
__raw_writel(0x10, SYSTBCR); /* enable core standby */
|
|
||||||
|
|
||||||
__raw_writel(0, smfram + 0x3c); /* clear page table address */
|
|
||||||
|
|
||||||
sh7372_cpu_suspend();
|
|
||||||
cpu_init();
|
|
||||||
|
|
||||||
/* if page table address is non-NULL then we have been powered down */
|
|
||||||
if (__raw_readl(smfram + 0x3c)) {
|
|
||||||
__raw_writel(__raw_readl(smfram + 0x40),
|
|
||||||
__va(__raw_readl(smfram + 0x3c)));
|
|
||||||
|
|
||||||
flush_tlb_all();
|
|
||||||
set_cr(__raw_readl(smfram + 0x38));
|
|
||||||
}
|
|
||||||
|
|
||||||
__raw_writel(0, SYSTBCR); /* disable core standby */
|
|
||||||
__raw_writel(0, SBAR); /* disable reset vector translation */
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void sh7372_enter_core_standby(void)
|
||||||
|
{
|
||||||
|
/* set reset vector, translate 4k */
|
||||||
|
__raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
|
||||||
|
__raw_writel(0, APARMBAREA);
|
||||||
|
|
||||||
|
/* enter sleep mode with SYSTBCR to 0x10 */
|
||||||
|
__raw_writel(0x10, SYSTBCR);
|
||||||
|
cpu_suspend(0, sh7372_do_idle_core_standby);
|
||||||
|
__raw_writel(0, SYSTBCR);
|
||||||
|
|
||||||
|
/* disable reset vector translation */
|
||||||
|
__raw_writel(0, SBAR);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_SUSPEND
|
||||||
|
static void sh7372_enter_a3sm_common(int pllc0_on)
|
||||||
|
{
|
||||||
|
/* set reset vector, translate 4k */
|
||||||
|
__raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
|
||||||
|
__raw_writel(0, APARMBAREA);
|
||||||
|
|
||||||
|
if (pllc0_on)
|
||||||
|
__raw_writel(0, PLLC01STPCR);
|
||||||
|
else
|
||||||
|
__raw_writel(1 << 28, PLLC01STPCR);
|
||||||
|
|
||||||
|
__raw_writel(0, PDNSEL); /* power-down A3SM only, not A4S */
|
||||||
|
__raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
|
||||||
|
cpu_suspend(0, sh7372_do_idle_a3sm);
|
||||||
|
__raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */
|
||||||
|
|
||||||
|
/* disable reset vector translation */
|
||||||
|
__raw_writel(0, SBAR);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p)
|
||||||
|
{
|
||||||
|
unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
|
||||||
|
unsigned long msk, msk2;
|
||||||
|
|
||||||
|
/* check active clocks to determine potential wakeup sources */
|
||||||
|
|
||||||
|
mstpsr0 = __raw_readl(MSTPSR0);
|
||||||
|
if ((mstpsr0 & 0x00000003) != 0x00000003) {
|
||||||
|
pr_debug("sh7372 mstpsr0 0x%08lx\n", mstpsr0);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
mstpsr1 = __raw_readl(MSTPSR1);
|
||||||
|
if ((mstpsr1 & 0xff079b7f) != 0xff079b7f) {
|
||||||
|
pr_debug("sh7372 mstpsr1 0x%08lx\n", mstpsr1);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
mstpsr2 = __raw_readl(MSTPSR2);
|
||||||
|
if ((mstpsr2 & 0x000741ff) != 0x000741ff) {
|
||||||
|
pr_debug("sh7372 mstpsr2 0x%08lx\n", mstpsr2);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
mstpsr3 = __raw_readl(MSTPSR3);
|
||||||
|
if ((mstpsr3 & 0x1a60f010) != 0x1a60f010) {
|
||||||
|
pr_debug("sh7372 mstpsr3 0x%08lx\n", mstpsr3);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
mstpsr4 = __raw_readl(MSTPSR4);
|
||||||
|
if ((mstpsr4 & 0x00008cf0) != 0x00008cf0) {
|
||||||
|
pr_debug("sh7372 mstpsr4 0x%08lx\n", mstpsr4);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
msk = 0;
|
||||||
|
msk2 = 0;
|
||||||
|
|
||||||
|
/* make bitmaps of limited number of wakeup sources */
|
||||||
|
|
||||||
|
if ((mstpsr2 & (1 << 23)) == 0) /* SPU2 */
|
||||||
|
msk |= 1 << 31;
|
||||||
|
|
||||||
|
if ((mstpsr2 & (1 << 12)) == 0) /* MFI_MFIM */
|
||||||
|
msk |= 1 << 21;
|
||||||
|
|
||||||
|
if ((mstpsr4 & (1 << 3)) == 0) /* KEYSC */
|
||||||
|
msk |= 1 << 2;
|
||||||
|
|
||||||
|
if ((mstpsr1 & (1 << 24)) == 0) /* CMT0 */
|
||||||
|
msk |= 1 << 1;
|
||||||
|
|
||||||
|
if ((mstpsr3 & (1 << 29)) == 0) /* CMT1 */
|
||||||
|
msk |= 1 << 1;
|
||||||
|
|
||||||
|
if ((mstpsr4 & (1 << 0)) == 0) /* CMT2 */
|
||||||
|
msk |= 1 << 1;
|
||||||
|
|
||||||
|
if ((mstpsr2 & (1 << 13)) == 0) /* MFI_MFIS */
|
||||||
|
msk2 |= 1 << 17;
|
||||||
|
|
||||||
|
*mskp = msk;
|
||||||
|
*msk2p = msk2;
|
||||||
|
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p)
|
||||||
|
{
|
||||||
|
u16 tmp, irqcr1, irqcr2;
|
||||||
|
int k;
|
||||||
|
|
||||||
|
irqcr1 = 0;
|
||||||
|
irqcr2 = 0;
|
||||||
|
|
||||||
|
/* convert INTCA ICR register layout to SYSC IRQCR+IRQCR2 */
|
||||||
|
for (k = 0; k <= 7; k++) {
|
||||||
|
tmp = (icr >> ((7 - k) * 4)) & 0xf;
|
||||||
|
irqcr1 |= (tmp & 0x03) << (k * 2);
|
||||||
|
irqcr2 |= (tmp >> 2) << (k * 2);
|
||||||
|
}
|
||||||
|
|
||||||
|
*irqcr1p = irqcr1;
|
||||||
|
*irqcr2p = irqcr2;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
|
||||||
|
{
|
||||||
|
u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
|
||||||
|
unsigned long tmp;
|
||||||
|
|
||||||
|
/* read IRQ0A -> IRQ15A mask */
|
||||||
|
tmp = bitrev8(__raw_readb(INTMSK00A));
|
||||||
|
tmp |= bitrev8(__raw_readb(INTMSK10A)) << 8;
|
||||||
|
|
||||||
|
/* setup WUPSMSK from clocks and external IRQ mask */
|
||||||
|
msk = (~msk & 0xc030000f) | (tmp << 4);
|
||||||
|
__raw_writel(msk, WUPSMSK);
|
||||||
|
|
||||||
|
/* propage level/edge trigger for external IRQ 0->15 */
|
||||||
|
sh7372_icr_to_irqcr(__raw_readl(ICR1A), &irqcrx_low, &irqcry_low);
|
||||||
|
sh7372_icr_to_irqcr(__raw_readl(ICR2A), &irqcrx_high, &irqcry_high);
|
||||||
|
__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR);
|
||||||
|
__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR2);
|
||||||
|
|
||||||
|
/* read IRQ16A -> IRQ31A mask */
|
||||||
|
tmp = bitrev8(__raw_readb(INTMSK20A));
|
||||||
|
tmp |= bitrev8(__raw_readb(INTMSK30A)) << 8;
|
||||||
|
|
||||||
|
/* setup WUPSMSK2 from clocks and external IRQ mask */
|
||||||
|
msk2 = (~msk2 & 0x00030000) | tmp;
|
||||||
|
__raw_writel(msk2, WUPSMSK2);
|
||||||
|
|
||||||
|
/* propage level/edge trigger for external IRQ 16->31 */
|
||||||
|
sh7372_icr_to_irqcr(__raw_readl(ICR3A), &irqcrx_low, &irqcry_low);
|
||||||
|
sh7372_icr_to_irqcr(__raw_readl(ICR4A), &irqcrx_high, &irqcry_high);
|
||||||
|
__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
|
||||||
|
__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_CPU_IDLE
|
#ifdef CONFIG_CPU_IDLE
|
||||||
|
|
||||||
static void sh7372_cpuidle_setup(struct cpuidle_device *dev)
|
static void sh7372_cpuidle_setup(struct cpuidle_device *dev)
|
||||||
{
|
{
|
||||||
struct cpuidle_state *state;
|
struct cpuidle_state *state;
|
||||||
|
@@ -239,9 +385,25 @@ static void sh7372_cpuidle_init(void) {}
 #endif

 #ifdef CONFIG_SUSPEND
+
 static int sh7372_enter_suspend(suspend_state_t suspend_state)
 {
-	sh7372_enter_core_standby();
+	unsigned long msk, msk2;
+
+	/* check active clocks to determine potential wakeup sources */
+	if (sh7372_a3sm_valid(&msk, &msk2)) {
+
+		/* convert INTC mask and sense to SYSC mask and sense */
+		sh7372_setup_a3sm(msk, msk2);
+
+		/* enter A3SM sleep with PLLC0 off */
+		pr_debug("entering A3SM\n");
+		sh7372_enter_a3sm_common(0);
+	} else {
+		/* default to Core Standby that supports all wakeup sources */
+		pr_debug("entering Core Standby\n");
+		sh7372_enter_core_standby();
+	}
 	return 0;
 }

@@ -253,9 +415,6 @@ static void sh7372_suspend_init(void)
 static void sh7372_suspend_init(void) {}
 #endif

-#define DBGREG1 0xe6100020
-#define DBGREG9 0xe6100040
-
 void __init sh7372_pm_init(void)
 {
 	/* enable DBG hardware block to kick SYSC */

@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/sh_clk.h>

@@ -30,6 +30,7 @@
 #include <linux/sh_dma.h>
 #include <linux/sh_intc.h>
 #include <linux/sh_timer.h>
+#include <linux/pm_domain.h>
 #include <mach/hardware.h>
 #include <mach/sh7372.h>
 #include <asm/mach-types.h>
@@ -994,6 +995,8 @@ void __init sh7372_add_standard_devices(void)
 	sh7372_init_pm_domain(&sh7372_a3ri);
 	sh7372_init_pm_domain(&sh7372_a3sg);

+	sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);
+
 	platform_add_devices(sh7372_early_devices,
 			    ARRAY_SIZE(sh7372_early_devices));

@ -30,58 +30,20 @@
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/linkage.h>
|
#include <linux/linkage.h>
|
||||||
|
#include <linux/init.h>
|
||||||
|
#include <asm/memory.h>
|
||||||
#include <asm/assembler.h>
|
#include <asm/assembler.h>
|
||||||
|
|
||||||
#define SMFRAM 0xe6a70000
|
#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
|
||||||
|
.align 12
|
||||||
.align
|
.text
|
||||||
kernel_flush:
|
.global sh7372_resume_core_standby_a3sm
|
||||||
.word v7_flush_dcache_all
|
sh7372_resume_core_standby_a3sm:
|
||||||
|
ldr pc, 1f
|
||||||
.align 3
|
1: .long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET
|
||||||
ENTRY(sh7372_cpu_suspend)
|
|
||||||
stmfd sp!, {r0-r12, lr} @ save registers on stack
|
|
||||||
|
|
||||||
ldr r8, =SMFRAM
|
|
||||||
|
|
||||||
mov r4, sp @ Store sp
|
|
||||||
mrs r5, spsr @ Store spsr
|
|
||||||
mov r6, lr @ Store lr
|
|
||||||
stmia r8!, {r4-r6}
|
|
||||||
|
|
||||||
mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register
|
|
||||||
mrc p15, 0, r5, c2, c0, 0 @ TTBR0
|
|
||||||
mrc p15, 0, r6, c2, c0, 1 @ TTBR1
|
|
||||||
mrc p15, 0, r7, c2, c0, 2 @ TTBCR
|
|
||||||
stmia r8!, {r4-r7}
|
|
||||||
|
|
||||||
mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register
|
|
||||||
mrc p15, 0, r5, c10, c2, 0 @ PRRR
|
|
||||||
mrc p15, 0, r6, c10, c2, 1 @ NMRR
|
|
||||||
stmia r8!,{r4-r6}
|
|
||||||
|
|
||||||
mrc p15, 0, r4, c13, c0, 1 @ Context ID
|
|
||||||
mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
|
|
||||||
mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
|
|
||||||
mrs r7, cpsr @ Store current cpsr
|
|
||||||
stmia r8!, {r4-r7}
|
|
||||||
|
|
||||||
mrc p15, 0, r4, c1, c0, 0 @ save control register
|
|
||||||
stmia r8!, {r4}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* jump out to kernel flush routine
|
|
||||||
* - reuse that code is better
|
|
||||||
* - it executes in a cached space so is faster than refetch per-block
|
|
||||||
* - should be faster and will change with kernel
|
|
||||||
* - 'might' have to copy address, load and jump to it
|
|
||||||
* Flush all data from the L1 data cache before disabling
|
|
||||||
* SCTLR.C bit.
|
|
||||||
*/
|
|
||||||
ldr r1, kernel_flush
|
|
||||||
mov lr, pc
|
|
||||||
bx r1
|
|
||||||
|
|
||||||
|
.global sh7372_do_idle_a3sm
|
||||||
|
sh7372_do_idle_a3sm:
|
||||||
/*
|
/*
|
||||||
* Clear the SCTLR.C bit to prevent further data cache
|
* Clear the SCTLR.C bit to prevent further data cache
|
||||||
* allocation. Clearing SCTLR.C would make all the data accesses
|
* allocation. Clearing SCTLR.C would make all the data accesses
|
||||||
|
@ -92,10 +54,13 @@ ENTRY(sh7372_cpu_suspend)
|
||||||
mcr p15, 0, r0, c1, c0, 0
|
mcr p15, 0, r0, c1, c0, 0
|
||||||
isb
|
isb
|
||||||
|
|
||||||
|
/* disable L2 cache in the aux control register */
|
||||||
|
mrc p15, 0, r10, c1, c0, 1
|
||||||
|
bic r10, r10, #2
|
||||||
|
mcr p15, 0, r10, c1, c0, 1
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Invalidate L1 data cache. Even though only invalidate is
|
* Invalidate data cache again.
|
||||||
* necessary exported flush API is used here. Doing clean
|
|
||||||
* on already clean cache would be almost NOP.
|
|
||||||
*/
|
*/
|
||||||
ldr r1, kernel_flush
|
ldr r1, kernel_flush
|
||||||
blx r1
|
blx r1
|
||||||
|
@ -115,146 +80,16 @@ ENTRY(sh7372_cpu_suspend)
|
||||||
dsb
|
dsb
|
||||||
dmb
|
dmb
|
||||||
|
|
||||||
/*
|
#define SPDCR 0xe6180008
|
||||||
* ===================================
|
#define A3SM (1 << 12)
|
||||||
* == WFI instruction => Enter idle ==
|
|
||||||
* ===================================
|
|
||||||
*/
|
|
||||||
wfi @ wait for interrupt
|
|
||||||
|
|
||||||
/*
|
/* A3SM power down */
|
||||||
* ===================================
|
ldr r0, =SPDCR
|
||||||
* == Resume path for non-OFF modes ==
|
ldr r1, =A3SM
|
||||||
* ===================================
|
str r1, [r0]
|
||||||
*/
|
1:
|
||||||
mrc p15, 0, r0, c1, c0, 0
|
b 1b
|
||||||
tst r0, #(1 << 2) @ Check C bit enabled?
|
|
||||||
orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared
|
|
||||||
mcreq p15, 0, r0, c1, c0, 0
|
|
||||||
isb
|
|
||||||
|
|
||||||
/*
|
kernel_flush:
|
||||||
* ===================================
|
.word v7_flush_dcache_all
|
||||||
* == Exit point from non-OFF modes ==
|
#endif
|
||||||
* ===================================
|
|
||||||
*/
|
|
||||||
ldmfd sp!, {r0-r12, pc} @ restore regs and return
|
|
||||||
|
|
||||||
.pool
|
|
||||||
|
|
||||||
.align 12
|
|
||||||
.text
|
|
||||||
.global sh7372_cpu_resume
|
|
||||||
sh7372_cpu_resume:
|
|
||||||
|
|
||||||
mov r1, #0
|
|
||||||
/*
|
|
||||||
* Invalidate all instruction caches to PoU
|
|
||||||
* and flush branch target cache
|
|
||||||
*/
|
|
||||||
mcr p15, 0, r1, c7, c5, 0
|
|
||||||
|
|
||||||
ldr r3, =SMFRAM
|
|
||||||
|
|
||||||
ldmia r3!, {r4-r6}
|
|
||||||
mov sp, r4 @ Restore sp
|
|
||||||
msr spsr_cxsf, r5 @ Restore spsr
|
|
||||||
mov lr, r6 @ Restore lr
|
|
||||||
|
|
||||||
ldmia r3!, {r4-r7}
|
|
||||||
mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register
|
|
||||||
mcr p15, 0, r5, c2, c0, 0 @ TTBR0
|
|
||||||
mcr p15, 0, r6, c2, c0, 1 @ TTBR1
|
|
||||||
mcr p15, 0, r7, c2, c0, 2 @ TTBCR
|
|
||||||
|
|
||||||
ldmia r3!,{r4-r6}
|
|
||||||
mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register
|
|
||||||
mcr p15, 0, r5, c10, c2, 0 @ PRRR
|
|
||||||
mcr p15, 0, r6, c10, c2, 1 @ NMRR
|
|
||||||
|
|
||||||
ldmia r3!,{r4-r7}
|
|
||||||
mcr p15, 0, r4, c13, c0, 1 @ Context ID
|
|
||||||
mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
|
|
||||||
mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
|
|
||||||
msr cpsr, r7 @ store cpsr
|
|
||||||
|
|
||||||
/* Starting to enable MMU here */
|
|
||||||
mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
|
|
||||||
/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
|
|
||||||
and r7, #0x7
|
|
||||||
cmp r7, #0x0
|
|
||||||
beq usettbr0
|
|
||||||
ttbr_error:
|
|
||||||
/*
|
|
||||||
* More work needs to be done to support N[0:2] value other than 0
|
|
||||||
* So looping here so that the error can be detected
|
|
||||||
*/
|
|
||||||
b ttbr_error
|
|
||||||
|
|
||||||
.align
|
|
||||||
cache_pred_disable_mask:
|
|
||||||
.word 0xFFFFE7FB
|
|
||||||
ttbrbit_mask:
|
|
||||||
.word 0xFFFFC000
|
|
||||||
table_index_mask:
|
|
||||||
.word 0xFFF00000
|
|
||||||
table_entry:
|
|
||||||
.word 0x00000C02
|
|
||||||
usettbr0:
|
|
||||||
|
|
||||||
mrc p15, 0, r2, c2, c0, 0
|
|
||||||
ldr r5, ttbrbit_mask
|
|
||||||
and r2, r5
|
|
||||||
mov r4, pc
|
|
||||||
ldr r5, table_index_mask
|
|
||||||
and r4, r5 @ r4 = 31 to 20 bits of pc
|
|
||||||
/* Extract the value to be written to table entry */
|
|
||||||
ldr r6, table_entry
|
|
||||||
/* r6 has the value to be written to table entry */
|
|
||||||
add r6, r6, r4
|
|
||||||
/* Getting the address of table entry to modify */
|
|
||||||
lsr r4, #18
|
|
||||||
/* r2 has the location which needs to be modified */
|
|
||||||
add r2, r4
|
|
||||||
ldr r4, [r2]
|
|
||||||
str r6, [r2] /* modify the table entry */
|
|
||||||
|
|
||||||
mov r7, r6
|
|
||||||
mov r5, r2
|
|
||||||
mov r6, r4
|
|
||||||
/* r5 = original page table address */
|
|
||||||
/* r6 = original page table data */
|
|
||||||
|
|
||||||
mov r0, #0
|
|
||||||
mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
|
|
||||||
mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array
|
|
||||||
mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB
|
|
||||||
mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Restore control register. This enables the MMU.
|
|
||||||
* The caches and prediction are not enabled here, they
|
|
||||||
* will be enabled after restoring the MMU table entry.
|
|
||||||
*/
|
|
||||||
ldmia r3!, {r4}
|
|
||||||
stmia r3!, {r5} /* save original page table address */
|
|
||||||
stmia r3!, {r6} /* save original page table data */
|
|
||||||
stmia r3!, {r7} /* save modified page table data */
|
|
||||||
|
|
||||||
ldr r2, cache_pred_disable_mask
|
|
||||||
and r4, r2
|
|
||||||
mcr p15, 0, r4, c1, c0, 0
|
|
||||||
dsb
|
|
||||||
isb
|
|
||||||
|
|
||||||
ldr r0, =restoremmu_on
|
|
||||||
bx r0
|
|
||||||
|
|
||||||
/*
|
|
||||||
* ==============================
|
|
||||||
* == Exit point from OFF mode ==
|
|
||||||
* ==============================
|
|
||||||
*/
|
|
||||||
restoremmu_on:
|
|
||||||
|
|
||||||
ldmfd sp!, {r0-r12, pc} @ restore regs and return
|
|
||||||
|
|
|
@@ -1,4 +1,4 @@
-obj-$(CONFIG_PM) += sysfs.o generic_ops.o
+obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o
 obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME) += runtime.o
 obj-$(CONFIG_PM_TRACE_RTC) += trace.o

@ -10,18 +10,13 @@
|
||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
#include <linux/io.h>
|
#include <linux/io.h>
|
||||||
#include <linux/pm.h>
|
#include <linux/pm.h>
|
||||||
#include <linux/pm_runtime.h>
|
#include <linux/pm_clock.h>
|
||||||
#include <linux/clk.h>
|
#include <linux/clk.h>
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
#include <linux/err.h>
|
#include <linux/err.h>
|
||||||
|
|
||||||
#ifdef CONFIG_PM
|
#ifdef CONFIG_PM
|
||||||
|
|
||||||
struct pm_clk_data {
|
|
||||||
struct list_head clock_list;
|
|
||||||
spinlock_t lock;
|
|
||||||
};
|
|
||||||
|
|
||||||
enum pce_status {
|
enum pce_status {
|
||||||
PCE_STATUS_NONE = 0,
|
PCE_STATUS_NONE = 0,
|
||||||
PCE_STATUS_ACQUIRED,
|
PCE_STATUS_ACQUIRED,
|
||||||
|
@ -36,11 +31,6 @@ struct pm_clock_entry {
|
||||||
enum pce_status status;
|
enum pce_status status;
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct pm_clk_data *__to_pcd(struct device *dev)
|
|
||||||
{
|
|
||||||
return dev ? dev->power.subsys_data : NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* pm_clk_acquire - Acquire a device clock.
|
* pm_clk_acquire - Acquire a device clock.
|
||||||
* @dev: Device whose clock is to be acquired.
|
* @dev: Device whose clock is to be acquired.
|
||||||
|
@ -67,10 +57,10 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
|
||||||
*/
|
*/
|
||||||
int pm_clk_add(struct device *dev, const char *con_id)
|
int pm_clk_add(struct device *dev, const char *con_id)
|
||||||
{
|
{
|
||||||
struct pm_clk_data *pcd = __to_pcd(dev);
|
struct pm_subsys_data *psd = dev_to_psd(dev);
|
||||||
struct pm_clock_entry *ce;
|
struct pm_clock_entry *ce;
|
||||||
|
|
||||||
if (!pcd)
|
if (!psd)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
|
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
|
||||||
|
@ -91,9 +81,9 @@ int pm_clk_add(struct device *dev, const char *con_id)
|
||||||
|
|
||||||
pm_clk_acquire(dev, ce);
|
pm_clk_acquire(dev, ce);
|
||||||
|
|
||||||
spin_lock_irq(&pcd->lock);
|
spin_lock_irq(&psd->lock);
|
||||||
list_add_tail(&ce->node, &pcd->clock_list);
|
list_add_tail(&ce->node, &psd->clock_list);
|
||||||
spin_unlock_irq(&pcd->lock);
|
spin_unlock_irq(&psd->lock);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -130,15 +120,15 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
|
||||||
*/
|
*/
|
||||||
void pm_clk_remove(struct device *dev, const char *con_id)
|
void pm_clk_remove(struct device *dev, const char *con_id)
|
||||||
{
|
{
|
||||||
struct pm_clk_data *pcd = __to_pcd(dev);
|
struct pm_subsys_data *psd = dev_to_psd(dev);
|
||||||
struct pm_clock_entry *ce;
|
struct pm_clock_entry *ce;
|
||||||
|
|
||||||
if (!pcd)
|
if (!psd)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
spin_lock_irq(&pcd->lock);
|
spin_lock_irq(&psd->lock);
|
||||||
|
|
||||||
list_for_each_entry(ce, &pcd->clock_list, node) {
|
list_for_each_entry(ce, &psd->clock_list, node) {
|
||||||
if (!con_id && !ce->con_id)
|
if (!con_id && !ce->con_id)
|
||||||
goto remove;
|
goto remove;
|
||||||
else if (!con_id || !ce->con_id)
|
else if (!con_id || !ce->con_id)
|
||||||
|
@ -147,12 +137,12 @@ void pm_clk_remove(struct device *dev, const char *con_id)
|
||||||
goto remove;
|
goto remove;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock_irq(&pcd->lock);
|
spin_unlock_irq(&psd->lock);
|
||||||
return;
|
return;
|
||||||
|
|
||||||
remove:
|
remove:
|
||||||
list_del(&ce->node);
|
list_del(&ce->node);
|
||||||
spin_unlock_irq(&pcd->lock);
|
spin_unlock_irq(&psd->lock);
|
||||||
|
|
||||||
__pm_clk_remove(ce);
|
__pm_clk_remove(ce);
|
||||||
}
|
}
|
||||||
|
@ -161,23 +151,27 @@ void pm_clk_remove(struct device *dev, const char *con_id)
|
||||||
* pm_clk_init - Initialize a device's list of power management clocks.
|
* pm_clk_init - Initialize a device's list of power management clocks.
|
||||||
* @dev: Device to initialize the list of PM clocks for.
|
* @dev: Device to initialize the list of PM clocks for.
|
||||||
*
|
*
|
||||||
* Allocate a struct pm_clk_data object, initialize its lock member and
|
* Initialize the lock and clock_list members of the device's pm_subsys_data
|
||||||
* make the @dev's power.subsys_data field point to it.
|
* object.
|
||||||
*/
|
*/
|
||||||
int pm_clk_init(struct device *dev)
|
void pm_clk_init(struct device *dev)
|
||||||
{
|
{
|
||||||
struct pm_clk_data *pcd;
|
struct pm_subsys_data *psd = dev_to_psd(dev);
|
||||||
|
if (psd)
|
||||||
|
INIT_LIST_HEAD(&psd->clock_list);
|
||||||
|
}
|
||||||
|
|
||||||
pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
|
/**
|
||||||
if (!pcd) {
|
* pm_clk_create - Create and initialize a device's list of PM clocks.
|
||||||
dev_err(dev, "Not enough memory for PM clock data.\n");
|
* @dev: Device to create and initialize the list of PM clocks for.
|
||||||
return -ENOMEM;
|
*
|
||||||
}
|
* Allocate a struct pm_subsys_data object, initialize its lock and clock_list
|
||||||
|
* members and make the @dev's power.subsys_data field point to it.
|
||||||
INIT_LIST_HEAD(&pcd->clock_list);
|
*/
|
||||||
spin_lock_init(&pcd->lock);
|
int pm_clk_create(struct device *dev)
|
||||||
dev->power.subsys_data = pcd;
|
{
|
||||||
return 0;
|
int ret = dev_pm_get_subsys_data(dev);
|
||||||
|
return ret < 0 ? ret : 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -185,29 +179,28 @@ int pm_clk_init(struct device *dev)
|
||||||
* @dev: Device to destroy the list of PM clocks for.
|
* @dev: Device to destroy the list of PM clocks for.
|
||||||
*
|
*
|
||||||
* Clear the @dev's power.subsys_data field, remove the list of clock entries
|
* Clear the @dev's power.subsys_data field, remove the list of clock entries
|
||||||
* from the struct pm_clk_data object pointed to by it before and free
|
* from the struct pm_subsys_data object pointed to by it before and free
|
||||||
* that object.
|
* that object.
|
||||||
*/
|
*/
|
||||||
void pm_clk_destroy(struct device *dev)
|
void pm_clk_destroy(struct device *dev)
|
||||||
{
|
{
|
||||||
struct pm_clk_data *pcd = __to_pcd(dev);
|
struct pm_subsys_data *psd = dev_to_psd(dev);
|
||||||
struct pm_clock_entry *ce, *c;
|
struct pm_clock_entry *ce, *c;
|
||||||
struct list_head list;
|
struct list_head list;
|
||||||
|
|
||||||
if (!pcd)
|
if (!psd)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
dev->power.subsys_data = NULL;
|
|
||||||
INIT_LIST_HEAD(&list);
|
INIT_LIST_HEAD(&list);
|
||||||
|
|
||||||
spin_lock_irq(&pcd->lock);
|
spin_lock_irq(&psd->lock);
|
||||||
|
|
||||||
list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
|
list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
|
||||||
list_move(&ce->node, &list);
|
list_move(&ce->node, &list);
|
||||||
|
|
||||||
spin_unlock_irq(&pcd->lock);
|
spin_unlock_irq(&psd->lock);
|
||||||
|
|
||||||
kfree(pcd);
|
dev_pm_put_subsys_data(dev);
|
||||||
|
|
||||||
list_for_each_entry_safe_reverse(ce, c, &list, node) {
|
list_for_each_entry_safe_reverse(ce, c, &list, node) {
|
||||||
list_del(&ce->node);
|
list_del(&ce->node);
|
||||||
|
@ -225,25 +218,25 @@ void pm_clk_destroy(struct device *dev)
|
||||||
*/
|
*/
|
||||||
int pm_clk_suspend(struct device *dev)
|
int pm_clk_suspend(struct device *dev)
|
||||||
{
|
{
|
||||||
struct pm_clk_data *pcd = __to_pcd(dev);
|
struct pm_subsys_data *psd = dev_to_psd(dev);
|
||||||
struct pm_clock_entry *ce;
|
struct pm_clock_entry *ce;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
dev_dbg(dev, "%s()\n", __func__);
|
dev_dbg(dev, "%s()\n", __func__);
|
||||||
|
|
||||||
if (!pcd)
|
if (!psd)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
spin_lock_irqsave(&pcd->lock, flags);
|
spin_lock_irqsave(&psd->lock, flags);
|
||||||
|
|
||||||
list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
|
list_for_each_entry_reverse(ce, &psd->clock_list, node) {
|
||||||
if (ce->status < PCE_STATUS_ERROR) {
|
if (ce->status < PCE_STATUS_ERROR) {
|
||||||
clk_disable(ce->clk);
|
clk_disable(ce->clk);
|
||||||
ce->status = PCE_STATUS_ACQUIRED;
|
ce->status = PCE_STATUS_ACQUIRED;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock_irqrestore(&pcd->lock, flags);
|
spin_unlock_irqrestore(&psd->lock, flags);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -254,25 +247,25 @@ int pm_clk_suspend(struct device *dev)
|
||||||
*/
|
*/
|
||||||
int pm_clk_resume(struct device *dev)
|
int pm_clk_resume(struct device *dev)
|
||||||
{
|
{
|
||||||
struct pm_clk_data *pcd = __to_pcd(dev);
|
struct pm_subsys_data *psd = dev_to_psd(dev);
|
||||||
struct pm_clock_entry *ce;
|
struct pm_clock_entry *ce;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
dev_dbg(dev, "%s()\n", __func__);
|
dev_dbg(dev, "%s()\n", __func__);
|
||||||
|
|
||||||
if (!pcd)
|
if (!psd)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
spin_lock_irqsave(&pcd->lock, flags);
|
spin_lock_irqsave(&psd->lock, flags);
|
||||||
|
|
||||||
list_for_each_entry(ce, &pcd->clock_list, node) {
|
list_for_each_entry(ce, &psd->clock_list, node) {
|
||||||
if (ce->status < PCE_STATUS_ERROR) {
|
if (ce->status < PCE_STATUS_ERROR) {
|
||||||
clk_enable(ce->clk);
|
clk_enable(ce->clk);
|
||||||
ce->status = PCE_STATUS_ENABLED;
|
ce->status = PCE_STATUS_ENABLED;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock_irqrestore(&pcd->lock, flags);
|
spin_unlock_irqrestore(&psd->lock, flags);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -310,7 +303,7 @@ static int pm_clk_notify(struct notifier_block *nb,
|
||||||
if (dev->pm_domain)
|
if (dev->pm_domain)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
error = pm_clk_init(dev);
|
error = pm_clk_create(dev);
|
||||||
if (error)
|
if (error)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
@ -345,22 +338,22 @@ static int pm_clk_notify(struct notifier_block *nb,
|
||||||
*/
|
*/
|
||||||
int pm_clk_suspend(struct device *dev)
|
int pm_clk_suspend(struct device *dev)
|
||||||
{
|
{
|
||||||
struct pm_clk_data *pcd = __to_pcd(dev);
|
struct pm_subsys_data *psd = dev_to_psd(dev);
|
||||||
struct pm_clock_entry *ce;
|
struct pm_clock_entry *ce;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
dev_dbg(dev, "%s()\n", __func__);
|
dev_dbg(dev, "%s()\n", __func__);
|
||||||
|
|
||||||
/* If there is no driver, the clocks are already disabled. */
|
/* If there is no driver, the clocks are already disabled. */
|
||||||
if (!pcd || !dev->driver)
|
if (!psd || !dev->driver)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
spin_lock_irqsave(&pcd->lock, flags);
|
spin_lock_irqsave(&psd->lock, flags);
|
||||||
|
|
||||||
list_for_each_entry_reverse(ce, &pcd->clock_list, node)
|
list_for_each_entry_reverse(ce, &psd->clock_list, node)
|
||||||
clk_disable(ce->clk);
|
clk_disable(ce->clk);
|
||||||
|
|
||||||
spin_unlock_irqrestore(&pcd->lock, flags);
|
spin_unlock_irqrestore(&psd->lock, flags);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -371,22 +364,22 @@ int pm_clk_suspend(struct device *dev)
|
||||||
*/
|
*/
|
||||||
int pm_clk_resume(struct device *dev)
|
int pm_clk_resume(struct device *dev)
|
||||||
{
|
{
|
||||||
struct pm_clk_data *pcd = __to_pcd(dev);
|
struct pm_subsys_data *psd = dev_to_psd(dev);
|
||||||
struct pm_clock_entry *ce;
|
struct pm_clock_entry *ce;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
dev_dbg(dev, "%s()\n", __func__);
|
dev_dbg(dev, "%s()\n", __func__);
|
||||||
|
|
||||||
/* If there is no driver, the clocks should remain disabled. */
|
/* If there is no driver, the clocks should remain disabled. */
|
||||||
if (!pcd || !dev->driver)
|
if (!psd || !dev->driver)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
spin_lock_irqsave(&pcd->lock, flags);
|
spin_lock_irqsave(&psd->lock, flags);
|
||||||
|
|
||||||
list_for_each_entry(ce, &pcd->clock_list, node)
|
list_for_each_entry(ce, &psd->clock_list, node)
|
||||||
clk_enable(ce->clk);
|
clk_enable(ce->clk);
|
||||||
|
|
||||||
spin_unlock_irqrestore(&pcd->lock, flags);
|
spin_unlock_irqrestore(&psd->lock, flags);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -0,0 +1,86 @@
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter. Return 1 if a new object has been created, otherwise
+ * return 0 or error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+	if (!psd)
+		return -ENOMEM;
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.subsys_data) {
+		dev->power.subsys_data->refcount++;
+	} else {
+		spin_lock_init(&psd->lock);
+		psd->refcount = 1;
+		dev->power.subsys_data = psd;
+		pm_clk_init(dev);
+		psd = NULL;
+		ret = 1;
+	}
+
+	spin_unlock_irq(&dev->power.lock);
+
+	/* kfree() verifies that its argument is nonzero. */
+	kfree(psd);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed. Return 1 if that happens or 0
+ * otherwise.
+ */
+int dev_pm_put_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	spin_lock_irq(&dev->power.lock);
+
+	psd = dev_to_psd(dev);
+	if (!psd) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (--psd->refcount == 0) {
+		dev->power.subsys_data = NULL;
+		kfree(psd);
+		ret = 1;
+	}
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);

@ -29,10 +29,20 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
|
||||||
return pd_to_genpd(dev->pm_domain);
|
return pd_to_genpd(dev->pm_domain);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
|
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
|
||||||
{
|
{
|
||||||
if (!WARN_ON(genpd->sd_count == 0))
|
bool ret = false;
|
||||||
genpd->sd_count--;
|
|
||||||
|
if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
|
||||||
|
ret = !!atomic_dec_and_test(&genpd->sd_count);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
|
||||||
|
{
|
||||||
|
atomic_inc(&genpd->sd_count);
|
||||||
|
smp_mb__after_atomic_inc();
|
||||||
}
|
}
|
||||||
|
|
||||||
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
|
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
|
||||||
|
@ -71,60 +81,97 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* pm_genpd_poweron - Restore power to a given PM domain and its parents.
|
* __pm_genpd_poweron - Restore power to a given PM domain and its masters.
|
||||||
* @genpd: PM domain to power up.
|
* @genpd: PM domain to power up.
|
||||||
*
|
*
|
||||||
* Restore power to @genpd and all of its parents so that it is possible to
|
* Restore power to @genpd and all of its masters so that it is possible to
|
||||||
* resume a device belonging to it.
|
* resume a device belonging to it.
|
||||||
*/
|
*/
|
||||||
int pm_genpd_poweron(struct generic_pm_domain *genpd)
|
int __pm_genpd_poweron(struct generic_pm_domain *genpd)
|
||||||
|
__releases(&genpd->lock) __acquires(&genpd->lock)
|
||||||
{
|
{
|
||||||
struct generic_pm_domain *parent = genpd->parent;
|
struct gpd_link *link;
|
||||||
|
DEFINE_WAIT(wait);
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
start:
|
/* If the domain's master is being waited for, we have to wait too. */
|
||||||
if (parent) {
|
for (;;) {
|
||||||
genpd_acquire_lock(parent);
|
prepare_to_wait(&genpd->status_wait_queue, &wait,
|
||||||
mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
|
TASK_UNINTERRUPTIBLE);
|
||||||
} else {
|
if (genpd->status != GPD_STATE_WAIT_MASTER)
|
||||||
|
break;
|
||||||
|
mutex_unlock(&genpd->lock);
|
||||||
|
|
||||||
|
schedule();
|
||||||
|
|
||||||
mutex_lock(&genpd->lock);
|
mutex_lock(&genpd->lock);
|
||||||
}
|
}
|
||||||
|
finish_wait(&genpd->status_wait_queue, &wait);
|
||||||
|
|
||||||
if (genpd->status == GPD_STATE_ACTIVE
|
if (genpd->status == GPD_STATE_ACTIVE
|
||||||
|| (genpd->prepared_count > 0 && genpd->suspend_power_off))
|
|| (genpd->prepared_count > 0 && genpd->suspend_power_off))
|
||||||
goto out;
|
return 0;
|
||||||
|
|
||||||
if (genpd->status != GPD_STATE_POWER_OFF) {
|
if (genpd->status != GPD_STATE_POWER_OFF) {
|
||||||
genpd_set_active(genpd);
|
genpd_set_active(genpd);
|
||||||
goto out;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (parent && parent->status != GPD_STATE_ACTIVE) {
|
/*
|
||||||
|
* The list is guaranteed not to change while the loop below is being
|
||||||
|
* executed, unless one of the masters' .power_on() callbacks fiddles
|
||||||
|
* with it.
|
||||||
|
*/
|
||||||
|
list_for_each_entry(link, &genpd->slave_links, slave_node) {
|
||||||
|
genpd_sd_counter_inc(link->master);
|
||||||
|
genpd->status = GPD_STATE_WAIT_MASTER;
|
||||||
|
|
||||||
mutex_unlock(&genpd->lock);
|
mutex_unlock(&genpd->lock);
|
||||||
genpd_release_lock(parent);
|
|
||||||
|
|
||||||
ret = pm_genpd_poweron(parent);
|
ret = pm_genpd_poweron(link->master);
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
goto start;
|
mutex_lock(&genpd->lock);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The "wait for parent" status is guaranteed not to change
|
||||||
|
* while the master is powering on.
|
||||||
|
*/
|
||||||
|
genpd->status = GPD_STATE_POWER_OFF;
|
||||||
|
wake_up_all(&genpd->status_wait_queue);
|
||||||
|
if (ret) {
|
||||||
|
genpd_sd_counter_dec(link->master);
|
||||||
|
goto err;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (genpd->power_on) {
|
if (genpd->power_on) {
|
||||||
ret = genpd->power_on(genpd);
|
ret = genpd->power_on(genpd);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
goto err;
|
||||||
}
|
}
|
||||||
|
|
||||||
genpd_set_active(genpd);
|
genpd_set_active(genpd);
|
||||||
if (parent)
|
|
||||||
parent->sd_count++;
|
|
||||||
|
|
||||||
out:
|
return 0;
|
||||||
|
|
||||||
|
err:
|
||||||
|
list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
|
||||||
|
genpd_sd_counter_dec(link->master);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* pm_genpd_poweron - Restore power to a given PM domain and its masters.
|
||||||
|
* @genpd: PM domain to power up.
|
||||||
|
*/
|
||||||
|
int pm_genpd_poweron(struct generic_pm_domain *genpd)
|
||||||
|
{
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
mutex_lock(&genpd->lock);
|
||||||
|
ret = __pm_genpd_poweron(genpd);
|
||||||
mutex_unlock(&genpd->lock);
|
mutex_unlock(&genpd->lock);
|
||||||
if (parent)
|
|
||||||
genpd_release_lock(parent);
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -134,18 +181,19 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* __pm_genpd_save_device - Save the pre-suspend state of a device.
|
* __pm_genpd_save_device - Save the pre-suspend state of a device.
|
||||||
* @dle: Device list entry of the device to save the state of.
|
* @pdd: Domain data of the device to save the state of.
|
||||||
* @genpd: PM domain the device belongs to.
|
* @genpd: PM domain the device belongs to.
|
||||||
*/
|
*/
|
||||||
static int __pm_genpd_save_device(struct dev_list_entry *dle,
|
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
|
||||||
struct generic_pm_domain *genpd)
|
struct generic_pm_domain *genpd)
|
||||||
__releases(&genpd->lock) __acquires(&genpd->lock)
|
__releases(&genpd->lock) __acquires(&genpd->lock)
|
||||||
{
|
{
|
||||||
struct device *dev = dle->dev;
|
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
|
||||||
|
struct device *dev = pdd->dev;
|
||||||
struct device_driver *drv = dev->driver;
|
struct device_driver *drv = dev->driver;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
if (dle->need_restore)
|
if (gpd_data->need_restore)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
mutex_unlock(&genpd->lock);
|
mutex_unlock(&genpd->lock);
|
||||||
|
@ -163,24 +211,25 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
|
||||||
mutex_lock(&genpd->lock);
|
mutex_lock(&genpd->lock);
|
||||||
|
|
||||||
if (!ret)
|
if (!ret)
|
||||||
dle->need_restore = true;
|
gpd_data->need_restore = true;
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* __pm_genpd_restore_device - Restore the pre-suspend state of a device.
|
* __pm_genpd_restore_device - Restore the pre-suspend state of a device.
|
||||||
* @dle: Device list entry of the device to restore the state of.
|
* @pdd: Domain data of the device to restore the state of.
|
||||||
* @genpd: PM domain the device belongs to.
|
* @genpd: PM domain the device belongs to.
|
||||||
*/
|
*/
|
||||||
static void __pm_genpd_restore_device(struct dev_list_entry *dle,
|
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
|
||||||
struct generic_pm_domain *genpd)
|
struct generic_pm_domain *genpd)
|
||||||
__releases(&genpd->lock) __acquires(&genpd->lock)
|
__releases(&genpd->lock) __acquires(&genpd->lock)
|
||||||
{
|
{
|
||||||
struct device *dev = dle->dev;
|
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
|
||||||
|
struct device *dev = pdd->dev;
|
||||||
struct device_driver *drv = dev->driver;
|
struct device_driver *drv = dev->driver;
|
||||||
|
|
||||||
if (!dle->need_restore)
|
if (!gpd_data->need_restore)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
mutex_unlock(&genpd->lock);
|
mutex_unlock(&genpd->lock);
|
||||||
|
@ -197,7 +246,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
|
||||||
|
|
||||||
mutex_lock(&genpd->lock);
|
mutex_lock(&genpd->lock);
|
||||||
|
|
||||||
dle->need_restore = false;
|
gpd_data->need_restore = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -211,7 +260,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
|
||||||
*/
|
*/
|
||||||
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
|
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
|
||||||
{
|
{
|
||||||
return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
|
return genpd->status == GPD_STATE_WAIT_MASTER
|
||||||
|
|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -238,8 +288,8 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
|
||||||
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
|
 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
         __releases(&genpd->lock) __acquires(&genpd->lock)
 {
-        struct generic_pm_domain *parent;
-        struct dev_list_entry *dle;
+        struct pm_domain_data *pdd;
+        struct gpd_link *link;
         unsigned int not_suspended;
         int ret = 0;
 
@@ -247,19 +297,22 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
         /*
          * Do not try to power off the domain in the following situations:
          * (1) The domain is already in the "power off" state.
-         * (2) System suspend is in progress.
+         * (2) The domain is waiting for its master to power up.
          * (3) One of the domain's devices is being resumed right now.
+         * (4) System suspend is in progress.
          */
-        if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
-            || genpd->resume_count > 0)
+        if (genpd->status == GPD_STATE_POWER_OFF
+            || genpd->status == GPD_STATE_WAIT_MASTER
+            || genpd->resume_count > 0 || genpd->prepared_count > 0)
                 return 0;
 
-        if (genpd->sd_count > 0)
+        if (atomic_read(&genpd->sd_count) > 0)
                 return -EBUSY;
 
         not_suspended = 0;
-        list_for_each_entry(dle, &genpd->dev_list, node)
-                if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
+        list_for_each_entry(pdd, &genpd->dev_list, list_node)
+                if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
+                    || pdd->dev->power.irq_safe))
                         not_suspended++;
 
         if (not_suspended > genpd->in_progress)
@@ -282,54 +335,50 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
         genpd->status = GPD_STATE_BUSY;
         genpd->poweroff_task = current;
 
-        list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
-                ret = __pm_genpd_save_device(dle, genpd);
+        list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+                ret = atomic_read(&genpd->sd_count) == 0 ?
+                        __pm_genpd_save_device(pdd, genpd) : -EBUSY;
+
+                if (genpd_abort_poweroff(genpd))
+                        goto out;
+
                 if (ret) {
                         genpd_set_active(genpd);
                         goto out;
                 }
 
-                if (genpd_abort_poweroff(genpd))
-                        goto out;
-
                 if (genpd->status == GPD_STATE_REPEAT) {
                         genpd->poweroff_task = NULL;
                         goto start;
                 }
         }
 
-        parent = genpd->parent;
-        if (parent) {
-                mutex_unlock(&genpd->lock);
-
-                genpd_acquire_lock(parent);
-                mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-
-                if (genpd_abort_poweroff(genpd)) {
-                        genpd_release_lock(parent);
+        if (genpd->power_off) {
+                if (atomic_read(&genpd->sd_count) > 0) {
+                        ret = -EBUSY;
                         goto out;
                 }
-        }
 
-        if (genpd->power_off) {
+                /*
+                 * If sd_count > 0 at this point, one of the subdomains hasn't
+                 * managed to call pm_genpd_poweron() for the master yet after
+                 * incrementing it.  In that case pm_genpd_poweron() will wait
+                 * for us to drop the lock, so we can call .power_off() and let
+                 * the pm_genpd_poweron() restore power for us (this shouldn't
+                 * happen very often).
+                 */
                 ret = genpd->power_off(genpd);
                 if (ret == -EBUSY) {
                         genpd_set_active(genpd);
-                        if (parent)
-                                genpd_release_lock(parent);
-
                         goto out;
                 }
         }
 
         genpd->status = GPD_STATE_POWER_OFF;
 
-        if (parent) {
-                genpd_sd_counter_dec(parent);
-                if (parent->sd_count == 0)
-                        genpd_queue_power_off_work(parent);
-
-                genpd_release_lock(parent);
+        list_for_each_entry(link, &genpd->slave_links, slave_node) {
+                genpd_sd_counter_dec(link->master);
+                genpd_queue_power_off_work(link->master);
         }
 
  out:
@@ -371,12 +420,21 @@ static int pm_genpd_runtime_suspend(struct device *dev)
         if (IS_ERR(genpd))
                 return -EINVAL;
 
+        might_sleep_if(!genpd->dev_irq_safe);
+
         if (genpd->stop_device) {
                 int ret = genpd->stop_device(dev);
                 if (ret)
                         return ret;
         }
 
+        /*
+         * If power.irq_safe is set, this routine will be run with interrupts
+         * off, so it can't use mutexes.
+         */
+        if (dev->power.irq_safe)
+                return 0;
+
         mutex_lock(&genpd->lock);
         genpd->in_progress++;
         pm_genpd_poweroff(genpd);
@@ -386,24 +444,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
         return 0;
 }
 
-/**
- * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
- * @dev: Device to resume.
- * @genpd: PM domain the device belongs to.
- */
-static void __pm_genpd_runtime_resume(struct device *dev,
-                                      struct generic_pm_domain *genpd)
-{
-        struct dev_list_entry *dle;
-
-        list_for_each_entry(dle, &genpd->dev_list, node) {
-                if (dle->dev == dev) {
-                        __pm_genpd_restore_device(dle, genpd);
-                        break;
-                }
-        }
-}
-
 /**
  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
  * @dev: Device to resume.
@@ -424,11 +464,18 @@ static int pm_genpd_runtime_resume(struct device *dev)
         if (IS_ERR(genpd))
                 return -EINVAL;
 
-        ret = pm_genpd_poweron(genpd);
-        if (ret)
-                return ret;
+        might_sleep_if(!genpd->dev_irq_safe);
+
+        /* If power.irq_safe, the PM domain is never powered off. */
+        if (dev->power.irq_safe)
+                goto out;
 
         mutex_lock(&genpd->lock);
+        ret = __pm_genpd_poweron(genpd);
+        if (ret) {
+                mutex_unlock(&genpd->lock);
+                return ret;
+        }
         genpd->status = GPD_STATE_BUSY;
         genpd->resume_count++;
         for (;;) {
@@ -448,12 +495,13 @@ static int pm_genpd_runtime_resume(struct device *dev)
                 mutex_lock(&genpd->lock);
         }
         finish_wait(&genpd->status_wait_queue, &wait);
-        __pm_genpd_runtime_resume(dev, genpd);
+        __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
         genpd->resume_count--;
         genpd_set_active(genpd);
         wake_up_all(&genpd->status_wait_queue);
         mutex_unlock(&genpd->lock);
 
+ out:
         if (genpd->start_device)
                 genpd->start_device(dev);
 
@@ -478,8 +526,6 @@ void pm_genpd_poweroff_unused(void)
 #else
 
 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
-static inline void __pm_genpd_runtime_resume(struct device *dev,
-                                             struct generic_pm_domain *genpd) {}
 
 #define pm_genpd_runtime_suspend        NULL
 #define pm_genpd_runtime_resume         NULL
@@ -489,11 +535,11 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
 #ifdef CONFIG_PM_SLEEP
 
 /**
- * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
  *
  * Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so.  Also, in that case propagate to its parent.
+ * hibernation) and do that if so.  Also, in that case propagate to its masters.
  *
  * This function is only called in "noirq" stages of system power transitions,
  * so it need not acquire locks (all of the "noirq" callbacks are executed
@@ -501,21 +547,23 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
  */
 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
-        struct generic_pm_domain *parent = genpd->parent;
+        struct gpd_link *link;
 
         if (genpd->status == GPD_STATE_POWER_OFF)
                 return;
 
-        if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+        if (genpd->suspended_count != genpd->device_count
+            || atomic_read(&genpd->sd_count) > 0)
                 return;
 
         if (genpd->power_off)
                 genpd->power_off(genpd);
 
         genpd->status = GPD_STATE_POWER_OFF;
-        if (parent) {
-                genpd_sd_counter_dec(parent);
-                pm_genpd_sync_poweroff(parent);
+
+        list_for_each_entry(link, &genpd->slave_links, slave_node) {
+                genpd_sd_counter_dec(link->master);
+                pm_genpd_sync_poweroff(link->master);
         }
 }
 
@@ -1034,7 +1082,8 @@ static void pm_genpd_complete(struct device *dev)
  */
 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 {
-        struct dev_list_entry *dle;
+        struct generic_pm_domain_data *gpd_data;
+        struct pm_domain_data *pdd;
         int ret = 0;
 
         dev_dbg(dev, "%s()\n", __func__);
@@ -1054,26 +1103,26 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
                 goto out;
         }
 
-        list_for_each_entry(dle, &genpd->dev_list, node)
-                if (dle->dev == dev) {
+        list_for_each_entry(pdd, &genpd->dev_list, list_node)
+                if (pdd->dev == dev) {
                         ret = -EINVAL;
                         goto out;
                 }
 
-        dle = kzalloc(sizeof(*dle), GFP_KERNEL);
-        if (!dle) {
+        gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+        if (!gpd_data) {
                 ret = -ENOMEM;
                 goto out;
         }
 
-        dle->dev = dev;
-        dle->need_restore = false;
-        list_add_tail(&dle->node, &genpd->dev_list);
         genpd->device_count++;
 
-        spin_lock_irq(&dev->power.lock);
         dev->pm_domain = &genpd->domain;
-        spin_unlock_irq(&dev->power.lock);
+        dev_pm_get_subsys_data(dev);
+        dev->power.subsys_data->domain_data = &gpd_data->base;
+        gpd_data->base.dev = dev;
+        gpd_data->need_restore = false;
+        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
 
  out:
         genpd_release_lock(genpd);
@@ -1089,7 +1138,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                            struct device *dev)
 {
-        struct dev_list_entry *dle;
+        struct pm_domain_data *pdd;
         int ret = -EINVAL;
 
         dev_dbg(dev, "%s()\n", __func__);
@@ -1104,17 +1153,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                 goto out;
         }
 
-        list_for_each_entry(dle, &genpd->dev_list, node) {
-                if (dle->dev != dev)
+        list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+                if (pdd->dev != dev)
                         continue;
 
-                spin_lock_irq(&dev->power.lock);
+                list_del_init(&pdd->list_node);
+                pdd->dev = NULL;
+                dev_pm_put_subsys_data(dev);
                 dev->pm_domain = NULL;
-                spin_unlock_irq(&dev->power.lock);
+                kfree(to_gpd_data(pdd));
 
                 genpd->device_count--;
-                list_del(&dle->node);
-                kfree(dle);
 
                 ret = 0;
                 break;
@@ -1129,48 +1178,55 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 /**
  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
  * @genpd: Master PM domain to add the subdomain to.
- * @new_subdomain: Subdomain to be added.
+ * @subdomain: Subdomain to be added.
  */
 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
-                           struct generic_pm_domain *new_subdomain)
+                           struct generic_pm_domain *subdomain)
 {
-        struct generic_pm_domain *subdomain;
+        struct gpd_link *link;
         int ret = 0;
 
-        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
                 return -EINVAL;
 
  start:
         genpd_acquire_lock(genpd);
-        mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+        mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
 
-        if (new_subdomain->status != GPD_STATE_POWER_OFF
-            && new_subdomain->status != GPD_STATE_ACTIVE) {
-                mutex_unlock(&new_subdomain->lock);
+        if (subdomain->status != GPD_STATE_POWER_OFF
+            && subdomain->status != GPD_STATE_ACTIVE) {
+                mutex_unlock(&subdomain->lock);
                 genpd_release_lock(genpd);
                 goto start;
         }
 
         if (genpd->status == GPD_STATE_POWER_OFF
-            && new_subdomain->status != GPD_STATE_POWER_OFF) {
+            && subdomain->status != GPD_STATE_POWER_OFF) {
                 ret = -EINVAL;
                 goto out;
         }
 
-        list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
-                if (subdomain == new_subdomain) {
+        list_for_each_entry(link, &genpd->slave_links, slave_node) {
+                if (link->slave == subdomain && link->master == genpd) {
                         ret = -EINVAL;
                         goto out;
                 }
         }
 
-        list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
-        new_subdomain->parent = genpd;
+        link = kzalloc(sizeof(*link), GFP_KERNEL);
+        if (!link) {
+                ret = -ENOMEM;
+                goto out;
+        }
+        link->master = genpd;
+        list_add_tail(&link->master_node, &genpd->master_links);
+        link->slave = subdomain;
+        list_add_tail(&link->slave_node, &subdomain->slave_links);
         if (subdomain->status != GPD_STATE_POWER_OFF)
-                genpd->sd_count++;
+                genpd_sd_counter_inc(genpd);
 
  out:
-        mutex_unlock(&new_subdomain->lock);
+        mutex_unlock(&subdomain->lock);
         genpd_release_lock(genpd);
 
         return ret;
@@ -1179,22 +1235,22 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 /**
  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
  * @genpd: Master PM domain to remove the subdomain from.
- * @target: Subdomain to be removed.
+ * @subdomain: Subdomain to be removed.
 */
 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
-                              struct generic_pm_domain *target)
+                              struct generic_pm_domain *subdomain)
 {
-        struct generic_pm_domain *subdomain;
+        struct gpd_link *link;
         int ret = -EINVAL;
 
-        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
                 return -EINVAL;
 
  start:
         genpd_acquire_lock(genpd);
 
-        list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
-                if (subdomain != target)
+        list_for_each_entry(link, &genpd->master_links, master_node) {
+                if (link->slave != subdomain)
                         continue;
 
                 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
@@ -1206,8 +1262,9 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                         goto start;
                 }
 
-                list_del(&subdomain->sd_node);
-                subdomain->parent = NULL;
+                list_del(&link->master_node);
+                list_del(&link->slave_node);
+                kfree(link);
                 if (subdomain->status != GPD_STATE_POWER_OFF)
                         genpd_sd_counter_dec(genpd);
 
@@ -1234,15 +1291,14 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
         if (IS_ERR_OR_NULL(genpd))
                 return;
 
-        INIT_LIST_HEAD(&genpd->sd_node);
-        genpd->parent = NULL;
+        INIT_LIST_HEAD(&genpd->master_links);
+        INIT_LIST_HEAD(&genpd->slave_links);
         INIT_LIST_HEAD(&genpd->dev_list);
-        INIT_LIST_HEAD(&genpd->sd_list);
         mutex_init(&genpd->lock);
         genpd->gov = gov;
         INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
         genpd->in_progress = 0;
-        genpd->sd_count = 0;
+        atomic_set(&genpd->sd_count, 0);
         genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
         init_waitqueue_head(&genpd->status_wait_queue);
         genpd->poweroff_task = NULL;
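The hunks above convert the generic PM domain core from a single-parent model to master/slave links with an atomic subdomain counter. Purely as an illustrative sketch, not part of this commit, a platform could wire up a master domain, a subdomain and a device through the helpers touched above roughly like this; the my_* names and empty callbacks are hypothetical:

#include <linux/pm_domain.h>

/* Hypothetical callbacks; a real platform would poke its power registers. */
static int my_pd_power_off(struct generic_pm_domain *domain)
{
        return 0;       /* cut power to the domain */
}

static int my_pd_power_on(struct generic_pm_domain *domain)
{
        return 0;       /* restore power to the domain */
}

static struct generic_pm_domain my_master_pd = {
        .power_off = my_pd_power_off,
        .power_on = my_pd_power_on,
};

static struct generic_pm_domain my_sub_pd = {
        .power_off = my_pd_power_off,
        .power_on = my_pd_power_on,
};

static void my_pm_domains_init(struct device *dev)
{
        pm_genpd_init(&my_master_pd, NULL, false);
        pm_genpd_init(&my_sub_pd, NULL, false);

        /* creates a struct gpd_link between master and slave */
        pm_genpd_add_subdomain(&my_master_pd, &my_sub_pd);

        /* allocates a generic_pm_domain_data entry on the domain's dev_list */
        pm_genpd_add_device(&my_sub_pd, dev);
}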
@@ -636,6 +636,11 @@ static inline void set_dev_node(struct device *dev, int node)
 }
 #endif
 
+static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
+{
+        return dev ? dev->power.subsys_data : NULL;
+}
+
 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
 {
         return dev->kobj.uevent_suppress;
@@ -423,6 +423,22 @@ enum rpm_request {
 
 struct wakeup_source;
 
+struct pm_domain_data {
+        struct list_head list_node;
+        struct device *dev;
+};
+
+struct pm_subsys_data {
+        spinlock_t lock;
+        unsigned int refcount;
+#ifdef CONFIG_PM_CLK
+        struct list_head clock_list;
+#endif
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+        struct pm_domain_data *domain_data;
+#endif
+};
+
 struct dev_pm_info {
         pm_message_t power_state;
         unsigned int can_wakeup:1;
@@ -464,10 +480,12 @@ struct dev_pm_info {
         unsigned long suspended_jiffies;
         unsigned long accounting_timestamp;
 #endif
-        void *subsys_data;  /* Owned by the subsystem. */
+        struct pm_subsys_data *subsys_data;  /* Owned by the subsystem. */
 };
 
 extern void update_pm_runtime_accounting(struct device *dev);
+extern int dev_pm_get_subsys_data(struct device *dev);
+extern int dev_pm_put_subsys_data(struct device *dev);
 
 /*
  * Power domains provide callbacks that are executed during system suspend,
@@ -0,0 +1,71 @@
+/*
+ * pm_clock.h - Definitions and headers related to device clocks.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef _LINUX_PM_CLOCK_H
+#define _LINUX_PM_CLOCK_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+struct pm_clk_notifier_block {
+        struct notifier_block nb;
+        struct dev_pm_domain *pm_domain;
+        char *con_ids[];
+};
+
+#ifdef CONFIG_PM_CLK
+static inline bool pm_clk_no_clocks(struct device *dev)
+{
+        return dev && dev->power.subsys_data
+                && list_empty(&dev->power.subsys_data->clock_list);
+}
+
+extern void pm_clk_init(struct device *dev);
+extern int pm_clk_create(struct device *dev);
+extern void pm_clk_destroy(struct device *dev);
+extern int pm_clk_add(struct device *dev, const char *con_id);
+extern void pm_clk_remove(struct device *dev, const char *con_id);
+extern int pm_clk_suspend(struct device *dev);
+extern int pm_clk_resume(struct device *dev);
+#else
+static inline bool pm_clk_no_clocks(struct device *dev)
+{
+        return true;
+}
+static inline void pm_clk_init(struct device *dev)
+{
+}
+static inline int pm_clk_create(struct device *dev)
+{
+        return -EINVAL;
+}
+static inline void pm_clk_destroy(struct device *dev)
+{
+}
+static inline int pm_clk_add(struct device *dev, const char *con_id)
+{
+        return -EINVAL;
+}
+static inline void pm_clk_remove(struct device *dev, const char *con_id)
+{
+}
+#define pm_clk_suspend  NULL
+#define pm_clk_resume   NULL
+#endif
+
+#ifdef CONFIG_HAVE_CLK
+extern void pm_clk_add_notifier(struct bus_type *bus,
+                                struct pm_clk_notifier_block *clknb);
+#else
+static inline void pm_clk_add_notifier(struct bus_type *bus,
+                                       struct pm_clk_notifier_block *clknb)
+{
+}
+#endif
+
+#endif
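The new pm_clock.h header added above carries the pm_clk_* declarations that previously lived in pm_runtime.h (removed in the last hunk below). A minimal usage sketch of that interface, illustrative only and assuming CONFIG_PM_CLK is enabled; the my_bus_* names are hypothetical:

#include <linux/pm_clock.h>

static int my_bus_attach_clocks(struct device *dev)
{
        int ret;

        ret = pm_clk_create(dev);       /* set up the per-device clock list */
        if (ret)
                return ret;

        ret = pm_clk_add(dev, NULL);    /* manage the device's default clock */
        if (ret)
                pm_clk_destroy(dev);

        return ret;
}

static int my_bus_runtime_suspend(struct device *dev)
{
        return pm_clk_suspend(dev);     /* gate every clock on the list */
}

static int my_bus_runtime_resume(struct device *dev)
{
        return pm_clk_resume(dev);      /* ungate them again */
}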
@@ -13,6 +13,7 @@
 
 enum gpd_status {
         GPD_STATE_ACTIVE = 0,   /* PM domain is active */
+        GPD_STATE_WAIT_MASTER,  /* PM domain's master is being waited for */
         GPD_STATE_BUSY,         /* Something is happening to the PM domain */
         GPD_STATE_REPEAT,       /* Power off in progress, to be repeated */
         GPD_STATE_POWER_OFF,    /* PM domain is off */
@@ -25,15 +26,14 @@ struct dev_power_governor {
 struct generic_pm_domain {
         struct dev_pm_domain domain;    /* PM domain operations */
         struct list_head gpd_list_node; /* Node in the global PM domains list */
-        struct list_head sd_node;       /* Node in the parent's subdomain list */
-        struct generic_pm_domain *parent;       /* Parent PM domain */
-        struct list_head sd_list;       /* List of dubdomains */
+        struct list_head master_links;  /* Links with PM domain as a master */
+        struct list_head slave_links;   /* Links with PM domain as a slave */
         struct list_head dev_list;      /* List of devices */
         struct mutex lock;
         struct dev_power_governor *gov;
         struct work_struct power_off_work;
         unsigned int in_progress;       /* Number of devices being suspended now */
-        unsigned int sd_count;  /* Number of subdomains with power "on" */
+        atomic_t sd_count;      /* Number of subdomains with power "on" */
         enum gpd_status status; /* Current state of the domain */
         wait_queue_head_t status_wait_queue;
         struct task_struct *poweroff_task;      /* Powering off task */
@@ -42,6 +42,7 @@ struct generic_pm_domain {
         unsigned int suspended_count;   /* System suspend device counter */
         unsigned int prepared_count;    /* Suspend counter of prepared devices */
         bool suspend_power_off; /* Power status before system suspend */
+        bool dev_irq_safe;      /* Device callbacks are IRQ-safe */
         int (*power_off)(struct generic_pm_domain *domain);
         int (*power_on)(struct generic_pm_domain *domain);
         int (*start_device)(struct device *dev);
@@ -54,12 +55,23 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
         return container_of(pd, struct generic_pm_domain, domain);
 }
 
-struct dev_list_entry {
-        struct list_head node;
-        struct device *dev;
+struct gpd_link {
+        struct generic_pm_domain *master;
+        struct list_head master_node;
+        struct generic_pm_domain *slave;
+        struct list_head slave_node;
+};
+
+struct generic_pm_domain_data {
+        struct pm_domain_data base;
         bool need_restore;
 };
 
+static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd)
+{
+        return container_of(pdd, struct generic_pm_domain_data, base);
+}
+
 #ifdef CONFIG_PM_GENERIC_DOMAINS
 extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
                                struct device *dev);
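The pm.h and pm_domain.h hunks above split the per-device PM domain bookkeeping into a generic struct pm_domain_data, reachable through pm_subsys_data, plus a genpd-specific wrapper. A hypothetical helper, shown only to illustrate how the new pieces connect (assumes CONFIG_PM_GENERIC_DOMAINS; my_dev_needs_restore is not a kernel function):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_domain.h>

/* Walk: struct device -> pm_subsys_data -> pm_domain_data -> genpd data. */
static bool my_dev_needs_restore(struct device *dev)
{
        struct pm_subsys_data *psd = dev_to_psd(dev);

        if (!psd || !psd->domain_data)
                return false;

        return to_gpd_data(psd->domain_data)->need_restore;
}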
@@ -251,46 +251,4 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
         __pm_runtime_use_autosuspend(dev, false);
 }
 
-struct pm_clk_notifier_block {
-        struct notifier_block nb;
-        struct dev_pm_domain *pm_domain;
-        char *con_ids[];
-};
-
-#ifdef CONFIG_PM_CLK
-extern int pm_clk_init(struct device *dev);
-extern void pm_clk_destroy(struct device *dev);
-extern int pm_clk_add(struct device *dev, const char *con_id);
-extern void pm_clk_remove(struct device *dev, const char *con_id);
-extern int pm_clk_suspend(struct device *dev);
-extern int pm_clk_resume(struct device *dev);
-#else
-static inline int pm_clk_init(struct device *dev)
-{
-        return -EINVAL;
-}
-static inline void pm_clk_destroy(struct device *dev)
-{
-}
-static inline int pm_clk_add(struct device *dev, const char *con_id)
-{
-        return -EINVAL;
-}
-static inline void pm_clk_remove(struct device *dev, const char *con_id)
-{
-}
-#define pm_clk_suspend  NULL
-#define pm_clk_resume   NULL
-#endif
-
-#ifdef CONFIG_HAVE_CLK
-extern void pm_clk_add_notifier(struct bus_type *bus,
-                                struct pm_clk_notifier_block *clknb);
-#else
-static inline void pm_clk_add_notifier(struct bus_type *bus,
-                                       struct pm_clk_notifier_block *clknb)
-{
-}
-#endif
-
 #endif