Merge branches 'pm-avs' and 'pm-cpuidle'
* pm-avs:
  power: avs: qcom-cpr: Avoid clang -Wsometimes-uninitialized in cpr_scale
  power: avs: qcom-cpr: add unspecified HAS_IOMEM dependency
  PM / AVS: rockchip-io: fix the supply naming for the emmc supply on px30
  power: avs: qcom-cpr: add a printout after the driver has been initialized

* pm-cpuidle:
  cpuidle: Documentation: Clean up PM QoS description
  intel_idle: Introduce 'states_off' module parameter
  intel_idle: Introduce 'use_acpi' module parameter
commit 332008256f

--- a/Documentation/admin-guide/pm/cpuidle.rst
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -632,16 +632,16 @@ class priority list and destroyed. If that happens, the priority list mechanism
 will be used, again, to determine the new effective value for the whole list
 and that value will become the new real constraint.
 
-In turn, for each CPU there is only one resume latency PM QoS request
-associated with the :file:`power/pm_qos_resume_latency_us` file under
+In turn, for each CPU there is one resume latency PM QoS request associated with
+the :file:`power/pm_qos_resume_latency_us` file under
 :file:`/sys/devices/system/cpu/cpu<N>/` in ``sysfs`` and writing to it causes
 this single PM QoS request to be updated regardless of which user space
 process does that. In other words, this PM QoS request is shared by the entire
 user space, so access to the file associated with it needs to be arbitrated
 to avoid confusion. [Arguably, the only legitimate use of this mechanism in
 practice is to pin a process to the CPU in question and let it use the
-``sysfs`` interface to control the resume latency constraint for it.] It
-still only is a request, however. It is a member of a priority list used to
+``sysfs`` interface to control the resume latency constraint for it.] It is
+still only a request, however. It is an entry in a priority list used to
 determine the effective value to be set as the resume latency constraint for the
 CPU in question every time the list of requests is updated this way or another
 (there may be other requests coming from kernel code in that list).
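
For context on the usage pattern the reworded paragraph describes (pin a process to the CPU in question and let it drive the shared per-CPU request), here is a minimal user-space sketch in C. The CPU index and the 20 us value are arbitrary assumptions for illustration; only the sysfs path comes from the documentation above.

/*
 * Illustrative only: pins the calling process to CPU 2 and requests a
 * 20 us resume latency limit for that CPU through the shared sysfs PM
 * QoS file discussed above.  CPU number and latency value are made up.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        cpu_set_t set;
        FILE *f;

        CPU_ZERO(&set);
        CPU_SET(2, &set);               /* assumed CPU index */
        if (sched_setaffinity(0, sizeof(set), &set)) {
                perror("sched_setaffinity");
                return EXIT_FAILURE;
        }

        /* One shared request per CPU; writing the file updates that request. */
        f = fopen("/sys/devices/system/cpu/cpu2/power/pm_qos_resume_latency_us", "w");
        if (!f) {
                perror("fopen");
                return EXIT_FAILURE;
        }
        fprintf(f, "20\n");             /* assumed 20 us constraint */
        fclose(f);

        /* ... latency-sensitive work pinned to CPU 2 would run here ... */
        return EXIT_SUCCESS;
}
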
--- a/Documentation/admin-guide/pm/intel_idle.rst
+++ b/Documentation/admin-guide/pm/intel_idle.rst
@@ -60,6 +60,9 @@ of the system. The former are always used if the processor model at hand is
 recognized by ``intel_idle`` and the latter are used if that is required for
 the given processor model (which is the case for all server processor models
 recognized by ``intel_idle``) or if the processor model is not recognized.
+[There is a module parameter that can be used to make the driver use the ACPI
+tables with any processor model recognized by it; see
+`below <intel-idle-parameters_>`_.]
 
 If the ACPI tables are going to be used for building the list of available idle
 states, ``intel_idle`` first looks for a ``_CST`` object under one of the ACPI
@@ -165,7 +168,7 @@ and ``idle=nomwait``. If any of them is present in the kernel command line, the
 ``MWAIT`` instruction is not allowed to be used, so the initialization of
 ``intel_idle`` will fail.
 
-Apart from that there are two module parameters recognized by ``intel_idle``
+Apart from that there are four module parameters recognized by ``intel_idle``
 itself that can be set via the kernel command line (they cannot be updated via
 sysfs, so that is the only way to change their values).
 
@@ -186,9 +189,28 @@ QoS) feature can be used to prevent ``CPUIdle`` from touching those idle states
 even if they have been enumerated (see :ref:`cpu-pm-qos` in :doc:`cpuidle`).
 Setting ``max_cstate`` to 0 causes the ``intel_idle`` initialization to fail.
 
-The ``noacpi`` module parameter (which is recognized by ``intel_idle`` if the
-kernel has been configured with ACPI support), can be set to make the driver
-ignore the system's ACPI tables entirely (it is unset by default).
+The ``no_acpi`` and ``use_acpi`` module parameters (recognized by ``intel_idle``
+if the kernel has been configured with ACPI support) can be set to make the
+driver ignore the system's ACPI tables entirely or use them for all of the
+recognized processor models, respectively (they both are unset by default and
+``use_acpi`` has no effect if ``no_acpi`` is set).
+
+The value of the ``states_off`` module parameter (0 by default) represents a
+list of idle states to be disabled by default in the form of a bitmask.
+
+Namely, the positions of the bits that are set in the ``states_off`` value are
+the indices of idle states to be disabled by default (as reflected by the names
+of the corresponding idle state directories in ``sysfs``, :file:`state0`,
+:file:`state1` ... :file:`state<i>` ..., where ``<i>`` is the index of the given
+idle state; see :ref:`idle-states-representation` in :doc:`cpuidle`).
+
+For example, if ``states_off`` is equal to 3, the driver will disable idle
+states 0 and 1 by default, and if it is equal to 8, idle state 3 will be
+disabled by default and so on (bit positions beyond the maximum idle state index
+are ignored).
+
+The idle states disabled this way can be enabled (on a per-CPU basis) from user
+space via ``sysfs``.
 
 
 .. _intel-idle-core-and-package-idle-states:
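
To make the bitmask semantics described above concrete, here is a small stand-alone C sketch that decodes a ``states_off``-style value exactly the way the text explains it; the sample masks are simply the 3 and 8 from the documentation's own example.

/*
 * Illustrative only: decodes a states_off-style bitmask as described
 * above (set bit i means "state<i>" is disabled by default).
 */
#include <stdio.h>

static void print_disabled(unsigned int states_off)
{
        printf("states_off=%u disables:", states_off);
        for (unsigned int i = 0; i < 8 * sizeof(states_off); i++)
                if (states_off & (1u << i))
                        printf(" state%u", i);
        printf("\n");
}

int main(void)
{
        print_disabled(3);      /* -> state0 state1, per the example above */
        print_disabled(8);      /* -> state3 */
        return 0;
}
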
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -63,6 +63,7 @@ static struct cpuidle_driver intel_idle_driver = {
 };
 /* intel_idle.max_cstate=0 disables driver */
 static int max_cstate = CPUIDLE_STATE_MAX - 1;
+static unsigned int disabled_states_mask;
 
 static unsigned int mwait_substates;
 
@@ -1131,6 +1132,10 @@ static bool no_acpi __read_mostly;
 module_param(no_acpi, bool, 0444);
 MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");
 
+static bool force_use_acpi __read_mostly; /* No effect if no_acpi is set. */
+module_param_named(use_acpi, force_use_acpi, bool, 0444);
+MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list");
+
 static struct acpi_processor_power acpi_state_table __initdata;
 
 /**
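
The hunk above uses module_param_named() so that the parameter is exposed as ``use_acpi`` while the code keeps the internal variable name ``force_use_acpi``. A minimal, hypothetical module showing that same pattern; the demo_* names, module text and log message are invented for illustration and are not part of this patch.

/*
 * Hypothetical sketch of the module_param_named() pattern used above:
 * the parameter appears as "use_acpi" on the command line and in sysfs
 * while the backing variable keeps its own name.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_force_use_acpi;
module_param_named(use_acpi, demo_force_use_acpi, bool, 0444);
MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list");

static int __init demo_init(void)
{
        pr_info("param_demo: use_acpi=%d\n", demo_force_use_acpi);
        return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
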
@@ -1230,6 +1235,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
                 if (cx->type > ACPI_STATE_C2)
                         state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
 
+                if (disabled_states_mask & BIT(cstate))
+                        state->flags |= CPUIDLE_FLAG_OFF;
+
                 state->enter = intel_idle;
                 state->enter_s2idle = intel_idle_s2idle;
         }
@@ -1258,6 +1266,8 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint)
         return true;
 }
 #else /* !CONFIG_ACPI_PROCESSOR_CSTATE */
+#define force_use_acpi (false)
+
 static inline bool intel_idle_acpi_cst_extract(void) { return false; }
 static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
 static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
@@ -1460,8 +1470,10 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
                 /* Structure copy. */
                 drv->states[drv->state_count] = cpuidle_state_table[cstate];
 
-                if (icpu->use_acpi && intel_idle_off_by_default(mwait_hint) &&
-                    !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE))
+                if ((disabled_states_mask & BIT(drv->state_count)) ||
+                    ((icpu->use_acpi || force_use_acpi) &&
+                     intel_idle_off_by_default(mwait_hint) &&
+                     !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
                         drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;
 
                 drv->state_count++;
@@ -1480,6 +1492,10 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
 static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
 {
         cpuidle_poll_state_init(drv);
+
+        if (disabled_states_mask & BIT(0))
+                drv->states[0].flags |= CPUIDLE_FLAG_OFF;
+
         drv->state_count = 1;
 
         if (icpu)
@@ -1607,7 +1623,7 @@ static int __init intel_idle_init(void)
         icpu = (const struct idle_cpu *)id->driver_data;
         if (icpu) {
                 cpuidle_state_table = icpu->state_table;
-                if (icpu->use_acpi)
+                if (icpu->use_acpi || force_use_acpi)
                         intel_idle_acpi_cst_extract();
         } else if (!intel_idle_acpi_cst_extract()) {
                 return -ENODEV;
@@ -1660,3 +1676,11 @@ device_initcall(intel_idle_init);
  * is the easiest way (currently) to continue doing that.
  */
 module_param(max_cstate, int, 0444);
+/*
+ * The positions of the bits that are set in this number are the indices of the
+ * idle states to be disabled by default (as reflected by the names of the
+ * corresponding idle state directories in sysfs, "state0", "state1" ...
+ * "state<i>" ..., where <i> is the index of the given state).
+ */
+module_param_named(states_off, disabled_states_mask, uint, 0444);
+MODULE_PARM_DESC(states_off, "Mask of disabled idle states");
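
Both parameters above are registered with mode 0444, so once the driver is loaded their current values should be readable, though not writable, under /sys/module/intel_idle/parameters/. A small user-space sketch under that assumption; the path layout is the usual one for module parameters, not something introduced by this patch.

/* Illustrative read-only check of the states_off parameter added above. */
#include <stdio.h>

int main(void)
{
        char buf[64];
        FILE *f = fopen("/sys/module/intel_idle/parameters/states_off", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("states_off = %s", buf);
        fclose(f);
        return 0;
}
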
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -14,7 +14,7 @@ menuconfig POWER_AVS
 
 config QCOM_CPR
         tristate "QCOM Core Power Reduction (CPR) support"
-        depends on POWER_AVS
+        depends on POWER_AVS && HAS_IOMEM
         select PM_OPP
         select REGMAP
         help
--- a/drivers/power/avs/qcom-cpr.c
+++ b/drivers/power/avs/qcom-cpr.c
@@ -517,7 +517,7 @@ static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
                 dev_dbg(drv->dev,
                         "UP: -> new_uV: %d last_uV: %d perf state: %u\n",
                         new_uV, last_uV, cpr_get_cur_perf_state(drv));
-        } else if (dir == DOWN) {
+        } else {
                 if (desc->clamp_timer_interval &&
                     error_steps < desc->down_threshold) {
                         /*
@@ -567,7 +567,7 @@ static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
                 /* Disable auto nack down */
                 reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
                 val = 0;
-        } else if (dir == DOWN) {
+        } else {
                 /* Restore default threshold for UP */
                 reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
                 reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
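
Both hunks above replace "else if (dir == DOWN)" with a plain "else". A reduced, stand-alone sketch of the pattern that clang's -Wsometimes-uninitialized objects to; the enumerators, values and function name are stand-ins, not the driver's real code.

/*
 * Illustration only.  With "else if", clang's flow analysis sees a path
 * on which neither branch runs, so reg_mask and val could be read before
 * ever being written, which is what -Wsometimes-uninitialized reports.
 * A plain "else" guarantees one branch initializes them on every path.
 */
enum voltage_change_dir { NO_CHANGE, UP, DOWN };

unsigned int pick_threshold(enum voltage_change_dir dir)
{
        unsigned int reg_mask, val;

        if (dir == UP) {
                reg_mask = 0x1;
                val = 1;
        } else {                /* previously: else if (dir == DOWN) */
                reg_mask = 0x2;
                val = 0;
        }

        return reg_mask & val;
}
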
@@ -1547,8 +1547,6 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
                 goto unlock;
         }
 
-        dev_dbg(drv->dev, "number of OPPs: %d\n", drv->num_corners);
-
         drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
                                     sizeof(*drv->corners),
                                     GFP_KERNEL);
@@ -1586,6 +1584,9 @@ static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
                                    acc_desc->enable_mask,
                                    acc_desc->enable_mask);
 
+        dev_info(drv->dev, "driver initialized with %u OPPs\n",
+                 drv->num_corners);
+
 unlock:
         mutex_unlock(&drv->lock);
 
--- a/drivers/soc/rockchip/io-domain.c
+++ b/drivers/soc/rockchip/io-domain.c
@@ -152,18 +152,18 @@ static void px30_iodomain_init(struct rockchip_iodomain *iod)
         int ret;
         u32 val;
 
-        /* if no VCCIO0 supply we should leave things alone */
+        /* if no VCCIO6 supply we should leave things alone */
         if (!iod->supplies[PX30_IO_VSEL_VCCIO6_SUPPLY_NUM].reg)
                 return;
 
         /*
-         * set vccio0 iodomain to also use this framework
+         * set vccio6 iodomain to also use this framework
          * instead of a special gpio.
          */
         val = PX30_IO_VSEL_VCCIO6_SRC | (PX30_IO_VSEL_VCCIO6_SRC << 16);
         ret = regmap_write(iod->grf, PX30_IO_VSEL, val);
         if (ret < 0)
-                dev_warn(iod->dev, "couldn't update vccio0 ctrl\n");
+                dev_warn(iod->dev, "couldn't update vccio6 ctrl\n");
 }
 
 static void rk3288_iodomain_init(struct rockchip_iodomain *iod)