2019-05-30 07:57:49 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2010-03-09 03:07:30 +08:00
|
|
|
/*
|
|
|
|
* intel_idle.c - native hardware idle loop for modern Intel processors
|
|
|
|
*
|
2020-02-07 01:45:49 +08:00
|
|
|
* Copyright (c) 2013 - 2020, Intel Corporation.
|
2010-03-09 03:07:30 +08:00
|
|
|
* Len Brown <len.brown@intel.com>
|
2020-02-07 01:45:49 +08:00
|
|
|
* Rafael J. Wysocki <rafael.j.wysocki@intel.com>
|
2010-03-09 03:07:30 +08:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
2020-10-12 20:50:33 +08:00
|
|
|
* intel_idle is a cpuidle driver that loads on all Intel CPUs with MWAIT
|
2010-03-09 03:07:30 +08:00
|
|
|
* in lieu of the legacy ACPI processor_idle driver. The intent is to
|
|
|
|
* make Linux more efficient on these processors, as intel_idle knows
|
|
|
|
* more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Design Assumptions
|
|
|
|
*
|
|
|
|
* All CPUs have same idle states as boot CPU
|
|
|
|
*
|
|
|
|
* Chipset BM_STS (bus master status) bit is a NOP
|
2020-10-12 20:50:33 +08:00
|
|
|
* for preventing entry into deep C-states
|
|
|
|
*
|
|
|
|
* CPU will flush caches as needed when entering a C-state via MWAIT
|
|
|
|
* (in contrast to entering ACPI C3, in which case the WBINVD
|
|
|
|
* instruction needs to be executed to flush the caches)
|
2010-03-09 03:07:30 +08:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Known limitations
|
|
|
|
*
|
|
|
|
* ACPI has a .suspend hack to turn off deep c-statees during suspend
|
|
|
|
* to avoid complications with the lapic timer workaround.
|
|
|
|
* Have not seen issues with suspend, but may need same workaround here.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* un-comment DEBUG to enable pr_debug() statements */
|
2021-01-16 07:56:46 +08:00
|
|
|
/* #define DEBUG */
|
2010-03-09 03:07:30 +08:00
|
|
|
|
2017-06-10 03:29:20 +08:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
2019-12-13 16:56:01 +08:00
|
|
|
#include <linux/acpi.h>
|
2010-03-09 03:07:30 +08:00
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/cpuidle.h>
|
2015-04-03 08:02:34 +08:00
|
|
|
#include <linux/tick.h>
|
2010-03-09 03:07:30 +08:00
|
|
|
#include <trace/events/power.h>
|
|
|
|
#include <linux/sched.h>
|
2022-06-15 05:15:58 +08:00
|
|
|
#include <linux/sched/smt.h>
|
2011-01-10 09:38:12 +08:00
|
|
|
#include <linux/notifier.h>
|
|
|
|
#include <linux/cpu.h>
|
2016-06-17 13:28:33 +08:00
|
|
|
#include <linux/moduleparam.h>
|
2012-01-26 07:09:07 +08:00
|
|
|
#include <asm/cpu_device_id.h>
|
2016-06-03 08:19:32 +08:00
|
|
|
#include <asm/intel-family.h>
|
2022-06-15 05:15:58 +08:00
|
|
|
#include <asm/nospec-branch.h>
|
2010-09-18 06:36:40 +08:00
|
|
|
#include <asm/mwait.h>
|
2011-01-19 09:48:27 +08:00
|
|
|
#include <asm/msr.h>
|
2010-03-09 03:07:30 +08:00
|
|
|
|
2020-02-07 01:45:49 +08:00
|
|
|
#define INTEL_IDLE_VERSION "0.5.1"
|
2010-03-09 03:07:30 +08:00
|
|
|
|
|
|
|
static struct cpuidle_driver intel_idle_driver = {
|
|
|
|
.name = "intel_idle",
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
};
|
|
|
|
/* intel_idle.max_cstate=0 disables driver */
|
2013-02-02 10:35:35 +08:00
|
|
|
static int max_cstate = CPUIDLE_STATE_MAX - 1;
|
2020-02-03 18:57:18 +08:00
|
|
|
static unsigned int disabled_states_mask;
|
intel_idle: add 'preferred_cstates' module argument
On Sapphire Rapids Xeon (SPR) the C1 and C1E states are basically mutually
exclusive - only one of them can be enabled. By default, 'intel_idle' driver
enables C1 and disables C1E. However, some users prefer to use C1E instead of
C1, because it saves more energy.
This patch adds a new module parameter ('preferred_cstates') for enabling C1E
and disabling C1. Here is the idea behind it.
1. This option has effect only for "mutually exclusive" C-states like C1 and
C1E on SPR.
2. It does not have any effect on independent C-states, which do not require
other C-states to be disabled (most states on most platforms as of today).
3. For mutually exclusive C-states, the 'intel_idle' driver always has a
reasonable default, such as enabling C1 on SPR by default. On other
platforms, the default may be different.
4. Users can override the default using the 'preferred_cstates' parameter.
5. The parameter accepts the preferred C-states bit-mask, similarly to the
existing 'states_off' parameter.
6. This parameter is not limited to C1/C1E, and leaves room for supporting
other mutually exclusive C-states, if they come in the future.
Today 'intel_idle' can only be compiled-in, which means that on SPR, in order
to disable C1 and enable C1E, users should boot with the following kernel
argument: intel_idle.preferred_cstates=4
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2022-03-02 16:15:59 +08:00
|
|
|
static unsigned int preferred_states_mask;
|
2010-03-09 03:07:30 +08:00
|
|
|
|
2020-02-07 01:45:18 +08:00
|
|
|
/* Per-CPU cpuidle device objects. */
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;

/* Auto-demotion enable bits to clear (copied from the matched idle_cpu). */
static unsigned long auto_demotion_disable_flags;

/*
 * How to treat the hardware C1E promotion setting: leave it as found,
 * enable it, or disable it.
 */
static enum {
	C1E_PROMOTION_PRESERVE,
	C1E_PROMOTION_ENABLE,
	C1E_PROMOTION_DISABLE
} c1e_promotion = C1E_PROMOTION_PRESERVE;
|
2020-02-07 01:41:24 +08:00
|
|
|
|
2012-01-26 07:09:07 +08:00
|
|
|
struct idle_cpu {
|
|
|
|
struct cpuidle_state *state_table;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Hardware C-state auto-demotion may not always be optimal.
|
|
|
|
* Indicate which enable bits to clear here.
|
|
|
|
*/
|
|
|
|
unsigned long auto_demotion_disable_flags;
|
2014-08-01 03:21:24 +08:00
|
|
|
bool byt_auto_demotion_disable_flag;
|
2013-02-02 14:31:56 +08:00
|
|
|
bool disable_promotion_to_c1e;
|
2019-12-13 16:56:21 +08:00
|
|
|
bool use_acpi;
|
2012-01-26 07:09:07 +08:00
|
|
|
};
|
|
|
|
|
2020-02-07 01:41:24 +08:00
|
|
|
/* Configuration matched for the boot CPU; used only during init. */
static const struct idle_cpu *icpu __initdata;

/* C-state table chosen for this platform; used only during init. */
static struct cpuidle_state *cpuidle_state_table __initdata;

/* MWAIT sub-state availability (from CPUID); used only during init. */
static unsigned int mwait_substates __initdata;
|
|
|
|
|
intel_idle: enable interrupts before C1 on Xeons
Enable local interrupts before requesting C1 on the last two generations
of Intel Xeon platforms: Sky Lake, Cascade Lake, Cooper Lake, Ice Lake.
This decreases average C1 interrupt latency by about 5-10%, as measured
with the 'wult' tool.
The '->enter()' function of the driver enters C-states with local
interrupts disabled by executing the 'monitor' and 'mwait' pair of
instructions. If an interrupt happens, the CPU exits the C-state and
continues executing instructions after 'mwait'. It does not jump to
the interrupt handler, because local interrupts are disabled. The
cpuidle subsystem enables interrupts a bit later, after doing some
housekeeping.
With this patch, we enable local interrupts before requesting C1. In
this case, if the CPU wakes up because of an interrupt, it will jump
to the interrupt handler right away. The cpuidle housekeeping will be
done after the pending interrupt(s) are handled.
Enabling interrupts before entering a C-state has measurable impact
for faster C-states, like C1. Deeper, but slower C-states like C6 do
not really benefit from this sort of change, because their latency is
a lot higher comparing to the delay added by cpuidle housekeeping.
This change was also tested with cyclictest and dbench. In case of Ice
Lake, the average cyclictest latency decreased by 5.1%, and the average
'dbench' throughput increased by about 0.8%. Both tests were run for 4
hours with only C1 enabled (all other idle states, including 'POLL',
were disabled). CPU frequency was pinned to HFM, and uncore frequency
was pinned to the maximum value. The other platforms had similar
single-digit percentage improvements.
It is worth noting that this patch affects 'cpuidle' statistics a tiny
bit. Before this patch, C1 residency did not include the interrupt
handling time, but with this patch, it will include it. This is similar
to what happens in case of the 'POLL' state, which also runs with
interrupts enabled.
Suggested-by: Len Brown <len.brown@intel.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2021-09-17 15:20:22 +08:00
|
|
|
/*
 * Enable interrupts before entering the C-state.  On some platforms and for
 * some C-states, this may measurably decrease interrupt latency.
 */
#define CPUIDLE_FLAG_IRQ_ENABLE		BIT(14)

/*
 * Enable this state by default even if the ACPI _CST does not list it.
 */
#define CPUIDLE_FLAG_ALWAYS_ENABLE	BIT(15)

/*
 * Disable IBRS across idle (when KERNEL_IBRS).  Mutually exclusive with
 * CPUIDLE_FLAG_IRQ_ENABLE above.
 */
#define CPUIDLE_FLAG_IBRS		BIT(16)
|
|
|
|
|
2013-02-01 08:55:37 +08:00
|
|
|
/*
 * MWAIT takes an 8-bit "hint" in EAX "suggesting"
 * the C-state (top nibble) and sub-state (bottom nibble)
 * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
 *
 * We store the hint at the top of our "flags" for each state.
 */
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
/* Parenthesize the macro argument so compound expressions expand safely. */
#define MWAIT2flg(eax) (((eax) & 0xFF) << 24)
|
|
|
|
|
2022-06-08 22:27:27 +08:00
|
|
|
static __always_inline int __intel_idle(struct cpuidle_device *dev,
|
|
|
|
struct cpuidle_driver *drv, int index)
|
|
|
|
{
|
|
|
|
struct cpuidle_state *state = &drv->states[index];
|
|
|
|
unsigned long eax = flg2MWAIT(state->flags);
|
|
|
|
unsigned long ecx = 1; /* break on interrupt flag */
|
|
|
|
|
|
|
|
mwait_idle_with_hints(eax, ecx);
|
|
|
|
|
|
|
|
return index;
|
|
|
|
}
|
|
|
|
|
2020-02-07 01:41:15 +08:00
|
|
|
/**
|
|
|
|
* intel_idle - Ask the processor to enter the given idle state.
|
|
|
|
* @dev: cpuidle device of the target CPU.
|
|
|
|
* @drv: cpuidle driver (assumed to point to intel_idle_driver).
|
|
|
|
* @index: Target idle state index.
|
|
|
|
*
|
|
|
|
* Use the MWAIT instruction to notify the processor that the CPU represented by
|
|
|
|
* @dev is idle and it can try to enter the idle state corresponding to @index.
|
|
|
|
*
|
|
|
|
* If the local APIC timer is not known to be reliable in the target idle state,
|
|
|
|
* enable one-shot tick broadcasting for the target CPU before executing MWAIT.
|
|
|
|
*
|
|
|
|
* Must be called under local_irq_disable().
|
|
|
|
*/
|
|
|
|
static __cpuidle int intel_idle(struct cpuidle_device *dev,
|
|
|
|
struct cpuidle_driver *drv, int index)
|
|
|
|
{
|
2022-06-08 22:27:27 +08:00
|
|
|
return __intel_idle(dev, drv, index);
|
|
|
|
}
|
2020-02-07 01:41:15 +08:00
|
|
|
|
2022-06-08 22:27:27 +08:00
|
|
|
/*
 * Idle-entry variant for states flagged CPUIDLE_FLAG_IRQ_ENABLE: run MWAIT
 * with local interrupts enabled so a wakeup interrupt is serviced right
 * away, then restore the interrupts-disabled state the caller expects.
 */
static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv, int index)
{
	int rc;

	raw_local_irq_enable();
	rc = __intel_idle(dev, drv, index);
	raw_local_irq_disable();

	return rc;
}
|
|
|
|
|
2022-06-15 05:15:58 +08:00
|
|
|
static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
|
|
|
|
struct cpuidle_driver *drv, int index)
|
|
|
|
{
|
|
|
|
bool smt_active = sched_smt_active();
|
|
|
|
u64 spec_ctrl = spec_ctrl_current();
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (smt_active)
|
|
|
|
wrmsrl(MSR_IA32_SPEC_CTRL, 0);
|
|
|
|
|
|
|
|
ret = __intel_idle(dev, drv, index);
|
|
|
|
|
|
|
|
if (smt_active)
|
|
|
|
wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2020-02-07 01:41:15 +08:00
|
|
|
/**
|
|
|
|
* intel_idle_s2idle - Ask the processor to enter the given idle state.
|
|
|
|
* @dev: cpuidle device of the target CPU.
|
|
|
|
* @drv: cpuidle driver (assumed to point to intel_idle_driver).
|
|
|
|
* @index: Target idle state index.
|
|
|
|
*
|
|
|
|
* Use the MWAIT instruction to notify the processor that the CPU represented by
|
|
|
|
* @dev is idle and it can try to enter the idle state corresponding to @index.
|
|
|
|
*
|
|
|
|
* Invoked as a suspend-to-idle callback routine with frozen user space, frozen
|
|
|
|
* scheduler tick and suspended scheduler clock on the target CPU.
|
|
|
|
*/
|
2020-07-27 11:25:46 +08:00
|
|
|
static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
|
|
|
|
struct cpuidle_driver *drv, int index)
|
2020-02-07 01:41:15 +08:00
|
|
|
{
|
|
|
|
unsigned long eax = flg2MWAIT(drv->states[index].flags);
|
|
|
|
unsigned long ecx = 1; /* break on interrupt flag */
|
|
|
|
|
|
|
|
mwait_idle_with_hints(eax, ecx);
|
2020-07-27 11:25:46 +08:00
|
|
|
|
|
|
|
return 0;
|
2020-02-07 01:41:15 +08:00
|
|
|
}
|
|
|
|
|
2010-03-09 03:07:30 +08:00
|
|
|
/*
|
|
|
|
* States are indexed by the cstate number,
|
|
|
|
* which is also the index into the MWAIT hint array.
|
|
|
|
* Thus C0 is a dummy.
|
|
|
|
*/
|
2020-02-07 01:45:06 +08:00
|
|
|
static struct cpuidle_state nehalem_cstates[] __initdata = {
|
2013-02-02 12:37:30 +08:00
|
|
|
{
|
2017-03-01 05:32:44 +08:00
|
|
|
.name = "C1",
|
2010-03-09 03:07:30 +08:00
|
|
|
.desc = "MWAIT 0x00",
|
2014-11-12 23:03:50 +08:00
|
|
|
.flags = MWAIT2flg(0x00),
|
2010-03-09 03:07:30 +08:00
|
|
|
.exit_latency = 3,
|
|
|
|
.target_residency = 6,
|
2015-02-11 12:04:17 +08:00
|
|
|
.enter = &intel_idle,
|
2017-08-10 06:14:45 +08:00
|
|
|
.enter_s2idle = intel_idle_s2idle, },
|
2013-02-02 14:31:56 +08:00
|
|
|
{
|
2017-03-01 05:32:44 +08:00
|
|
|
.name = "C1E",
|
2013-02-02 14:31:56 +08:00
|
|
|
.desc = "MWAIT 0x01",
|
2019-12-13 16:56:38 +08:00
|
|
|
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
|
2013-02-02 14:31:56 +08:00
|
|
|
.exit_latency = 10,
|
|
|
|
.target_residency = 20,
|
2015-02-11 12:04:17 +08:00
|
|
|
.enter = &intel_idle,
|
2017-08-10 06:14:45 +08:00
|
|
|
.enter_s2idle = intel_idle_s2idle, },
|
2013-02-02 12:37:30 +08:00
|
|
|
{
|
2017-03-01 05:32:44 +08:00
|
|
|
.name = "C3",
|
2010-03-09 03:07:30 +08:00
|
|
|
.desc = "MWAIT 0x10",
|
2014-11-12 23:03:50 +08:00
|
|
|
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
|
2010-03-09 03:07:30 +08:00
|
|
|
.exit_latency = 20,
|
|
|
|
.target_residency = 80,
|
2015-02-11 12:04:17 +08:00
|
|
|
.enter = &intel_idle,
|
2017-08-10 06:14:45 +08:00
|
|
|
.enter_s2idle = intel_idle_s2idle, },
|
2013-02-02 12:37:30 +08:00
|
|
|
{
|
2017-03-01 05:32:44 +08:00
|
|
|
.name = "C6",
|
2010-03-09 03:07:30 +08:00
|
|
|
.desc = "MWAIT 0x20",
|
2014-11-12 23:03:50 +08:00
|
|
|
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
|
2010-03-09 03:07:30 +08:00
|
|
|
.exit_latency = 200,
|
|
|
|
.target_residency = 800,
|
2015-02-11 12:04:17 +08:00
|
|
|
.enter = &intel_idle,
|
2017-08-10 06:14:45 +08:00
|
|
|
.enter_s2idle = intel_idle_s2idle, },
|
2013-02-02 12:37:30 +08:00
|
|
|
{
|
|
|
|
.enter = NULL }
|
2010-03-09 03:07:30 +08:00
|
|
|
};
|
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
static struct cpuidle_state snb_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 211,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 104,
		.target_residency = 345,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C7",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 109,
		.target_residency = 345,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.enter = NULL	/* table terminator */
	}
};
|
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
static struct cpuidle_state byt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C6N",
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 275,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C6S",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 500,
		.target_residency = 560,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C7S",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.enter = NULL	/* table terminator */
	}
};
|
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
static struct cpuidle_state cht_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C6N",
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 275,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C6S",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 560,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C7S",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.enter = NULL	/* table terminator */
	}
};
|
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
static struct cpuidle_state ivb_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 300,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C7",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 87,
		.target_residency = 300,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.enter = NULL	/* table terminator */
	}
};
|
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
static struct cpuidle_state ivt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 80,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 82,
		.target_residency = 300,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.enter = NULL	/* table terminator */
	}
};
|
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
static struct cpuidle_state ivt_cstates_4s[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 250,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 300,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 84,
		.target_residency = 400,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.enter = NULL	/* table terminator */
	}
};
|
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
static struct cpuidle_state ivt_cstates_8s[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 500,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 600,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 88,
		.target_residency = 700,
		.enter = intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.enter = NULL	/* table terminator */
	}
};
|
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
/*
 * Idle state table for Haswell processors. Latency and residency values
 * are in microseconds (cpuidle convention); the MWAIT hint for each state
 * is carried in .flags via MWAIT2flg() and mirrored in .desc.
 * Terminated by an entry with a NULL .enter callback.
 */
static struct cpuidle_state hsw_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		/* always registered as usable by default */
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		/* TLB contents do not survive this state */
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 33,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 166,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 600,
		.target_residency = 1800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: end of table */
};
|
2020-02-07 01:45:06 +08:00
|
|
|
/*
 * Idle state table for Broadwell processors. Same state set as Haswell
 * (hsw_cstates) with a retuned C3 exit latency. Latencies/residencies in
 * microseconds; terminated by a NULL .enter entry.
 */
static struct cpuidle_state bdw_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 40,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 166,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 600,
		.target_residency = 1800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: end of table */
};
|
2013-02-01 03:40:49 +08:00
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
/*
 * Idle state table for Skylake (client) processors. Deep states (C6 and
 * below) additionally carry CPUIDLE_FLAG_IBRS, tying IBRS handling to
 * entry into those states. Latencies/residencies in microseconds;
 * terminated by a NULL .enter entry.
 */
static struct cpuidle_state skl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 70,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		/* CPUIDLE_FLAG_IBRS applies from C6 downward on this platform */
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 85,
		.target_residency = 200,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x33",
		.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 124,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 480,
		.target_residency = 5000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 890,
		.target_residency = 5000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: end of table */
};
|
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
/*
 * Idle state table for Skylake Xeon (server) processors. C1 carries
 * CPUIDLE_FLAG_IRQ_ENABLE: local interrupts are enabled before the MWAIT
 * request, so an interrupt wakes straight into its handler (lowers average
 * C1 interrupt latency). Latencies/residencies in microseconds;
 * terminated by a NULL .enter entry.
 */
static struct cpuidle_state skx_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 133,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: end of table */
};
|
|
|
|
|
2020-07-10 12:12:01 +08:00
|
|
|
/*
 * Idle state table for Ice Lake Xeon processors. As on Skylake Xeon, C1
 * uses CPUIDLE_FLAG_IRQ_ENABLE so interrupts are enabled before the MWAIT
 * request. Latencies/residencies in microseconds; terminated by a NULL
 * .enter entry.
 */
static struct cpuidle_state icx_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 4,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 170,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: end of table */
};
|
|
|
|
|
2022-04-15 17:39:51 +08:00
|
|
|
/*
 * On AlderLake C1 has to be disabled if C1E is enabled, and vice versa.
 * C1E is enabled only if "C1E promotion" bit is set in MSR_IA32_POWER_CTL.
 * But in this case there is effectively no C1, because C1 requests are
 * promoted to C1E. If the "C1E promotion" bit is cleared, then both C1
 * and C1E requests end up with C1, so there is effectively no C1E.
 *
 * By default we enable C1E and disable C1 by marking it with
 * 'CPUIDLE_FLAG_UNUSABLE'.
 */
static struct cpuidle_state adl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		/* UNUSABLE: not registered by default (see comment above) */
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 220,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 280,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 680,
		.target_residency = 2000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: end of table */
};
|
|
|
|
|
|
|
|
/*
 * Idle state table for Alder Lake-L. Same C1/C1E mutual-exclusion scheme
 * as adl_cstates: C1 is marked CPUIDLE_FLAG_UNUSABLE so only C1E is
 * registered by default. Latencies/residencies in microseconds;
 * terminated by a NULL .enter entry.
 */
static struct cpuidle_state adl_l_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 170,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 230,
		.target_residency = 700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: end of table */
};
|
|
|
|
|
intel_idle: add SPR support
Add Sapphire Rapids Xeon support.
Up until very recently, the C1 and C1E C-states were independent, but this
has changed in some new chips, including Sapphire Rapids Xeon (SPR). In these
chips the C1 and C1E states cannot be enabled at the same time. The "C1E
promotion" bit in 'MSR_IA32_POWER_CTL' also has its semantics changed a bit.
Here are the C1, C1E, and "C1E promotion" bit rules on Xeons before SPR.
1. If C1E promotion bit is disabled.
a. C1 requests end up with C1 C-state.
b. C1E requests end up with C1E C-state.
2. If C1E promotion bit is enabled.
a. C1 requests end up with C1E C-state.
b. C1E requests end up with C1E C-state.
Here are the C1, C1E, and "C1E promotion" bit rules on Sapphire Rapids Xeon.
1. If C1E promotion bit is disabled.
a. C1 requests end up with C1 C-state.
b. C1E requests end up with C1 C-state.
2. If C1E promotion bit is enabled.
a. C1 requests end up with C1E C-state.
b. C1E requests end up with C1E C-state.
Before SPR Xeon, the 'intel_idle' driver was disabling C1E promotion and was
exposing C1 and C1E as independent C-states. But on SPR, C1 and C1E cannot be
enabled at the same time.
This patch adds both C1 and C1E states. However, C1E is marked as with the
"CPUIDLE_FLAG_UNUSABLE" flag, which means that in won't be registered by
default. The C1E promotion bit will be cleared, which means that by default
only C1 and C6 will be registered on SPR.
The next patch will add an option for enabling C1E and disabling C1 on SPR.
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2022-03-02 16:15:58 +08:00
|
|
|
/*
 * On Sapphire Rapids Xeon C1 has to be disabled if C1E is enabled, and vice
 * versa. On SPR C1E is enabled only if "C1E promotion" bit is set in
 * MSR_IA32_POWER_CTL. But in this case there effectively no C1, because C1
 * requests are promoted to C1E. If the "C1E promotion" bit is cleared, then
 * both C1 and C1E requests end up with C1, so there is effectively no C1E.
 *
 * By default we enable C1 and disable C1E by marking it with
 * 'CPUIDLE_FLAG_UNUSABLE'.
 */
static struct cpuidle_state spr_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		/* UNUSABLE: C1E is not registered by default (see comment above) */
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE |
					   CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 290,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: end of table */
};
|
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
/*
 * Idle state table for Intel Atom processors. Note the state exposed as
 * "C1E" here uses MWAIT hint 0x00, and "C6" uses the sub-state hint 0x52.
 * Latencies/residencies in microseconds; terminated by a NULL .enter
 * entry.
 */
static struct cpuidle_state atom_cstates[] __initdata = {
	{
		.name = "C1E",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C2",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10),
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C4",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: end of table */
};
|
2020-02-07 01:45:06 +08:00
|
|
|
/*
 * Idle state table for Tangier (Atom-based SoC) processors. Includes the
 * very deep C9 state (10 ms exit latency). Latencies/residencies in
 * microseconds; terminated by a NULL .enter entry.
 */
static struct cpuidle_state tangier_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C4",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: end of table */
};
|
2020-02-07 01:45:06 +08:00
|
|
|
/*
 * Idle state table for Avoton (Atom server) processors: just C1 and a
 * shallow-latency C6 (MWAIT hint 0x51). Latencies/residencies in
 * microseconds; terminated by a NULL .enter entry.
 */
static struct cpuidle_state avn_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x51",
		.flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 15,
		.target_residency = 45,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: end of table */
};
|
2020-02-07 01:45:06 +08:00
|
|
|
/*
 * Idle state table for Knights Landing (Xeon Phi) processors: C1 plus a
 * C6 state requested with MWAIT hint 0x10. Latencies/residencies in
 * microseconds; terminated by a NULL .enter entry.
 */
static struct cpuidle_state knl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle },
	{
		.name = "C6",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 120,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle },
	{
		.enter = NULL }	/* sentinel: end of table */
};
|
2010-03-09 03:07:30 +08:00
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
/*
 * Idle states for Broxton/Goldmont Atoms (matched below as ATOM_GOLDMONT
 * and ATOM_GOLDMONT_PLUS via idle_cpu_bxt).
 *
 * The exit_latency/target_residency values of the C6..C10 entries are
 * placeholders: bxt_idle_state_table_update() overwrites them at init time
 * from the per-state IRTL MSRs when those report a nonzero limit.
 */
static struct cpuidle_state bxt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 133,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x31",
		.flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 155,
		.target_residency = 155,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1000,
		.target_residency = 1000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2000,
		.target_residency = 2000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 10000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: terminates the table */
};
|
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
/*
 * Idle states for the Denverton Atom server SoC (matched below as
 * ATOM_GOLDMONT_D via idle_cpu_dnv). Latencies/residencies are in us.
 */
static struct cpuidle_state dnv_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 50,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: terminates the table */
};
|
|
|
|
|
2020-12-27 18:11:16 +08:00
|
|
|
/*
 * Note, depending on HW and FW revision, SnowRidge SoC may or may not support
 * C6, and this is indicated in the CPUID mwait leaf.
 */
static struct cpuidle_state snr_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 15,
		.target_residency = 25,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 130,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }	/* sentinel: terminates the table */
};
|
|
|
|
|
2020-02-07 01:45:06 +08:00
|
|
|
/*
 * Per-CPU-model configuration objects referenced from the intel_idle_ids
 * match table below: each selects a C-state table and the init-time
 * adjustments (auto-demotion disabling, C1E-promotion disabling, whether
 * to also consult ACPI _CST) for that model.
 */
static const struct idle_cpu idle_cpu_nehalem __initconst = {
	.state_table = nehalem_cstates,
	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
	.disable_promotion_to_c1e = true,
};

/* Same as idle_cpu_nehalem, but also honors ACPI _CST. */
static const struct idle_cpu idle_cpu_nhx __initconst = {
	.state_table = nehalem_cstates,
	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_atom __initconst = {
	.state_table = atom_cstates,
};

static const struct idle_cpu idle_cpu_tangier __initconst = {
	.state_table = tangier_cstates,
};

static const struct idle_cpu idle_cpu_lincroft __initconst = {
	.state_table = atom_cstates,
	.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
};

static const struct idle_cpu idle_cpu_snb __initconst = {
	.state_table = snb_cstates,
	.disable_promotion_to_c1e = true,
};

/* Same as idle_cpu_snb, but also honors ACPI _CST. */
static const struct idle_cpu idle_cpu_snx __initconst = {
	.state_table = snb_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_byt __initconst = {
	.state_table = byt_cstates,
	.disable_promotion_to_c1e = true,
	.byt_auto_demotion_disable_flag = true,
};

static const struct idle_cpu idle_cpu_cht __initconst = {
	.state_table = cht_cstates,
	.disable_promotion_to_c1e = true,
	.byt_auto_demotion_disable_flag = true,
};

static const struct idle_cpu idle_cpu_ivb __initconst = {
	.state_table = ivb_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_ivt __initconst = {
	.state_table = ivt_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_hsw __initconst = {
	.state_table = hsw_cstates,
	.disable_promotion_to_c1e = true,
};

/* Same as idle_cpu_hsw, but also honors ACPI _CST. */
static const struct idle_cpu idle_cpu_hsx __initconst = {
	.state_table = hsw_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_bdw __initconst = {
	.state_table = bdw_cstates,
	.disable_promotion_to_c1e = true,
};

/* Same as idle_cpu_bdw, but also honors ACPI _CST. */
static const struct idle_cpu idle_cpu_bdx __initconst = {
	.state_table = bdw_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_skl __initconst = {
	.state_table = skl_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_skx __initconst = {
	.state_table = skx_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_icx __initconst = {
	.state_table = icx_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_adl __initconst = {
	.state_table = adl_cstates,
};

static const struct idle_cpu idle_cpu_adl_l __initconst = {
	.state_table = adl_l_cstates,
};
|
|
|
|
|
intel_idle: add SPR support
Add Sapphire Rapids Xeon support.
Up until very recently, the C1 and C1E C-states were independent, but this
has changed in some new chips, including Sapphire Rapids Xeon (SPR). In these
chips the C1 and C1E states cannot be enabled at the same time. The "C1E
promotion" bit in 'MSR_IA32_POWER_CTL' also has its semantics changed a bit.
Here are the C1, C1E, and "C1E promotion" bit rules on Xeons before SPR.
1. If C1E promotion bit is disabled.
a. C1 requests end up with C1 C-state.
b. C1E requests end up with C1E C-state.
2. If C1E promotion bit is enabled.
a. C1 requests end up with C1E C-state.
b. C1E requests end up with C1E C-state.
Here are the C1, C1E, and "C1E promotion" bit rules on Sapphire Rapids Xeon.
1. If C1E promotion bit is disabled.
a. C1 requests end up with C1 C-state.
b. C1E requests end up with C1 C-state.
2. If C1E promotion bit is enabled.
a. C1 requests end up with C1E C-state.
b. C1E requests end up with C1E C-state.
Before SPR Xeon, the 'intel_idle' driver was disabling C1E promotion and was
exposing C1 and C1E as independent C-states. But on SPR, C1 and C1E cannot be
enabled at the same time.
This patch adds both C1 and C1E states. However, C1E is marked as with the
"CPUIDLE_FLAG_UNUSABLE" flag, which means that in won't be registered by
default. The C1E promotion bit will be cleared, which means that by default
only C1 and C6 will be registered on SPR.
The next patch will add an option for enabling C1E and disabling C1 on SPR.
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2022-03-02 16:15:58 +08:00
|
|
|
/*
 * More per-CPU-model configuration objects for the intel_idle_ids match
 * table below (state table selection plus init-time MSR adjustments).
 */
static const struct idle_cpu idle_cpu_spr __initconst = {
	.state_table = spr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_avn __initconst = {
	.state_table = avn_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_knl __initconst = {
	.state_table = knl_cstates,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_bxt __initconst = {
	.state_table = bxt_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_dnv __initconst = {
	.state_table = dnv_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_snr __initconst = {
	.state_table = snr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};
|
|
|
|
|
2015-03-26 05:15:14 +08:00
|
|
|
/*
 * Map of supported CPU models to their idle_cpu configuration.
 * Terminated by an empty entry.
 */
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,		&idle_cpu_nhx),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,		&idle_cpu_nehalem),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_G,		&idle_cpu_nehalem),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE,		&idle_cpu_nehalem),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP,		&idle_cpu_nhx),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX,		&idle_cpu_nhx),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL,	&idle_cpu_atom),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL_MID,	&idle_cpu_lincroft),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX,		&idle_cpu_nhx),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&idle_cpu_snb),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&idle_cpu_snx),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL,	&idle_cpu_atom),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&idle_cpu_byt),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &idle_cpu_tangier),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	&idle_cpu_cht),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&idle_cpu_ivb),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&idle_cpu_ivt),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&idle_cpu_hsw),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&idle_cpu_hsx),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&idle_cpu_hsw),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&idle_cpu_hsw),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D,	&idle_cpu_avn),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&idle_cpu_bdw),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&idle_cpu_bdw),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&idle_cpu_bdx),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&idle_cpu_bdx),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&idle_cpu_skl),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&idle_cpu_skl),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&idle_cpu_skl),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&idle_cpu_skl),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&idle_cpu_skx),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&idle_cpu_icx),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&idle_cpu_icx),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		&idle_cpu_adl),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		&idle_cpu_adl_l),
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&idle_cpu_spr),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&idle_cpu_knl),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&idle_cpu_knl),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&idle_cpu_bxt),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,	&idle_cpu_bxt),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&idle_cpu_dnv),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&idle_cpu_snr),
	{}
};
|
|
|
|
|
2019-12-13 16:56:01 +08:00
|
|
|
/* Fallback match: any Intel Family 6 CPU that advertises MWAIT. */
static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
	X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
	{}
};
|
|
|
|
|
2020-01-10 18:51:22 +08:00
|
|
|
/*
 * intel_idle_max_cstate_reached - Has the max_cstate limit been hit?
 * @cstate: zero-based index of the C-state about to be registered.
 *
 * Returns true (and logs once per call) when registering @cstate would
 * exceed the module-parameter limit 'max_cstate'.
 */
static bool __init intel_idle_max_cstate_reached(int cstate)
{
	if (cstate < max_cstate)
		return false;

	pr_info("max_cstate %d reached\n", max_cstate);
	return true;
}
|
|
|
|
|
2020-11-30 19:54:34 +08:00
|
|
|
/*
 * intel_idle_state_needs_timer_stop - Does @state require tick broadcast?
 * @state: idle state to check.
 *
 * With the ARAT (Always Running APIC Timer) feature the local timer keeps
 * ticking in deep C-states, so no broadcast is needed. Otherwise, switch
 * over to one-shot tick broadcast for any target C-state deeper than C1,
 * i.e. whenever the MWAIT hint's C-state field is nonzero.
 */
static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
{
	unsigned long mwait_hint = flg2MWAIT(state->flags);

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return false;

	return ((mwait_hint >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) != 0;
}
|
|
|
|
|
2019-12-13 16:56:01 +08:00
|
|
|
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
#include <acpi/processor.h>

/* Module parameter: never consult ACPI _CST when building the state list. */
static bool no_acpi __read_mostly;
module_param(no_acpi, bool, 0444);
MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");

/* Module parameter: prefer ACPI _CST even when a static table matches. */
static bool force_use_acpi __read_mostly; /* No effect if no_acpi is set. */
module_param_named(use_acpi, force_use_acpi, bool, 0444);
MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list");

/* _CST data extracted during init; consumed by the helpers below. */
static struct acpi_processor_power acpi_state_table __initdata;
|
2019-12-13 16:56:01 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* intel_idle_cst_usable - Check if the _CST information can be used.
|
|
|
|
*
|
|
|
|
* Check if all of the C-states listed by _CST in the max_cstate range are
|
|
|
|
* ACPI_CSTATE_FFH, which means that they should be entered via MWAIT.
|
|
|
|
*/
|
2020-01-10 18:51:22 +08:00
|
|
|
static bool __init intel_idle_cst_usable(void)
|
2019-12-13 16:56:01 +08:00
|
|
|
{
|
|
|
|
int cstate, limit;
|
|
|
|
|
|
|
|
limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1),
|
|
|
|
acpi_state_table.count);
|
|
|
|
|
|
|
|
for (cstate = 1; cstate < limit; cstate++) {
|
|
|
|
struct acpi_processor_cx *cx = &acpi_state_table.states[cstate];
|
|
|
|
|
|
|
|
if (cx->entry_method != ACPI_CSTATE_FFH)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-01-10 18:51:22 +08:00
|
|
|
/*
 * intel_idle_acpi_cst_extract - Populate acpi_state_table from ACPI _CST.
 *
 * Walk all possible CPUs until one yields a usable _CST package, then try
 * to take ownership of C-state control from the platform firmware.
 * Returns true on success; on any failure, resets acpi_state_table.count
 * to 0 and returns false.
 */
static bool __init intel_idle_acpi_cst_extract(void)
{
	unsigned int cpu;

	/* Honor the 'no_acpi' module parameter. */
	if (no_acpi) {
		pr_debug("Not allowed to use ACPI _CST\n");
		return false;
	}

	for_each_possible_cpu(cpu) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (!pr)
			continue;

		if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table))
			continue;

		/* Account for the dummy state 0 entry in the table. */
		acpi_state_table.count++;

		if (!intel_idle_cst_usable())
			continue;

		/* If claiming _CST control fails, give up entirely. */
		if (!acpi_processor_claim_cst_control())
			break;

		return true;
	}

	acpi_state_table.count = 0;
	pr_debug("ACPI _CST not found or not usable\n");
	return false;
}
|
|
|
|
|
2020-01-10 18:51:22 +08:00
|
|
|
/*
 * intel_idle_init_cstates_acpi - Build the driver's state list from _CST.
 * @drv: cpuidle driver whose states[] array is populated in place.
 *
 * Appends one cpuidle state per usable _CST entry (starting at index 1;
 * entry 0 is a placeholder), up to the max_cstate module-parameter limit.
 */
static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
{
	int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);

	/*
	 * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
	 * the interesting states are ACPI_CSTATE_FFH.
	 */
	for (cstate = 1; cstate < limit; cstate++) {
		struct acpi_processor_cx *cx;
		struct cpuidle_state *state;

		if (intel_idle_max_cstate_reached(cstate - 1))
			break;

		cx = &acpi_state_table.states[cstate];

		state = &drv->states[drv->state_count++];

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
		strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		/*
		 * For C1-type C-states use the same number for both the exit
		 * latency and target residency, because that is the case for
		 * C1 in the majority of the static C-states tables above.
		 * For the other types of C-states, however, set the target
		 * residency to 3 times the exit latency which should lead to
		 * a reasonable balance between energy-efficiency and
		 * performance in the majority of interesting cases.
		 */
		state->target_residency = cx->latency;
		if (cx->type > ACPI_STATE_C1)
			state->target_residency *= 3;

		/* The _CST FFH address is the MWAIT hint for this state. */
		state->flags = MWAIT2flg(cx->address);
		if (cx->type > ACPI_STATE_C2)
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;

		/* Honor the 'states_off' mask supplied on the command line. */
		if (disabled_states_mask & BIT(cstate))
			state->flags |= CPUIDLE_FLAG_OFF;

		if (intel_idle_state_needs_timer_stop(state))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		state->enter = intel_idle;
		state->enter_s2idle = intel_idle_s2idle;
	}
}
|
2019-12-13 16:56:21 +08:00
|
|
|
|
2020-01-10 18:51:22 +08:00
|
|
|
/*
 * intel_idle_off_by_default - Should the state for @mwait_hint start off?
 * @mwait_hint: MWAIT hint of a static-table C-state.
 *
 * Returns true when _CST data exists but does not list a state with this
 * MWAIT hint, i.e. the platform does not advertise it. With no _CST data
 * at all, nothing is disabled by default.
 */
static bool __init intel_idle_off_by_default(u32 mwait_hint)
{
	int i, limit;

	if (!acpi_state_table.count)
		return false;

	limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
	/*
	 * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
	 * the interesting states are ACPI_CSTATE_FFH and their 'address' field
	 * holds the MWAIT hint.
	 */
	for (i = 1; i < limit; i++) {
		if (acpi_state_table.states[i].address == mwait_hint)
			return false;
	}

	return true;
}
|
2019-12-13 16:56:01 +08:00
|
|
|
#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */
/* Without ACPI _CST support, never force the ACPI path. */
#define force_use_acpi	(false)

/* Stubs so the callers compile unchanged when _CST support is disabled. */
static inline bool intel_idle_acpi_cst_extract(void) { return false; }
static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
|
|
|
|
|
2020-02-07 01:45:29 +08:00
|
|
|
/**
 * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
 *
 * Tune IVT multi-socket targets.
 * Assumption: num_sockets == (max_package_num + 1).
 */
static void __init ivt_idle_state_table_update(void)
{
	/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
	int cpu, package_num, num_sockets = 1;

	for_each_online_cpu(cpu) {
		package_num = topology_physical_package_id(cpu);
		if (package_num + 1 > num_sockets) {
			num_sockets = package_num + 1;

			/* More than 4 sockets: decided, stop scanning. */
			if (num_sockets > 4) {
				cpuidle_state_table = ivt_cstates_8s;
				return;
			}
		}
	}

	if (num_sockets > 2)
		cpuidle_state_table = ivt_cstates_4s;

	/* else, 1 and 2 socket systems use default ivt_cstates */
}
|
2016-04-07 05:00:47 +08:00
|
|
|
|
2020-01-17 18:46:24 +08:00
|
|
|
/**
|
|
|
|
* irtl_2_usec - IRTL to microseconds conversion.
|
|
|
|
* @irtl: IRTL MSR value.
|
|
|
|
*
|
|
|
|
* Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds.
|
2016-04-07 05:00:47 +08:00
|
|
|
*/
|
2020-01-10 18:51:22 +08:00
|
|
|
static unsigned long long __init irtl_2_usec(unsigned long long irtl)
|
2016-04-07 05:00:47 +08:00
|
|
|
{
|
2020-01-17 18:46:24 +08:00
|
|
|
static const unsigned int irtl_ns_units[] __initconst = {
|
|
|
|
1, 32, 1024, 32768, 1048576, 33554432, 0, 0
|
|
|
|
};
|
2016-04-07 05:00:47 +08:00
|
|
|
unsigned long long ns;
|
|
|
|
|
2016-06-27 14:35:12 +08:00
|
|
|
if (!irtl)
|
|
|
|
return 0;
|
|
|
|
|
2016-06-27 14:35:48 +08:00
|
|
|
ns = irtl_ns_units[(irtl >> 10) & 0x7];
|
2016-04-07 05:00:47 +08:00
|
|
|
|
2020-01-17 18:46:24 +08:00
|
|
|
return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
|
2016-04-07 05:00:47 +08:00
|
|
|
}
|
2020-01-17 18:46:24 +08:00
|
|
|
|
2020-02-07 01:45:29 +08:00
|
|
|
/**
|
|
|
|
* bxt_idle_state_table_update - Fix up the Broxton idle states table.
|
2016-04-07 05:00:47 +08:00
|
|
|
*
|
2020-02-07 01:45:29 +08:00
|
|
|
* On BXT, trust the IRTL (Interrupt Response Time Limit) MSR to show the
|
|
|
|
* definitive maximum latency and use the same value for target_residency.
|
2016-04-07 05:00:47 +08:00
|
|
|
*/
|
2020-01-10 18:51:22 +08:00
|
|
|
static void __init bxt_idle_state_table_update(void)
|
2016-04-07 05:00:47 +08:00
|
|
|
{
|
|
|
|
unsigned long long msr;
|
2016-06-27 14:35:12 +08:00
|
|
|
unsigned int usec;
|
2016-04-07 05:00:47 +08:00
|
|
|
|
|
|
|
rdmsrl(MSR_PKGC6_IRTL, msr);
|
2016-06-27 14:35:12 +08:00
|
|
|
usec = irtl_2_usec(msr);
|
|
|
|
if (usec) {
|
2016-04-07 05:00:47 +08:00
|
|
|
bxt_cstates[2].exit_latency = usec;
|
|
|
|
bxt_cstates[2].target_residency = usec;
|
|
|
|
}
|
|
|
|
|
|
|
|
rdmsrl(MSR_PKGC7_IRTL, msr);
|
2016-06-27 14:35:12 +08:00
|
|
|
usec = irtl_2_usec(msr);
|
|
|
|
if (usec) {
|
2016-04-07 05:00:47 +08:00
|
|
|
bxt_cstates[3].exit_latency = usec;
|
|
|
|
bxt_cstates[3].target_residency = usec;
|
|
|
|
}
|
|
|
|
|
|
|
|
rdmsrl(MSR_PKGC8_IRTL, msr);
|
2016-06-27 14:35:12 +08:00
|
|
|
usec = irtl_2_usec(msr);
|
|
|
|
if (usec) {
|
2016-04-07 05:00:47 +08:00
|
|
|
bxt_cstates[4].exit_latency = usec;
|
|
|
|
bxt_cstates[4].target_residency = usec;
|
|
|
|
}
|
|
|
|
|
|
|
|
rdmsrl(MSR_PKGC9_IRTL, msr);
|
2016-06-27 14:35:12 +08:00
|
|
|
usec = irtl_2_usec(msr);
|
|
|
|
if (usec) {
|
2016-04-07 05:00:47 +08:00
|
|
|
bxt_cstates[5].exit_latency = usec;
|
|
|
|
bxt_cstates[5].target_residency = usec;
|
|
|
|
}
|
|
|
|
|
|
|
|
rdmsrl(MSR_PKGC10_IRTL, msr);
|
2016-06-27 14:35:12 +08:00
|
|
|
usec = irtl_2_usec(msr);
|
|
|
|
if (usec) {
|
2016-04-07 05:00:47 +08:00
|
|
|
bxt_cstates[6].exit_latency = usec;
|
|
|
|
bxt_cstates[6].target_residency = usec;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
2020-02-07 01:45:29 +08:00
|
|
|
|
|
|
|
/**
 * sklh_idle_state_table_update - Fix up the Sky Lake idle states table.
 *
 * On SKL-H (model 0x5e) skip C8 and C9 if C10 is enabled and SGX disabled.
 */
static void __init sklh_idle_state_table_update(void)
{
	unsigned long long msr;
	unsigned int eax, ebx, ecx, edx;


	/* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
	if (max_cstate <= 7)
		return;

	/* if PC10 not present in CPUID.MWAIT.EDX */
	if ((mwait_substates & (0xF << 28)) == 0)
		return;

	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);

	/* PC10 is not enabled in PKG C-state limit */
	if ((msr & 0xF) != 8)
		return;

	ecx = 0;
	cpuid(7, &eax, &ebx, &ecx, &edx);

	/* if SGX is present */
	if (ebx & (1 << 2)) {

		rdmsrl(MSR_IA32_FEAT_CTL, msr);

		/* if SGX is enabled */
		if (msr & (1 << 18))
			return;
	}

	/* All conditions met: hide C8 and C9, leaving C10 preferred. */
	skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE;	/* C8-SKL */
	skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE;	/* C9-SKL */
}
|
|
|
|
|
intel_idle: Adjust the SKX C6 parameters if PC6 is disabled
Because cpuidle assumes worst-case C-state parameters, PC6 parameters
are used for describing C6, which is worst-case for requesting CC6.
When PC6 is enabled, this is appropriate. But if PC6 is disabled
in the BIOS, the exit latency and target residency should be adjusted
accordingly.
Exit latency:
Previously the C6 exit latency was measured as the PC6 exit latency.
With PC6 disabled, the C6 exit latency should be the one of CC6.
Target residency:
With PC6 disabled, the idle duration within [CC6, PC6) would make the
idle governor choose C1E over C6. This would cause low energy-efficiency.
We should lower the bar to request C6 when PC6 is disabled.
To fill this gap, check if PC6 is disabled in the BIOS in the
MSR_PKG_CST_CONFIG_CONTROL(0xe2) register. If so, use the CC6 exit latency
for C6 and set target_residency to 3 times of the new exit latency. [This
is consistent with how intel_idle driver uses _CST to calculate the
target_residency.] As a result, the OS would be more likely to choose C6
over C1E when PC6 is disabled, which is reasonable, because if C6 is
enabled, it implies that the user cares about energy, so choosing C6 more
frequently makes sense.
The new CC6 exit latency of 92us was measured with wult[1] on SKX via NIC
wakeup as the 99.99th percentile. Also CLX and CPX both have the same CPU
model number as SkX, but their CC6 exit latencies are similar to the SKX
one, 96us and 89us respectively, so reuse the SKX value for them.
There is a concern that it might be better to use a more generic approach
instead of optimizing every platform. However, if the required code
complexity and different PC6 bit interpretation on different platforms
are taken into account, tuning the code per platform seems to be an
acceptable tradeoff.
Link: https://intel.github.io/wult/ # [1]
Suggested-by: Len Brown <len.brown@intel.com>
Signed-off-by: Chen Yu <yu.c.chen@intel.com>
Reviewed-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
[ rjw: Subject and changelog edits ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2021-05-28 11:20:54 +08:00
|
|
|
/**
 * skx_idle_state_table_update - Adjust the Sky Lake/Cascade Lake
 * idle states table.
 *
 * When BIOS caps the package C-state limit below PC6, the default C6
 * parameters (which assume PC6) are pessimistic; substitute CC6-only
 * latency/residency instead.
 */
static void __init skx_idle_state_table_update(void)
{
	unsigned long long msr;

	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);

	/*
	 * 000b: C0/C1 (no package C-state support)
	 * 001b: C2
	 * 010b: C6 (non-retention)
	 * 011b: C6 (retention)
	 * 111b: No Package C state limits.
	 */
	if ((msr & 0x7) < 2) {
		/*
		 * Uses the CC6 + PC0 latency and 3 times of
		 * latency for target_residency if the PC6
		 * is disabled in BIOS. This is consistent
		 * with how intel_idle driver uses _CST
		 * to set the target_residency.
		 */
		skx_cstates[2].exit_latency = 92;
		skx_cstates[2].target_residency = 276;
	}
}
|
|
|
|
|
2022-04-15 17:39:51 +08:00
|
|
|
/**
|
|
|
|
* adl_idle_state_table_update - Adjust AlderLake idle states table.
|
|
|
|
*/
|
|
|
|
static void __init adl_idle_state_table_update(void)
|
|
|
|
{
|
|
|
|
/* Check if user prefers C1 over C1E. */
|
|
|
|
if (preferred_states_mask & BIT(1) && !(preferred_states_mask & BIT(2))) {
|
|
|
|
cpuidle_state_table[0].flags &= ~CPUIDLE_FLAG_UNUSABLE;
|
|
|
|
cpuidle_state_table[1].flags |= CPUIDLE_FLAG_UNUSABLE;
|
|
|
|
|
|
|
|
/* Disable C1E by clearing the "C1E promotion" bit. */
|
|
|
|
c1e_promotion = C1E_PROMOTION_DISABLE;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Make sure C1E is enabled by default */
|
|
|
|
c1e_promotion = C1E_PROMOTION_ENABLE;
|
|
|
|
}
|
|
|
|
|
intel_idle: add 'preferred_cstates' module argument
On Sapphire Rapids Xeon (SPR) the C1 and C1E states are basically mutually
exclusive - only one of them can be enabled. By default, 'intel_idle' driver
enables C1 and disables C1E. However, some users prefer to use C1E instead of
C1, because it saves more energy.
This patch adds a new module parameter ('preferred_cstates') for enabling C1E
and disabling C1. Here is the idea behind it.
1. This option has effect only for "mutually exclusive" C-states like C1 and
C1E on SPR.
2. It does not have any effect on independent C-states, which do not require
other C-states to be disabled (most states on most platforms as of today).
3. For mutually exclusive C-states, the 'intel_idle' driver always has a
reasonable default, such as enabling C1 on SPR by default. On other
platforms, the default may be different.
4. Users can override the default using the 'preferred_cstates' parameter.
5. The parameter accepts the preferred C-states bit-mask, similarly to the
existing 'states_off' parameter.
6. This parameter is not limited to C1/C1E, and leaves room for supporting
other mutually exclusive C-states, if they come in the future.
Today 'intel_idle' can only be compiled-in, which means that on SPR, in order
to disable C1 and enable C1E, users should boot with the following kernel
argument: intel_idle.preferred_cstates=4
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2022-03-02 16:15:59 +08:00
|
|
|
/**
|
|
|
|
* spr_idle_state_table_update - Adjust Sapphire Rapids idle states table.
|
|
|
|
*/
|
|
|
|
static void __init spr_idle_state_table_update(void)
|
|
|
|
{
|
2022-03-02 16:16:00 +08:00
|
|
|
unsigned long long msr;
|
|
|
|
|
intel_idle: add 'preferred_cstates' module argument
On Sapphire Rapids Xeon (SPR) the C1 and C1E states are basically mutually
exclusive - only one of them can be enabled. By default, 'intel_idle' driver
enables C1 and disables C1E. However, some users prefer to use C1E instead of
C1, because it saves more energy.
This patch adds a new module parameter ('preferred_cstates') for enabling C1E
and disabling C1. Here is the idea behind it.
1. This option has effect only for "mutually exclusive" C-states like C1 and
C1E on SPR.
2. It does not have any effect on independent C-states, which do not require
other C-states to be disabled (most states on most platforms as of today).
3. For mutually exclusive C-states, the 'intel_idle' driver always has a
reasonable default, such as enabling C1 on SPR by default. On other
platforms, the default may be different.
4. Users can override the default using the 'preferred_cstates' parameter.
5. The parameter accepts the preferred C-states bit-mask, similarly to the
existing 'states_off' parameter.
6. This parameter is not limited to C1/C1E, and leaves room for supporting
other mutually exclusive C-states, if they come in the future.
Today 'intel_idle' can only be compiled-in, which means that on SPR, in order
to disable C1 and enable C1E, users should boot with the following kernel
argument: intel_idle.preferred_cstates=4
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2022-03-02 16:15:59 +08:00
|
|
|
/* Check if user prefers C1E over C1. */
|
2022-04-27 14:08:53 +08:00
|
|
|
if ((preferred_states_mask & BIT(2)) &&
|
|
|
|
!(preferred_states_mask & BIT(1))) {
|
|
|
|
/* Disable C1 and enable C1E. */
|
intel_idle: add 'preferred_cstates' module argument
On Sapphire Rapids Xeon (SPR) the C1 and C1E states are basically mutually
exclusive - only one of them can be enabled. By default, 'intel_idle' driver
enables C1 and disables C1E. However, some users prefer to use C1E instead of
C1, because it saves more energy.
This patch adds a new module parameter ('preferred_cstates') for enabling C1E
and disabling C1. Here is the idea behind it.
1. This option has effect only for "mutually exclusive" C-states like C1 and
C1E on SPR.
2. It does not have any effect on independent C-states, which do not require
other C-states to be disabled (most states on most platforms as of today).
3. For mutually exclusive C-states, the 'intel_idle' driver always has a
reasonable default, such as enabling C1 on SPR by default. On other
platforms, the default may be different.
4. Users can override the default using the 'preferred_cstates' parameter.
5. The parameter accepts the preferred C-states bit-mask, similarly to the
existing 'states_off' parameter.
6. This parameter is not limited to C1/C1E, and leaves room for supporting
other mutually exclusive C-states, if they come in the future.
Today 'intel_idle' can only be compiled-in, which means that on SPR, in order
to disable C1 and enable C1E, users should boot with the following kernel
argument: intel_idle.preferred_cstates=4
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2022-03-02 16:15:59 +08:00
|
|
|
spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE;
|
|
|
|
spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE;
|
|
|
|
|
|
|
|
/* Enable C1E using the "C1E promotion" bit. */
|
2022-04-27 14:08:52 +08:00
|
|
|
c1e_promotion = C1E_PROMOTION_ENABLE;
|
intel_idle: add 'preferred_cstates' module argument
On Sapphire Rapids Xeon (SPR) the C1 and C1E states are basically mutually
exclusive - only one of them can be enabled. By default, 'intel_idle' driver
enables C1 and disables C1E. However, some users prefer to use C1E instead of
C1, because it saves more energy.
This patch adds a new module parameter ('preferred_cstates') for enabling C1E
and disabling C1. Here is the idea behind it.
1. This option has effect only for "mutually exclusive" C-states like C1 and
C1E on SPR.
2. It does not have any effect on independent C-states, which do not require
other C-states to be disabled (most states on most platforms as of today).
3. For mutually exclusive C-states, the 'intel_idle' driver always has a
reasonable default, such as enabling C1 on SPR by default. On other
platforms, the default may be different.
4. Users can override the default using the 'preferred_cstates' parameter.
5. The parameter accepts the preferred C-states bit-mask, similarly to the
existing 'states_off' parameter.
6. This parameter is not limited to C1/C1E, and leaves room for supporting
other mutually exclusive C-states, if they come in the future.
Today 'intel_idle' can only be compiled-in, which means that on SPR, in order
to disable C1 and enable C1E, users should boot with the following kernel
argument: intel_idle.preferred_cstates=4
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2022-03-02 16:15:59 +08:00
|
|
|
}
|
2022-03-02 16:16:00 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* By default, the C6 state assumes the worst-case scenario of package
|
|
|
|
* C6. However, if PC6 is disabled, we update the numbers to match
|
|
|
|
* core C6.
|
|
|
|
*/
|
|
|
|
rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);
|
|
|
|
|
|
|
|
/* Limit value 2 and above allow for PC6. */
|
|
|
|
if ((msr & 0x7) < 2) {
|
|
|
|
spr_cstates[2].exit_latency = 190;
|
|
|
|
spr_cstates[2].target_residency = 600;
|
|
|
|
}
|
intel_idle: add 'preferred_cstates' module argument
On Sapphire Rapids Xeon (SPR) the C1 and C1E states are basically mutually
exclusive - only one of them can be enabled. By default, 'intel_idle' driver
enables C1 and disables C1E. However, some users prefer to use C1E instead of
C1, because it saves more energy.
This patch adds a new module parameter ('preferred_cstates') for enabling C1E
and disabling C1. Here is the idea behind it.
1. This option has effect only for "mutually exclusive" C-states like C1 and
C1E on SPR.
2. It does not have any effect on independent C-states, which do not require
other C-states to be disabled (most states on most platforms as of today).
3. For mutually exclusive C-states, the 'intel_idle' driver always has a
reasonable default, such as enabling C1 on SPR by default. On other
platforms, the default may be different.
4. Users can override the default using the 'preferred_cstates' parameter.
5. The parameter accepts the preferred C-states bit-mask, similarly to the
existing 'states_off' parameter.
6. This parameter is not limited to C1/C1E, and leaves room for supporting
other mutually exclusive C-states, if they come in the future.
Today 'intel_idle' can only be compiled-in, which means that on SPR, in order
to disable C1 and enable C1E, users should boot with the following kernel
argument: intel_idle.preferred_cstates=4
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2022-03-02 16:15:59 +08:00
|
|
|
}
|
|
|
|
|
2020-01-10 18:52:32 +08:00
|
|
|
/*
 * Check whether the MWAIT hint corresponds to a C-state that this CPU
 * actually implements (per CPUID sub-state counts).  Returns true if the
 * state is usable.  As a side effect, marks the TSC unstable when a state
 * deeper than C2 is usable but the TSC does not keep running in deep idle.
 */
static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
	unsigned int cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1;
	unsigned int substates;

	substates = (mwait_substates >> (cstate * 4)) & MWAIT_SUBSTATE_MASK;

	/* Ignore the C-state if there are NO sub-states in CPUID for it. */
	if (!substates)
		return false;

	if (cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halts in idle states deeper than C2");

	return true;
}
|
|
|
|
|
2020-01-10 18:51:22 +08:00
|
|
|
/**
 * intel_idle_init_cstates_icpu - Populate the driver from built-in tables.
 * @drv: cpuidle driver structure to populate.
 *
 * Apply model-specific adjustments to the selected idle states table and
 * copy its usable entries into @drv, fixing up the enter callbacks and
 * state flags as dictated by each state's flags, CPU features and the
 * module parameters.
 */
static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
{
	int cstate;

	/* Let the platform adjust its idle states table first. */
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_IVYBRIDGE_X:
		ivt_idle_state_table_update();
		break;
	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
		bxt_idle_state_table_update();
		break;
	case INTEL_FAM6_SKYLAKE:
		sklh_idle_state_table_update();
		break;
	case INTEL_FAM6_SKYLAKE_X:
		skx_idle_state_table_update();
		break;
	case INTEL_FAM6_SAPPHIRERAPIDS_X:
		spr_idle_state_table_update();
		break;
	case INTEL_FAM6_ALDERLAKE:
	case INTEL_FAM6_ALDERLAKE_L:
		adl_idle_state_table_update();
		break;
	}

	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
		unsigned int mwait_hint;

		if (intel_idle_max_cstate_reached(cstate))
			break;

		/* An entry with no enter callbacks terminates the table. */
		if (!cpuidle_state_table[cstate].enter &&
		    !cpuidle_state_table[cstate].enter_s2idle)
			break;

		/* If marked as unusable, skip this state. */
		if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
			pr_debug("state %s is disabled\n",
				 cpuidle_state_table[cstate].name);
			continue;
		}

		/* Skip states whose MWAIT hint CPUID does not advertise. */
		mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
		if (!intel_idle_verify_cstate(mwait_hint))
			continue;

		/* Structure copy. */
		drv->states[drv->state_count] = cpuidle_state_table[cstate];

		/* States entered with interrupts on need a dedicated callback. */
		if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE)
			drv->states[drv->state_count].enter = intel_idle_irq;

		/*
		 * The IBRS-toggling entry callback is incompatible with
		 * IRQ-enabled entry, hence the WARN_ON_ONCE() when both
		 * flags are set on the same state.
		 */
		if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
		    cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) {
			WARN_ON_ONCE(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE);
			drv->states[drv->state_count].enter = intel_idle_ibrs;
		}

		/*
		 * Register the state as disabled by default when the user
		 * disabled it via states_off, or when ACPI reports it as
		 * off by default and the state is not flagged as always
		 * enabled.
		 */
		if ((disabled_states_mask & BIT(drv->state_count)) ||
		    ((icpu->use_acpi || force_use_acpi) &&
		     intel_idle_off_by_default(mwait_hint) &&
		     !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
			drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;

		if (intel_idle_state_needs_timer_stop(&drv->states[drv->state_count]))
			drv->states[drv->state_count].flags |= CPUIDLE_FLAG_TIMER_STOP;

		drv->state_count++;
	}

	/* Bay Trail quirk: disable C6 core/module auto-demotion. */
	if (icpu->byt_auto_demotion_disable_flag) {
		wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
		wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
	}
}
|
|
|
|
|
2020-02-07 01:45:29 +08:00
|
|
|
/**
|
|
|
|
* intel_idle_cpuidle_driver_init - Create the list of available idle states.
|
|
|
|
* @drv: cpuidle driver structure to initialize.
|
2019-12-13 16:56:01 +08:00
|
|
|
*/
|
2020-01-10 18:48:25 +08:00
|
|
|
static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
|
2019-12-13 16:56:01 +08:00
|
|
|
{
|
|
|
|
cpuidle_poll_state_init(drv);
|
2020-02-03 18:57:18 +08:00
|
|
|
|
|
|
|
if (disabled_states_mask & BIT(0))
|
|
|
|
drv->states[0].flags |= CPUIDLE_FLAG_OFF;
|
|
|
|
|
2019-12-13 16:56:01 +08:00
|
|
|
drv->state_count = 1;
|
|
|
|
|
|
|
|
if (icpu)
|
|
|
|
intel_idle_init_cstates_icpu(drv);
|
|
|
|
else
|
|
|
|
intel_idle_init_cstates_acpi(drv);
|
|
|
|
}
|
2011-10-28 18:50:42 +08:00
|
|
|
|
2020-01-10 18:52:32 +08:00
|
|
|
static void auto_demotion_disable(void)
|
|
|
|
{
|
|
|
|
unsigned long long msr_bits;
|
|
|
|
|
|
|
|
rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
|
2020-02-07 01:41:24 +08:00
|
|
|
msr_bits &= ~auto_demotion_disable_flags;
|
2020-01-10 18:52:32 +08:00
|
|
|
wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
|
|
|
|
}
|
|
|
|
|
intel_idle: add 'preferred_cstates' module argument
On Sapphire Rapids Xeon (SPR) the C1 and C1E states are basically mutually
exclusive - only one of them can be enabled. By default, 'intel_idle' driver
enables C1 and disables C1E. However, some users prefer to use C1E instead of
C1, because it saves more energy.
This patch adds a new module parameter ('preferred_cstates') for enabling C1E
and disabling C1. Here is the idea behind it.
1. This option has effect only for "mutually exclusive" C-states like C1 and
C1E on SPR.
2. It does not have any effect on independent C-states, which do not require
other C-states to be disabled (most states on most platforms as of today).
3. For mutually exclusive C-states, the 'intel_idle' driver always has a
reasonable default, such as enabling C1 on SPR by default. On other
platforms, the default may be different.
4. Users can override the default using the 'preferred_cstates' parameter.
5. The parameter accepts the preferred C-states bit-mask, similarly to the
existing 'states_off' parameter.
6. This parameter is not limited to C1/C1E, and leaves room for supporting
other mutually exclusive C-states, if they come in the future.
Today 'intel_idle' can only be compiled-in, which means that on SPR, in order
to disable C1 and enable C1E, users should boot with the following kernel
argument: intel_idle.preferred_cstates=4
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2022-03-02 16:15:59 +08:00
|
|
|
static void c1e_promotion_enable(void)
|
|
|
|
{
|
|
|
|
unsigned long long msr_bits;
|
|
|
|
|
|
|
|
rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
|
|
|
|
msr_bits |= 0x2;
|
|
|
|
wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
|
|
|
|
}
|
|
|
|
|
2020-01-10 18:52:32 +08:00
|
|
|
static void c1e_promotion_disable(void)
|
|
|
|
{
|
|
|
|
unsigned long long msr_bits;
|
|
|
|
|
|
|
|
rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
|
|
|
|
msr_bits &= ~0x2;
|
|
|
|
wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
|
|
|
|
}
|
|
|
|
|
2020-02-07 01:45:29 +08:00
|
|
|
/**
|
|
|
|
* intel_idle_cpu_init - Register the target CPU with the cpuidle core.
|
|
|
|
* @cpu: CPU to initialize.
|
|
|
|
*
|
|
|
|
* Register a cpuidle device object for @cpu and update its MSRs in accordance
|
|
|
|
* with the processor model flags.
|
2010-03-09 03:07:30 +08:00
|
|
|
*/
|
2016-11-29 17:51:43 +08:00
|
|
|
static int intel_idle_cpu_init(unsigned int cpu)
|
2010-03-09 03:07:30 +08:00
|
|
|
{
|
|
|
|
struct cpuidle_device *dev;
|
|
|
|
|
2012-01-18 05:40:08 +08:00
|
|
|
dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
|
|
|
|
dev->cpu = cpu;
|
2010-03-09 03:07:30 +08:00
|
|
|
|
2012-01-18 05:40:08 +08:00
|
|
|
if (cpuidle_register_device(dev)) {
|
2017-06-10 03:29:20 +08:00
|
|
|
pr_debug("cpuidle_register_device %d failed!\n", cpu);
|
2012-01-18 05:40:08 +08:00
|
|
|
return -EIO;
|
2010-03-09 03:07:30 +08:00
|
|
|
}
|
|
|
|
|
2020-02-07 01:41:24 +08:00
|
|
|
if (auto_demotion_disable_flags)
|
2016-11-29 17:51:43 +08:00
|
|
|
auto_demotion_disable();
|
2012-01-18 05:40:08 +08:00
|
|
|
|
2022-04-27 14:08:52 +08:00
|
|
|
if (c1e_promotion == C1E_PROMOTION_ENABLE)
|
|
|
|
c1e_promotion_enable();
|
|
|
|
else if (c1e_promotion == C1E_PROMOTION_DISABLE)
|
2016-11-29 17:51:43 +08:00
|
|
|
c1e_promotion_disable();
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int intel_idle_cpu_online(unsigned int cpu)
|
|
|
|
{
|
|
|
|
struct cpuidle_device *dev;
|
|
|
|
|
2020-06-29 19:58:28 +08:00
|
|
|
if (!boot_cpu_has(X86_FEATURE_ARAT))
|
2020-01-10 18:43:23 +08:00
|
|
|
tick_broadcast_enable();
|
2016-11-29 17:51:43 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Some systems can hotplug a cpu at runtime after
|
|
|
|
* the kernel has booted, we have to initialize the
|
|
|
|
* driver in this case
|
|
|
|
*/
|
|
|
|
dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
|
|
|
|
if (!dev->registered)
|
|
|
|
return intel_idle_cpu_init(cpu);
|
2013-12-21 02:47:28 +08:00
|
|
|
|
2010-03-09 03:07:30 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-01-10 18:49:58 +08:00
|
|
|
/**
|
|
|
|
* intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices.
|
|
|
|
*/
|
|
|
|
static void __init intel_idle_cpuidle_devices_uninit(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for_each_online_cpu(i)
|
|
|
|
cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
|
|
|
|
}
|
|
|
|
|
2010-03-09 03:07:30 +08:00
|
|
|
/*
 * intel_idle_init - Probe the CPU and register the intel_idle driver.
 *
 * Validates that MWAIT-based idle is usable on this system, selects either
 * a built-in idle states table or ACPI _CST data, allocates the per-CPU
 * cpuidle devices, and registers the driver and the CPU hotplug callback.
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released via the goto-cleanup labels below.
 */
static int __init intel_idle_init(void)
{
	const struct x86_cpu_id *id;
	unsigned int eax, ebx, ecx;
	int retval;

	/* Do not load intel_idle at all for now if idle= is passed */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return -ENODEV;

	/* max_cstate=0 means the driver is administratively disabled. */
	if (max_cstate == 0) {
		pr_debug("disabled\n");
		return -EPERM;
	}

	id = x86_match_cpu(intel_idle_ids);
	if (id) {
		if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
			pr_debug("Please enable MWAIT in BIOS SETUP\n");
			return -ENODEV;
		}
	} else {
		/*
		 * Not in the known-CPU table: fall back to any CPU that
		 * advertises MWAIT (idle states then come from ACPI _CST).
		 */
		id = x86_match_cpu(intel_mwait_ids);
		if (!id)
			return -ENODEV;
	}

	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return -ENODEV;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);

	/* MWAIT must support the extensions and break-on-interrupt. */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
	    !mwait_substates)
		return -ENODEV;

	pr_debug("MWAIT substates: 0x%x\n", mwait_substates);

	icpu = (const struct idle_cpu *)id->driver_data;
	if (icpu) {
		/* Use the built-in table and its model-specific quirks. */
		cpuidle_state_table = icpu->state_table;
		auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
		if (icpu->disable_promotion_to_c1e)
			c1e_promotion = C1E_PROMOTION_DISABLE;
		if (icpu->use_acpi || force_use_acpi)
			intel_idle_acpi_cst_extract();
	} else if (!intel_idle_acpi_cst_extract()) {
		return -ENODEV;
	}

	pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
		 boot_cpu_data.x86_model);

	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (!intel_idle_cpuidle_devices)
		return -ENOMEM;

	intel_idle_cpuidle_driver_init(&intel_idle_driver);

	retval = cpuidle_register_driver(&intel_idle_driver);
	if (retval) {
		/* Another cpuidle driver is already registered. */
		struct cpuidle_driver *drv = cpuidle_get_driver();
		printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"),
		       drv ? drv->name : "none");
		goto init_driver_fail;
	}

	retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
				   intel_idle_cpu_online, NULL);
	if (retval < 0)
		goto hp_setup_fail;

	pr_debug("Local APIC timer is reliable in %s\n",
		 boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");

	return 0;

hp_setup_fail:
	intel_idle_cpuidle_devices_uninit();
	cpuidle_unregister_driver(&intel_idle_driver);
init_driver_fail:
	free_percpu(intel_idle_cpuidle_devices);
	return retval;

}
|
2016-06-17 13:28:33 +08:00
|
|
|
device_initcall(intel_idle_init);

/*
 * We are not really modular, but we used to support that. Meaning we also
 * support "intel_idle.max_cstate=..." at boot and also a read-only export of
 * it at /sys/module/intel_idle/parameters/max_cstate -- so using module_param
 * is the easiest way (currently) to continue doing that.
 */
module_param(max_cstate, int, 0444);
/*
 * The positions of the bits that are set in this number are the indices of the
 * idle states to be disabled by default (as reflected by the names of the
 * corresponding idle state directories in sysfs, "state0", "state1" ...
 * "state<i>" ..., where <i> is the index of the given state).
 */
module_param_named(states_off, disabled_states_mask, uint, 0444);
MODULE_PARM_DESC(states_off, "Mask of disabled idle states");
/*
 * Some platforms come with mutually exclusive C-states, so that if one is
 * enabled, the other C-states must not be used. Example: C1 and C1E on
 * Sapphire Rapids platform. This parameter allows for selecting the
 * preferred C-states among the groups of mutually exclusive C-states - the
 * selected C-states will be registered, the other C-states from the mutually
 * exclusive group won't be registered. If the platform has no mutually
 * exclusive C-states, this parameter has no effect.
 */
module_param_named(preferred_cstates, preferred_states_mask, uint, 0444);
MODULE_PARM_DESC(preferred_cstates, "Mask of preferred idle states");
|