x86/fpu: Rename "dynamic" XSTATEs to "independent"
The salient feature of "dynamic" XSTATEs is that they are not part of the
main task XSTATE buffer. The fact that they are dynamically allocated is
irrelevant and will become quite confusing when user math XSTATEs start
being dynamically allocated. Rename them to "independent" because they are
independent of the main XSTATE code.

This is just a search-and-replace with some whitespace updates to keep
things aligned.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/1eecb0e4f3e07828ebe5d737ec77dc3b708fad2d.1623388344.git.luto@kernel.org
Link: https://lkml.kernel.org/r/20210623121454.911450390@linutronix.de
commit 01707b6653 (parent b76411b1b5)
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -491,7 +491,7 @@ static void intel_pmu_arch_lbr_xrstors(void *ctx)
 {
 	struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
 
-	copy_kernel_to_dynamic_supervisor(&task_ctx->xsave, XFEATURE_MASK_LBR);
+	copy_kernel_to_independent_supervisor(&task_ctx->xsave, XFEATURE_MASK_LBR);
 }
 
 static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
@@ -576,7 +576,7 @@ static void intel_pmu_arch_lbr_xsaves(void *ctx)
 {
 	struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
 
-	copy_dynamic_supervisor_to_kernel(&task_ctx->xsave, XFEATURE_MASK_LBR);
+	copy_independent_supervisor_to_kernel(&task_ctx->xsave, XFEATURE_MASK_LBR);
 }
 
 static void __intel_pmu_lbr_save(void *ctx)
@@ -992,7 +992,7 @@ static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc)
 		intel_pmu_store_lbr(cpuc, NULL);
 		return;
 	}
-	copy_dynamic_supervisor_to_kernel(&xsave->xsave, XFEATURE_MASK_LBR);
+	copy_independent_supervisor_to_kernel(&xsave->xsave, XFEATURE_MASK_LBR);
 
 	intel_pmu_store_lbr(cpuc, xsave->lbr.entries);
 }
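The hunks above cover the only in-tree callers at this point: the perf arch-LBR
save/restore paths. As a rough sketch (not part of the patch; the wrapper names
are hypothetical), the renamed helpers are always used as a save/restore pair
around one XSAVES-managed buffer, which the real perf code embeds in
struct x86_perf_task_context_arch_lbr_xsave:

/*
 * Sketch only: pairing of the renamed helpers around an LBR context switch.
 * 'xsave_buf' stands for a 64-byte-aligned XSAVE area containing the legacy
 * region, the XSAVE header and the LBR component.
 */
static void lbr_state_save(struct xregs_state *xsave_buf)
{
	/* XSAVES the LBR component (plus legacy region and header) */
	copy_independent_supervisor_to_kernel(xsave_buf, XFEATURE_MASK_LBR);
}

static void lbr_state_restore(struct xregs_state *xsave_buf)
{
	/* XRSTORS the LBR component back from the same buffer */
	copy_kernel_to_independent_supervisor(xsave_buf, XFEATURE_MASK_LBR);
}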
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -42,21 +42,21 @@
  * and its size may be huge. Saving/restoring such supervisor state components
  * at each context switch can cause high CPU and space overhead, which should
  * be avoided. Such supervisor state components should only be saved/restored
- * on demand. The on-demand dynamic supervisor features are set in this mask.
+ * on demand. The on-demand supervisor features are set in this mask.
  *
- * Unlike the existing supported supervisor features, a dynamic supervisor
+ * Unlike the existing supported supervisor features, an independent supervisor
  * feature does not allocate a buffer in task->fpu, and the corresponding
  * supervisor state component cannot be saved/restored at each context switch.
  *
- * To support a dynamic supervisor feature, a developer should follow the
+ * To support an independent supervisor feature, a developer should follow the
  * dos and don'ts as below:
  * - Do dynamically allocate a buffer for the supervisor state component.
  * - Do manually invoke the XSAVES/XRSTORS instruction to save/restore the
  *   state component to/from the buffer.
- * - Don't set the bit corresponding to the dynamic supervisor feature in
+ * - Don't set the bit corresponding to the independent supervisor feature in
  *   IA32_XSS at run time, since it has been set at boot time.
  */
-#define XFEATURE_MASK_DYNAMIC (XFEATURE_MASK_LBR)
+#define XFEATURE_MASK_INDEPENDENT (XFEATURE_MASK_LBR)
 
 /*
  * Unsupported supervisor features. When a supervisor feature in this mask is
@@ -66,7 +66,7 @@
 
 /* All supervisor states including supported and unsupported states. */
 #define XFEATURE_MASK_SUPERVISOR_ALL (XFEATURE_MASK_SUPERVISOR_SUPPORTED | \
-				      XFEATURE_MASK_DYNAMIC | \
+				      XFEATURE_MASK_INDEPENDENT | \
 				      XFEATURE_MASK_SUPERVISOR_UNSUPPORTED)
 
 #ifdef CONFIG_X86_64
@@ -87,12 +87,12 @@ static inline u64 xfeatures_mask_user(void)
 	return xfeatures_mask_all & XFEATURE_MASK_USER_SUPPORTED;
 }
 
-static inline u64 xfeatures_mask_dynamic(void)
+static inline u64 xfeatures_mask_independent(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_ARCH_LBR))
-		return XFEATURE_MASK_DYNAMIC & ~XFEATURE_MASK_LBR;
+		return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;
 
-	return XFEATURE_MASK_DYNAMIC;
+	return XFEATURE_MASK_INDEPENDENT;
 }
 
 extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
@@ -104,8 +104,8 @@ void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
 int xfeature_size(int xfeature_nr);
 int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
 int copy_sigframe_from_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
-void copy_dynamic_supervisor_to_kernel(struct xregs_state *xstate, u64 mask);
-void copy_kernel_to_dynamic_supervisor(struct xregs_state *xstate, u64 mask);
+void copy_independent_supervisor_to_kernel(struct xregs_state *xstate, u64 mask);
+void copy_kernel_to_independent_supervisor(struct xregs_state *xstate, u64 mask);
 
 enum xstate_copy_mode {
 	XSTATE_COPY_FP,
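The comment block in the first hunk spells out the contract for an independent
supervisor feature: allocate your own buffer, drive XSAVES/XRSTORS yourself
through the copy helpers, and leave the feature's IA32_XSS bit alone at run
time. A minimal sketch of that pattern for a hypothetical feature FOO follows;
XFEATURE_MASK_FOO and the foo_* names are illustrative and do not exist in the
kernel, and kzalloc() is used only for brevity (the caller must actually
guarantee 64-byte alignment, e.g. via a dedicated kmem_cache):

/* Sketch, assuming a hypothetical independent supervisor feature FOO. */
struct foo_ctx {
	struct xregs_state xsave;	/* legacy region + XSAVE header + FOO data */
} __aligned(64);			/* the XSAVE area must be 64-byte aligned */

static struct foo_ctx *foo_ctx_alloc(size_t foo_state_size)
{
	/* Do: dynamically allocate the buffer, outside task->fpu */
	return kzalloc(sizeof(struct foo_ctx) + foo_state_size, GFP_KERNEL);
}

static void foo_ctx_save(struct foo_ctx *ctx)
{
	/* Do: invoke XSAVES manually via the helper */
	copy_independent_supervisor_to_kernel(&ctx->xsave, XFEATURE_MASK_FOO);

	/* Don't: toggle the FOO bit in IA32_XSS here - it was set at boot */
}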
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -151,7 +151,7 @@ void fpu__init_cpu_xstate(void)
 	 */
 	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
 		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
-				     xfeatures_mask_dynamic());
+				     xfeatures_mask_independent());
 	}
 }
 
@@ -551,7 +551,7 @@ static void check_xstate_against_struct(int nr)
 	 * how large the XSAVE buffer needs to be. We are recalculating
 	 * it to be safe.
 	 *
-	 * Dynamic XSAVE features allocate their own buffers and are not
+	 * Independent XSAVE features allocate their own buffers and are not
 	 * covered by these checks. Only the size of the buffer for task->fpu
 	 * is checked here.
 	 */
@@ -617,18 +617,18 @@ static unsigned int __init get_xsaves_size(void)
 }
 
 /*
- * Get the total size of the enabled xstates without the dynamic supervisor
+ * Get the total size of the enabled xstates without the independent supervisor
  * features.
  */
-static unsigned int __init get_xsaves_size_no_dynamic(void)
+static unsigned int __init get_xsaves_size_no_independent(void)
 {
-	u64 mask = xfeatures_mask_dynamic();
+	u64 mask = xfeatures_mask_independent();
 	unsigned int size;
 
 	if (!mask)
 		return get_xsaves_size();
 
-	/* Disable dynamic features. */
+	/* Disable independent features. */
 	wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
 
 	/*
@@ -637,7 +637,7 @@ static unsigned int __init get_xsaves_size_no_dynamic(void)
 	 */
 	size = get_xsaves_size();
 
-	/* Re-enable dynamic features so XSAVES will work on them again. */
+	/* Re-enable independent features so XSAVES will work on them again. */
 	wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);
 
 	return size;
@@ -680,7 +680,7 @@ static int __init init_xstate_size(void)
 	xsave_size = get_xsave_size();
 
 	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		possible_xstate_size = get_xsaves_size_no_dynamic();
+		possible_xstate_size = get_xsaves_size_no_independent();
 	else
 		possible_xstate_size = xsave_size;
 
@@ -837,7 +837,7 @@ void fpu__resume_cpu(void)
 	 */
 	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
 		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
-				     xfeatures_mask_dynamic());
+				     xfeatures_mask_independent());
 	}
 }
 
@@ -1163,34 +1163,34 @@ int copy_sigframe_from_user_to_xstate(struct xregs_state *xsave,
 }
 
 /**
- * copy_dynamic_supervisor_to_kernel() - Save dynamic supervisor states to
+ * copy_independent_supervisor_to_kernel() - Save independent supervisor states to
  *                                       an xsave area
  * @xstate: A pointer to an xsave area
- * @mask: Represent the dynamic supervisor features saved into the xsave area
+ * @mask: Represent the independent supervisor features saved into the xsave area
  *
- * Only the dynamic supervisor states sets in the mask are saved into the xsave
- * area (See the comment in XFEATURE_MASK_DYNAMIC for the details of dynamic
- * supervisor feature). Besides the dynamic supervisor states, the legacy
+ * Only the independent supervisor states sets in the mask are saved into the xsave
+ * area (See the comment in XFEATURE_MASK_INDEPENDENT for the details of independent
+ * supervisor feature). Besides the independent supervisor states, the legacy
  * region and XSAVE header are also saved into the xsave area. The supervisor
 * features in the XFEATURE_MASK_SUPERVISOR_SUPPORTED and
 * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not saved.
 *
 * The xsave area must be 64-bytes aligned.
 */
-void copy_dynamic_supervisor_to_kernel(struct xregs_state *xstate, u64 mask)
+void copy_independent_supervisor_to_kernel(struct xregs_state *xstate, u64 mask)
 {
-	u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
+	u64 independent_mask = xfeatures_mask_independent() & mask;
 	u32 lmask, hmask;
 	int err;
 
 	if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
 		return;
 
-	if (WARN_ON_FPU(!dynamic_mask))
+	if (WARN_ON_FPU(!independent_mask))
 		return;
 
-	lmask = dynamic_mask;
-	hmask = dynamic_mask >> 32;
+	lmask = independent_mask;
+	hmask = independent_mask >> 32;
 
 	XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
 
@@ -1199,34 +1199,34 @@ void copy_dynamic_supervisor_to_kernel(struct xregs_state *xstate, u64 mask)
 }
 
 /**
- * copy_kernel_to_dynamic_supervisor() - Restore dynamic supervisor states from
+ * copy_kernel_to_independent_supervisor() - Restore independent supervisor states from
  *                                       an xsave area
  * @xstate: A pointer to an xsave area
- * @mask: Represent the dynamic supervisor features restored from the xsave area
+ * @mask: Represent the independent supervisor features restored from the xsave area
  *
- * Only the dynamic supervisor states sets in the mask are restored from the
- * xsave area (See the comment in XFEATURE_MASK_DYNAMIC for the details of
- * dynamic supervisor feature). Besides the dynamic supervisor states, the
+ * Only the independent supervisor states sets in the mask are restored from the
+ * xsave area (See the comment in XFEATURE_MASK_INDEPENDENT for the details of
+ * independent supervisor feature). Besides the independent supervisor states, the
 * legacy region and XSAVE header are also restored from the xsave area. The
 * supervisor features in the XFEATURE_MASK_SUPERVISOR_SUPPORTED and
 * XFEATURE_MASK_SUPERVISOR_UNSUPPORTED are not restored.
 *
 * The xsave area must be 64-bytes aligned.
 */
-void copy_kernel_to_dynamic_supervisor(struct xregs_state *xstate, u64 mask)
+void copy_kernel_to_independent_supervisor(struct xregs_state *xstate, u64 mask)
 {
-	u64 dynamic_mask = xfeatures_mask_dynamic() & mask;
+	u64 independent_mask = xfeatures_mask_independent() & mask;
 	u32 lmask, hmask;
 	int err;
 
 	if (WARN_ON_FPU(!boot_cpu_has(X86_FEATURE_XSAVES)))
 		return;
 
-	if (WARN_ON_FPU(!dynamic_mask))
+	if (WARN_ON_FPU(!independent_mask))
 		return;
 
-	lmask = dynamic_mask;
-	hmask = dynamic_mask >> 32;
+	lmask = independent_mask;
+	hmask = independent_mask >> 32;
 
 	XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
 
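For context on the lmask/hmask pair that both copy helpers compute: the XSAVES
and XRSTORS instructions take the requested-feature bitmap in EDX:EAX, so the
64-bit feature mask has to be split into two 32-bit halves before XSTATE_OP()
emits the instruction. A stripped-down sketch (illustrative only, not a
function in the kernel):

/* Sketch of the mask split performed inside the copy helpers. */
static void xsaves_independent_sketch(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;		/* bits  0..31 go into EAX */
	u32 hmask = mask >> 32;		/* bits 32..63 go into EDX */
	int err;

	XSTATE_OP(XSAVES, xstate, lmask, hmask, err);	/* compacted save */
	WARN_ON_FPU(err);
}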