arm64: mte: make the per-task SCTLR_EL1 field usable elsewhere
In an upcoming change we are going to introduce per-task SCTLR_EL1 bits for PAC. Move the existing per-task SCTLR_EL1 field out of the MTE-specific code so that we will be able to use it from both the PAC and MTE code paths and make the task switching code more efficient.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Link: https://linux-review.googlesource.com/id/Ic65fac78a7926168fa68f9e8da591c9e04ff7278
Link: https://lore.kernel.org/r/13d725cb8e741950fb9d6e64b2cd9bd54ff7c3f9.1616123271.git.pcc@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
commit 2f79d2fc39
parent e80a76aa1a
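The change boils down to caching the user-controlled SCTLR_EL1 bits in a single per-task field (thread.sctlr_user) and letting __switch_to() skip the system-register write whenever prev and next agree on those bits. Below is a minimal, self-contained userspace C sketch of that lazy-update pattern; struct task, write_sctlr_el1() and the example values are illustrative stand-ins rather than kernel code (the real helpers are update_sctlr_el1() and set_task_sctlr_el1() in the diff that follows).

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the per-task cached bits (thread.sctlr_user). */
struct task {
	const char *name;
	uint64_t sctlr_user;
};

/* Illustrative stand-in for the expensive system-register write + ISB. */
static void write_sctlr_el1(uint64_t val)
{
	printf("  SCTLR_EL1 <- %#llx (sysreg write + isb)\n",
	       (unsigned long long)val);
}

/*
 * Lazy update on context switch: only touch the register when the
 * cached per-task value actually changes, mirroring the check this
 * commit adds to __switch_to().
 */
static void switch_sctlr(const struct task *prev, const struct task *next)
{
	printf("switch %s -> %s\n", prev->name, next->name);
	if (prev->sctlr_user != next->sctlr_user)
		write_sctlr_el1(next->sctlr_user);
	else
		printf("  no change, register write skipped\n");
}

int main(void)
{
	/* Arbitrary example bit patterns, not real SCTLR_EL1 encodings. */
	struct task a = { "A", 0x1 };
	struct task b = { "B", 0x1 };
	struct task c = { "C", 0x2 };

	switch_sctlr(&a, &b);	/* same settings: write skipped  */
	switch_sctlr(&b, &c);	/* settings differ: write issued */
	return 0;
}

Previously this comparison lived in mte_thread_switch() and covered only the TCF0 bits; hoisting it into __switch_to() lets the same cached word carry the PAC bits added by the follow-up change without a second expensive register access.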
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -151,11 +151,13 @@ struct thread_struct {
 	struct ptrauth_keys_kernel	keys_kernel;
 #endif
 #ifdef CONFIG_ARM64_MTE
-	u64			sctlr_tcf0;
 	u64			gcr_user_excl;
 #endif
+	u64			sctlr_user;
 };
 
+#define SCTLR_USER_MASK		SCTLR_EL1_TCF0_MASK
+
 static inline void arch_thread_struct_whitelist(unsigned long *offset,
 						 unsigned long *size)
 {
@@ -247,6 +249,8 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
+void set_task_sctlr_el1(u64 sctlr);
+
 /* Thread switching */
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
 					 struct task_struct *next);
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -185,26 +185,6 @@ void mte_check_tfsr_el1(void)
 }
 #endif
 
-static void update_sctlr_el1_tcf0(u64 tcf0)
-{
-	/* ISB required for the kernel uaccess routines */
-	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
-	isb();
-}
-
-static void set_sctlr_el1_tcf0(u64 tcf0)
-{
-	/*
-	 * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
-	 * optimisation. Disable preemption so that it does not see
-	 * the variable update before the SCTLR_EL1.TCF0 one.
-	 */
-	preempt_disable();
-	current->thread.sctlr_tcf0 = tcf0;
-	update_sctlr_el1_tcf0(tcf0);
-	preempt_enable();
-}
-
 static void update_gcr_el1_excl(u64 excl)
 {
 
@@ -237,31 +217,22 @@ void flush_mte_state(void)
 	write_sysreg_s(0, SYS_TFSRE0_EL1);
 	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
 	/* disable tag checking */
-	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+	set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
+			   SCTLR_EL1_TCF0_NONE);
 	/* reset tag generation mask */
 	set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
 }
 
 void mte_thread_switch(struct task_struct *next)
 {
-	if (!system_supports_mte())
-		return;
-
-	/* avoid expensive SCTLR_EL1 accesses if no change */
-	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
-		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
-	else
-		isb();
-
 	/*
 	 * Check if an async tag exception occurred at EL1.
 	 *
 	 * Note: On the context switch path we rely on the dsb() present
 	 * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
 	 * are synchronized before this point.
-	 * isb() above is required for the same reason.
-	 *
 	 */
+	isb();
 	mte_check_tfsr_el1();
 }
 
@@ -291,7 +262,7 @@ void mte_suspend_exit(void)
 
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
-	u64 tcf0;
+	u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
 	u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
 		       SYS_GCR_EL1_EXCL_MASK;
 
@@ -300,23 +271,23 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 
 	switch (arg & PR_MTE_TCF_MASK) {
 	case PR_MTE_TCF_NONE:
-		tcf0 = SCTLR_EL1_TCF0_NONE;
+		sctlr |= SCTLR_EL1_TCF0_NONE;
 		break;
 	case PR_MTE_TCF_SYNC:
-		tcf0 = SCTLR_EL1_TCF0_SYNC;
+		sctlr |= SCTLR_EL1_TCF0_SYNC;
 		break;
 	case PR_MTE_TCF_ASYNC:
-		tcf0 = SCTLR_EL1_TCF0_ASYNC;
+		sctlr |= SCTLR_EL1_TCF0_ASYNC;
 		break;
 	default:
 		return -EINVAL;
 	}
 
 	if (task != current) {
-		task->thread.sctlr_tcf0 = tcf0;
+		task->thread.sctlr_user = sctlr;
 		task->thread.gcr_user_excl = gcr_excl;
 	} else {
-		set_sctlr_el1_tcf0(tcf0);
+		set_task_sctlr_el1(sctlr);
 		set_gcr_el1_excl(gcr_excl);
 	}
 
@@ -333,7 +304,7 @@ long get_mte_ctrl(struct task_struct *task)
 
 	ret = incl << PR_MTE_TAG_SHIFT;
 
-	switch (task->thread.sctlr_tcf0) {
+	switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
 	case SCTLR_EL1_TCF0_NONE:
 		ret |= PR_MTE_TCF_NONE;
 		break;
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -529,6 +529,27 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
 	write_sysreg(val, cntkctl_el1);
 }
 
+static void update_sctlr_el1(u64 sctlr)
+{
+	sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK, sctlr);
+
+	/* ISB required for the kernel uaccess routines when setting TCF0. */
+	isb();
+}
+
+void set_task_sctlr_el1(u64 sctlr)
+{
+	/*
+	 * __switch_to() checks current->thread.sctlr as an
+	 * optimisation. Disable preemption so that it does not see
+	 * the variable update before the SCTLR_EL1 one.
+	 */
+	preempt_disable();
+	current->thread.sctlr_user = sctlr;
+	update_sctlr_el1(sctlr);
+	preempt_enable();
+}
+
 /*
  * Thread switching.
  */
@@ -559,6 +580,9 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	 * registers.
 	 */
 	mte_thread_switch(next);
+	/* avoid expensive SCTLR_EL1 accesses if no change */
+	if (prev->thread.sctlr_user != next->thread.sctlr_user)
+		update_sctlr_el1(next->thread.sctlr_user);
 
 	/* the actual thread switch */
 	last = cpu_switch_to(prev, next);