arm64/sve: Eliminate data races on sve_default_vl

sve_default_vl can be modified via the /proc/sys/abi/sve_default_vl
sysctl concurrently with use, and modified concurrently by multiple
threads.

Adding a lock for this seems overkill, and I don't want to think any
more than necessary, so just define wrappers using READ_ONCE()/
WRITE_ONCE().

This will avoid the possibility of torn accesses and repeated loads
and stores.
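
As an illustration only (not part of the patch): the same wrapper pattern
in a standalone C program, with READ_ONCE()/WRITE_ONCE() stand-ins built
from volatile casts.  The kernel's real macros are more elaborate, but for
a plain int the effect is the same: the compiler must emit exactly one
full-width load or store per access, so it cannot tear, re-fetch or cache
the value across the access.

	#include <stdio.h>

	/* Stand-ins for the kernel macros; uses the GCC/Clang typeof extension. */
	#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

	static int __sve_default_vl = -1;

	static int get_sve_default_vl(void)
	{
		return READ_ONCE(__sve_default_vl);
	}

	static void set_sve_default_vl(int val)
	{
		WRITE_ONCE(__sve_default_vl, val);
	}

	int main(void)
	{
		/*
		 * Sequential here; in the kernel the setter (the sysctl write
		 * path) and the getters run concurrently on different CPUs,
		 * which is why the plain accesses get annotated.
		 */
		set_sve_default_vl(64);
		printf("default VL: %d\n", get_sve_default_vl());
		return 0;
	}

In the patch itself, get_sve_default_vl()/set_sve_default_vl() replace
every direct use of sve_default_vl, as the diff below shows.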

There's no evidence yet that this is going wrong in practice: this
is just hygiene.  For generic sysctl users, it would be better to
build this kind of thing into the sysctl common code somehow.

Reported-by: Will Deacon <will@kernel.org>
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Link: https://lore.kernel.org/r/1591808590-20210-3-git-send-email-Dave.Martin@arm.com
[will: move set_sve_default_vl() inside #ifdef to squash allnoconfig warning]
Signed-off-by: Will Deacon <will@kernel.org>
Dave Martin, 2020-06-10 18:03:10 +01:00, committed by Will Deacon
parent 9ba6a9efa4
commit 1e570f512c
1 changed file with 18 additions and 7 deletions


@@ -12,6 +12,7 @@
 #include <linux/bug.h>
 #include <linux/cache.h>
 #include <linux/compat.h>
+#include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
@@ -119,10 +120,20 @@ struct fpsimd_last_state_struct {
 static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
 
 /* Default VL for tasks that don't set it explicitly: */
-static int sve_default_vl = -1;
+static int __sve_default_vl = -1;
+
+static int get_sve_default_vl(void)
+{
+	return READ_ONCE(__sve_default_vl);
+}
 
 #ifdef CONFIG_ARM64_SVE
 
+static void set_sve_default_vl(int val)
+{
+	WRITE_ONCE(__sve_default_vl, val);
+}
+
 /* Maximum supported vector length across all CPUs (initially poisoned) */
 int __ro_after_init sve_max_vl = SVE_VL_MIN;
 int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
@@ -344,7 +355,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
-	int vl = sve_default_vl;
+	int vl = get_sve_default_vl();
 	struct ctl_table tmp_table = {
 		.data = &vl,
 		.maxlen = sizeof(vl),
@@ -361,7 +372,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
 	if (!sve_vl_valid(vl))
 		return -EINVAL;
 
-	sve_default_vl = find_supported_vector_length(vl);
+	set_sve_default_vl(find_supported_vector_length(vl));
 
 	return 0;
 }
@@ -868,7 +879,7 @@ void __init sve_setup(void)
 	 * For the default VL, pick the maximum supported value <= 64.
 	 * VL == 64 is guaranteed not to grow the signal frame.
 	 */
-	sve_default_vl = find_supported_vector_length(64);
+	set_sve_default_vl(find_supported_vector_length(64));
 
 	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
 		      SVE_VQ_MAX);
@@ -889,7 +900,7 @@ void __init sve_setup(void)
 	pr_info("SVE: maximum available vector length %u bytes per vector\n",
 		sve_max_vl);
 	pr_info("SVE: default vector length %u bytes per vector\n",
-		sve_default_vl);
+		get_sve_default_vl());
 
 	/* KVM decides whether to support mismatched systems. Just warn here: */
 	if (sve_max_virtualisable_vl < sve_max_vl)
@@ -1029,13 +1040,13 @@ void fpsimd_flush_thread(void)
 		 * vector length configured: no kernel task can become a user
 		 * task without an exec and hence a call to this function.
 		 * By the time the first call to this function is made, all
-		 * early hardware probing is complete, so sve_default_vl
+		 * early hardware probing is complete, so __sve_default_vl
		 * should be valid.
 		 * If a bug causes this to go wrong, we make some noise and
 		 * try to fudge thread.sve_vl to a safe value here.
 		 */
 		vl = current->thread.sve_vl_onexec ?
-			current->thread.sve_vl_onexec : sve_default_vl;
+			current->thread.sve_vl_onexec : get_sve_default_vl();
 
 		if (WARN_ON(!sve_vl_valid(vl)))
 			vl = SVE_VL_MIN;