KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST
Since commit d26c25a9d1 ("arm64: KVM: Tighten guest core register
access from userspace"), KVM_{GET,SET}_ONE_REG rejects register IDs
that do not correspond to a single underlying architectural register.

KVM_GET_REG_LIST was not changed to match, however: it still yields a
list of 32-bit register IDs that together cover the whole kvm_regs
struct. This means that if userspace uses the resulting list of IDs
directly to drive calls to KVM_*_ONE_REG, some of those calls will now
fail.

This was not the intention. Iterating KVM_*_ONE_REG over the list of
IDs returned by KVM_GET_REG_LIST should be guaranteed to work.

This patch fixes the problem by splitting validate_core_offset() into a
backend helper, core_reg_size_from_offset(), which does all of the work
except for checking that the size field in the register ID matches;
kvm_arm_copy_reg_indices() and num_core_regs() are converted to use
this helper to enumerate the valid offsets.

kvm_arm_copy_reg_indices() now also sets the register ID size field
appropriately based on the value returned, so the register IDs supplied
to userspace are fully qualified for use with the register access
ioctls.

Cc: stable@vger.kernel.org
Fixes: d26c25a9d1 ("arm64: KVM: Tighten guest core register access from userspace")
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Tested-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 0c529ff789
commit df205b5c63
@@ -70,10 +70,8 @@ static u64 core_reg_offset_from_id(u64 id)
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
-static int validate_core_offset(const struct kvm_vcpu *vcpu,
-				const struct kvm_one_reg *reg)
+static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
 {
-	u64 off = core_reg_offset_from_id(reg->id);
 	int size;
 
 	switch (off) {
@@ -103,8 +101,7 @@ static int validate_core_offset(const struct kvm_vcpu *vcpu,
 		return -EINVAL;
 	}
 
-	if (KVM_REG_SIZE(reg->id) != size ||
-	    !IS_ALIGNED(off, size / sizeof(__u32)))
+	if (!IS_ALIGNED(off, size / sizeof(__u32)))
 		return -EINVAL;
 
 	/*
@@ -115,6 +112,21 @@ static int validate_core_offset(const struct kvm_vcpu *vcpu,
 	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
 		return -EINVAL;
 
+	return size;
+}
+
+static int validate_core_offset(const struct kvm_vcpu *vcpu,
+				const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size = core_reg_size_from_offset(vcpu, off);
+
+	if (size < 0)
+		return -EINVAL;
+
+	if (KVM_REG_SIZE(reg->id) != size)
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -447,19 +459,34 @@ static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
 {
 	unsigned int i;
 	int n = 0;
-	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
 
 	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
-		/*
-		 * The KVM_REG_ARM64_SVE regs must be used instead of
-		 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
-		 * SVE-enabled vcpus:
-		 */
-		if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
+		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
+		int size = core_reg_size_from_offset(vcpu, i);
+
+		if (size < 0)
 			continue;
 
+		switch (size) {
+		case sizeof(__u32):
+			reg |= KVM_REG_SIZE_U32;
+			break;
+
+		case sizeof(__u64):
+			reg |= KVM_REG_SIZE_U64;
+			break;
+
+		case sizeof(__uint128_t):
+			reg |= KVM_REG_SIZE_U128;
+			break;
+
+		default:
+			WARN_ON(1);
+			continue;
+		}
+
 		if (uindices) {
-			if (put_user(core_reg | i, uindices))
+			if (put_user(reg, uindices))
 				return -EFAULT;
 			uindices++;
 		}