KVM: arm64: Use generic sanitisation for ID_AA64PFR0_EL1

KVM allows userspace to write to the CSV2 and CSV3 fields of
ID_AA64PFR0_EL1 so long as it doesn't over-promise on the
Spectre/Meltdown mitigation state. Switch over to the new way of the
world for screening user writes. Leave the old plumbing in place until
we actually start handling ID register reads from the VM-wide values.

Signed-off-by: Jing Zhang <jingzhangos@google.com>
Link: https://lore.kernel.org/r/20230609190054.1542113-10-oliver.upton@linux.dev
[Oliver: split from monster patch, added commit description]
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Author:    Jing Zhang <jingzhangos@google.com>
Date:      2023-06-09 19:00:52 +00:00
Committer: Oliver Upton <oliver.upton@linux.dev>
Parent:    c118cead07
Commit:    c39f5974d3

 1 file changed, 45 insertions(+), 21 deletions(-)
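For context, the userspace write that gets screened here arrives via the KVM_SET_ONE_REG ioctl. Below is a minimal VMM-side sketch, assuming an already-initialised vcpu_fd; set_csv2() and its error handling are illustrative helpers, not part of this patch:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>	/* ARM64_SYS_REG() */

/* ID_AA64PFR0_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=4, op2=0 */
#define PFR0_EL1_ID	ARM64_SYS_REG(3, 0, 0, 4, 0)

/* Illustrative helper: rewrite only the CSV2 field (bits [59:56]). */
static int set_csv2(int vcpu_fd, uint64_t csv2)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = PFR0_EL1_ID,
		.addr = (uint64_t)&val,
	};

	/* Read the current view of the register... */
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;

	/* ...and rewrite only CSV2, leaving the other fields intact. */
	val &= ~(0xfULL << 56);
	val |= (csv2 & 0xf) << 56;

	/* KVM rejects the write if csv2 promises more than the host has. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

After this patch, that rejection comes from the generic set_id_reg() path comparing the request against the sanitised limit, rather than from the open-coded CSV2/CSV3 checks the diff below removes.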

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1470,34 +1470,54 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
 	return REG_HIDDEN;
 }
 
+static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
+					   const struct sys_reg_desc *rd)
+{
+	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	if (!vcpu_has_sve(vcpu))
+		val &= ~ID_AA64PFR0_EL1_SVE_MASK;
+
+	/*
+	 * The default is to expose CSV2 == 1 if the HW isn't affected.
+	 * Although this is a per-CPU feature, we make it global because
+	 * asymmetric systems are just a nuisance.
+	 *
+	 * Userspace can override this as long as it doesn't promise
+	 * the impossible.
+	 */
+	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
+		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
+	}
+	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
+		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
+	}
+
+	if (kvm_vgic_global_state.type == VGIC_V3) {
+		val &= ~ID_AA64PFR0_EL1_GIC_MASK;
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
+	}
+
+	val &= ~ID_AA64PFR0_EL1_AMU_MASK;
+
+	return val;
+}
+
 static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 			       const struct sys_reg_desc *rd,
 			       u64 val)
 {
 	u8 csv2, csv3;
+	int r;
 
-	/*
-	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
-	 * it doesn't promise more than what is actually provided (the
-	 * guest could otherwise be covered in ectoplasmic residue).
-	 */
 	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
-	if (csv2 > 1 ||
-	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
-		return -EINVAL;
-
-	/* Same thing for CSV3 */
 	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
-	if (csv3 > 1 ||
-	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
-		return -EINVAL;
 
-	/* We can only differ with CSV[23], and anything else is an error */
-	val ^= read_id_reg(vcpu, rd);
-	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
-		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
-	if (val)
-		return -EINVAL;
+	r = set_id_reg(vcpu, rd, val);
+	if (r)
+		return r;
 
 	vcpu->kvm->arch.pfr0_csv2 = csv2;
 	vcpu->kvm->arch.pfr0_csv3 = csv3;
 
@@ -2041,8 +2061,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
 	/* AArch64 ID registers */
 	/* CRm=4 */
-	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
-	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
+	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
+	  .access = access_id_reg,
+	  .get_user = get_id_reg,
+	  .set_user = set_id_aa64pfr0_el1,
+	  .reset = read_sanitised_id_aa64pfr0_el1,
+	  .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
 	ID_SANITISED(ID_AA64PFR1_EL1),
 	ID_UNALLOCATED(4,2),
 	ID_UNALLOCATED(4,3),
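How the new plumbing screens the write, in rough terms: the descriptor's .reset hook computes the per-VM sanitised limit, .val marks the fields userspace is allowed to change (CSV2 and CSV3 here), and set_id_reg() rejects anything else. The sketch below is a simplified model of that check, not the kernel's actual implementation; it treats every 4-bit ID field as an unsigned, lower-is-safe value, which is sufficient for CSV2/CSV3:

#include <stdbool.h>
#include <stdint.h>

/*
 * Simplified model of the generic ID-register write screening.
 * 'limit' is what the .reset hook (read_sanitised_id_aa64pfr0_el1)
 * produced, 'writable' is the descriptor's .val mask, and 'val' is
 * the value userspace is trying to set.
 */
static bool id_reg_write_ok(uint64_t limit, uint64_t writable, uint64_t val)
{
	for (unsigned int shift = 0; shift < 64; shift += 4) {
		uint8_t cur = (limit >> shift) & 0xf;
		uint8_t req = (val >> shift) & 0xf;

		if (!((writable >> shift) & 0xf)) {
			/* Immutable field: must match the sanitised value. */
			if (req != cur)
				return false;
		} else if (req > cur) {
			/* Writable field: may be lowered, never raised. */
			return false;
		}
	}

	return true;
}

With the limit capping CSV2/CSV3 at IMP only when the host is unaffected, a write claiming CSV2 == 1 on a vulnerable host fails the check, which is the same policy the removed open-coded checks enforced.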