KVM/arm64 fixes for 5.11, take #2
- Don't allow tagged pointers to point to memslots
- Filter out ARMv8.1+ PMU events on v8.0 hardware
- Hide PMU registers from userspace when no PMU is configured
- More PMU cleanups
- Don't try to handle broken PSCI firmware
- More sys_reg() to reg_to_encoding() conversions

Merge tag 'kvmarm-fixes-5.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
commit 615099b01e
@@ -1281,6 +1281,9 @@ field userspace_addr, which must point at user addressable memory for
 the entire memory slot size. Any object may back this memory, including
 anonymous memory, ordinary files, and hugetlbfs.
 
+On architectures that support a form of address tagging, userspace_addr must
+be an untagged address.
+
 It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
 be identical. This allows large pages in the guest to be backed by large
 pages in the host.
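A quick user-space illustration of what the documented rule means in practice (a hedged sketch, not part of this change): the hva handed to KVM_SET_USER_MEMORY_REGION has to be the untagged form of the mapping address. The untag() helper below models arm64 top-byte-ignore by clearing bits 63:56 and is an assumption for illustration only.

/* Sketch: register a memslot with an explicitly untagged address.
 * untag() models arm64 TBI for user addresses (assumption, not a KVM API). */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static uint64_t untag(uint64_t addr)
{
        return addr & ~(0xffULL << 56);         /* drop the tag byte */
}

static int add_memslot(int vm_fd, uint64_t gpa, uint64_t size, void *hva)
{
        struct kvm_userspace_memory_region region = {
                .slot = 0,
                .guest_phys_addr = gpa,
                .memory_size = size,
                /* A tagged hva is now rejected with -EINVAL, so strip it first. */
                .userspace_addr = untag((uintptr_t)hva),
        };

        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}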
@@ -1396,8 +1396,9 @@ static void cpu_init_hyp_mode(void)
         * Calculate the raw per-cpu offset without a translation from the
         * kernel's mapping to the linear mapping, and store it in tpidr_el2
         * so that we can use adr_l to access per-cpu variables in EL2.
+        * Also drop the KASAN tag which gets in the way...
         */
-       params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+       params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) -
                            (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
 
        params->mair_el2 = read_sysreg(mair_el1);
@@ -77,12 +77,6 @@ static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
                         cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
 }
 
-static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
-{
-       psci_forward(host_ctxt);
-       hyp_panic(); /* unreachable */
-}
-
 static unsigned int find_cpu_id(u64 mpidr)
 {
        unsigned int i;
@@ -251,10 +245,13 @@ static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
        case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
                return psci_forward(host_ctxt);
+       /*
+        * SYSTEM_OFF/RESET should not return according to the spec.
+        * Allow it so as to stay robust to broken firmware.
+        */
        case PSCI_0_2_FN_SYSTEM_OFF:
        case PSCI_0_2_FN_SYSTEM_RESET:
-               psci_forward_noreturn(host_ctxt);
-               unreachable();
+               return psci_forward(host_ctxt);
        case PSCI_0_2_FN64_CPU_SUSPEND:
                return psci_cpu_suspend(func_id, host_ctxt);
        case PSCI_0_2_FN64_CPU_ON:
@@ -788,7 +788,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 {
        unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
        u64 val, mask = 0;
-       int base, i;
+       int base, i, nr_events;
 
        if (!pmceid1) {
                val = read_sysreg(pmceid0_el0);
@@ -801,14 +801,18 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
        if (!bmap)
                return val;
 
+       nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
+
        for (i = 0; i < 32; i += 8) {
                u64 byte;
 
                byte = bitmap_get_value8(bmap, base + i);
                mask |= byte << i;
-               byte = bitmap_get_value8(bmap, 0x4000 + base + i);
-               mask |= byte << (32 + i);
+               if (nr_events >= (0x4000 + base + 32)) {
+                       byte = bitmap_get_value8(bmap, 0x4000 + base + i);
+                       mask |= byte << (32 + i);
+               }
        }
 
        return val & mask;
 }
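Why the extra check matters: the low 32 bits of PMCEID{0,1}_EL0 advertise the common events (0x0000-0x001F, or 0x0020-0x003F for PMCEID1), while the high 32 bits advertise the extended 0x4000-based range that only exists from ARMv8.1 on. kvm_pmu_event_mask() reflects what the host PMU actually implements, so nr_events caps which half may pass through. A stand-alone model of the masking loop follows; the helper names are mine and only the shape of the loop mirrors the kernel code.

/* Stand-alone model of the PMCEID filtering above: only events that both
 * exist on the host PMU (nr_events) and are allowed by the filter bitmap
 * survive in the returned mask. */
#include <stdint.h>
#include <stdio.h>

static uint64_t get_byte(const uint8_t *bmap, unsigned int start_bit)
{
        return bmap[start_bit / 8];     /* byte-aligned, like bitmap_get_value8() */
}

static uint64_t pmceid_mask(const uint8_t *bmap, unsigned int base,
                            unsigned int nr_events)
{
        uint64_t mask = 0;
        int i;

        for (i = 0; i < 32; i += 8) {
                mask |= get_byte(bmap, base + i) << i;
                /* Extended events (0x4000 + n) only exist past ARMv8.0. */
                if (nr_events >= (0x4000 + base + 32))
                        mask |= get_byte(bmap, 0x4000 + base + i) << (32 + i);
        }
        return mask;
}

int main(void)
{
        static uint8_t filter[0x4040 / 8];      /* one bit per event number */

        filter[0] = 0xff;                       /* allow events 0x00-0x07 */
        /* A v8.0-style PMU: nr_events stops short of 0x4020, so the extended half is masked. */
        printf("mask=%#llx\n", (unsigned long long)pmceid_mask(filter, 0, 0x400));
        return 0;
}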
@@ -43,6 +43,10 @@
  * 64bit interface.
  */
 
+#define reg_to_encoding(x)                                     \
+       sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                   \
+               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
+
 static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 struct sys_reg_params *params,
                                 const struct sys_reg_desc *r)
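For reference, reg_to_encoding() simply feeds a descriptor's Op0/Op1/CRn/CRm/Op2 fields into sys_reg(), so a table entry can be compared directly against SYS_* encodings. Below is a stand-alone model; the shift values mirror the arm64 sysreg layout and are stated as an assumption rather than taken from this diff.

/* Stand-alone model of reg_to_encoding(): pack the five MRS/MSR operands
 * into one comparable value. */
#include <stdint.h>
#include <stdio.h>

struct sys_reg_desc_model {
        uint8_t Op0, Op1, CRn, CRm, Op2;
};

#define SYS_REG_MODEL(op0, op1, crn, crm, op2)                  \
        (((uint32_t)(op0) << 19) | ((uint32_t)(op1) << 16) |    \
         ((uint32_t)(crn) << 12) | ((uint32_t)(crm) << 8)  |    \
         ((uint32_t)(op2) << 5))

#define reg_to_encoding_model(x)                                \
        SYS_REG_MODEL((x)->Op0, (x)->Op1, (x)->CRn, (x)->CRm, (x)->Op2)

int main(void)
{
        /* ID_AA64MMFR1_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=7, op2=1. */
        struct sys_reg_desc_model r = { 3, 0, 0, 7, 1 };

        printf("encoding=%#x\n", reg_to_encoding_model(&r));
        return 0;
}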
@@ -273,8 +277,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
                          const struct sys_reg_desc *r)
 {
        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
-       u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
-                        (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+       u32 sr = reg_to_encoding(r);
 
        if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
                kvm_inject_undefined(vcpu);
@@ -590,6 +593,15 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
        vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
 }
 
+static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
+                                  const struct sys_reg_desc *r)
+{
+       if (kvm_vcpu_has_pmu(vcpu))
+               return 0;
+
+       return REG_HIDDEN;
+}
+
 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
        u64 pmcr, val;
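The new pmu_visibility() hook is what lets the common sys_regs code hide the whole PMU block at once: returning REG_HIDDEN keeps a register out of the list reported to userspace and makes guest accesses behave as if it were not implemented. A toy model of the pattern is below; the types and flag value are made up for illustration and only the callback shape matches the kernel code.

/* Toy model of a visibility callback filtering a register table. */
#include <stdbool.h>
#include <stdio.h>

#define REG_HIDDEN_MODEL (1 << 0)

struct vcpu_model { bool has_pmu; };

struct reg_desc_model {
        const char *name;
        unsigned int (*visibility)(const struct vcpu_model *vcpu);
};

static unsigned int pmu_visibility_model(const struct vcpu_model *vcpu)
{
        return vcpu->has_pmu ? 0 : REG_HIDDEN_MODEL;
}

int main(void)
{
        const struct reg_desc_model regs[] = {
                { "PMCR_EL0", pmu_visibility_model },
                { "PMCCNTR_EL0", pmu_visibility_model },
        };
        struct vcpu_model vcpu = { .has_pmu = false };

        for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
                printf("%s: %s\n", regs[i].name,
                       (regs[i].visibility(&vcpu) & REG_HIDDEN_MODEL) ?
                       "hidden (UNDEF / not listed)" : "visible");
        return 0;
}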
@@ -613,9 +625,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
 {
        u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
-       bool enabled = kvm_vcpu_has_pmu(vcpu);
+       bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
 
-       enabled &= (reg & flags) || vcpu_mode_priv(vcpu);
        if (!enabled)
                kvm_inject_undefined(vcpu);
 
@@ -900,11 +911,6 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                             const struct sys_reg_desc *r)
 {
-       if (!kvm_vcpu_has_pmu(vcpu)) {
-               kvm_inject_undefined(vcpu);
-               return false;
-       }
-
        if (p->is_write) {
                if (!vcpu_mode_priv(vcpu)) {
                        kvm_inject_undefined(vcpu);
@@ -921,10 +927,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        return true;
 }
 
-#define reg_to_encoding(x)                                     \
-       sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                   \
-               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
-
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)                             \
        { SYS_DESC(SYS_DBGBVRn_EL1(n)),                         \
@@ -936,15 +938,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        { SYS_DESC(SYS_DBGWCRn_EL1(n)),                         \
          trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
 
+#define PMU_SYS_REG(r)                                         \
+       SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
+
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)                                    \
-       { SYS_DESC(SYS_PMEVCNTRn_EL0(n)),                       \
-         access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+       { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),                    \
+         .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
 
 /* Macro to expand the PMEVTYPERn_EL0 register */
 #define PMU_PMEVTYPER_EL0(n)                                   \
-       { SYS_DESC(SYS_PMEVTYPERn_EL0(n)),                      \
-         access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+       { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),                   \
+         .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
 
 static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
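PMU_SYS_REG() supplies the common designated initializers (.reset = reset_unknown, .visibility = pmu_visibility) and each table entry then adds its own .access/.reg fields, which is what the converted entries further down rely on. A minimal stand-alone demo of that macro pattern follows; the names are simplified stand-ins, not the kernel types.

/* Toy demo of the PMU_SYS_REG() pattern: a macro supplies the shared
 * designated initializers and each entry adds its own fields. */
#include <stdio.h>

struct desc {
        const char *name;
        const char *access, *reset, *visibility;
        int reg;
};

#define PMU_SYS_REG_MODEL(r) \
        .name = #r, .reset = "reset_unknown", .visibility = "pmu_visibility"

int main(void)
{
        struct desc d = {
                PMU_SYS_REG_MODEL(SYS_PMCR_EL0),
                .access = "access_pmcr", .reg = 42,
        };

        printf("%s: access=%s reset=%s visibility=%s reg=%d\n",
               d.name, d.access, d.reset, d.visibility, d.reg);
        return 0;
}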
@@ -1020,8 +1025,7 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
 static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                struct sys_reg_desc const *r, bool raz)
 {
-       u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
-                        (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+       u32 id = reg_to_encoding(r);
        u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
 
        if (id == SYS_ID_AA64PFR0_EL1) {
@@ -1062,8 +1066,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
                                  const struct sys_reg_desc *r)
 {
-       u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
-                        (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+       u32 id = reg_to_encoding(r);
 
        switch (id) {
        case SYS_ID_AA64ZFR0_EL1:
@@ -1486,8 +1489,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
        { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
 
-       { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
-       { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
+       { PMU_SYS_REG(SYS_PMINTENSET_EL1),
+         .access = access_pminten, .reg = PMINTENSET_EL1 },
+       { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+         .access = access_pminten, .reg = PMINTENSET_EL1 },
 
        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
@@ -1526,23 +1531,36 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
        { SYS_DESC(SYS_CTR_EL0), access_ctr },
 
-       { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
-       { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
-       { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
-       { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
-       { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
-       { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
-       { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
-       { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
-       { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
-       { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
-       { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
+       { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+         .reset = reset_pmcr, .reg = PMCR_EL0 },
+       { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+         .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+       { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+         .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+       { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+         .access = access_pmovs, .reg = PMOVSSET_EL0 },
+       { PMU_SYS_REG(SYS_PMSWINC_EL0),
+         .access = access_pmswinc, .reg = PMSWINC_EL0 },
+       { PMU_SYS_REG(SYS_PMSELR_EL0),
+         .access = access_pmselr, .reg = PMSELR_EL0 },
+       { PMU_SYS_REG(SYS_PMCEID0_EL0),
+         .access = access_pmceid, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMCEID1_EL0),
+         .access = access_pmceid, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMCCNTR_EL0),
+         .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
+       { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+         .access = access_pmu_evtyper, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+         .access = access_pmu_evcntr, .reset = NULL },
        /*
         * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
         * in 32bit mode. Here we choose to reset it as zero for consistency.
         */
-       { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
-       { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
+       { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+         .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
+       { PMU_SYS_REG(SYS_PMOVSSET_EL0),
+         .access = access_pmovs, .reg = PMOVSSET_EL0 },
 
        { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
        { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
|
|||
* PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
|
||||
* in 32bit mode. Here we choose to reset it as zero for consistency.
|
||||
*/
|
||||
{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
|
||||
{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
|
||||
.reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
|
||||
|
||||
{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
|
||||
{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
|
||||
|
|
|
@@ -1292,6 +1292,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                return -EINVAL;
        /* We can read the guest memory with __xxx_user() later on. */
        if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
+           (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
             !access_ok((void __user *)(unsigned long)mem->userspace_addr,
                        mem->memory_size))
                return -EINVAL;