Merge tag 'kvmarm-fixes-6.4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.4, take #3

- Fix the reported address of a watchpoint forwarded to userspace

- Fix the freeing of the root of stage-2 page tables

- Stop creating spurious PMU events to perform detection of the
  default PMU and use the existing PMU list instead.
Paolo Bonzini 2023-06-03 15:15:49 -04:00
commit 49661a52a4
5 changed files with 35 additions and 37 deletions


@@ -412,17 +412,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return false;
 }
 
-static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	if (!__populate_fault_info(vcpu))
 		return true;
 
 	return false;
 }
+static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+	__alias(kvm_hyp_handle_memory_fault);
+static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+	__alias(kvm_hyp_handle_memory_fault);
 
 static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-	if (!__populate_fault_info(vcpu))
+	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
		return true;
 
 	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
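
The two new declarations rely on the kernel's __alias() helper, which wraps the compiler's alias attribute: kvm_hyp_handle_iabt_low() and kvm_hyp_handle_watchpt_low() become alternate names for kvm_hyp_handle_memory_fault() rather than separate copies of it, while kvm_hyp_handle_dabt_low(), which needs extra vgic handling, calls it directly. A minimal standalone sketch of the aliasing pattern (illustrative names, plain user-space C, not kernel code; my_alias() stands in for what __alias() is assumed to expand to):

/*
 * Minimal standalone sketch of the alias pattern above (illustrative names,
 * not kernel code). my_alias() mirrors the compiler's alias attribute, which
 * the kernel's __alias() helper is assumed to wrap.
 */
#include <stdbool.h>
#include <stdio.h>

#define my_alias(symbol)	__attribute__((__alias__(#symbol)))

static bool handle_memory_fault(int esr)
{
	return esr != 0;	/* stand-in for the real fault-info check */
}

/* Two extra entry points that resolve to handle_memory_fault() at link time. */
static bool handle_iabt_low(int esr) my_alias(handle_memory_fault);
static bool handle_watchpt_low(int esr) my_alias(handle_memory_fault);

int main(void)
{
	printf("%d %d\n", handle_iabt_low(1), handle_watchpt_low(0));
	return 0;
}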


@@ -186,6 +186,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };
 
@@ -196,6 +197,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };
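
Both tables are designated-initializer arrays indexed by the exception class (EC) taken from ESR_ELx, so adding the ESR_ELx_EC_WATCHPT_LOW entry is enough for lower-EL watchpoint traps to be routed to the shared memory-fault handler. A rough standalone sketch of that table-driven dispatch (names and the trivial handler are illustrative, not the kernel's):

/*
 * Standalone sketch of the dispatch pattern used by these tables: an array of
 * handler pointers indexed by exception class, with unhandled classes NULL.
 * Names and the simplified handler are illustrative, not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef bool (*exit_handler_fn)(uint64_t *exit_code);

enum {
	EC_DABT_LOW	= 0x24,	/* data abort from a lower EL */
	EC_WATCHPT_LOW	= 0x34,	/* watchpoint from a lower EL */
	EC_MAX		= 0x3f	/* EC is a 6-bit field */
};

static bool handle_mem_fault(uint64_t *exit_code)
{
	*exit_code = 0;
	return true;
}

static const exit_handler_fn handlers[EC_MAX + 1] = {
	[EC_DABT_LOW]		= handle_mem_fault,
	[EC_WATCHPT_LOW]	= handle_mem_fault,	/* the entry added above */
};

int main(void)
{
	uint64_t exit_code = 1;
	exit_handler_fn fn = handlers[EC_WATCHPT_LOW & EC_MAX];

	printf("handled: %d\n", fn ? fn(&exit_code) : 0);
	return 0;
}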


@@ -1332,4 +1332,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
 	};
 
 	WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
+
+	WARN_ON(mm_ops->page_count(pgtable) != 1);
+	mm_ops->put_page(pgtable);
 }
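
The two added lines fix a leak: the preceding walk tears down the child tables, but the root page handed to this function still holds its own final reference, so the function now checks that exactly one reference is left and drops it. A toy user-space sketch of that "free the children, then put the root" shape (hypothetical helpers, not the kernel's mm_ops callbacks):

/*
 * Toy sketch of the pattern completed by this fix: after the children are
 * released, the root still holds one reference that must be dropped.
 * All names here are hypothetical stand-ins, not kernel interfaces.
 */
#include <assert.h>
#include <stdlib.h>

struct page {
	int refcount;
};

static void put_page(struct page *p)
{
	if (--p->refcount == 0)
		free(p);
}

static void free_table(struct page *root)
{
	/* ... walk and release child tables here (omitted) ... */

	/* What the patch adds: exactly one reference, the root's own, is
	 * expected to remain; drop it instead of leaking the page. */
	assert(root->refcount == 1);
	put_page(root);
}

int main(void)
{
	struct page *root = calloc(1, sizeof(*root));

	if (!root)
		return 1;
	root->refcount = 1;
	free_table(root);
	return 0;
}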


@@ -110,6 +110,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };


@@ -694,45 +694,23 @@ out_unlock:
 
 static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 {
-	struct perf_event_attr attr = { };
-	struct perf_event *event;
-	struct arm_pmu *pmu = NULL;
+	struct arm_pmu *tmp, *pmu = NULL;
+	struct arm_pmu_entry *entry;
+	int cpu;
 
-	/*
-	 * Create a dummy event that only counts user cycles. As we'll never
-	 * leave this function with the event being live, it will never
-	 * count anything. But it allows us to probe some of the PMU
-	 * details. Yes, this is terrible.
-	 */
-	attr.type = PERF_TYPE_RAW;
-	attr.size = sizeof(attr);
-	attr.pinned = 1;
-	attr.disabled = 0;
-	attr.exclude_user = 0;
-	attr.exclude_kernel = 1;
-	attr.exclude_hv = 1;
-	attr.exclude_host = 1;
-	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
-	attr.sample_period = GENMASK(63, 0);
+	mutex_lock(&arm_pmus_lock);
 
-	event = perf_event_create_kernel_counter(&attr, -1, current,
-						 kvm_pmu_perf_overflow, &attr);
+	cpu = smp_processor_id();
+	list_for_each_entry(entry, &arm_pmus, entry) {
+		tmp = entry->arm_pmu;
 
-	if (IS_ERR(event)) {
-		pr_err_once("kvm: pmu event creation failed %ld\n",
-			    PTR_ERR(event));
-		return NULL;
+		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
+			pmu = tmp;
+			break;
+		}
 	}
 
-	if (event->pmu) {
-		pmu = to_arm_pmu(event->pmu);
-		if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
-		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
-			pmu = NULL;
-	}
-
-	perf_event_disable(event);
-	perf_event_release_kernel(event);
+	mutex_unlock(&arm_pmus_lock);
 
 	return pmu;
 }
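
The rewritten probe no longer touches perf at all: it walks the PMUs already registered on the arm_pmus list under arm_pmus_lock and returns the first instance whose supported_cpus mask covers the current CPU, or NULL if none does. A self-contained sketch of that selection logic (fake types and a fake table instead of the kernel's list):

/*
 * Self-contained sketch of the new selection logic: pick the first registered
 * PMU whose supported-CPU mask contains the current CPU. The types and the
 * registered_pmus array are fake stand-ins, not kernel interfaces.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_pmu {
	const char *name;
	uint64_t supported_cpus;	/* bit n set => CPU n is supported */
};

static const struct fake_pmu registered_pmus[] = {
	{ "pmu-little",	0x0f },		/* CPUs 0-3 */
	{ "pmu-big",	0xf0 },		/* CPUs 4-7 */
};

static const struct fake_pmu *probe_default_pmu(unsigned int this_cpu)
{
	for (size_t i = 0; i < sizeof(registered_pmus) / sizeof(registered_pmus[0]); i++) {
		if (registered_pmus[i].supported_cpus & (1ULL << this_cpu))
			return &registered_pmus[i];
	}

	return NULL;	/* no registered PMU supports this CPU */
}

int main(void)
{
	const struct fake_pmu *pmu = probe_default_pmu(5);

	printf("default PMU: %s\n", pmu ? pmu->name : "none");
	return 0;
}

As in the kernel change, a NULL result is possible on a CPU that no registered PMU supports; the caller shown in the next hunk treats that as -ENODEV.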
@@ -912,7 +890,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 		return -EBUSY;
 
 	if (!kvm->arch.arm_pmu) {
-		/* No PMU set, get the default one */
+		/*
+		 * No PMU set, get the default one.
+		 *
+		 * The observant among you will notice that the supported_cpus
+		 * mask does not get updated for the default PMU even though it
+		 * is quite possible the selected instance supports only a
+		 * subset of cores in the system. This is intentional, and
+		 * upholds the preexisting behavior on heterogeneous systems
+		 * where vCPUs can be scheduled on any core but the guest
+		 * counters could stop working.
+		 */
 		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
 		if (!kvm->arch.arm_pmu)
 			return -ENODEV;