KVM: x86/pmu: Limit the maximum number of supported Intel GP counters
The address range of the Intel architectural IA32_PMCx MSRs allows for a maximum of 8 GP counters, and KVM cannot address any more. Introduce a local macro (named KVM_INTEL_PMC_MAX_GENERIC) and use it consistently to refer to the number of counters supported by KVM, thus avoiding possible out-of-bounds accesses.

Suggested-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Like Xu <likexu@tencent.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Message-Id: <20220919091008.60695-2-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 8631ef59b6
commit 4f1fa2a1bb
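Before the diff, a small editorial sketch in plain C (not part of the patch; the two base addresses are the architectural values defined in arch/x86/include/asm/msr-index.h, the other macros mirror the ones the patch adds to kvm_host.h) showing the 8-register windows that the new macros pin down:

#include <stdio.h>

/* Editorial illustration only, not kernel code. */
#define MSR_ARCH_PERFMON_PERFCTR0	0x000000c1
#define MSR_ARCH_PERFMON_EVENTSEL0	0x00000186

#define KVM_INTEL_PMC_MAX_GENERIC	8
#define MSR_ARCH_PERFMON_PERFCTR_MAX	(MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
#define MSR_ARCH_PERFMON_EVENTSEL_MAX	(MSR_ARCH_PERFMON_EVENTSEL0 + KVM_INTEL_PMC_MAX_GENERIC - 1)

int main(void)
{
	/* Prints 0xc1..0xc8 and 0x186..0x18d: the MSR windows KVM will recognize. */
	printf("IA32_PMCx:        0x%x..0x%x\n",
	       MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR_MAX);
	printf("IA32_PERFEVTSELx: 0x%x..0x%x\n",
	       MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL_MAX);
	return 0;
}

Going past those windows would run the per-counter MSRs into other architectural MSR addresses, which is the conflict the new comment in kvm_host.h warns about.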
arch/x86/include/asm/kvm_host.h

@@ -501,6 +501,10 @@ struct kvm_pmc {
 	bool intr;
 };
 
+/* More counters may conflict with other existing Architectural MSRs */
+#define KVM_INTEL_PMC_MAX_GENERIC	8
+#define MSR_ARCH_PERFMON_PERFCTR_MAX	(MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
+#define MSR_ARCH_PERFMON_EVENTSEL_MAX	(MSR_ARCH_PERFMON_EVENTSEL0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
 #define KVM_PMC_MAX_FIXED	3
 struct kvm_pmu {
 	unsigned nr_arch_gp_counters;
@@ -516,7 +520,7 @@ struct kvm_pmu {
 	u64 reserved_bits;
 	u64 raw_event_mask;
 	u8 version;
-	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
+	struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
 	struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
 	struct irq_work irq_work;
 	DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
arch/x86/kvm/vmx/pmu_intel.c

@@ -56,7 +56,7 @@ static const struct x86_cpu_id vmx_icl_pebs_cpu[] = {
  * code. Each pmc, stored in kvm_pmc.idx field, is unique across
  * all perf counters (both gp and fixed). The mapping relationship
  * between pmc and perf counters is as the following:
- * * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
+ * * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
  *          [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
  * * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
  *          and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
@@ -617,7 +617,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
 
-	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
+	for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
 		pmu->gp_counters[i].type = KVM_PMC_GP;
 		pmu->gp_counters[i].vcpu = vcpu;
 		pmu->gp_counters[i].idx = i;
@@ -643,7 +643,7 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
 	struct kvm_pmc *pmc = NULL;
 	int i;
 
-	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
+	for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
 		pmc = &pmu->gp_counters[i];
 
 		pmc_stop_counter(pmc);
arch/x86/kvm/x86.c

@@ -1438,6 +1438,9 @@ static const u32 msrs_to_save_all[] = {
 	MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
 	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
 	MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+	MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
+
+	/* This part of MSRs should match KVM_INTEL_PMC_MAX_GENERIC. */
 	MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
 	MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
 	MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
@@ -1446,7 +1449,6 @@ static const u32 msrs_to_save_all[] = {
 	MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
 	MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
 	MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
-	MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
 
 	MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
 	MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
@@ -7031,14 +7033,14 @@ static void kvm_init_msr_list(void)
 			    intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
 				continue;
 			break;
-		case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 7:
+		case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX:
 			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
-			    min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
+			    min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
 				continue;
 			break;
-		case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 7:
+		case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX:
 			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
-			    min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
+			    min(KVM_INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
 				continue;
 			break;
 		case MSR_IA32_XFD:
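For readers skimming the last hunk, a standalone sketch (editorial, not kernel code; num_counters_gp stands in for kvm_pmu_cap.num_counters_gp, and the macro values mirror the patch) of the filter it adjusts: a general-purpose counter MSR survives kvm_init_msr_list() only if its index is below both KVM's compile-time limit and the counter count reported by the host PMU.

#define MSR_ARCH_PERFMON_PERFCTR0	0x000000c1
#define KVM_INTEL_PMC_MAX_GENERIC	8
#define MSR_ARCH_PERFMON_PERFCTR_MAX	(MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)

/* Returns 1 if the MSR names a GP counter that both KVM and the host back. */
int keep_gp_perfctr(unsigned int msr, unsigned int num_counters_gp)
{
	unsigned int limit = KVM_INTEL_PMC_MAX_GENERIC < num_counters_gp ?
			     KVM_INTEL_PMC_MAX_GENERIC : num_counters_gp;

	if (msr < MSR_ARCH_PERFMON_PERFCTR0 || msr > MSR_ARCH_PERFMON_PERFCTR_MAX)
		return 0;	/* not in the IA32_PMCx window at all */

	return (msr - MSR_ARCH_PERFMON_PERFCTR0) < limit;
}

The same check, with the EVENTSEL base, governs the IA32_PERFEVTSELx range; the only change in the hunk is that the case labels now end at the macro-derived *_MAX addresses instead of hard-coded offsets.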