KVM: x86: Use MTRR macros to define possible MTRR MSR ranges

Use the MTRR macros to identify the ranges of possible MTRR MSRs instead
of bounding the ranges with a mishmash of open coded values and unrelated
MSR indices.  Carving out the gap for the machine check MSRs in particular
is confusing, as it's easy to incorrectly think the case statement handles
MCE MSRs instead of skipping them.

Drop the range-based funneling of MSRs between the end of the MCE MSRs
and MTRR_DEF_TYPE, i.e. 0x2A0-0x2FF, and instead handle MTRR_DEF_TYPE as
the one-off case that it is.

Extract PAT (0x277) as well in anticipation of dropping PAT "handling"
from the MTRR code.

Keep the range-based handling for the variable+fixed MTRRs even though
capturing unknown MSRs 0x214-0x24F is arguably "wrong".  There is a gap in
the fixed MTRRs, 0x260-0x267, i.e. the MTRR code needs to filter out
unknown MSRs anyway, and using a single range generates marginally better
code for the big switch statement.

Reviewed-by: Kai Huang <kai.huang@intel.com>
Link: https://lore.kernel.org/r/20230511233351.635053-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
This commit is contained in:
Sean Christopherson 2023-05-11 16:33:48 -07:00
parent 9ae38b4fb1
commit 34a83deac3
2 changed files with 10 additions and 7 deletions

View File

@ -34,7 +34,7 @@ static bool is_mtrr_base_msr(unsigned int msr)
static struct kvm_mtrr_range *var_mtrr_msr_to_range(struct kvm_vcpu *vcpu, static struct kvm_mtrr_range *var_mtrr_msr_to_range(struct kvm_vcpu *vcpu,
unsigned int msr) unsigned int msr)
{ {
int index = (msr - 0x200) / 2; int index = (msr - MTRRphysBase_MSR(0)) / 2;
return &vcpu->arch.mtrr_state.var_ranges[index]; return &vcpu->arch.mtrr_state.var_ranges[index];
} }
@ -42,7 +42,7 @@ static struct kvm_mtrr_range *var_mtrr_msr_to_range(struct kvm_vcpu *vcpu,
static bool msr_mtrr_valid(unsigned msr) static bool msr_mtrr_valid(unsigned msr)
{ {
switch (msr) { switch (msr) {
case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1: case MTRRphysBase_MSR(0) ... MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1):
case MSR_MTRRfix64K_00000: case MSR_MTRRfix64K_00000:
case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000: case MSR_MTRRfix16K_A0000:
@ -88,7 +88,8 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
} }
/* variable MTRRs */ /* variable MTRRs */
WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR)); WARN_ON(!(msr >= MTRRphysBase_MSR(0) &&
msr <= MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1)));
mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu); mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
if ((msr & 1) == 0) { if ((msr & 1) == 0) {

View File

@ -3702,8 +3702,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1; return 1;
} }
break; break;
case 0x200 ... MSR_IA32_MC0_CTL2 - 1: case MSR_IA32_CR_PAT:
case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff: case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
case MSR_MTRRdefType:
return kvm_mtrr_set_msr(vcpu, msr, data); return kvm_mtrr_set_msr(vcpu, msr, data);
case MSR_IA32_APICBASE: case MSR_IA32_APICBASE:
return kvm_set_apic_base(vcpu, msr_info); return kvm_set_apic_base(vcpu, msr_info);
@ -4110,9 +4111,10 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset; msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset;
break; break;
} }
case MSR_IA32_CR_PAT:
case MSR_MTRRcap: case MSR_MTRRcap:
case 0x200 ... MSR_IA32_MC0_CTL2 - 1: case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff: case MSR_MTRRdefType:
return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
case 0xcd: /* fsb frequency */ case 0xcd: /* fsb frequency */
msr_info->data = 3; msr_info->data = 3;