KVM: nVMX: hyper-v: Cache VP assist page in 'struct kvm_vcpu_hv'

In preparation to enabling L2 TLB flush, cache VP assist page in
'struct kvm_vcpu_hv'. While at it, rename nested_enlightened_vmentry()
to nested_get_evmptr() and make it return eVMCS GPA directly.

No functional change intended.

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20221101145426.251680-26-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Authored by Vitaly Kuznetsov on 2022-11-01 15:54:03 +01:00; committed by Paolo Bonzini
parent d4baf1a9a5
commit 046f5756c4
6 changed files with 20 additions and 24 deletions
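
For readers skimming the diff, here is a minimal sketch of how the calling convention changes (illustrative only, distilled from the nested.c hunks below; the surrounding error handling is simplified):

	/* Before: the eVMCS GPA is returned through an out-parameter. */
	u64 evmcs_gpa;
	if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
		return EVMPTRLD_DISABLED;

	/* After: the GPA is the return value; callers check it with evmptr_is_valid(). */
	evmcs_gpa = nested_get_evmptr(vcpu);
	if (!evmptr_is_valid(evmcs_gpa))
		return EVMPTRLD_DISABLED;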


@@ -669,6 +669,8 @@ struct kvm_vcpu_hv {
 	/* Preallocated buffer for handling hypercalls passing sparse vCPU set */
 	u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS];
 
+	struct hv_vp_assist_page vp_assist_page;
+
 	struct {
 		u64 pa_page_gpa;
 		u64 vm_id;


@@ -900,13 +900,15 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
 
-bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
-			    struct hv_vp_assist_page *assist_page)
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
 {
-	if (!kvm_hv_assist_page_enabled(vcpu))
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+	if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu))
 		return false;
 
 	return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
-				      assist_page, sizeof(*assist_page));
+				      &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page));
 }
 EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);


@@ -108,8 +108,7 @@ int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
 
 bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
-bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
-			    struct hv_vp_assist_page *assist_page);
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu);
 
 static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu,
 						      int timer_index)


@@ -321,24 +321,17 @@ const struct evmcs_field vmcs_field_to_evmcs_1[] = {
 };
 const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1);
 
-bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa)
+u64 nested_get_evmptr(struct kvm_vcpu *vcpu)
 {
-	struct hv_vp_assist_page assist_page;
-
-	*evmcs_gpa = -1ull;
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
-	if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
-		return false;
+	if (unlikely(!kvm_hv_get_assist_page(vcpu)))
+		return EVMPTR_INVALID;
 
-	if (unlikely(!assist_page.enlighten_vmentry))
-		return false;
+	if (unlikely(!hv_vcpu->vp_assist_page.enlighten_vmentry))
+		return EVMPTR_INVALID;
 
-	if (unlikely(!evmptr_is_valid(assist_page.current_nested_vmcs)))
-		return false;
-
-	*evmcs_gpa = assist_page.current_nested_vmcs;
-
-	return true;
+	return hv_vcpu->vp_assist_page.current_nested_vmcs;
 }
 
 uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)


@@ -237,7 +237,7 @@ enum nested_evmptrld_status {
 	EVMPTRLD_ERROR,
 };
 
-bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
+u64 nested_get_evmptr(struct kvm_vcpu *vcpu);
 uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
 int nested_enable_evmcs(struct kvm_vcpu *vcpu,
 			uint16_t *vmcs_version);


@@ -1992,7 +1992,8 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
 	if (likely(!guest_cpuid_has_evmcs(vcpu)))
 		return EVMPTRLD_DISABLED;
 
-	if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
+	evmcs_gpa = nested_get_evmptr(vcpu);
+	if (!evmptr_is_valid(evmcs_gpa)) {
 		nested_release_evmcs(vcpu);
 		return EVMPTRLD_DISABLED;
 	}
@@ -5221,7 +5222,6 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 zero = 0;
 	gpa_t vmptr;
-	u64 evmcs_gpa;
 	int r;
 
 	if (!nested_vmx_check_permission(vcpu))
@@ -5247,7 +5247,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	 * vmx->nested.hv_evmcs but this shouldn't be a problem.
 	 */
 	if (likely(!guest_cpuid_has_evmcs(vcpu) ||
-		   !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
+		   !evmptr_is_valid(nested_get_evmptr(vcpu)))) {
 		if (vmptr == vmx->nested.current_vmptr)
 			nested_release_vmcs12(vcpu);