KVM/VMX: Add hv tlb range flush support

This patch registers the tlb_remote_flush_with_range callback and backs it
with the Hyper-V TLB range flush interface, so a remote TLB flush can target
a specific range of guest physical addresses instead of the entire EPT
address space.
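
For reference, a minimal sketch (not part of this patch) of how a caller on
the KVM MMU side could use the new hook: prefer the ranged flush when the
vendor callback is present, and fall back to a full remote TLB flush when it
is absent or fails. The helper name kvm_flush_remote_tlbs_with_range() is
assumed purely for illustration; struct kvm_tlb_range and the
tlb_remote_flush_with_range member come from the companion kvm_x86_ops patch
in this series.

    /* Illustrative sketch only, not code added by this patch. */
    static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
            struct kvm_tlb_range *range)
    {
        int ret = -ENOTSUPP;

        /* Ask the vendor module (VMX on Hyper-V) for a ranged flush. */
        if (range && kvm_x86_ops->tlb_remote_flush_with_range)
            ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);

        /* Any failure falls back to flushing everything. */
        if (ret)
            kvm_flush_remote_tlbs(kvm);
    }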

Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Lan Tianyu 2018-12-06 21:21:07 +08:00 committed by Paolo Bonzini
parent cc4edae4b9
commit 1f3a3e46cc
1 changed file with 44 additions and 17 deletions

@@ -421,7 +421,34 @@ static void check_ept_pointer_match(struct kvm *kvm)
     to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
 }
 
-static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
+int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+        void *data)
+{
+    struct kvm_tlb_range *range = data;
+
+    return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
+            range->pages);
+}
+
+static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
+        struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
+{
+    u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
+
+    /*
+     * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address
+     * of the base of EPT PML4 table, strip off EPT configuration
+     * information.
+     */
+    if (range)
+        return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK,
+                kvm_fill_hv_flush_list_func, (void *)range);
+    else
+        return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK);
+}
+
+static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+        struct kvm_tlb_range *range)
 {
     struct kvm_vcpu *vcpu;
     int ret = -ENOTSUPP, i;
@@ -431,29 +458,26 @@ static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
     if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
         check_ept_pointer_match(kvm);
 
-    /*
-     * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address of the
-     * base of EPT PML4 table, strip off EPT configuration information.
-     * If ept_pointer is invalid pointer, bypass the flush request.
-     */
     if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
         kvm_for_each_vcpu(i, vcpu, kvm) {
-            u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
-
-            if (!VALID_PAGE(ept_pointer))
-                continue;
-
-            ret |= hyperv_flush_guest_mapping(
-                ept_pointer & PAGE_MASK);
+            /* If ept_pointer is invalid pointer, bypass flush request. */
+            if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
+                ret |= __hv_remote_flush_tlb_with_range(
+                    kvm, vcpu, range);
         }
     } else {
-        ret = hyperv_flush_guest_mapping(
-                to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK);
+        ret = __hv_remote_flush_tlb_with_range(kvm,
+                kvm_get_vcpu(kvm, 0), range);
     }
 
     spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
     return ret;
 }
+
+static int hv_remote_flush_tlb(struct kvm *kvm)
+{
+    return hv_remote_flush_tlb_with_range(kvm, NULL);
+}
 #endif /* IS_ENABLED(CONFIG_HYPERV) */
 
 /*
@@ -7554,8 +7578,11 @@ static __init int hardware_setup(void)
 
 #if IS_ENABLED(CONFIG_HYPERV)
     if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
-        && enable_ept)
-        kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
+        && enable_ept) {
+        kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
+        kvm_x86_ops->tlb_remote_flush_with_range =
+                hv_remote_flush_tlb_with_range;
+    }
 #endif
 
     if (!cpu_has_vmx_ple()) {