X86/nVMX: handle_vmon: Read 4 bytes from guest memory
Read the data directly from guest memory instead of the map->read->unmap
sequence. This also avoids using kvm_vcpu_gpa_to_page() and kmap(), which
assume that there is a "struct page" for guest memory.

Suggested-by: Jim Mattson <jmattson@google.com>
Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
Reviewed-by: Jim Mattson <jmattson@google.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 2e408936b6
parent 191c8137a9
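For context, here is a minimal, illustrative sketch (not the kernel source itself) of the two access patterns the message above contrasts: mapping the guest page just to read 4 bytes versus copying them directly with kvm_read_guest(). The helper names read_vmxon_revision_mapped() and read_vmxon_revision_direct() are hypothetical and exist only for this illustration; only the KVM/highmem APIs they call are real. The authoritative change is the diff that follows.

#include <linux/kvm_host.h>
#include <linux/highmem.h>

/* Old pattern: requires a "struct page" backing the guest frame. */
static int read_vmxon_revision_mapped(struct kvm_vcpu *vcpu, gpa_t vmptr,
                                      u32 *revision)
{
        struct page *page = kvm_vcpu_gpa_to_page(vcpu, vmptr);

        if (is_error_page(page))
                return -EFAULT;

        *revision = *(u32 *)kmap(page);  /* map a whole page to read 4 bytes */
        kunmap(page);
        kvm_release_page_clean(page);
        return 0;
}

/* New pattern: copy the 4 bytes straight out of guest memory. */
static int read_vmxon_revision_direct(struct kvm_vcpu *vcpu, gpa_t vmptr,
                                      u32 *revision)
{
        return kvm_read_guest(vcpu->kvm, vmptr, revision, sizeof(*revision));
}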
@@ -4264,7 +4264,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 {
        int ret;
        gpa_t vmptr;
-       struct page *page;
+       uint32_t revision;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
                | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -4313,18 +4313,10 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
        if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
                return nested_vmx_failInvalid(vcpu);

-       page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
-       if (is_error_page(page))
+       if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
+           revision != VMCS12_REVISION)
                return nested_vmx_failInvalid(vcpu);
-
-       if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-               kunmap(page);
-               kvm_release_page_clean(page);
-               return nested_vmx_failInvalid(vcpu);
-       }
-       kunmap(page);
-       kvm_release_page_clean(page);

        vmx->nested.vmxon_ptr = vmptr;
        ret = enter_vmx_operation(vcpu);
        if (ret)