KVM: x86: merge handle_mmio_page_fault and handle_mmio_page_fault_common
They are exactly the same, except that handle_mmio_page_fault has an
unused argument and a call to WARN_ON.  Remove the unused argument from
the callers, and move the warning to (the former)
handle_mmio_page_fault_common.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a3eaa8649e
commit 450869d6db
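The merge works because the kernel's WARN_ON(cond) evaluates to its condition, so the warning the wrapper used to emit after the fact can be folded straight into the existing reserved-spte check. Below is a minimal userspace sketch of that idiom; the WARN_ON mock and the check() function are illustrative stand-ins, not code from this commit:

#include <stdio.h>

/* Userspace mock of the kernel's WARN_ON(): print a warning when the
 * condition is true and evaluate to that condition, so it can be used
 * directly inside an if (). */
#define WARN_ON(cond) ({					\
	int __ret_warn_on = !!(cond);				\
	if (__ret_warn_on)					\
		fprintf(stderr, "WARNING at %s:%d\n",		\
			__FILE__, __LINE__);			\
	__ret_warn_on;						\
})

enum { RET_MMIO_PF_RETRY = 0, RET_MMIO_PF_BUG = -1 };

/* Same shape as the merged handle_mmio_page_fault(): the warning
 * fires exactly where the bug is detected, so no separate wrapper
 * is needed just to emit it. */
static int check(int reserved)
{
	if (WARN_ON(reserved))
		return RET_MMIO_PF_BUG;
	return RET_MMIO_PF_RETRY;
}

int main(void)
{
	printf("clean path: %d\n", check(0));	/* no warning printed */
	printf("bug path:   %d\n", check(1));	/* prints a WARNING line */
	return 0;
}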
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
@@ -3359,7 +3359,7 @@ exit:
 	return reserved;
 }
 
-int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	u64 spte;
 	bool reserved;
@@ -3368,7 +3368,7 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 		return RET_MMIO_PF_EMULATE;
 
 	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
-	if (unlikely(reserved))
+	if (WARN_ON(reserved))
 		return RET_MMIO_PF_BUG;
 
 	if (is_mmio_spte(spte)) {
@@ -3392,17 +3392,7 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 	 */
 	return RET_MMIO_PF_RETRY;
 }
-EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
-
-static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
-				  u32 error_code, bool direct)
-{
-	int ret;
-
-	ret = handle_mmio_page_fault_common(vcpu, addr, direct);
-	WARN_ON(ret == RET_MMIO_PF_BUG);
-	return ret;
-}
+EXPORT_SYMBOL_GPL(handle_mmio_page_fault);
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 				u32 error_code, bool prefault)
@@ -3413,7 +3403,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
 
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, gva, error_code, true);
+		r = handle_mmio_page_fault(vcpu, gva, true);
 
 		if (likely(r != RET_MMIO_PF_INVALID))
 			return r;
@@ -3503,7 +3493,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, gpa, error_code, true);
+		r = handle_mmio_page_fault(vcpu, gpa, true);
 
 		if (likely(r != RET_MMIO_PF_INVALID))
 			return r;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
@@ -56,13 +56,13 @@ void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
 /*
- * Return values of handle_mmio_page_fault_common:
+ * Return values of handle_mmio_page_fault:
  * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
  *			directly.
  * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
  *			fault path update the mmio spte.
  * RET_MMIO_PF_RETRY: let CPU fault again on the address.
- * RET_MMIO_PF_BUG: bug is detected.
+ * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
  */
 enum {
 	RET_MMIO_PF_EMULATE = 1,
@@ -71,7 +71,7 @@ enum {
 	RET_MMIO_PF_BUG = -1
 };
 
-int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
+int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
@@ -705,8 +705,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, addr, error_code,
-					   mmu_is_nested(vcpu));
+		r = handle_mmio_page_fault(vcpu, addr, mmu_is_nested(vcpu));
 		if (likely(r != RET_MMIO_PF_INVALID))
 			return r;
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
@@ -5908,7 +5908,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
-	ret = handle_mmio_page_fault_common(vcpu, gpa, true);
+	ret = handle_mmio_page_fault(vcpu, gpa, true);
 	if (likely(ret == RET_MMIO_PF_EMULATE))
 		return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
 					      EMULATE_DONE;
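As a usage note, the RET_MMIO_PF_* contract documented in mmu.h above implies that callers of the merged function dispatch roughly as follows. This is a sketch only: handle_reserved_bit_fault(), emulate_mmio_access() and slow_path_fixup() are hypothetical names for illustration, not functions from this commit.

/* Hypothetical caller acting on the documented return-value contract. */
static int handle_reserved_bit_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
	int r = handle_mmio_page_fault(vcpu, addr, direct);

	switch (r) {
	case RET_MMIO_PF_EMULATE:	/* real MMIO fault: emulate the access */
		return emulate_mmio_access(vcpu, addr);
	case RET_MMIO_PF_INVALID:	/* invalid spte: let the normal page
					 * fault path update the mmio spte */
		return slow_path_fixup(vcpu, addr);
	case RET_MMIO_PF_RETRY:		/* let the CPU fault again */
		return 1;
	default:			/* RET_MMIO_PF_BUG: the WARN was already
					 * printed inside handle_mmio_page_fault() */
		return -EINVAL;
	}
}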