KVM: x86: improve reexecute_instruction
The current reexecute_instruction() does not reliably detect failed instruction emulation: it lets the guest retry every instruction except accesses to an error pfn. Some cases, however, are nested write-protection faults, e.g. when the page we want to write is itself used as a PDE and so chains to itself. In such cases emulation can never make progress, so we should stop emulating and report the case to userspace.

Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:
parent 95b3cf69bd
commit 93c05d3ef2
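The core of the fix is the self-mapping check shown in the paging_tmpl.h hunk below: a write fault cannot be fixed by emulation when the faulting gfn is one of the gfns backing the guest's own page-table walk. What follows is a minimal, standalone C sketch of that check, not the kernel code itself: the function and parameter names are illustrative, the two-level walk is an assumption, and KVM_PAGES_PER_HPAGE is replaced by a plain parameter.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* Sketch of the detection idea: compare the fault gfn against the
     * gfn of each table page visited by the guest walk. */
    static bool is_self_change_mapping(gfn_t fault_gfn, const gfn_t *table_gfn,
                                       int level, int max_level,
                                       gfn_t pages_per_hpage,
                                       bool *write_fault_to_shadow_pgtable)
    {
        gfn_t mask = ~(pages_per_hpage - 1);
        bool self_changed = false;

        for (; level <= max_level; level++) {
            gfn_t gfn = fault_gfn ^ table_gfn[level - 1];

            /* Same huge-page region as a table page: forbid large mapping. */
            self_changed |= !(gfn & mask);
            /* Exactly the page holding its own mapping: emulation is hopeless. */
            *write_fault_to_shadow_pgtable |= !gfn;
        }
        return self_changed;
    }

    int main(void)
    {
        gfn_t table_gfn[2] = { 0x1000, 0x2000 }; /* gfns of the walked tables */
        bool self_pt = false;

        /* Fault gfn equals the level-1 table gfn: the page maps itself. */
        bool self = is_self_change_mapping(0x1000, table_gfn, 1, 2, 512, &self_pt);
        printf("self_changed=%d write_fault_to_shadow_pgtable=%d\n", self, self_pt);
        return 0;
    }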
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -497,6 +497,13 @@ struct kvm_vcpu_arch {
 		u64 msr_val;
 		struct gfn_to_hva_cache data;
 	} pv_eoi;
+
+	/*
+	 * Indicate whether the access faults on its page table in guest
+	 * which is set when fix page fault and used to detect unhandeable
+	 * instruction.
+	 */
+	bool write_fault_to_shadow_pgtable;
 };
 
 struct kvm_lpage_info {
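In effect the new field is a one-shot hint: the shadow page-fault path sets it when the faulting write lands on a gfn that is currently serving as one of the guest's own page tables, and the emulator consumes (and immediately clears) it to decide whether a failed emulation is worth retrying, as the x86.c hunks further down show.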
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -497,26 +497,34 @@ out_gpte_changed:
  * created when kvm establishes shadow page table that stop kvm using large
  * page size. Do it early can avoid unnecessary #PF and emulation.
  *
+ * @write_fault_to_shadow_pgtable will return true if the fault gfn is
+ * currently used as its page table.
+ *
  * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok
  * since the PDPT is always shadowed, that means, we can not use large page
  * size to map the gfn which is used as PDPT.
  */
 static bool
 FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
-			      struct guest_walker *walker, int user_fault)
+			      struct guest_walker *walker, int user_fault,
+			      bool *write_fault_to_shadow_pgtable)
 {
 	int level;
 	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
+	bool self_changed = false;
 
 	if (!(walker->pte_access & ACC_WRITE_MASK ||
 	      (!is_write_protection(vcpu) && !user_fault)))
 		return false;
 
-	for (level = walker->level; level <= walker->max_level; level++)
-		if (!((walker->gfn ^ walker->table_gfn[level - 1]) & mask))
-			return true;
+	for (level = walker->level; level <= walker->max_level; level++) {
+		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
+
+		self_changed |= !(gfn & mask);
+		*write_fault_to_shadow_pgtable |= !gfn;
+	}
 
-	return false;
+	return self_changed;
 }
 
 /*
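Note the change in the loop's shape: the old version returned at the first table gfn sharing a huge-page region with the fault gfn, whereas the new loop always visits every level, accumulating self_changed and OR-ing into *write_fault_to_shadow_pgtable. That matters because the exact-match test (!gfn) must be evaluated for all levels even when a same-region match is found early.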
@@ -544,7 +552,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	int level = PT_PAGE_TABLE_LEVEL;
 	int force_pt_level;
 	unsigned long mmu_seq;
-	bool map_writable;
+	bool map_writable, is_self_change_mapping;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
@@ -572,9 +580,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		return 0;
 	}
 
+	vcpu->arch.write_fault_to_shadow_pgtable = false;
+
+	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
+	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
+
 	if (walker.level >= PT_DIRECTORY_LEVEL)
 		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
-		   || FNAME(is_self_change_mapping)(vcpu, &walker, user_fault);
+		   || is_self_change_mapping;
 	else
 		force_pt_level = 1;
 	if (!force_pt_level) {
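Ordering is significant in this hunk: because FNAME(is_self_change_mapping) only ORs into *write_fault_to_shadow_pgtable and never clears it, the caller must reset vcpu->arch.write_fault_to_shadow_pgtable to false before the call, as done above.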
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4751,7 +4751,8 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 	return r;
 }
 
-static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2)
+static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
+				  bool write_fault_to_shadow_pgtable)
 {
 	gpa_t gpa = cr2;
 	pfn_t pfn;
@@ -4808,7 +4809,13 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2)
 	 * guest to let CPU execute the instruction.
 	 */
 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
-	return true;
+
+	/*
+	 * If the access faults on its page table, it can not
+	 * be fixed by unprotecting shadow page and it should
+	 * be reported to userspace.
+	 */
+	return !write_fault_to_shadow_pgtable;
 }
 
 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
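The behavioral change in reexecute_instruction() reduces to one extra condition after the unprotect. A minimal sketch of that decision follows; the helper names are hypothetical and kvm_mmu_unprotect_page() is stubbed out.

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder for kvm_mmu_unprotect_page(); the real unprotect logic
     * is elided, only the decision that follows it is sketched. */
    static void unprotect_shadow_page(void)
    {
    }

    /* Retrying in the guest only helps if the faulting write did NOT
     * target the page table that maps the write itself; otherwise
     * unprotection cannot break the cycle and the failure must be
     * reported to userspace. */
    static bool should_reexecute(bool write_fault_to_shadow_pgtable)
    {
        unprotect_shadow_page();
        return !write_fault_to_shadow_pgtable;
    }

    int main(void)
    {
        printf("plain write-protect fault: reexecute=%d\n", should_reexecute(false));
        printf("fault on own page table:   reexecute=%d\n", should_reexecute(true));
        return 0;
    }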
@@ -4867,7 +4874,13 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 	int r;
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
 	bool writeback = true;
+	bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
 
+	/*
+	 * Clear write_fault_to_shadow_pgtable here to ensure it is
+	 * never reused.
+	 */
+	vcpu->arch.write_fault_to_shadow_pgtable = false;
 	kvm_clear_exception_queue(vcpu);
 
 	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
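Snapshotting the flag into the local write_fault_to_spt and clearing the per-vcpu copy right away is what makes the hint one-shot: nothing later in this path, or in a subsequent emulation, can observe a stale value left over from an earlier page fault.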
@@ -4886,7 +4899,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		if (r != EMULATION_OK) {
 			if (emulation_type & EMULTYPE_TRAP_UD)
 				return EMULATE_FAIL;
-			if (reexecute_instruction(vcpu, cr2))
+			if (reexecute_instruction(vcpu, cr2,
+						  write_fault_to_spt))
 				return EMULATE_DONE;
 			if (emulation_type & EMULTYPE_SKIP)
 				return EMULATE_FAIL;
@@ -4916,7 +4930,7 @@ restart:
 		return EMULATE_DONE;
 
 	if (r == EMULATION_FAILED) {
-		if (reexecute_instruction(vcpu, cr2))
+		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt))
 			return EMULATE_DONE;
 
 		return handle_emulation_failure(vcpu);