KVM: MMU: cleanup FNAME(invlpg)
Directly use mmu_page_zap_pte() to zap the spte in FNAME(invlpg), and remove the code that was duplicated between FNAME(invlpg) and FNAME(sync_page).

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent d01f8d5e02
commit 505aef8f30
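The diff below follows one pattern throughout: the zap helper now reports whether it tore down a live mapping, and the caller decides to flush the TLB, so FNAME(invlpg) no longer needs its own need_flush bookkeeping or its own copy of the zap logic. A minimal stand-alone C sketch of that pattern follows; the types and names (zap_pte, invalidate_entry, PRESENT_BIT) are simplified illustrations, not the kernel's.

/* sketch.c - illustrative only; a real spte is a hardware-format PTE
 * and the real helper also handles large pages and MMIO sptes. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PRESENT_BIT 1ULL

/* Stand-in for mmu_page_zap_pte(): clear one entry and report whether
 * anything mapped was actually torn down (i.e. a TLB flush is needed). */
static bool zap_pte(uint64_t *pte)
{
	if (*pte & PRESENT_BIT) {
		*pte = 0;
		return true;	/* caller must flush stale translations */
	}
	*pte = 0;		/* nothing was mapped; no flush required */
	return false;
}

/* Stand-in for the cleaned-up FNAME(invlpg) loop body: instead of
 * open-coding the zap and carrying a need_flush flag, it asks the
 * helper and flushes only when told to. */
static void invalidate_entry(uint64_t *pte)
{
	if (zap_pte(pte))
		puts("flush TLB");
}

int main(void)
{
	uint64_t mapped = PRESENT_BIT, empty = 0;

	invalidate_entry(&mapped);	/* prints "flush TLB" */
	invalidate_entry(&empty);	/* nothing to flush */
	return 0;
}

Returning the flush requirement from the helper keeps the teardown accounting in one place, which is what lets the duplicated block in FNAME(invlpg) go away.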
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1809,7 +1809,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	}
 }
 
-static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
+static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 			     u64 *spte)
 {
 	u64 pte;
@@ -1817,17 +1817,21 @@ static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 
 	pte = *spte;
 	if (is_shadow_present_pte(pte)) {
-		if (is_last_spte(pte, sp->role.level))
+		if (is_last_spte(pte, sp->role.level)) {
 			drop_spte(kvm, spte);
-		else {
+			if (is_large_pte(pte))
+				--kvm->stat.lpages;
+		} else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
 			drop_parent_pte(child, spte);
 		}
-	} else if (is_mmio_spte(pte))
+		return true;
+	}
+
+	if (is_mmio_spte(pte))
 		mmu_spte_clear_no_track(spte);
 
-	if (is_large_pte(pte))
-		--kvm->stat.lpages;
+	return false;
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -656,6 +656,18 @@ out_unlock:
 	return 0;
 }
 
+static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
+{
+	int offset = 0;
+
+	WARN_ON(sp->role.level != 1);
+
+	if (PTTYPE == 32)
+		offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
+	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+}
+
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
@@ -663,7 +675,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
-	int need_flush = 0;
 
 	vcpu_clear_mmio_info(vcpu, gva);
 
@@ -675,36 +686,20 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 
 		sp = page_header(__pa(sptep));
 		if (is_last_spte(*sptep, level)) {
-			int offset, shift;
-
 			if (!sp->unsync)
 				break;
 
-			shift = PAGE_SHIFT -
-				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
-			offset = sp->role.quadrant << shift;
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-			if (is_shadow_present_pte(*sptep)) {
-				if (is_large_pte(*sptep))
-					--vcpu->kvm->stat.lpages;
-				drop_spte(vcpu->kvm, sptep);
-				need_flush = 1;
-			} else if (is_mmio_spte(*sptep))
-				mmu_spte_clear_no_track(sptep);
-
-			break;
+			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
+				kvm_flush_remote_tlbs(vcpu->kvm);
 		}
 
 		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 			break;
 	}
 
-	if (need_flush)
-		kvm_flush_remote_tlbs(vcpu->kvm);
-
 	atomic_inc(&vcpu->kvm->arch.invlpg_counter);
 
 	spin_unlock(&vcpu->kvm->mmu_lock);
@@ -769,19 +764,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
-	int i, offset, nr_present;
+	int i, nr_present = 0;
 	bool host_writable;
 	gpa_t first_pte_gpa;
 
-	offset = nr_present = 0;
-
 	/* direct kvm_mmu_page can not be unsync. */
 	BUG_ON(sp->role.direct);
 
-	if (PTTYPE == 32)
-		offset = sp->role.quadrant << PT64_LEVEL_BITS;
-
-	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
 		unsigned pte_access;