KVM: MMU: Replace walk_shadow() by for_each_shadow_entry() in fetch()

Effectively reverting to the pre-walk_shadow() version -- but now with the
reusable for_each().

Signed-off-by: Avi Kivity <avi@redhat.com>
commit e7a04c99b5
parent 9f652d21c3
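The for_each_shadow_entry() iterator that fetch() switches to was added earlier in this series as a simpler alternative to the walk_shadow() callback. Roughly, from the arch/x86/kvm/mmu.c of this era (a sketch; the shadow_walk_* helper bodies are elided):

	struct kvm_shadow_walk_iterator {
		u64 addr;		/* guest address being mapped */
		hpa_t shadow_addr;	/* host-physical address of the current shadow table */
		int level;		/* paging level of the current entry */
		u64 *sptep;		/* pointer to the current shadow pte */
		unsigned index;		/* index of sptep within its table */
	};

	#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
		for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
		     shadow_walk_okay(&(_walker));			\
		     shadow_walk_next(&(_walker)))

Each pass of the loop exposes the current level and a pointer to the shadow pte at that level -- exactly the arguments the old shadow_walk_entry() callback received.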
@@ -283,91 +283,79 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
-static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
-				    struct kvm_vcpu *vcpu, u64 addr,
-				    u64 *sptep, int level)
-{
-	struct shadow_walker *sw =
-		container_of(_sw, struct shadow_walker, walker);
-	struct guest_walker *gw = sw->guest_walker;
-	unsigned access = gw->pt_access;
-	struct kvm_mmu_page *shadow_page;
-	u64 spte;
-	int metaphysical;
-	gfn_t table_gfn;
-	int r;
-	pt_element_t curr_pte;
-
-	if (level == PT_PAGE_TABLE_LEVEL
-	    || (sw->largepage && level == PT_DIRECTORY_LEVEL)) {
-		mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
-			     sw->user_fault, sw->write_fault,
-			     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
-			     sw->ptwrite, sw->largepage,
-			     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
-			     gw->gfn, sw->pfn, false);
-		sw->sptep = sptep;
-		return 1;
-	}
-
-	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
-		return 0;
-
-	if (is_large_pte(*sptep)) {
-		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
-		kvm_flush_remote_tlbs(vcpu->kvm);
-		rmap_remove(vcpu->kvm, sptep);
-	}
-
-	if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) {
-		metaphysical = 1;
-		if (!is_dirty_pte(gw->ptes[level - 1]))
-			access &= ~ACC_WRITE_MASK;
-		table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
-	} else {
-		metaphysical = 0;
-		table_gfn = gw->table_gfn[level - 2];
-	}
-	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
-				       metaphysical, access, sptep);
-	if (!metaphysical) {
-		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
-					  &curr_pte, sizeof(curr_pte));
-		if (r || curr_pte != gw->ptes[level - 2]) {
-			kvm_mmu_put_page(shadow_page, sptep);
-			kvm_release_pfn_clean(sw->pfn);
-			sw->sptep = NULL;
-			return 1;
-		}
-	}
-
-	spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK
-		| PT_WRITABLE_MASK | PT_USER_MASK;
-	*sptep = spte;
-	return 0;
-}
-
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
-			 struct guest_walker *guest_walker,
+			 struct guest_walker *gw,
 			 int user_fault, int write_fault, int largepage,
 			 int *ptwrite, pfn_t pfn)
 {
-	struct shadow_walker walker = {
-		.walker = { .entry = FNAME(shadow_walk_entry), },
-		.guest_walker = guest_walker,
-		.user_fault = user_fault,
-		.write_fault = write_fault,
-		.largepage = largepage,
-		.ptwrite = ptwrite,
-		.pfn = pfn,
-	};
+	unsigned access = gw->pt_access;
+	struct kvm_mmu_page *shadow_page;
+	u64 spte, *sptep;
+	int metaphysical;
+	gfn_t table_gfn;
+	int r;
+	int level;
+	pt_element_t curr_pte;
+	struct kvm_shadow_walk_iterator iterator;
 
-	if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1]))
+	if (!is_present_pte(gw->ptes[gw->level - 1]))
 		return NULL;
 
-	walk_shadow(&walker.walker, vcpu, addr);
+	for_each_shadow_entry(vcpu, addr, iterator) {
+		level = iterator.level;
+		sptep = iterator.sptep;
+		if (level == PT_PAGE_TABLE_LEVEL
+		    || (largepage && level == PT_DIRECTORY_LEVEL)) {
+			mmu_set_spte(vcpu, sptep, access,
+				     gw->pte_access & access,
+				     user_fault, write_fault,
+				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
+				     ptwrite, largepage,
+				     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
+				     gw->gfn, pfn, false);
+			break;
+		}
 
-	return walker.sptep;
+		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+			continue;
+
+		if (is_large_pte(*sptep)) {
+			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+			kvm_flush_remote_tlbs(vcpu->kvm);
+			rmap_remove(vcpu->kvm, sptep);
+		}
+
+		if (level == PT_DIRECTORY_LEVEL
+		    && gw->level == PT_DIRECTORY_LEVEL) {
+			metaphysical = 1;
+			if (!is_dirty_pte(gw->ptes[level - 1]))
+				access &= ~ACC_WRITE_MASK;
+			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
+		} else {
+			metaphysical = 0;
+			table_gfn = gw->table_gfn[level - 2];
+		}
+		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+					       metaphysical, access, sptep);
+		if (!metaphysical) {
+			r = kvm_read_guest_atomic(vcpu->kvm,
+						  gw->pte_gpa[level - 2],
+						  &curr_pte, sizeof(curr_pte));
+			if (r || curr_pte != gw->ptes[level - 2]) {
+				kvm_mmu_put_page(shadow_page, sptep);
+				kvm_release_pfn_clean(pfn);
+				sptep = NULL;
+				break;
+			}
+		}
+
+		spte = __pa(shadow_page->spt)
+			| PT_PRESENT_MASK | PT_ACCESSED_MASK
+			| PT_WRITABLE_MASK | PT_USER_MASK;
+		*sptep = spte;
+	}
+
+	return sptep;
 }
 
 /*