KVM: Remove unused slot_bitmap from kvm_mmu_page

Not needed any more: since kvm_mmu_slot_remove_write_access() became rmap based in the parent commit, nothing reads slot_bitmap, so drop the field and the code that maintained it.

Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Takuya Yoshikawa 2013-01-08 19:45:28 +09:00 committed by Gleb Natapov
parent b99db1d352
commit e12091ce7b
3 changed files with 0 additions and 22 deletions

Documentation/virtual/kvm/mmu.txt

@@ -187,13 +187,6 @@ Shadow pages contain the following information:
     perform a reverse map from a pte to a gfn. When role.direct is set, any
     element of this array can be calculated from the gfn field when used, in
     this case, the array of gfns is not allocated. See role.direct and gfn.
-  slot_bitmap:
-    A bitmap containing one bit per memory slot. If the page contains a pte
-    mapping a page from memory slot n, then bit n of slot_bitmap will be set
-    (if a page is aliased among several slots, then it is not guaranteed that
-    all slots will be marked).
-    Used during dirty logging to avoid scanning a shadow page if none if its
-    pages need tracking.
   root_count:
     A counter keeping track of how many hardware registers (guest cr3 or
     pdptrs) are now pointing at the page. While this counter is nonzero, the
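
For context on what the removed text described: before the parent commit b99db1d352 made kvm_mmu_slot_remove_write_access() rmap based, that dirty-logging scan was the only reader of slot_bitmap. The sketch below is an approximation of that old consumer, written here for illustration; it is not code from this patch, and the loop body is elided.

/*
 * Approximate sketch of the old, pre-rmap consumer of slot_bitmap
 * (illustrative only; not taken verbatim from the kernel tree).
 * A shadow page whose bit for "slot" is clear contains no sptes for
 * that memory slot and can be skipped, which is the optimization the
 * removed documentation paragraph describes.
 */
static void old_slot_remove_write_access(struct kvm *kvm, int slot)
{
        struct kvm_mmu_page *sp;

        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                if (!test_bit(slot, sp->slot_bitmap))
                        continue;       /* no spte here maps this slot */

                /* ... walk sp->spt[] and write-protect present leaf sptes ... */
        }

        kvm_flush_remote_tlbs(kvm);
}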

arch/x86/include/asm/kvm_host.h

@@ -219,11 +219,6 @@ struct kvm_mmu_page {
         u64 *spt;
         /* hold the gfn of each spte inside spt */
         gfn_t *gfns;
-        /*
-         * One bit set per slot which has memory
-         * in this shadow page.
-         */
-        DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
         bool unsync;
         int root_count; /* Currently serving as active root */
         unsigned int unsync_children;
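
The field being deleted is small but carried by every shadow page. As a rough illustration (a generic expansion of the kernel's DECLARE_BITMAP macro, not code from this patch), the array it reserved looks like this:

/*
 * Rough equivalent of the removed member: DECLARE_BITMAP(name, bits)
 * reserves one unsigned long per BITS_PER_LONG bits, rounded up.
 */
unsigned long slot_bitmap[(KVM_MEM_SLOTS_NUM + BITS_PER_LONG - 1) / BITS_PER_LONG];

so dropping it trims a few words from every struct kvm_mmu_page on the active_mmu_pages list.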

arch/x86/kvm/mmu.c

@@ -1522,7 +1522,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
-        bitmap_zero(sp->slot_bitmap, KVM_MEM_SLOTS_NUM);
         sp->parent_ptes = 0;
         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
         kvm_mod_used_mmu_pages(vcpu->kvm, +1);
@@ -2183,14 +2182,6 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
 
-static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
-{
-        int slot = memslot_id(kvm, gfn);
-        struct kvm_mmu_page *sp = page_header(__pa(pte));
-
-        __set_bit(slot, sp->slot_bitmap);
-}
-
 /*
  * The function is based on mtrr_type_lookup() in
  * arch/x86/kernel/cpu/mtrr/generic.c
@@ -2472,7 +2463,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                 ++vcpu->kvm->stat.lpages;
 
         if (is_shadow_present_pte(*sptep)) {
-                page_header_update_slot(vcpu->kvm, sptep, gfn);
                 if (!was_rmapped) {
                         rmap_count = rmap_add(vcpu, sptep, gfn);
                         if (rmap_count > RMAP_RECYCLE_THRESHOLD)
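
With this last call gone, nothing sets bits in slot_bitmap either: dirty-logging write protection now relies on the per-memslot rmap that rmap_add(), just below the removed line, already populates. The following is a hedged sketch of the rmap-based direction taken by the parent commit; the function name and loop body are approximations, not code from this patch.

/*
 * Hedged sketch of the rmap-based replacement: instead of scanning
 * every page on active_mmu_pages and testing slot_bitmap, walk the
 * gfn range of the memslot being logged and write-protect the sptes
 * chained on each gfn's rmap head. (Approximation for illustration.)
 */
static void rmap_based_remove_write_access(struct kvm *kvm,
                                           struct kvm_memory_slot *memslot)
{
        gfn_t gfn;

        for (gfn = memslot->base_gfn;
             gfn < memslot->base_gfn + memslot->npages; ++gfn) {
                /* ... find the rmap list for this gfn in the memslot and
                 * write-protect every last-level spte on it ... */
        }

        kvm_flush_remote_tlbs(kvm);
}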