KVM: MMU: Make the way of accessing lpage_info more generic

Large page information has two elements, but only one of them, write_count,
is accessed through a helper function.

This patch replaces that helper function with a more generic one which returns
the newly named kvm_lpage_info structure, and uses it to access the other
element, rmap_pde.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
Takuya Yoshikawa 2010-12-07 12:59:07 +09:00 committed by Avi Kivity
parent 443381a828
commit d4dbf47009
2 changed files with 30 additions and 32 deletions

View File

@ -477,10 +477,10 @@ static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
} }
/* /*
* Return the pointer to the largepage write count for a given * Return the pointer to the large page information for a given gfn,
* gfn, handling slots that are not large page aligned. * handling slots that are not large page aligned.
*/ */
static int *slot_largepage_idx(gfn_t gfn, static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
struct kvm_memory_slot *slot, struct kvm_memory_slot *slot,
int level) int level)
{ {
@ -488,35 +488,35 @@ static int *slot_largepage_idx(gfn_t gfn,
idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
(slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
return &slot->lpage_info[level - 2][idx].write_count; return &slot->lpage_info[level - 2][idx];
} }
static void account_shadowed(struct kvm *kvm, gfn_t gfn) static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{ {
struct kvm_memory_slot *slot; struct kvm_memory_slot *slot;
int *write_count; struct kvm_lpage_info *linfo;
int i; int i;
slot = gfn_to_memslot(kvm, gfn); slot = gfn_to_memslot(kvm, gfn);
for (i = PT_DIRECTORY_LEVEL; for (i = PT_DIRECTORY_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
write_count = slot_largepage_idx(gfn, slot, i); linfo = lpage_info_slot(gfn, slot, i);
*write_count += 1; linfo->write_count += 1;
} }
} }
static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{ {
struct kvm_memory_slot *slot; struct kvm_memory_slot *slot;
int *write_count; struct kvm_lpage_info *linfo;
int i; int i;
slot = gfn_to_memslot(kvm, gfn); slot = gfn_to_memslot(kvm, gfn);
for (i = PT_DIRECTORY_LEVEL; for (i = PT_DIRECTORY_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
write_count = slot_largepage_idx(gfn, slot, i); linfo = lpage_info_slot(gfn, slot, i);
*write_count -= 1; linfo->write_count -= 1;
WARN_ON(*write_count < 0); WARN_ON(linfo->write_count < 0);
} }
} }
@ -525,12 +525,12 @@ static int has_wrprotected_page(struct kvm *kvm,
int level) int level)
{ {
struct kvm_memory_slot *slot; struct kvm_memory_slot *slot;
int *largepage_idx; struct kvm_lpage_info *linfo;
slot = gfn_to_memslot(kvm, gfn); slot = gfn_to_memslot(kvm, gfn);
if (slot) { if (slot) {
largepage_idx = slot_largepage_idx(gfn, slot, level); linfo = lpage_info_slot(gfn, slot, level);
return *largepage_idx; return linfo->write_count;
} }
return 1; return 1;
@ -585,16 +585,15 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{ {
struct kvm_memory_slot *slot; struct kvm_memory_slot *slot;
unsigned long idx; struct kvm_lpage_info *linfo;
slot = gfn_to_memslot(kvm, gfn); slot = gfn_to_memslot(kvm, gfn);
if (likely(level == PT_PAGE_TABLE_LEVEL)) if (likely(level == PT_PAGE_TABLE_LEVEL))
return &slot->rmap[gfn - slot->base_gfn]; return &slot->rmap[gfn - slot->base_gfn];
idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - linfo = lpage_info_slot(gfn, slot, level);
(slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
return &slot->lpage_info[level - 2][idx].rmap_pde; return &linfo->rmap_pde;
} }
/* /*
@ -882,19 +881,16 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
end = start + (memslot->npages << PAGE_SHIFT); end = start + (memslot->npages << PAGE_SHIFT);
if (hva >= start && hva < end) { if (hva >= start && hva < end) {
gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
gfn_t gfn = memslot->base_gfn + gfn_offset;
ret = handler(kvm, &memslot->rmap[gfn_offset], data); ret = handler(kvm, &memslot->rmap[gfn_offset], data);
for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
unsigned long idx; struct kvm_lpage_info *linfo;
int sh;
sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j); linfo = lpage_info_slot(gfn, memslot,
idx = ((memslot->base_gfn+gfn_offset) >> sh) - PT_DIRECTORY_LEVEL + j);
(memslot->base_gfn >> sh); ret |= handler(kvm, &linfo->rmap_pde, data);
ret |= handler(kvm,
&memslot->lpage_info[j][idx].rmap_pde,
data);
} }
trace_kvm_age_page(hva, memslot, ret); trace_kvm_age_page(hva, memslot, ret);
retval |= ret; retval |= ret;

View File

@ -146,6 +146,11 @@ struct kvm_vcpu {
*/ */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
struct kvm_lpage_info {
unsigned long rmap_pde;
int write_count;
};
struct kvm_memory_slot { struct kvm_memory_slot {
gfn_t base_gfn; gfn_t base_gfn;
unsigned long npages; unsigned long npages;
@ -153,10 +158,7 @@ struct kvm_memory_slot {
unsigned long *rmap; unsigned long *rmap;
unsigned long *dirty_bitmap; unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_head; unsigned long *dirty_bitmap_head;
struct { struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
unsigned long rmap_pde;
int write_count;
} *lpage_info[KVM_NR_PAGE_SIZES - 1];
unsigned long userspace_addr; unsigned long userspace_addr;
int user_alloc; int user_alloc;
int id; int id;