KVM: rename x86 kvm->arch.n_alloc_mmu_pages
arch.n_alloc_mmu_pages is a poor choice of name. This value truly means, "the number of pages which _may_ be allocated". But, reading the name, "n_alloc_mmu_pages" implies "the number of allocated mmu pages", which is dead wrong. It's really the high watermark, so let's give it a name to match: n_max_mmu_pages. This change will make the next few patches much more obvious and easy to read. Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com> Signed-off-by: Tim Pepper <lnxninja@linux.vnet.ibm.com> Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
parent
e0df7b9f6c
commit
39de71ec53
|
@ -369,7 +369,7 @@ struct kvm_vcpu_arch {
|
||||||
struct kvm_arch {
|
struct kvm_arch {
|
||||||
unsigned int n_free_mmu_pages;
|
unsigned int n_free_mmu_pages;
|
||||||
unsigned int n_requested_mmu_pages;
|
unsigned int n_requested_mmu_pages;
|
||||||
unsigned int n_alloc_mmu_pages;
|
unsigned int n_max_mmu_pages;
|
||||||
atomic_t invlpg_counter;
|
atomic_t invlpg_counter;
|
||||||
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
|
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -1696,7 +1696,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
|
||||||
int used_pages;
|
int used_pages;
|
||||||
LIST_HEAD(invalid_list);
|
LIST_HEAD(invalid_list);
|
||||||
|
|
||||||
used_pages = kvm->arch.n_alloc_mmu_pages - kvm_mmu_available_pages(kvm);
|
used_pages = kvm->arch.n_max_mmu_pages - kvm_mmu_available_pages(kvm);
|
||||||
used_pages = max(0, used_pages);
|
used_pages = max(0, used_pages);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1721,9 +1721,9 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
|
kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
|
||||||
- kvm->arch.n_alloc_mmu_pages;
|
- kvm->arch.n_max_mmu_pages;
|
||||||
|
|
||||||
kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
|
kvm->arch.n_max_mmu_pages = kvm_nr_mmu_pages;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
|
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
|
||||||
|
@ -3141,7 +3141,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
|
||||||
|
|
||||||
idx = srcu_read_lock(&kvm->srcu);
|
idx = srcu_read_lock(&kvm->srcu);
|
||||||
spin_lock(&kvm->mmu_lock);
|
spin_lock(&kvm->mmu_lock);
|
||||||
npages = kvm->arch.n_alloc_mmu_pages -
|
npages = kvm->arch.n_max_mmu_pages -
|
||||||
kvm_mmu_available_pages(kvm);
|
kvm_mmu_available_pages(kvm);
|
||||||
cache_count += npages;
|
cache_count += npages;
|
||||||
if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
|
if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
|
||||||
|
|
|
@ -2759,7 +2759,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
|
||||||
|
|
||||||
static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
|
static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
|
||||||
{
|
{
|
||||||
return kvm->arch.n_alloc_mmu_pages;
|
return kvm->arch.n_max_mmu_pages;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
|
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
|
||||||
|
|
Loading…
Reference in New Issue