KVM: MMU: Introduce a helper function for FIFO zapping
Make the code for zapping the oldest mmu page, placed at the tail of the
active list, a separate function.

Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
commit 5da596078f
parent 945315b9db
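For readers less familiar with the kernel's list_head idiom, here is a minimal, self-contained user-space sketch of the pattern the patch below factors out: pages are inserted at the head of an "active" list, so the oldest page sits at the tail; a small helper zaps exactly one oldest entry and reports whether it did anything, and callers just loop on it until they reach their goal. This is an illustrative assumption-laden analogue only — the names (struct page, zap_oldest, add_page, n_used_pages, goal) are made up for the sketch and are not the kernel's.

/*
 * Sketch of the FIFO zapping pattern (not kernel code): oldest entry lives
 * at the tail of a doubly linked "active" list.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page {
	int id;
	struct page *prev, *next;	/* links on the "active" list */
};

static struct page *active_head;	/* newest page */
static struct page *active_tail;	/* oldest page */
static int n_used_pages;

/* Insert at the head so the list stays in FIFO order. */
static void add_page(int id)
{
	struct page *p = calloc(1, sizeof(*p));

	p->id = id;
	p->next = active_head;
	if (active_head)
		active_head->prev = p;
	else
		active_tail = p;
	active_head = p;
	n_used_pages++;
}

/* Analogue of prepare_zap_oldest_mmu_page(): zap the tail, or report "empty". */
static bool zap_oldest(void)
{
	struct page *victim = active_tail;

	if (!victim)
		return false;

	active_tail = victim->prev;
	if (active_tail)
		active_tail->next = NULL;
	else
		active_head = NULL;

	printf("zapping page %d\n", victim->id);
	free(victim);
	n_used_pages--;
	return true;
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		add_page(i);

	/* Analogue of kvm_mmu_change_mmu_pages(): shrink down to a goal. */
	int goal = 3;

	while (n_used_pages > goal)
		if (!zap_oldest())
			break;

	printf("%d pages left\n", n_used_pages);
	return 0;
}

Returning a bool from the helper lets every caller express "zap until the goal is met or the list runs dry" as a plain loop, instead of duplicating the empty-list check at each call site — which is exactly what the hunks below do for kvm_mmu_change_mmu_pages(), __kvm_mmu_free_some_pages() and mmu_shrink().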
@@ -2110,6 +2110,21 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	}
 }
 
+static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
+					struct list_head *invalid_list)
+{
+	struct kvm_mmu_page *sp;
+
+	if (list_empty(&kvm->arch.active_mmu_pages))
+		return false;
+
+	sp = list_entry(kvm->arch.active_mmu_pages.prev,
+			struct kvm_mmu_page, link);
+	kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+
+	return true;
+}
+
 /*
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
@@ -2117,23 +2132,15 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 {
 	LIST_HEAD(invalid_list);
-	/*
-	 * If we set the number of mmu pages to be smaller be than the
-	 * number of actived pages , we must to free some mmu pages before we
-	 * change the value
-	 */
 
 	spin_lock(&kvm->mmu_lock);
 
 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
-		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
-			!list_empty(&kvm->arch.active_mmu_pages)) {
-			struct kvm_mmu_page *page;
+		/* Need to free some mmu pages to achieve the goal. */
+		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
+			if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
+				break;
 
-			page = container_of(kvm->arch.active_mmu_pages.prev,
-					    struct kvm_mmu_page, link);
-			kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
-		}
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
 	}
@@ -4007,13 +4014,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
 	LIST_HEAD(invalid_list);
 
-	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
-	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
-		struct kvm_mmu_page *sp;
+	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
+		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
+			break;
 
-		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
-				  struct kvm_mmu_page, link);
-		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -4182,19 +4186,6 @@ restart:
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
-						struct list_head *invalid_list)
-{
-	struct kvm_mmu_page *page;
-
-	if (list_empty(&kvm->arch.active_mmu_pages))
-		return;
-
-	page = container_of(kvm->arch.active_mmu_pages.prev,
-			    struct kvm_mmu_page, link);
-	kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
-}
-
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct kvm *kvm;
@@ -4229,7 +4220,7 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
 
-		kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list);
+		prepare_zap_oldest_mmu_page(kvm, &invalid_list);
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
 		spin_unlock(&kvm->mmu_lock);