kvm: x86: export maximum number of mmu_page_hash collisions
Report the maximum number of mmu_page_hash collisions as a per-VM stat. This
will make it easy to identify problems with the mmu_page_hash in the future.

Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit f3414bc774 (parent 826da32140)
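The new counter ends up exposed through KVM's debugfs stats (see the last hunk
below). A minimal sketch of reading it from userspace, assuming debugfs is
mounted at /sys/kernel/debug and that the aggregate file carries the stat's
name; both are assumptions about the runtime environment, not part of this
commit:

    /* read_collisions.c: print the new stat (path is an assumption) */
    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/kernel/debug/kvm/max_mmu_page_hash_collisions";
            FILE *f = fopen(path, "r");
            unsigned long val;

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fscanf(f, "%lu", &val) == 1)
                    printf("max_mmu_page_hash_collisions: %lu\n", val);
            fclose(f);
            return 0;
    }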
@@ -821,6 +821,7 @@ struct kvm_vm_stat {
 	ulong mmu_unsync;
 	ulong remote_tlb_flush;
 	ulong lpages;
+	ulong max_mmu_page_hash_collisions;
 };

 struct kvm_vcpu_stat {
@@ -1904,17 +1904,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
  * since it has been deleted from active_mmu_pages but still can be found
  * at hast list.
  *
- * for_each_gfn_valid_sp() has skipped that kind of pages.
+ * for_each_valid_sp() has skipped that kind of pages.
  */
-#define for_each_gfn_valid_sp(_kvm, _sp, _gfn)				\
+#define for_each_valid_sp(_kvm, _sp, _gfn)				\
 	hlist_for_each_entry(_sp,					\
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
-		if ((_sp)->gfn != (_gfn) || is_obsolete_sp((_kvm), (_sp)) \
-			|| (_sp)->role.invalid) {} else
+		if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) {	\
+		} else

 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)		\
-	for_each_gfn_valid_sp(_kvm, _sp, _gfn)				\
-		if ((_sp)->role.direct) {} else
+	for_each_valid_sp(_kvm, _sp, _gfn)				\
+		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else

 /* @sp->gfn should be write-protected at the call site */
 static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
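The reworked macro keeps the usual "if (...) {} else" trick: the caller's loop
body becomes the else branch, so the filter cannot accidentally capture a
statement that follows the loop. A standalone sketch of that pattern with
hypothetical names, not kernel code:

    #include <stdio.h>

    struct item { int key; int valid; };

    /*
     * Hypothetical iterator: walk an array and skip invalid entries.
     * The caller's statement becomes the "else" branch of the filter.
     */
    #define for_each_valid_item(_it, _arr, _n)                          \
            for (int __i = 0; __i < (_n) && ((_it) = &(_arr)[__i], 1); __i++) \
                    if (!(_it)->valid) {                                \
                    } else

    int main(void)
    {
            struct item items[] = { { 1, 1 }, { 2, 0 }, { 3, 1 } };
            struct item *it;

            for_each_valid_item(it, items, 3)
                    printf("key %d\n", it->key);    /* prints 1 and 3 */
            return 0;
    }

KVM's macro works the same way, except the iteration source is the hash bucket
and the filter now drops only obsolete and invalid pages, leaving the gfn check
to the callers that need it.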
@@ -2116,6 +2116,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	struct kvm_mmu_page *sp;
 	bool need_sync = false;
 	bool flush = false;
+	int collisions = 0;
 	LIST_HEAD(invalid_list);

 	role = vcpu->arch.mmu.base_role;
@@ -2130,7 +2131,12 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_gfn_valid_sp(vcpu->kvm, sp, gfn) {
+	for_each_valid_sp(vcpu->kvm, sp, gfn) {
+		if (sp->gfn != gfn) {
+			collisions++;
+			continue;
+		}
+
 		if (!need_sync && sp->unsync)
 			need_sync = true;

@@ -2153,7 +2159,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,

 		__clear_sp_write_flooding_count(sp);
 		trace_kvm_mmu_get_page(sp, false);
-		return sp;
+		goto out;
 	}

 	++vcpu->kvm->stat.mmu_cache_miss;
@@ -2183,6 +2189,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	trace_kvm_mmu_get_page(sp, true);

 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+out:
+	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
+		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
 	return sp;
 }

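Taken together, the lookup loop counts how many pages shared the bucket without
matching the wanted gfn, and the "out:" path folds that count into a running
maximum. A minimal standalone sketch of the same count-then-track-maximum idea,
using a hypothetical chained hash table rather than KVM's structures:

    #include <stdio.h>
    #include <string.h>

    #define NBUCKETS 8

    struct entry { const char *key; struct entry *next; };

    struct table {
            struct entry *buckets[NBUCKETS];
            unsigned long max_collisions;   /* analogous to the new stat */
    };

    static unsigned int hashfn(const char *key)
    {
            unsigned int h = 0;

            while (*key)
                    h = h * 31 + (unsigned char)*key++;
            return h % NBUCKETS;
    }

    static struct entry *lookup(struct table *t, const char *key)
    {
            unsigned long collisions = 0;
            struct entry *e;

            for (e = t->buckets[hashfn(key)]; e; e = e->next) {
                    if (strcmp(e->key, key) != 0) {
                            collisions++;   /* same bucket, different key */
                            continue;
                    }
                    break;
            }
            /* Track the worst case seen so far, as the commit does. */
            if (collisions > t->max_collisions)
                    t->max_collisions = collisions;
            return e;
    }

    int main(void)
    {
            struct entry a = { "a", NULL };
            struct entry b = { "b", &a };   /* chained in front of "a" */
            struct table t = { { 0 }, 0 };

            /* Force both keys into one bucket to exercise the counter. */
            t.buckets[hashfn("a")] = &b;

            lookup(&t, "a");
            printf("max collisions: %lu\n", t.max_collisions);     /* 1 */
            return 0;
    }

Keeping only the maximum, rather than a full histogram, keeps the stat a single
ulong that is cheap to update on the lookup path.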
@@ -190,6 +190,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "mmu_unsync", VM_STAT(mmu_unsync) },
 	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
 	{ "largepages", VM_STAT(lpages) },
+	{ "max_mmu_page_hash_collisions",
+		VM_STAT(max_mmu_page_hash_collisions) },
 	{ NULL }
 };
