KVM: MMU: fix wrong not write protected sp report
The audit code reports that some shadow pages (sp) are not write-protected, but this is actually a bug in audit_write_protection() itself, because: (1) invalid shadow pages do not need to be write-protected; (2) it reads an uninitialized local variable ('gfn'); and (3) kvm_mmu_audit() is called outside the protection of mmu_lock. Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
parent
0beb8d6604
commit
bc32ce2152
|
@ -3708,16 +3708,17 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
|
||||||
struct kvm_memory_slot *slot;
|
struct kvm_memory_slot *slot;
|
||||||
unsigned long *rmapp;
|
unsigned long *rmapp;
|
||||||
u64 *spte;
|
u64 *spte;
|
||||||
gfn_t gfn;
|
|
||||||
|
|
||||||
list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
|
list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
|
||||||
if (sp->role.direct)
|
if (sp->role.direct)
|
||||||
continue;
|
continue;
|
||||||
if (sp->unsync)
|
if (sp->unsync)
|
||||||
continue;
|
continue;
|
||||||
|
if (sp->role.invalid)
|
||||||
|
continue;
|
||||||
|
|
||||||
slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
|
slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
|
||||||
rmapp = &slot->rmap[gfn - slot->base_gfn];
|
rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
|
||||||
|
|
||||||
spte = rmap_next(vcpu->kvm, rmapp, NULL);
|
spte = rmap_next(vcpu->kvm, rmapp, NULL);
|
||||||
while (spte) {
|
while (spte) {
|
||||||
|
|
|
@ -504,7 +504,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
|
||||||
unsigned long mmu_seq;
|
unsigned long mmu_seq;
|
||||||
|
|
||||||
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
|
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
|
||||||
kvm_mmu_audit(vcpu, "pre page fault");
|
|
||||||
|
|
||||||
r = mmu_topup_memory_caches(vcpu);
|
r = mmu_topup_memory_caches(vcpu);
|
||||||
if (r)
|
if (r)
|
||||||
|
@ -542,6 +541,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
|
||||||
spin_lock(&vcpu->kvm->mmu_lock);
|
spin_lock(&vcpu->kvm->mmu_lock);
|
||||||
if (mmu_notifier_retry(vcpu, mmu_seq))
|
if (mmu_notifier_retry(vcpu, mmu_seq))
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
|
|
||||||
|
kvm_mmu_audit(vcpu, "pre page fault");
|
||||||
kvm_mmu_free_some_pages(vcpu);
|
kvm_mmu_free_some_pages(vcpu);
|
||||||
sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
|
sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
|
||||||
level, &write_pt, pfn);
|
level, &write_pt, pfn);
|
||||||
|
|
Loading…
Reference in New Issue