KVM: MMU: cleanup for hlist walk restart
Quote from Avi: "Just change the assignment to a 'goto restart;' please, I don't like playing with list_for_each internals." Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:
parent
acb5451789
commit
3246af0ece
|
@ -1565,13 +1565,14 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
|
||||||
r = 0;
|
r = 0;
|
||||||
index = kvm_page_table_hashfn(gfn);
|
index = kvm_page_table_hashfn(gfn);
|
||||||
bucket = &kvm->arch.mmu_page_hash[index];
|
bucket = &kvm->arch.mmu_page_hash[index];
|
||||||
|
restart:
|
||||||
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
|
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
|
||||||
if (sp->gfn == gfn && !sp->role.direct) {
|
if (sp->gfn == gfn && !sp->role.direct) {
|
||||||
pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
|
pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
|
||||||
sp->role.word);
|
sp->role.word);
|
||||||
r = 1;
|
r = 1;
|
||||||
if (kvm_mmu_zap_page(kvm, sp))
|
if (kvm_mmu_zap_page(kvm, sp))
|
||||||
n = bucket->first;
|
goto restart;
|
||||||
}
|
}
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
@ -1585,13 +1586,14 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
|
||||||
|
|
||||||
index = kvm_page_table_hashfn(gfn);
|
index = kvm_page_table_hashfn(gfn);
|
||||||
bucket = &kvm->arch.mmu_page_hash[index];
|
bucket = &kvm->arch.mmu_page_hash[index];
|
||||||
|
restart:
|
||||||
hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
|
hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
|
||||||
if (sp->gfn == gfn && !sp->role.direct
|
if (sp->gfn == gfn && !sp->role.direct
|
||||||
&& !sp->role.invalid) {
|
&& !sp->role.invalid) {
|
||||||
pgprintk("%s: zap %lx %x\n",
|
pgprintk("%s: zap %lx %x\n",
|
||||||
__func__, gfn, sp->role.word);
|
__func__, gfn, sp->role.word);
|
||||||
if (kvm_mmu_zap_page(kvm, sp))
|
if (kvm_mmu_zap_page(kvm, sp))
|
||||||
nn = bucket->first;
|
goto restart;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2671,6 +2673,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
|
||||||
}
|
}
|
||||||
index = kvm_page_table_hashfn(gfn);
|
index = kvm_page_table_hashfn(gfn);
|
||||||
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
|
bucket = &vcpu->kvm->arch.mmu_page_hash[index];
|
||||||
|
|
||||||
|
restart:
|
||||||
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
|
hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
|
||||||
if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
|
if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
|
||||||
continue;
|
continue;
|
||||||
|
@ -2691,7 +2695,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
|
||||||
pgprintk("misaligned: gpa %llx bytes %d role %x\n",
|
pgprintk("misaligned: gpa %llx bytes %d role %x\n",
|
||||||
gpa, bytes, sp->role.word);
|
gpa, bytes, sp->role.word);
|
||||||
if (kvm_mmu_zap_page(vcpu->kvm, sp))
|
if (kvm_mmu_zap_page(vcpu->kvm, sp))
|
||||||
n = bucket->first;
|
goto restart;
|
||||||
++vcpu->kvm->stat.mmu_flooded;
|
++vcpu->kvm->stat.mmu_flooded;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
@ -2900,10 +2904,11 @@ void kvm_mmu_zap_all(struct kvm *kvm)
|
||||||
struct kvm_mmu_page *sp, *node;
|
struct kvm_mmu_page *sp, *node;
|
||||||
|
|
||||||
spin_lock(&kvm->mmu_lock);
|
spin_lock(&kvm->mmu_lock);
|
||||||
|
restart:
|
||||||
list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
|
list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
|
||||||
if (kvm_mmu_zap_page(kvm, sp))
|
if (kvm_mmu_zap_page(kvm, sp))
|
||||||
node = container_of(kvm->arch.active_mmu_pages.next,
|
goto restart;
|
||||||
struct kvm_mmu_page, link);
|
|
||||||
spin_unlock(&kvm->mmu_lock);
|
spin_unlock(&kvm->mmu_lock);
|
||||||
|
|
||||||
kvm_flush_remote_tlbs(kvm);
|
kvm_flush_remote_tlbs(kvm);
|
||||||
|
|
Loading…
Reference in New Issue