KVM: PPC: Book3S PR: Mark pages accessed, and dirty if being written

The mark_page_dirty() function, despite what its name might suggest,
doesn't actually mark the page as dirty as far as the MM subsystem is
concerned.  It merely sets a bit in KVM's map of dirty pages, if
userspace has requested dirty tracking for the relevant memslot.
To tell the MM subsystem that the page is dirty, we have to call
kvm_set_pfn_dirty() (or an equivalent such as SetPageDirty()).
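
As a point of reference, here is a minimal sketch (not part of this patch) of how
the two layers fit together on a write fault, using the generic KVM helpers; the
helper name example_finish_write_fault is made up for illustration:

	#include <linux/kvm_host.h>

	/*
	 * mark_page_dirty() only sets the gfn's bit in the memslot's dirty
	 * bitmap (consumed by userspace via KVM_GET_DIRTY_LOG); it does not
	 * touch the struct page.  kvm_set_pfn_dirty() reaches the MM
	 * subsystem (ultimately SetPageDirty()), so the page is written back
	 * before it can be reclaimed.
	 */
	static void example_finish_write_fault(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
	{
		mark_page_dirty(kvm, gfn);	/* dirty-log bookkeeping only */
		kvm_set_pfn_dirty(pfn);		/* tell the MM the page is dirty */
	}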

This adds a call to kvm_set_pfn_dirty(), and while we are here, also
adds a call to kvm_set_pfn_accessed() to tell the MM subsystem that
the page has been accessed.  Since we are now using the pfn in
several places, this adds a 'pfn' variable to store it and changes
the places that used hpaddr >> PAGE_SHIFT to use pfn instead, which
is the same thing.

This also changes a use of HPTE_R_PP to PP_RXRX.  Both are 3, but
PP_RXRX is more informative as being the read-only page permission
bit setting.
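
For context, the relevant definitions look roughly like the following (header
locations and comments reproduced from memory of the powerpc tree of that era,
so treat this as a hedged sketch rather than a quote):

	/* asm/mmu-hash64.h: mask for the PP bits in the HPTE second doubleword */
	#define HPTE_R_PP	ASM_CONST(0x0000000000000003)

	/* asm/kvm_book3s.h: PP values used when building host HPTEs */
	#define PP_RWXX	0	/* Supervisor read/write, User none       */
	#define PP_RWRX	1	/* Supervisor read/write, User read       */
	#define PP_RWRW	2	/* Supervisor read/write, User read/write */
	#define PP_RXRX	3	/* Supervisor read,       User read       */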

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
This commit is contained in:
Paul Mackerras, 2013-09-20 14:52:53 +10:00, committed by Alexander Graf
parent d78bca7296
commit adc0bafe00
1 changed file with 15 additions and 11 deletions

@@ -96,20 +96,21 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 	unsigned long mmu_seq;
 	struct kvm *kvm = vcpu->kvm;
 	struct hpte_cache *cpte;
+	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
+	unsigned long pfn;
 
 	/* used to check for invalidations in progress */
 	mmu_seq = kvm->mmu_notifier_seq;
 	smp_rmb();
 
 	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
-				   iswrite, &writable);
-	if (is_error_noslot_pfn(hpaddr)) {
-		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
+	pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
+	if (is_error_noslot_pfn(pfn)) {
+		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
 		r = -EINVAL;
 		goto out;
 	}
-	hpaddr <<= PAGE_SHIFT;
+	hpaddr = pfn << PAGE_SHIFT;
 
 	/* and write the mapping ea -> hpa into the pt */
 	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
@@ -129,15 +130,18 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);
 
+	kvm_set_pfn_accessed(pfn);
 	if (!orig_pte->may_write || !writable)
-		rflags |= HPTE_R_PP;
-	else
-		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+		rflags |= PP_RXRX;
+	else {
+		mark_page_dirty(vcpu->kvm, gfn);
+		kvm_set_pfn_dirty(pfn);
+	}
 
 	if (!orig_pte->may_execute)
 		rflags |= HPTE_R_N;
 	else
-		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+		kvmppc_mmu_flush_icache(pfn);
 
 	/*
 	 * Use 64K pages if possible; otherwise, on 64K page kernels,
@@ -191,7 +195,7 @@ map_again:
 	cpte->slot = hpteg + (ret & 7);
 	cpte->host_vpn = vpn;
 	cpte->pte = *orig_pte;
-	cpte->pfn = hpaddr >> PAGE_SHIFT;
+	cpte->pfn = pfn;
 	cpte->pagesize = hpsize;
 
 	kvmppc_mmu_hpte_cache_map(vcpu, cpte);
@@ -200,7 +204,7 @@ map_again:
 out_unlock:
 	spin_unlock(&kvm->mmu_lock);
-	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+	kvm_release_pfn_clean(pfn);
 	if (cpte)
 		kvmppc_mmu_hpte_cache_free(cpte);