KVM: Add kvm_read_guest_atomic()

In preparation for an mmu spinlock, add kvm_read_guest_atomic()
and use it in fetch() and prefetch_page().

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Author: Marcelo Tosatti, 2007-12-20 19:18:23 -05:00
Committed by: Avi Kivity
Commit: 7ec5458821 (parent: 10589a4699)
3 changed files with 38 additions and 12 deletions
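
For context: kvm_read_guest() resolves the gfn to a host virtual address and
copies with a normal user-copy, which may take a page fault and sleep, so it
cannot run under a spinlock. The atomic variant below disables page faults and
returns -EFAULT instead of sleeping, so callers must check for failure. A
minimal sketch of the intended call pattern, assuming a hypothetical mmu_lock
(the lock itself is not added by this patch):

/* Sketch only -- "mmu_lock" is a hypothetical field used for illustration;
 * this patch merely prepares for such a lock. */
static int recheck_guest_pte(struct kvm_vcpu *vcpu, gpa_t pte_gpa,
                             pt_element_t expected)
{
        pt_element_t cur;
        int r;

        spin_lock(&vcpu->kvm->mmu_lock);        /* hypothetical lock */
        /* Must not sleep here, so kvm_read_guest() (which can fault the
         * page in) is off limits; use the atomic variant instead. */
        r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &cur, sizeof(cur));
        spin_unlock(&vcpu->kvm->mmu_lock);

        if (r)
                return -EFAULT; /* page not resident; caller must bail out */
        return cur == expected ? 0 : -EAGAIN;
}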


@@ -316,10 +316,12 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                                                   metaphysical, access,
                                                   shadow_ent, &new_page);
                 if (new_page && !metaphysical) {
+                        int r;
                         pt_element_t curr_pte;
-                        kvm_read_guest(vcpu->kvm, walker->pte_gpa[level - 2],
-                                       &curr_pte, sizeof(curr_pte));
-                        if (curr_pte != walker->ptes[level - 2])
+                        r = kvm_read_guest_atomic(vcpu->kvm,
+                                                  walker->pte_gpa[level - 2],
+                                                  &curr_pte, sizeof(curr_pte));
+                        if (r || curr_pte != walker->ptes[level - 2])
                                 return NULL;
                 }
                 shadow_addr = __pa(shadow_page->spt);
@@ -429,9 +431,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
 static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *sp)
 {
-        int i, offset = 0;
-        pt_element_t *gpt;
-        struct page *page;
+        int i, offset = 0, r = 0;
+        pt_element_t pt;
 
         if (sp->role.metaphysical
             || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
@@ -441,15 +442,18 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 
         if (PTTYPE == 32)
                 offset = sp->role.quadrant << PT64_LEVEL_BITS;
-        page = gfn_to_page(vcpu->kvm, sp->gfn);
-        gpt = kmap_atomic(page, KM_USER0);
-        for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
-                if (is_present_pte(gpt[offset + i]))
+
+        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+                gpa_t pte_gpa = gfn_to_gpa(sp->gfn);
+                pte_gpa += (i+offset) * sizeof(pt_element_t);
+
+                r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &pt,
+                                          sizeof(pt_element_t));
+                if (r || is_present_pte(pt))
                         sp->spt[i] = shadow_trap_nonpresent_pte;
                 else
                         sp->spt[i] = shadow_notrap_nonpresent_pte;
-        kunmap_atomic(gpt, KM_USER0);
-        kvm_release_page_clean(page);
+        }
 }
 
 #undef pt_element_t
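
The prefetch_page() rewrite replaces the gfn_to_page()/kmap_atomic() mapping of
the guest page table with a per-entry guest-physical address computation. As a
standalone illustration (assumed values; in KVM gfn_to_gpa() is simply
gfn << PAGE_SHIFT), the address of entry i works out as follows:

/* Illustration only, with assumed values -- not part of the patch. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
typedef uint64_t pt_element_t;          /* 64-bit PTE, i.e. PTTYPE == 64 */

int main(void)
{
        uint64_t gfn = 0x1234;          /* guest frame holding the page table (assumed) */
        int offset = 0;                 /* non-zero only for PTTYPE == 32 quadrants */
        int i = 5;                      /* entry index */

        /* Mirrors: pte_gpa = gfn_to_gpa(sp->gfn) + (i+offset) * sizeof(pt_element_t) */
        uint64_t pte_gpa = (gfn << PAGE_SHIFT) + (i + offset) * sizeof(pt_element_t);

        printf("entry %d -> gpa 0x%llx\n", i, (unsigned long long)pte_gpa);
        /* prints: entry 5 -> gpa 0x1234028 (0x1234000 + 5 * 8) */
        return 0;
}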


@@ -167,6 +167,8 @@ void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                         int len);
+int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
+                          unsigned long len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                          int offset, int len);


@@ -541,6 +541,26 @@ int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest);
 
+int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
+                          unsigned long len)
+{
+        int r;
+        unsigned long addr;
+        gfn_t gfn = gpa >> PAGE_SHIFT;
+        int offset = offset_in_page(gpa);
+
+        addr = gfn_to_hva(kvm, gfn);
+        if (kvm_is_error_hva(addr))
+                return -EFAULT;
+        pagefault_disable();
+        r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
+        pagefault_enable();
+        if (r)
+                return -EFAULT;
+        return 0;
+}
+EXPORT_SYMBOL(kvm_read_guest_atomic);
+
 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                          int offset, int len)
 {
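
__copy_from_user_inatomic() returns the number of bytes left uncopied, so any
nonzero result is folded into -EFAULT. Unlike kvm_read_guest(), which faults
missing pages in, the atomic variant can therefore fail when the backing page
is simply not resident. A hedged usage sketch (the helper and its can_sleep
flag are hypothetical, for illustration only):

/* Illustration only: pick the helper based on whether the caller may sleep. */
static int read_guest_u64(struct kvm *kvm, gpa_t gpa, u64 *val, bool can_sleep)
{
        if (can_sleep)
                return kvm_read_guest(kvm, gpa, val, sizeof(*val));

        /* No page fault will be taken: may return -EFAULT if the page
         * backing this gpa is not currently mapped in the host. */
        return kvm_read_guest_atomic(kvm, gpa, val, sizeof(*val));
}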