KVM: Replace reads of vcpu->arch.cr3 by an accessor
This allows us to keep cr3 in the VMCS, later on.

Signed-off-by: Avi Kivity <avi@redhat.com>
parent e49146dce8
commit 9f8fe5043f
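For context, a minimal sketch of the pattern the diff below applies (the accessor definition matches the first hunk; the call-site line is illustrative only): every direct read of vcpu->arch.cr3 is routed through kvm_read_cr3(), so a later change can make the accessor fetch cr3 lazily, for example out of the VMCS.

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	/* Today this only returns the cached field; funnelling all readers
	 * through one accessor is what allows a lazy VMCS read later on. */
	return vcpu->arch.cr3;
}

/* Call sites then read, e.g.: */
sregs->cr3 = kvm_read_cr3(vcpu);	/* instead of sregs->cr3 = vcpu->arch.cr3; */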
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -73,6 +73,11 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 	return vcpu->arch.cr4 & mask;
 }
 
+static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.cr3;
+}
+
 static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr4_bits(vcpu, ~0UL);
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2727,13 +2727,13 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
+	pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
 	mmu_free_roots(vcpu);
 }
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr3;
+	return kvm_read_cr3(vcpu);
 }
 
 static void inject_page_fault(struct kvm_vcpu *vcpu,
@@ -3637,7 +3637,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	(void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
+	(void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
 	return 1;
 }
 
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1174,7 +1174,7 @@ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 	switch (reg) {
 	case VCPU_EXREG_PDPTR:
 		BUG_ON(!npt_enabled);
-		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		break;
 	default:
 		BUG();
@@ -2116,7 +2116,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 	nested_vmcb->save.idtr   = vmcb->save.idtr;
 	nested_vmcb->save.efer   = svm->vcpu.arch.efer;
 	nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
-	nested_vmcb->save.cr3    = svm->vcpu.arch.cr3;
+	nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
 	nested_vmcb->save.cr2    = vmcb->save.cr2;
 	nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
 	nested_vmcb->save.rflags = vmcb->save.rflags;
@@ -2311,7 +2311,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	if (npt_enabled)
 		hsave->save.cr3    = vmcb->save.cr3;
 	else
-		hsave->save.cr3    = svm->vcpu.arch.cr3;
+		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
 
 	copy_vmcb_control_area(hsave, vmcb);
 
@@ -2715,7 +2715,7 @@ static int cr_interception(struct vcpu_svm *svm)
 		val = svm->vcpu.arch.cr2;
 		break;
 	case 3:
-		val = svm->vcpu.arch.cr3;
+		val = kvm_read_cr3(&svm->vcpu);
 		break;
 	case 4:
 		val = kvm_read_cr4(&svm->vcpu);
@@ -3693,7 +3693,7 @@ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 	mark_dirty(svm->vmcb, VMCB_NPT);
 
 	/* Also sync guest cr3 here in case we live migrate */
-	svm->vmcb->save.cr3 = vcpu->arch.cr3;
+	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
 	mark_dirty(svm->vmcb, VMCB_CR);
 
 	svm_flush_tlb(vcpu);
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1989,7 +1989,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	if (enable_ept) {
 		eptp = construct_eptp(cr3);
 		vmcs_write64(EPT_POINTER, eptp);
-		guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
+		guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
 			vcpu->kvm->arch.ept_identity_map_addr;
 		ept_load_pdptrs(vcpu);
 	}
@@ -3227,8 +3227,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 	case 1: /*mov from cr*/
 		switch (cr) {
 		case 3:
-			kvm_register_write(vcpu, reg, vcpu->arch.cr3);
-			trace_kvm_cr_read(cr, vcpu->arch.cr3);
+			val = kvm_read_cr3(vcpu);
+			kvm_register_write(vcpu, reg, val);
+			trace_kvm_cr_read(cr, val);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -473,8 +473,8 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 		      (unsigned long *)&vcpu->arch.regs_avail))
 		return true;
 
-	gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
-	offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
+	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
+	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
 	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
 				       PFERR_USER_MASK | PFERR_WRITE_MASK);
 	if (r < 0)
@@ -519,7 +519,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		} else
 #endif
 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
-						 vcpu->arch.cr3))
+						 kvm_read_cr3(vcpu)))
 			return 1;
 	}
 
@@ -611,7 +611,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			return 1;
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
-		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
+		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+				   kvm_read_cr3(vcpu)))
 		return 1;
 
 	if (cr4 & X86_CR4_VMXE)
@@ -631,7 +632,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
-	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
+	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
 		kvm_mmu_sync_roots(vcpu);
 		kvm_mmu_flush_tlb(vcpu);
 		return 0;
@@ -4073,7 +4074,7 @@ static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
 		value = vcpu->arch.cr2;
 		break;
 	case 3:
-		value = vcpu->arch.cr3;
+		value = kvm_read_cr3(vcpu);
 		break;
 	case 4:
 		value = kvm_read_cr4(vcpu);
@@ -5512,7 +5513,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
 	sregs->cr0 = kvm_read_cr0(vcpu);
 	sregs->cr2 = vcpu->arch.cr2;
-	sregs->cr3 = vcpu->arch.cr3;
+	sregs->cr3 = kvm_read_cr3(vcpu);
 	sregs->cr4 = kvm_read_cr4(vcpu);
 	sregs->cr8 = kvm_get_cr8(vcpu);
 	sregs->efer = vcpu->arch.efer;
@@ -5580,7 +5581,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	kvm_x86_ops->set_gdt(vcpu, &dt);
 
 	vcpu->arch.cr2 = sregs->cr2;
-	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
+	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
 	vcpu->arch.cr3 = sregs->cr3;
 
 	kvm_set_cr8(vcpu, sregs->cr8);
@@ -5598,7 +5599,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	if (sregs->cr4 & X86_CR4_OSXSAVE)
 		update_cpuid(vcpu);
 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		mmu_reset_needed = 1;
 	}
 