KVM: PPC: booke: use shadow_msr
Keep the guest MSR and the guest-mode true MSR separate, rather than
modifying the guest MSR on each guest entry to produce a true MSR.

Any bits which should be modified based on guest MSR must be explicitly
propagated from vcpu->arch.shared->msr to vcpu->arch.shadow_msr in
kvmppc_set_msr().

While we're modifying the guest entry code, reorder a few instructions
to bury some load latencies.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent c51584d52e
commit ecee273fc4
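As a rough illustration of the split described in the message above, here is a minimal, standalone user-space model (not kernel code): the struct, the demo_* names, and the bit values are invented for the example. It only shows the idea that the guest-visible MSR and the shadow MSR loaded on guest entry are kept separate, with a fixed set of guest-controlled bits propagated across whenever the guest writes MSR.

/*
 * Standalone sketch of the guest-MSR / shadow-MSR split.
 * All names and bit positions here are assumptions for the demo,
 * not the kernel's actual definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MSR_EE  (1u << 15)  /* external interrupts enabled */
#define DEMO_MSR_PR  (1u << 14)  /* user (problem state) mode   */
#define DEMO_MSR_DE  (1u << 9)   /* debug interrupts enabled    */

/* Bits the host always forces on while running the guest. */
#define DEMO_ALWAYS_ON  DEMO_MSR_PR
/* Bits the guest is allowed to control directly. */
#define DEMO_GUEST_BITS (DEMO_MSR_EE | DEMO_MSR_DE)

struct demo_vcpu {
	uint32_t guest_msr;   /* what the guest thinks MSR contains    */
	uint32_t shadow_msr;  /* what is actually loaded on guest entry */
};

static void demo_set_msr(struct demo_vcpu *vcpu, uint32_t new_msr)
{
	vcpu->guest_msr = new_msr;

	/* Propagate only the guest-controlled bits into the shadow MSR. */
	vcpu->shadow_msr &= ~DEMO_GUEST_BITS;
	vcpu->shadow_msr |= new_msr & DEMO_GUEST_BITS;
	vcpu->shadow_msr |= DEMO_ALWAYS_ON;
}

int main(void)
{
	struct demo_vcpu vcpu = { .guest_msr = 0, .shadow_msr = DEMO_ALWAYS_ON };

	demo_set_msr(&vcpu, DEMO_MSR_EE);  /* guest enables interrupts */
	printf("guest_msr=%#x shadow_msr=%#x\n",
	       (unsigned)vcpu.guest_msr, (unsigned)vcpu.shadow_msr);
	return 0;
}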
arch/powerpc/include/asm/kvm_host.h
@@ -219,12 +219,12 @@ struct kvm_vcpu_arch {
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S
-	ulong shadow_msr;
 	ulong hflags;
 	ulong guest_owned_ext;
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;
+	ulong shadow_msr;
 	ulong sprg4;
 	ulong sprg5;
 	ulong sprg6;
arch/powerpc/kernel/asm-offsets.c
@@ -404,12 +404,12 @@ int main(void)
 	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
 	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
 
 	/* book3s */
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
 	DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
-	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
 	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
arch/powerpc/kvm/booke.c
@@ -514,6 +514,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.pc = 0;
 	vcpu->arch.shared->msr = 0;
+	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
 	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
 	vcpu->arch.shadow_pid = 1;
arch/powerpc/kvm/booke_interrupts.S
@@ -24,8 +24,6 @@
 #include <asm/page.h>
 #include <asm/asm-offsets.h>
 
-#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
-
 #define VCPU_GPR(n)	(VCPU_GPRS + (n * 4))
 
 /* The host stack layout: */
@@ -405,20 +403,17 @@ lightweight_exit:
 
 	/* Finish loading guest volatiles and jump to guest. */
 	lwz	r3, VCPU_CTR(r4)
+	lwz	r5, VCPU_CR(r4)
+	lwz	r6, VCPU_PC(r4)
+	lwz	r7, VCPU_SHADOW_MSR(r4)
 	mtctr	r3
-	lwz	r3, VCPU_CR(r4)
-	mtcr	r3
+	mtcr	r5
+	mtsrr0	r6
+	mtsrr1	r7
 	lwz	r5, VCPU_GPR(r5)(r4)
 	lwz	r6, VCPU_GPR(r6)(r4)
 	lwz	r7, VCPU_GPR(r7)(r4)
 	lwz	r8, VCPU_GPR(r8)(r4)
-	lwz	r3, VCPU_PC(r4)
-	mtsrr0	r3
-	lwz	r3, VCPU_SHARED(r4)
-	lwz	r3, (VCPU_SHARED_MSR + 4)(r3)
-	oris	r3, r3, KVMPPC_MSR_MASK@h
-	ori	r3, r3, KVMPPC_MSR_MASK@l
-	mtsrr1	r3
 
 	/* Clear any debug events which occurred since we disabled MSR[DE].
 	 * XXX This gives us a 3-instruction window in which a breakpoint