KVM: PPC: Make shared struct aka magic page guest endian
The shared (magic) page is a data structure that contains often used
supervisor privileged SPRs accessible via memory to the user to reduce
the number of exits we have to take to read/write them.

When we actually share this structure with the guest we have to maintain
it in guest endianness, because some of the patch tricks only work with
native endian load/store operations.

Since we only share the structure with either host or guest in little
endian on book3s_64 pr mode, we don't have to worry about booke or
book3s hv.

For booke, the shared struct stays big endian. For book3s_64 hv we
maintain the struct in host native endian, since it never gets shared
with the guest.

For book3s_64 pr we introduce a variable that tells us which endianness
the shared struct is in and route every access to it through helper
inline functions that evaluate this variable.

Signed-off-by: Alexander Graf <agraf@suse.de>
commit 5deb8e7ad8
parent 2743103f91
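The core of the patch is the accessor pattern added to kvm_ppc.h in the diff below: every read or write of the shared (magic) page goes through a getter/setter that byteswaps when the struct's endianness differs from the host's. The standalone C sketch that follows mirrors that idea outside the kernel; the struct layout, the shared_big_endian flag and the get_msr/set_msr helpers are simplified stand-ins for illustration, not the kernel's actual definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the guest-visible shared (magic) page. */
struct shared_page {
	uint64_t msr;		/* stored in *guest* endianness */
};

struct vcpu {
	struct shared_page shared;
	bool shared_big_endian;	/* mirrors vcpu->arch.shared_big_endian */
};

static uint64_t bswap64(uint64_t v)
{
	return __builtin_bswap64(v);
}

static bool host_is_big_endian(void)
{
	const uint16_t probe = 1;
	return *(const uint8_t *)&probe == 0;
}

/* Rough equivalent of the SHARED_WRAPPER_GET/SET(msr, 64) pattern in the patch:
 * only swap when the stored endianness differs from the host's. */
static uint64_t get_msr(const struct vcpu *vcpu)
{
	uint64_t raw = vcpu->shared.msr;
	return (vcpu->shared_big_endian == host_is_big_endian()) ? raw : bswap64(raw);
}

static void set_msr(struct vcpu *vcpu, uint64_t val)
{
	vcpu->shared.msr = (vcpu->shared_big_endian == host_is_big_endian()) ? val : bswap64(val);
}

int main(void)
{
	struct vcpu vcpu = { .shared_big_endian = true };

	set_msr(&vcpu, 0x8000000000001032ULL);
	printf("logical MSR  : %#llx\n", (unsigned long long)get_msr(&vcpu));
	printf("in-memory MSR: %#llx\n", (unsigned long long)vcpu.shared.msr);
	return 0;
}

On a little-endian host the in-memory value comes out byteswapped relative to the logical one, which is the situation the kvmppc_swab_shared() hunk later in the diff handles when the guest remaps the magic page with a different endianness.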
@@ -268,9 +268,10 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
 	return vcpu->arch.pc;
 }
 
+static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
 static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
 {
-	return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
+	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
 }
 
 static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
@@ -108,9 +108,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault_dear;
 }
-
-static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.shared->msr;
-}
 #endif /* __ASM_KVM_BOOKE_H__ */
@@ -623,6 +623,9 @@ struct kvm_vcpu_arch {
 	wait_queue_head_t cpu_run;
 
 	struct kvm_vcpu_arch_shared *shared;
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+	bool shared_big_endian;
+#endif
 	unsigned long magic_page_pa; /* phys addr to map the magic page to */
 	unsigned long magic_page_ea; /* effect. addr to map the magic page to */
 
@@ -448,6 +448,84 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
 	}
 }
 
+/*
+ * Shared struct helpers. The shared struct can be little or big endian,
+ * depending on the guest endianness. So expose helpers to all of them.
+ */
+static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+	/* Only Book3S_64 PR supports bi-endian for now */
+	return vcpu->arch.shared_big_endian;
+#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
+	/* Book3s_64 HV on little endian is always little endian */
+	return false;
+#else
+	return true;
+#endif
+}
+
+#define SHARED_WRAPPER_GET(reg, size)					\
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
+{									\
+	if (kvmppc_shared_big_endian(vcpu))				\
+		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
+	else								\
+		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
+}									\
+
+#define SHARED_WRAPPER_SET(reg, size)					\
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
+{									\
+	if (kvmppc_shared_big_endian(vcpu))				\
+		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
+	else								\
+		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
+}									\
+
+#define SHARED_WRAPPER(reg, size)					\
+	SHARED_WRAPPER_GET(reg, size)					\
+	SHARED_WRAPPER_SET(reg, size)					\
+
+SHARED_WRAPPER(critical, 64)
+SHARED_WRAPPER(sprg0, 64)
+SHARED_WRAPPER(sprg1, 64)
+SHARED_WRAPPER(sprg2, 64)
+SHARED_WRAPPER(sprg3, 64)
+SHARED_WRAPPER(srr0, 64)
+SHARED_WRAPPER(srr1, 64)
+SHARED_WRAPPER(dar, 64)
+SHARED_WRAPPER_GET(msr, 64)
+static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
+{
+	if (kvmppc_shared_big_endian(vcpu))
+		vcpu->arch.shared->msr = cpu_to_be64(val);
+	else
+		vcpu->arch.shared->msr = cpu_to_le64(val);
+}
+SHARED_WRAPPER(dsisr, 32)
+SHARED_WRAPPER(int_pending, 32)
+SHARED_WRAPPER(sprg4, 64)
+SHARED_WRAPPER(sprg5, 64)
+SHARED_WRAPPER(sprg6, 64)
+SHARED_WRAPPER(sprg7, 64)
+
+static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
+{
+	if (kvmppc_shared_big_endian(vcpu))
+		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
+	else
+		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
+}
+
+static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
+{
+	if (kvmppc_shared_big_endian(vcpu))
+		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
+	else
+		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
+}
+
 /*
  * Please call after prepare_to_enter. This function puts the lazy ee and irq
  * disabled tracking state back to normal mode, without actually enabling
@@ -485,7 +563,7 @@ static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
 		msr_64bit = MSR_SF;
 #endif
 
-	if (!(vcpu->arch.shared->msr & msr_64bit))
+	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
 		ea = (uint32_t)ea;
 
 	return ea;
@@ -54,6 +54,7 @@
 #endif
 #if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S)
 #include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
 #endif
 
 #ifdef CONFIG_PPC32
@@ -467,6 +468,9 @@ int main(void)
 	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
 	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+	DEFINE(VCPU_SHAREDBE, offsetof(struct kvm_vcpu, arch.shared_big_endian));
+#endif
 
 	DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
 	DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
@@ -85,9 +85,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
 	if (is_kvmppc_hv_enabled(vcpu->kvm))
 		return;
 	if (pending_now)
-		vcpu->arch.shared->int_pending = 1;
+		kvmppc_set_int_pending(vcpu, 1);
 	else if (old_pending)
-		vcpu->arch.shared->int_pending = 0;
+		kvmppc_set_int_pending(vcpu, 0);
 }
 
 static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
@@ -99,11 +99,11 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 	if (is_kvmppc_hv_enabled(vcpu->kvm))
 		return false;
 
-	crit_raw = vcpu->arch.shared->critical;
+	crit_raw = kvmppc_get_critical(vcpu);
 	crit_r1 = kvmppc_get_gpr(vcpu, 1);
 
 	/* Truncate crit indicators in 32 bit mode */
-	if (!(vcpu->arch.shared->msr & MSR_SF)) {
+	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
 		crit_raw &= 0xffffffff;
 		crit_r1 &= 0xffffffff;
 	}
@@ -111,15 +111,15 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 	/* Critical section when crit == r1 */
 	crit = (crit_raw == crit_r1);
 	/* ... and we're in supervisor mode */
-	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);
 
 	return crit;
 }
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
-	vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
-	vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
+	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
+	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
 	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
 	vcpu->arch.mmu.reset_msr(vcpu);
 }
@@ -225,12 +225,12 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 
 	switch (priority) {
 	case BOOK3S_IRQPRIO_DECREMENTER:
-		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
+		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
 		vec = BOOK3S_INTERRUPT_DECREMENTER;
 		break;
 	case BOOK3S_IRQPRIO_EXTERNAL:
 	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
-		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
+		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
 		vec = BOOK3S_INTERRUPT_EXTERNAL;
 		break;
 	case BOOK3S_IRQPRIO_SYSTEM_RESET:
@@ -343,7 +343,7 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
-	if (!(vcpu->arch.shared->msr & MSR_SF))
+	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
 		mp_pa = (uint32_t)mp_pa;
 
 	/* Magic page override */
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
 			bool iswrite, struct kvmppc_pte *pte)
 {
-	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
+	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
 	int r;
 
 	if (relocated) {
@@ -498,18 +498,18 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->ctr = kvmppc_get_ctr(vcpu);
 	regs->lr = kvmppc_get_lr(vcpu);
 	regs->xer = kvmppc_get_xer(vcpu);
-	regs->msr = vcpu->arch.shared->msr;
-	regs->srr0 = vcpu->arch.shared->srr0;
-	regs->srr1 = vcpu->arch.shared->srr1;
+	regs->msr = kvmppc_get_msr(vcpu);
+	regs->srr0 = kvmppc_get_srr0(vcpu);
+	regs->srr1 = kvmppc_get_srr1(vcpu);
 	regs->pid = vcpu->arch.pid;
-	regs->sprg0 = vcpu->arch.shared->sprg0;
-	regs->sprg1 = vcpu->arch.shared->sprg1;
-	regs->sprg2 = vcpu->arch.shared->sprg2;
-	regs->sprg3 = vcpu->arch.shared->sprg3;
-	regs->sprg4 = vcpu->arch.shared->sprg4;
-	regs->sprg5 = vcpu->arch.shared->sprg5;
-	regs->sprg6 = vcpu->arch.shared->sprg6;
-	regs->sprg7 = vcpu->arch.shared->sprg7;
+	regs->sprg0 = kvmppc_get_sprg0(vcpu);
+	regs->sprg1 = kvmppc_get_sprg1(vcpu);
+	regs->sprg2 = kvmppc_get_sprg2(vcpu);
+	regs->sprg3 = kvmppc_get_sprg3(vcpu);
+	regs->sprg4 = kvmppc_get_sprg4(vcpu);
+	regs->sprg5 = kvmppc_get_sprg5(vcpu);
+	regs->sprg6 = kvmppc_get_sprg6(vcpu);
+	regs->sprg7 = kvmppc_get_sprg7(vcpu);
 
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -527,16 +527,16 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	kvmppc_set_lr(vcpu, regs->lr);
 	kvmppc_set_xer(vcpu, regs->xer);
 	kvmppc_set_msr(vcpu, regs->msr);
-	vcpu->arch.shared->srr0 = regs->srr0;
-	vcpu->arch.shared->srr1 = regs->srr1;
-	vcpu->arch.shared->sprg0 = regs->sprg0;
-	vcpu->arch.shared->sprg1 = regs->sprg1;
-	vcpu->arch.shared->sprg2 = regs->sprg2;
-	vcpu->arch.shared->sprg3 = regs->sprg3;
-	vcpu->arch.shared->sprg4 = regs->sprg4;
-	vcpu->arch.shared->sprg5 = regs->sprg5;
-	vcpu->arch.shared->sprg6 = regs->sprg6;
-	vcpu->arch.shared->sprg7 = regs->sprg7;
+	kvmppc_set_srr0(vcpu, regs->srr0);
+	kvmppc_set_srr1(vcpu, regs->srr1);
+	kvmppc_set_sprg0(vcpu, regs->sprg0);
+	kvmppc_set_sprg1(vcpu, regs->sprg1);
+	kvmppc_set_sprg2(vcpu, regs->sprg2);
+	kvmppc_set_sprg3(vcpu, regs->sprg3);
+	kvmppc_set_sprg4(vcpu, regs->sprg4);
+	kvmppc_set_sprg5(vcpu, regs->sprg5);
+	kvmppc_set_sprg6(vcpu, regs->sprg6);
+	kvmppc_set_sprg7(vcpu, regs->sprg7);
 
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -570,10 +570,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	r = 0;
 	switch (reg->id) {
 	case KVM_REG_PPC_DAR:
-		val = get_reg_val(reg->id, vcpu->arch.shared->dar);
+		val = get_reg_val(reg->id, kvmppc_get_dar(vcpu));
 		break;
 	case KVM_REG_PPC_DSISR:
-		val = get_reg_val(reg->id, vcpu->arch.shared->dsisr);
+		val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu));
 		break;
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		i = reg->id - KVM_REG_PPC_FPR0;
@@ -660,10 +660,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	r = 0;
 	switch (reg->id) {
 	case KVM_REG_PPC_DAR:
-		vcpu->arch.shared->dar = set_reg_val(reg->id, val);
+		kvmppc_set_dar(vcpu, set_reg_val(reg->id, val));
 		break;
 	case KVM_REG_PPC_DSISR:
-		vcpu->arch.shared->dsisr = set_reg_val(reg->id, val);
+		kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val));
 		break;
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		i = reg->id - KVM_REG_PPC_FPR0;
@@ -91,7 +91,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 
 static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-	return vcpu->arch.shared->sr[(eaddr >> 28) & 0xf];
+	return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf);
 }
 
 static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
@@ -160,7 +160,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 		else
 			bat = &vcpu_book3s->ibat[i];
 
-		if (vcpu->arch.shared->msr & MSR_PR) {
+		if (kvmppc_get_msr(vcpu) & MSR_PR) {
 			if (!bat->vp)
 				continue;
 		} else {
@@ -242,8 +242,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 			pte->raddr = (pte1 & ~(0xFFFULL)) | (eaddr & 0xFFF);
 			pp = pte1 & 3;
 
-			if ((sr_kp(sre) && (vcpu->arch.shared->msr & MSR_PR)) ||
-			    (sr_ks(sre) && !(vcpu->arch.shared->msr & MSR_PR)))
+			if ((sr_kp(sre) && (kvmppc_get_msr(vcpu) & MSR_PR)) ||
+			    (sr_ks(sre) && !(kvmppc_get_msr(vcpu) & MSR_PR)))
 				pp |= 4;
 
 			pte->may_write = false;
@@ -320,7 +320,7 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Magic page override */
 	if (unlikely(mp_ea) &&
 	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
-	    !(vcpu->arch.shared->msr & MSR_PR)) {
+	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 		pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
 		pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff);
 		pte->raddr &= KVM_PAM;
@@ -345,13 +345,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum)
 {
-	return vcpu->arch.shared->sr[srnum];
+	return kvmppc_get_sr(vcpu, srnum);
 }
 
 static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 					ulong value)
 {
-	vcpu->arch.shared->sr[srnum] = value;
+	kvmppc_set_sr(vcpu, srnum, value);
 	kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT);
 }
 
@@ -371,8 +371,9 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	ulong ea = esid << SID_SHIFT;
 	u32 sr;
 	u64 gvsid = esid;
+	u64 msr = kvmppc_get_msr(vcpu);
 
-	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+	if (msr & (MSR_DR|MSR_IR)) {
 		sr = find_sr(vcpu, ea);
 		if (sr_valid(sr))
 			gvsid = sr_vsid(sr);
@@ -381,7 +382,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	/* In case we only have one of MSR_IR or MSR_DR set, let's put
 	   that in the real-mode context (and hope RM doesn't access
 	   high memory) */
-	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+	switch (msr & (MSR_DR|MSR_IR)) {
 	case 0:
 		*vsid = VSID_REAL | esid;
 		break;
@@ -401,7 +402,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		BUG();
 	}
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (msr & MSR_PR)
 		*vsid |= VSID_PR;
 
 	return 0;
@@ -92,7 +92,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 	struct kvmppc_sid_map *map;
 	u16 sid_map_mask;
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -279,7 +279,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 	u16 sid_map_mask;
 	static int backwards_map = 0;
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	/* We might get collisions that trap in preceding order, so let's
@@ -226,7 +226,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Magic page override */
 	if (unlikely(mp_ea) &&
 	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
-	    !(vcpu->arch.shared->msr & MSR_PR)) {
+	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 		gpte->eaddr = eaddr;
 		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
 		gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
@@ -269,9 +269,9 @@ do_second:
 		goto no_page_found;
 	}
 
-	if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
+	if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
 		key = 4;
-	else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
+	else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
 		key = 4;
 
 	for (i=0; i<16; i+=2) {
@@ -482,7 +482,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
 		vcpu->arch.slb[i].origv = 0;
 	}
 
-	if (vcpu->arch.shared->msr & MSR_IR) {
+	if (kvmppc_get_msr(vcpu) & MSR_IR) {
 		kvmppc_mmu_flush_segments(vcpu);
 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 	}
@@ -566,7 +566,7 @@ static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
 {
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 
-	return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
+	return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
 		(mp_ea >> SID_SHIFT) == esid;
 }
 #endif
@@ -579,8 +579,9 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	u64 gvsid = esid;
 	ulong mp_ea = vcpu->arch.magic_page_ea;
 	int pagesize = MMU_PAGE_64K;
+	u64 msr = kvmppc_get_msr(vcpu);
 
-	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+	if (msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
 		if (slb) {
 			gvsid = slb->vsid;
@@ -593,7 +594,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		}
 	}
 
-	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+	switch (msr & (MSR_DR|MSR_IR)) {
 	case 0:
 		gvsid = VSID_REAL | esid;
 		break;
@@ -626,7 +627,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		gvsid |= VSID_64K;
 #endif
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	*vsid = gvsid;
@@ -636,7 +637,7 @@ no_slb:
 	/* Catch magic page case */
 	if (unlikely(mp_ea) &&
 	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
-	    !(vcpu->arch.shared->msr & MSR_PR)) {
+	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 		*vsid = VSID_REAL | esid;
 		return 0;
 	}
@@ -58,7 +58,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 	struct kvmppc_sid_map *map;
 	u16 sid_map_mask;
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -230,7 +230,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 	u16 sid_map_mask;
 	static int backwards_map = 0;
 
-	if (vcpu->arch.shared->msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	/* We might get collisions that trap in preceding order, so let's
@@ -80,7 +80,7 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
 		return false;
 
 	/* Limit user space to its own small SPR set */
-	if ((vcpu->arch.shared->msr & MSR_PR) && level > PRIV_PROBLEM)
+	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
 		return false;
 
 	return true;
@@ -100,8 +100,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		switch (get_xop(inst)) {
 		case OP_19_XOP_RFID:
 		case OP_19_XOP_RFI:
-			kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0);
-			kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
+			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
+			kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu));
 			*advance = 0;
 			break;
 
@@ -113,16 +113,16 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case 31:
 		switch (get_xop(inst)) {
 		case OP_31_XOP_MFMSR:
-			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
+			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
 			break;
 		case OP_31_XOP_MTMSRD:
 		{
 			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
 			if (inst & 0x10000) {
-				ulong new_msr = vcpu->arch.shared->msr;
+				ulong new_msr = kvmppc_get_msr(vcpu);
 				new_msr &= ~(MSR_RI | MSR_EE);
 				new_msr |= rs_val & (MSR_RI | MSR_EE);
-				vcpu->arch.shared->msr = new_msr;
+				kvmppc_set_msr_fast(vcpu, new_msr);
 			} else
 				kvmppc_set_msr(vcpu, rs_val);
 			break;
@@ -179,7 +179,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
 			int i;
 
-			if ((vcpu->arch.shared->msr & MSR_PR) ||
+			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
 			    !vcpu->arch.papr_enabled) {
 				emulated = EMULATE_FAIL;
 				break;
@@ -261,14 +261,14 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				ra_val = kvmppc_get_gpr(vcpu, ra);
 
 			addr = (ra_val + rb_val) & ~31ULL;
-			if (!(vcpu->arch.shared->msr & MSR_SF))
+			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
 				addr &= 0xffffffff;
 			vaddr = addr;
 
 			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
 			if ((r == -ENOENT) || (r == -EPERM)) {
 				*advance = 0;
-				vcpu->arch.shared->dar = vaddr;
+				kvmppc_set_dar(vcpu, vaddr);
 				vcpu->arch.fault_dar = vaddr;
 
 				dsisr = DSISR_ISSTORE;
@@ -277,7 +277,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				else if (r == -EPERM)
 					dsisr |= DSISR_PROTFAULT;
 
-				vcpu->arch.shared->dsisr = dsisr;
+				kvmppc_set_dsisr(vcpu, dsisr);
 				vcpu->arch.fault_dsisr = dsisr;
 
 				kvmppc_book3s_queue_irqprio(vcpu,
@@ -356,10 +356,10 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 		to_book3s(vcpu)->sdr1 = spr_val;
 		break;
 	case SPRN_DSISR:
-		vcpu->arch.shared->dsisr = spr_val;
+		kvmppc_set_dsisr(vcpu, spr_val);
 		break;
 	case SPRN_DAR:
-		vcpu->arch.shared->dar = spr_val;
+		kvmppc_set_dar(vcpu, spr_val);
 		break;
 	case SPRN_HIOR:
 		to_book3s(vcpu)->hior = spr_val;
@@ -493,10 +493,10 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
 		*spr_val = to_book3s(vcpu)->sdr1;
 		break;
 	case SPRN_DSISR:
-		*spr_val = vcpu->arch.shared->dsisr;
+		*spr_val = kvmppc_get_dsisr(vcpu);
 		break;
	case SPRN_DAR:
-		*spr_val = vcpu->arch.shared->dar;
+		*spr_val = kvmppc_get_dar(vcpu);
 		break;
 	case SPRN_HIOR:
 		*spr_val = to_book3s(vcpu)->hior;
@@ -18,6 +18,7 @@
  */
 
 #include <linux/export.h>
+#include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -1280,6 +1280,17 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 		goto free_vcpu;
 
 	vcpu->arch.shared = &vcpu->arch.shregs;
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	/*
+	 * The shared struct is never shared on HV,
+	 * so we can always use host endianness
+	 */
+#ifdef __BIG_ENDIAN__
+	vcpu->arch.shared_big_endian = true;
+#else
+	vcpu->arch.shared_big_endian = false;
+#endif
+#endif
 	vcpu->arch.mmcr[0] = MMCR0_FC;
 	vcpu->arch.ctrl = CTRL_RUNLATCH;
 	/* default to host PVR, since we can't spoof it */
@@ -104,8 +104,27 @@ kvm_start_lightweight:
 	stb	r3, HSTATE_RESTORE_HID5(r13)
 
 	/* Load up guest SPRG3 value, since it's user readable */
-	ld	r3, VCPU_SHARED(r4)
-	ld	r3, VCPU_SHARED_SPRG3(r3)
+	lwz	r3, VCPU_SHAREDBE(r4)
+	cmpwi	r3, 0
+	ld	r5, VCPU_SHARED(r4)
+	beq	sprg3_little_endian
+sprg3_big_endian:
+#ifdef __BIG_ENDIAN__
+	ld	r3, VCPU_SHARED_SPRG3(r5)
+#else
+	addi	r5, r5, VCPU_SHARED_SPRG3
+	ldbrx	r3, 0, r5
+#endif
+	b	after_sprg3_load
+sprg3_little_endian:
+#ifdef __LITTLE_ENDIAN__
+	ld	r3, VCPU_SHARED_SPRG3(r5)
+#else
+	addi	r5, r5, VCPU_SHARED_SPRG3
+	ldbrx	r3, 0, r5
+#endif
+
+after_sprg3_load:
 	mtspr	SPRN_SPRG3, r3
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
@@ -165,16 +165,18 @@ static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
 
 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
 {
-	u64 dsisr;
-	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
+	u32 dsisr;
+	u64 msr = kvmppc_get_msr(vcpu);
 
-	shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0);
-	shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0);
-	shared->dar = eaddr;
+	msr = kvmppc_set_field(msr, 33, 36, 0);
+	msr = kvmppc_set_field(msr, 42, 47, 0);
+	kvmppc_set_msr(vcpu, msr);
+	kvmppc_set_dar(vcpu, eaddr);
 	/* Page Fault */
 	dsisr = kvmppc_set_field(0, 33, 33, 1);
 	if (is_store)
-		shared->dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
+		dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
+	kvmppc_set_dsisr(vcpu, dsisr);
 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
 }
 
@@ -660,7 +662,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	if (!kvmppc_inst_is_paired_single(vcpu, inst))
 		return EMULATE_FAIL;
 
-	if (!(vcpu->arch.shared->msr & MSR_FP)) {
+	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
 		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
 		return EMULATE_AGAIN;
 	}
@@ -246,14 +246,15 @@ static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
 
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
-	ulong smsr = vcpu->arch.shared->msr;
+	ulong guest_msr = kvmppc_get_msr(vcpu);
+	ulong smsr = guest_msr;
 
 	/* Guest MSR values */
 	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
 	/* Process MSR values */
 	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
 	/* External providers the guest reserved */
-	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
+	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
 	/* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
 	smsr |= MSR_ISF | MSR_HV;
@@ -263,14 +264,14 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 
 static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 {
-	ulong old_msr = vcpu->arch.shared->msr;
+	ulong old_msr = kvmppc_get_msr(vcpu);
 
 #ifdef EXIT_DEBUG
 	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
 
 	msr &= to_book3s(vcpu)->msr_mask;
-	vcpu->arch.shared->msr = msr;
+	kvmppc_set_msr_fast(vcpu, msr);
 	kvmppc_recalc_shadow_msr(vcpu);
 
 	if (msr & MSR_POW) {
@@ -281,11 +282,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 
 			/* Unset POW bit after we woke up */
 			msr &= ~MSR_POW;
-			vcpu->arch.shared->msr = msr;
+			kvmppc_set_msr_fast(vcpu, msr);
 		}
 	}
 
-	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
 		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
 		kvmppc_mmu_flush_segments(vcpu);
 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
@@ -317,7 +318,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 	}
 
 	/* Preload FPU if it's enabled */
-	if (vcpu->arch.shared->msr & MSR_FP)
+	if (kvmppc_get_msr(vcpu) & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
@@ -438,7 +439,7 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
-	if (!(vcpu->arch.shared->msr & MSR_SF))
+	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
 		mp_pa = (uint32_t)mp_pa;
 
 	if (unlikely(mp_pa) &&
@@ -459,8 +460,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	int page_found = 0;
 	struct kvmppc_pte pte;
 	bool is_mmio = false;
-	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
-	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
+	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
+	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
 	u64 vsid;
 
 	relocated = data ? dr : ir;
@@ -480,7 +481,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte.page_size = MMU_PAGE_64K;
 	}
 
-	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
 	case 0:
 		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
 		break;
@@ -488,7 +489,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case MSR_IR:
 		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
 
-		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
+		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
 			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
 		else
 			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
@@ -511,22 +512,25 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	if (page_found == -ENOENT) {
 		/* Page not found in guest PTE entries */
-		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
-		vcpu->arch.shared->msr |=
-			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
+		u64 ssrr1 = vcpu->arch.shadow_srr1;
+		u64 msr = kvmppc_get_msr(vcpu);
+		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
+		kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
+		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
-		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
-		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
-		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
-		vcpu->arch.shared->msr |=
-			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
+		u32 dsisr = vcpu->arch.fault_dsisr;
+		u64 ssrr1 = vcpu->arch.shadow_srr1;
+		u64 msr = kvmppc_get_msr(vcpu);
+		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
+		dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
+		kvmppc_set_dsisr(vcpu, dsisr);
+		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
-		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
 		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
 	} else if (!is_mmio &&
 		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -614,11 +618,12 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
 
 	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
 	if (ret == -ENOENT) {
-		ulong msr = vcpu->arch.shared->msr;
+		ulong msr = kvmppc_get_msr(vcpu);
 
 		msr = kvmppc_set_field(msr, 33, 33, 1);
 		msr = kvmppc_set_field(msr, 34, 36, 0);
-		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
+		msr = kvmppc_set_field(msr, 42, 47, 0);
+		kvmppc_set_msr_fast(vcpu, msr);
 		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
 		return EMULATE_AGAIN;
 	}
@@ -651,7 +656,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
 		return RESUME_GUEST;
 
-	if (!(vcpu->arch.shared->msr & msr)) {
+	if (!(kvmppc_get_msr(vcpu) & msr)) {
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		return RESUME_GUEST;
 	}
@@ -792,7 +797,9 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
 			r = RESUME_GUEST;
 		} else {
-			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
+			u64 msr = kvmppc_get_msr(vcpu);
+			msr |= shadow_srr1 & 0x58000000;
+			kvmppc_set_msr_fast(vcpu, msr);
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
 		}
@@ -832,8 +839,8 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
 			srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		} else {
-			vcpu->arch.shared->dar = dar;
-			vcpu->arch.shared->dsisr = fault_dsisr;
+			kvmppc_set_dar(vcpu, dar);
+			kvmppc_set_dsisr(vcpu, fault_dsisr);
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			r = RESUME_GUEST;
 		}
@@ -841,7 +848,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	}
 	case BOOK3S_INTERRUPT_DATA_SEGMENT:
 		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
-			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
 			kvmppc_book3s_queue_irqprio(vcpu,
 				BOOK3S_INTERRUPT_DATA_SEGMENT);
 		}
@@ -879,7 +886,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 program_interrupt:
 		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
-		if (vcpu->arch.shared->msr & MSR_PR) {
+		if (kvmppc_get_msr(vcpu) & MSR_PR) {
 #ifdef EXIT_DEBUG
 			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
@@ -921,7 +928,7 @@ program_interrupt:
 	case BOOK3S_INTERRUPT_SYSCALL:
 		if (vcpu->arch.papr_enabled &&
 		    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
-		    !(vcpu->arch.shared->msr & MSR_PR)) {
+		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 			/* SC 1 papr hypercalls */
 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
 			int i;
@@ -953,7 +960,7 @@ program_interrupt:
 				gprs[i] = kvmppc_get_gpr(vcpu, i);
 			vcpu->arch.osi_needed = 1;
 			r = RESUME_HOST_NV;
-		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
+		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
 		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
 			/* KVM PV hypercalls */
 			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
@@ -994,10 +1001,16 @@ program_interrupt:
 	}
 	case BOOK3S_INTERRUPT_ALIGNMENT:
 		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
-			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
-				kvmppc_get_last_inst(vcpu));
-			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
-				kvmppc_get_last_inst(vcpu));
+			u32 last_inst = kvmppc_get_last_inst(vcpu);
+			u32 dsisr;
+			u64 dar;
+
+			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
+			dar = kvmppc_alignment_dar(vcpu, last_inst);
+
+			kvmppc_set_dsisr(vcpu, dsisr);
+			kvmppc_set_dar(vcpu, dar);
+
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		}
 		r = RESUME_GUEST;
@@ -1062,7 +1075,7 @@ static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
 		}
 	} else {
 		for (i = 0; i < 16; i++)
-			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
+			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);
 
 		for (i = 0; i < 8; i++) {
 			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
@@ -1198,8 +1211,14 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 		goto uninit_vcpu;
 	/* the real shared page fills the last 4k of our page */
 	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
-
 #ifdef CONFIG_PPC_BOOK3S_64
+	/* Always start the shared struct in native endian mode */
+#ifdef __BIG_ENDIAN__
+	vcpu->arch.shared_big_endian = true;
+#else
+	vcpu->arch.shared_big_endian = false;
+#endif
+
 	/*
 	 * Default to the same as the host if we're on sufficiently
 	 * recent machine that we have 1TB segments;
@@ -1293,7 +1312,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
 	/* Preload FPU if it's enabled */
-	if (vcpu->arch.shared->msr & MSR_FP)
+	if (kvmppc_get_msr(vcpu) & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
 	kvmppc_fix_ee_before_entry();
@@ -278,7 +278,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 	case H_PUT_TCE:
 		return kvmppc_h_pr_put_tce(vcpu);
 	case H_CEDE:
-		vcpu->arch.shared->msr |= MSR_EE;
+		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
 		vcpu->stat.halt_wakeup++;
@@ -97,10 +97,10 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 
 	switch (sprn) {
 	case SPRN_SRR0:
-		vcpu->arch.shared->srr0 = spr_val;
+		kvmppc_set_srr0(vcpu, spr_val);
 		break;
 	case SPRN_SRR1:
-		vcpu->arch.shared->srr1 = spr_val;
+		kvmppc_set_srr1(vcpu, spr_val);
 		break;
 
 	/* XXX We need to context-switch the timebase for
@@ -114,16 +114,16 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 		break;
 
 	case SPRN_SPRG0:
-		vcpu->arch.shared->sprg0 = spr_val;
+		kvmppc_set_sprg0(vcpu, spr_val);
 		break;
 	case SPRN_SPRG1:
-		vcpu->arch.shared->sprg1 = spr_val;
+		kvmppc_set_sprg1(vcpu, spr_val);
 		break;
 	case SPRN_SPRG2:
-		vcpu->arch.shared->sprg2 = spr_val;
+		kvmppc_set_sprg2(vcpu, spr_val);
 		break;
 	case SPRN_SPRG3:
-		vcpu->arch.shared->sprg3 = spr_val;
+		kvmppc_set_sprg3(vcpu, spr_val);
 		break;
 
 	/* PIR can legally be written, but we ignore it */
@@ -150,10 +150,10 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 
 	switch (sprn) {
 	case SPRN_SRR0:
-		spr_val = vcpu->arch.shared->srr0;
+		spr_val = kvmppc_get_srr0(vcpu);
 		break;
 	case SPRN_SRR1:
-		spr_val = vcpu->arch.shared->srr1;
+		spr_val = kvmppc_get_srr1(vcpu);
 		break;
 	case SPRN_PVR:
 		spr_val = vcpu->arch.pvr;
@@ -173,16 +173,16 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 		break;
 
 	case SPRN_SPRG0:
-		spr_val = vcpu->arch.shared->sprg0;
+		spr_val = kvmppc_get_sprg0(vcpu);
 		break;
 	case SPRN_SPRG1:
-		spr_val = vcpu->arch.shared->sprg1;
+		spr_val = kvmppc_get_sprg1(vcpu);
 		break;
 	case SPRN_SPRG2:
-		spr_val = vcpu->arch.shared->sprg2;
+		spr_val = kvmppc_get_sprg2(vcpu);
 		break;
 	case SPRN_SPRG3:
-		spr_val = vcpu->arch.shared->sprg3;
+		spr_val = kvmppc_get_sprg3(vcpu);
 		break;
 	/* Note: SPRG4-7 are user-readable, so we don't get
 	 * a trap. */
|
@ -125,6 +125,27 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
|
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
|
||||||
|
|
||||||
|
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
|
||||||
|
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
|
||||||
|
int i;
|
||||||
|
|
||||||
|
shared->sprg0 = swab64(shared->sprg0);
|
||||||
|
shared->sprg1 = swab64(shared->sprg1);
|
||||||
|
shared->sprg2 = swab64(shared->sprg2);
|
||||||
|
shared->sprg3 = swab64(shared->sprg3);
|
||||||
|
shared->srr0 = swab64(shared->srr0);
|
||||||
|
shared->srr1 = swab64(shared->srr1);
|
||||||
|
shared->dar = swab64(shared->dar);
|
||||||
|
shared->msr = swab64(shared->msr);
|
||||||
|
shared->dsisr = swab32(shared->dsisr);
|
||||||
|
shared->int_pending = swab32(shared->int_pending);
|
||||||
|
for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
|
||||||
|
shared->sr[i] = swab32(shared->sr[i]);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
|
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
int nr = kvmppc_get_gpr(vcpu, 11);
|
int nr = kvmppc_get_gpr(vcpu, 11);
|
||||||
|
@ -135,7 +156,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
|
||||||
unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
|
unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
|
||||||
unsigned long r2 = 0;
|
unsigned long r2 = 0;
|
||||||
|
|
||||||
if (!(vcpu->arch.shared->msr & MSR_SF)) {
|
if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
|
||||||
/* 32 bit mode */
|
/* 32 bit mode */
|
||||||
param1 &= 0xffffffff;
|
param1 &= 0xffffffff;
|
||||||
param2 &= 0xffffffff;
|
param2 &= 0xffffffff;
|
||||||
|
@ -146,6 +167,16 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
|
||||||
switch (nr) {
|
switch (nr) {
|
||||||
case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
|
case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
|
||||||
{
|
{
|
||||||
|
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
|
||||||
|
/* Book3S can be little endian, find it out here */
|
||||||
|
int shared_big_endian = true;
|
||||||
|
if (vcpu->arch.intr_msr & MSR_LE)
|
||||||
|
shared_big_endian = false;
|
||||||
|
if (shared_big_endian != vcpu->arch.shared_big_endian)
|
||||||
|
kvmppc_swab_shared(vcpu);
|
||||||
|
vcpu->arch.shared_big_endian = shared_big_endian;
|
||||||
|
#endif
|
||||||
|
|
||||||
vcpu->arch.magic_page_pa = param1;
|
vcpu->arch.magic_page_pa = param1;
|
||||||
vcpu->arch.magic_page_ea = param2;
|
vcpu->arch.magic_page_ea = param2;
|
||||||
|
|
||||||
|
|
|
@ -255,7 +255,7 @@ TRACE_EVENT(kvm_exit,
|
||||||
__entry->exit_nr = exit_nr;
|
__entry->exit_nr = exit_nr;
|
||||||
__entry->pc = kvmppc_get_pc(vcpu);
|
__entry->pc = kvmppc_get_pc(vcpu);
|
||||||
__entry->dar = kvmppc_get_fault_dar(vcpu);
|
__entry->dar = kvmppc_get_fault_dar(vcpu);
|
||||||
__entry->msr = vcpu->arch.shared->msr;
|
__entry->msr = kvmppc_get_msr(vcpu);
|
||||||
__entry->srr1 = vcpu->arch.shadow_srr1;
|
__entry->srr1 = vcpu->arch.shadow_srr1;
|
||||||
__entry->last_inst = vcpu->arch.last_inst;
|
__entry->last_inst = vcpu->arch.last_inst;
|
||||||
),
|
),
|
||||||
|
|