Second PPC KVM update for 4.16
Seven fixes that are either trivial or address bugs that people are actually hitting. The main ones are:

- Drop spinlocks before reading guest memory
- Fix a bug causing corruption of VCPU state in PR KVM with preemption enabled
- Make HPT resizing work on POWER9
- Add MMIO emulation for vector loads and stores, because guests now use these instructions in memcpy and similar routines

Merge tag 'kvm-ppc-next-4.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
This commit is contained in: commit 1ab03c072f
@@ -249,10 +249,8 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
 extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
 extern int kvmppc_hcall_impl_pr(unsigned long cmd);
 extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
-extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
-                                 struct kvm_vcpu *vcpu);
-extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
-                                   struct kvmppc_book3s_shadow_vcpu *svcpu);
+extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
 extern int kvm_irq_bypass;

 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
@@ -690,6 +690,7 @@ struct kvm_vcpu_arch {
     u8 mmio_vsx_offset;
     u8 mmio_vsx_copy_type;
     u8 mmio_vsx_tx_sx_enabled;
+    u8 mmio_vmx_copy_nums;
     u8 osi_needed;
     u8 osi_enabled;
     u8 papr_enabled;
@@ -804,6 +805,7 @@ struct kvm_vcpu_arch {
 #define KVM_MMIO_REG_QPR    0x0040
 #define KVM_MMIO_REG_FQPR   0x0060
 #define KVM_MMIO_REG_VSX    0x0080
+#define KVM_MMIO_REG_VMX    0x00c0

 #define __KVM_HAVE_ARCH_WQP
 #define __KVM_HAVE_CREATE_DEVICE
@@ -81,6 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
 extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 unsigned int rt, unsigned int bytes,
                 int is_default_endian, int mmio_sign_extend);
+extern int kvmppc_handle_load128_by2x64(struct kvm_run *run,
+        struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian);
+extern int kvmppc_handle_store128_by2x64(struct kvm_run *run,
+        struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian);
 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 u64 val, unsigned int bytes,
                 int is_default_endian);
@@ -156,6 +156,12 @@
 #define OP_31_XOP_LFDX      599
 #define OP_31_XOP_LFDUX     631

+/* VMX Vector Load Instructions */
+#define OP_31_XOP_LVX       103
+
+/* VMX Vector Store Instructions */
+#define OP_31_XOP_STVX      231
+
 #define OP_LWZ   32
 #define OP_STFS  52
 #define OP_STFSU 53
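Note: OP_31_XOP_LVX and OP_31_XOP_STVX are extended-opcode values under primary opcode 31, matched in kvmppc_emulate_loadstore() via the get_op()/get_xop() helpers. A minimal standalone sketch of that decoding (plain C mirroring arch/powerpc/include/asm/disassemble.h; the instruction word below is made up, with zeroed register fields):

    #include <stdio.h>
    #include <stdint.h>

    /* Primary opcode: bits 0-5 of the big-endian instruction word. */
    static unsigned int get_op(uint32_t inst)  { return inst >> 26; }
    /* Extended opcode for opcode-31 forms: bits 21-30. */
    static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }

    int main(void)
    {
        uint32_t lvx = (31u << 26) | (103u << 1); /* lvx vRT,rA,rB, registers zeroed */

        printf("op=%u xop=%u\n", get_op(lvx), get_xop(lvx)); /* prints op=31 xop=103 */
        return 0;
    }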
@@ -69,7 +69,7 @@ config KVM_BOOK3S_64
     select KVM_BOOK3S_64_HANDLER
     select KVM
     select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
-    select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV)
+    select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV)
     ---help---
       Support running unmodified book3s_64 and book3s_32 guest kernels
       in virtual machines on book3s_64 host processors.
@@ -1269,6 +1269,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
         /* Nothing to do */
         goto out;

+    if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+        rpte = be64_to_cpu(hptep[1]);
+        vpte = hpte_new_to_old_v(vpte, rpte);
+    }
+
     /* Unmap */
     rev = &old->rev[idx];
     guest_rpte = rev->guest_rpte;
@@ -1298,7 +1303,6 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,

     /* Reload PTE after unmap */
     vpte = be64_to_cpu(hptep[0]);
-
     BUG_ON(vpte & HPTE_V_VALID);
     BUG_ON(!(vpte & HPTE_V_ABSENT));

@@ -1307,6 +1311,12 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
         goto out;

     rpte = be64_to_cpu(hptep[1]);
+
+    if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+        vpte = hpte_new_to_old_v(vpte, rpte);
+        rpte = hpte_new_to_old_r(rpte);
+    }
+
     pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
     avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
     pteg = idx / HPTES_PER_GROUP;
@@ -1337,17 +1347,17 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
     }

     new_pteg = hash & new_hash_mask;
-    if (vpte & HPTE_V_SECONDARY) {
-        BUG_ON(~pteg != (hash & old_hash_mask));
-        new_pteg = ~new_pteg;
-    } else {
-        BUG_ON(pteg != (hash & old_hash_mask));
-    }
+    if (vpte & HPTE_V_SECONDARY)
+        new_pteg = ~hash & new_hash_mask;

     new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP);
     new_hptep = (__be64 *)(new->virt + (new_idx << 4));

     replace_vpte = be64_to_cpu(new_hptep[0]);
+    if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+        unsigned long replace_rpte = be64_to_cpu(new_hptep[1]);
+        replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte);
+    }

     if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
         BUG_ON(new->order >= old->order);
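Note: the rewritten PTEG selection above computes the secondary-hash group index directly as the masked complement of the hash, rather than complementing an already-masked value and BUG_ONing on a mismatch. A standalone illustration of the arithmetic (the mask size is chosen arbitrarily for the example):

    #include <stdio.h>

    int main(void)
    {
        unsigned long hash = 0x123456789abcdef0UL;
        unsigned long new_hash_mask = (1UL << 18) - 1; /* example: 2^18 PTE groups */

        /* Primary PTEG: hash masked to the table size.
         * Secondary PTEG: complement of the hash, masked the same way. */
        unsigned long primary   = hash  & new_hash_mask;
        unsigned long secondary = ~hash & new_hash_mask;

        printf("primary=%#lx secondary=%#lx\n", primary, secondary);
        return 0;
    }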
@@ -1363,6 +1373,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
         /* Discard the previous HPTE */
     }

+    if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+        rpte = hpte_old_to_new_r(vpte, rpte);
+        vpte = hpte_old_to_new_v(vpte);
+    }
+
     new_hptep[1] = cpu_to_be64(rpte);
     new->rev[new_idx].guest_rpte = guest_rpte;
     /* No need for a barrier, since new HPT isn't active */
@@ -1380,12 +1395,6 @@ static int resize_hpt_rehash(struct kvm_resize_hpt *resize)
     unsigned long i;
     int rc;

-    /*
-     * resize_hpt_rehash_hpte() doesn't handle the new-format HPTEs
-     * that POWER9 uses, and could well hit a BUG_ON on POWER9.
-     */
-    if (cpu_has_feature(CPU_FTR_ARCH_300))
-        return -EIO;
     for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
         rc = resize_hpt_rehash_hpte(resize, i);
         if (rc != 0)
@@ -1416,6 +1425,9 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)

     synchronize_srcu_expedited(&kvm->srcu);

+    if (cpu_has_feature(CPU_FTR_ARCH_300))
+        kvmppc_setup_partition_table(kvm);
+
     resize_hpt_debug(resize, "resize_hpt_pivot() done\n");
 }

@@ -1008,8 +1008,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
     struct kvm *kvm = vcpu->kvm;
     struct kvm_vcpu *tvcpu;

-    if (!cpu_has_feature(CPU_FTR_ARCH_300))
-        return EMULATE_FAIL;
     if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
         return RESUME_GUEST;
     if (get_op(inst) != 31)
@@ -1059,6 +1057,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
     return RESUME_GUEST;
 }

+/* Called with vcpu->arch.vcore->lock held */
 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                  struct task_struct *tsk)
 {
@@ -1179,7 +1178,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
             swab32(vcpu->arch.emul_inst) :
             vcpu->arch.emul_inst;
         if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
+            /* Need vcore unlocked to call kvmppc_get_last_inst */
+            spin_unlock(&vcpu->arch.vcore->lock);
             r = kvmppc_emulate_debug_inst(run, vcpu);
+            spin_lock(&vcpu->arch.vcore->lock);
         } else {
             kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
             r = RESUME_GUEST;
@@ -1194,8 +1196,13 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
      */
     case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
         r = EMULATE_FAIL;
-        if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG)
+        if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
+            cpu_has_feature(CPU_FTR_ARCH_300)) {
+            /* Need vcore unlocked to call kvmppc_get_last_inst */
+            spin_unlock(&vcpu->arch.vcore->lock);
             r = kvmppc_emulate_doorbell_instr(vcpu);
+            spin_lock(&vcpu->arch.vcore->lock);
+        }
         if (r == EMULATE_FAIL) {
             kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
             r = RESUME_GUEST;
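Note: both hunks above follow the same pattern: vcpu->arch.vcore->lock is a spinlock, and kvmppc_get_last_inst() may have to fault in guest memory, so the lock is dropped around the emulation call and retaken afterwards. A standalone sketch of the pattern (a user-space mutex stands in for the spinlock; all names are invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t vcore_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for kvmppc_get_last_inst(): may block on a page fault,
     * so it must not run with the lock held. */
    static int read_guest_memory(void)
    {
        return 42;
    }

    static int handle_exit(void)
    {
        int r;

        pthread_mutex_lock(&vcore_lock);
        /* ... exit handling that needs the lock ... */

        pthread_mutex_unlock(&vcore_lock); /* drop lock before touching guest memory */
        r = read_guest_memory();
        pthread_mutex_lock(&vcore_lock);   /* retake; state may have changed meanwhile */

        pthread_mutex_unlock(&vcore_lock);
        return r;
    }

    int main(void)
    {
        printf("%d\n", handle_exit());
        return 0;
    }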
@@ -2946,13 +2953,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
     /* make sure updates to secondary vcpu structs are visible now */
     smp_mb();

+    preempt_enable();
+
     for (sub = 0; sub < core_info.n_subcores; ++sub) {
         pvc = core_info.vc[sub];
         post_guest_process(pvc, pvc == vc);
     }

     spin_lock(&vc->lock);
-    preempt_enable();

 out:
     vc->vcore_state = VCORE_INACTIVE;
@@ -413,10 +413,11 @@ FTR_SECTION_ELSE
     /* On P9 we use the split_info for coordinating LPCR changes */
     lwz     r4, KVM_SPLIT_DO_SET(r6)
     cmpwi   r4, 0
-    beq     63f
+    beq     1f
     mr      r3, r6
     bl      kvmhv_p9_set_lpcr
     nop
+1:
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 63:
     /* Order load of vcpu after load of vcore */
@@ -96,7 +96,7 @@ kvm_start_entry:

 kvm_start_lightweight:
     /* Copy registers into shadow vcpu so we can access them in real mode */
-    GET_SHADOW_VCPU(r3)
+    mr      r3, r4
     bl      FUNC(kvmppc_copy_to_svcpu)
     nop
     REST_GPR(4, r1)
@@ -165,9 +165,7 @@ after_sprg3_load:
     stw     r12, VCPU_TRAP(r3)

     /* Transfer reg values from shadow vcpu back to vcpu struct */
-    /* On 64-bit, interrupts are still off at this point */

-    GET_SHADOW_VCPU(r4)
     bl      FUNC(kvmppc_copy_from_svcpu)
     nop

@@ -121,7 +121,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_PPC_BOOK3S_64
     struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
     if (svcpu->in_use) {
-        kvmppc_copy_from_svcpu(vcpu, svcpu);
+        kvmppc_copy_from_svcpu(vcpu);
     }
     memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
     to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
@@ -143,9 +143,10 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 }

 /* Copy data needed by real-mode code from vcpu to shadow vcpu */
-void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
-                          struct kvm_vcpu *vcpu)
+void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
 {
+    struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+
     svcpu->gpr[0] = vcpu->arch.gpr[0];
     svcpu->gpr[1] = vcpu->arch.gpr[1];
     svcpu->gpr[2] = vcpu->arch.gpr[2];
@@ -177,17 +178,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
     if (cpu_has_feature(CPU_FTR_ARCH_207S))
         vcpu->arch.entry_ic = mfspr(SPRN_IC);
     svcpu->in_use = true;
+
+    svcpu_put(svcpu);
 }

 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
-void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
-                            struct kvmppc_book3s_shadow_vcpu *svcpu)
+void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
 {
-    /*
-     * vcpu_put would just call us again because in_use hasn't
-     * been updated yet.
-     */
-    preempt_disable();
-
+    struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+
     /*
      * Maybe we were already preempted and synced the svcpu from
@@ -233,7 +231,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
     svcpu->in_use = false;

 out:
-    preempt_enable();
+    svcpu_put(svcpu);
 }

 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
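Note: the net effect of the book3s_pr.c hunks is that the copy routines now take and release the shadow-vcpu reference themselves, so the whole copy runs in one preemption-disabled region and in_use is flipped only at its boundaries. A toy standalone model of that handshake (user-space stubs, not the kernel API; svcpu_get()/svcpu_put() here merely stand in for the real helpers, which disable and re-enable preemption):

    #include <stdbool.h>
    #include <stdio.h>

    struct svcpu { bool in_use; unsigned long gpr[4]; };
    struct vcpu  { unsigned long gpr[4]; };

    static struct svcpu shadow;

    static struct svcpu *svcpu_get(void)   { /* preempt_disable() */ return &shadow; }
    static void svcpu_put(struct svcpu *s) { (void)s; /* preempt_enable() */ }

    static void copy_to_svcpu(struct vcpu *v)
    {
        struct svcpu *s = svcpu_get();

        for (int i = 0; i < 4; i++)
            s->gpr[i] = v->gpr[i];
        s->in_use = true;   /* marked only after the copy completes */
        svcpu_put(s);
    }

    static void copy_from_svcpu(struct vcpu *v)
    {
        struct svcpu *s = svcpu_get();

        if (!s->in_use)     /* already synced, e.g. by a preempt notifier */
            goto out;
        for (int i = 0; i < 4; i++)
            v->gpr[i] = s->gpr[i];
        s->in_use = false;
    out:
        svcpu_put(s);
    }

    int main(void)
    {
        struct vcpu v = { .gpr = {1, 2, 3, 4} };

        copy_to_svcpu(&v);
        copy_from_svcpu(&v);
        printf("gpr0=%lu in_use=%d\n", v.gpr[0], shadow.in_use);
        return 0;
    }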
@@ -58,6 +58,18 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 }
 #endif /* CONFIG_VSX */

+#ifdef CONFIG_ALTIVEC
+static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
+{
+    if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
+        kvmppc_core_queue_vec_unavail(vcpu);
+        return true;
+    }
+
+    return false;
+}
+#endif /* CONFIG_ALTIVEC */
+
 /*
  * XXX to do:
  * lfiwax, lfiwzx
@@ -98,6 +110,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
     vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
     vcpu->arch.mmio_sp64_extend = 0;
     vcpu->arch.mmio_sign_extend = 0;
+    vcpu->arch.mmio_vmx_copy_nums = 0;

     switch (get_op(inst)) {
     case 31:
@@ -459,6 +472,29 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
                     rs, 4, 1);
             break;
 #endif /* CONFIG_VSX */
+
+#ifdef CONFIG_ALTIVEC
+        case OP_31_XOP_LVX:
+            if (kvmppc_check_altivec_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.vaddr_accessed &= ~0xFULL;
+            vcpu->arch.paddr_accessed &= ~0xFULL;
+            vcpu->arch.mmio_vmx_copy_nums = 2;
+            emulated = kvmppc_handle_load128_by2x64(run, vcpu,
+                    KVM_MMIO_REG_VMX|rt, 1);
+            break;
+
+        case OP_31_XOP_STVX:
+            if (kvmppc_check_altivec_disabled(vcpu))
+                return EMULATE_DONE;
+            vcpu->arch.vaddr_accessed &= ~0xFULL;
+            vcpu->arch.paddr_accessed &= ~0xFULL;
+            vcpu->arch.mmio_vmx_copy_nums = 2;
+            emulated = kvmppc_handle_store128_by2x64(run, vcpu,
+                    rs, 1);
+            break;
+#endif /* CONFIG_ALTIVEC */
+
         default:
             emulated = EMULATE_FAIL;
             break;
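Note: clearing the low four bits of vaddr_accessed/paddr_accessed matches the architecture: lvx/stvx ignore the low four bits of the effective address, so the access is forced to the containing naturally aligned 16-byte quadword. A throwaway illustration of the mask:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long ea = 0x1000000000003ffcULL; /* arbitrary example EA */

        /* lvx/stvx ignore EA bits 60:63, so the emulation clamps the
         * faulting address to the containing 16-byte quadword. */
        printf("%#llx -> %#llx\n", ea, ea & ~0xFULL); /* ...3ffc -> ...3ff0 */
        return 0;
    }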
@@ -638,8 +638,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
         r = 1;
         break;
     case KVM_CAP_SPAPR_RESIZE_HPT:
-        /* Disable this on POWER9 until code handles new HPTE format */
-        r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
+        r = !!hv_enabled;
         break;
 #endif
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -930,6 +929,34 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */

+#ifdef CONFIG_ALTIVEC
+static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
+        u64 gpr)
+{
+    int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+    u32 hi, lo;
+    u32 di;
+
+#ifdef __BIG_ENDIAN
+    hi = gpr >> 32;
+    lo = gpr & 0xffffffff;
+#else
+    lo = gpr >> 32;
+    hi = gpr & 0xffffffff;
+#endif
+
+    di = 2 - vcpu->arch.mmio_vmx_copy_nums;    /* doubleword index */
+    if (di > 1)
+        return;
+
+    if (vcpu->arch.mmio_host_swabbed)
+        di = 1 - di;
+
+    VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
+    VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
+}
+#endif /* CONFIG_ALTIVEC */
+
 #ifdef CONFIG_PPC_FPU
 static inline u64 sp_to_dp(u32 fprs)
 {
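Note: mmio_vmx_copy_nums counts the 8-byte halves still outstanding, so 2 - mmio_vmx_copy_nums recovers which doubleword of the quadword a completed MMIO half belongs to, and the hi/lo split follows host endianness. A standalone illustration of the index arithmetic (plain C, kernel types replaced):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t gpr = 0x1122334455667788ULL;
        uint32_t hi = gpr >> 32;        /* 0x11223344 on a big-endian split */
        uint32_t lo = gpr & 0xffffffff; /* 0x55667788 */

        /* copy_nums starts at 2 and drops by one per completed 8-byte
         * half, so the first completion lands in doubleword 0 and the
         * second in doubleword 1. */
        for (int copy_nums = 2; copy_nums >= 1; copy_nums--) {
            int di = 2 - copy_nums;
            printf("copy_nums=%d -> doubleword index %d (hi=%#x lo=%#x)\n",
                   copy_nums, di, hi, lo);
        }
        return 0;
    }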
@@ -1032,6 +1059,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
              KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
             kvmppc_set_vsr_dword_dump(vcpu, gpr);
         break;
 #endif
+#ifdef CONFIG_ALTIVEC
+    case KVM_MMIO_REG_VMX:
+        kvmppc_set_vmx_dword(vcpu, gpr);
+        break;
+#endif
     default:
         BUG();
@@ -1308,6 +1340,111 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */

+#ifdef CONFIG_ALTIVEC
+/* handle quadword load access in two halves */
+int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+        unsigned int rt, int is_default_endian)
+{
+    enum emulation_result emulated;
+
+    while (vcpu->arch.mmio_vmx_copy_nums) {
+        emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
+                is_default_endian, 0);
+
+        if (emulated != EMULATE_DONE)
+            break;
+
+        vcpu->arch.paddr_accessed += run->mmio.len;
+        vcpu->arch.mmio_vmx_copy_nums--;
+    }
+
+    return emulated;
+}
+
+static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+{
+    vector128 vrs = VCPU_VSX_VR(vcpu, rs);
+    u32 di;
+    u64 w0, w1;
+
+    di = 2 - vcpu->arch.mmio_vmx_copy_nums;    /* doubleword index */
+    if (di > 1)
+        return -1;
+
+    if (vcpu->arch.mmio_host_swabbed)
+        di = 1 - di;
+
+    w0 = vrs.u[di * 2];
+    w1 = vrs.u[di * 2 + 1];
+
+#ifdef __BIG_ENDIAN
+    *val = (w0 << 32) | w1;
+#else
+    *val = (w1 << 32) | w0;
+#endif
+    return 0;
+}
+
+/* handle quadword store in two halves */
+int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+        unsigned int rs, int is_default_endian)
+{
+    u64 val = 0;
+    enum emulation_result emulated = EMULATE_DONE;
+
+    vcpu->arch.io_gpr = rs;
+
+    while (vcpu->arch.mmio_vmx_copy_nums) {
+        if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
+            return EMULATE_FAIL;
+
+        emulated = kvmppc_handle_store(run, vcpu, val, 8,
+                is_default_endian);
+        if (emulated != EMULATE_DONE)
+            break;
+
+        vcpu->arch.paddr_accessed += run->mmio.len;
+        vcpu->arch.mmio_vmx_copy_nums--;
+    }
+
+    return emulated;
+}
+
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
+        struct kvm_run *run)
+{
+    enum emulation_result emulated = EMULATE_FAIL;
+    int r;
+
+    vcpu->arch.paddr_accessed += run->mmio.len;
+
+    if (!vcpu->mmio_is_write) {
+        emulated = kvmppc_handle_load128_by2x64(run, vcpu,
+                vcpu->arch.io_gpr, 1);
+    } else {
+        emulated = kvmppc_handle_store128_by2x64(run, vcpu,
+                vcpu->arch.io_gpr, 1);
+    }
+
+    switch (emulated) {
+    case EMULATE_DO_MMIO:
+        run->exit_reason = KVM_EXIT_MMIO;
+        r = RESUME_HOST;
+        break;
+    case EMULATE_FAIL:
+        pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
+        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+        run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+        r = RESUME_HOST;
+        break;
+    default:
+        r = RESUME_GUEST;
+        break;
+    }
+    return r;
+}
+#endif /* CONFIG_ALTIVEC */
+
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 {
     int r = 0;
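Note: a 16-byte lvx/stvx access is thus emulated as two 8-byte MMIO transactions: each completed half advances paddr_accessed by run->mmio.len and decrements mmio_vmx_copy_nums, and the loop re-issues until the counter reaches zero. A minimal standalone sketch of that splitting (all names invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-in for one 8-byte MMIO transaction. */
    static void do_mmio(uint64_t paddr, int len)
    {
        printf("MMIO access at %#llx, %d bytes\n",
               (unsigned long long)paddr, len);
    }

    int main(void)
    {
        uint64_t paddr = 0x3fe00000; /* example emulated-device address */
        int copy_nums = 2;           /* two 8-byte halves of a quadword */

        while (copy_nums) {
            do_mmio(paddr, 8);
            paddr += 8;              /* as paddr_accessed += run->mmio.len */
            copy_nums--;             /* as mmio_vmx_copy_nums-- */
        }
        return 0;
    }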
@@ -1428,6 +1565,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 goto out;
             }
         }
 #endif
+#ifdef CONFIG_ALTIVEC
+        if (vcpu->arch.mmio_vmx_copy_nums > 0)
+            vcpu->arch.mmio_vmx_copy_nums--;
+
+        if (vcpu->arch.mmio_vmx_copy_nums > 0) {
+            r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+            if (r == RESUME_HOST) {
+                vcpu->mmio_needed = 1;
+                goto out;
+            }
+        }
+#endif
     } else if (vcpu->arch.osi_needed) {
         u64 *gprs = run->osi.gprs;