// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

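	/*
	 * Reset the per-vcpu MMIO emulation state to its defaults before
	 * decoding this access, so nothing stale from an earlier emulation
	 * is carried over.
	 */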
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

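	/*
	 * analyse_instr() (asm/sstep.h) decodes the instruction into op;
	 * a return of 0 means the actual memory access is left to the
	 * caller, which is the MMIO case dispatched on op.type below.
	 */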
	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
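		/*
		 * Floating-point loads: FPCONV forms (lfs and friends) set
		 * mmio_sp64_extend so the 32-bit MMIO value is converted to
		 * double precision when it is written into the FPR.
		 */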
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
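		/*
		 * Altivec (VMX) loads: a 16-byte lvx is performed as two
		 * 8-byte MMIO transactions; mmio_vmx_offset records which
		 * element of the quadword is being accessed.
		 */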
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(run,
						vcpu, KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(run, vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
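		/*
		 * VSX loads: mmio_vsx_copy_nums is the number of element-sized
		 * MMIO transactions making up the access (e.g. two 8-byte
		 * copies for lxvd2x), and VSX_SPLAT selects the load-and-splat
		 * copy types.
		 */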
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* If byte reversal is needed, op.val has already been
			 * byte-swapped by analyse_instr().
			 */
			emulated = kvmppc_handle_store(run, vcpu, op.val,
					size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
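		/*
		 * As in the STORE_FP case, giveup_ext() flushes the vector
		 * registers back to vcpu->arch so the source value read below
		 * is current.
		 */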
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(run, vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

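	/*
	 * Anything that could not be emulated is reported to the guest as a
	 * program interrupt, and the guest PC is left pointing at the
	 * faulting instruction.
	 */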
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}