KVM/riscv changes for 5.20

- Track ISA extensions used by Guest using bitmap
- Added system instruction emulation framework
- Added CSR emulation framework
- Added gfp_custom flag in struct kvm_mmu_memory_cache
- Added G-stage ioremap() and iounmap() functions
- Added support for Svpbmt inside Guest
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEZdn75s5e6LHDQ+f/rUjsVaLHLAcFAmLj0WwACgkQrUjsVaLH
 LAc6GA//Uan79kMF8OQ9/ID+XGw35RXMJXrYKq+QvuQyLoJr5a8Bb1YRWUm517AE
 34oKdiFw0KZSq2GjWV9RBvMGUShBE1LBiIaj7wLzo2siUW0Lm1k2/YfWwnWE/IzG
 eUkONEvhPP2lRoYRdm3u1SU0LTlLmXSYcwr/9H5IOB4rET+My9bCpO+BYSYwXdAn
 qpuBElczRDOIgvRpWUtpOItEFBRV+lfwl1X5Pi9PBtJydrsQBosNtHNevQA3xR8J
 jigD/Pmw5vnQ5V0FGTiEJGaMMho+SdD29H5cmAExWoXn2zQSdyrXZngChOaB/Oed
 TTjRl5jwIcioGQCdVl6/o/4Jmm/frNZhp0XJssV2GjW0DEMQHZ6ZtDMAedkh7K9S
 u8bZf2YfG6Ed08NCAPiltm/KssNtV3blC/4nN0ExrLX4V4WBC1AOVKBwQjpEMrMy
 eOAuldcmRqrIkAjGeI8WL6bOseuxMDmdBdeK2cqZUYbSAkoSOgIh7mnbTLaxC7oU
 eHevvq1THutv5LdVSgzffN5MaRiuJd8sQvtvxx20N8n6Uzp3INx8MpYkifbJVjv4
 HMd78uU4Ii3CayY/JxyWEnKhCewSKoK6Kb0zlBM+bs08mIhzAZZpP837zHIzsv/j
 Va6x+vr8U0IWiMrUfO+050BXHJbRG07pzrzg5osZYonWRWvmiTE=
 =8FMB
 -----END PGP SIGNATURE-----

Merge tag 'kvm-riscv-5.20-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv changes for 5.20

* Track ISA extensions used by Guest using bitmap

* Added system instruction emulation framework

* Added CSR emulation framework

* Added gfp_custom flag in struct kvm_mmu_memory_cache

* Added G-stage ioremap() and iounmap() functions

* Added support for Svpbmt inside Guest
Paolo Bonzini 2022-07-29 09:34:45 -04:00
commit 2e2e91158f
17 changed files with 1029 additions and 600 deletions
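Editor's note: as a quick illustration of the bitmap-based extension tracking merged here (a sketch, not part of this merge), userspace can probe or toggle individual extensions through the KVM_REG_RISCV_ISA_EXT one-reg interface, e.g. enabling Svpbmt before the VCPU first runs. KVM_REG_SIZE_U64 is assumed here for an RV64 host, where it matches the unsigned long register size.

#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

/* Sketch: ask KVM to expose Svpbmt to the guest. Must be done before
 * the VCPU first runs; fails with EOPNOTSUPP if the host lacks Svpbmt. */
static int vcpu_enable_svpbmt(int vcpu_fd)
{
	unsigned long enable = 1;	/* 1 = enable, 0 = disable */
	struct kvm_one_reg reg = {
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |	/* RV64 assumed */
		      KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
		.addr = (unsigned long)&enable,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}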

arch/riscv/include/asm/csr.h

@@ -156,6 +156,18 @@
(_AC(1, UL) << IRQ_S_TIMER) | \
(_AC(1, UL) << IRQ_S_EXT))
/* xENVCFG flags */
#define ENVCFG_STCE (_AC(1, ULL) << 63)
#define ENVCFG_PBMTE (_AC(1, ULL) << 62)
#define ENVCFG_CBZE (_AC(1, UL) << 7)
#define ENVCFG_CBCFE (_AC(1, UL) << 6)
#define ENVCFG_CBIE_SHIFT 4
#define ENVCFG_CBIE (_AC(0x3, UL) << ENVCFG_CBIE_SHIFT)
#define ENVCFG_CBIE_ILL _AC(0x0, UL)
#define ENVCFG_CBIE_FLUSH _AC(0x1, UL)
#define ENVCFG_CBIE_INV _AC(0x3, UL)
#define ENVCFG_FIOM _AC(0x1, UL)
/* symbolic CSR names: */
#define CSR_CYCLE 0xc00
#define CSR_TIME 0xc01
@@ -252,7 +264,9 @@
#define CSR_HTIMEDELTA 0x605
#define CSR_HCOUNTEREN 0x606
#define CSR_HGEIE 0x607
#define CSR_HENVCFG 0x60a
#define CSR_HTIMEDELTAH 0x615
#define CSR_HENVCFGH 0x61a
#define CSR_HTVAL 0x643
#define CSR_HIP 0x644
#define CSR_HVIP 0x645
@@ -264,6 +278,8 @@
#define CSR_MISA 0x301
#define CSR_MIE 0x304
#define CSR_MTVEC 0x305
#define CSR_MENVCFG 0x30a
#define CSR_MENVCFGH 0x31a
#define CSR_MSCRATCH 0x340
#define CSR_MEPC 0x341
#define CSR_MCAUSE 0x342

arch/riscv/include/asm/kvm_host.h

@@ -14,7 +14,9 @@
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_timer.h>
#define KVM_MAX_VCPUS 1024
@@ -63,6 +65,8 @@ struct kvm_vcpu_stat {
u64 wfi_exit_stat;
u64 mmio_exit_user;
u64 mmio_exit_kernel;
u64 csr_exit_user;
u64 csr_exit_kernel;
u64 exits;
};
@@ -90,14 +94,6 @@ struct kvm_arch {
struct kvm_guest_timer timer;
};
struct kvm_mmio_decode {
unsigned long insn;
int insn_len;
int len;
int shift;
int return_handled;
};
struct kvm_sbi_context {
int return_handled;
};
@@ -170,7 +166,7 @@ struct kvm_vcpu_arch {
int last_exit_cpu;
/* ISA feature bits (similar to MISA) */
unsigned long isa;
DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);
/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
unsigned long host_sscratch;
@@ -216,6 +212,9 @@ struct kvm_vcpu_arch {
/* MMIO instruction details */
struct kvm_mmio_decode mmio_decode;
/* CSR instruction details */
struct kvm_csr_decode csr_decode;
/* SBI context */
struct kvm_sbi_context sbi_context;
@@ -285,6 +284,11 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
unsigned long hbase, unsigned long hmask);
int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
phys_addr_t hpa, unsigned long size,
bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot,
gpa_t gpa, unsigned long hva, bool is_write);
@@ -303,14 +307,12 @@ void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
void __kvm_riscv_unpriv_trap(void);
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
bool read_insn,
unsigned long guest_addr,
struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_trap *trap);

arch/riscv/include/asm/kvm_vcpu_fp.h

@@ -22,9 +22,9 @@ void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context);
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
unsigned long isa);
const unsigned long *isa);
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
unsigned long isa);
const unsigned long *isa);
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx);
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx);
#else
@@ -32,12 +32,12 @@ static inline void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
}
static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
unsigned long isa)
const unsigned long *isa)
{
}
static inline void kvm_riscv_vcpu_guest_fp_restore(
struct kvm_cpu_context *cntx,
unsigned long isa)
const unsigned long *isa)
{
}
static inline void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)

arch/riscv/include/asm/kvm_vcpu_insn.h (new file)

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
#ifndef __KVM_VCPU_RISCV_INSN_H
#define __KVM_VCPU_RISCV_INSN_H
struct kvm_vcpu;
struct kvm_run;
struct kvm_cpu_trap;
struct kvm_mmio_decode {
unsigned long insn;
int insn_len;
int len;
int shift;
int return_handled;
};
struct kvm_csr_decode {
unsigned long insn;
int return_handled;
};
/* Return values used by function emulating a particular instruction */
enum kvm_insn_return {
KVM_INSN_EXIT_TO_USER_SPACE = 0,
KVM_INSN_CONTINUE_NEXT_SEPC,
KVM_INSN_CONTINUE_SAME_SEPC,
KVM_INSN_ILLEGAL_TRAP,
KVM_INSN_VIRTUAL_TRAP
};
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
unsigned long fault_addr,
unsigned long htinst);
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
unsigned long fault_addr,
unsigned long htinst);
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
#endif

arch/riscv/include/asm/kvm_vcpu_timer.h

@@ -39,6 +39,6 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
int kvm_riscv_guest_timer_init(struct kvm *kvm);
void kvm_riscv_guest_timer_init(struct kvm *kvm);
#endif

arch/riscv/include/uapi/asm/kvm.h

@@ -96,6 +96,7 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_H,
KVM_RISCV_ISA_EXT_I,
KVM_RISCV_ISA_EXT_M,
KVM_RISCV_ISA_EXT_SVPBMT,
KVM_RISCV_ISA_EXT_MAX,
};

arch/riscv/kvm/Makefile

@@ -17,6 +17,7 @@ kvm-y += mmu.o
kvm-y += vcpu.o
kvm-y += vcpu_exit.o
kvm-y += vcpu_fp.o
kvm-y += vcpu_insn.o
kvm-y += vcpu_switch.o
kvm-y += vcpu_sbi.o
kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o

arch/riscv/kvm/mmu.c

@@ -343,8 +343,9 @@ static void gstage_wp_memory_region(struct kvm *kvm, int slot)
kvm_flush_remote_tlbs(kvm);
}
static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
unsigned long size, bool writable)
int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
phys_addr_t hpa, unsigned long size,
bool writable, bool in_atomic)
{
pte_t pte;
int ret = 0;
@@ -353,13 +354,14 @@ static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
struct kvm_mmu_memory_cache pcache;
memset(&pcache, 0, sizeof(pcache));
pcache.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0;
pcache.gfp_zero = __GFP_ZERO;
end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
pfn = __phys_to_pfn(hpa);
for (addr = gpa; addr < end; addr += PAGE_SIZE) {
pte = pfn_pte(pfn, PAGE_KERNEL);
pte = pfn_pte(pfn, PAGE_KERNEL_IO);
if (!writable)
pte = pte_wrprotect(pte);
@@ -382,6 +384,13 @@ out:
return ret;
}
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
{
spin_lock(&kvm->mmu_lock);
gstage_unmap_range(kvm, gpa, size, false);
spin_unlock(&kvm->mmu_lock);
}
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset,
@@ -517,8 +526,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
goto out;
}
ret = gstage_ioremap(kvm, gpa, pa,
vm_end - vm_start, writable);
ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
vm_end - vm_start,
writable, false);
if (ret)
break;
}
@@ -611,7 +621,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
{
int ret;
kvm_pfn_t hfn;
bool writeable;
bool writable;
short vma_pageshift;
gfn_t gfn = gpa >> PAGE_SHIFT;
struct vm_area_struct *vma;
@@ -659,7 +669,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
mmu_seq = kvm->mmu_notifier_seq;
hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
if (hfn == KVM_PFN_ERR_HWPOISON) {
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
vma_pageshift, current);
@@ -673,14 +683,14 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
* for write faults.
*/
if (logging && !is_write)
writeable = false;
writable = false;
spin_lock(&kvm->mmu_lock);
if (mmu_notifier_retry(kvm, mmu_seq))
goto out_unlock;
if (writeable) {
if (writable) {
kvm_set_pfn_dirty(hfn);
mark_page_dirty(kvm, gfn);
ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,

arch/riscv/kvm/vcpu.c

@@ -26,6 +26,8 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
STATS_DESC_COUNTER(VCPU, mmio_exit_user),
STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
STATS_DESC_COUNTER(VCPU, csr_exit_user),
STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
STATS_DESC_COUNTER(VCPU, exits)
};
@@ -38,16 +40,58 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
#define KVM_RISCV_ISA_DISABLE_ALLOWED (riscv_isa_extension_mask(d) | \
riscv_isa_extension_mask(f))
#define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0)
#define KVM_RISCV_ISA_DISABLE_NOT_ALLOWED (riscv_isa_extension_mask(a) | \
riscv_isa_extension_mask(c) | \
riscv_isa_extension_mask(i) | \
riscv_isa_extension_mask(m))
/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
RISCV_ISA_EXT_a,
RISCV_ISA_EXT_c,
RISCV_ISA_EXT_d,
RISCV_ISA_EXT_f,
RISCV_ISA_EXT_h,
RISCV_ISA_EXT_i,
RISCV_ISA_EXT_m,
RISCV_ISA_EXT_SVPBMT,
};
#define KVM_RISCV_ISA_ALLOWED (KVM_RISCV_ISA_DISABLE_ALLOWED | \
KVM_RISCV_ISA_DISABLE_NOT_ALLOWED)
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
unsigned long i;
for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
if (kvm_isa_ext_arr[i] == base_ext)
return i;
}
return KVM_RISCV_ISA_EXT_MAX;
}
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
switch (ext) {
case KVM_RISCV_ISA_EXT_H:
return false;
default:
break;
}
return true;
}
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
switch (ext) {
case KVM_RISCV_ISA_EXT_A:
case KVM_RISCV_ISA_EXT_C:
case KVM_RISCV_ISA_EXT_I:
case KVM_RISCV_ISA_EXT_M:
return false;
default:
break;
}
return true;
}
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
@@ -99,13 +143,20 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *cntx;
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
unsigned long host_isa, i;
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
/* Setup ISA features available to VCPU */
vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;
for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
host_isa = kvm_isa_ext_arr[i];
if (__riscv_isa_extension_available(NULL, host_isa) &&
kvm_riscv_vcpu_isa_enable_allowed(i))
set_bit(host_isa, vcpu->arch.isa);
}
/* Setup VCPU hfence queue */
spin_lock_init(&vcpu->arch.hfence_lock);
@@ -199,7 +250,7 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
reg_val = vcpu->arch.isa;
reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
break;
default:
return -EINVAL;
@@ -219,7 +270,7 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CONFIG);
unsigned long reg_val;
unsigned long i, isa_ext, reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
@@ -227,13 +278,32 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
/* This ONE REG interface is only defined for single letter extensions */
if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
return -EINVAL;
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
if (!vcpu->arch.ran_atleast_once) {
/* Ignore the disable request for these extensions */
vcpu->arch.isa = reg_val | KVM_RISCV_ISA_DISABLE_NOT_ALLOWED;
vcpu->arch.isa &= riscv_isa_extension_base(NULL);
vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
/* Ignore the enable/disable request for certain extensions */
for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
reg_val &= ~BIT(i);
continue;
}
if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
if (reg_val & BIT(i))
reg_val &= ~BIT(i);
if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
if (!(reg_val & BIT(i)))
reg_val |= BIT(i);
}
reg_val &= riscv_isa_extension_base(NULL);
/* Do not modify anything beyond single letter extensions */
reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
(reg_val & KVM_RISCV_BASE_ISA_MASK);
vcpu->arch.isa[0] = reg_val;
kvm_riscv_vcpu_fp_reset(vcpu);
} else {
return -EOPNOTSUPP;
@@ -374,17 +444,6 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
return 0;
}
/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static unsigned long kvm_isa_ext_arr[] = {
RISCV_ISA_EXT_a,
RISCV_ISA_EXT_c,
RISCV_ISA_EXT_d,
RISCV_ISA_EXT_f,
RISCV_ISA_EXT_h,
RISCV_ISA_EXT_i,
RISCV_ISA_EXT_m,
};
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
@@ -399,11 +458,12 @@ static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -EINVAL;
host_isa_ext = kvm_isa_ext_arr[reg_num];
if (__riscv_isa_extension_available(&vcpu->arch.isa, host_isa_ext))
if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
reg_val = 1; /* Mark the given extension as available */
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
@@ -422,12 +482,12 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
KVM_REG_RISCV_ISA_EXT);
unsigned long reg_val;
unsigned long host_isa_ext;
unsigned long host_isa_ext_mask;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= KVM_RISCV_ISA_EXT_MAX || reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
@@ -437,30 +497,19 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
if (!__riscv_isa_extension_available(NULL, host_isa_ext))
return -EOPNOTSUPP;
if (host_isa_ext >= RISCV_ISA_EXT_BASE &&
host_isa_ext < RISCV_ISA_EXT_MAX) {
/*
* Multi-letter ISA extension. Currently there is no provision
* to enable/disable the multi-letter ISA extensions for guests.
* Return success if the request is to enable any ISA extension
* that is available in the hardware.
* Return -EOPNOTSUPP otherwise.
*/
if (!reg_val)
return -EOPNOTSUPP;
else
return 0;
}
/* Single letter base ISA extension */
if (!vcpu->arch.ran_atleast_once) {
host_isa_ext_mask = BIT_MASK(host_isa_ext);
if (!reg_val && (host_isa_ext_mask & KVM_RISCV_ISA_DISABLE_ALLOWED))
vcpu->arch.isa &= ~host_isa_ext_mask;
/*
* All multi-letter extension and a few single letter
* extension can be disabled
*/
if (reg_val == 1 &&
kvm_riscv_vcpu_isa_enable_allowed(reg_num))
set_bit(host_isa_ext, vcpu->arch.isa);
else if (!reg_val &&
kvm_riscv_vcpu_isa_disable_allowed(reg_num))
clear_bit(host_isa_ext, vcpu->arch.isa);
else
vcpu->arch.isa |= host_isa_ext_mask;
vcpu->arch.isa &= riscv_isa_extension_base(NULL);
vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
return -EINVAL;
kvm_riscv_vcpu_fp_reset(vcpu);
} else {
return -EOPNOTSUPP;
@@ -729,6 +778,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
return -EINVAL;
}
static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
{
u64 henvcfg = 0;
if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SVPBMT))
henvcfg |= ENVCFG_PBMTE;
csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
csr_write(CSR_HENVCFGH, henvcfg >> 32);
#endif
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
@@ -743,6 +805,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
csr_write(CSR_HVIP, csr->hvip);
csr_write(CSR_VSATP, csr->vsatp);
kvm_riscv_vcpu_update_config(vcpu->arch.isa);
kvm_riscv_gstage_update_hgatp(vcpu);
kvm_riscv_vcpu_timer_restore(vcpu);
@@ -853,22 +917,26 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_vcpu_srcu_read_lock(vcpu);
/* Process MMIO value returned from user-space */
if (run->exit_reason == KVM_EXIT_MMIO) {
switch (run->exit_reason) {
case KVM_EXIT_MMIO:
/* Process MMIO value returned from user-space */
ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
if (ret) {
kvm_vcpu_srcu_read_unlock(vcpu);
return ret;
}
}
/* Process SBI value returned from user-space */
if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
break;
case KVM_EXIT_RISCV_SBI:
/* Process SBI value returned from user-space */
ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
if (ret) {
kvm_vcpu_srcu_read_unlock(vcpu);
return ret;
}
break;
case KVM_EXIT_RISCV_CSR:
/* Process CSR value returned from user-space */
ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
break;
default:
ret = 0;
break;
}
if (ret) {
kvm_vcpu_srcu_read_unlock(vcpu);
return ret;
}
if (run->immediate_exit) {
@@ -890,8 +958,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_riscv_check_vcpu_requests(vcpu);
preempt_disable();
local_irq_disable();
/*
@@ -928,7 +994,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_request_pending(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
local_irq_enable();
preempt_enable();
kvm_vcpu_srcu_read_lock(vcpu);
continue;
}
@@ -962,6 +1027,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/* Syncup interrupts state with HW */
kvm_riscv_vcpu_sync_interrupts(vcpu);
preempt_disable();
/*
* We must ensure that any pending interrupts are taken before
* we exit guest timing so that timer ticks are accounted as

arch/riscv/kvm/vcpu_exit.c

@@ -6,435 +6,34 @@
* Anup Patel <anup.patel@wdc.com>
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#define INSN_OPCODE_MASK 0x007c
#define INSN_OPCODE_SHIFT 2
#define INSN_OPCODE_SYSTEM 28
#define INSN_MASK_WFI 0xffffffff
#define INSN_MATCH_WFI 0x10500073
#define INSN_MATCH_LB 0x3
#define INSN_MASK_LB 0x707f
#define INSN_MATCH_LH 0x1003
#define INSN_MASK_LH 0x707f
#define INSN_MATCH_LW 0x2003
#define INSN_MASK_LW 0x707f
#define INSN_MATCH_LD 0x3003
#define INSN_MASK_LD 0x707f
#define INSN_MATCH_LBU 0x4003
#define INSN_MASK_LBU 0x707f
#define INSN_MATCH_LHU 0x5003
#define INSN_MASK_LHU 0x707f
#define INSN_MATCH_LWU 0x6003
#define INSN_MASK_LWU 0x707f
#define INSN_MATCH_SB 0x23
#define INSN_MASK_SB 0x707f
#define INSN_MATCH_SH 0x1023
#define INSN_MASK_SH 0x707f
#define INSN_MATCH_SW 0x2023
#define INSN_MASK_SW 0x707f
#define INSN_MATCH_SD 0x3023
#define INSN_MASK_SD 0x707f
#define INSN_MATCH_C_LD 0x6000
#define INSN_MASK_C_LD 0xe003
#define INSN_MATCH_C_SD 0xe000
#define INSN_MASK_C_SD 0xe003
#define INSN_MATCH_C_LW 0x4000
#define INSN_MASK_C_LW 0xe003
#define INSN_MATCH_C_SW 0xc000
#define INSN_MASK_C_SW 0xe003
#define INSN_MATCH_C_LDSP 0x6002
#define INSN_MASK_C_LDSP 0xe003
#define INSN_MATCH_C_SDSP 0xe002
#define INSN_MASK_C_SDSP 0xe003
#define INSN_MATCH_C_LWSP 0x4002
#define INSN_MASK_C_LWSP 0xe003
#define INSN_MATCH_C_SWSP 0xc002
#define INSN_MASK_C_SWSP 0xe003
#define INSN_16BIT_MASK 0x3
#define INSN_IS_16BIT(insn) (((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)
#define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4)
#ifdef CONFIG_64BIT
#define LOG_REGBYTES 3
#else
#define LOG_REGBYTES 2
#endif
#define REGBYTES (1 << LOG_REGBYTES)
#define SH_RD 7
#define SH_RS1 15
#define SH_RS2 20
#define SH_RS2C 2
#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \
(RV_X(x, 10, 3) << 3) | \
(RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \
(RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \
(RV_X(x, 12, 1) << 5) | \
(RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \
(RV_X(x, 12, 1) << 5) | \
(RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \
(RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \
(RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5)
#define SHIFT_RIGHT(x, y) \
((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
#define REG_MASK \
((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
#define REG_OFFSET(insn, pos) \
(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
#define REG_PTR(insn, pos, regs) \
((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))
#define GET_RM(insn) (((insn) >> 12) & 7)
#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs) (*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn) ((s32)(insn) >> 20)
#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
(s32)(((insn) >> 7) & 0x1f))
#define MASK_FUNCT3 0x7000
static int truly_illegal_insn(struct kvm_vcpu *vcpu,
struct kvm_run *run,
ulong insn)
{
struct kvm_cpu_trap utrap = { 0 };
/* Redirect trap to Guest VCPU */
utrap.sepc = vcpu->arch.guest_context.sepc;
utrap.scause = EXC_INST_ILLEGAL;
utrap.stval = insn;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
static int system_opcode_insn(struct kvm_vcpu *vcpu,
struct kvm_run *run,
ulong insn)
{
if ((insn & INSN_MASK_WFI) == INSN_MATCH_WFI) {
vcpu->stat.wfi_exit_stat++;
kvm_riscv_vcpu_wfi(vcpu);
vcpu->arch.guest_context.sepc += INSN_LEN(insn);
return 1;
}
return truly_illegal_insn(vcpu, run, insn);
}
static int virtual_inst_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_trap *trap)
{
unsigned long insn = trap->stval;
struct kvm_cpu_trap utrap = { 0 };
struct kvm_cpu_context *ct;
if (unlikely(INSN_IS_16BIT(insn))) {
if (insn == 0) {
ct = &vcpu->arch.guest_context;
insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
ct->sepc,
&utrap);
if (utrap.scause) {
utrap.sepc = ct->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
}
if (INSN_IS_16BIT(insn))
return truly_illegal_insn(vcpu, run, insn);
}
switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
case INSN_OPCODE_SYSTEM:
return system_opcode_insn(vcpu, run, insn);
default:
return truly_illegal_insn(vcpu, run, insn);
}
}
static int emulate_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
unsigned long fault_addr, unsigned long htinst)
{
u8 data_buf[8];
unsigned long insn;
int shift = 0, len = 0, insn_len = 0;
struct kvm_cpu_trap utrap = { 0 };
struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
/* Determine trapped instruction */
if (htinst & 0x1) {
/*
* Bit[0] == 1 implies trapped instruction value is
* transformed instruction or custom instruction.
*/
insn = htinst | INSN_16BIT_MASK;
insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
} else {
/*
* Bit[0] == 0 implies trapped instruction value is
* zero or special value.
*/
insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
&utrap);
if (utrap.scause) {
/* Redirect trap if we failed to read instruction */
utrap.sepc = ct->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
insn_len = INSN_LEN(insn);
}
/* Decode length of MMIO and shift */
if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
len = 4;
shift = 8 * (sizeof(ulong) - len);
} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
len = 1;
shift = 8 * (sizeof(ulong) - len);
} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
len = 1;
shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
len = 8;
shift = 8 * (sizeof(ulong) - len);
} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
len = 4;
#endif
} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
len = 2;
shift = 8 * (sizeof(ulong) - len);
} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
len = 2;
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
len = 8;
shift = 8 * (sizeof(ulong) - len);
insn = RVC_RS2S(insn) << SH_RD;
} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
((insn >> SH_RD) & 0x1f)) {
len = 8;
shift = 8 * (sizeof(ulong) - len);
#endif
} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
len = 4;
shift = 8 * (sizeof(ulong) - len);
insn = RVC_RS2S(insn) << SH_RD;
} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
((insn >> SH_RD) & 0x1f)) {
len = 4;
shift = 8 * (sizeof(ulong) - len);
} else {
return -EOPNOTSUPP;
}
/* Fault address should be aligned to length of MMIO */
if (fault_addr & (len - 1))
return -EIO;
/* Save instruction decode info */
vcpu->arch.mmio_decode.insn = insn;
vcpu->arch.mmio_decode.insn_len = insn_len;
vcpu->arch.mmio_decode.shift = shift;
vcpu->arch.mmio_decode.len = len;
vcpu->arch.mmio_decode.return_handled = 0;
/* Update MMIO details in kvm_run struct */
run->mmio.is_write = false;
run->mmio.phys_addr = fault_addr;
run->mmio.len = len;
/* Try to handle MMIO access in the kernel */
if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
/* Successfully handled MMIO access in the kernel so resume */
memcpy(run->mmio.data, data_buf, len);
vcpu->stat.mmio_exit_kernel++;
kvm_riscv_vcpu_mmio_return(vcpu, run);
return 1;
}
/* Exit to userspace for MMIO emulation */
vcpu->stat.mmio_exit_user++;
run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
static int emulate_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
unsigned long fault_addr, unsigned long htinst)
{
u8 data8;
u16 data16;
u32 data32;
u64 data64;
ulong data;
unsigned long insn;
int len = 0, insn_len = 0;
struct kvm_cpu_trap utrap = { 0 };
struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
/* Determine trapped instruction */
if (htinst & 0x1) {
/*
* Bit[0] == 1 implies trapped instruction value is
* transformed instruction or custom instruction.
*/
insn = htinst | INSN_16BIT_MASK;
insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
} else {
/*
* Bit[0] == 0 implies trapped instruction value is
* zero or special value.
*/
insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
&utrap);
if (utrap.scause) {
/* Redirect trap if we failed to read instruction */
utrap.sepc = ct->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
insn_len = INSN_LEN(insn);
}
data = GET_RS2(insn, &vcpu->arch.guest_context);
data8 = data16 = data32 = data64 = data;
if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
len = 4;
} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
len = 1;
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
len = 8;
#endif
} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
len = 2;
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
len = 8;
data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
((insn >> SH_RD) & 0x1f)) {
len = 8;
data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
len = 4;
data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
((insn >> SH_RD) & 0x1f)) {
len = 4;
data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
} else {
return -EOPNOTSUPP;
}
/* Fault address should be aligned to length of MMIO */
if (fault_addr & (len - 1))
return -EIO;
/* Save instruction decode info */
vcpu->arch.mmio_decode.insn = insn;
vcpu->arch.mmio_decode.insn_len = insn_len;
vcpu->arch.mmio_decode.shift = 0;
vcpu->arch.mmio_decode.len = len;
vcpu->arch.mmio_decode.return_handled = 0;
/* Copy data to kvm_run instance */
switch (len) {
case 1:
*((u8 *)run->mmio.data) = data8;
break;
case 2:
*((u16 *)run->mmio.data) = data16;
break;
case 4:
*((u32 *)run->mmio.data) = data32;
break;
case 8:
*((u64 *)run->mmio.data) = data64;
break;
default:
return -EOPNOTSUPP;
}
/* Update MMIO details in kvm_run struct */
run->mmio.is_write = true;
run->mmio.phys_addr = fault_addr;
run->mmio.len = len;
/* Try to handle MMIO access in the kernel */
if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
fault_addr, len, run->mmio.data)) {
/* Successfully handled MMIO access in the kernel so resume */
vcpu->stat.mmio_exit_kernel++;
kvm_riscv_vcpu_mmio_return(vcpu, run);
return 1;
}
/* Exit to userspace for MMIO emulation */
vcpu->stat.mmio_exit_user++;
run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_trap *trap)
{
struct kvm_memory_slot *memslot;
unsigned long hva, fault_addr;
bool writeable;
bool writable;
gfn_t gfn;
int ret;
fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
gfn = fault_addr >> PAGE_SHIFT;
memslot = gfn_to_memslot(vcpu->kvm, gfn);
hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable);
hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
if (kvm_is_error_hva(hva) ||
(trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writeable)) {
(trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
switch (trap->scause) {
case EXC_LOAD_GUEST_PAGE_FAULT:
return emulate_load(vcpu, run, fault_addr,
trap->htinst);
return kvm_riscv_vcpu_mmio_load(vcpu, run,
fault_addr,
trap->htinst);
case EXC_STORE_GUEST_PAGE_FAULT:
return emulate_store(vcpu, run, fault_addr,
trap->htinst);
return kvm_riscv_vcpu_mmio_store(vcpu, run,
fault_addr,
trap->htinst);
default:
return -EOPNOTSUPP;
};
@@ -448,21 +47,6 @@ static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
return 1;
}
/**
* kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
*
* @vcpu: The VCPU pointer
*/
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
if (!kvm_arch_vcpu_runnable(vcpu)) {
kvm_vcpu_srcu_read_unlock(vcpu);
kvm_vcpu_halt(vcpu);
kvm_vcpu_srcu_read_lock(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
}
/**
* kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
*
@@ -601,66 +185,6 @@ void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
}
/**
* kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
* or in-kernel IO emulation
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
*/
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
u8 data8;
u16 data16;
u32 data32;
u64 data64;
ulong insn;
int len, shift;
if (vcpu->arch.mmio_decode.return_handled)
return 0;
vcpu->arch.mmio_decode.return_handled = 1;
insn = vcpu->arch.mmio_decode.insn;
if (run->mmio.is_write)
goto done;
len = vcpu->arch.mmio_decode.len;
shift = vcpu->arch.mmio_decode.shift;
switch (len) {
case 1:
data8 = *((u8 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
(ulong)data8 << shift >> shift);
break;
case 2:
data16 = *((u16 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
(ulong)data16 << shift >> shift);
break;
case 4:
data32 = *((u32 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
(ulong)data32 << shift >> shift);
break;
case 8:
data64 = *((u64 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
(ulong)data64 << shift >> shift);
break;
default:
return -EOPNOTSUPP;
}
done:
/* Move to next instruction */
vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;
return 0;
}
/*
* Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
* proper exit to userspace.
@@ -680,7 +204,7 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
switch (trap->scause) {
case EXC_VIRTUAL_INST_FAULT:
if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
ret = virtual_inst_fault(vcpu, run, trap);
ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap);
break;
case EXC_INST_GUEST_PAGE_FAULT:
case EXC_LOAD_GUEST_PAGE_FAULT:

arch/riscv/kvm/vcpu_fp.c

@@ -16,12 +16,11 @@
#ifdef CONFIG_FPU
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
unsigned long isa = vcpu->arch.isa;
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
cntx->sstatus &= ~SR_FS;
if (riscv_isa_extension_available(&isa, f) ||
riscv_isa_extension_available(&isa, d))
if (riscv_isa_extension_available(vcpu->arch.isa, f) ||
riscv_isa_extension_available(vcpu->arch.isa, d))
cntx->sstatus |= SR_FS_INITIAL;
else
cntx->sstatus |= SR_FS_OFF;
@@ -34,24 +33,24 @@ static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
}
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
unsigned long isa)
const unsigned long *isa)
{
if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
if (riscv_isa_extension_available(&isa, d))
if (riscv_isa_extension_available(isa, d))
__kvm_riscv_fp_d_save(cntx);
else if (riscv_isa_extension_available(&isa, f))
else if (riscv_isa_extension_available(isa, f))
__kvm_riscv_fp_f_save(cntx);
kvm_riscv_vcpu_fp_clean(cntx);
}
}
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
unsigned long isa)
const unsigned long *isa)
{
if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
if (riscv_isa_extension_available(&isa, d))
if (riscv_isa_extension_available(isa, d))
__kvm_riscv_fp_d_restore(cntx);
else if (riscv_isa_extension_available(&isa, f))
else if (riscv_isa_extension_available(isa, f))
__kvm_riscv_fp_f_restore(cntx);
kvm_riscv_vcpu_fp_clean(cntx);
}
@@ -80,7 +79,6 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
unsigned long rtype)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long isa = vcpu->arch.isa;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
@@ -89,7 +87,7 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
void *reg_val;
if ((rtype == KVM_REG_RISCV_FP_F) &&
riscv_isa_extension_available(&isa, f)) {
riscv_isa_extension_available(vcpu->arch.isa, f)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
@@ -100,7 +98,7 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
else
return -EINVAL;
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
riscv_isa_extension_available(&isa, d)) {
riscv_isa_extension_available(vcpu->arch.isa, d)) {
if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
@@ -126,7 +124,6 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
unsigned long rtype)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long isa = vcpu->arch.isa;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
@@ -135,7 +132,7 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
void *reg_val;
if ((rtype == KVM_REG_RISCV_FP_F) &&
riscv_isa_extension_available(&isa, f)) {
riscv_isa_extension_available(vcpu->arch.isa, f)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
@@ -146,7 +143,7 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
else
return -EINVAL;
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
riscv_isa_extension_available(&isa, d)) {
riscv_isa_extension_available(vcpu->arch.isa, d)) {
if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;

arch/riscv/kvm/vcpu_insn.c (new file, 752 lines)

@@ -0,0 +1,752 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#define INSN_OPCODE_MASK 0x007c
#define INSN_OPCODE_SHIFT 2
#define INSN_OPCODE_SYSTEM 28
#define INSN_MASK_WFI 0xffffffff
#define INSN_MATCH_WFI 0x10500073
#define INSN_MATCH_CSRRW 0x1073
#define INSN_MASK_CSRRW 0x707f
#define INSN_MATCH_CSRRS 0x2073
#define INSN_MASK_CSRRS 0x707f
#define INSN_MATCH_CSRRC 0x3073
#define INSN_MASK_CSRRC 0x707f
#define INSN_MATCH_CSRRWI 0x5073
#define INSN_MASK_CSRRWI 0x707f
#define INSN_MATCH_CSRRSI 0x6073
#define INSN_MASK_CSRRSI 0x707f
#define INSN_MATCH_CSRRCI 0x7073
#define INSN_MASK_CSRRCI 0x707f
#define INSN_MATCH_LB 0x3
#define INSN_MASK_LB 0x707f
#define INSN_MATCH_LH 0x1003
#define INSN_MASK_LH 0x707f
#define INSN_MATCH_LW 0x2003
#define INSN_MASK_LW 0x707f
#define INSN_MATCH_LD 0x3003
#define INSN_MASK_LD 0x707f
#define INSN_MATCH_LBU 0x4003
#define INSN_MASK_LBU 0x707f
#define INSN_MATCH_LHU 0x5003
#define INSN_MASK_LHU 0x707f
#define INSN_MATCH_LWU 0x6003
#define INSN_MASK_LWU 0x707f
#define INSN_MATCH_SB 0x23
#define INSN_MASK_SB 0x707f
#define INSN_MATCH_SH 0x1023
#define INSN_MASK_SH 0x707f
#define INSN_MATCH_SW 0x2023
#define INSN_MASK_SW 0x707f
#define INSN_MATCH_SD 0x3023
#define INSN_MASK_SD 0x707f
#define INSN_MATCH_C_LD 0x6000
#define INSN_MASK_C_LD 0xe003
#define INSN_MATCH_C_SD 0xe000
#define INSN_MASK_C_SD 0xe003
#define INSN_MATCH_C_LW 0x4000
#define INSN_MASK_C_LW 0xe003
#define INSN_MATCH_C_SW 0xc000
#define INSN_MASK_C_SW 0xe003
#define INSN_MATCH_C_LDSP 0x6002
#define INSN_MASK_C_LDSP 0xe003
#define INSN_MATCH_C_SDSP 0xe002
#define INSN_MASK_C_SDSP 0xe003
#define INSN_MATCH_C_LWSP 0x4002
#define INSN_MASK_C_LWSP 0xe003
#define INSN_MATCH_C_SWSP 0xc002
#define INSN_MASK_C_SWSP 0xe003
#define INSN_16BIT_MASK 0x3
#define INSN_IS_16BIT(insn) (((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)
#define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4)
#ifdef CONFIG_64BIT
#define LOG_REGBYTES 3
#else
#define LOG_REGBYTES 2
#endif
#define REGBYTES (1 << LOG_REGBYTES)
#define SH_RD 7
#define SH_RS1 15
#define SH_RS2 20
#define SH_RS2C 2
#define MASK_RX 0x1f
#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \
(RV_X(x, 10, 3) << 3) | \
(RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \
(RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \
(RV_X(x, 12, 1) << 5) | \
(RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \
(RV_X(x, 12, 1) << 5) | \
(RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \
(RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \
(RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5)
#define SHIFT_RIGHT(x, y) \
((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
#define REG_MASK \
((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
#define REG_OFFSET(insn, pos) \
(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
#define REG_PTR(insn, pos, regs) \
((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))
#define GET_FUNCT3(insn) (((insn) >> 12) & 7)
#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs) (*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn) ((s32)(insn) >> 20)
#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
(s32)(((insn) >> 7) & 0x1f))
struct insn_func {
unsigned long mask;
unsigned long match;
/*
* Possible return values are as follows:
* 1) Returns < 0 for error case
* 2) Returns 0 for exit to user-space
* 3) Returns 1 to continue with next sepc
* 4) Returns 2 to continue with same sepc
* 5) Returns 3 to inject illegal instruction trap and continue
* 6) Returns 4 to inject virtual instruction trap and continue
*
* Use enum kvm_insn_return for return values
*/
int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};
static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
ulong insn)
{
struct kvm_cpu_trap utrap = { 0 };
/* Redirect trap to Guest VCPU */
utrap.sepc = vcpu->arch.guest_context.sepc;
utrap.scause = EXC_INST_ILLEGAL;
utrap.stval = insn;
utrap.htval = 0;
utrap.htinst = 0;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
ulong insn)
{
struct kvm_cpu_trap utrap = { 0 };
/* Redirect trap to Guest VCPU */
utrap.sepc = vcpu->arch.guest_context.sepc;
utrap.scause = EXC_VIRTUAL_INST_FAULT;
utrap.stval = insn;
utrap.htval = 0;
utrap.htinst = 0;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
/**
* kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
*
* @vcpu: The VCPU pointer
*/
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
if (!kvm_arch_vcpu_runnable(vcpu)) {
kvm_vcpu_srcu_read_unlock(vcpu);
kvm_vcpu_halt(vcpu);
kvm_vcpu_srcu_read_lock(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
}
static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
vcpu->stat.wfi_exit_stat++;
kvm_riscv_vcpu_wfi(vcpu);
return KVM_INSN_CONTINUE_NEXT_SEPC;
}
struct csr_func {
unsigned int base;
unsigned int count;
/*
* Possible return values are as same as "func" callback in
* "struct insn_func".
*/
int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask);
};
static const struct csr_func csr_funcs[] = { };
/**
* kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
* emulation or in-kernel emulation
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the CSR data
*
* Returns > 0 upon failure and 0 upon success
*/
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
ulong insn;
if (vcpu->arch.csr_decode.return_handled)
return 0;
vcpu->arch.csr_decode.return_handled = 1;
/* Update destination register for CSR reads */
insn = vcpu->arch.csr_decode.insn;
if ((insn >> SH_RD) & MASK_RX)
SET_RD(insn, &vcpu->arch.guest_context,
run->riscv_csr.ret_value);
/* Move to next instruction */
vcpu->arch.guest_context.sepc += INSN_LEN(insn);
return 0;
}
static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
int i, rc = KVM_INSN_ILLEGAL_TRAP;
unsigned int csr_num = insn >> SH_RS2;
unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
const struct csr_func *tcfn, *cfn = NULL;
ulong val = 0, wr_mask = 0, new_val = 0;
/* Decode the CSR instruction */
switch (GET_FUNCT3(insn)) {
case GET_FUNCT3(INSN_MATCH_CSRRW):
wr_mask = -1UL;
new_val = rs1_val;
break;
case GET_FUNCT3(INSN_MATCH_CSRRS):
wr_mask = rs1_val;
new_val = -1UL;
break;
case GET_FUNCT3(INSN_MATCH_CSRRC):
wr_mask = rs1_val;
new_val = 0;
break;
case GET_FUNCT3(INSN_MATCH_CSRRWI):
wr_mask = -1UL;
new_val = rs1_num;
break;
case GET_FUNCT3(INSN_MATCH_CSRRSI):
wr_mask = rs1_num;
new_val = -1UL;
break;
case GET_FUNCT3(INSN_MATCH_CSRRCI):
wr_mask = rs1_num;
new_val = 0;
break;
default:
return rc;
}
/* Save instruction decode info */
vcpu->arch.csr_decode.insn = insn;
vcpu->arch.csr_decode.return_handled = 0;
/* Update CSR details in kvm_run struct */
run->riscv_csr.csr_num = csr_num;
run->riscv_csr.new_value = new_val;
run->riscv_csr.write_mask = wr_mask;
run->riscv_csr.ret_value = 0;
/* Find in-kernel CSR function */
for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
tcfn = &csr_funcs[i];
if ((tcfn->base <= csr_num) &&
(csr_num < (tcfn->base + tcfn->count))) {
cfn = tcfn;
break;
}
}
/* First try in-kernel CSR emulation */
if (cfn && cfn->func) {
rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
run->riscv_csr.ret_value = val;
vcpu->stat.csr_exit_kernel++;
kvm_riscv_vcpu_csr_return(vcpu, run);
rc = KVM_INSN_CONTINUE_SAME_SEPC;
}
return rc;
}
}
/* Exit to user-space for CSR emulation */
if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
vcpu->stat.csr_exit_user++;
run->exit_reason = KVM_EXIT_RISCV_CSR;
}
return rc;
}
static const struct insn_func system_opcode_funcs[] = {
{
.mask = INSN_MASK_CSRRW,
.match = INSN_MATCH_CSRRW,
.func = csr_insn,
},
{
.mask = INSN_MASK_CSRRS,
.match = INSN_MATCH_CSRRS,
.func = csr_insn,
},
{
.mask = INSN_MASK_CSRRC,
.match = INSN_MATCH_CSRRC,
.func = csr_insn,
},
{
.mask = INSN_MASK_CSRRWI,
.match = INSN_MATCH_CSRRWI,
.func = csr_insn,
},
{
.mask = INSN_MASK_CSRRSI,
.match = INSN_MATCH_CSRRSI,
.func = csr_insn,
},
{
.mask = INSN_MASK_CSRRCI,
.match = INSN_MATCH_CSRRCI,
.func = csr_insn,
},
{
.mask = INSN_MASK_WFI,
.match = INSN_MATCH_WFI,
.func = wfi_insn,
},
};
static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
ulong insn)
{
int i, rc = KVM_INSN_ILLEGAL_TRAP;
const struct insn_func *ifn;
for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
ifn = &system_opcode_funcs[i];
if ((insn & ifn->mask) == ifn->match) {
rc = ifn->func(vcpu, run, insn);
break;
}
}
switch (rc) {
case KVM_INSN_ILLEGAL_TRAP:
return truly_illegal_insn(vcpu, run, insn);
case KVM_INSN_VIRTUAL_TRAP:
return truly_virtual_insn(vcpu, run, insn);
case KVM_INSN_CONTINUE_NEXT_SEPC:
vcpu->arch.guest_context.sepc += INSN_LEN(insn);
break;
default:
break;
}
return (rc <= 0) ? rc : 1;
}
/**
* kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
* @trap: Trap details
*
* Returns > 0 to continue run-loop
* Returns 0 to exit run-loop and handle in user-space.
* Returns < 0 to report failure and exit run-loop
*/
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_trap *trap)
{
unsigned long insn = trap->stval;
struct kvm_cpu_trap utrap = { 0 };
struct kvm_cpu_context *ct;
if (unlikely(INSN_IS_16BIT(insn))) {
if (insn == 0) {
ct = &vcpu->arch.guest_context;
insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
ct->sepc,
&utrap);
if (utrap.scause) {
utrap.sepc = ct->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
}
if (INSN_IS_16BIT(insn))
return truly_illegal_insn(vcpu, run, insn);
}
switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
case INSN_OPCODE_SYSTEM:
return system_opcode_insn(vcpu, run, insn);
default:
return truly_illegal_insn(vcpu, run, insn);
}
}
/**
* kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
* @fault_addr: Guest physical address to load
* @htinst: Transformed encoding of the load instruction
*
* Returns > 0 to continue run-loop
* Returns 0 to exit run-loop and handle in user-space.
* Returns < 0 to report failure and exit run-loop
*/
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
unsigned long fault_addr,
unsigned long htinst)
{
u8 data_buf[8];
unsigned long insn;
int shift = 0, len = 0, insn_len = 0;
struct kvm_cpu_trap utrap = { 0 };
struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
/* Determine trapped instruction */
if (htinst & 0x1) {
/*
* Bit[0] == 1 implies trapped instruction value is
* transformed instruction or custom instruction.
*/
insn = htinst | INSN_16BIT_MASK;
insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
} else {
/*
* Bit[0] == 0 implies trapped instruction value is
* zero or special value.
*/
insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
&utrap);
if (utrap.scause) {
/* Redirect trap if we failed to read instruction */
utrap.sepc = ct->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
insn_len = INSN_LEN(insn);
}
/* Decode length of MMIO and shift */
if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
len = 4;
shift = 8 * (sizeof(ulong) - len);
} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
len = 1;
shift = 8 * (sizeof(ulong) - len);
} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
len = 1;
shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
len = 8;
shift = 8 * (sizeof(ulong) - len);
} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
len = 4;
#endif
} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
len = 2;
shift = 8 * (sizeof(ulong) - len);
} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
len = 2;
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
len = 8;
shift = 8 * (sizeof(ulong) - len);
insn = RVC_RS2S(insn) << SH_RD;
} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
((insn >> SH_RD) & 0x1f)) {
len = 8;
shift = 8 * (sizeof(ulong) - len);
#endif
} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
len = 4;
shift = 8 * (sizeof(ulong) - len);
insn = RVC_RS2S(insn) << SH_RD;
} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
((insn >> SH_RD) & 0x1f)) {
len = 4;
shift = 8 * (sizeof(ulong) - len);
} else {
return -EOPNOTSUPP;
}
/* Fault address should be aligned to length of MMIO */
if (fault_addr & (len - 1))
return -EIO;
/* Save instruction decode info */
vcpu->arch.mmio_decode.insn = insn;
vcpu->arch.mmio_decode.insn_len = insn_len;
vcpu->arch.mmio_decode.shift = shift;
vcpu->arch.mmio_decode.len = len;
vcpu->arch.mmio_decode.return_handled = 0;
/* Update MMIO details in kvm_run struct */
run->mmio.is_write = false;
run->mmio.phys_addr = fault_addr;
run->mmio.len = len;
/* Try to handle MMIO access in the kernel */
if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
/* Successfully handled MMIO access in the kernel so resume */
memcpy(run->mmio.data, data_buf, len);
vcpu->stat.mmio_exit_kernel++;
kvm_riscv_vcpu_mmio_return(vcpu, run);
return 1;
}
/* Exit to userspace for MMIO emulation */
vcpu->stat.mmio_exit_user++;
run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
/**
* kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
* @fault_addr: Guest physical address to store
* @htinst: Transformed encoding of the store instruction
*
* Returns > 0 to continue run-loop
* Returns 0 to exit run-loop and handle in user-space.
* Returns < 0 to report failure and exit run-loop
*/
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
unsigned long fault_addr,
unsigned long htinst)
{
u8 data8;
u16 data16;
u32 data32;
u64 data64;
ulong data;
unsigned long insn;
int len = 0, insn_len = 0;
struct kvm_cpu_trap utrap = { 0 };
struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
/* Determine trapped instruction */
if (htinst & 0x1) {
/*
* Bit[0] == 1 implies trapped instruction value is
* transformed instruction or custom instruction.
*/
insn = htinst | INSN_16BIT_MASK;
insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
} else {
/*
* Bit[0] == 0 implies trapped instruction value is
* zero or special value.
*/
insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
&utrap);
if (utrap.scause) {
/* Redirect trap if we failed to read instruction */
utrap.sepc = ct->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
insn_len = INSN_LEN(insn);
}
data = GET_RS2(insn, &vcpu->arch.guest_context);
data8 = data16 = data32 = data64 = data;
if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
len = 4;
} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
len = 1;
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
len = 8;
#endif
} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
len = 2;
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
len = 8;
data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
((insn >> SH_RD) & 0x1f)) {
len = 8;
data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
len = 4;
data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
((insn >> SH_RD) & 0x1f)) {
len = 4;
data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
} else {
return -EOPNOTSUPP;
}
/* Fault address should be aligned to length of MMIO */
if (fault_addr & (len - 1))
return -EIO;
/* Save instruction decode info */
vcpu->arch.mmio_decode.insn = insn;
vcpu->arch.mmio_decode.insn_len = insn_len;
vcpu->arch.mmio_decode.shift = 0;
vcpu->arch.mmio_decode.len = len;
vcpu->arch.mmio_decode.return_handled = 0;
/* Copy data to kvm_run instance */
switch (len) {
case 1:
*((u8 *)run->mmio.data) = data8;
break;
case 2:
*((u16 *)run->mmio.data) = data16;
break;
case 4:
*((u32 *)run->mmio.data) = data32;
break;
case 8:
*((u64 *)run->mmio.data) = data64;
break;
default:
return -EOPNOTSUPP;
}
/* Update MMIO details in kvm_run struct */
run->mmio.is_write = true;
run->mmio.phys_addr = fault_addr;
run->mmio.len = len;
/* Try to handle MMIO access in the kernel */
if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
fault_addr, len, run->mmio.data)) {
/* Successfully handled MMIO access in the kernel so resume */
vcpu->stat.mmio_exit_kernel++;
kvm_riscv_vcpu_mmio_return(vcpu, run);
return 1;
}
/* Exit to userspace for MMIO emulation */
vcpu->stat.mmio_exit_user++;
run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
/**
* kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
* or in-kernel IO emulation
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
*/
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
u8 data8;
u16 data16;
u32 data32;
u64 data64;
ulong insn;
int len, shift;
if (vcpu->arch.mmio_decode.return_handled)
return 0;
vcpu->arch.mmio_decode.return_handled = 1;
insn = vcpu->arch.mmio_decode.insn;
if (run->mmio.is_write)
goto done;
len = vcpu->arch.mmio_decode.len;
shift = vcpu->arch.mmio_decode.shift;
switch (len) {
case 1:
data8 = *((u8 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
(ulong)data8 << shift >> shift);
break;
case 2:
data16 = *((u16 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
(ulong)data16 << shift >> shift);
break;
case 4:
data32 = *((u32 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
(ulong)data32 << shift >> shift);
break;
case 8:
data64 = *((u64 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
(ulong)data64 << shift >> shift);
break;
default:
return -EOPNOTSUPP;
}
done:
/* Move to next instruction */
vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;
return 0;
}
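Editor's note: csr_funcs[] above is intentionally empty in this commit; the framework only gains in-kernel CSR handlers in later patches. As a hypothetical sketch of the plumbing (csr_nop and the CSR choice are invented here, not part of this merge), an entry would look like:

/* Hypothetical in-kernel handler: reads as 0, ignores writes, and
 * resumes the guest at the next instruction. */
static int csr_nop(struct kvm_vcpu *vcpu, unsigned int csr_num,
		   unsigned long *val, unsigned long new_val,
		   unsigned long wr_mask)
{
	*val = 0;
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

static const struct csr_func csr_funcs[] = {
	{ .base = CSR_TIME, .count = 1, .func = csr_nop },
};

With such an entry in place, csr_insn() above would satisfy the access in-kernel (bumping csr_exit_kernel) instead of exiting to user space with KVM_EXIT_RISCV_CSR.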

arch/riscv/kvm/vcpu_timer.c

@@ -214,12 +214,10 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
#endif
}
int kvm_riscv_guest_timer_init(struct kvm *kvm)
void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
struct kvm_guest_timer *gt = &kvm->arch.timer;
riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
gt->time_delta = -get_cycles64();
return 0;
}

arch/riscv/kvm/vm.c

@@ -41,7 +41,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return r;
}
return kvm_riscv_guest_timer_init(kvm);
kvm_riscv_guest_timer_init(kvm);
return 0;
}
void kvm_arch_destroy_vm(struct kvm *kvm)

include/linux/kvm_types.h

@@ -87,6 +87,7 @@ struct gfn_to_pfn_cache {
struct kvm_mmu_memory_cache {
int nobjs;
gfp_t gfp_zero;
gfp_t gfp_custom;
struct kmem_cache *kmem_cache;
void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
};

include/uapi/linux/kvm.h

@@ -270,6 +270,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_X86_BUS_LOCK 33
#define KVM_EXIT_XEN 34
#define KVM_EXIT_RISCV_SBI 35
#define KVM_EXIT_RISCV_CSR 36
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -496,6 +497,13 @@ struct kvm_run {
unsigned long args[6];
unsigned long ret[2];
} riscv_sbi;
/* KVM_EXIT_RISCV_CSR */
struct {
unsigned long csr_num;
unsigned long new_value;
unsigned long write_mask;
unsigned long ret_value;
} riscv_csr;
/* Fix the size of the union. */
char padding[256];
};
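Editor's note: for reference, a minimal sketch of the VMM side of the new exit (vmm_csr_read()/vmm_csr_write() are assumed helpers, not a real API). KVM re-enters kvm_riscv_vcpu_csr_return() on the next KVM_RUN, which copies ret_value into the guest's rd and steps sepc, so userspace only has to fill in ret_value:

#include <linux/kvm.h>

/* Assumed VMM-side helpers for its emulated CSR state. */
unsigned long vmm_csr_read(unsigned long csr_num);
void vmm_csr_write(unsigned long csr_num, unsigned long val);

/* Sketch: service KVM_EXIT_RISCV_CSR in the VMM run loop. */
static void handle_riscv_csr(struct kvm_run *run)
{
	/* rd receives the old CSR value, so return it via ret_value. */
	unsigned long old = vmm_csr_read(run->riscv_csr.csr_num);
	unsigned long new = (old & ~run->riscv_csr.write_mask) |
			    (run->riscv_csr.new_value & run->riscv_csr.write_mask);

	if (run->riscv_csr.write_mask)
		vmm_csr_write(run->riscv_csr.csr_num, new);
	run->riscv_csr.ret_value = old;
}

Note how write_mask encodes all six CSR instruction forms: CSRRW passes -1UL (full replace), while CSRRS/CSRRC pass the rs1 value as a set/clear mask.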

virt/kvm/kvm_main.c

@@ -386,7 +386,9 @@ int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
if (mc->nobjs >= min)
return 0;
while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
obj = mmu_memory_cache_alloc_obj(mc, (mc->gfp_custom) ?
mc->gfp_custom :
GFP_KERNEL_ACCOUNT);
if (!obj)
return mc->nobjs >= min ? 0 : -ENOMEM;
mc->objects[mc->nobjs++] = obj;
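Editor's note: tying the two ends together, a sketch based on the kvm_riscv_gstage_ioremap() hunk above (the wrapper function is invented for illustration). Leaving gfp_custom at 0 preserves the old GFP_KERNEL_ACCOUNT behaviour, while a caller that must top up the cache from atomic context sets it first:

#include <linux/kvm_host.h>

/* Sketch: top up a page-table cache with atomic allocations. */
static int topup_atomic_cache(struct kvm_mmu_memory_cache *pcache, int min)
{
	memset(pcache, 0, sizeof(*pcache));
	pcache->gfp_custom = GFP_ATOMIC | __GFP_ACCOUNT; /* overrides GFP_KERNEL_ACCOUNT */
	pcache->gfp_zero = __GFP_ZERO;

	/* Each cached object now comes from GFP_ATOMIC | __GFP_ACCOUNT. */
	return kvm_mmu_topup_memory_cache(pcache, min);
}

This is what lets kvm_riscv_gstage_ioremap() take an in_atomic argument: the in-kernel AIA-style users that cannot sleep get GFP_ATOMIC allocations, while kvm_arch_prepare_memory_region() keeps passing false.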