LoongArch: KVM: Add PMU support

Upstream: no

On LoongArch, the host and the guest have their own PMU CSRs and they
share the PMU hardware resources. A set of PMU CSRs consists of a CTRL
register and a CNTR register. We choose which PMU CSRs the guest can
access directly by programming the GPERF field, bits [26:24] of the
GCFG register.
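For illustration, granting the guest its counters comes down to the
following (this mirrors kvm_own_pmu() added below; vcpu is the vCPU
being entered, and the local names are mine):

	unsigned long val;
	int n = kvm_get_pmu_num(&vcpu->arch);	/* last counter index, from CPUCFG6 */

	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;	/* clear the GPERF field */
	val |= (n + 1) << CSR_GCFG_GPERF_SHIFT;		/* expose counters 0..n */
	write_csr_gcfg(val);				/* writing 0 revokes access */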

On the KVM side:
- We save the host PMU CSRs into the per-CPU kvm_context structure.
- If the host supports the PMU feature (see the sketch after this list):
  - When entering guest mode, we save the host PMU CSRs and restore the guest PMU CSRs.
  - When exiting guest mode, we save the guest PMU CSRs and restore the host PMU CSRs.
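A sketch of that flow using this patch's function names (simplified; the
real code is lazy and only switches PMU state when the guest has actually
touched the counters):

	/* guest entry: kvm_pre_enter_guest() -> kvm_check_pmu() on KVM_REQ_PMU */
	kvm_save_host_pmu(vcpu);	/* park host CTRL/CNTR in kvm_context */
	/* kvm_own_pmu() then sets GCFG.GPERF to grant the guest its counters */
	kvm_restore_guest_pmu(vcpu);	/* load guest CTRL/CNTR CSRs */

	/* ... guest runs with direct access to its PMU CSRs ... */

	/* guest exit: kvm_handle_exit() -> kvm_lose_pmu() */
	kvm_save_guest_pmu(vcpu);	/* stash guest CTRL/CNTR CSRs */
	/* GCFG.GPERF is cleared here to revoke guest PMU access */
	kvm_restore_host_pmu(vcpu);	/* resume host counting */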

Signed-off-by: Song Gao <gaosong@loongson.cn>
Reviewed-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Xianglai Li <lixianglai@loongson.cn>
Author:    Song Gao <gaosong@loongson.cn>  2024-05-07 20:01:40 +08:00
Committed-by: Xianglai Li
Parent:    5a128388a4
Commit:    a3aa255ea9
4 changed files with 183 additions and 82 deletions

diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h

@@ -30,6 +30,7 @@
: [val] "+r" (__v) \
: [reg] "i" (csr) \
: "memory"); \
__v; \
})
#define gcsr_xchg(v, m, csr) \
@@ -180,6 +181,7 @@ __BUILD_GCSR_OP(tlbidx)
#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid))
#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid))
#define kvm_read_clear_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_write(0, gid))
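The `__v;` added to the statement expression above makes gcsr_write()
yield the pre-write register value (gcsrwr has exchange semantics), which
the new read-and-clear macro relies on. Expanded, it reads:

	/* Save the old guest PERFCTRL0 into the software copy and leave 0
	 * (all enable bits clear) in the hardware CSR, in one gcsrwr.
	 */
	csr->csrs[LOONGARCH_CSR_PERFCTRL0] = gcsr_write(0, LOONGARCH_CSR_PERFCTRL0);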
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
@@ -213,4 +215,9 @@ static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
CSR_PERFCTRL_PLV2 | \
CSR_PERFCTRL_PLV3)
#define KVM_PMU_EVENT_ENABLED (CSR_PERFCTRL_PLV0 | \
CSR_PERFCTRL_PLV1 | \
CSR_PERFCTRL_PLV2 | \
CSR_PERFCTRL_PLV3)
#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
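KVM_PMU_EVENT_ENABLED collects the four privilege-level enable bits: a
counter whose CTRL has any of them set is still counting. This is the
"is the guest PMU in use" test that kvm_lose_pmu() and _kvm_setcsr()
below both perform; as a sketch (the helper name is mine):

	/* Sketch (hypothetical helper): true if any guest counter has a
	 * PLV enable bit set, i.e. the guest PMU is still in use.
	 */
	static bool guest_pmu_in_use(struct loongarch_csrs *csr)
	{
		unsigned long val;

		val  = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
		val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
		val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
		val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);

		return val & KVM_PMU_EVENT_ENABLED;
	}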

diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h

@@ -55,9 +55,14 @@ struct kvm_arch_memory_slot {
unsigned long flags;
};
#define KVM_REQ_PMU KVM_ARCH_REQ(0)
#define HOST_MAX_PMNUM 16
struct kvm_context {
unsigned long vpid_cache;
struct kvm_vcpu *last_vcpu;
/* Save host pmu csr */
u64 perf_ctrl[HOST_MAX_PMNUM];
u64 perf_cntr[HOST_MAX_PMNUM];
};
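	/* Note (my observation, not wording from the patch): kvm_context is
	 * per physical CPU, so host PMU state is parked on the CPU where the
	 * vCPU runs (see this_cpu_ptr() in kvm_save_host_pmu() below). The
	 * arrays are sized HOST_MAX_PMNUM (16) for headroom; the helpers in
	 * this patch currently touch counters 0-3 only.
	 */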
struct kvm_world_switch {
@@ -129,7 +134,8 @@ enum emulation_result {
#define KVM_LARCH_LASX (0x1 << 2)
#define KVM_LARCH_SWCSR_LATEST (0x1 << 3)
#define KVM_LARCH_HWCSR_USABLE (0x1 << 4)
#define KVM_LARCH_PERF (0x1 << 5)
#define KVM_GUEST_PMU_ENABLE (0x1 << 5)
#define KVM_GUEST_PMU_ACTIVE (0x1 << 6)
struct kvm_vcpu_arch {
/*
@@ -167,6 +173,9 @@ struct kvm_vcpu_arch {
/* CSR state */
struct loongarch_csrs *csr;
/* Guest max PMU CSR id */
int max_pmu_csrid;
/* GPR used as IO source/target */
u32 io_gpr;
@@ -245,6 +254,16 @@ static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
}
static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
{
return arch->cpucfg[LOONGARCH_CPUCFG6] & CPUCFG6_PMP;
}
static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
{
return (arch->cpucfg[LOONGARCH_CPUCFG6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
}
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
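Worth noting for max_pmu_csrid (assigned in kvm_set_one_reg() below): the
PERFCTRLn/PERFCNTRn CSR ids are interleaved, so the last guest PMU CSR id
follows from the counter count. A worked example, assuming that
interleaved id layout:

	/* PERFCTRL0 = base, PERFCNTR0 = base + 1, PERFCTRL1 = base + 2, ...
	 * kvm_get_pmu_num() is the highest counter index (count - 1), hence:
	 */
	vcpu->arch.max_pmu_csrid = LOONGARCH_CSR_PERFCTRL0 +
				   2 * kvm_get_pmu_num(&vcpu->arch) + 1;
	/* e.g. 4 counters: index 3, so base + 7 == LOONGARCH_CSR_PERFCNTR3 */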

diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c

@@ -83,9 +83,10 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
rj = inst.reg2csr_format.rj;
csrid = inst.reg2csr_format.csr;
if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= LOONGARCH_CSR_PERFCNTR3) {
if (!kvm_own_pmu(vcpu)) {
if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
if (kvm_guest_has_pmu(&vcpu->arch)) {
vcpu->arch.pc -= 4;
kvm_make_request(KVM_REQ_PMU, vcpu);
return EMULATE_DONE;
}
}
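The guest's first touch of a PMU CSR traps here; rather than emulating
the access, the handler rolls the PC back over the trapping instruction
and requests the real hardware, so the same instruction re-executes
against the real CSRs. In outline (my summary, not additional code):

	/* 1. vcpu->arch.pc -= 4;             back up over the 4-byte insn
	 * 2. kvm_make_request(KVM_REQ_PMU);  kvm_check_pmu() grants the PMU
	 *                                    on the next guest entry
	 * 3. return EMULATE_DONE;            re-enter; the access now hits
	 *                                    hardware and no longer traps
	 */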

diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c

@@ -140,6 +140,131 @@ static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
return -ENXIO;
}
static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
{
struct kvm_context *context;
context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
context->perf_ctrl[0] = write_csr_perfctrl0(0);
context->perf_ctrl[1] = write_csr_perfctrl1(0);
context->perf_ctrl[2] = write_csr_perfctrl2(0);
context->perf_ctrl[3] = write_csr_perfctrl3(0);
context->perf_cntr[0] = read_csr_perfcntr0();
context->perf_cntr[1] = read_csr_perfcntr1();
context->perf_cntr[2] = read_csr_perfcntr2();
context->perf_cntr[3] = read_csr_perfcntr3();
}
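As with gcsr_write(), write_csr_perfctrl0(0) returns the previous CSR
contents (csrwr exchange semantics), so each ctrl line above both stops a
host counter and captures its control value; the counters are then read
while frozen. Long-hand equivalent of one step (a sketch, not in the
patch):

	u64 old_ctrl = write_csr_perfctrl0(0);	/* csrwr returns the old value */
	u64 old_cntr = read_csr_perfcntr0();	/* counter is frozen now */

	context->perf_ctrl[0] = old_ctrl;
	context->perf_cntr[0] = old_cntr;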
static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
{
struct kvm_context *context;
context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
write_csr_perfcntr0(context->perf_cntr[0]);
write_csr_perfcntr1(context->perf_cntr[1]);
write_csr_perfcntr2(context->perf_cntr[2]);
write_csr_perfcntr3(context->perf_cntr[3]);
write_csr_perfctrl0(context->perf_ctrl[0]);
write_csr_perfctrl1(context->perf_ctrl[1]);
write_csr_perfctrl2(context->perf_ctrl[2]);
write_csr_perfctrl3(context->perf_ctrl[3]);
}
static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
{
struct loongarch_csrs *csr = vcpu->arch.csr;
kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
}
static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
{
struct loongarch_csrs *csr = vcpu->arch.csr;
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
}
static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
{
unsigned long val;
struct loongarch_csrs *csr = vcpu->arch.csr;
if (!(vcpu->arch.aux_inuse & KVM_GUEST_PMU_ENABLE))
return;
if (!(vcpu->arch.aux_inuse & KVM_GUEST_PMU_ACTIVE))
return;
kvm_save_guest_pmu(vcpu);
/* Disable pmu access from guest */
write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
/*
 * Clear KVM_GUEST_PMU_ENABLE if the guest is no longer using the PMU CSRs
 * when exiting the guest, so that on the next trap into the guest we
 * don't need to deal with the PMU CSR context at all.
 */
val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
if (!(val & KVM_PMU_EVENT_ENABLED))
vcpu->arch.aux_inuse &= ~KVM_GUEST_PMU_ENABLE;
kvm_restore_host_pmu(vcpu);
/* KVM_GUEST_PMU_ACTIVE needs to be cleared when exiting the guest */
vcpu->arch.aux_inuse &= ~KVM_GUEST_PMU_ACTIVE;
}
static void kvm_own_pmu(struct kvm_vcpu *vcpu)
{
unsigned long val;
kvm_save_host_pmu(vcpu);
/* Set PM0-PM(num) to guest */
val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
write_csr_gcfg(val);
kvm_restore_guest_pmu(vcpu);
}
static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.aux_inuse & KVM_GUEST_PMU_ENABLE))
return;
kvm_make_request(KVM_REQ_PMU, vcpu);
}
static void kvm_check_pmu(struct kvm_vcpu *vcpu)
{
if (!kvm_check_request(KVM_REQ_PMU, vcpu))
return;
kvm_own_pmu(vcpu);
/*
 * Set KVM_GUEST_PMU_ENABLE and KVM_GUEST_PMU_ACTIVE
 * when the vCPU has a pending KVM_REQ_PMU request.
 */
vcpu->arch.aux_inuse |= KVM_GUEST_PMU_ENABLE;
vcpu->arch.aux_inuse |= KVM_GUEST_PMU_ACTIVE;
}
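Taken together (my reading of this patch, not authoritative):
KVM_GUEST_PMU_ENABLE means "the guest has programmed the PMU and its
state must be preserved"; KVM_GUEST_PMU_ACTIVE means "the hardware
counters currently hold guest state". In comment form:

	/* KVM_GUEST_PMU_ENABLE   set in kvm_check_pmu(); cleared in
	 *                        kvm_lose_pmu() once no PERFCTRL has a PLV
	 *                        enable bit set. While set, kvm_restore_pmu()
	 *                        (vcpu_load path) re-raises KVM_REQ_PMU so
	 *                        the counters come back on the next entry.
	 *
	 * KVM_GUEST_PMU_ACTIVE   set in kvm_check_pmu() on guest entry;
	 *                        cleared in kvm_lose_pmu() on exit, after
	 *                        the guest PMU state has been saved.
	 */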
/*
* kvm_check_requests - check and handle pending vCPU requests
*
@@ -213,6 +338,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
/* Make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
kvm_check_vpid(vcpu);
kvm_check_pmu(vcpu);
vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
/* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
@@ -243,6 +369,8 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Set a default exit reason */
run->exit_reason = KVM_EXIT_UNKNOWN;
kvm_lose_pmu(vcpu);
guest_timing_exit_irqoff();
guest_state_exit_irqoff();
local_irq_enable();
@@ -506,6 +634,21 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
kvm_write_sw_gcsr(csr, id, val);
/*
 * After modifying a PMU CSR value of the vcpu: if any of the PMU
 * CSRs are in use, we need to set KVM_REQ_PMU so the hardware
 * state is reloaded before the next guest entry.
 */
if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
unsigned long val;
val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
if (val & KVM_PMU_EVENT_ENABLED)
kvm_make_request(KVM_REQ_PMU, vcpu);
}
return ret;
}
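One consumer of this hook is live migration: when the target VMM writes
the saved PERFCTRL values back through KVM_SET_ONE_REG, any set PLV bit
re-arms KVM_REQ_PMU so the next KVM_RUN reloads the hardware counters.
A hypothetical userspace fragment (KVM_IOC_CSRID per the LoongArch uapi;
get_saved_ctrl0() and the local names are mine):

	uint64_t saved_perfctrl0 = get_saved_ctrl0();	/* captured on the source */
	struct kvm_one_reg reg = {
		.id   = KVM_IOC_CSRID(LOONGARCH_CSR_PERFCTRL0),
		.addr = (uint64_t)&saved_perfctrl0,
	};

	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	/* _kvm_setcsr() raises KVM_REQ_PMU */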
@@ -596,7 +739,7 @@ static int kvm_check_cpucfg(int id, u64 val)
return 0;
case LOONGARCH_CPUCFG6:
if (val & CPUCFG6_PMP) {
host = read_cpucfg(6);
host = read_cpucfg(LOONGARCH_CPUCFG6);
if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
/* Guest pmbits must be the same as the host's */
return -EINVAL;
@@ -691,6 +834,10 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
if (ret)
break;
vcpu->arch.cpucfg[id] = (u32)v;
if (id == LOONGARCH_CPUCFG6) {
vcpu->arch.max_pmu_csrid = LOONGARCH_CSR_PERFCTRL0 +
2 * kvm_get_pmu_num(&vcpu->arch) + 1;
}
break;
case KVM_REG_LOONGARCH_KVM:
switch (reg->id) {
@@ -784,8 +931,8 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
switch (attr->attr) {
case 2:
case 6:
case LOONGARCH_CPUCFG2:
case LOONGARCH_CPUCFG6:
return 0;
default:
return -ENXIO;
@@ -1086,77 +1233,6 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
preempt_enable();
}
int kvm_own_pmu(struct kvm_vcpu *vcpu)
{
unsigned long val;
if (!kvm_guest_has_pmu(&vcpu->arch))
return -EINVAL;
preempt_disable();
val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
write_csr_gcfg(val);
vcpu->arch.aux_inuse |= KVM_LARCH_PERF;
preempt_enable();
return 0;
}
static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
{
struct loongarch_csrs *csr = vcpu->arch.csr;
if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF))
return;
/* save guest pmu csr */
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL0, 0);
kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL1, 0);
kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL2, 0);
kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL3, 0);
/* Disable pmu access from guest */
write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
if (((kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3))
& KVM_PMU_PLV_ENABLE) == 0)
vcpu->arch.aux_inuse &= ~KVM_LARCH_PERF;
}
static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
{
unsigned long val;
struct loongarch_csrs *csr = vcpu->arch.csr;
if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF))
return;
/* Set PM0-PM(num) to Guest */
val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
write_csr_gcfg(val);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
int intr = (int)irq->irq;
@@ -1295,12 +1371,11 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
/* Control guest page CCA attribute */
change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
/* Restore hardware perf csr */
kvm_restore_pmu(vcpu);
kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
/* Restore hardware PMU CSRs */
kvm_restore_pmu(vcpu);
/* Don't bother restoring registers multiple times unless necessary */
if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
return 0;
@@ -1384,7 +1459,6 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
struct loongarch_csrs *csr = vcpu->arch.csr;
kvm_lose_fpu(vcpu);
kvm_lose_pmu(vcpu);
/*
* Update CSR state from hardware if software CSR state is stale,