LoongArch: KVM: Add PMU support

Upstream: no

Add PMU device emulation.

Signed-off-by: Song Gao <gaosong@loongson.cn>
Signed-off-by: Xianglai Li <lixianglai@loongson.cn>

commit 5773e05369 (parent ec943318bc)
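
Note: the patch virtualizes the PMU lazily — kvm_own_pmu() widens the GCFG.GPERF field so the guest owns the hardware counters directly, and kvm_lose_pmu() narrows it back on vcpu_put. A minimal user-space sketch of that grant/revoke bookkeeping follows; the GPERF bit position and fake_gcfg are invented stand-ins for read_csr_gcfg()/write_csr_gcfg() and the real field layout, and the "counters minus one" reading of CPUCFG6.PMNUM is inferred from the pmu_num + 1 in the diff.

    #include <stdio.h>

    #define GCFG_GPERF_SHIFT 24                      /* hypothetical position */
    #define GCFG_GPERF       (0xfu << GCFG_GPERF_SHIFT)

    static unsigned int fake_gcfg;                   /* stands in for CSR.GCFG */

    static void grant_guest_pmu(int pmnum)
    {
            unsigned int val = fake_gcfg & ~GCFG_GPERF;   /* clear old grant */

            /* CPUCFG6.PMNUM appears to be counters-1: expose pmnum + 1 */
            val |= (unsigned int)(pmnum + 1) << GCFG_GPERF_SHIFT;
            fake_gcfg = val;
    }

    static void revoke_guest_pmu(void)
    {
            fake_gcfg &= ~GCFG_GPERF;                /* PMU CSRs trap again */
    }

    int main(void)
    {
            grant_guest_pmu(3);                      /* PMNUM == 3 -> 4 counters */
            printf("gcfg = %#x\n", fake_gcfg);
            revoke_guest_pmu();
            printf("gcfg = %#x\n", fake_gcfg);
            return 0;
    }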
|
@@ -208,4 +208,9 @@ static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
 	csr->csrs[gid] |= val & _mask;
 }
 
+#define KVM_PMU_PLV_ENABLE	(CSR_PERFCTRL_PLV0 | \
+				CSR_PERFCTRL_PLV1 | \
+				CSR_PERFCTRL_PLV2 | \
+				CSR_PERFCTRL_PLV3)
+
 #endif /* __ASM_LOONGARCH_KVM_CSR_H__ */

@@ -129,6 +129,7 @@ enum emulation_result {
 #define KVM_LARCH_LASX		(0x1 << 2)
 #define KVM_LARCH_SWCSR_LATEST	(0x1 << 3)
 #define KVM_LARCH_HWCSR_USABLE	(0x1 << 4)
+#define KVM_LARCH_PERF		(0x1 << 5)
 
 struct kvm_vcpu_arch {
 	/*

@@ -204,6 +205,9 @@ struct kvm_vcpu_arch {
 		u64 last_steal;
 		struct gfn_to_hva_cache cache;
 	} st;
+	/* Save host pmu csr */
+	u64 perf_ctrl[4];
+	u64 perf_cntr[4];
 };
 
 static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)

@@ -231,6 +235,16 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch)
 	return arch->cpucfg[2] & CPUCFG2_LASX;
 }
 
+static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch)
+{
+	return arch->cpucfg[6] & CPUCFG6_PMP;
+}
+
+static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
+{
+	return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
+}
+
 /* Debug: dump vcpu state */
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 

@@ -75,6 +75,8 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
 static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
 #endif
 
+int kvm_own_pmu(struct kvm_vcpu *vcpu);
+
 void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
 void kvm_reset_timer(struct kvm_vcpu *vcpu);
 void kvm_save_timer(struct kvm_vcpu *vcpu);

@@ -119,6 +119,7 @@
 #define  CPUCFG6_PMP			BIT(0)
 #define  CPUCFG6_PAMVER			GENMASK(3, 1)
 #define  CPUCFG6_PMNUM			GENMASK(7, 4)
+#define  CPUCFG6_PMNUM_SHIFT		4
 #define  CPUCFG6_PMBITS			GENMASK(13, 8)
 #define  CPUCFG6_UPM			BIT(14)

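
These CPUCFG6 fields feed kvm_guest_has_pmu()/kvm_get_pmu_num() above and the kvm_check_cpucfg() validation below. A standalone decode sketch, with an invented sample value, assuming (as the pmu_num + 1 arithmetic in the diff suggests) that PMNUM and PMBITS store value-minus-one:

    #include <stdio.h>

    #define CPUCFG6_PMP          (1u << 0)           /* PMU present */
    #define CPUCFG6_PMNUM_SHIFT  4
    #define CPUCFG6_PMNUM        (0xfu << CPUCFG6_PMNUM_SHIFT)
    #define CPUCFG6_PMBITS_SHIFT 8
    #define CPUCFG6_PMBITS       (0x3fu << CPUCFG6_PMBITS_SHIFT)
    #define CPUCFG6_UPM          (1u << 14)          /* user-mode PMU access */

    int main(void)
    {
            /* invented sample: PMU present, PMNUM=3, PMBITS=63, UPM set */
            unsigned int cfg = CPUCFG6_PMP | (3u << CPUCFG6_PMNUM_SHIFT) |
                               (63u << CPUCFG6_PMBITS_SHIFT) | CPUCFG6_UPM;

            if (!(cfg & CPUCFG6_PMP))
                    return 0;
            /* kvm_get_pmu_num() extracts the field exactly this way */
            printf("counters: %u\n",
                   ((cfg & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT) + 1);
            printf("width:    %u bits\n",
                   ((cfg & CPUCFG6_PMBITS) >> CPUCFG6_PMBITS_SHIFT) + 1);
            printf("upm:      %s\n", (cfg & CPUCFG6_UPM) ? "yes" : "no");
            return 0;
    }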
@@ -83,6 +83,13 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
 	rj = inst.reg2csr_format.rj;
 	csrid = inst.reg2csr_format.csr;
 
+	if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= LOONGARCH_CSR_PERFCNTR3) {
+		if (!kvm_own_pmu(vcpu)) {
+			vcpu->arch.pc -= 4;
+			return EMULATE_DONE;
+		}
+	}
+
 	/* Process CSR ops */
 	switch (rj) {
 	case 0: /* process csrrd */

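The hunk above is a trap-and-enable pattern: kvm_own_pmu() returns 0 on success, so !kvm_own_pmu(vcpu) means the PMU was just handed to the guest, and rewinding pc by one 4-byte instruction makes the guest re-execute the same csrrd/csrwr, which now reaches hardware directly instead of being emulated. A minimal sketch of the pattern with invented stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    /* invented stand-ins for the vcpu and kvm_own_pmu() in the hunk above */
    struct fake_vcpu {
            unsigned long pc;
            bool owns_pmu;
    };

    static int fake_own_pmu(struct fake_vcpu *v)
    {
            v->owns_pmu = true;
            return 0;                       /* 0 == success, like kvm_own_pmu() */
    }

    static void handle_pmu_csr_trap(struct fake_vcpu *v)
    {
            if (!fake_own_pmu(v))
                    v->pc -= 4;             /* rewind: guest retries the CSR op */
    }

    int main(void)
    {
            /* assume pc has already advanced past the 4-byte CSR instruction */
            struct fake_vcpu v = { .pc = 0x1004, .owns_pmu = false };

            handle_pmu_csr_trap(&v);
            printf("pc=%#lx owns_pmu=%d\n", v.pc, v.owns_pmu);   /* pc=0x1000 */
            return 0;
    }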
@@ -544,6 +544,12 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
 	case LOONGARCH_CPUCFG5:
 		*v = GENMASK(31, 0);
 		return 0;
+	case LOONGARCH_CPUCFG6:
+		if (cpu_has_pmp)
+			*v = GENMASK(14, 0);
+		else
+			*v = 0;
+		return 0;
 	case LOONGARCH_CPUCFG16:
 		*v = GENMASK(16, 0);
 		return 0;

@@ -562,7 +568,7 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
 
 static int kvm_check_cpucfg(int id, u64 val)
 {
-	int ret;
+	int ret, host;
 	u64 mask = 0;
 
 	ret = _kvm_get_cpucfg_mask(id, &mask);

@@ -588,6 +594,18 @@ static int kvm_check_cpucfg(int id, u64 val)
 			/* LASX architecturally implies LSX and FP but val does not satisfy that */
 			return -EINVAL;
 		return 0;
+	case LOONGARCH_CPUCFG6:
+		if (val & CPUCFG6_PMP) {
+			host = read_cpucfg(6);
+			if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
+				/* Guest pmbits must be the same with host */
+				return -EINVAL;
+			if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
+				return -EINVAL;
+			if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
+				return -EINVAL;
+		}
+		return 0;
 	default:
 		/*
 		 * Values for the other CPUCFG IDs are not being further validated

@@ -767,6 +785,7 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
 {
 	switch (attr->attr) {
 	case 2:
+	case 6:
 		return 0;
 	default:
 		return -ENXIO;

@@ -1067,6 +1086,77 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 	preempt_enable();
 }
 
+int kvm_own_pmu(struct kvm_vcpu *vcpu)
+{
+	unsigned long val;
+
+	if (!kvm_guest_has_pmu(&vcpu->arch))
+		return -EINVAL;
+
+	preempt_disable();
+	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
+	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
+	write_csr_gcfg(val);
+
+	vcpu->arch.aux_inuse |= KVM_LARCH_PERF;
+	preempt_enable();
+	return 0;
+}
+
+static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
+{
+	struct loongarch_csrs *csr = vcpu->arch.csr;
+
+	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF))
+		return;
+
+	/* save guest pmu csr */
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
+	kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL0, 0);
+	kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL1, 0);
+	kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL2, 0);
+	kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL3, 0);
+	/* Disable pmu access from guest */
+	write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
+
+	if (((kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
+	      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
+	      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
+	      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3))
+	     & KVM_PMU_PLV_ENABLE) == 0)
+		vcpu->arch.aux_inuse &= ~KVM_LARCH_PERF;
+}
+
+static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
+{
+	unsigned long val;
+	struct loongarch_csrs *csr = vcpu->arch.csr;
+
+	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF))
+		return;
+
+	/* Set PM0-PM(num) to Guest */
+	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
+	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
+	write_csr_gcfg(val);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
+	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
+}
+
+
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
 	int intr = (int)irq->irq;

@@ -1205,6 +1295,10 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	/* Control guest page CCA attribute */
 	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
+
+	/* Restore hardware perf csr */
+	kvm_restore_pmu(vcpu);
+
 	kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
 
 	/* Don't bother restoring registers multiple times unless necessary */

@@ -1290,6 +1384,7 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
 	struct loongarch_csrs *csr = vcpu->arch.csr;
 
 	kvm_lose_fpu(vcpu);
+	kvm_lose_pmu(vcpu);
 
 	/*
 	 * Update CSR state from hardware if software CSR state is stale,
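
kvm_lose_pmu() above only clears KVM_LARCH_PERF when no saved PERFCTRL still has a privilege-level enable bit set, so a guest that is actively counting keeps PMU ownership across a vcpu_put/vcpu_load cycle and kvm_restore_pmu() reloads its counters. A standalone sketch of that liveness test; the PLV bit positions are illustrative, only the KVM_PMU_PLV_ENABLE composition mirrors the diff:

    #include <stdbool.h>
    #include <stdio.h>

    /* illustrative bit positions; the mask composition mirrors the diff */
    #define CSR_PERFCTRL_PLV0  (1ul << 16)
    #define CSR_PERFCTRL_PLV1  (1ul << 17)
    #define CSR_PERFCTRL_PLV2  (1ul << 18)
    #define CSR_PERFCTRL_PLV3  (1ul << 19)
    #define KVM_PMU_PLV_ENABLE (CSR_PERFCTRL_PLV0 | CSR_PERFCTRL_PLV1 | \
                                CSR_PERFCTRL_PLV2 | CSR_PERFCTRL_PLV3)

    /* true if any counter is still enabled at some privilege level */
    static bool pmu_still_in_use(const unsigned long ctrl[4])
    {
            return ((ctrl[0] | ctrl[1] | ctrl[2] | ctrl[3])
                    & KVM_PMU_PLV_ENABLE) != 0;
    }

    int main(void)
    {
            unsigned long idle[4] = { 0, 0, 0, 0 };
            unsigned long busy[4] = { CSR_PERFCTRL_PLV3, 0, 0, 0 };

            printf("idle: %d\n", pmu_still_in_use(idle)); /* 0: ownership dropped */
            printf("busy: %d\n", pmu_still_in_use(busy)); /* 1: ownership kept */
            return 0;
    }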