KVM: PPC: Book3S HV P9: Move remaining SPR and MSR access into low level entry

Move register saving and loading from kvmhv_p9_guest_entry() into the HV
and nested entry handlers.

Accesses are scheduled to reduce mtSPR / mfSPR interleaving, which
reduces SPR scoreboard stalls.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-32-npiggin@gmail.com
Nicholas Piggin authored on 2021-11-23 19:52:09 +10:00; committed by Michael Ellerman
parent 08b3f08af5
commit d5f4801945
2 changed files with 109 additions and 66 deletions
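As background for the "scheduled to reduce mtSPR / mfSPR interleaving" point in the commit message, the sketch below contrasts interleaved and batched SPR save/load sequences. It is illustrative only and not code from this patch: the SPR numbers, the sprs[] array and the mfspr()/mtspr() stubs are invented so the example builds as a plain user-space C program.

/*
 * Illustrative only: contrasts interleaved SPR accesses with the batched
 * ordering the patch description prefers. mfspr()/mtspr() are stubs over
 * a fake SPR file, not the real PowerPC accessors.
 */
#include <stdio.h>

static unsigned long sprs[2];			/* pretend SPR file */
#define mfspr(n)	(sprs[(n)])		/* stub read */
#define mtspr(n, v)	(sprs[(n)] = (v))	/* stub write */

enum { SPRN_A, SPRN_B };			/* stand-in SPR numbers */

/* Interleaved: reads and writes alternate, so they ping-pong on the
 * SPR scoreboard and each pair can stall the next. */
static void swap_interleaved(unsigned long *save, const unsigned long *load)
{
	save[0] = mfspr(SPRN_A); mtspr(SPRN_A, load[0]);
	save[1] = mfspr(SPRN_B); mtspr(SPRN_B, load[1]);
}

/* Scheduled: batch all reads, then all writes, so same-direction
 * accesses issue back to back with fewer scoreboard stalls. */
static void swap_scheduled(unsigned long *save, const unsigned long *load)
{
	save[0] = mfspr(SPRN_A);
	save[1] = mfspr(SPRN_B);
	mtspr(SPRN_A, load[0]);
	mtspr(SPRN_B, load[1]);
}

int main(void)
{
	unsigned long host[2], guest[2] = { 0x100, 0x200 };

	swap_interleaved(host, guest);	/* "enter the guest": save host, load guest */
	swap_scheduled(guest, host);	/* "exit": save guest, restore host values */
	printf("host SPRs restored to %#lx %#lx\n", sprs[0], sprs[1]);
	return 0;
}

The two helpers have identical semantics; only the ordering differs, which is why the patch can reorder accesses freely inside the entry and exit paths.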


@@ -3827,9 +3827,15 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long host_psscr;
unsigned long msr;
struct hv_guest_state hvregs;
int trap;
struct p9_host_os_sprs host_os_sprs;
s64 dec;
int trap;
switch_pmu_to_guest(vcpu, &host_os_sprs);
save_p9_host_os_sprs(&host_os_sprs);
/*
* We need to save and restore the guest visible part of the
@@ -3838,6 +3844,27 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
* this is done in kvmhv_vcpu_entry_p9() below otherwise.
*/
host_psscr = mfspr(SPRN_PSSCR_PR);
hard_irq_disable();
if (lazy_irq_pending())
return 0;
/* MSR bits may have been cleared by context switch */
msr = 0;
if (IS_ENABLED(CONFIG_PPC_FPU))
msr |= MSR_FP;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
msr |= MSR_VEC;
if (cpu_has_feature(CPU_FTR_VSX))
msr |= MSR_VSX;
if (cpu_has_feature(CPU_FTR_TM) ||
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
msr |= MSR_TM;
msr = msr_check_and_set(msr);
if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
msr = mfmsr(); /* TM restore can update msr */
mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
kvmhv_save_hv_regs(vcpu, &hvregs);
hvregs.lpcr = lpcr;
@@ -3879,12 +3906,20 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
mtspr(SPRN_PSSCR_PR, host_psscr);
store_vcpu_state(vcpu);
dec = mfspr(SPRN_DEC);
if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
dec = (s32) dec;
*tb = mftb();
vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset);
timer_rearm_host_dec(*tb);
restore_p9_host_os_sprs(vcpu, &host_os_sprs);
switch_pmu_to_host(vcpu, &host_os_sprs);
return trap;
}
@@ -3895,9 +3930,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr, u64 *tb)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
struct p9_host_os_sprs host_os_sprs;
u64 next_timer;
unsigned long msr;
int trap;
next_timer = timer_get_next_tb();
@@ -3908,33 +3941,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.ceded = 0;
save_p9_host_os_sprs(&host_os_sprs);
/*
* This could be combined with MSR[RI] clearing, but that expands
* the unrecoverable window. It would be better to cover unrecoverable
* with KVM bad interrupt handling rather than use MSR[RI] at all.
*
* Much more difficult and less worthwhile to combine with IR/DR
* disable.
*/
hard_irq_disable();
if (lazy_irq_pending())
return 0;
/* MSR bits may have been cleared by context switch */
msr = 0;
if (IS_ENABLED(CONFIG_PPC_FPU))
msr |= MSR_FP;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
msr |= MSR_VEC;
if (cpu_has_feature(CPU_FTR_VSX))
msr |= MSR_VSX;
if (cpu_has_feature(CPU_FTR_TM) ||
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
msr |= MSR_TM;
msr = msr_check_and_set(msr);
kvmppc_subcore_enter_guest();
vc->entry_exit_map = 1;
@@ -3942,11 +3948,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu_vpa_increment_dispatch(vcpu);
if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
msr = mfmsr(); /* MSR may have been updated */
switch_pmu_to_guest(vcpu, &host_os_sprs);
if (kvmhv_on_pseries()) {
trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
@@ -3989,16 +3990,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.slb_max = 0;
}
switch_pmu_to_host(vcpu, &host_os_sprs);
store_vcpu_state(vcpu);
vcpu_vpa_increment_dispatch(vcpu);
timer_rearm_host_dec(*tb);
restore_p9_host_os_sprs(vcpu, &host_os_sprs);
vc->entry_exit_map = 0x101;
vc->in_guest = 0;

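One detail worth calling out from the nested exit path added above: the decrementer read is sign extended only when the large decrementer (LPCR_LD) is not in use, because the register is then a 32-bit signed count, and the result is turned into an absolute expiry in guest timebase. The stand-alone sketch below mirrors that conversion with made-up values; dec_to_expiry() is a hypothetical helper for illustration, not a kernel function.

/*
 * Illustrative only: models the DEC -> dec_expires conversion done on
 * guest exit. Without the large decrementer, DEC is a 32-bit signed
 * count and must be sign extended before adding the offset timebase.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t dec_to_expiry(uint64_t raw_dec, int large_dec,
			     uint64_t tb, int64_t tb_offset)
{
	int64_t dec = raw_dec;

	if (!large_dec)			/* sign extend if not using large decrementer */
		dec = (int32_t)dec;

	/* expiry in guest timebase = remaining ticks + (host tb + offset) */
	return dec + (int64_t)(tb + tb_offset);
}

int main(void)
{
	/* A decrementer that has already wrapped negative (0xfffffff0 is
	 * -16 as a 32-bit value) must place the expiry just in the past,
	 * not ~2^32 ticks in the future. */
	printf("%lld\n", (long long)dec_to_expiry(0xfffffff0ULL, 0, 1000000, 0));
	printf("%lld\n", (long long)dec_to_expiry(0xfffffff0ULL, 1, 1000000, 0));
	return 0;
}

With the first call the expiry lands 16 ticks before the current timebase (999984), while skipping the sign extension would push it out to 4295967280.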

@@ -538,6 +538,7 @@ static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
{
struct p9_host_os_sprs host_os_sprs;
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -567,9 +568,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
vcpu->arch.ceded = 0;
/* Could avoid mfmsr by passing around, but probably no big deal */
msr = mfmsr();
host_hfscr = mfspr(SPRN_HFSCR);
host_ciabr = mfspr(SPRN_CIABR);
host_dawr0 = mfspr(SPRN_DAWR0);
@@ -584,6 +582,41 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
switch_pmu_to_guest(vcpu, &host_os_sprs);
save_p9_host_os_sprs(&host_os_sprs);
/*
* This could be combined with MSR[RI] clearing, but that expands
* the unrecoverable window. It would be better to cover unrecoverable
* with KVM bad interrupt handling rather than use MSR[RI] at all.
*
* Much more difficult and less worthwhile to combine with IR/DR
* disable.
*/
hard_irq_disable();
if (lazy_irq_pending()) {
trap = 0;
goto out;
}
/* MSR bits may have been cleared by context switch */
msr = 0;
if (IS_ENABLED(CONFIG_PPC_FPU))
msr |= MSR_FP;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
msr |= MSR_VEC;
if (cpu_has_feature(CPU_FTR_VSX))
msr |= MSR_VSX;
if (cpu_has_feature(CPU_FTR_TM) ||
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
msr |= MSR_TM;
msr = msr_check_and_set(msr);
/* Save MSR for restore. This is after hard disable, so EE is clear. */
if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
msr = mfmsr(); /* MSR may have been updated */
if (vc->tb_offset) {
u64 new_tb = *tb + vc->tb_offset;
mtspr(SPRN_TBU40, new_tb);
@@ -642,6 +675,14 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
/*
* It might be preferable to load_vcpu_state here, in order to get the
* GPR/FP register loads executing in parallel with the previous mtSPR
* instructions, but for now that can't be done because the TM handling
* in load_vcpu_state can change some SPRs and vcpu state (nip, msr).
* But TM could be split out if this would be a significant benefit.
*/
local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_P9;
/*
@@ -819,6 +860,20 @@ tm_return_to_guest:
vc->dpdes = mfspr(SPRN_DPDES);
vc->vtb = mfspr(SPRN_VTB);
save_clear_guest_mmu(kvm, vcpu);
switch_mmu_to_host(kvm, host_pidr);
/*
* If we are in real mode, only switch MMU on after the MMU is
* switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
*/
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
vcpu->arch.shregs.msr & MSR_TS_MASK)
msr |= MSR_TS_S;
__mtmsrd(msr, 0);
store_vcpu_state(vcpu);
dec = mfspr(SPRN_DEC);
if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
dec = (s32) dec;
@@ -851,6 +906,19 @@ tm_return_to_guest:
mtspr(SPRN_DAWRX1, host_dawrx1);
}
mtspr(SPRN_DPDES, 0);
if (vc->pcr)
mtspr(SPRN_PCR, PCR_MASK);
/* HDEC must be at least as large as DEC, so decrementer_max fits */
mtspr(SPRN_HDEC, decrementer_max);
timer_rearm_host_dec(*tb);
restore_p9_host_os_sprs(vcpu, &host_os_sprs);
local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
if (kvm_is_radix(kvm)) {
/*
* Since this is radix, do a eieio; tlbsync; ptesync sequence
@@ -867,26 +935,8 @@ tm_return_to_guest:
if (cpu_has_feature(CPU_FTR_ARCH_31))
asm volatile(PPC_CP_ABORT);
mtspr(SPRN_DPDES, 0);
if (vc->pcr)
mtspr(SPRN_PCR, PCR_MASK);
/* HDEC must be at least as large as DEC, so decrementer_max fits */
mtspr(SPRN_HDEC, decrementer_max);
save_clear_guest_mmu(kvm, vcpu);
switch_mmu_to_host(kvm, host_pidr);
local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
/*
* If we are in real mode, only switch MMU on after the MMU is
* switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
*/
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
vcpu->arch.shregs.msr & MSR_TS_MASK)
msr |= MSR_TS_S;
__mtmsrd(msr, 0);
out:
switch_pmu_to_host(vcpu, &host_os_sprs);
end_timing(vcpu);