KVM: PPC: Book3S HV P9: More SPR speed improvements

This avoids more scoreboard stalls and reduces mtSPRs.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-36-npiggin@gmail.com
This commit is contained in:
Nicholas Piggin 2021-11-23 19:52:13 +10:00 committed by Michael Ellerman
parent d55b1eccc7
commit 34e02d555d
1 changed file with 43 additions and 30 deletions

View File

@@ -645,24 +645,29 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 		vc->tb_offset_applied = vc->tb_offset;
 	}
 
-	if (vc->pcr)
-		mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
-	mtspr(SPRN_DPDES, vc->dpdes);
 	mtspr(SPRN_VTB, vc->vtb);
 	mtspr(SPRN_PURR, vcpu->arch.purr);
 	mtspr(SPRN_SPURR, vcpu->arch.spurr);
 
+	if (vc->pcr)
+		mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
+	if (vc->dpdes)
+		mtspr(SPRN_DPDES, vc->dpdes);
+
 	if (dawr_enabled()) {
-		mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
-		mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
+		if (vcpu->arch.dawr0 != host_dawr0)
+			mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
+		if (vcpu->arch.dawrx0 != host_dawrx0)
+			mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
 		if (cpu_has_feature(CPU_FTR_DAWR1)) {
-			mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
-			mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
+			if (vcpu->arch.dawr1 != host_dawr1)
+				mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
+			if (vcpu->arch.dawrx1 != host_dawrx1)
+				mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
 		}
 	}
-	mtspr(SPRN_CIABR, vcpu->arch.ciabr);
-	mtspr(SPRN_IC, vcpu->arch.ic);
+	if (vcpu->arch.ciabr != host_ciabr)
+		mtspr(SPRN_CIABR, vcpu->arch.ciabr);
 
 	mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
 	      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
@@ -881,20 +886,6 @@ tm_return_to_guest:
 	vc->dpdes = mfspr(SPRN_DPDES);
 	vc->vtb = mfspr(SPRN_VTB);
 
-	save_clear_guest_mmu(kvm, vcpu);
-	switch_mmu_to_host(kvm, host_pidr);
-
-	/*
-	 * If we are in real mode, only switch MMU on after the MMU is
-	 * switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
-	 */
-	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
-	    vcpu->arch.shregs.msr & MSR_TS_MASK)
-		msr |= MSR_TS_S;
-	__mtmsrd(msr, 0);
-
-	store_vcpu_state(vcpu);
-
 	dec = mfspr(SPRN_DEC);
 	if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
 		dec = (s32) dec;
@@ -912,6 +903,22 @@ tm_return_to_guest:
 		vc->tb_offset_applied = 0;
 	}
 
+	save_clear_guest_mmu(kvm, vcpu);
+	switch_mmu_to_host(kvm, host_pidr);
+
+	/*
+	 * Enable MSR here in order to have facilities enabled to save
+	 * guest registers. This enables MMU (if we were in realmode), so
+	 * only switch MMU on after the MMU is switched to host, to avoid
+	 * the P9_RADIX_PREFETCH_BUG or hash guest context.
+	 */
+	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+	    vcpu->arch.shregs.msr & MSR_TS_MASK)
+		msr |= MSR_TS_S;
+	__mtmsrd(msr, 0);
+
+	store_vcpu_state(vcpu);
+
 	mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr);
 	mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr);
@@ -919,14 +926,20 @@ tm_return_to_guest:
 	mtspr(SPRN_PSSCR, host_psscr |
 	      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
 	mtspr(SPRN_HFSCR, host_hfscr);
-	mtspr(SPRN_CIABR, host_ciabr);
-	mtspr(SPRN_DAWR0, host_dawr0);
-	mtspr(SPRN_DAWRX0, host_dawrx0);
+	if (vcpu->arch.ciabr != host_ciabr)
+		mtspr(SPRN_CIABR, host_ciabr);
+	if (vcpu->arch.dawr0 != host_dawr0)
+		mtspr(SPRN_DAWR0, host_dawr0);
+	if (vcpu->arch.dawrx0 != host_dawrx0)
+		mtspr(SPRN_DAWRX0, host_dawrx0);
 	if (cpu_has_feature(CPU_FTR_DAWR1)) {
-		mtspr(SPRN_DAWR1, host_dawr1);
-		mtspr(SPRN_DAWRX1, host_dawrx1);
+		if (vcpu->arch.dawr1 != host_dawr1)
+			mtspr(SPRN_DAWR1, host_dawr1);
+		if (vcpu->arch.dawrx1 != host_dawrx1)
+			mtspr(SPRN_DAWRX1, host_dawrx1);
 	}
-	mtspr(SPRN_DPDES, 0);
+	if (vc->dpdes)
+		mtspr(SPRN_DPDES, 0);
 	if (vc->pcr)
 		mtspr(SPRN_PCR, PCR_MASK);