KVM: PPC: Book3S HV P9: Demand fault EBB facility registers
Use HFSCR facility disabling to implement demand faulting for EBB, with
a hysteresis counter similar to the load_fp etc. counters in context
switching that implement the equivalent demand faulting for userspace
facilities.

This speeds up guest entry/exit by avoiding the register save/restore
when a guest is not frequently using the EBB registers. When a guest
does use them often, there will be some additional demand-fault
overhead, but these are not commonly used facilities.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-37-npiggin@gmail.com
parent 34e02d555d
commit a3e18ca8ab
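How the hysteresis behaves: the facility starts disabled, the guest's first
EBB access takes a facility-unavailable interrupt, the handler sets HFSCR_EBB,
and from then on every exit saves the EBB registers and bumps a u8 counter.
When the counter wraps (every 256 exits) the facility is dropped again, so a
guest that has stopped using EBB stops paying the save/restore cost. Below is
a minimal, self-contained C sketch of that pattern; the struct layout, the
HFSCR_EBB value, and the function names are illustrative stand-ins for this
sketch only, not the kernel's definitions (those are in the hunks below).

/* Standalone sketch of the load_fp-style demand-fault hysteresis.
 * Build with: cc -o hysteresis hysteresis.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HFSCR_EBB (1UL << 7)	/* assumed bit value, for illustration */

struct vcpu_sketch {
	unsigned long hfscr;		/* facility-enable bits */
	unsigned long hfscr_permitted;	/* bits the guest may ever use */
	uint8_t load_ebb;		/* hysteresis counter */
};

/* Exit path: while the facility is enabled, save its registers and bump
 * the counter; when the u8 wraps (every 256 exits), drop the facility so
 * a guest no longer using it stops paying for the save/restore. */
static void store_facility_state(struct vcpu_sketch *v)
{
	if (v->hfscr & HFSCR_EBB) {
		/* ... the EBBHR/EBBRR/BESCR saves would go here ... */
		if (++v->load_ebb == 0)
			v->hfscr &= ~HFSCR_EBB;
	}
}

/* Facility-unavailable interrupt: re-enable on first guest use. */
static bool ebb_unavailable(struct vcpu_sketch *v)
{
	if (!(v->hfscr_permitted & HFSCR_EBB))
		return false;	/* never permitted: reflect to the guest */
	v->hfscr |= HFSCR_EBB;
	return true;		/* resume; entry path reloads the registers */
}

int main(void)
{
	struct vcpu_sketch v = {
		.hfscr = 0, .hfscr_permitted = HFSCR_EBB, .load_ebb = 0,
	};

	ebb_unavailable(&v);	/* guest touches EBB: fault it in */
	for (int exit = 1; exit <= 300; exit++) {
		store_facility_state(&v);
		if (!(v.hfscr & HFSCR_EBB)) {
			printf("facility dropped at exit %d\n", exit);
			break;	/* the next guest EBB use faults again */
		}
	}
	return 0;
}

Running the sketch prints "facility dropped at exit 256": the overhead of an
enabled-but-idle facility is bounded, since it is switched back off within 256
partition switches, while a guest using EBB continuously takes only one demand
fault per 256 exits.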
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -579,6 +579,7 @@ struct kvm_vcpu_arch {
 	ulong cfar;
 	ulong ppr;
 	u32 pspb;
+	u8 load_ebb;
 	ulong fscr;
 	ulong shadow_fscr;
 	ulong ebbhr;
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1436,6 +1436,16 @@ static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
+static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB))
+		return EMULATE_FAIL;
+
+	vcpu->arch.hfscr |= HFSCR_EBB;
+
+	return RESUME_GUEST;
+}
+
 static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 				 struct task_struct *tsk)
 {
@@ -1727,6 +1737,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			r = kvmppc_emulate_doorbell_instr(vcpu);
 		if (cause == FSCR_PM_LG)
 			r = kvmppc_pmu_unavailable(vcpu);
+		if (cause == FSCR_EBB_LG)
+			r = kvmppc_ebb_unavailable(vcpu);
 	}
 	if (r == EMULATE_FAIL) {
 		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
@@ -2771,9 +2783,9 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 	vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
 
 	/*
-	 * PM is demand-faulted so start with it clear.
+	 * PM, EBB is demand-faulted so start with it clear.
 	 */
-	vcpu->arch.hfscr &= ~HFSCR_PM;
+	vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB);
 
 	kvmppc_mmu_book3s_hv_init(vcpu);
 
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -232,9 +232,12 @@ static void load_spr_state(struct kvm_vcpu *vcpu,
 				struct p9_host_os_sprs *host_os_sprs)
 {
 	mtspr(SPRN_TAR, vcpu->arch.tar);
-	mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
-	mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
-	mtspr(SPRN_BESCR, vcpu->arch.bescr);
+
+	if (vcpu->arch.hfscr & HFSCR_EBB) {
+		mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+		mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+		mtspr(SPRN_BESCR, vcpu->arch.bescr);
+	}
 
 	if (cpu_has_feature(CPU_FTR_P9_TIDR))
 		mtspr(SPRN_TIDR, vcpu->arch.tid);
@@ -265,9 +268,22 @@ static void store_spr_state(struct kvm_vcpu *vcpu)
 static void store_spr_state(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.tar = mfspr(SPRN_TAR);
-	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
-	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
-	vcpu->arch.bescr = mfspr(SPRN_BESCR);
+
+	if (vcpu->arch.hfscr & HFSCR_EBB) {
+		vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
+		vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
+		vcpu->arch.bescr = mfspr(SPRN_BESCR);
+		/*
+		 * This is like load_fp in context switching, turn off the
+		 * facility after it wraps the u8 to try avoiding saving
+		 * and restoring the registers each partition switch.
+		 */
+		if (!vcpu->arch.nested) {
+			vcpu->arch.load_ebb++;
+			if (!vcpu->arch.load_ebb)
+				vcpu->arch.hfscr &= ~HFSCR_EBB;
+		}
+	}
 
 	if (cpu_has_feature(CPU_FTR_P9_TIDR))
 		vcpu->arch.tid = mfspr(SPRN_TIDR);