Merge branch 'topic/ppc-kvm' into next
Merge some KVM patches we are keeping in a topic branch in case there are any merge conflicts that need resolving.
commit 465e333e77
@@ -39,6 +39,7 @@ struct kvm_nested_guest {
	pgd_t *shadow_pgtable; /* our page table for this guest */
	u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */
	u64 process_table; /* process table entry for this guest */
	u64 hfscr; /* HFSCR that the L1 requested for this nested guest */
	long refcnt; /* number of pointers to this struct */
	struct mutex tlb_lock; /* serialize page faults and tlbies */
	struct kvm_nested_guest *next;
@@ -811,6 +811,8 @@ struct kvm_vcpu_arch {

	u32 online;

	u64 hfscr_permitted; /* A mask of permitted HFSCR facilities */

	/* For support of nested guests */
	struct kvm_nested_guest *nested;
	u32 nested_vcpu_id;
@@ -34,6 +34,13 @@ static inline void ppc_set_pmu_inuse(int inuse)
#endif
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static inline int ppc_get_pmu_inuse(void)
{
	return get_paca()->pmcregs_in_use;
}
#endif

extern void power4_enable_pmcs(void);

#else /* CONFIG_PPC64 */
@@ -415,6 +415,7 @@
#define FSCR_TAR __MASK(FSCR_TAR_LG)
#define FSCR_EBB __MASK(FSCR_EBB_LG)
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define FSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
#define HFSCR_PREFIX __MASK(FSCR_PREFIX_LG)
#define HFSCR_MSGP __MASK(FSCR_MSGP_LG)
@@ -426,7 +427,7 @@
#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
#define HFSCR_FP __MASK(FSCR_FP_LG)
#define HFSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */
#define HFSCR_INTR_CAUSE FSCR_INTR_CAUSE
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 ASM_CONST(0x8000000000000000)
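The two reg.h hunks above consolidate the interrupt-cause field: both FSCR and HFSCR keep the cause of a facility unavailable interrupt in the top byte of the register (the 0xFF << 56 field), so HFSCR_INTR_CAUSE becomes a plain alias for FSCR_INTR_CAUSE. A minimal standalone sketch of writing and reading that byte, using an illustrative mask and cause value rather than the kernel macros:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's FSCR_INTR_CAUSE mask; the real
 * definitions live in arch/powerpc/include/asm/reg.h. */
#define INTR_CAUSE_MASK (0xFFULL << 56)

static uint64_t set_cause(uint64_t hfscr, unsigned int cause_lg)
{
	/* Clear the old cause byte, then record which facility caused the interrupt. */
	return (hfscr & ~INTR_CAUSE_MASK) | ((uint64_t)cause_lg << 56);
}

static unsigned int get_cause(uint64_t hfscr)
{
	return hfscr >> 56;
}

int main(void)
{
	uint64_t hfscr = 0x1ULL;	/* some facility-enable bits */

	hfscr = set_cause(hfscr, 5);	/* hypothetical cause value for illustration */
	printf("cause = %u\n", get_cause(hfscr));
	return 0;
}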
@@ -44,6 +44,9 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					  (to != NULL) ? __pa(to): 0,
					  (from != NULL) ? __pa(from): 0, n);

	if (eaddr & (0xFFFUL << 52))
		return ret;

	quadrant = 1;
	if (!pid)
		quadrant = 2;
@@ -65,10 +68,12 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
	}
	isync();

	pagefault_disable();
	if (is_load)
		ret = copy_from_user_nofault(to, (const void __user *)from, n);
		ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
	else
		ret = copy_to_user_nofault((void __user *)to, from, n);
		ret = __copy_to_user_inatomic((void __user *)to, from, n);
	pagefault_enable();

	/* switch the pid first to avoid running host with unallocated pid */
	if (quadrant == 1 && pid != old_pid)
@@ -81,7 +86,6 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,

	return ret;
}
EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);

static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					  void *to, void *from, unsigned long n)
@@ -117,14 +121,12 @@ long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,

	return ret;
}
EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);

long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
			       unsigned long n)
{
	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
}
EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);

int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			       struct kvmppc_pte *gpte, u64 root,
@@ -59,6 +59,7 @@
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/pmc.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
@@ -1679,6 +1680,21 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
			r = RESUME_GUEST;
		}
		break;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
		/*
		 * This occurs for various TM-related instructions that
		 * we need to emulate on POWER9 DD2.2. We have already
		 * handled the cases where the guest was in real-suspend
		 * mode and was transitioning to transactional state.
		 */
		r = kvmhv_p9_tm_emulation(vcpu);
		if (r != -1)
			break;
		fallthrough; /* go to facility unavailable handler */
#endif

	/*
	 * This occurs if the guest (kernel or userspace), does something that
	 * is prohibited by HFSCR.
@@ -1697,18 +1713,6 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
		}
		break;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
		/*
		 * This occurs for various TM-related instructions that
		 * we need to emulate on POWER9 DD2.2. We have already
		 * handled the cases where the guest was in real-suspend
		 * mode and was transitioning to transactional state.
		 */
		r = kvmhv_p9_tm_emulation(vcpu);
		break;
#endif

	case BOOK3S_INTERRUPT_HV_RM_HARD:
		r = RESUME_PASSTHROUGH;
		break;
@@ -1727,6 +1731,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,

static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *nested = vcpu->arch.nested;
	int r;
	int srcu_idx;
@@ -1811,9 +1816,41 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
		 * mode and was transitioning to transactional state.
		 */
		r = kvmhv_p9_tm_emulation(vcpu);
		break;
		if (r != -1)
			break;
		fallthrough; /* go to facility unavailable handler */
#endif

	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
		u64 cause = vcpu->arch.hfscr >> 56;

		/*
		 * Only pass HFU interrupts to the L1 if the facility is
		 * permitted but disabled by the L1's HFSCR, otherwise
		 * the interrupt does not make sense to the L1 so turn
		 * it into a HEAI.
		 */
		if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
				(nested->hfscr & (1UL << cause))) {
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;

			/*
			 * If the fetch failed, return to guest and
			 * try executing it again.
			 */
			r = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						 &vcpu->arch.emul_inst);
			if (r != EMULATE_DONE)
				r = RESUME_GUEST;
			else
				r = RESUME_HOST;
		} else {
			r = RESUME_HOST;
		}

		break;
	}

	case BOOK3S_INTERRUPT_HV_RM_HARD:
		vcpu->arch.trap = 0;
		r = RESUME_GUEST;
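The nested-exit hunk above decides how a hypervisor facility unavailable (HFU) interrupt taken while running an L2 should be presented to the L1: the cause is read from the top byte of HFSCR, and the HFU is reflected to the L1 only if the L0 permitted the L1 that facility (hfscr_permitted) but the L1 disabled it in the HFSCR it supplied for the L2; otherwise the interrupt would make no sense to the L1, so it is converted into an emulation-assist interrupt (HEAI). A standalone sketch of that decision, with simplified stand-in parameters rather than the kernel's vcpu and struct kvm_nested_guest state:

#include <stdint.h>
#include <stdio.h>

enum reflect { REFLECT_HFU_TO_L1, CONVERT_TO_HEAI };

/*
 * hfscr_permitted: facilities the L0 allows this vcpu (captured at vcpu create),
 * l1_hfscr:        the HFSCR the L1 requested for its L2,
 * hfscr_at_exit:   the live HFSCR at exit time, whose top byte holds the cause.
 */
static enum reflect classify_hfu(uint64_t hfscr_permitted, uint64_t l1_hfscr,
				 uint64_t hfscr_at_exit)
{
	unsigned int cause = hfscr_at_exit >> 56;

	/* Reflect only if permitted by the L0 but disabled by the L1 for its L2. */
	if ((hfscr_permitted & (1UL << cause)) && !(l1_hfscr & (1UL << cause)))
		return REFLECT_HFU_TO_L1;
	return CONVERT_TO_HEAI;
}

int main(void)
{
	/* Hypothetical facility bit 5, purely for illustration: permitted by the
	 * L0, disabled by the L1. */
	uint64_t permitted = 1UL << 5, l1 = 0, exit_hfscr = 5UL << 56;

	printf("%s\n", classify_hfu(permitted, l1, exit_hfscr) == REFLECT_HFU_TO_L1
		       ? "reflect HFU to L1" : "convert to HEAI");
	return 0;
}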
@@ -2684,6 +2721,7 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
	spin_lock_init(&vcpu->arch.vpa_update_lock);
	spin_lock_init(&vcpu->arch.tbacct_lock);
	vcpu->arch.busy_preempt = TB_NIL;
	vcpu->arch.shregs.msr = MSR_ME;
	vcpu->arch.intr_msr = MSR_SF | MSR_ME;

	/*
|
@ -2705,6 +2743,8 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
|
|||
if (cpu_has_feature(CPU_FTR_TM_COMP))
|
||||
vcpu->arch.hfscr |= HFSCR_TM;
|
||||
|
||||
vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
|
||||
|
||||
kvmppc_mmu_book3s_hv_init(vcpu);
|
||||
|
||||
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
|
||||
|
@@ -3727,7 +3767,6 @@ static void load_spr_state(struct kvm_vcpu *vcpu)
	mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
	mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
	mtspr(SPRN_BESCR, vcpu->arch.bescr);
	mtspr(SPRN_WORT, vcpu->arch.wort);
	mtspr(SPRN_TIDR, vcpu->arch.tid);
	mtspr(SPRN_AMR, vcpu->arch.amr);
	mtspr(SPRN_UAMOR, vcpu->arch.uamor);
@@ -3754,7 +3793,6 @@ static void store_spr_state(struct kvm_vcpu *vcpu)
	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
	vcpu->arch.bescr = mfspr(SPRN_BESCR);
	vcpu->arch.wort = mfspr(SPRN_WORT);
	vcpu->arch.tid = mfspr(SPRN_TIDR);
	vcpu->arch.amr = mfspr(SPRN_AMR);
	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
@@ -3786,7 +3824,6 @@ static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
				    struct p9_host_os_sprs *host_os_sprs)
{
	mtspr(SPRN_PSPB, 0);
	mtspr(SPRN_WORT, 0);
	mtspr(SPRN_UAMOR, 0);

	mtspr(SPRN_DSCR, host_os_sprs->dscr);
@@ -3852,6 +3889,18 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
		kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);

#ifdef CONFIG_PPC_PSERIES
	if (kvmhv_on_pseries()) {
		barrier();
		if (vcpu->arch.vpa.pinned_addr) {
			struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
			get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
		} else {
			get_lppaca()->pmcregs_in_use = 1;
		}
		barrier();
	}
#endif
	kvmhv_load_guest_pmu(vcpu);

	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
@@ -3986,6 +4035,13 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
		save_pmu |= nesting_enabled(vcpu->kvm);

	kvmhv_save_guest_pmu(vcpu, save_pmu);
#ifdef CONFIG_PPC_PSERIES
	if (kvmhv_on_pseries()) {
		barrier();
		get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
		barrier();
	}
#endif

	vc->entry_exit_map = 0x101;
	vc->in_guest = 0;
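The two kvmhv_p9_guest_entry hunks above cover KVM running as a nested hypervisor under a pseries L0: while the L2 guest's PMU SPRs are live, the L1 copies the guest's own pmcregs_in_use flag (from its VPA, or 1 if no VPA is registered) into its lppaca so the L0 knows it must context-switch the PMU, and on exit it restores the flag from ppc_get_pmu_inuse(). A minimal sketch of that save/restore pattern, with illustrative stand-in variables rather than the paca/lppaca accessors:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins: in the kernel these are get_lppaca()->pmcregs_in_use, the guest
 * VPA's pmcregs_in_use, and get_paca()->pmcregs_in_use (ppc_get_pmu_inuse()). */
static bool l1_lppaca_pmcregs_in_use;	/* what the L0 hypervisor sees */
static bool l1_own_pmcregs_in_use;	/* the L1's own PMU usage */

static void enter_l2(bool guest_vpa_valid, bool guest_pmcregs_in_use)
{
	/* While the L2's SPR values are loaded, advertise the guest's PMU usage
	 * to the L0; assume "in use" when the guest has not registered a VPA. */
	l1_lppaca_pmcregs_in_use = guest_vpa_valid ? guest_pmcregs_in_use : true;
}

static void exit_l2(void)
{
	/* Back in L1 context: advertise the L1's own PMU usage again. */
	l1_lppaca_pmcregs_in_use = l1_own_pmcregs_in_use;
}

int main(void)
{
	l1_own_pmcregs_in_use = false;
	enter_l2(true, true);
	printf("in guest:   pmcregs_in_use=%d\n", l1_lppaca_pmcregs_in_use);
	exit_l2();
	printf("after exit: pmcregs_in_use=%d\n", l1_lppaca_pmcregs_in_use);
	return 0;
}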
@@ -99,13 +99,12 @@ static void byteswap_hv_regs(struct hv_guest_state *hr)
	hr->dawrx1 = swab64(hr->dawrx1);
}

static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
static void save_hv_return_state(struct kvm_vcpu *vcpu,
				 struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->dpdes = vc->dpdes;
	hr->hfscr = vcpu->arch.hfscr;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
@@ -119,7 +118,7 @@ static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	switch (trap) {
	switch (vcpu->arch.trap) {
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		hr->hdar = vcpu->arch.fault_dar;
		hr->hdsisr = vcpu->arch.fault_dsisr;
@@ -128,55 +127,17 @@ static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		hr->hfscr = ((~HFSCR_INTR_CAUSE & hr->hfscr) |
			     (HFSCR_INTR_CAUSE & vcpu->arch.hfscr));
		break;
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		hr->heir = vcpu->arch.emul_inst;
		break;
	}
}

/*
 * This can result in some L0 HV register state being leaked to an L1
 * hypervisor when the hv_guest_state is copied back to the guest after
 * being modified here.
 *
 * There is no known problem with such a leak, and in many cases these
 * register settings could be derived by the guest by observing behaviour
 * and timing, interrupts, etc., but it is an issue to consider.
 */
static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	/*
	 * Don't let L1 change LPCR bits for the L2 except these:
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
		LPCR_LPES | LPCR_MER;

	/*
	 * Additional filtering is required depending on hardware
	 * and configuration.
	 */
	hr->lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
			(vc->lpcr & ~mask) | (hr->lpcr & mask));

	/*
	 * Don't let L1 enable features for L2 which we've disabled for L1,
	 * but preserve the interrupt cause field.
	 */
	hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);

	/* Don't let data address watchpoint match in hypervisor state */
	hr->dawrx0 &= ~DAWRX_HYP;
	hr->dawrx1 &= ~DAWRX_HYP;

	/* Don't let completed instruction address breakpt match in HV state */
	if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
		hr->ciabr &= ~CIABR_PRIV;
}

static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
static void restore_hv_regs(struct kvm_vcpu *vcpu, const struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -288,6 +249,43 @@ static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
				     sizeof(struct pt_regs));
}

static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
			    const struct hv_guest_state *l2_hv,
			    const struct hv_guest_state *l1_hv, u64 *lpcr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	restore_hv_regs(vcpu, l2_hv);

	/*
	 * Don't let L1 change LPCR bits for the L2 except these:
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
		LPCR_LPES | LPCR_MER;

	/*
	 * Additional filtering is required depending on hardware
	 * and configuration.
	 */
	*lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
			(vc->lpcr & ~mask) | (*lpcr & mask));

	/*
	 * Don't let L1 enable features for L2 which we don't allow for L1,
	 * but preserve the interrupt cause field.
	 */
	vcpu->arch.hfscr = l2_hv->hfscr & (HFSCR_INTR_CAUSE | vcpu->arch.hfscr_permitted);

	/* Don't let data address watchpoint match in hypervisor state */
	vcpu->arch.dawrx0 = l2_hv->dawrx0 & ~DAWRX_HYP;
	vcpu->arch.dawrx1 = l2_hv->dawrx1 & ~DAWRX_HYP;

	/* Don't let completed instruction address breakpt match in HV state */
	if ((l2_hv->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
		vcpu->arch.ciabr = l2_hv->ciabr & ~CIABR_PRIV;
}

long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
	long int err, r;
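load_l2_hv_regs above applies the L0's policy to the register state an L1 passes in through H_ENTER_NESTED: the L2's LPCR starts from the L1's own vcore LPCR and only a whitelisted set of bits may be overridden, and the L2's HFSCR is clamped to the facilities the L0 permitted the L1 (plus the interrupt cause byte). A small standalone sketch of the merge-under-mask idea, using made-up bit values for illustration rather than real LPCR bit assignments:

#include <stdint.h>
#include <stdio.h>

/*
 * Merge a requested register value into a base value, letting the request
 * override only the bits in 'mask'. This mirrors the LPCR handling above:
 * new = (l1_lpcr & ~mask) | (l2_request & mask).
 */
static uint64_t merge_under_mask(uint64_t base, uint64_t request, uint64_t mask)
{
	return (base & ~mask) | (request & mask);
}

int main(void)
{
	uint64_t l1_lpcr    = 0xF0F0;	/* what the L1 itself runs with */
	uint64_t l2_request = 0x00FF;	/* what the L1 asked for its L2 */
	uint64_t allowed    = 0x000F;	/* bits the L0 lets the L1 change */

	printf("L2 LPCR = 0x%llx\n",
	       (unsigned long long)merge_under_mask(l1_lpcr, l2_request, allowed));
	/* Only the low nibble is taken from the request -> 0xF0FF */
	return 0;
}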
@@ -296,7 +294,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
	struct hv_guest_state l2_hv = {0}, saved_l1_hv;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 hv_ptr, regs_ptr;
	u64 hdec_exp;
	u64 hdec_exp, lpcr;
	s64 delta_purr, delta_spurr, delta_ic, delta_vtb;

	if (vcpu->kvm->arch.l1_ptcr == 0)
@@ -364,13 +362,14 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
	/* set L1 state to L2 state */
	vcpu->arch.nested = l2;
	vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
	l2->hfscr = l2_hv.hfscr;
	vcpu->arch.regs = l2_regs;

	/* Guest must always run with ME enabled, HV disabled. */
	vcpu->arch.shregs.msr = (vcpu->arch.regs.msr | MSR_ME) & ~MSR_HV;

	sanitise_hv_regs(vcpu, &l2_hv);
	restore_hv_regs(vcpu, &l2_hv);
	lpcr = l2_hv.lpcr;
	load_l2_hv_regs(vcpu, &l2_hv, &saved_l1_hv, &lpcr);

	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
@@ -380,7 +379,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
			r = RESUME_HOST;
			break;
		}
		r = kvmhv_run_single_vcpu(vcpu, hdec_exp, l2_hv.lpcr);
		r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
	} while (is_kvmppc_resume_guest(r));

	/* save L2 state for return */
@@ -390,7 +389,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
	delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
	delta_ic = vcpu->arch.ic - l2_hv.ic;
	delta_vtb = vc->vtb - l2_hv.vtb;
	save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);
	save_hv_return_state(vcpu, &l2_hv);

	/* restore L1 state */
	vcpu->arch.nested = NULL;
@@ -1088,12 +1088,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq kvmppc_hisi

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* For softpatch interrupt, go off and do TM instruction emulation */
	cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	beq kvmppc_tm_emul
#endif

	/* See if this is a leftover HDEC interrupt */
	cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne 2f
@@ -1599,42 +1593,6 @@ maybe_reenter_guest:
	blt deliver_guest_interrupt
	b guest_exit_cont

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Softpatch interrupt for transactional memory emulation cases
 * on POWER9 DD2.2. This is early in the guest exit path - we
 * haven't saved registers or done a treclaim yet.
 */
kvmppc_tm_emul:
	/* Save instruction image in HEIR */
	mfspr r3, SPRN_HEIR
	stw r3, VCPU_HEIR(r9)

	/*
	 * The cases we want to handle here are those where the guest
	 * is in real suspend mode and is trying to transition to
	 * transactional mode.
	 */
	lbz r0, HSTATE_FAKE_SUSPEND(r13)
	cmpwi r0, 0 /* keep exiting guest if in fake suspend */
	bne guest_exit_cont
	rldicl r3, r11, 64 - MSR_TS_S_LG, 62
	cmpwi r3, 1 /* or if not in suspend state */
	bne guest_exit_cont

	/* Call C code to do the emulation */
	mr r3, r9
	bl kvmhv_p9_tm_emulation_early
	nop
	ld r9, HSTATE_KVM_VCPU(r13)
	li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	cmpwi r3, 0
	beq guest_exit_cont /* continue exiting if not handled */
	ld r10, VCPU_PC(r9)
	ld r11, VCPU_MSR(r9)
	b fast_interrupt_c_return /* go back to guest if handled */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
@@ -46,6 +46,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
	u64 newmsr, bescr;
	int ra, rs;

	/*
	 * The TM softpatch interrupt sets NIP to the instruction following
	 * the faulting instruction, which is not executed. Rewind nip to the
	 * faulting instruction so it looks like a normal synchronous
	 * interrupt, then update nip in the places where the instruction is
	 * emulated.
	 */
	vcpu->arch.regs.nip -= 4;

	/*
	 * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
	 * in these instructions, so masking bit 31 out doesn't change these
|
|||
(newmsr & MSR_TM)));
|
||||
newmsr = sanitize_msr(newmsr);
|
||||
vcpu->arch.shregs.msr = newmsr;
|
||||
vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
|
||||
vcpu->arch.cfar = vcpu->arch.regs.nip;
|
||||
vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
|
||||
return RESUME_GUEST;
|
||||
|
||||
|
@@ -79,14 +88,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
		}
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_EBB_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_EBB_LG << 56);
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_EBB_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
		}
@@ -100,7 +110,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
		vcpu->arch.bescr = bescr;
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.cfar = vcpu->arch.regs.nip;
		vcpu->arch.regs.nip = vcpu->arch.ebbrr;
		return RESUME_GUEST;
|
@ -116,6 +126,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
|
|||
newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
|
||||
newmsr = sanitize_msr(newmsr);
|
||||
vcpu->arch.shregs.msr = newmsr;
|
||||
vcpu->arch.regs.nip += 4;
|
||||
return RESUME_GUEST;
|
||||
|
||||
/* ignore bit 31, see comment above */
|
||||
|
@@ -128,14 +139,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
		}
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
@@ -152,20 +164,22 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
			msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
		}
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
@@ -189,6 +203,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;

	/* ignore bit 31, see comment above */
@@ -196,14 +211,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
		/* XXX do we need to check for PR=0 here? */
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
			vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
			vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
			vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
			return -1; /* rerun host interrupt handler */
		}
		if (!(msr & MSR_TM)) {
			/* generate a facility unavailable interrupt */
			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
				((u64)FSCR_TM_LG << 56);
			vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
			vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
			kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_FAC_UNAVAIL);
			return RESUME_GUEST;
@@ -220,6 +236,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
		vcpu->arch.shregs.msr = msr | MSR_TS_S;
		vcpu->arch.regs.nip += 4;
		return RESUME_GUEST;
	}
@@ -667,7 +667,6 @@ static unsigned long power9_idle_stop(unsigned long psscr)
	sprs.purr = mfspr(SPRN_PURR);
	sprs.spurr = mfspr(SPRN_SPURR);
	sprs.dscr = mfspr(SPRN_DSCR);
	sprs.wort = mfspr(SPRN_WORT);
	sprs.ciabr = mfspr(SPRN_CIABR);

	sprs.mmcra = mfspr(SPRN_MMCRA);
@@ -785,7 +784,6 @@ core_woken:
	mtspr(SPRN_PURR, sprs.purr);
	mtspr(SPRN_SPURR, sprs.spurr);
	mtspr(SPRN_DSCR, sprs.dscr);
	mtspr(SPRN_WORT, sprs.wort);
	mtspr(SPRN_CIABR, sprs.ciabr);

	mtspr(SPRN_MMCRA, sprs.mmcra);