Merge branch 'kvm-ppc-next' of https://github.com/agraf/linux-2.6 into queue
commit aa11e3a8a6
@@ -345,7 +345,7 @@ struct kvm_sregs {
 	__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
 };
 
-/* ppc -- see arch/powerpc/include/asm/kvm.h */
+/* ppc -- see arch/powerpc/include/uapi/asm/kvm.h */
 
 interrupt_bitmap is a bitmap of pending external interrupts. At most
 one bit may be set. This interrupt has been acknowledged by the APIC
@@ -1774,6 +1774,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_VPA_SLB   | 128
   PPC   | KVM_REG_PPC_VPA_DTL   | 128
   PPC   | KVM_REG_PPC_EPCR      | 32
+  PPC   | KVM_REG_PPC_EPR       | 32
 
 4.69 KVM_GET_ONE_REG
 
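Like the other ids in this table, the new KVM_REG_PPC_EPR id is accessed
through the ONE_REG ioctls documented below. A minimal user space sketch
(not part of this patch; vcpu_fd stands for an already opened vcpu file
descriptor):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch: fetch the guest EPR via the ONE_REG interface. */
    static int get_epr(int vcpu_fd, __u32 *val)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_PPC_EPR,
                    .addr = (__u64)(unsigned long)val,
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }
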
@@ -2246,8 +2247,8 @@ executed a memory-mapped I/O instruction which could not be satisfied
 by kvm. The 'data' member contains the written data if 'is_write' is
 true, and should be filled by application code otherwise.
 
-NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_DCR
-      and KVM_EXIT_PAPR the corresponding
+NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_DCR,
+      KVM_EXIT_PAPR and KVM_EXIT_EPR the corresponding
 operations are complete (and guest state is consistent) only after userspace
 has re-entered the kernel with KVM_RUN. The kernel side will first finish
 incomplete operations and then check for pending signals. Userspace
@@ -2366,6 +2367,25 @@ interrupt for the target subchannel has been dequeued and subchannel_id,
 subchannel_nr, io_int_parm and io_int_word contain the parameters for that
 interrupt. ipb is needed for instruction parameter decoding.
 
+		/* KVM_EXIT_EPR */
+		struct {
+			__u32 epr;
+		} epr;
+
+On FSL BookE PowerPC chips, the interrupt controller has a fast
+interrupt acknowledge path to the core. When the core successfully
+delivers an interrupt, it automatically populates the EPR register with
+the interrupt vector number and acknowledges the interrupt inside
+the interrupt controller.
+
+In case the interrupt controller lives in user space, we need to do
+the interrupt acknowledge cycle through it to fetch the next to be
+delivered interrupt vector using this exit.
+
+It gets triggered whenever KVM_CAP_PPC_EPR is enabled and an
+external interrupt has just been delivered into the guest. User space
+should put the acknowledged interrupt vector into the 'epr' field.
+
 		/* Fix the size of the union. */
 		char padding[256];
 	};
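
A minimal user space sketch of handling this exit (not part of this patch;
the mmap'ed kvm_run pointer and the pick_pending_vector() hook into the
user space interrupt controller model are placeholders):

    #include <linux/kvm.h>

    /* Placeholder for the user space interrupt controller's ack path. */
    extern __u32 pick_pending_vector(void);

    /* Called after KVM_RUN returns; 'run' is the vcpu's mmap'ed kvm_run. */
    static void handle_exit_epr(struct kvm_run *run)
    {
            if (run->exit_reason != KVM_EXIT_EPR)
                    return;

            /*
             * Acknowledge the interrupt in the user space controller and
             * hand the vector back; the next KVM_RUN completes delivery.
             */
            run->epr.epr = pick_pending_vector();
    }
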
@@ -2501,3 +2521,20 @@ handled in-kernel, while the other I/O instructions are passed to userspace.
 
 When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST
 SUBCHANNEL intercepts.
+
+6.5 KVM_CAP_PPC_EPR
+
+Architectures: ppc
+Parameters: args[0] defines whether the proxy facility is active
+Returns: 0 on success; -1 on error
+
+This capability enables or disables the delivery of interrupts through the
+external proxy facility.
+
+When enabled (args[0] != 0), every time the guest gets an external interrupt
+delivered, it automatically exits into user space with a KVM_EXIT_EPR exit
+to receive the topmost interrupt vector.
+
+When disabled (args[0] == 0), behavior is as if this facility is unsupported.
+
+When this capability is enabled, KVM_EXIT_EPR can occur.
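
For illustration only (not part of this patch), user space would turn the
facility on through the vcpu KVM_ENABLE_CAP ioctl, roughly as follows;
vcpu_fd is assumed to be an open vcpu file descriptor:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch: enable external proxy delivery for one vcpu. */
    static int enable_epr(int vcpu_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_PPC_EPR,
                    .args[0] = 1,   /* non-zero: exits with KVM_EXIT_EPR */
            };

            return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }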

@@ -520,6 +520,8 @@ struct kvm_vcpu_arch {
 	u8 sane;
 	u8 cpu_type;
 	u8 hcall_needed;
+	u8 epr_enabled;
+	u8 epr_needed;
 
 	u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
 

@@ -44,6 +44,7 @@ enum emulation_result {
 	EMULATE_DO_DCR,       /* kvm_run filled with DCR request */
 	EMULATE_FAIL,         /* can't emulate this instruction */
 	EMULATE_AGAIN,        /* something went wrong. go again */
+	EMULATE_DO_PAPR,      /* kvm_run filled with PAPR request */
 };
 
 extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
@@ -263,6 +264,15 @@ static inline void kvm_linear_init(void)
 {}
 #endif
 
+static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	mtspr(SPRN_GEPR, epr);
+#elif defined(CONFIG_BOOKE)
+	vcpu->arch.epr = epr;
+#endif
+}
+
 int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
 			      struct kvm_config_tlb *cfg);
 int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,

@@ -114,7 +114,10 @@ struct kvm_regs {
 /* Embedded Floating Point (SPE) -- IVOR32-34 if KVM_SREGS_E_IVOR */
 #define KVM_SREGS_E_SPE			(1 << 9)
 
-/* External Proxy (EXP) -- EPR */
+/*
+ * DEPRECATED! USE ONE_REG FOR THIS ONE!
+ * External Proxy (EXP) -- EPR
+ */
 #define KVM_SREGS_EXP			(1 << 10)
 
 /* External PID (E.PD) -- EPSC/EPLC */
@@ -412,5 +415,6 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_VPA_DTL	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x84)
 
 #define KVM_REG_PPC_EPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x85)
+#define KVM_REG_PPC_EPR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x86)
 
 #endif /* __LINUX_KVM_POWERPC_H */

@@ -34,6 +34,8 @@
 #define OP_31_XOP_MTSRIN	242
 #define OP_31_XOP_TLBIEL	274
 #define OP_31_XOP_TLBIE		306
+/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
+#define OP_31_XOP_FAKE_SC1	308
 #define OP_31_XOP_SLBMTE	402
 #define OP_31_XOP_SLBIE		434
 #define OP_31_XOP_SLBIA		498
@@ -170,6 +172,32 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		vcpu->arch.mmu.tlbie(vcpu, addr, large);
 		break;
 	}
+#ifdef CONFIG_KVM_BOOK3S_64_PR
+	case OP_31_XOP_FAKE_SC1:
+	{
+		/* SC 1 papr hypercalls */
+		ulong cmd = kvmppc_get_gpr(vcpu, 3);
+		int i;
+
+		if ((vcpu->arch.shared->msr & MSR_PR) ||
+		    !vcpu->arch.papr_enabled) {
+			emulated = EMULATE_FAIL;
+			break;
+		}
+
+		if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
+			break;
+
+		run->papr_hcall.nr = cmd;
+		for (i = 0; i < 9; ++i) {
+			ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
+			run->papr_hcall.args[i] = gpr;
+		}
+
+		emulated = EMULATE_DO_PAPR;
+		break;
+	}
+#endif
 	case OP_31_XOP_EIOIO:
 		break;
 	case OP_31_XOP_SLBMTE:
@@ -427,6 +455,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 	case SPRN_PMC3_GEKKO:
 	case SPRN_PMC4_GEKKO:
 	case SPRN_WPAR_GEKKO:
+	case SPRN_MSSSR0:
 		break;
 unprivileged:
 	default:
@@ -523,6 +552,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 	case SPRN_PMC3_GEKKO:
 	case SPRN_PMC4_GEKKO:
 	case SPRN_WPAR_GEKKO:
+	case SPRN_MSSSR0:
 		*spr_val = 0;
 		break;
 	default:

@@ -760,6 +760,11 @@ program_interrupt:
 		run->exit_reason = KVM_EXIT_MMIO;
 		r = RESUME_HOST_NV;
 		break;
+	case EMULATE_DO_PAPR:
+		run->exit_reason = KVM_EXIT_PAPR_HCALL;
+		vcpu->arch.hcall_needed = 1;
+		r = RESUME_HOST_NV;
+		break;
 	default:
 		BUG();
 	}

@@ -300,13 +300,22 @@ static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
 #endif
 }
 
+static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+	return mfspr(SPRN_GEPR);
+#else
+	return vcpu->arch.epr;
+#endif
+}
+
 /* Deliver the interrupt of the corresponding priority, if possible. */
 static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                         unsigned int priority)
 {
 	int allowed = 0;
 	ulong msr_mask = 0;
-	bool update_esr = false, update_dear = false;
+	bool update_esr = false, update_dear = false, update_epr = false;
 	ulong crit_raw = vcpu->arch.shared->critical;
 	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
 	bool crit;
@@ -330,6 +339,9 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 		keep_irq = true;
 	}
 
+	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_enabled)
+		update_epr = true;
+
 	switch (priority) {
 	case BOOKE_IRQPRIO_DTLB_MISS:
 	case BOOKE_IRQPRIO_DATA_STORAGE:
@@ -408,6 +420,8 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 			set_guest_esr(vcpu, vcpu->arch.queued_esr);
 		if (update_dear == true)
 			set_guest_dear(vcpu, vcpu->arch.queued_dear);
+		if (update_epr == true)
+			kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
 
 		new_msr &= msr_mask;
 #if defined(CONFIG_64BIT)
@@ -581,6 +595,11 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 
 	kvmppc_core_check_exceptions(vcpu);
 
+	if (vcpu->requests) {
+		/* Exception delivery raised request; start over */
+		return 1;
+	}
+
 	if (vcpu->arch.shared->msr & MSR_WE) {
 		local_irq_enable();
 		kvm_vcpu_block(vcpu);
@@ -610,6 +629,13 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 		r = 0;
 	}
 
+	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
+		vcpu->run->epr.epr = 0;
+		vcpu->arch.epr_needed = true;
+		vcpu->run->exit_reason = KVM_EXIT_EPR;
+		r = 0;
+	}
+
 	return r;
 }
 
@@ -1388,6 +1414,11 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 				     &vcpu->arch.dbg_reg.dac[dac], sizeof(u64));
 		break;
 	}
+	case KVM_REG_PPC_EPR: {
+		u32 epr = get_guest_epr(vcpu);
+		r = put_user(epr, (u32 __user *)(long)reg->addr);
+		break;
+	}
 #if defined(CONFIG_64BIT)
 	case KVM_REG_PPC_EPCR:
 		r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr);
@@ -1420,6 +1451,13 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 				     (u64 __user *)(long)reg->addr, sizeof(u64));
 		break;
 	}
+	case KVM_REG_PPC_EPR: {
+		u32 new_epr;
+		r = get_user(new_epr, (u32 __user *)(long)reg->addr);
+		if (!r)
+			kvmppc_set_epr(vcpu, new_epr);
+		break;
+	}
 #if defined(CONFIG_64BIT)
 	case KVM_REG_PPC_EPCR: {
 		u32 new_epcr;

@@ -269,6 +269,9 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 	case SPRN_ESR:
 		*spr_val = vcpu->arch.shared->esr;
 		break;
+	case SPRN_EPR:
+		*spr_val = vcpu->arch.epr;
+		break;
 	case SPRN_CSRR0:
 		*spr_val = vcpu->arch.csrr0;
 		break;

@@ -149,8 +149,6 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	case SPRN_TBWL: break;
 	case SPRN_TBWU: break;
 
-	case SPRN_MSSSR0: break;
-
 	case SPRN_DEC:
 		vcpu->arch.dec = spr_val;
 		kvmppc_emulate_dec(vcpu);
@@ -201,9 +199,6 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 	case SPRN_PIR:
 		spr_val = vcpu->vcpu_id;
 		break;
-	case SPRN_MSSSR0:
-		spr_val = 0;
-		break;
 
 	/* Note: mftb and TBRL/TBWL are user-accessible, so
 	 * the guest can always access the real TB anyways.

@@ -237,7 +237,8 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		r = RESUME_HOST;
 		break;
 	default:
-		BUG();
+		WARN_ON(1);
+		r = RESUME_GUEST;
 	}
 
 	return r;
@@ -305,6 +306,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 #ifdef CONFIG_BOOKE
 	case KVM_CAP_PPC_BOOKE_SREGS:
 	case KVM_CAP_PPC_BOOKE_WATCHDOG:
+	case KVM_CAP_PPC_EPR:
 #else
 	case KVM_CAP_PPC_SEGSTATE:
 	case KVM_CAP_PPC_HIOR:
@@ -720,6 +722,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		for (i = 0; i < 9; ++i)
 			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
 		vcpu->arch.hcall_needed = 0;
+#ifdef CONFIG_BOOKE
+	} else if (vcpu->arch.epr_needed) {
+		kvmppc_set_epr(vcpu, run->epr.epr);
+		vcpu->arch.epr_needed = 0;
+#endif
 	}
 
 	r = kvmppc_vcpu_run(run, vcpu);
@@ -761,6 +768,10 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 		r = 0;
 		vcpu->arch.papr_enabled = true;
 		break;
+	case KVM_CAP_PPC_EPR:
+		r = 0;
+		vcpu->arch.epr_enabled = cap->args[0];
+		break;
 #ifdef CONFIG_BOOKE
 	case KVM_CAP_PPC_BOOKE_WATCHDOG:
 		r = 0;

@@ -122,6 +122,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_WATCHDOG          18
 #define KVM_REQ_MASTERCLOCK_UPDATE 19
 #define KVM_REQ_MCLOCK_INPROGRESS 20
+#define KVM_REQ_EPR_EXIT          21
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

@@ -169,6 +169,7 @@ struct kvm_pit_config {
 #define KVM_EXIT_S390_UCONTROL	  20
 #define KVM_EXIT_WATCHDOG         21
 #define KVM_EXIT_S390_TSCH        22
+#define KVM_EXIT_EPR              23
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -295,6 +296,10 @@ struct kvm_run {
 			__u32 ipb;
 			__u8 dequeued;
 		} s390_tsch;
+		/* KVM_EXIT_EPR */
+		struct {
+			__u32 epr;
+		} epr;
 		/* Fix the size of the union. */
 		char padding[256];
 	};
@@ -656,6 +661,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_PPC_BOOKE_WATCHDOG 83
 #define KVM_CAP_PPC_HTAB_FD 84
 #define KVM_CAP_S390_CSS_SUPPORT 85
+#define KVM_CAP_PPC_EPR 86
 
 #ifdef KVM_CAP_IRQ_ROUTING