// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/asm-prototypes.h>
#include <asm/tm.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
#ifdef CONFIG_PPC_BOOK3S_64
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
#endif

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M   _PAGE_COHERENT
#endif

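/*
 * Split real mode helpers: the guest is in "split real mode" when it
 * runs with MSR_DR set but MSR_IR clear.  While in that state the
 * guest PC/LR are temporarily offset into the SPLIT_HACK window
 * (SPLIT_HACK_OFFS) and restored on the way out;
 * BOOK3S_HFLAG_SPLIT_HACK records that the fixup is currently applied.
 */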
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* We are in DR only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* We have not fixed up the guest already */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* The code is in fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		ulong lr = kvmppc_get_lr(vcpu);

		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}

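/*
 * Deliver an interrupt to the guest: save the old PC/MSR into
 * SRR0/SRR1, load the vcpu's interrupt MSR and branch to the vector
 * offset from HIOR.  A transactional guest is switched to TM suspended
 * state (MSR_TS_S) on delivery.
 */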
static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	kvmppc_unfixup_split_real(vcpu);

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = to_book3s(vcpu)->hior + vec;

#ifdef CONFIG_PPC_BOOK3S_64
	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;
#endif

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	kvmppc_set_msr(vcpu, new_msr);
}

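/*
 * vcpu_load: copy the guest's shadow SLB into the real-mode shadow
 * vcpu, disable Alternate Interrupt Location while the PR guest runs
 * (the real-mode entry code expects exceptions at the 0x0 vectors),
 * apply the split real mode fixup and restore guest TM state.
 */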
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu->in_use = 0;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);

	kvmppc_restore_tm_pr(vcpu);
}

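/*
 * vcpu_put: sync state the real-mode code may have touched back from
 * the shadow vcpu, give up the FP/Altivec/VSX and TAR facilities, save
 * guest TM state and re-enable AIL before the vcpu is scheduled out.
 */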
static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use) {
		kvmppc_copy_from_svcpu(vcpu);
	}
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_save_tm_pr(vcpu);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}

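/*
 * The volatile guest registers are kept in the kvm_vcpu struct; only
 * the subset needed by the real-mode code is copied into the shadow
 * vcpu (which lives in the PACA on 64-bit) just before guest entry and
 * copied back right after exit, while interrupts are still disabled so
 * we cannot be preempted and end up on another CPU's PACA.
 */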
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
	svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
	svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
	svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
	svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
	svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
	svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
	svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
	svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
	svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
	svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
	svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
	svcpu->cr  = vcpu->arch.regs.ccr;
	svcpu->xer = vcpu->arch.regs.xer;
	svcpu->ctr = vcpu->arch.regs.ctr;
	svcpu->lr  = vcpu->arch.regs.link;
	svcpu->pc  = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;

	svcpu_put(svcpu);
}

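/*
 * Recompute the MSR value that is actually used while the guest runs:
 * only a small whitelist of guest MSR bits is passed through, the rest
 * is forced so the guest stays in problem state with translation and
 * machine check enabled.
 */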
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
		MSR_TM | MSR_TS_MASK;
#else
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest has reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * In guest privileged state we want every TM transaction to
	 * fail, so clear the MSR TM bit here; any tbegin. then traps
	 * into the host.
	 */
	if (!(guest_msr & MSR_PR))
		smsr &= ~MSR_TM;
#endif
	vcpu->arch.shadow_msr = smsr;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ulong old_msr;
#endif

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
	vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
	vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
	vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
	vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
	vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
	vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
	vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
	vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
	vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
	vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
	vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
	vcpu->arch.regs.ccr  = svcpu->cr;
	vcpu->arch.regs.xer = svcpu->xer;
	vcpu->arch.regs.ctr = svcpu->ctr;
	vcpu->arch.regs.link  = svcpu->lr;
	vcpu->arch.regs.nip  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Unlike other MSR bits, the MSR[TS] bits can be changed by the
	 * guest without notifying the host: they are modified by
	 * unprivileged instructions like "tbegin"/"tend"/"tresume"/
	 * "tsuspend" in a PR KVM guest.
	 *
	 * It is necessary to sync them here to calculate a correct
	 * shadow_msr.
	 *
	 * A privileged guest's tbegin always fails at present, so we
	 * only need to take care of the problem state guest.
	 */
	old_msr = kvmppc_get_msr(vcpu);
	if (unlikely((old_msr & MSR_PR) &&
		     (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
				(old_msr & (MSR_TS_MASK)))) {
		old_msr &= ~(MSR_TS_MASK);
		old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
		kvmppc_set_msr_fast(vcpu, old_msr);
		kvmppc_recalc_shadow_msr(vcpu);
	}
#endif

	svcpu->in_use = false;

out:
	svcpu_put(svcpu);
}

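/*
 * Transactional memory support for PR guests: the TM SPRs (TFHAR,
 * TEXASR, TFIAR) are saved/restored around guest execution, and the
 * full checkpointed state is handled by _kvmppc_save_tm_pr() /
 * _kvmppc_restore_tm_pr() when a transaction is actually active.
 */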
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
	tm_disable();
}

void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
	mtspr(SPRN_TEXASR, vcpu->arch.texasr);
	mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	tm_disable();
}

/*
 * Load up the math (FP/VEC/VSX) facilities that are enabled in the
 * guest MSR but not yet enabled in hardware.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
	ulong exit_nr;
	ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
		(MSR_FP | MSR_VEC | MSR_VSX);

	if (!ext_diff)
		return;

	if (ext_diff == MSR_FP)
		exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
	else if (ext_diff == MSR_VEC)
		exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
	else
		exit_nr = BOOK3S_INTERRUPT_VSX;

	kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}

void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
		kvmppc_save_tm_sprs(vcpu);
		return;
	}

	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	preempt_disable();
	_kvmppc_save_tm_pr(vcpu, mfmsr());
	preempt_enable();
}

void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
		kvmppc_restore_tm_sprs(vcpu);
		if (kvmppc_get_msr(vcpu) & MSR_TM) {
			kvmppc_handle_lost_math_exts(vcpu);
			if (vcpu->arch.fscr & FSCR_TAR)
				kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		}
		return;
	}

	preempt_disable();
	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
	preempt_enable();

	if (kvmppc_get_msr(vcpu) & MSR_TM) {
		kvmppc_handle_lost_math_exts(vcpu);
		if (vcpu->arch.fscr & FSCR_TAR)
			kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
	}
}
#endif

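/*
 * Handle pending vcpu requests before re-entering the guest; a TLB
 * flush request is (ab)used to mean "drop all shadow MMU mappings".
 */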
static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

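/*
 * The MMU notifier callbacks keep the shadow MMU in sync with the
 * host: when a range of host virtual addresses goes away, every shadow
 * PTE backed by a gfn in that range is flushed on every vcpu.
 */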
/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

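/*
 * Update the guest MSR.  Besides storing the new value this handles
 * the side effects: blocking on MSR_POW, remapping segments when the
 * translation mode changes, keeping the magic page mapped, and
 * reloading any math facilities the guest has enabled.
 */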
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr;

	/* For PAPR guest, make sure MSR reflects guest mode */
	if (vcpu->arch.papr_enabled)
		msr = (msr & ~MSR_HV) | MSR_ME;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* We should never target guest MSR to TS=10 && PR=0,
	 * since we always fail transactions in guest privileged
	 * state.
	 */
	if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr))
		kvmppc_emulate_tabort(vcpu,
			TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT);
#endif

	old_msr = kvmppc_get_msr(vcpu);
	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, we need to flush it. Typically 32-bit magic
	 * page will be instantiated when calling into RTAS. Note: We
	 * assume that such transition only happens while in kernel mode,
	 * ie, we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (kvmppc_get_msr(vcpu) & MSR_TM)
		kvmppc_handle_lost_math_exts(vcpu);
#endif
}

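/*
 * Set the guest processor version register.  This selects the 32- or
 * 64-bit Book3S MMU backend and derives per-CPU quirk flags (dcbz
 * size, large-page/tlbie behaviour, native paired singles) from the
 * requested and host PVR values.
 */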
void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as 32 bytes store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
	case PVR_POWER9:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate 32 bytes dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

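/*
 * Check whether a guest physical address is backed by something we can
 * map: either the magic (shared) page or a gfn within a memslot.
 */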
static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

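/*
 * Handle a guest instruction or data storage interrupt: translate the
 * faulting address through the guest MMU, reflect true guest faults
 * back into the guest, map valid translations into the shadow MMU, and
 * fall back to MMIO emulation for addresses not backed by a memslot.
 */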
static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
		pte.wimg = HPTE_R_M;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		fallthrough;
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT || page_found == -EPERM) {
		/* Page not found in guest PTE entries, or protection fault */
		u64 flags;

		if (page_found == -EPERM)
			flags = DSISR_PROTFAULT;
		else
			flags = DSISR_NOHPTE;
		if (data) {
			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
		} else {
			kvmppc_core_queue_inst_storage(vcpu, flags);
		}
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}

/* Give up external provider (FPU, Altivec, VSX) */
|
|
|
|
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
|
|
|
|
{
|
|
|
|
struct thread_struct *t = &current->thread;
|
|
|
|
|
KVM: PPC: Book3S PR: Fix VSX handling
This fixes various issues in how we were handling the VSX registers
that exist on POWER7 machines. First, we were running off the end
of the current->thread.fpr[] array. Ultimately this was because the
vcpu->arch.vsr[] array is sized to be able to store both the FP
registers and the extra VSX registers (i.e. 64 entries), but PR KVM
only uses it for the extra VSX registers (i.e. 32 entries).
Secondly, calling load_up_vsx() from C code is a really bad idea,
because it jumps to fast_exception_return at the end, rather than
returning with a blr instruction. This was causing it to jump off
to a random location with random register contents, since it was using
the largely uninitialized stack frame created by kvmppc_load_up_vsx.
In fact, it isn't necessary to call either __giveup_vsx or load_up_vsx,
since giveup_fpu and load_up_fpu handle the extra VSX registers as well
as the standard FP registers on machines with VSX. Also, since VSX
instructions can access the VMX registers and the FP registers as well
as the extra VSX registers, we have to load up the FP and VMX registers
before we can turn on the MSR_VSX bit for the guest. Conversely, if
we save away any of the VSX or FP registers, we have to turn off MSR_VSX
for the guest.
To handle all this, it is more convenient for a single call to
kvmppc_giveup_ext() to handle all the state saving that needs to be done,
so we make it take a set of MSR bits rather than just one, and the switch
statement becomes a series of if statements. Similarly kvmppc_handle_ext
needs to be able to load up more than one set of registers.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
2012-11-05 02:16:46 +08:00
|
|
|
/*
|
|
|
|
* VSX instructions can access FP and vector registers, so if
|
|
|
|
* we are giving up VSX, make sure we give up FP and VMX as well.
|
|
|
|
*/
|
|
|
|
if (msr & MSR_VSX)
|
|
|
|
msr |= MSR_FP | MSR_VEC;
|
|
|
|
|
|
|
|
msr &= vcpu->arch.guest_owned_ext;
|
|
|
|
if (!msr)
|
2011-06-29 08:17:58 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
#ifdef DEBUG_EXT
|
|
|
|
printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
|
|
|
|
#endif
|
|
|
|
|
KVM: PPC: Book3S PR: Fix VSX handling
2012-11-05 02:16:46 +08:00
|
|
|
if (msr & MSR_FP) {
|
|
|
|
/*
|
|
|
|
* Note that on CPUs with VSX, giveup_fpu stores
|
|
|
|
* both the traditional FP registers and the added VSX
|
2013-09-10 18:20:42 +08:00
|
|
|
* registers into thread.fp_state.fpr[].
|
KVM: PPC: Book3S PR: Fix VSX handling
2012-11-05 02:16:46 +08:00
|
|
|
*/
|
2013-10-15 17:43:03 +08:00
|
|
|
if (t->regs->msr & MSR_FP)
|
KVM: PPC: Book3S PR: Don't corrupt guest state when kernel uses VMX
Currently the code assumes that once we load up guest FP/VSX or VMX
state into the CPU, it stays valid in the CPU registers until we
explicitly flush it to the thread_struct. However, on POWER7,
copy_page() and memcpy() can use VMX. These functions do flush the
VMX state to the thread_struct before using VMX instructions, but if
this happens while we have guest state in the VMX registers, and we
then re-enter the guest, we don't reload the VMX state from the
thread_struct, leading to guest corruption. This has been observed
to cause guest processes to segfault.
To fix this, we check before re-entering the guest that all of the
bits corresponding to facilities owned by the guest, as expressed
in vcpu->arch.guest_owned_ext, are set in current->thread.regs->msr.
Any bits that have been cleared correspond to facilities that have
been used by kernel code and thus flushed to the thread_struct, so
for them we reload the state from the thread_struct.
We also need to check current->thread.regs->msr before calling
giveup_fpu() or giveup_altivec(), since if the relevant bit is
clear, the state has already been flushed to the thread_struct and
to flush it again would corrupt it.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
2013-08-06 12:14:33 +08:00
|
|
|
giveup_fpu(current);
|
2013-10-15 17:43:03 +08:00
|
|
|
t->fp_save_area = NULL;
|
KVM: PPC: Book3S PR: Fix VSX handling
2012-11-05 02:16:46 +08:00
|
|
|
}
|
|
|
|
|
2011-06-29 08:17:58 +08:00
|
|
|
#ifdef CONFIG_ALTIVEC
|
KVM: PPC: Book3S PR: Fix VSX handling
2012-11-05 02:16:46 +08:00
|
|
|
if (msr & MSR_VEC) {
|
KVM: PPC: Book3S PR: Don't corrupt guest state when kernel uses VMX
2013-08-06 12:14:33 +08:00
|
|
|
if (current->thread.regs->msr & MSR_VEC)
|
|
|
|
giveup_altivec(current);
|
2013-10-15 17:43:03 +08:00
|
|
|
t->vr_save_area = NULL;
|
2011-06-29 08:17:58 +08:00
|
|
|
}
|
KVM: PPC: Book3S PR: Fix VSX handling
2012-11-05 02:16:46 +08:00
|
|
|
#endif
|
2011-06-29 08:17:58 +08:00
|
|
|
|
KVM: PPC: Book3S PR: Fix VSX handling
2012-11-05 02:16:46 +08:00
|
|
|
vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
|
2011-06-29 08:17:58 +08:00
|
|
|
kvmppc_recalc_shadow_msr(vcpu);
|
|
|
|
}
|
|
|
|
|
2014-04-29 22:48:44 +08:00
|
|
|
/* Give up facility (TAR / EBB / DSCR) */
|
2018-05-23 15:02:07 +08:00
|
|
|
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
|
2014-04-29 22:48:44 +08:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
|
|
if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
|
|
|
|
/* Facility not available to the guest, ignore giveup request */
|
|
|
|
return;
|
|
|
|
}
|
2014-04-22 18:26:58 +08:00
|
|
|
|
|
|
|
switch (fac) {
|
|
|
|
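/* Save the guest's TAR and switch back to the host value */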
case FSCR_TAR_LG:
|
|
|
|
vcpu->arch.tar = mfspr(SPRN_TAR);
|
|
|
|
mtspr(SPRN_TAR, current->thread.tar);
|
|
|
|
vcpu->arch.shadow_fscr &= ~FSCR_TAR;
|
|
|
|
break;
|
|
|
|
}
|
2014-04-29 22:48:44 +08:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2011-06-29 08:17:58 +08:00
|
|
|
/* Handle external providers (FPU, Altivec, VSX) */
|
|
|
|
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
|
|
|
|
ulong msr)
|
|
|
|
{
|
|
|
|
struct thread_struct *t = &current->thread;
|
|
|
|
|
|
|
|
/* When we have paired singles, we emulate in software */
|
|
|
|
if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
|
|
|
|
return RESUME_GUEST;
|
|
|
|
|
2014-04-24 19:46:24 +08:00
|
|
|
if (!(kvmppc_get_msr(vcpu) & msr)) {
|
2011-06-29 08:17:58 +08:00
|
|
|
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
|
|
|
|
return RESUME_GUEST;
|
|
|
|
}
|
|
|
|
|
KVM: PPC: Book3S PR: Fix VSX handling
2012-11-05 02:16:46 +08:00
|
|
|
if (msr == MSR_VSX) {
|
|
|
|
/* No VSX? Give an illegal instruction interrupt */
|
|
|
|
#ifdef CONFIG_VSX
|
|
|
|
if (!cpu_has_feature(CPU_FTR_VSX))
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
|
|
|
|
return RESUME_GUEST;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We have to load up all the FP and VMX registers before
|
|
|
|
* we can let the guest use VSX instructions.
|
|
|
|
*/
|
|
|
|
msr = MSR_FP | MSR_VEC | MSR_VSX;
|
2011-06-29 08:17:58 +08:00
|
|
|
}
|
|
|
|
|
KVM: PPC: Book3S PR: Fix VSX handling
2012-11-05 02:16:46 +08:00
|
|
|
/* See if we already own all the ext(s) needed */
|
|
|
|
msr &= ~vcpu->arch.guest_owned_ext;
|
|
|
|
if (!msr)
|
|
|
|
return RESUME_GUEST;
|
|
|
|
|
2011-06-29 08:17:58 +08:00
|
|
|
#ifdef DEBUG_EXT
|
|
|
|
printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
|
|
|
|
#endif
|
|
|
|
|
KVM: PPC: Book3S PR: Fix VSX handling
2012-11-05 02:16:46 +08:00
|
|
|
if (msr & MSR_FP) {
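/* Load the guest FP state and point the thread's FP save area at it */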
|
2014-05-05 01:26:08 +08:00
|
|
|
preempt_disable();
|
2013-10-15 17:43:01 +08:00
|
|
|
enable_kernel_fp();
|
2013-10-15 17:43:03 +08:00
|
|
|
load_fp_state(&vcpu->arch.fp);
|
2015-10-29 08:44:05 +08:00
|
|
|
disable_kernel_fp();
|
2013-10-15 17:43:03 +08:00
|
|
|
t->fp_save_area = &vcpu->arch.fp;
|
2014-05-05 01:26:08 +08:00
|
|
|
preempt_enable();
|
KVM: PPC: Book3S PR: Fix VSX handling
2012-11-05 02:16:46 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (msr & MSR_VEC) {
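/* Likewise for AltiVec: load the guest VMX state and redirect the VR save area */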
|
2011-06-29 08:17:58 +08:00
|
|
|
#ifdef CONFIG_ALTIVEC
|
2014-05-05 01:26:08 +08:00
|
|
|
preempt_disable();
|
2013-10-15 17:43:01 +08:00
|
|
|
enable_kernel_altivec();
|
2013-10-15 17:43:03 +08:00
|
|
|
load_vr_state(&vcpu->arch.vr);
|
2015-10-29 08:44:05 +08:00
|
|
|
disable_kernel_altivec();
|
2013-10-15 17:43:03 +08:00
|
|
|
t->vr_save_area = &vcpu->arch.vr;
|
2014-05-05 01:26:08 +08:00
|
|
|
preempt_enable();
|
2011-06-29 08:17:58 +08:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2013-10-15 17:43:03 +08:00
|
|
|
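/* Record the newly guest-owned facilities and update the shadow MSR */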
t->regs->msr |= msr;
|
2011-06-29 08:17:58 +08:00
|
|
|
vcpu->arch.guest_owned_ext |= msr;
|
|
|
|
kvmppc_recalc_shadow_msr(vcpu);
|
|
|
|
|
|
|
|
return RESUME_GUEST;
|
|
|
|
}
|
|
|
|
|
KVM: PPC: Book3S PR: Don't corrupt guest state when kernel uses VMX
2013-08-06 12:14:33 +08:00
|
|
|
/*
|
|
|
|
* Kernel code using FP or VMX could have flushed guest state to
|
|
|
|
* the thread_struct; if so, get it back now.
|
|
|
|
*/
|
|
|
|
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
unsigned long lost_ext;
|
|
|
|
|
|
|
|
lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
|
|
|
|
if (!lost_ext)
|
|
|
|
return;
|
|
|
|
|
2013-10-15 17:43:01 +08:00
|
|
|
if (lost_ext & MSR_FP) {
|
2014-05-05 01:26:08 +08:00
|
|
|
preempt_disable();
|
2013-10-15 17:43:01 +08:00
|
|
|
enable_kernel_fp();
|
2013-10-15 17:43:03 +08:00
|
|
|
load_fp_state(&vcpu->arch.fp);
|
2015-10-29 08:44:05 +08:00
|
|
|
disable_kernel_fp();
|
2014-05-05 01:26:08 +08:00
|
|
|
preempt_enable();
|
2013-10-15 17:43:01 +08:00
|
|
|
}
|
2013-09-20 12:52:42 +08:00
|
|
|
#ifdef CONFIG_ALTIVEC
|
2013-10-15 17:43:01 +08:00
|
|
|
if (lost_ext & MSR_VEC) {
|
2014-05-05 01:26:08 +08:00
|
|
|
preempt_disable();
|
2013-10-15 17:43:01 +08:00
|
|
|
enable_kernel_altivec();
|
2013-10-15 17:43:03 +08:00
|
|
|
load_vr_state(&vcpu->arch.vr);
|
2015-10-29 08:44:05 +08:00
|
|
|
disable_kernel_altivec();
|
2014-05-05 01:26:08 +08:00
|
|
|
preempt_enable();
|
2013-10-15 17:43:01 +08:00
|
|
|
}
|
2013-09-20 12:52:42 +08:00
|
|
|
#endif
|
KVM: PPC: Book3S PR: Don't corrupt guest state when kernel uses VMX
2013-08-06 12:14:33 +08:00
|
|
|
current->thread.regs->msr |= lost_ext;
|
|
|
|
}
|
|
|
|
|
2014-04-29 22:48:44 +08:00
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
|
|
|
2018-05-23 15:02:00 +08:00
|
|
|
void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
|
2014-04-29 22:48:44 +08:00
|
|
|
{
|
|
|
|
/* Inject the Interrupt Cause field and trigger a guest interrupt */
|
|
|
|
vcpu->arch.fscr &= ~(0xffULL << 56);
|
|
|
|
vcpu->arch.fscr |= (fac << 56);
|
|
|
|
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
|
|
|
|
}
|
|
|
|
|
|
|
|
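/* Try to emulate the instruction that tripped the facility interrupt;
 * emulation is only attempted for the guest's privileged state, and on
 * failure a facility unavailable interrupt is passed on to the guest. */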
static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
|
|
|
|
{
|
|
|
|
enum emulation_result er = EMULATE_FAIL;
|
|
|
|
|
|
|
|
if (!(kvmppc_get_msr(vcpu) & MSR_PR))
|
2020-04-27 12:35:11 +08:00
|
|
|
er = kvmppc_emulate_instruction(vcpu);
|
2014-04-29 22:48:44 +08:00
|
|
|
|
|
|
|
if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
|
|
|
|
/* Couldn't emulate, trigger interrupt in guest */
|
|
|
|
kvmppc_trigger_fac_interrupt(vcpu, fac);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Enable facilities (TAR, EBB, DSCR) for the guest */
|
|
|
|
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
|
|
|
|
{
|
2014-04-29 23:54:40 +08:00
|
|
|
bool guest_fac_enabled;
|
2014-04-29 22:48:44 +08:00
|
|
|
BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));
|
|
|
|
|
2014-04-29 23:54:40 +08:00
|
|
|
/*
|
|
|
|
* Not every facility is enabled by FSCR bits, check whether the
|
|
|
|
* guest has this facility enabled at all.
|
|
|
|
*/
|
|
|
|
switch (fac) {
|
|
|
|
case FSCR_TAR_LG:
|
|
|
|
case FSCR_EBB_LG:
|
|
|
|
guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
|
|
|
|
break;
|
|
|
|
case FSCR_TM_LG:
|
|
|
|
guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
guest_fac_enabled = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!guest_fac_enabled) {
|
2014-04-29 22:48:44 +08:00
|
|
|
/* Facility not enabled by the guest */
|
|
|
|
kvmppc_trigger_fac_interrupt(vcpu, fac);
|
|
|
|
return RESUME_GUEST;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (fac) {
|
2014-04-22 18:26:58 +08:00
|
|
|
case FSCR_TAR_LG:
|
|
|
|
/* TAR switching isn't lazy in Linux yet */
|
|
|
|
current->thread.tar = mfspr(SPRN_TAR);
|
|
|
|
mtspr(SPRN_TAR, vcpu->arch.tar);
|
|
|
|
vcpu->arch.shadow_fscr |= FSCR_TAR;
|
|
|
|
break;
|
2014-04-29 22:48:44 +08:00
|
|
|
default:
|
|
|
|
kvmppc_emulate_fac(vcpu, fac);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2018-05-23 15:02:02 +08:00
|
|
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
|
|
|
/* Since we disabled MSR_TM in privileged state, the mfspr instruction
|
|
|
|
* for TM spr can trigger TM fac unavailable. In this case, the
|
|
|
|
* emulation is handled by kvmppc_emulate_fac(), which invokes
|
|
|
|
* kvmppc_emulate_mfspr() finally. But note the mfspr can include
|
|
|
|
* RT for NV registers, so we need to restore those NV regs to reflect
|
|
|
|
* the update.
|
|
|
|
*/
|
|
|
|
if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
|
|
|
|
return RESUME_GUEST_NV;
|
|
|
|
#endif
|
|
|
|
|
2014-04-29 22:48:44 +08:00
|
|
|
return RESUME_GUEST;
|
|
|
|
}
|
2014-07-31 16:21:59 +08:00
|
|
|
|
|
|
|
void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
|
|
|
|
{
|
|
|
|
if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
|
|
|
|
/* TAR got dropped, drop it in shadow too */
|
|
|
|
kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
|
2018-05-23 15:02:07 +08:00
|
|
|
} else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
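/* TAR has just been enabled, give it to the guest straight away */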
|
|
|
|
vcpu->arch.fscr = fscr;
|
|
|
|
kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
|
|
|
|
return;
|
2014-07-31 16:21:59 +08:00
|
|
|
}
|
2018-05-23 15:02:07 +08:00
|
|
|
|
2014-07-31 16:21:59 +08:00
|
|
|
vcpu->arch.fscr = fscr;
|
|
|
|
}
|
2014-04-29 22:48:44 +08:00
|
|
|
#endif
|
|
|
|
|
2016-04-09 00:05:00 +08:00
|
|
|
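/* Single-step support: run the guest with MSR_SE set while KVM_GUESTDBG_SINGLESTEP is active */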
static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
|
|
|
|
u64 msr = kvmppc_get_msr(vcpu);
|
|
|
|
|
|
|
|
kvmppc_set_msr(vcpu, msr | MSR_SE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
|
|
|
|
u64 msr = kvmppc_get_msr(vcpu);
|
|
|
|
|
|
|
|
kvmppc_set_msr(vcpu, msr & ~MSR_SE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-27 12:35:11 +08:00
|
|
|
static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
|
2017-01-25 20:27:22 +08:00
|
|
|
{
|
|
|
|
enum emulation_result er;
|
|
|
|
ulong flags;
|
|
|
|
u32 last_inst;
|
|
|
|
int emul, r;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* shadow_srr1 only contains valid flags if we came here via a program
|
|
|
|
* exception. The other exceptions (emulation assist, FP unavailable,
|
|
|
|
* etc.) do not provide flags in SRR1, so use an illegal-instruction
|
|
|
|
* exception when injecting a program interrupt into the guest.
|
|
|
|
*/
|
|
|
|
if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
|
|
|
|
flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
|
|
|
|
else
|
|
|
|
flags = SRR1_PROGILL;
|
|
|
|
|
|
|
|
emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
|
|
|
|
if (emul != EMULATE_DONE)
|
|
|
|
return RESUME_GUEST;
|
|
|
|
|
|
|
|
if (kvmppc_get_msr(vcpu) & MSR_PR) {
|
|
|
|
#ifdef EXIT_DEBUG
|
|
|
|
pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
|
|
|
|
kvmppc_get_pc(vcpu), last_inst);
|
|
|
|
#endif
|
|
|
|
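/* From problem state we only emulate the (patched) dcbz; anything else
 * goes straight back to the guest as a program interrupt. */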
if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
|
|
|
|
kvmppc_core_queue_program(vcpu, flags);
|
|
|
|
return RESUME_GUEST;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
vcpu->stat.emulated_inst_exits++;
|
2020-04-27 12:35:11 +08:00
|
|
|
er = kvmppc_emulate_instruction(vcpu);
|
2017-01-25 20:27:22 +08:00
|
|
|
switch (er) {
|
|
|
|
case EMULATE_DONE:
|
|
|
|
r = RESUME_GUEST_NV;
|
|
|
|
break;
|
|
|
|
case EMULATE_AGAIN:
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
|
|
|
case EMULATE_FAIL:
|
|
|
|
pr_crit("%s: emulation at %lx failed (%08x)\n",
|
|
|
|
__func__, kvmppc_get_pc(vcpu), last_inst);
|
|
|
|
kvmppc_core_queue_program(vcpu, flags);
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
|
|
|
case EMULATE_DO_MMIO:
|
2020-04-27 12:35:11 +08:00
|
|
|
vcpu->run->exit_reason = KVM_EXIT_MMIO;
|
2017-01-25 20:27:22 +08:00
|
|
|
r = RESUME_HOST_NV;
|
|
|
|
break;
|
|
|
|
case EMULATE_EXIT_USER:
|
|
|
|
r = RESUME_HOST_NV;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
|
|
|
unsigned int exit_nr)
|
2011-06-29 08:17:58 +08:00
|
|
|
{
|
|
|
|
int r = RESUME_HOST;
|
2012-08-13 18:44:41 +08:00
|
|
|
int s;
|
2011-06-29 08:17:58 +08:00
|
|
|
|
|
|
|
vcpu->stat.sum_exits++;
|
|
|
|
|
|
|
|
run->exit_reason = KVM_EXIT_UNKNOWN;
|
|
|
|
run->ready_for_interrupt_injection = 1;
|
|
|
|
|
2012-08-13 07:04:19 +08:00
|
|
|
/* We get here with MSR.EE=1 */
|
2012-04-30 16:56:12 +08:00
|
|
|
|
2012-08-02 21:10:00 +08:00
|
|
|
trace_kvm_exit(exit_nr, vcpu);
|
2016-06-15 21:18:26 +08:00
|
|
|
guest_exit();
|
2012-08-12 17:27:49 +08:00
|
|
|
|
2011-06-29 08:17:58 +08:00
|
|
|
switch (exit_nr) {
|
|
|
|
case BOOK3S_INTERRUPT_INST_STORAGE:
|
2011-12-09 21:44:13 +08:00
|
|
|
{
|
KVM: PPC: Book3S PR: Keep volatile reg values in vcpu rather than shadow_vcpu
Currently PR-style KVM keeps the volatile guest register values
(R0 - R13, CR, LR, CTR, XER, PC) in a shadow_vcpu struct rather than
the main kvm_vcpu struct. For 64-bit, the shadow_vcpu exists in two
places, a kmalloc'd struct and in the PACA, and it gets copied back
and forth in kvmppc_core_vcpu_load/put(), because the real-mode code
can't rely on being able to access the kmalloc'd struct.
This changes the code to copy the volatile values into the shadow_vcpu
as one of the last things done before entering the guest. Similarly
the values are copied back out of the shadow_vcpu to the kvm_vcpu
immediately after exiting the guest. We arrange for interrupts to be
still disabled at this point so that we can't get preempted on 64-bit
and end up copying values from the wrong PACA.
This means that the accessor functions in kvm_book3s.h for these
registers are greatly simplified, and are same between PR and HV KVM.
In places where accesses to shadow_vcpu fields are now replaced by
accesses to the kvm_vcpu, we can also remove the svcpu_get/put pairs.
Finally, on 64-bit, we don't need the kmalloc'd struct at all any more.
With this, the time to read the PVR one million times in a loop went
from 567.7ms to 575.5ms (averages of 6 values), an increase of about
1.4% for this worse-case test for guest entries and exits. The
standard deviation of the measurements is about 11ms, so the
difference is only marginally significant statistically.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
2013-09-20 12:52:43 +08:00
|
|
|
ulong shadow_srr1 = vcpu->arch.shadow_srr1;
|
2011-06-29 08:17:58 +08:00
|
|
|
vcpu->stat.pf_instruc++;
|
|
|
|
|
2014-07-11 08:58:58 +08:00
|
|
|
if (kvmppc_is_split_real(vcpu))
|
|
|
|
kvmppc_fixup_split_real(vcpu);
|
|
|
|
|
2011-06-29 08:17:58 +08:00
|
|
|
#ifdef CONFIG_PPC_BOOK3S_32
|
|
|
|
/* We set segments as unused segments when invalidating them. So
|
|
|
|
* treat the respective fault as a segment fault. */
|
KVM: PPC: Book3S PR: Keep volatile reg values in vcpu rather than shadow_vcpu
2013-09-20 12:52:43 +08:00
|
|
|
{
|
|
|
|
struct kvmppc_book3s_shadow_vcpu *svcpu;
|
|
|
|
u32 sr;
|
|
|
|
|
|
|
|
svcpu = svcpu_get(vcpu);
|
|
|
|
sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
|
2011-12-09 21:44:13 +08:00
|
|
|
svcpu_put(svcpu);
|
KVM: PPC: Book3S PR: Keep volatile reg values in vcpu rather than shadow_vcpu
2013-09-20 12:52:43 +08:00
|
|
|
if (sr == SR_INVALID) {
|
|
|
|
kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
|
|
|
}
|
2011-06-29 08:17:58 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* only care about PTEG not found errors, but leave NX alone */
|
2011-12-09 21:44:13 +08:00
|
|
|
if (shadow_srr1 & 0x40000000) {
|
KVM: PPC: Book3S PR: Better handling of host-side read-only pages
2013-09-20 12:52:51 +08:00
|
|
|
int idx = srcu_read_lock(&vcpu->kvm->srcu);
|
2020-04-27 12:35:11 +08:00
|
|
|
r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr);
|
KVM: PPC: Book3S PR: Better handling of host-side read-only pages
2013-09-20 12:52:51 +08:00
|
|
|
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
2011-06-29 08:17:58 +08:00
|
|
|
vcpu->stat.sp_instruc++;
|
|
|
|
} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
|
|
|
|
(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
|
|
|
|
/*
|
|
|
|
* XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
|
|
|
|
* so we can't use the NX bit inside the guest. Let's cross our fingers,
|
|
|
|
* that no guest that needs the dcbz hack does NX.
|
|
|
|
*/
|
|
|
|
kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
} else {
|
2018-06-07 16:04:37 +08:00
|
|
|
kvmppc_core_queue_inst_storage(vcpu,
|
|
|
|
shadow_srr1 & 0x58000000);
|
2011-06-29 08:17:58 +08:00
|
|
|
r = RESUME_GUEST;
|
|
|
|
}
|
|
|
|
break;
|
2011-12-09 21:44:13 +08:00
|
|
|
}
|
2011-06-29 08:17:58 +08:00
|
|
|
case BOOK3S_INTERRUPT_DATA_STORAGE:
|
|
|
|
{
|
|
|
|
ulong dar = kvmppc_get_fault_dar(vcpu);
|
KVM: PPC: Book3S PR: Keep volatile reg values in vcpu rather than shadow_vcpu
2013-09-20 12:52:43 +08:00
|
|
|
u32 fault_dsisr = vcpu->arch.fault_dsisr;
|
2011-06-29 08:17:58 +08:00
|
|
|
vcpu->stat.pf_storage++;
|
|
|
|
|
|
|
|
#ifdef CONFIG_PPC_BOOK3S_32
|
|
|
|
/* We set segments as unused segments when invalidating them. So
|
|
|
|
* treat the respective fault as a segment fault. */
|
KVM: PPC: Book3S PR: Keep volatile reg values in vcpu rather than shadow_vcpu
2013-09-20 12:52:43 +08:00
|
|
|
{
|
|
|
|
struct kvmppc_book3s_shadow_vcpu *svcpu;
|
|
|
|
u32 sr;
|
|
|
|
|
|
|
|
svcpu = svcpu_get(vcpu);
|
|
|
|
sr = svcpu->sr[dar >> SID_SHIFT];
|
2011-12-09 21:44:13 +08:00
|
|
|
svcpu_put(svcpu);
|
KVM: PPC: Book3S PR: Keep volatile reg values in vcpu rather than shadow_vcpu
2013-09-20 12:52:43 +08:00
|
|
|
if (sr == SR_INVALID) {
|
|
|
|
kvmppc_mmu_map_segment(vcpu, dar);
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
|
|
|
}
|
2011-06-29 08:17:58 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
KVM: PPC: Book3S PR: Better handling of host-side read-only pages
2013-09-20 12:52:51 +08:00
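As a hedged sketch of the decision this commit describes, the standalone example below models the 'iswrite'/'writable' interplay. get_host_pfn() is a hypothetical stand-in for the gfn_to_pfn_prot() call, and the types are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct host_map { unsigned long pfn; bool may_write; };

/* Stand-in for gfn_to_pfn_prot(): request write access only for stores and
 * report whether the host page is actually writable (it may not be, e.g.
 * after KSM has merged it with another page). */
static unsigned long get_host_pfn(unsigned long gfn, bool write_fault,
				  bool host_page_shared, bool *writable)
{
	*writable = write_fault && !host_page_shared;	/* placeholder policy */
	return gfn;					/* identity map for the sketch */
}

static struct host_map map_guest_page(unsigned long gfn, bool iswrite,
				      bool guest_pte_allows_write,
				      bool host_page_shared)
{
	struct host_map map;
	bool writable;

	map.pfn = get_host_pfn(gfn, iswrite, host_page_shared, &writable);
	/* Install a read-only host mapping unless both the guest HPTE and the
	 * host allow writing; a later guest store then takes a protection
	 * fault and the mapping is redone with iswrite = true. */
	map.may_write = guest_pte_allows_write && writable;
	return map;
}

int main(void)
{
	struct host_map m = map_guest_page(42, false, true, true);
	printf("pfn %lu mapped %s\n", m.pfn, m.may_write ? "rw" : "ro");
	return 0;
}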
|
|
|
/*
|
|
|
|
* We need to handle missing shadow PTEs, and
|
|
|
|
* protection faults due to us mapping a page read-only
|
|
|
|
* when the guest thinks it is writable.
|
|
|
|
*/
|
|
|
|
if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
|
|
|
|
int idx = srcu_read_lock(&vcpu->kvm->srcu);
|
2020-04-27 12:35:11 +08:00
|
|
|
r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
|
2013-09-20 12:52:51 +08:00
|
|
|
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
2011-06-29 08:17:58 +08:00
|
|
|
} else {
|
2018-06-07 16:04:37 +08:00
|
|
|
kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
|
2011-06-29 08:17:58 +08:00
|
|
|
r = RESUME_GUEST;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case BOOK3S_INTERRUPT_DATA_SEGMENT:
|
|
|
|
if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
|
2014-04-24 19:46:24 +08:00
|
|
|
kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
|
2011-06-29 08:17:58 +08:00
|
|
|
kvmppc_book3s_queue_irqprio(vcpu,
|
|
|
|
BOOK3S_INTERRUPT_DATA_SEGMENT);
|
|
|
|
}
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
|
|
|
case BOOK3S_INTERRUPT_INST_SEGMENT:
|
|
|
|
if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
|
|
|
|
kvmppc_book3s_queue_irqprio(vcpu,
|
|
|
|
BOOK3S_INTERRUPT_INST_SEGMENT);
|
|
|
|
}
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
|
|
|
/* We're good on these - the host merely wanted to get our attention */
|
|
|
|
case BOOK3S_INTERRUPT_DECREMENTER:
|
2012-03-14 06:05:16 +08:00
|
|
|
case BOOK3S_INTERRUPT_HV_DECREMENTER:
|
KVM: PPC: Book3S PR: Cope with doorbell interrupts
When the PR host is running on a POWER8 machine in POWER8 mode, it
will use doorbell interrupts for IPIs. If one of them arrives while
we are in the guest, we pop out of the guest with trap number 0xA00,
which isn't handled by kvmppc_handle_exit_pr, leading to the following
BUG_ON:
[ 331.436215] exit_nr=0xa00 | pc=0x1d2c | msr=0x800000000000d032
[ 331.437522] ------------[ cut here ]------------
[ 331.438296] kernel BUG at arch/powerpc/kvm/book3s_pr.c:982!
[ 331.439063] Oops: Exception in kernel mode, sig: 5 [#2]
[ 331.439819] SMP NR_CPUS=1024 NUMA pSeries
[ 331.440552] Modules linked in: tun nf_conntrack_netbios_ns nf_conntrack_broadcast ipt_MASQUERADE ip6t_REJECT xt_conntrack ebtable_nat ebtable_broute bridge stp llc ebtable_filter ebtables ip6table_nat nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6 ip6table_mangle ip6table_security ip6table_raw ip6table_filter ip6_tables iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat nf_conntrack iptable_mangle iptable_security iptable_raw virtio_net kvm binfmt_misc ibmvscsi scsi_transport_srp scsi_tgt virtio_blk
[ 331.447614] CPU: 11 PID: 1296 Comm: qemu-system-ppc Tainted: G D 3.11.7-200.2.fc19.ppc64p7 #1
[ 331.448920] task: c0000003bdc8c000 ti: c0000003bd32c000 task.ti: c0000003bd32c000
[ 331.450088] NIP: d0000000025d6b9c LR: d0000000025d6b98 CTR: c0000000004cfdd0
[ 331.451042] REGS: c0000003bd32f420 TRAP: 0700 Tainted: G D (3.11.7-200.2.fc19.ppc64p7)
[ 331.452331] MSR: 800000000282b032 <SF,VEC,VSX,EE,FP,ME,IR,DR,RI> CR: 28004824 XER: 20000000
[ 331.454616] SOFTE: 1
[ 331.455106] CFAR: c000000000848bb8
[ 331.455726]
GPR00: d0000000025d6b98 c0000003bd32f6a0 d0000000026017b8 0000000000000032
GPR04: c0000000018627f8 c000000001873208 320d0a3030303030 3030303030643033
GPR08: c000000000c490a8 0000000000000000 0000000000000000 0000000000000002
GPR12: 0000000028004822 c00000000fdc6300 0000000000000000 00000100076ec310
GPR16: 000000002ae343b8 00003ffffd397398 0000000000000000 0000000000000000
GPR20: 00000100076f16f4 00000100076ebe60 0000000000000008 ffffffffffffffff
GPR24: 0000000000000000 0000008001041e60 0000000000000000 0000008001040ce8
GPR28: c0000003a2d80000 0000000000000a00 0000000000000001 c0000003a2681810
[ 331.466504] NIP [d0000000025d6b9c] .kvmppc_handle_exit_pr+0x75c/0xa80 [kvm]
[ 331.466999] LR [d0000000025d6b98] .kvmppc_handle_exit_pr+0x758/0xa80 [kvm]
[ 331.467517] Call Trace:
[ 331.467909] [c0000003bd32f6a0] [d0000000025d6b98] .kvmppc_handle_exit_pr+0x758/0xa80 [kvm] (unreliable)
[ 331.468553] [c0000003bd32f750] [d0000000025d98f0] kvm_start_lightweight+0xb4/0xc4 [kvm]
[ 331.469189] [c0000003bd32f920] [d0000000025d7648] .kvmppc_vcpu_run_pr+0xd8/0x270 [kvm]
[ 331.469838] [c0000003bd32f9c0] [d0000000025cf748] .kvmppc_vcpu_run+0xc8/0xf0 [kvm]
[ 331.470790] [c0000003bd32fa50] [d0000000025cc19c] .kvm_arch_vcpu_ioctl_run+0x5c/0x1b0 [kvm]
[ 331.471401] [c0000003bd32fae0] [d0000000025c4888] .kvm_vcpu_ioctl+0x478/0x730 [kvm]
[ 331.472026] [c0000003bd32fc90] [c00000000026192c] .do_vfs_ioctl+0x4dc/0x7a0
[ 331.472561] [c0000003bd32fd80] [c000000000261cc4] .SyS_ioctl+0xd4/0xf0
[ 331.473095] [c0000003bd32fe30] [c000000000009ed8] syscall_exit+0x0/0x98
[ 331.473633] Instruction dump:
[ 331.473766] 4bfff9b4 2b9d0800 419efc18 60000000 60420000 3d220000 e8bf11a0 e8df12a8
[ 331.474733] 7fa4eb78 e8698660 48015165 e8410028 <0fe00000> 813f00e4 3ba00000 39290001
[ 331.475386] ---[ end trace 49fc47d994c1f8f2 ]---
[ 331.479817]
This fixes the problem by making kvmppc_handle_exit_pr() recognize the
interrupt. We also need to jump to the doorbell interrupt handler in
book3s_segment.S to handle the interrupt on the way out of the guest.
Having done that, there's nothing further to be done in
kvmppc_handle_exit_pr().
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
2014-01-08 18:25:36 +08:00
|
|
|
case BOOK3S_INTERRUPT_DOORBELL:
|
2014-06-16 22:37:38 +08:00
|
|
|
case BOOK3S_INTERRUPT_H_DOORBELL:
|
2011-06-29 08:17:58 +08:00
|
|
|
vcpu->stat.dec_exits++;
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
|
|
|
case BOOK3S_INTERRUPT_EXTERNAL:
|
2012-03-14 06:05:16 +08:00
|
|
|
case BOOK3S_INTERRUPT_EXTERNAL_HV:
|
2018-06-05 22:48:55 +08:00
|
|
|
case BOOK3S_INTERRUPT_H_VIRT:
|
2011-06-29 08:17:58 +08:00
|
|
|
vcpu->stat.ext_intr_exits++;
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
2018-06-05 22:48:55 +08:00
|
|
|
case BOOK3S_INTERRUPT_HMI:
|
2011-06-29 08:17:58 +08:00
|
|
|
case BOOK3S_INTERRUPT_PERFMON:
|
2018-06-05 22:48:55 +08:00
|
|
|
case BOOK3S_INTERRUPT_SYSTEM_RESET:
|
2011-06-29 08:17:58 +08:00
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
|
|
|
case BOOK3S_INTERRUPT_PROGRAM:
|
2012-03-14 06:05:16 +08:00
|
|
|
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
|
2020-04-27 12:35:11 +08:00
|
|
|
r = kvmppc_exit_pr_progint(vcpu, exit_nr);
|
2011-06-29 08:17:58 +08:00
|
|
|
break;
|
|
|
|
case BOOK3S_INTERRUPT_SYSCALL:
|
2014-07-24 00:06:21 +08:00
|
|
|
{
|
|
|
|
u32 last_sc;
|
|
|
|
int emul;
|
|
|
|
|
|
|
|
/* Get last sc for papr */
|
|
|
|
if (vcpu->arch.papr_enabled) {
|
|
|
|
/* The sc instruction points SRR0 to the next inst */
|
|
|
|
emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
|
|
|
|
if (emul != EMULATE_DONE) {
|
|
|
|
kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-08-08 23:26:24 +08:00
|
|
|
if (vcpu->arch.papr_enabled &&
|
2014-07-24 00:06:21 +08:00
|
|
|
(last_sc == 0x44000022) &&
|
2014-04-24 19:46:24 +08:00
|
|
|
!(kvmppc_get_msr(vcpu) & MSR_PR)) {
|
2011-08-08 23:26:24 +08:00
|
|
|
/* SC 1 papr hypercalls */
|
|
|
|
ulong cmd = kvmppc_get_gpr(vcpu, 3);
|
|
|
|
int i;
|
|
|
|
|
2013-10-08 00:47:59 +08:00
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
2011-08-08 23:26:24 +08:00
|
|
|
if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
|
|
|
}
|
2011-11-08 15:17:39 +08:00
|
|
|
#endif
|
2011-08-08 23:26:24 +08:00
|
|
|
|
|
|
|
run->papr_hcall.nr = cmd;
|
|
|
|
for (i = 0; i < 9; ++i) {
|
|
|
|
ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
|
|
|
|
run->papr_hcall.args[i] = gpr;
|
|
|
|
}
|
|
|
|
run->exit_reason = KVM_EXIT_PAPR_HCALL;
|
|
|
|
vcpu->arch.hcall_needed = 1;
|
|
|
|
r = RESUME_HOST;
|
|
|
|
} else if (vcpu->arch.osi_enabled &&
|
2011-06-29 08:17:58 +08:00
|
|
|
(((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
|
|
|
|
(((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
|
|
|
|
/* MOL hypercalls */
|
|
|
|
u64 *gprs = run->osi.gprs;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
run->exit_reason = KVM_EXIT_OSI;
|
|
|
|
for (i = 0; i < 32; i++)
|
|
|
|
gprs[i] = kvmppc_get_gpr(vcpu, i);
|
|
|
|
vcpu->arch.osi_needed = 1;
|
|
|
|
r = RESUME_HOST_NV;
|
2014-04-24 19:46:24 +08:00
|
|
|
} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
|
2011-06-29 08:17:58 +08:00
|
|
|
(((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
|
|
|
|
/* KVM PV hypercalls */
|
|
|
|
kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
} else {
|
|
|
|
/* Guest syscalls */
|
|
|
|
vcpu->stat.syscall_exits++;
|
|
|
|
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
}
|
|
|
|
break;
|
2014-07-24 00:06:21 +08:00
|
|
|
}
|
2011-06-29 08:17:58 +08:00
|
|
|
case BOOK3S_INTERRUPT_FP_UNAVAIL:
|
|
|
|
case BOOK3S_INTERRUPT_ALTIVEC:
|
|
|
|
case BOOK3S_INTERRUPT_VSX:
|
|
|
|
{
|
|
|
|
int ext_msr = 0;
|
2014-07-24 00:06:20 +08:00
|
|
|
int emul;
|
|
|
|
u32 last_inst;
|
|
|
|
|
|
|
|
if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
|
|
|
|
/* Do paired single instruction emulation */
|
2014-07-24 00:06:21 +08:00
|
|
|
emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
|
|
|
|
&last_inst);
|
2014-07-24 00:06:20 +08:00
|
|
|
if (emul == EMULATE_DONE)
|
2020-04-27 12:35:11 +08:00
|
|
|
r = kvmppc_exit_pr_progint(vcpu, exit_nr);
|
2014-07-24 00:06:20 +08:00
|
|
|
else
|
|
|
|
r = RESUME_GUEST;
|
2011-06-29 08:17:58 +08:00
|
|
|
|
2014-07-24 00:06:20 +08:00
|
|
|
break;
|
2011-06-29 08:17:58 +08:00
|
|
|
}
|
|
|
|
|
2014-07-24 00:06:20 +08:00
|
|
|
/* Enable external provider */
|
|
|
|
switch (exit_nr) {
|
|
|
|
case BOOK3S_INTERRUPT_FP_UNAVAIL:
|
|
|
|
ext_msr = MSR_FP;
|
2011-06-29 08:17:58 +08:00
|
|
|
break;
|
2014-07-24 00:06:20 +08:00
|
|
|
|
|
|
|
case BOOK3S_INTERRUPT_ALTIVEC:
|
|
|
|
ext_msr = MSR_VEC;
|
2011-06-29 08:17:58 +08:00
|
|
|
break;
|
2014-07-24 00:06:20 +08:00
|
|
|
|
|
|
|
case BOOK3S_INTERRUPT_VSX:
|
|
|
|
ext_msr = MSR_VSX;
|
2011-06-29 08:17:58 +08:00
|
|
|
break;
|
|
|
|
}
|
2014-07-24 00:06:20 +08:00
|
|
|
|
|
|
|
r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
|
2011-06-29 08:17:58 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case BOOK3S_INTERRUPT_ALIGNMENT:
|
2014-07-24 00:06:20 +08:00
|
|
|
{
|
2014-07-24 00:06:21 +08:00
|
|
|
u32 last_inst;
|
|
|
|
int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
|
2014-07-24 00:06:20 +08:00
|
|
|
|
|
|
|
if (emul == EMULATE_DONE) {
|
2014-04-24 19:46:24 +08:00
|
|
|
u32 dsisr;
|
|
|
|
u64 dar;
|
|
|
|
|
|
|
|
dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
|
|
|
|
dar = kvmppc_alignment_dar(vcpu, last_inst);
|
|
|
|
|
|
|
|
kvmppc_set_dsisr(vcpu, dsisr);
|
|
|
|
kvmppc_set_dar(vcpu, dar);
|
|
|
|
|
2011-06-29 08:17:58 +08:00
|
|
|
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
|
|
|
|
}
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
2014-07-24 00:06:20 +08:00
|
|
|
}
|
2014-04-29 22:48:44 +08:00
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
|
|
case BOOK3S_INTERRUPT_FAC_UNAVAIL:
|
2018-05-23 15:02:02 +08:00
|
|
|
r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
|
2014-04-29 22:48:44 +08:00
|
|
|
break;
|
|
|
|
#endif
|
2011-06-29 08:17:58 +08:00
|
|
|
case BOOK3S_INTERRUPT_MACHINE_CHECK:
|
|
|
|
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
break;
|
2016-04-09 00:05:00 +08:00
|
|
|
case BOOK3S_INTERRUPT_TRACE:
|
|
|
|
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
|
|
|
|
run->exit_reason = KVM_EXIT_DEBUG;
|
|
|
|
r = RESUME_HOST;
|
|
|
|
} else {
|
|
|
|
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
|
|
|
|
r = RESUME_GUEST;
|
|
|
|
}
|
|
|
|
break;
|
2011-06-29 08:17:58 +08:00
|
|
|
default:
|
2011-12-09 21:44:13 +08:00
|
|
|
{
|
2013-09-20 12:52:43 +08:00
|
|
|
ulong shadow_srr1 = vcpu->arch.shadow_srr1;
|
2011-06-29 08:17:58 +08:00
|
|
|
/* Ugh - bork here! What did we get? */
|
|
|
|
printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
|
2011-12-09 21:44:13 +08:00
|
|
|
exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
|
2011-06-29 08:17:58 +08:00
|
|
|
r = RESUME_HOST;
|
|
|
|
BUG();
|
|
|
|
break;
|
|
|
|
}
|
2011-12-09 21:44:13 +08:00
|
|
|
}
|
2011-06-29 08:17:58 +08:00
|
|
|
|
|
|
|
if (!(r & RESUME_HOST)) {
|
|
|
|
/* To avoid clobbering exit_reason, only check for signals if
|
|
|
|
* we aren't already exiting to userspace for some other
|
|
|
|
* reason. */
|
2011-12-19 20:36:55 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Interrupts could be timers for the guest which we have to
|
|
|
|
* inject again, so let's postpone them until we're in the guest
|
|
|
|
* and if we really did time things so badly, then we just exit
|
|
|
|
* again due to a host external interrupt.
|
|
|
|
*/
|
2012-08-13 18:44:41 +08:00
|
|
|
s = kvmppc_prepare_to_enter(vcpu);
|
2014-01-10 09:18:40 +08:00
|
|
|
if (s <= 0)
|
2012-08-13 18:44:41 +08:00
|
|
|
r = s;
|
2014-01-10 09:18:40 +08:00
|
|
|
else {
|
|
|
|
/* interrupts now hard-disabled */
|
2013-07-11 06:47:39 +08:00
|
|
|
kvmppc_fix_ee_before_entry();
|
2011-06-29 08:17:58 +08:00
|
|
|
}
|
2014-01-10 09:18:40 +08:00
|
|
|
|
KVM: PPC: Book3S PR: Don't corrupt guest state when kernel uses VMX
Currently the code assumes that once we load up guest FP/VSX or VMX
state into the CPU, it stays valid in the CPU registers until we
explicitly flush it to the thread_struct. However, on POWER7,
copy_page() and memcpy() can use VMX. These functions do flush the
VMX state to the thread_struct before using VMX instructions, but if
this happens while we have guest state in the VMX registers, and we
then re-enter the guest, we don't reload the VMX state from the
thread_struct, leading to guest corruption. This has been observed
to cause guest processes to segfault.
To fix this, we check before re-entering the guest that all of the
bits corresponding to facilities owned by the guest, as expressed
in vcpu->arch.guest_owned_ext, are set in current->thread.regs->msr.
Any bits that have been cleared correspond to facilities that have
been used by kernel code and thus flushed to the thread_struct, so
for them we reload the state from the thread_struct.
We also need to check current->thread.regs->msr before calling
giveup_fpu() or giveup_altivec(), since if the relevant bit is
clear, the state has already been flushed to the thread_struct and
to flush it again would corrupt it.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
2013-08-06 12:14:33 +08:00
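A compact standalone sketch of the re-entry check described above follows; the MSR bit values and helper are illustrative stand-ins rather than the kernel's definitions.

#include <stdio.h>

#define MSR_FP	(1UL << 13)
#define MSR_VSX	(1UL << 23)
#define MSR_VEC	(1UL << 25)

/* Facilities the guest owns but whose state the host kernel has since
 * flushed (their bits were cleared in current->thread.regs->msr), and which
 * therefore must be reloaded from the thread struct before re-entry. */
static unsigned long lost_facilities(unsigned long guest_owned_ext,
				     unsigned long host_regs_msr)
{
	return guest_owned_ext & ~host_regs_msr;
}

int main(void)
{
	/* Guest owned FP+VEC; the kernel used VMX (e.g. memcpy) and flushed it. */
	unsigned long lost = lost_facilities(MSR_FP | MSR_VEC, MSR_FP);

	printf("reload%s%s\n", lost & MSR_FP ? " FP" : "",
	       lost & MSR_VEC ? " VEC" : "");
	return 0;
}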
|
|
|
kvmppc_handle_lost_ext(vcpu);
|
2011-06-29 08:17:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
trace_kvm_book3s_reenter(r, vcpu);
|
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
|
|
|
|
struct kvm_sregs *sregs)
|
2011-06-29 08:17:58 +08:00
|
|
|
{
|
|
|
|
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
|
|
|
|
int i;
|
|
|
|
|
|
|
|
sregs->pvr = vcpu->arch.pvr;
|
|
|
|
|
|
|
|
sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
|
|
|
|
if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
|
|
|
|
for (i = 0; i < 64; i++) {
|
|
|
|
sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
|
|
|
|
sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for (i = 0; i < 16; i++)
|
2014-04-24 19:46:24 +08:00
|
|
|
sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);
|
2011-06-29 08:17:58 +08:00
|
|
|
|
|
|
|
for (i = 0; i < 8; i++) {
|
|
|
|
sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
|
|
|
|
sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
|
|
|
|
struct kvm_sregs *sregs)
|
2011-06-29 08:17:58 +08:00
|
|
|
{
|
|
|
|
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
|
|
|
|
int i;
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
kvmppc_set_pvr_pr(vcpu, sregs->pvr);
|
2011-06-29 08:17:58 +08:00
|
|
|
|
|
|
|
vcpu3s->sdr1 = sregs->u.s.sdr1;
|
KVM: PPC: Book3S PR: Only install valid SLBs during KVM_SET_SREGS
Userland passes an array of 64 SLB descriptors to KVM_SET_SREGS,
some of which are valid (i.e., SLB_ESID_V is set) and the rest are
likely all-zeroes (with QEMU at least).
Each of them is then passed to kvmppc_mmu_book3s_64_slbmte(), which
assumes to find the SLB index in the 3 lower bits of its rb argument.
When passed zeroed arguments, it happily overwrites the 0th SLB entry
with zeroes. This is exactly what happens while doing live migration
with QEMU when the destination pushes the incoming SLB descriptors to
KVM PR. When reloading the SLBs at the next synchronization, QEMU first
clears its SLB array and only restores valid ones, but the 0th one is
now gone and we cannot access the corresponding memory anymore:
(qemu) x/x $pc
c0000000000b742c: Cannot access memory
To avoid this, let's filter out non-valid SLB entries. While here, we
also force a full SLB flush before installing new entries. Since SLB
is for 64-bit only, we now build this path conditionally to avoid a
build break on 32-bit, which doesn't define SLB_ESID_V.
Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
2017-10-16 18:29:44 +08:00
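The standalone sketch below shows the filter in isolation; the SLB_ESID_V bit position, descriptor layout, and callbacks are stand-ins for illustration only.

#include <stdint.h>
#include <stdio.h>

#define SLB_ESID_V	(1ULL << 27)	/* valid bit, illustrative position */

struct slb_desc { uint64_t slbe, slbv; };

static void install_slbs(const struct slb_desc *slb, int n,
			 void (*slbia)(void),
			 void (*slbmte)(uint64_t rs, uint64_t rb))
{
	int i;

	slbia();				/* flush everything first */
	for (i = 0; i < n; i++)
		if (slb[i].slbe & SLB_ESID_V)	/* skip the all-zero entries */
			slbmte(slb[i].slbv, slb[i].slbe);
}

static void fake_slbia(void) { puts("slbia"); }
static void fake_slbmte(uint64_t rs, uint64_t rb)
{
	printf("slbmte rs=%#llx rb=%#llx\n",
	       (unsigned long long)rs, (unsigned long long)rb);
}

int main(void)
{
	struct slb_desc slb[3] = { { SLB_ESID_V | 1, 0x10 }, { 0, 0 }, { 0, 0 } };

	install_slbs(slb, 3, fake_slbia, fake_slbmte);
	return 0;
}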
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
2011-06-29 08:17:58 +08:00
|
|
|
if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
|
2017-10-16 18:29:44 +08:00
|
|
|
/* Flush all SLB entries */
|
|
|
|
vcpu->arch.mmu.slbmte(vcpu, 0, 0);
|
|
|
|
vcpu->arch.mmu.slbia(vcpu);
|
|
|
|
|
2011-06-29 08:17:58 +08:00
|
|
|
for (i = 0; i < 64; i++) {
|
2017-10-16 18:29:44 +08:00
|
|
|
u64 rb = sregs->u.s.ppc64.slb[i].slbe;
|
|
|
|
u64 rs = sregs->u.s.ppc64.slb[i].slbv;
|
|
|
|
|
|
|
|
if (rb & SLB_ESID_V)
|
|
|
|
vcpu->arch.mmu.slbmte(vcpu, rs, rb);
|
2011-06-29 08:17:58 +08:00
|
|
|
}
|
2017-10-16 18:29:44 +08:00
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
{
|
2011-06-29 08:17:58 +08:00
|
|
|
for (i = 0; i < 16; i++) {
|
|
|
|
vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
|
|
|
|
}
|
|
|
|
for (i = 0; i < 8; i++) {
|
|
|
|
kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
|
|
|
|
(u32)sregs->u.s.ppc32.ibat[i]);
|
|
|
|
kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
|
|
|
|
(u32)(sregs->u.s.ppc32.ibat[i] >> 32));
|
|
|
|
kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
|
|
|
|
(u32)sregs->u.s.ppc32.dbat[i]);
|
|
|
|
kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
|
|
|
|
(u32)(sregs->u.s.ppc32.dbat[i] >> 32));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Flush the MMU after messing with the segments */
|
|
|
|
kvmppc_mmu_pte_flush(vcpu, 0, 0);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
|
|
|
|
union kvmppc_one_reg *val)
|
2011-12-12 20:26:50 +08:00
|
|
|
{
|
2012-09-26 04:31:56 +08:00
|
|
|
int r = 0;
|
2011-12-12 20:26:50 +08:00
|
|
|
|
2012-09-26 04:31:56 +08:00
|
|
|
switch (id) {
|
2014-09-10 01:07:35 +08:00
|
|
|
case KVM_REG_PPC_DEBUG_INST:
|
|
|
|
*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
|
|
|
|
break;
|
2011-12-12 20:26:50 +08:00
|
|
|
case KVM_REG_PPC_HIOR:
|
2012-09-26 04:31:56 +08:00
|
|
|
*val = get_reg_val(id, to_book3s(vcpu)->hior);
|
2011-12-12 20:26:50 +08:00
|
|
|
break;
|
2016-09-15 11:42:52 +08:00
|
|
|
case KVM_REG_PPC_VTB:
|
|
|
|
*val = get_reg_val(id, to_book3s(vcpu)->vtb);
|
|
|
|
break;
|
2014-05-05 11:09:44 +08:00
|
|
|
case KVM_REG_PPC_LPCR:
|
2014-07-19 15:59:34 +08:00
|
|
|
case KVM_REG_PPC_LPCR_64:
|
2014-05-05 11:09:44 +08:00
|
|
|
/*
|
|
|
|
* We are only interested in the LPCR_ILE bit
|
|
|
|
*/
|
|
|
|
if (vcpu->arch.intr_msr & MSR_LE)
|
|
|
|
*val = get_reg_val(id, LPCR_ILE);
|
|
|
|
else
|
|
|
|
*val = get_reg_val(id, 0);
|
|
|
|
break;
|
2018-05-23 15:02:12 +08:00
|
|
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
|
|
|
case KVM_REG_PPC_TFHAR:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.tfhar);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TFIAR:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.tfiar);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TEXASR:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.texasr);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
|
|
|
|
*val = get_reg_val(id,
|
|
|
|
vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
|
|
|
|
{
|
|
|
|
int i, j;
|
|
|
|
|
|
|
|
i = id - KVM_REG_PPC_TM_VSR0;
|
|
|
|
if (i < 32)
|
|
|
|
for (j = 0; j < TS_FPRWIDTH; j++)
|
|
|
|
val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
|
|
|
|
else {
|
|
|
|
if (cpu_has_feature(CPU_FTR_ALTIVEC))
|
|
|
|
val->vval = vcpu->arch.vr_tm.vr[i-32];
|
|
|
|
else
|
|
|
|
r = -ENXIO;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case KVM_REG_PPC_TM_CR:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.cr_tm);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_XER:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.xer_tm);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_LR:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.lr_tm);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_CTR:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.ctr_tm);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_FPSCR:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_AMR:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.amr_tm);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_PPR:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.ppr_tm);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_VRSAVE:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.vrsave_tm);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_VSCR:
|
|
|
|
if (cpu_has_feature(CPU_FTR_ALTIVEC))
|
|
|
|
*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
|
|
|
|
else
|
|
|
|
r = -ENXIO;
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_DSCR:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.dscr_tm);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_TAR:
|
|
|
|
*val = get_reg_val(id, vcpu->arch.tar_tm);
|
|
|
|
break;
|
|
|
|
#endif
|
2011-12-12 20:26:50 +08:00
|
|
|
default:
|
2012-09-26 04:31:56 +08:00
|
|
|
r = -EINVAL;
|
2011-12-12 20:26:50 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2014-05-05 11:09:44 +08:00
|
|
|
static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
|
|
|
|
{
|
|
|
|
if (new_lpcr & LPCR_ILE)
|
|
|
|
vcpu->arch.intr_msr |= MSR_LE;
|
|
|
|
else
|
|
|
|
vcpu->arch.intr_msr &= ~MSR_LE;
|
|
|
|
}
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
|
|
|
|
union kvmppc_one_reg *val)
|
2011-12-12 20:26:50 +08:00
|
|
|
{
|
2012-09-26 04:31:56 +08:00
|
|
|
int r = 0;
|
2011-12-12 20:26:50 +08:00
|
|
|
|
2012-09-26 04:31:56 +08:00
|
|
|
switch (id) {
|
2011-12-12 20:26:50 +08:00
|
|
|
case KVM_REG_PPC_HIOR:
|
2012-09-26 04:31:56 +08:00
|
|
|
to_book3s(vcpu)->hior = set_reg_val(id, *val);
|
|
|
|
to_book3s(vcpu)->hior_explicit = true;
|
2011-12-12 20:26:50 +08:00
|
|
|
break;
|
2016-09-15 11:42:52 +08:00
|
|
|
case KVM_REG_PPC_VTB:
|
|
|
|
to_book3s(vcpu)->vtb = set_reg_val(id, *val);
|
|
|
|
break;
|
2014-05-05 11:09:44 +08:00
|
|
|
case KVM_REG_PPC_LPCR:
|
2014-07-19 15:59:34 +08:00
|
|
|
case KVM_REG_PPC_LPCR_64:
|
2014-05-05 11:09:44 +08:00
|
|
|
kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
|
|
|
|
break;
|
2018-05-23 15:02:12 +08:00
|
|
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
|
|
|
case KVM_REG_PPC_TFHAR:
|
|
|
|
vcpu->arch.tfhar = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TFIAR:
|
|
|
|
vcpu->arch.tfiar = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TEXASR:
|
|
|
|
vcpu->arch.texasr = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
|
|
|
|
vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
|
|
|
|
set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
|
|
|
|
{
|
|
|
|
int i, j;
|
|
|
|
|
|
|
|
i = id - KVM_REG_PPC_TM_VSR0;
|
|
|
|
if (i < 32)
|
|
|
|
for (j = 0; j < TS_FPRWIDTH; j++)
|
|
|
|
vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
|
|
|
|
else
|
|
|
|
if (cpu_has_feature(CPU_FTR_ALTIVEC))
|
|
|
|
vcpu->arch.vr_tm.vr[i-32] = val->vval;
|
|
|
|
else
|
|
|
|
r = -ENXIO;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case KVM_REG_PPC_TM_CR:
|
|
|
|
vcpu->arch.cr_tm = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_XER:
|
|
|
|
vcpu->arch.xer_tm = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_LR:
|
|
|
|
vcpu->arch.lr_tm = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_CTR:
|
|
|
|
vcpu->arch.ctr_tm = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_FPSCR:
|
|
|
|
vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_AMR:
|
|
|
|
vcpu->arch.amr_tm = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_PPR:
|
|
|
|
vcpu->arch.ppr_tm = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_VRSAVE:
|
|
|
|
vcpu->arch.vrsave_tm = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_VSCR:
|
|
|
|
if (cpu_has_feature(CPU_FTR_ALTIVEC))
|
|
|
|
vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
|
|
|
|
else
|
|
|
|
r = -ENXIO;
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_DSCR:
|
|
|
|
vcpu->arch.dscr_tm = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
case KVM_REG_PPC_TM_TAR:
|
|
|
|
vcpu->arch.tar_tm = set_reg_val(id, *val);
|
|
|
|
break;
|
|
|
|
#endif
|
2011-12-12 20:26:50 +08:00
|
|
|
default:
|
2012-09-26 04:31:56 +08:00
|
|
|
r = -EINVAL;
|
2011-12-12 20:26:50 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2019-12-19 05:55:00 +08:00
|
|
|
static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu)
|
2011-06-29 08:17:58 +08:00
|
|
|
{
|
|
|
|
struct kvmppc_vcpu_book3s *vcpu_book3s;
|
|
|
|
unsigned long p;
|
2019-12-19 05:54:58 +08:00
|
|
|
int err;
|
|
|
|
|
|
|
|
err = -ENOMEM;
|
2011-06-29 08:17:58 +08:00
|
|
|
|
|
|
|
vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
|
|
|
|
if (!vcpu_book3s)
|
2019-12-19 05:55:00 +08:00
|
|
|
goto out;
|
2013-09-20 12:52:49 +08:00
|
|
|
vcpu->arch.book3s = vcpu_book3s;
|
2011-06-29 08:17:58 +08:00
|
|
|
|
2014-04-07 05:31:48 +08:00
|
|
|
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
|
2013-09-20 12:52:49 +08:00
|
|
|
vcpu->arch.shadow_vcpu =
|
|
|
|
kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
|
|
|
|
if (!vcpu->arch.shadow_vcpu)
|
|
|
|
goto free_vcpu3s;
|
2013-09-20 12:52:43 +08:00
|
|
|
#endif
|
2011-06-29 08:17:58 +08:00
|
|
|
|
|
|
|
p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
|
|
|
|
if (!p)
|
2019-12-19 05:54:58 +08:00
|
|
|
goto free_shadow_vcpu;
|
2014-07-13 22:37:12 +08:00
|
|
|
vcpu->arch.shared = (void *)p;
|
2011-06-29 08:17:58 +08:00
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
2014-04-24 19:46:24 +08:00
|
|
|
/* Always start the shared struct in native endian mode */
|
|
|
|
#ifdef __BIG_ENDIAN__
|
|
|
|
vcpu->arch.shared_big_endian = true;
|
|
|
|
#else
|
|
|
|
vcpu->arch.shared_big_endian = false;
|
|
|
|
#endif
|
|
|
|
|
2013-09-20 12:52:44 +08:00
|
|
|
/*
|
|
|
|
* Default to the same as the host if we're on a sufficiently
|
|
|
|
* recent machine that has 1TB segments;
|
|
|
|
* otherwise default to PPC970FX.
|
|
|
|
*/
|
2011-06-29 08:17:58 +08:00
|
|
|
vcpu->arch.pvr = 0x3C0301;
|
2013-09-20 12:52:44 +08:00
|
|
|
if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
|
|
|
|
vcpu->arch.pvr = mfspr(SPRN_PVR);
|
2014-05-05 11:09:44 +08:00
|
|
|
vcpu->arch.intr_msr = MSR_SF;
|
2011-06-29 08:17:58 +08:00
|
|
|
#else
|
|
|
|
/* default to book3s_32 (750) */
|
|
|
|
vcpu->arch.pvr = 0x84202;
|
2019-10-02 14:00:22 +08:00
|
|
|
vcpu->arch.intr_msr = 0;
|
2011-06-29 08:17:58 +08:00
|
|
|
#endif
|
2013-10-08 00:47:53 +08:00
|
|
|
kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
|
2011-06-29 08:17:58 +08:00
|
|
|
vcpu->arch.slb_nr = 64;
|
|
|
|
|
2014-04-24 19:04:01 +08:00
|
|
|
vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
|
2011-06-29 08:17:58 +08:00
|
|
|
|
2020-03-19 01:43:36 +08:00
|
|
|
err = kvmppc_mmu_init_pr(vcpu);
|
2011-06-29 08:17:58 +08:00
|
|
|
if (err < 0)
|
2019-12-19 05:54:47 +08:00
|
|
|
goto free_shared_page;
|
2011-06-29 08:17:58 +08:00
|
|
|
|
2019-12-19 05:54:57 +08:00
|
|
|
return 0;
|
2011-06-29 08:17:58 +08:00
|
|
|
|
2019-12-19 05:54:47 +08:00
|
|
|
free_shared_page:
|
|
|
|
free_page((unsigned long)vcpu->arch.shared);
|
2011-06-29 08:17:58 +08:00
|
|
|
free_shadow_vcpu:
|
2014-04-07 05:31:48 +08:00
|
|
|
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
|
2013-09-20 12:52:49 +08:00
|
|
|
kfree(vcpu->arch.shadow_vcpu);
|
|
|
|
free_vcpu3s:
|
2013-09-20 12:52:43 +08:00
|
|
|
#endif
|
2011-06-29 08:17:58 +08:00
|
|
|
vfree(vcpu_book3s);
|
2019-12-19 05:55:00 +08:00
|
|
|
out:
|
2019-12-19 05:54:57 +08:00
|
|
|
return err;
|
2011-06-29 08:17:58 +08:00
|
|
|
}
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
|
2011-06-29 08:17:58 +08:00
|
|
|
{
|
|
|
|
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
|
|
|
|
|
2020-03-19 01:43:30 +08:00
|
|
|
kvmppc_mmu_destroy_pr(vcpu);
|
2011-06-29 08:17:58 +08:00
|
|
|
free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
|
2014-04-07 05:31:48 +08:00
|
|
|
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
|
2013-09-20 12:52:49 +08:00
|
|
|
kfree(vcpu->arch.shadow_vcpu);
|
|
|
|
#endif
|
2011-06-29 08:17:58 +08:00
|
|
|
vfree(vcpu_book3s);
|
|
|
|
}
|
|
|
|
|
2020-04-27 12:35:11 +08:00
|
|
|
static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
|
2011-06-29 08:17:58 +08:00
|
|
|
{
|
2020-04-27 12:35:11 +08:00
|
|
|
struct kvm_run *run = vcpu->run;
|
2011-06-29 08:17:58 +08:00
|
|
|
int ret;
|
|
|
|
#ifdef CONFIG_ALTIVEC
|
|
|
|
unsigned long uninitialized_var(vrsave);
|
|
|
|
#endif
|
|
|
|
|
2011-08-10 19:57:08 +08:00
|
|
|
/* Check if we can run the vcpu at all */
|
|
|
|
if (!vcpu->arch.sane) {
|
2020-04-27 12:35:11 +08:00
|
|
|
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
|
2011-12-09 22:46:21 +08:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
2011-08-10 19:57:08 +08:00
|
|
|
}
|
|
|
|
|
2016-04-09 00:05:00 +08:00
|
|
|
kvmppc_setup_debug(vcpu);
|
|
|
|
|
2011-12-19 20:36:55 +08:00
|
|
|
/*
|
|
|
|
* Interrupts could be timers for the guest which we have to inject
|
|
|
|
* again, so let's postpone them until we're in the guest and if we
|
|
|
|
* really did time things so badly, then we just exit again due to
|
|
|
|
* a host external interrupt.
|
|
|
|
*/
|
2012-08-13 18:44:41 +08:00
|
|
|
ret = kvmppc_prepare_to_enter(vcpu);
|
2014-01-10 09:18:40 +08:00
|
|
|
if (ret <= 0)
|
2011-12-09 22:46:21 +08:00
|
|
|
goto out;
|
2014-01-10 09:18:40 +08:00
|
|
|
/* interrupts now hard-disabled */
|
2011-06-29 08:17:58 +08:00
|
|
|
|
2015-10-29 08:44:08 +08:00
|
|
|
/* Save FPU, Altivec and VSX state */
|
|
|
|
giveup_all(current);
|
2011-06-29 08:17:58 +08:00
|
|
|
|
|
|
|
/* Preload FPU if it's enabled */
|
2014-04-24 19:46:24 +08:00
|
|
|
if (kvmppc_get_msr(vcpu) & MSR_FP)
|
2011-06-29 08:17:58 +08:00
|
|
|
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
|
|
|
|
|
2013-07-11 06:47:39 +08:00
|
|
|
kvmppc_fix_ee_before_entry();
|
2011-06-29 08:19:50 +08:00
|
|
|
|
2020-04-27 12:35:11 +08:00
|
|
|
ret = __kvmppc_vcpu_run(run, vcpu);
|
2011-06-29 08:19:50 +08:00
|
|
|
|
2016-04-09 00:05:00 +08:00
|
|
|
kvmppc_clear_debug(vcpu);
|
|
|
|
|
2016-06-15 21:18:26 +08:00
|
|
|
/* No need for guest_exit. It's done in handle_exit.
|
2012-08-12 18:42:30 +08:00
|
|
|
We also get here with interrupts enabled. */
|
2011-06-29 08:17:58 +08:00
|
|
|
|
|
|
|
/* Make sure we save the guest FPU/Altivec/VSX state */
|
KVM: PPC: Book3S PR: Fix VSX handling
This fixes various issues in how we were handling the VSX registers
that exist on POWER7 machines. First, we were running off the end
of the current->thread.fpr[] array. Ultimately this was because the
vcpu->arch.vsr[] array is sized to be able to store both the FP
registers and the extra VSX registers (i.e. 64 entries), but PR KVM
only uses it for the extra VSX registers (i.e. 32 entries).
Secondly, calling load_up_vsx() from C code is a really bad idea,
because it jumps to fast_exception_return at the end, rather than
returning with a blr instruction. This was causing it to jump off
to a random location with random register contents, since it was using
the largely uninitialized stack frame created by kvmppc_load_up_vsx.
In fact, it isn't necessary to call either __giveup_vsx or load_up_vsx,
since giveup_fpu and load_up_fpu handle the extra VSX registers as well
as the standard FP registers on machines with VSX. Also, since VSX
instructions can access the VMX registers and the FP registers as well
as the extra VSX registers, we have to load up the FP and VMX registers
before we can turn on the MSR_VSX bit for the guest. Conversely, if
we save away any of the VSX or FP registers, we have to turn off MSR_VSX
for the guest.
To handle all this, it is more convenient for a single call to
kvmppc_giveup_ext() to handle all the state saving that needs to be done,
so we make it take a set of MSR bits rather than just one, and the switch
statement becomes a series of if statements. Similarly kvmppc_handle_ext
needs to be able to load up more than one set of registers.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
2012-11-05 02:16:46 +08:00
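Below is a hedged standalone sketch of the mask-based flush the commit describes, with stub save helpers; the coupling rules and names simplify kvmppc_giveup_ext()'s behaviour for illustration and are not a copy of it.

#include <stdio.h>

#define MSR_FP	(1UL << 13)
#define MSR_VSX	(1UL << 23)
#define MSR_VEC	(1UL << 25)

static void save_fp_state(void)      { puts("flush FP (covers the VSX halves)"); }
static void save_altivec_state(void) { puts("flush VMX"); }

/* One call flushes any combination of facilities.  Because VSX instructions
 * reach the FP and VMX register files, giving up either of those means the
 * guest can no longer keep MSR_VSX either. */
static void giveup_ext_mask(unsigned long *guest_owned_ext, unsigned long msr)
{
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;
	if ((msr & (MSR_FP | MSR_VEC)) && (*guest_owned_ext & MSR_VSX))
		msr |= MSR_VSX;

	msr &= *guest_owned_ext;		/* only flush what is live */
	if (msr & MSR_FP)
		save_fp_state();
	if (msr & MSR_VEC)
		save_altivec_state();

	*guest_owned_ext &= ~msr;
}

int main(void)
{
	unsigned long owned = MSR_FP | MSR_VEC | MSR_VSX;

	giveup_ext_mask(&owned, MSR_FP);	/* dropping FP also drops VSX */
	printf("still owned: %#lx\n", owned);
	return 0;
}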
|
|
|
kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
|
|
|
|
|
2014-04-22 18:26:58 +08:00
|
|
|
/* Make sure we save the guest TAR/EBB/DSCR state */
|
|
|
|
kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
|
|
|
|
|
2011-12-09 22:46:21 +08:00
|
|
|
out:
|
2012-08-12 17:34:21 +08:00
|
|
|
vcpu->mode = OUTSIDE_GUEST_MODE;
|
2011-06-29 08:17:58 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-12-15 10:03:22 +08:00
|
|
|
/*
|
|
|
|
* Get (and clear) the dirty memory log for a memory slot.
|
|
|
|
*/
|
2013-10-08 00:47:53 +08:00
|
|
|
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
|
|
|
|
struct kvm_dirty_log *log)
|
2011-12-15 10:03:22 +08:00
|
|
|
{
|
|
|
|
struct kvm_memory_slot *memslot;
|
|
|
|
struct kvm_vcpu *vcpu;
|
|
|
|
ulong ga, ga_end;
|
|
|
|
int is_dirty = 0;
|
|
|
|
int r;
|
|
|
|
unsigned long n;
|
|
|
|
|
|
|
|
mutex_lock(&kvm->slots_lock);
|
|
|
|
|
KVM: Ensure validity of memslot with respect to kvm_get_dirty_log()
Rework kvm_get_dirty_log() so that it "returns" the associated memslot
on success. A future patch will rework memslot handling such that
id_to_memslot() can return NULL, returning the memslot makes it more
obvious that the validity of the memslot has been verified, i.e.
precludes the need to add validity checks in the arch code that are
technically unnecessary.
To maintain ordering in s390, move the call to kvm_arch_sync_dirty_log()
from s390's kvm_vm_ioctl_get_dirty_log() to the new kvm_get_dirty_log().
This is a nop for PPC, the only other arch that doesn't select
KVM_GENERIC_DIRTYLOG_READ_PROTECT, as its sync_dirty_log() is empty.
Ideally, moving the sync_dirty_log() call would be done in a separate
patch, but it can't be done in a follow-on patch because that would
temporarily break s390's ordering. Making the move in a preparatory
patch would be functionally correct, but would create an odd scenario
where the moved sync_dirty_log() would operate on a "different" memslot
due to consuming the result of a different id_to_memslot(). The
memslot couldn't actually be different as slots_lock is held, but the
code is confusing enough as it is, i.e. moving sync_dirty_log() in this
patch is the lesser of all evils.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2020-02-19 05:07:30 +08:00
|
|
|
r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
|
2011-12-15 10:03:22 +08:00
|
|
|
if (r)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* If nothing is dirty, don't bother messing with page tables. */
|
|
|
|
if (is_dirty) {
|
|
|
|
ga = memslot->base_gfn << PAGE_SHIFT;
|
|
|
|
ga_end = ga + (memslot->npages << PAGE_SHIFT);
|
|
|
|
|
|
|
|
kvm_for_each_vcpu(n, vcpu, kvm)
|
|
|
|
kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
|
|
|
|
|
|
|
|
n = kvm_dirty_bitmap_bytes(memslot);
|
|
|
|
memset(memslot->dirty_bitmap, 0, n);
|
|
|
|
}
|
|
|
|
|
|
|
|
r = 0;
|
|
|
|
out:
|
|
|
|
mutex_unlock(&kvm->slots_lock);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
|
|
|
|
struct kvm_memory_slot *memslot)
|
2012-04-27 03:43:42 +08:00
|
|
|
{
|
2013-10-08 00:47:53 +08:00
|
|
|
return;
|
|
|
|
}
|
2012-04-27 03:43:42 +08:00
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
|
|
|
|
struct kvm_memory_slot *memslot,
|
2020-02-19 05:07:18 +08:00
|
|
|
const struct kvm_userspace_memory_region *mem,
|
|
|
|
enum kvm_mr_change change)
|
2013-10-08 00:47:53 +08:00
|
|
|
{
|
2012-04-27 03:43:42 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
|
2015-05-18 19:59:39 +08:00
|
|
|
const struct kvm_userspace_memory_region *mem,
|
2015-05-18 19:20:23 +08:00
|
|
|
const struct kvm_memory_slot *old,
|
2018-12-12 12:15:30 +08:00
|
|
|
const struct kvm_memory_slot *new,
|
|
|
|
enum kvm_mr_change change)
|
2012-09-11 21:27:46 +08:00
|
|
|
{
|
2013-10-08 00:47:53 +08:00
|
|
|
return;
|
2012-09-11 21:27:46 +08:00
|
|
|
}
|
|
|
|
|
2020-02-19 05:07:27 +08:00
|
|
|
static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *slot)
|
2012-09-11 21:27:46 +08:00
|
|
|
{
|
2013-10-08 00:47:53 +08:00
|
|
|
return;
|
2012-09-11 21:27:46 +08:00
|
|
|
}
|
|
|
|
|
2012-04-27 03:43:42 +08:00
|
|
|
#ifdef CONFIG_PPC64
|
2013-10-08 00:47:53 +08:00
|
|
|
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
|
|
|
|
struct kvm_ppc_smmu_info *info)
|
2012-09-11 21:28:18 +08:00
|
|
|
{
|
2013-09-20 12:52:44 +08:00
|
|
|
long int i;
|
|
|
|
struct kvm_vcpu *vcpu;
|
|
|
|
|
|
|
|
info->flags = 0;
|
2012-04-27 03:43:42 +08:00
|
|
|
|
|
|
|
/* SLB is always 64 entries */
|
|
|
|
info->slb_size = 64;
|
|
|
|
|
|
|
|
/* Standard 4k base page size segment */
|
|
|
|
info->sps[0].page_shift = 12;
|
|
|
|
info->sps[0].slb_enc = 0;
|
|
|
|
info->sps[0].enc[0].page_shift = 12;
|
|
|
|
info->sps[0].enc[0].pte_enc = 0;
|
|
|
|
|
2013-09-20 12:52:44 +08:00
|
|
|
/*
|
|
|
|
* 64k large page size.
|
|
|
|
* We only want to put this in if the CPUs we're emulating
|
|
|
|
* support it, but unfortunately we don't have a vcpu easily
|
|
|
|
* to hand here to test. Just pick the first vcpu, and if
|
|
|
|
* that doesn't exist yet, report the minimum capability,
|
|
|
|
* i.e., no 64k pages.
|
|
|
|
* 1T segment support goes along with 64k pages.
|
|
|
|
*/
|
|
|
|
i = 1;
|
|
|
|
vcpu = kvm_get_vcpu(kvm, 0);
|
|
|
|
if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
|
|
|
|
info->flags = KVM_PPC_1T_SEGMENTS;
|
|
|
|
info->sps[i].page_shift = 16;
|
|
|
|
info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
|
|
|
|
info->sps[i].enc[0].page_shift = 16;
|
|
|
|
info->sps[i].enc[0].pte_enc = 1;
|
|
|
|
++i;
|
|
|
|
}
|
|
|
|
|
2012-04-27 03:43:42 +08:00
|
|
|
/* Standard 16M large page size segment */
|
2013-09-20 12:52:44 +08:00
|
|
|
info->sps[i].page_shift = 24;
|
|
|
|
info->sps[i].slb_enc = SLB_VSID_L;
|
|
|
|
info->sps[i].enc[0].page_shift = 24;
|
|
|
|
info->sps[i].enc[0].pte_enc = 0;
|
2012-09-11 21:28:18 +08:00
|
|
|
|
2012-04-27 03:43:42 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2018-05-30 13:47:17 +08:00
|
|
|
|
|
|
|
static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
|
|
|
|
{
|
|
|
|
if (!cpu_has_feature(CPU_FTR_ARCH_300))
|
|
|
|
return -ENODEV;
|
|
|
|
/* Require flags and process table base and size to all be zero. */
|
|
|
|
if (cfg->flags || cfg->process_table)
|
|
|
|
return -EINVAL;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
#else
|
|
|
|
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
|
|
|
|
struct kvm_ppc_smmu_info *info)
|
2011-06-29 08:19:22 +08:00
|
|
|
{
|
2013-10-08 00:47:53 +08:00
|
|
|
/* We should not get called */
|
|
|
|
BUG();
|
2020-01-27 06:31:58 +08:00
|
|
|
return 0;
|
2011-06-29 08:19:22 +08:00
|
|
|
}
|
2013-10-08 00:47:53 +08:00
|
|
|
#endif /* CONFIG_PPC64 */
|
2011-06-29 08:19:22 +08:00
|
|
|
|
2012-12-04 02:36:13 +08:00
|
|
|
static unsigned int kvm_global_user_count = 0;
|
|
|
|
static DEFINE_SPINLOCK(kvm_global_user_count_lock);
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
static int kvmppc_core_init_vm_pr(struct kvm *kvm)
|
2011-06-29 08:19:22 +08:00
|
|
|
{
|
KVM: PPC: Book3S PR: Make HPT accesses and updates SMP-safe
This adds a per-VM mutex to provide mutual exclusion between vcpus
for accesses to and updates of the guest hashed page table (HPT).
This also makes the code use single-byte writes to the HPT entry
when updating of the reference (R) and change (C) bits. The reason
for doing this, rather than writing back the whole HPTE, is that on
non-PAPR virtual machines, the guest OS might be writing to the HPTE
concurrently, and writing back the whole HPTE might conflict with
that. Also, real hardware does single-byte writes to update R and C.
The new mutex is taken in kvmppc_mmu_book3s_64_xlate() when reading
the HPT and updating R and/or C, and in the PAPR HPT update hcalls
(H_ENTER, H_REMOVE, etc.). Having the mutex means that we don't need
to use a hypervisor lock bit in the HPT update hcalls, and we don't
need to be careful about the order in which the bytes of the HPTE are
updated by those hcalls.
The other change here is to make emulated TLB invalidations (tlbie)
effective across all vcpus. To do this we call kvmppc_mmu_pte_vflush
for all vcpus in kvmppc_ppc_book3s_64_tlbie().
For 32-bit, this makes the setting of the accessed and dirty bits use
single-byte writes, and makes tlbie invalidate shadow HPTEs for all
vcpus.
With this, PR KVM can successfully run SMP guests.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
2013-09-20 12:52:48 +08:00
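A standalone sketch of the two mechanisms in the commit above, using pthreads in place of the kernel mutex; the HPTE layout, bit values, and which byte carries R/C are simplified assumptions, not the architected format.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define HPTE_R_R	0x02	/* reference bit, illustrative low-byte value */
#define HPTE_R_C	0x01	/* change bit, illustrative low-byte value */

struct hpte { uint64_t v, r; };

static pthread_mutex_t hpt_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Set R and/or C with a single-byte store to the byte that holds them, so a
 * guest concurrently rewriting other fields of the same HPTE is not
 * clobbered by a full 64-bit write-back of the doubleword. */
static void hpte_set_rc(struct hpte *h, uint8_t rc)
{
	uint8_t *rc_byte = (uint8_t *)&h->r;	/* little-endian host assumed */

	pthread_mutex_lock(&hpt_mutex);		/* serialise vcpus on the HPT */
	*rc_byte |= rc;
	pthread_mutex_unlock(&hpt_mutex);
}

int main(void)
{
	struct hpte h = { .v = 1, .r = 0 };

	hpte_set_rc(&h, HPTE_R_R | HPTE_R_C);
	printf("hpte.r = %#llx\n", (unsigned long long)h.r);
	return 0;
}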
|
|
|
mutex_init(&kvm->arch.hpt_mutex);
|
2012-03-16 05:58:34 +08:00
|
|
|
|
2014-06-02 09:02:59 +08:00
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
|
|
/* Start out with the default set of hcalls enabled */
|
|
|
|
kvmppc_pr_init_default_hcalls(kvm);
|
|
|
|
#endif
|
|
|
|
|
2012-12-04 02:36:13 +08:00
|
|
|
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
|
|
|
|
spin_lock(&kvm_global_user_count_lock);
|
|
|
|
if (++kvm_global_user_count == 1)
|
2016-07-05 13:03:49 +08:00
|
|
|
pseries_disable_reloc_on_exc();
|
2012-12-04 02:36:13 +08:00
|
|
|
spin_unlock(&kvm_global_user_count_lock);
|
|
|
|
}
|
2011-06-29 08:19:22 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
|
2011-06-29 08:19:22 +08:00
|
|
|
{
|
2012-03-16 05:58:34 +08:00
|
|
|
#ifdef CONFIG_PPC64
|
|
|
|
WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
|
|
|
|
#endif
|
2012-12-04 02:36:13 +08:00
|
|
|
|
|
|
|
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
|
|
|
|
spin_lock(&kvm_global_user_count_lock);
|
|
|
|
BUG_ON(kvm_global_user_count == 0);
|
|
|
|
if (--kvm_global_user_count == 0)
|
2016-07-05 13:03:49 +08:00
|
|
|
pseries_enable_reloc_on_exc();
|
2012-12-04 02:36:13 +08:00
|
|
|
spin_unlock(&kvm_global_user_count_lock);
|
|
|
|
}
|
2011-06-29 08:19:22 +08:00
|
|
|
}
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
static int kvmppc_core_check_processor_compat_pr(void)
|
2011-06-29 08:17:58 +08:00
|
|
|
{
|
2016-04-29 21:25:43 +08:00
|
|
|
/*
|
2018-05-18 19:49:28 +08:00
|
|
|
* PR KVM can work on POWER9 inside a guest partition
|
|
|
|
* running in HPT mode. It can't work if we are using
|
|
|
|
* radix translation (because radix provides no way for
|
2018-06-07 16:08:02 +08:00
|
|
|
* a process to have unique translations in quadrant 3).
|
2016-04-29 21:25:43 +08:00
|
|
|
*/
|
2018-06-07 16:08:02 +08:00
|
|
|
if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
|
2016-04-29 21:25:43 +08:00
|
|
|
return -EIO;
|
2013-10-08 00:47:53 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2011-06-29 08:17:58 +08:00
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
static long kvm_arch_vm_ioctl_pr(struct file *filp,
|
|
|
|
unsigned int ioctl, unsigned long arg)
|
|
|
|
{
|
|
|
|
return -ENOTTY;
|
|
|
|
}
|
2011-06-29 08:17:58 +08:00
|
|
|
|
2013-10-08 00:48:01 +08:00
|
|
|
static struct kvmppc_ops kvm_ops_pr = {
|
2013-10-08 00:47:53 +08:00
|
|
|
.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
|
|
|
|
.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
|
|
|
|
.get_one_reg = kvmppc_get_one_reg_pr,
|
|
|
|
.set_one_reg = kvmppc_set_one_reg_pr,
|
|
|
|
.vcpu_load = kvmppc_core_vcpu_load_pr,
|
|
|
|
.vcpu_put = kvmppc_core_vcpu_put_pr,
|
2019-10-02 14:00:22 +08:00
|
|
|
.inject_interrupt = kvmppc_inject_interrupt_pr,
|
2013-10-08 00:47:53 +08:00
|
|
|
.set_msr = kvmppc_set_msr_pr,
|
|
|
|
.vcpu_run = kvmppc_vcpu_run_pr,
|
|
|
|
.vcpu_create = kvmppc_core_vcpu_create_pr,
|
|
|
|
.vcpu_free = kvmppc_core_vcpu_free_pr,
|
|
|
|
.check_requests = kvmppc_core_check_requests_pr,
|
|
|
|
.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
|
|
|
|
.flush_memslot = kvmppc_core_flush_memslot_pr,
|
|
|
|
.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
|
|
|
|
.commit_memory_region = kvmppc_core_commit_memory_region_pr,
|
|
|
|
.unmap_hva_range = kvm_unmap_hva_range_pr,
|
|
|
|
.age_hva = kvm_age_hva_pr,
|
|
|
|
.test_age_hva = kvm_test_age_hva_pr,
|
|
|
|
.set_spte_hva = kvm_set_spte_hva_pr,
|
|
|
|
.free_memslot = kvmppc_core_free_memslot_pr,
|
|
|
|
.init_vm = kvmppc_core_init_vm_pr,
|
|
|
|
.destroy_vm = kvmppc_core_destroy_vm_pr,
|
|
|
|
.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
|
|
|
|
.emulate_op = kvmppc_core_emulate_op_pr,
|
|
|
|
.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
|
|
|
|
.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
|
|
|
|
.fast_vcpu_kick = kvm_vcpu_kick,
|
|
|
|
.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
|
2014-06-02 09:03:00 +08:00
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
|
|
.hcall_implemented = kvmppc_hcall_impl_pr,
|
2018-05-30 13:47:17 +08:00
|
|
|
.configure_mmu = kvm_configure_mmu_pr,
|
2014-06-02 09:03:00 +08:00
|
|
|
#endif
|
2018-05-21 13:24:22 +08:00
|
|
|
.giveup_ext = kvmppc_giveup_ext,
|
2013-10-08 00:47:53 +08:00
|
|
|
};
|
|
|
|
|
2013-10-08 00:48:01 +08:00
|
|
|
|
|
|
|
int kvmppc_book3s_init_pr(void)
|
2011-06-29 08:17:58 +08:00
|
|
|
{
|
|
|
|
int r;
|
|
|
|
|
2013-10-08 00:48:01 +08:00
|
|
|
r = kvmppc_core_check_processor_compat_pr();
|
|
|
|
if (r < 0)
|
2011-06-29 08:17:58 +08:00
|
|
|
return r;
|
|
|
|
|
2013-10-08 00:48:01 +08:00
|
|
|
kvm_ops_pr.owner = THIS_MODULE;
|
|
|
|
kvmppc_pr_ops = &kvm_ops_pr;
|
2011-06-29 08:17:58 +08:00
|
|
|
|
2013-10-08 00:48:01 +08:00
|
|
|
r = kvmppc_mmu_hpte_sysinit();
|
2011-06-29 08:17:58 +08:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2013-10-08 00:48:01 +08:00
|
|
|
void kvmppc_book3s_exit_pr(void)
|
2011-06-29 08:17:58 +08:00
|
|
|
{
|
2013-10-08 00:48:01 +08:00
|
|
|
kvmppc_pr_ops = NULL;
|
2011-06-29 08:17:58 +08:00
|
|
|
kvmppc_mmu_hpte_sysexit();
|
|
|
|
}
|
|
|
|
|
2013-10-08 00:48:01 +08:00
|
|
|
/*
|
|
|
|
* We only support separate modules for book3s 64
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
|
|
|
2013-10-08 00:47:53 +08:00
|
|
|
module_init(kvmppc_book3s_init_pr);
|
|
|
|
module_exit(kvmppc_book3s_exit_pr);
|
2013-10-08 00:47:59 +08:00
|
|
|
|
|
|
|
MODULE_LICENSE("GPL");
|
2013-12-09 20:53:42 +08:00
|
|
|
MODULE_ALIAS_MISCDEV(KVM_MINOR);
|
|
|
|
MODULE_ALIAS("devname:kvm");
|
2013-10-08 00:48:01 +08:00
|
|
|
#endif
|