/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
|
|
|
|
|
|
|
|
#include <asm/ppc_asm.h>
|
|
|
|
#include <asm/kvm_asm.h>
|
|
|
|
#include <asm/reg.h>
|
|
|
|
#include <asm/page.h>
|
|
|
|
#include <asm/asm-offsets.h>
|
2018-07-06 00:24:57 +08:00
|
|
|
#include <asm/asm-compat.h>
|
2010-07-29 20:47:57 +08:00
|
|
|
|
|
|
|
#define KVM_MAGIC_PAGE (-4096)
|
2010-07-29 20:48:03 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
#define LL64(reg, offs, reg2) ld reg, (offs)(reg2)
|
|
|
|
#define STL64(reg, offs, reg2) std reg, (offs)(reg2)
|
|
|
|
#else
|
|
|
|
#define LL64(reg, offs, reg2) lwz reg, (offs + 4)(reg2)
|
|
|
|
#define STL64(reg, offs, reg2) stw reg, (offs + 4)(reg2)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#define SCRATCH_SAVE \
|
|
|
|
/* Enable critical section. We are critical if \
|
|
|
|
shared->critical == r1 */ \
|
|
|
|
STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); \
|
|
|
|
\
|
|
|
|
/* Save state */ \
|
|
|
|
PPC_STL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \
|
|
|
|
PPC_STL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \
|
|
|
|
mfcr r31; \
|
|
|
|
stw r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);
|
|
|
|
|
|
|
|
#define SCRATCH_RESTORE \
|
|
|
|
/* Restore state */ \
|
|
|
|
PPC_LL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \
|
|
|
|
lwz r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0); \
|
|
|
|
mtcr r30; \
|
|
|
|
PPC_LL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \
|
|
|
|
\
|
|
|
|
/* Disable critical section. We are critical if \
|
|
|
|
shared->critical == r1 and r2 is always != r1 */ \
|
|
|
|
STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
|
2010-07-29 20:48:04 +08:00
|
|
|
|
2011-12-02 04:22:53 +08:00
|
|
|
.global kvm_template_start
|
|
|
|
kvm_template_start:
|
|
|
|
|
2010-07-29 20:48:04 +08:00
|
|
|
.global kvm_emulate_mtmsrd
|
|
|
|
kvm_emulate_mtmsrd:
|
|
|
|
|
|
|
|
SCRATCH_SAVE
|
|
|
|
|
|
|
|
/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
|
|
|
|
LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
|
|
|
|
lis r30, (~(MSR_EE | MSR_RI))@h
|
|
|
|
ori r30, r30, (~(MSR_EE | MSR_RI))@l
|
|
|
|
and r31, r31, r30
|
|
|
|
|
|
|
|
/* OR the register's (MSR_EE|MSR_RI) on MSR */
|
|
|
|
kvm_emulate_mtmsrd_reg:
|
2010-08-05 21:44:41 +08:00
|
|
|
ori r30, r0, 0
|
|
|
|
andi. r30, r30, (MSR_EE|MSR_RI)
|
2010-07-29 20:48:04 +08:00
|
|
|
or r31, r31, r30
|
|
|
|
|
|
|
|
/* Put MSR back into magic page */
|
|
|
|
STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
|
|
|
|
|
|
|
|
/* Check if we have to fetch an interrupt */
|
|
|
|
lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
|
|
|
|
cmpwi r31, 0
|
|
|
|
beq+ no_check
|
|
|
|
|
|
|
|
/* Check if we may trigger an interrupt */
|
|
|
|
andi. r30, r30, MSR_EE
|
|
|
|
beq no_check
|
|
|
|
|
|
|
|
SCRATCH_RESTORE
|
|
|
|
|
|
|
|
/* Nag hypervisor */
|
2010-08-05 21:44:41 +08:00
|
|
|
kvm_emulate_mtmsrd_orig_ins:
|
2010-07-29 20:48:04 +08:00
|
|
|
tlbsync
|
|
|
|
|
|
|
|
b kvm_emulate_mtmsrd_branch
|
|
|
|
|
|
|
|
no_check:
|
|
|
|
|
|
|
|
SCRATCH_RESTORE
|
|
|
|
|
|
|
|
/* Go back to caller */
|
|
|
|
kvm_emulate_mtmsrd_branch:
|
|
|
|
b .
|
|
|
|
kvm_emulate_mtmsrd_end:
|
|
|
|
|
|
|
|
.global kvm_emulate_mtmsrd_branch_offs
|
|
|
|
kvm_emulate_mtmsrd_branch_offs:
|
|
|
|
.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_mtmsrd_reg_offs
|
|
|
|
kvm_emulate_mtmsrd_reg_offs:
|
|
|
|
.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4
|
|
|
|
|
2010-08-05 21:44:41 +08:00
|
|
|
.global kvm_emulate_mtmsrd_orig_ins_offs
|
|
|
|
kvm_emulate_mtmsrd_orig_ins_offs:
|
|
|
|
.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4
|
|
|
|
|
2010-07-29 20:48:04 +08:00
|
|
|
.global kvm_emulate_mtmsrd_len
|
|
|
|
kvm_emulate_mtmsrd_len:
|
|
|
|
.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
|
2010-07-29 20:48:05 +08:00
|
|
|
|
|
|
|
|
2012-05-21 07:21:53 +08:00
|
|
|
#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
|
2010-07-29 20:48:05 +08:00
|
|
|
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
|
|
|
|
|
|
|
|
.global kvm_emulate_mtmsr
|
|
|
|
kvm_emulate_mtmsr:
|
|
|
|
|
|
|
|
SCRATCH_SAVE
|
|
|
|
|
|
|
|
/* Fetch old MSR in r31 */
|
|
|
|
LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
|
|
|
|
|
|
|
|
/* Find the changed bits between old and new MSR */
|
|
|
|
kvm_emulate_mtmsr_reg1:
|
2010-08-05 17:26:04 +08:00
|
|
|
ori r30, r0, 0
|
|
|
|
xor r31, r30, r31
|
2010-07-29 20:48:05 +08:00
|
|
|
|
|
|
|
/* Check if we need to really do mtmsr */
|
|
|
|
LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
|
|
|
|
and. r31, r31, r30
|
|
|
|
|
|
|
|
/* No critical bits changed? Maybe we can stay in the guest. */
|
|
|
|
beq maybe_stay_in_guest
|
|
|
|
|
|
|
|
do_mtmsr:
|
|
|
|
|
|
|
|
SCRATCH_RESTORE
|
|
|
|
|
|
|
|
/* Just fire off the mtmsr if it's critical */
|
|
|
|
kvm_emulate_mtmsr_orig_ins:
|
|
|
|
mtmsr r0
|
|
|
|
|
|
|
|
b kvm_emulate_mtmsr_branch
|
|
|
|
|
|
|
|
maybe_stay_in_guest:
|
|
|
|
|
2010-08-05 17:26:04 +08:00
|
|
|
/* Get the target register in r30 */
|
|
|
|
kvm_emulate_mtmsr_reg2:
|
|
|
|
ori r30, r0, 0
|
|
|
|
|
2011-10-13 17:47:08 +08:00
|
|
|
/* Put MSR into magic page because we don't call mtmsr */
|
|
|
|
STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
|
|
|
|
|
2010-07-29 20:48:05 +08:00
|
|
|
/* Check if we have to fetch an interrupt */
|
|
|
|
lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
|
|
|
|
cmpwi r31, 0
|
|
|
|
beq+ no_mtmsr
|
|
|
|
|
|
|
|
/* Check if we may trigger an interrupt */
|
2010-08-05 17:26:04 +08:00
|
|
|
andi. r31, r30, MSR_EE
|
2011-10-13 17:47:08 +08:00
|
|
|
bne do_mtmsr
|
2010-07-29 20:48:05 +08:00
|
|
|
|
|
|
|
no_mtmsr:
|
|
|
|
|
|
|
|
SCRATCH_RESTORE
|
|
|
|
|
|
|
|
/* Go back to caller */
|
|
|
|
kvm_emulate_mtmsr_branch:
|
|
|
|
b .
|
|
|
|
kvm_emulate_mtmsr_end:
|
|
|
|
|
|
|
|
.global kvm_emulate_mtmsr_branch_offs
|
|
|
|
kvm_emulate_mtmsr_branch_offs:
|
|
|
|
.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_mtmsr_reg1_offs
|
|
|
|
kvm_emulate_mtmsr_reg1_offs:
|
|
|
|
.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_mtmsr_reg2_offs
|
|
|
|
kvm_emulate_mtmsr_reg2_offs:
|
|
|
|
.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_mtmsr_orig_ins_offs
|
|
|
|
kvm_emulate_mtmsr_orig_ins_offs:
|
|
|
|
.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_mtmsr_len
|
|
|
|
kvm_emulate_mtmsr_len:
|
|
|
|
.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
|
2010-07-29 20:48:06 +08:00
|
|
|
|
2019-09-11 19:57:46 +08:00
|
|
|
#ifdef CONFIG_BOOKE
|
|
|
|
|
2011-11-09 08:23:28 +08:00
|
|
|
/* also used for wrteei 1 */
|
|
|
|
.global kvm_emulate_wrtee
|
|
|
|
kvm_emulate_wrtee:
|
2010-07-29 20:48:06 +08:00
|
|
|
|
2011-11-09 08:23:28 +08:00
|
|
|
SCRATCH_SAVE
|
|
|
|
|
|
|
|
/* Fetch old MSR in r31 */
|
|
|
|
LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
|
2010-07-29 20:48:06 +08:00
|
|
|
|
2011-11-09 08:23:28 +08:00
|
|
|
/* Insert new MSR[EE] */
|
|
|
|
kvm_emulate_wrtee_reg:
|
|
|
|
ori r30, r0, 0
|
|
|
|
rlwimi r31, r30, 0, MSR_EE
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If MSR[EE] is now set, check for a pending interrupt.
|
|
|
|
* We could skip this if MSR[EE] was already on, but that
|
|
|
|
* should be rare, so don't bother.
|
|
|
|
*/
|
|
|
|
andi. r30, r30, MSR_EE
|
2010-07-29 20:48:06 +08:00
|
|
|
|
2011-11-09 08:23:28 +08:00
|
|
|
/* Put MSR into magic page because we don't call wrtee */
|
|
|
|
STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
|
|
|
|
|
|
|
|
beq no_wrtee
|
|
|
|
|
|
|
|
/* Check if we have to fetch an interrupt */
|
|
|
|
lwz r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
|
|
|
|
cmpwi r30, 0
|
|
|
|
bne do_wrtee
|
|
|
|
|
|
|
|
no_wrtee:
|
|
|
|
SCRATCH_RESTORE
|
|
|
|
|
|
|
|
/* Go back to caller */
|
|
|
|
kvm_emulate_wrtee_branch:
|
|
|
|
b .
|
|
|
|
|
|
|
|
do_wrtee:
|
|
|
|
SCRATCH_RESTORE
|
|
|
|
|
|
|
|
/* Just fire off the wrtee if it's critical */
|
|
|
|
kvm_emulate_wrtee_orig_ins:
|
|
|
|
wrtee r0
|
|
|
|
|
|
|
|
b kvm_emulate_wrtee_branch
|
|
|
|
|
|
|
|
kvm_emulate_wrtee_end:
|
|
|
|
|
|
|
|
.global kvm_emulate_wrtee_branch_offs
|
|
|
|
kvm_emulate_wrtee_branch_offs:
|
|
|
|
.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_wrtee_reg_offs
|
|
|
|
kvm_emulate_wrtee_reg_offs:
|
|
|
|
.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_wrtee_orig_ins_offs
|
|
|
|
kvm_emulate_wrtee_orig_ins_offs:
|
|
|
|
.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_wrtee_len
|
|
|
|
kvm_emulate_wrtee_len:
|
|
|
|
.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_wrteei_0
|
|
|
|
kvm_emulate_wrteei_0:
|
2010-07-29 20:48:06 +08:00
|
|
|
SCRATCH_SAVE
|
|
|
|
|
|
|
|
/* Fetch old MSR in r31 */
|
|
|
|
LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
|
|
|
|
|
|
|
|
/* Remove MSR_EE from old MSR */
|
2011-11-09 08:23:28 +08:00
|
|
|
rlwinm r31, r31, 0, ~MSR_EE
|
2010-07-29 20:48:06 +08:00
|
|
|
|
|
|
|
/* Write new MSR value back */
|
|
|
|
STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
|
|
|
|
|
|
|
|
SCRATCH_RESTORE
|
|
|
|
|
|
|
|
/* Go back to caller */
|
2011-11-09 08:23:28 +08:00
|
|
|
kvm_emulate_wrteei_0_branch:
|
2010-07-29 20:48:06 +08:00
|
|
|
b .
|
2011-11-09 08:23:28 +08:00
|
|
|
kvm_emulate_wrteei_0_end:
|
2010-07-29 20:48:06 +08:00
|
|
|
|
2011-11-09 08:23:28 +08:00
|
|
|
.global kvm_emulate_wrteei_0_branch_offs
|
|
|
|
kvm_emulate_wrteei_0_branch_offs:
|
|
|
|
.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4
|
2010-08-03 16:39:35 +08:00
|
|
|
|
2011-11-09 08:23:28 +08:00
|
|
|
.global kvm_emulate_wrteei_0_len
|
|
|
|
kvm_emulate_wrteei_0_len:
|
|
|
|
.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4
|
2010-08-03 16:39:35 +08:00
|
|
|
|
2019-09-11 19:57:46 +08:00
|
|
|
#endif /* CONFIG_BOOKE */
|
|
|
|
|
|
|
|
#ifdef CONFIG_PPC_BOOK3S_32
|
|
|
|
|
2010-08-03 16:39:35 +08:00
|
|
|
.global kvm_emulate_mtsrin
|
|
|
|
kvm_emulate_mtsrin:
|
|
|
|
|
|
|
|
SCRATCH_SAVE
|
|
|
|
|
|
|
|
LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
|
|
|
|
andi. r31, r31, MSR_DR | MSR_IR
|
|
|
|
beq kvm_emulate_mtsrin_reg1
|
|
|
|
|
|
|
|
SCRATCH_RESTORE
|
|
|
|
|
|
|
|
kvm_emulate_mtsrin_orig_ins:
|
|
|
|
nop
|
|
|
|
b kvm_emulate_mtsrin_branch
|
|
|
|
|
|
|
|
kvm_emulate_mtsrin_reg1:
|
|
|
|
/* rX >> 26 */
|
|
|
|
rlwinm r30,r0,6,26,29
|
|
|
|
|
|
|
|
kvm_emulate_mtsrin_reg2:
|
|
|
|
stw r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)
|
|
|
|
|
|
|
|
SCRATCH_RESTORE
|
|
|
|
|
|
|
|
/* Go back to caller */
|
|
|
|
kvm_emulate_mtsrin_branch:
|
|
|
|
b .
|
|
|
|
kvm_emulate_mtsrin_end:
|
|
|
|
|
|
|
|
.global kvm_emulate_mtsrin_branch_offs
|
|
|
|
kvm_emulate_mtsrin_branch_offs:
|
|
|
|
.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_mtsrin_reg1_offs
|
|
|
|
kvm_emulate_mtsrin_reg1_offs:
|
|
|
|
.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_mtsrin_reg2_offs
|
|
|
|
kvm_emulate_mtsrin_reg2_offs:
|
|
|
|
.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_mtsrin_orig_ins_offs
|
|
|
|
kvm_emulate_mtsrin_orig_ins_offs:
|
|
|
|
.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4
|
|
|
|
|
|
|
|
.global kvm_emulate_mtsrin_len
|
|
|
|
kvm_emulate_mtsrin_len:
|
|
|
|
.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4
|
2011-12-02 04:22:53 +08:00
|
|
|
|
2019-09-11 19:57:46 +08:00
|
|
|
#endif /* CONFIG_PPC_BOOK3S_32 */
|
|
|
|
|
/*
 * powerpc/kvm: Move kvm_tmp into .text, shrink to 64K
 *
 * In some configurations of KVM, guests binary patch themselves to
 * avoid/reduce trapping into the hypervisor. For some instructions this
 * requires replacing one instruction with a sequence of instructions.
 *
 * For those cases we need to write the sequence of instructions
 * somewhere and then patch the location of the original instruction to
 * branch to the sequence. That requires that the location of the
 * sequence be within 32MB of the original instruction.
 *
 * The current solution for this is that we create a 1MB array in BSS,
 * write sequences into there, and then free the remainder of the array.
 *
 * This has a few problems:
 *  - it confuses kmemleak.
 *  - it confuses lockdep.
 *  - it requires mapping kvm_tmp executable, which can cause adjacent
 *    areas to also be mapped executable if we're using 16M pages for
 *    the linear mapping.
 *  - the 32MB limit can be exceeded if the kernel is big enough,
 *    especially with STRICT_KERNEL_RWX enabled, which then prevents the
 *    patching from working at all.
 *
 * We can fix all those problems by making kvm_tmp just a region of
 * regular .text. However currently it's 1MB in size, and we don't want
 * to waste 1MB of text. In practice however I only see ~30KB of kvm_tmp
 * being used even for an allyesconfig. So shrink kvm_tmp to 64K, which
 * ought to be enough for everyone, and move it into .text.
 *
 * Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
 * Link: https://lore.kernel.org/r/20190911115746.12433-1-mpe@ellerman.id.au
 */
.balign 4
|
|
|
|
.global kvm_tmp
|
|
|
|
kvm_tmp:
|
|
|
|
.space (64 * 1024)
|
|
|
|
|
|
|
|
.global kvm_tmp_end
|
|
|
|
kvm_tmp_end:
|
|
|
|
|
2011-12-02 04:22:53 +08:00
|
|
|
.global kvm_template_end
|
|
|
|
kvm_template_end:
|