Merge tag 'signed-for-3.15' of git://github.com/agraf/linux-2.6 into kvm-master

Patch queue for 3.15 - 2014-05-12

This request includes a few bug fixes that really shouldn't wait for the next
release.

It fixes KVM on 32-bit PowerPC when built as a module. It also fixes PV KVM
acceleration when NX is honored by the host. Furthermore, we fix transactional
memory support and NUMA support on HV KVM.
Committed by Paolo Bonzini on 2014-05-13 18:15:16 +02:00
commit 5367742ad5
7 changed files with 127 additions and 8 deletions

arch/powerpc/include/asm/sections.h

@@ -39,6 +39,17 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
 		(unsigned long)_stext < end;
 }
 
+static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_KVM_GUEST
+	extern char kvm_tmp[];
+	return start < (unsigned long)kvm_tmp &&
+		(unsigned long)&kvm_tmp[1024 * 1024] < end;
+#else
+	return 0;
+#endif
+}
+
 #undef dereference_function_descriptor
 static inline void *dereference_function_descriptor(void *ptr)
 {
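The helper added above follows the style of overlaps_kernel_text(): it reports whether the mapping range [start, end) spans the whole 1 MiB kvm_tmp buffer, so the caller can leave that mapping executable. Below is a minimal user-space sketch of the same predicate, with an assumed demo buffer standing in for kvm_tmp[]; it is illustrative only and not part of the patch.

/*
 * Illustrative sketch only, not part of the patch: the same containment test
 * as overlaps_kvm_tmp(), applied to a small demo buffer instead of the real
 * 1 MiB kvm_tmp array.  It returns non-zero when [start, end) spans the whole
 * buffer, which is when the caller wants to keep the mapping executable.
 */
#include <stdio.h>

static char demo_tmp[4096];             /* assumed stand-in for kvm_tmp[] */

static int overlaps_demo_tmp(unsigned long start, unsigned long end)
{
        return start < (unsigned long)demo_tmp &&
                (unsigned long)&demo_tmp[sizeof(demo_tmp)] < end;
}

int main(void)
{
        unsigned long buf = (unsigned long)demo_tmp;

        /* A mapping step that covers the buffer with room on both sides. */
        printf("covering range:  %d\n",
               overlaps_demo_tmp(buf - 64, buf + sizeof(demo_tmp) + 64));
        /* A mapping step that lies entirely below the buffer. */
        printf("unrelated range: %d\n",
               overlaps_demo_tmp(buf - 8192, buf - 4096));
        return 0;
}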

arch/powerpc/kernel/kvm.c

@@ -74,7 +74,7 @@
 #define KVM_INST_MTSRIN		0x7c0001e4
 
 static bool kvm_patching_worked = true;
-static char kvm_tmp[1024 * 1024];
+char kvm_tmp[1024 * 1024];
 static int kvm_tmp_index;
 
 static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
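Dropping the static qualifier here is what lets the extern char kvm_tmp[] declaration added to the header in the first hunk bind to this array: with internal linkage the symbol would not be visible outside this file, and the overlap check could not be used from the hash-table setup code in the last hunk of this series.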

arch/powerpc/kvm/book3s.c

@@ -886,7 +886,7 @@ static int kvmppc_book3s_init(void)
 	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 	if (r)
 		return r;
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	r = kvmppc_book3s_init_pr();
 #endif
 	return r;
@@ -895,7 +895,7 @@ static int kvmppc_book3s_init(void)
 
 static void kvmppc_book3s_exit(void)
 {
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	kvmppc_book3s_exit_pr();
 #endif
 	kvm_exit();
@@ -905,7 +905,7 @@ module_init(kvmppc_book3s_init);
 module_exit(kvmppc_book3s_exit);
 
 /* On 32bit this is our one and only kernel module */
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 MODULE_ALIAS_MISCDEV(KVM_MINOR);
 MODULE_ALIAS("devname:kvm");
 #endif
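The symbol rename matters for modular builds: KVM_BOOK3S_32 is a tristate Kconfig option, and when it is set to 'm' kbuild defines CONFIG_KVM_BOOK3S_32_MODULE rather than CONFIG_KVM_BOOK3S_32, so the bare #ifdef above never fired for a modular kernel, which matches the "built as module" breakage called out in the commit message. Below is a small stand-alone sketch of that preprocessor behaviour, using an assumed example symbol rather than the real Kconfig names; the same substitution appears again in the book3s_pr.c hunks further down.

/*
 * Illustrative sketch only (assumed example symbol, not the real Kconfig
 * names): a tristate option set to 'm' reaches C code as CONFIG_<SYM>_MODULE,
 * so a bare #ifdef CONFIG_<SYM> selects the '=y' case only.
 */
#include <stdio.h>

#define CONFIG_EXAMPLE_FEATURE_MODULE 1 /* what '=m' generates */
/* #define CONFIG_EXAMPLE_FEATURE 1 */  /* what '=y' would generate */

int main(void)
{
#ifdef CONFIG_EXAMPLE_FEATURE
        puts("feature hooks compiled in");
#else
        puts("bare #ifdef misses the '=m' build");
#endif
        return 0;
}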

arch/powerpc/kvm/book3s_hv_rm_mmu.c

@@ -234,7 +234,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		pte_size = psize;
 		pte = lookup_linux_pte_and_update(pgdir, hva, writing,
 						  &pte_size);
-		if (pte_present(pte)) {
+		if (pte_present(pte) && !pte_numa(pte)) {
 			if (writing && !pte_write(pte))
 				/* make the actual HPTE be read-only */
 				ptel = hpte_make_readonly(ptel);
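The extra !pte_numa() test keeps the real-mode H_ENTER path from installing a hash PTE for a page whose Linux PTE has been marked for a NUMA-balancing hinting fault; such an entry can still look present, but the access is meant to fault so the page can be migrated. A rough user-space sketch of the guard's intent follows, with made-up flag bits rather than the kernel's _PAGE_* definitions.

/*
 * Illustrative sketch only: made-up flag bits, not the kernel's _PAGE_*
 * definitions.  A "present" entry that carries the NUMA-hinting mark must be
 * rejected so the normal fault path runs instead of mapping it directly.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PRESENT 0x1u
#define DEMO_NUMA    0x2u       /* assumed stand-in for the NUMA-hinting mark */

static bool may_insert_hpte(unsigned int pte_flags)
{
        return (pte_flags & DEMO_PRESENT) && !(pte_flags & DEMO_NUMA);
}

int main(void)
{
        printf("plain present PTE -> %d\n", may_insert_hpte(DEMO_PRESENT));
        printf("NUMA-hinting PTE  -> %d\n",
               may_insert_hpte(DEMO_PRESENT | DEMO_NUMA));
        return 0;
}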

arch/powerpc/kvm/book3s_hv_rmhandlers.S

@@ -1323,6 +1323,110 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	mr	r3, r9
 	bl	kvmppc_save_fp
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+	b	2f
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+	/* Turn on TM. */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+
+	ld	r5, VCPU_MSR(r9)
+	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+	beq	1f	/* TM not active in guest. */
+
+	li	r3, TM_CAUSE_KVM_RESCHED
+
+	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+	li	r5, 0
+	mtmsrd	r5, 1
+
+	/* All GPRs are volatile at this point. */
+	TRECLAIM(R3)
+
+	/* Temporarily store r13 and r9 so we have some regs to play with */
+	SET_SCRATCH0(r13)
+	GET_PACA(r13)
+	std	r9, PACATMSCRATCH(r13)
+	ld	r9, HSTATE_KVM_VCPU(r13)
+
+	/* Get a few more GPRs free. */
+	std	r29, VCPU_GPRS_TM(29)(r9)
+	std	r30, VCPU_GPRS_TM(30)(r9)
+	std	r31, VCPU_GPRS_TM(31)(r9)
+
+	/* Save away PPR and DSCR soon so don't run with user values. */
+	mfspr	r31, SPRN_PPR
+	HMT_MEDIUM
+	mfspr	r30, SPRN_DSCR
+	ld	r29, HSTATE_DSCR(r13)
+	mtspr	SPRN_DSCR, r29
+
+	/* Save all but r9, r13 & r29-r31 */
+	reg = 0
+	.rept	29
+	.if (reg != 9) && (reg != 13)
+	std	reg, VCPU_GPRS_TM(reg)(r9)
+	.endif
+	reg = reg + 1
+	.endr
+	/* ... now save r13 */
+	GET_SCRATCH0(r4)
+	std	r4, VCPU_GPRS_TM(13)(r9)
+	/* ... and save r9 */
+	ld	r4, PACATMSCRATCH(r13)
+	std	r4, VCPU_GPRS_TM(9)(r9)
+
+	/* Reload stack pointer and TOC. */
+	ld	r1, HSTATE_HOST_R1(r13)
+	ld	r2, PACATOC(r13)
+
+	/* Set MSR RI now we have r1 and r13 back. */
+	li	r5, MSR_RI
+	mtmsrd	r5, 1
+
+	/* Save away checkpinted SPRs. */
+	std	r31, VCPU_PPR_TM(r9)
+	std	r30, VCPU_DSCR_TM(r9)
+	mflr	r5
+	mfcr	r6
+	mfctr	r7
+	mfspr	r8, SPRN_AMR
+	mfspr	r10, SPRN_TAR
+	std	r5, VCPU_LR_TM(r9)
+	stw	r6, VCPU_CR_TM(r9)
+	std	r7, VCPU_CTR_TM(r9)
+	std	r8, VCPU_AMR_TM(r9)
+	std	r10, VCPU_TAR_TM(r9)
+
+	/* Restore r12 as trap number. */
+	lwz	r12, VCPU_TRAP(r9)
+
+	/* Save FP/VSX. */
+	addi	r3, r9, VCPU_FPRS_TM
+	bl	.store_fp_state
+	addi	r3, r9, VCPU_VRS_TM
+	bl	.store_vr_state
+	mfspr	r6, SPRN_VRSAVE
+	stw	r6, VCPU_VRSAVE_TM(r9)
+1:
+	/*
+	 * We need to save these SPRs after the treclaim so that the software
+	 * error code is recorded correctly in the TEXASR.  Also the user may
+	 * change these outside of a transaction, so they must always be
+	 * context switched.
+	 */
+	mfspr	r5, SPRN_TFHAR
+	mfspr	r6, SPRN_TFIAR
+	mfspr	r7, SPRN_TEXASR
+	std	r5, VCPU_TFHAR(r9)
+	std	r6, VCPU_TFIAR(r9)
+	std	r7, VCPU_TEXASR(r9)
+2:
+#endif
+
 	/* Increment yield count if they have a VPA */
 	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
 	cmpdi	r8, 0
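The block added above runs on guest exit, before host state is restored: it enables TM in the MSR, checks the guest MSR's transaction-state (TS) field and, if a transaction was live, issues TRECLAIM and saves the checkpointed GPRs, FP/VMX registers and the TM SPRs (TFHAR, TFIAR, TEXASR) into the vcpu. The rldicl./beq pair near the top just extracts the two TS bits and skips the reclaim when they are zero; here is a small C sketch of that field test, with the bit position assumed for illustration rather than taken from the kernel headers.

/*
 * Illustrative sketch only: the bit position below is an assumption, not
 * copied from the kernel headers.  A non-zero TS field means a transaction
 * was active (suspended or transactional), so the reclaim path is taken.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MSR_TS_SHIFT 33    /* assumed location of the low TS bit */

static unsigned int msr_ts(uint64_t msr)
{
        return (unsigned int)((msr >> DEMO_MSR_TS_SHIFT) & 0x3);
}

int main(void)
{
        uint64_t idle_msr = 0;
        uint64_t busy_msr = (uint64_t)1 << DEMO_MSR_TS_SHIFT;

        printf("no transaction:   TS=%u -> skip reclaim\n", msr_ts(idle_msr));
        printf("transaction live: TS=%u -> reclaim and save TM state\n",
               msr_ts(busy_msr));
        return 0;
}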

arch/powerpc/kvm/book3s_pr.c

@@ -1153,7 +1153,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 		goto free_vcpu;
 	vcpu->arch.book3s = vcpu_book3s;
 
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	vcpu->arch.shadow_vcpu =
 		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
 	if (!vcpu->arch.shadow_vcpu)
@@ -1198,7 +1198,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 uninit_vcpu:
 	kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	kfree(vcpu->arch.shadow_vcpu);
 free_vcpu3s:
 #endif
@@ -1215,7 +1215,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 	kvm_vcpu_uninit(vcpu);
 
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	kfree(vcpu->arch.shadow_vcpu);
 #endif
 	vfree(vcpu_book3s);

arch/powerpc/mm/hash_utils_64.c

@@ -207,6 +207,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
 
+		/* Make kvm guest trampolines executable */
+		if (overlaps_kvm_tmp(vaddr, vaddr + step))
+			tprot &= ~HPTE_R_N;
+
 		/*
 		 * If relocatable, check if it overlaps interrupt vectors that
 		 * are copied down to real 0. For relocatable kernel