From ac64115a66c18c01745bbd3c47a36b124e5fd8c0 Mon Sep 17 00:00:00 2001
From: Greg Kurz
Date: Thu, 14 Sep 2017 23:56:25 +0200
Subject: [PATCH 1/4] KVM: PPC: Fix oops when checking KVM_CAP_PPC_HTM

The following program causes a kernel oops:

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

main()
{
	int fd = open("/dev/kvm", O_RDWR);

	ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HTM);
}

This happens because when using the global KVM fd with
KVM_CHECK_EXTENSION, kvm_vm_ioctl_check_extension() gets called with a
NULL kvm argument, which gets dereferenced in is_kvmppc_hv_enabled().
Spotted while reading the code.

Let's use the hv_enabled fallback variable, like everywhere else in
this function.

Fixes: 23528bb21ee2 ("KVM: PPC: Introduce KVM_CAP_PPC_HTM")
Cc: stable@vger.kernel.org # v4.7+
Signed-off-by: Greg Kurz
Reviewed-by: David Gibson
Reviewed-by: Thomas Huth
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kvm/powerpc.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 3480faaf1ef8..ee279c7f4802 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -644,8 +644,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 #endif
 	case KVM_CAP_PPC_HTM:
-		r = cpu_has_feature(CPU_FTR_TM_COMP) &&
-		    is_kvmppc_hv_enabled(kvm);
+		r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
 		break;
 	default:
 		r = 0;

From 2cde3716321ec64a1faeaf567bd94100c7b4160f Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Tue, 10 Oct 2017 20:18:28 +1000
Subject: [PATCH 2/4] KVM: PPC: Book3S HV: POWER9 more doorbell fixes

- Add another case where msgsync is required.

- Required barrier sequence for global doorbells is msgsync ; lwsync

When msgsnd is used for IPIs to other cores, msgsync must be executed by
the target to order stores performed on the source before its msgsnd
(provided the source executes the appropriate sync).

Fixes: 1704a81ccebc ("KVM: PPC: Book3S HV: Use msgsnd for IPIs to other cores on POWER9")
Cc: stable@vger.kernel.org # v4.10+
Signed-off-by: Nicholas Piggin
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index ec69fa45d5a2..c700bedccaab 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1310,6 +1310,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	bne	3f
 BEGIN_FTR_SECTION
 	PPC_MSGSYNC
+	lwsync
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	lbz	r0, HSTATE_HOST_IPI(r13)
 	cmpwi	r0, 0
@@ -2788,6 +2789,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	PPC_MSGCLR(6)
 	/* see if it's a host IPI */
 	li	r3, 1
+BEGIN_FTR_SECTION
+	PPC_MSGSYNC
+	lwsync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	lbz	r0, HSTATE_HOST_IPI(r13)
 	cmpwi	r0, 0
 	bnelr
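The ordering requirement described in the patch above is two-sided: the
sender of a global doorbell executes a sync before its msgsnd, and the
receiver executes msgsync followed by lwsync, so that the sender's
stores are ordered before the receiver's own subsequent loads. As a
minimal sketch only, in kernel-style C with inline assembly and
hypothetical function names (the kernel's real code uses the
PPC_MSGSND/PPC_MSGSYNC macros rather than raw mnemonics, which need a
POWER9-aware assembler), the pairing looks like this:

/* Illustrative sketch, not part of the patch. */
static inline void doorbell_send(unsigned long msg)
{
	/* Sender: publish prior stores, then ring the doorbell. */
	asm volatile("sync\n\tmsgsnd %0" : : "r" (msg) : "memory");
}

static inline void doorbell_receive_barrier(void)
{
	/*
	 * Receiver: msgsync orders the sender's pre-msgsnd stores ahead of
	 * this thread's later accesses; lwsync then prevents our subsequent
	 * loads (such as the lbz of HSTATE_HOST_IPI above) from being
	 * reordered before it.
	 */
	asm volatile("msgsync\n\tlwsync" : : : "memory");
}
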
From 8f6a9f0d0604817f7c8d4376fd51718f1bf192ee Mon Sep 17 00:00:00 2001
From: Alexey Kardashevskiy
Date: Wed, 11 Oct 2017 16:00:34 +1100
Subject: [PATCH 3/4] KVM: PPC: Book3S: Protect kvmppc_gpa_to_ua() with SRCU

kvmppc_gpa_to_ua() accesses the KVM memory slot array via
srcu_dereference_check(), which produces RCU warnings like the one
below.

This extends the existing srcu_read_lock/unlock section to cover the
kvmppc_gpa_to_ua() call as well.

We did not hit this before because the lock is not needed for the
realmode handlers and hash guests use the realmode path all the time;
radix guests, however, are always redirected to the virtual mode
handlers, hence the warning.

[ 68.253798] ./include/linux/kvm_host.h:575 suspicious rcu_dereference_check() usage!
[ 68.253799] other info that might help us debug this:
[ 68.253802] rcu_scheduler_active = 2, debug_locks = 1
[ 68.253804] 1 lock held by qemu-system-ppc/6413:
[ 68.253806] #0: (&vcpu->mutex){+.+.}, at: [] vcpu_load+0x3c/0xc0 [kvm]
[ 68.253826] stack backtrace:
[ 68.253830] CPU: 92 PID: 6413 Comm: qemu-system-ppc Tainted: G W 4.14.0-rc3-00553-g432dcba58e9c-dirty #72
[ 68.253833] Call Trace:
[ 68.253839] [c000000fd3d9f790] [c000000000b7fcc8] dump_stack+0xe8/0x160 (unreliable)
[ 68.253845] [c000000fd3d9f7d0] [c0000000001924c0] lockdep_rcu_suspicious+0x110/0x180
[ 68.253851] [c000000fd3d9f850] [c0000000000e825c] kvmppc_gpa_to_ua+0x26c/0x2b0
[ 68.253858] [c000000fd3d9f8b0] [c00800000e3e1984] kvmppc_h_put_tce+0x12c/0x2a0 [kvm]

Fixes: 121f80ba68f1 ("KVM: PPC: VFIO: Add in-kernel acceleration for VFIO")
Cc: stable@vger.kernel.org # v4.12+
Signed-off-by: Alexey Kardashevskiy
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kvm/book3s_64_vio.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 8f2da8bba737..4dffa611376d 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -478,28 +478,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		return ret;

 	dir = iommu_tce_direction(tce);
+
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
-			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
-		return H_PARAMETER;
+			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
+		ret = H_PARAMETER;
+		goto unlock_exit;
+	}

 	entry = ioba >> stt->page_shift;

 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
-		if (dir == DMA_NONE) {
+		if (dir == DMA_NONE)
 			ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
 					stit->tbl, entry);
-		} else {
-			idx = srcu_read_lock(&vcpu->kvm->srcu);
+		else
 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
 					entry, ua, dir);
-			srcu_read_unlock(&vcpu->kvm->srcu, idx);
-		}

 		if (ret == H_SUCCESS)
 			continue;

 		if (ret == H_TOO_HARD)
-			return ret;
+			goto unlock_exit;

 		WARN_ON_ONCE(1);
 		kvmppc_clear_tce(stit->tbl, entry);
@@ -507,7 +509,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,

 	kvmppc_tce_put(stt, entry, tce);

-	return H_SUCCESS;
+unlock_exit:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
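The rule the patch above enforces is the generic one for the memslot
array: kvm_memslots(), and helpers built on it such as
kvmppc_gpa_to_ua(), must be called either inside an SRCU read-side
critical section on kvm->srcu or with kvm->slots_lock held, and the
virtual-mode H_PUT_TCE path had neither. As a minimal sketch of the
locking pattern only, with a hypothetical caller (the srcu_read_lock()
and srcu_read_unlock() calls are the real API; the surrounding function
is illustrative):

#include <linux/kvm_host.h>

/*
 * Hypothetical example, not part of the patch; kvmppc_gpa_to_ua() is
 * declared in the powerpc KVM headers.
 */
static long example_gpa_lookup(struct kvm *kvm, unsigned long gpa,
			       unsigned long *ua)
{
	long ret;
	int idx;

	/* Pin the current memslot array for the duration of the lookup. */
	idx = srcu_read_lock(&kvm->srcu);
	ret = kvmppc_gpa_to_ua(kvm, gpa, ua, NULL);
	srcu_read_unlock(&kvm->srcu, idx);

	return ret;
}
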
From ad98dd1a75ac6a8b68cd2f7bf4676b65734f2a43 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Mon, 16 Oct 2017 08:37:54 +1100
Subject: [PATCH 4/4] KVM: PPC: Book3S HV: Add more barriers in XIVE load/unload code

On POWER9 systems, we push the VCPU context onto the XIVE (eXternal
Interrupt Virtualization Engine) hardware when entering a guest, and
pull the context off the XIVE when exiting the guest.

The push is done with cache-inhibited stores, and the pull with
cache-inhibited loads.

Testing has revealed that it is possible (though very rare) for the
stores to get reordered with the loads so that we end up with the
guest VCPU context still loaded on the XIVE after we have exited the
guest. When that happens, it is possible for the same VCPU context to
then get loaded on another CPU, which causes the machine to checkstop.

To fix this, we add I/O barrier instructions (eieio) before and after
the push and pull operations.

As partial compensation for the potential slowdown caused by the extra
barriers, we remove the eieio instructions between the two stores in
the push operation, and between the two loads in the pull operation.
(The architecture requires loads to cache-inhibited, guarded storage
to be kept in order, and requires stores to cache-inhibited, guarded
storage likewise to be kept in order, but allows such loads and stores
to be reordered with respect to each other.)

Reported-by: Carol L Soto
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index c700bedccaab..42639fba89e8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -989,13 +989,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	beq	no_xive
 	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
 	li	r9, TM_QW1_OS
-	stdcix	r11,r9,r10
 	eieio
+	stdcix	r11,r9,r10
 	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
 	li	r9, TM_QW1_OS + TM_WORD2
 	stwcix	r11,r9,r10
 	li	r9, 1
 	stw	r9, VCPU_XIVE_PUSHED(r4)
+	eieio
 no_xive:
 #endif /* CONFIG_KVM_XICS */

@@ -1401,8 +1402,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
 	cmpldi	cr0, r10, 0
 	beq	1f
 	/* First load to pull the context, we ignore the value */
-	lwzx	r11, r7, r10
 	eieio
+	lwzx	r11, r7, r10
 	/* Second load to recover the context state (Words 0 and 1) */
 	ldx	r11, r6, r10
 	b	3f
@@ -1410,8 +1411,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
 	cmpldi	cr0, r10, 0
 	beq	1f
 	/* First load to pull the context, we ignore the value */
-	lwzcix	r11, r7, r10
 	eieio
+	lwzcix	r11, r7, r10
 	/* Second load to recover the context state (Words 0 and 1) */
 	ldcix	r11, r6, r10
 3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
@@ -1421,6 +1422,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
 	stw	r10, VCPU_XIVE_PUSHED(r9)
 	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
 	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
+	eieio
 1:
 #endif /* CONFIG_KVM_XICS */
 	/* Save more register state */