Merge branch 'kvm/cortex-a76-erratum-1165522' into aarch64/for-next/core

Pull in KVM workaround for A76 erratum #1165522.

Conflicts:
	arch/arm64/include/asm/cpucaps.h
commit bc84a2d106
Author: Will Deacon
Date:   2018-12-10 18:53:03 +00:00
11 changed files with 129 additions and 21 deletions

Documentation/arm64/silicon-errata.txt

@@ -57,6 +57,7 @@ stable kernels.
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
+| ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |

arch/arm/include/asm/kvm_host.h

@@ -285,7 +285,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-static inline bool kvm_arch_check_sve_has_vhe(void) { return true; }
+static inline bool kvm_arch_requires_vhe(void) { return false; }
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}

arch/arm64/Kconfig

@@ -504,6 +504,18 @@ config ARM64_ERRATUM_1188873
	  If unsure, say Y.

+config ARM64_ERRATUM_1165522
+	bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+	default y
+	help
+	  This option adds a workaround for ARM Cortex-A76 erratum 1165522.
+
+	  Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could end up with
+	  corrupted TLBs by speculating an AT instruction during a guest
+	  context switch.
+
+	  If unsure, say Y.
config CAVIUM_ERRATUM_22375
	bool "Cavium erratum 22375, 24313"
	default y

arch/arm64/include/asm/cpucaps.h

@@ -55,7 +55,8 @@
#define ARM64_SSBS 34
#define ARM64_WORKAROUND_1188873 35
#define ARM64_HAS_SB 36
+#define ARM64_WORKAROUND_1165522 37

-#define ARM64_NCAPS 37
+#define ARM64_NCAPS 38
#endif /* __ASM_CPUCAPS_H */

arch/arm64/include/asm/kvm_host.h

@@ -422,7 +422,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
}
}
-static inline bool kvm_arch_check_sve_has_vhe(void)
+static inline bool kvm_arch_requires_vhe(void)
{
	/*
	 * The Arm architecture specifies that implementation of SVE
@@ -430,9 +430,13 @@ static inline bool kvm_arch_check_sve_has_vhe(void)
	 * relies on this when SVE is present:
	 */
	if (system_supports_sve())
-		return has_vhe();
-	else
-		return true;
+		return true;
+
+	/* Some implementations have defects that confine them to VHE */
+	if (cpus_have_cap(ARM64_WORKAROUND_1165522))
+		return true;
+
+	return false;
}
static inline void kvm_arch_hardware_unsetup(void) {}

arch/arm64/include/asm/kvm_hyp.h

@@ -20,6 +20,7 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
+#include <asm/sysreg.h>
#define __hyp_text __section(.hyp.text) notrace
@@ -163,6 +164,13 @@ static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
{
	write_sysreg(kvm->arch.vtcr, vtcr_el2);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+
+	/*
+	 * ARM erratum 1165522 requires the actual execution of the above
+	 * before we can switch to the EL1/EL0 translation regime used by
+	 * the guest.
+	 */
+	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
}
#endif /* __ARM64_KVM_HYP_H__ */
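
For readers unfamiliar with the pattern: ALTERNATIVE("nop", "isb", cap) emits a nop that the boot-time alternatives framework patches into an isb on CPUs where the capability is set. A minimal sketch of the effective behaviour (illustrative only; load_guest_stage2_sketch() is a hypothetical helper, not kernel code):

static inline void load_guest_stage2_sketch(struct kvm *kvm, bool affected)
{
	write_sysreg(kvm->arch.vtcr, vtcr_el2);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
	if (affected)	/* models the boot-time nop -> isb patching */
		isb();	/* writes complete before the regime switch */
}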

arch/arm64/include/asm/pgtable-hwdef.h

@@ -228,6 +228,8 @@
#define TCR_TxSZ_WIDTH 6
#define TCR_T0SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET)
+#define TCR_EPD0_SHIFT 7
+#define TCR_EPD0_MASK (UL(1) << TCR_EPD0_SHIFT)
#define TCR_IRGN0_SHIFT 8
#define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NC (UL(0) << TCR_IRGN0_SHIFT)
@@ -235,6 +237,8 @@
#define TCR_IRGN0_WT (UL(2) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WBnWA (UL(3) << TCR_IRGN0_SHIFT)
+#define TCR_EPD1_SHIFT 23
+#define TCR_EPD1_MASK (UL(1) << TCR_EPD1_SHIFT)
#define TCR_IRGN1_SHIFT 24
#define TCR_IRGN1_MASK (UL(3) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_NC (UL(0) << TCR_IRGN1_SHIFT)
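
The new EPD masks are what the tlb.c change below relies on: setting both bits stops the EL1 page-table walker from allocating new TLB entries. A sketch of the intended use (kernel context assumed; this mirrors the __tlb_switch_to_guest_vhe() hunk rather than adding anything new):

	u64 saved = read_sysreg_el1(tcr);	/* save the live TCR_EL1 value */

	write_sysreg_el1(saved | TCR_EPD0_MASK | TCR_EPD1_MASK, tcr);
	isb();					/* stage-1 walks now blocked */
	/* ... TLB maintenance runs here ... */
	write_sysreg_el1(saved, tcr);		/* restore TCR_EL1 */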

arch/arm64/kernel/cpu_errata.c

@@ -748,6 +748,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		.capability = ARM64_WORKAROUND_1188873,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1165522
+	{
+		/* Cortex-A76 r0p0 to r2p0 */
+		.desc = "ARM erratum 1165522",
+		.capability = ARM64_WORKAROUND_1165522,
+		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+	},
+#endif
	{
	}
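
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0) matches any Cortex-A76 whose variant/revision pair lies between r0p0 and r2p0. A rough standalone equivalent of that test, for illustration only (is_affected_a76() is a hypothetical helper, not what the macro expands to):

static bool is_affected_a76(u32 midr)
{
	u32 var_rev = midr & (MIDR_VARIANT_MASK | MIDR_REVISION_MASK);

	return (midr & MIDR_CPU_MODEL_MASK) == MIDR_CORTEX_A76 &&
	       var_rev >= MIDR_CPU_VAR_REV(0, 0) &&
	       var_rev <= MIDR_CPU_VAR_REV(2, 0);
}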

arch/arm64/kvm/hyp/switch.c

@@ -143,6 +143,14 @@ static void deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+
+	/*
+	 * ARM erratum 1165522 requires the actual execution of the above
+	 * before we can switch to the EL2/EL0 translation regime used by
+	 * the host.
+	 */
+	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
+
	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}
@@ -499,8 +507,19 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
	sysreg_save_host_state_vhe(host_ctxt);

-	__activate_traps(vcpu);
+	/*
+	 * ARM erratum 1165522 requires us to configure both stage 1 and
+	 * stage 2 translation for the guest context before we clear
+	 * HCR_EL2.TGE.
+	 *
+	 * We have already configured the guest's stage 1 translation in
+	 * kvm_vcpu_load_sysregs above. We must now call __activate_vm
+	 * before __activate_traps, because __activate_vm configures
+	 * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
+	 * (among other things).
+	 */
	__activate_vm(vcpu->kvm);
+	__activate_traps(vcpu);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);
@@ -545,8 +564,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
	__sysreg_save_state_nvhe(host_ctxt);

-	__activate_traps(vcpu);
	__activate_vm(kern_hyp_va(vcpu->kvm));
+	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

arch/arm64/kvm/hyp/tlb.c

@@ -15,20 +15,54 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/irqflags.h>
+
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
+#include <asm/tlbflush.h>
+
+struct tlb_inv_context {
+	unsigned long flags;
+	u64 tcr;
+	u64 sctlr;
+};

-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
+						 struct tlb_inv_context *cxt)
{
	u64 val;

+	local_irq_save(cxt->flags);
+
+	if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+		/*
+		 * For CPUs that are affected by ARM erratum 1165522, we
+		 * cannot trust stage-1 to be in a correct state at that
+		 * point. Since we do not want to force a full load of the
+		 * vcpu state, we prevent the EL1 page-table walker from
+		 * allocating new TLBs. This is done by setting the EPD bits
+		 * in the TCR_EL1 register. We also need to prevent it from
+		 * allocating IPA->PA walks, so we enable the S1 MMU...
+		 */
+		val = cxt->tcr = read_sysreg_el1(tcr);
+		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
+		write_sysreg_el1(val, tcr);
+		val = cxt->sctlr = read_sysreg_el1(sctlr);
+		val |= SCTLR_ELx_M;
+		write_sysreg_el1(val, sctlr);
+	}
+
	/*
	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
	 * most TLB operations target EL2/EL0. In order to affect the
	 * guest TLBs (EL1/EL0), we need to change one of these two
	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
	 * let's flip TGE before executing the TLB operation.
+	 *
+	 * ARM erratum 1165522 requires some special handling (again),
+	 * as we need to make sure both stages of translation are in
+	 * place before clearing TGE. __load_guest_stage2() already
+	 * has an ISB in order to deal with this.
	 */
	__load_guest_stage2(kvm);
	val = read_sysreg(hcr_el2);
@@ -37,7 +71,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
	isb();
}

-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
+						  struct tlb_inv_context *cxt)
{
	__load_guest_stage2(kvm);
	isb();
@@ -48,7 +83,8 @@ static hyp_alternate_select(__tlb_switch_to_guest,
			    __tlb_switch_to_guest_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
+						struct tlb_inv_context *cxt)
{
	/*
	 * We're done with the TLB operation, let's restore the host's
@@ -56,9 +92,19 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
	 */
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();

+	if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+		/* Restore the registers to what they were */
+		write_sysreg_el1(cxt->tcr, tcr);
+		write_sysreg_el1(cxt->sctlr, sctlr);
+	}
+
+	local_irq_restore(cxt->flags);
}

-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
+						 struct tlb_inv_context *cxt)
{
	write_sysreg(0, vttbr_el2);
}
@@ -70,11 +116,13 @@ static hyp_alternate_select(__tlb_switch_to_host,
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
+	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &cxt);
	/*
	 * We could do so much better if we had the VA as well.
@@ -117,36 +165,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
	if (!has_vhe() && icache_is_vpipt())
		__flush_icache_all();

-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, &cxt);
}
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
+	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, &cxt);
}
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
-	__tlb_switch_to_guest()(kvm);
+	__tlb_switch_to_guest()(kvm, &cxt);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
-	__tlb_switch_to_host()(kvm);
+	__tlb_switch_to_host()(kvm, &cxt);
}
void __hyp_text __kvm_flush_vm_context(void)
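
A note on the __tlb_switch_to_guest()(kvm, &cxt) double call above: hyp_alternate_select() defines a small function that returns either the VHE or the non-VHE variant, with the choice patched in at boot. A rough model of the idea, for illustration only (the real macro uses the alternatives framework rather than testing has_vhe() on every call):

typedef void (*tlb_switch_fn)(struct kvm *, struct tlb_inv_context *);

static tlb_switch_fn __tlb_switch_to_guest(void)
{
	return has_vhe() ? __tlb_switch_to_guest_vhe
			 : __tlb_switch_to_guest_nvhe;
}

Hence the call sites first call the selector, then invoke the function pointer it returns.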

virt/kvm/arm/arm.c

@@ -1640,8 +1640,10 @@ int kvm_arch_init(void *opaque)
		return -ENODEV;
	}

-	if (!kvm_arch_check_sve_has_vhe()) {
-		kvm_pr_unimpl("SVE system without VHE unsupported. Broken cpu?");
+	in_hyp_mode = is_kernel_in_hyp_mode();
+
+	if (!in_hyp_mode && kvm_arch_requires_vhe()) {
+		kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
		return -ENODEV;
	}
@@ -1657,8 +1659,6 @@ int kvm_arch_init(void *opaque)
	if (err)
		return err;

-	in_hyp_mode = is_kernel_in_hyp_mode();
-
	if (!in_hyp_mode) {
		err = init_hyp_mode();
		if (err)