KVM/arm64 fixes for 6.4, take #1
- Plug a race in the stage-2 mapping code where the IPA and the PA would end up being out of sync

- Make better use of the bitmap API (bitmap_zero, bitmap_zalloc...)

- FP/SVE/SME documentation update, in the hope that this field becomes clearer...

- Add workaround for the usual Apple SEIS brokenness

- Random comment fixes

-----BEGIN PGP SIGNATURE-----
iQJDBAABCgAtFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAmRc+84PHG1hekBrZXJu
ZWwub3JnAAoJECPQ0LrRPXpD/c8P/jauxMTBh5fqwRuYjgJ31onJEL+S2nnHazLG
9yxdyx3TkPVrIwwg0qkiK5qftTj/UjRWIyTBr+cpfobq/WkzuioZs2pT04jw47/+
WE8IPRb64GJnQV2GvcgEIZgZ/JSVVHtbiPCojppuLbzAiBQ7lvXXKnzCIQOvck0C
E7vcYLAX8MXSDU+XYON4uajw8CJpWO3iiyjy4dRlikz2BZ6DRlwKsWGphoyV5jqC
1DsPyGLcPhxi/VS9QHfwEETDAyCLJ11EoHueO/XmW/0W+XZ4wdOkkv1O+LaK287Q
x5dbeXT5bVBQ0tjh4QVvlCvQ0KfKBIlR2ystFo44UVAlEGO4XbyxD4U+X5PiLbLb
B99ImftDAhPuH85ILEeElZxDy2DJC/3llOWTxIaY+6qUkA5JoCFQBWQG0Y2l2/Um
+yycu3FwnOamuYDWd6qHlRKQw0uVvUVhUokh5JUHM4xkLcwY0xknOcPLkrBRiyDP
ga9bNbb+ZbwIx8GKrIlKUlKwbwS0ssF705kYDkOnilfwpFdkqApdKYRA2kaImaVQ
fa0SSgFsD9g3rbnNpV9TFYCkniczB+CQDofjCAx4nSqmx8iyI686SoIrLZqjSuSH
J+0bZFNA0hrrW4y/ukY4b2wAOOiAWMU2CnYo3fTRUJhZF87P/3vwmdN84+by158z
un+4a/sw
=y6WB
-----END PGP SIGNATURE-----

Merge tag 'kvmarm-fixes-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
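As a side note on the first item above (an illustration, not part of the commit): the stage-2 map walker now recomputes the output PA from how far the walk has progressed, instead of advancing a shared cursor that could drift when a visit has to be retried. A minimal sketch of that calculation with made-up names follows; the real helper is stage2_map_walker_phys_addr() in the diff further down.

	/* Illustrative sketch only, not the kernel code. */
	static u64 sketch_walk_phys(u64 base_phys, u64 walk_start_ipa, u64 cur_ipa)
	{
		/* IPA and PA advance in lockstep, so they share one offset. */
		return base_phys + (cur_ipa - walk_start_ipa);
	}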
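Likewise for the bitmap item (again only a sketch with assumed names, not the kernel code): the VMID allocator in the diff below moves from open-coded kcalloc()/bitmap_clear()/kfree() to the dedicated <linux/bitmap.h> helpers, which handle the sizing, zeroing and freeing of the bitmap.

	#include <linux/bitmap.h>
	#include <linux/slab.h>

	#define NR_IDS	256	/* stand-in for NUM_USER_VMIDS */

	static unsigned long *id_map;

	static int map_init(void)
	{
		/* Was: kcalloc(BITS_TO_LONGS(NR_IDS), sizeof(*id_map), GFP_KERNEL) */
		id_map = bitmap_zalloc(NR_IDS, GFP_KERNEL);
		if (!id_map)
			return -ENOMEM;
		return 0;
	}

	static void map_reset(void)
	{
		/* Was: bitmap_clear(id_map, 0, NR_IDS) */
		bitmap_zero(id_map, NR_IDS);
	}

	static void map_exit(void)
	{
		/* Was: kfree(id_map); bitmap_free() pairs with bitmap_zalloc(). */
		bitmap_free(id_map);
	}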
commit 39428f6ea9
@@ -126,6 +126,10 @@
 #define APPLE_CPU_PART_M1_FIRESTORM_MAX	0x029
 #define APPLE_CPU_PART_M2_BLIZZARD	0x032
 #define APPLE_CPU_PART_M2_AVALANCHE	0x033
+#define APPLE_CPU_PART_M2_BLIZZARD_PRO	0x034
+#define APPLE_CPU_PART_M2_AVALANCHE_PRO	0x035
+#define APPLE_CPU_PART_M2_BLIZZARD_MAX	0x038
+#define APPLE_CPU_PART_M2_AVALANCHE_MAX	0x039
 
 #define AMPERE_CPU_PART_AMPERE1	0xAC3
 
@@ -181,6 +185,10 @@
 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
 #define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
 #define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
+#define MIDR_APPLE_M2_BLIZZARD_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_PRO)
+#define MIDR_APPLE_M2_AVALANCHE_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_PRO)
+#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
+#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
@@ -209,6 +209,7 @@ struct kvm_pgtable_visit_ctx {
 	kvm_pte_t			old;
 	void				*arg;
 	struct kvm_pgtable_mm_ops	*mm_ops;
+	u64				start;
 	u64				addr;
 	u64				end;
 	u32				level;
@@ -81,26 +81,34 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)

	fpsimd_kvm_prepare();

	/*
	 * We will check TIF_FOREIGN_FPSTATE just before entering the
	 * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
	 * FP_STATE_FREE if the flag set.
	 */
	vcpu->arch.fp_state = FP_STATE_HOST_OWNED;

	vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
		vcpu_set_flag(vcpu, HOST_SVE_ENABLED);

	/*
	 * We don't currently support SME guests but if we leave
	 * things in streaming mode then when the guest starts running
	 * FPSIMD or SVE code it may generate SME traps so as a
	 * special case if we are in streaming mode we force the host
	 * state to be saved now and exit streaming mode so that we
	 * don't have to handle any SME traps for valid guest
	 * operations. Do this for ZA as well for now for simplicity.
	 */
	if (system_supports_sme()) {
		vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
		if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
			vcpu_set_flag(vcpu, HOST_SME_ENABLED);

		/*
		 * If PSTATE.SM is enabled then save any pending FP
		 * state and disable PSTATE.SM. If we leave PSTATE.SM
		 * enabled and the guest does not enable SME via
		 * CPACR_EL1.SMEN then operations that should be valid
		 * may generate SME traps from EL1 to EL1 which we
		 * can't intercept and which would confuse the guest.
		 *
		 * Do the same for PSTATE.ZA in the case where there
		 * is state in the registers which has not already
		 * been saved, this is very unlikely to happen.
		 */
		if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
			vcpu->arch.fp_state = FP_STATE_FREE;
			fpsimd_save_and_flush_cpu_state();
@@ -177,9 +177,17 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	sve_guest = vcpu_has_sve(vcpu);
 	esr_ec = kvm_vcpu_trap_get_class(vcpu);
 
-	/* Don't handle SVE traps for non-SVE vcpus here: */
-	if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
+	/* Only handle traps the vCPU can support here: */
+	switch (esr_ec) {
+	case ESR_ELx_EC_FP_ASIMD:
+		break;
+	case ESR_ELx_EC_SVE:
+		if (!sve_guest)
+			return false;
+		break;
+	default:
 		return false;
+	}
 
 	/* Valid trap. Switch the context: */
 
@@ -58,8 +58,9 @@
 struct kvm_pgtable_walk_data {
 	struct kvm_pgtable_walker	*walker;
 
+	const u64			start;
 	u64				addr;
-	u64				end;
+	const u64			end;
 };
 
 static bool kvm_phys_is_valid(u64 phys)
@@ -201,6 +202,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
 		.old	= READ_ONCE(*ptep),
 		.arg	= data->walker->arg,
 		.mm_ops	= mm_ops,
+		.start	= data->start,
 		.addr	= data->addr,
 		.end	= data->end,
 		.level	= level,
@@ -293,6 +295,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
 		     struct kvm_pgtable_walker *walker)
 {
 	struct kvm_pgtable_walk_data walk_data = {
+		.start	= ALIGN_DOWN(addr, PAGE_SIZE),
 		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
 		.end	= PAGE_ALIGN(walk_data.addr + size),
 		.walker	= walker,
@@ -349,7 +352,7 @@ int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
 }
 
 struct hyp_map_data {
-	u64			phys;
+	const u64		phys;
 	kvm_pte_t		attr;
 };
 
@@ -407,13 +410,12 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
 static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 				    struct hyp_map_data *data)
 {
+	u64 phys = data->phys + (ctx->addr - ctx->start);
 	kvm_pte_t new;
-	u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
 
 	if (!kvm_block_mapping_supported(ctx, phys))
 		return false;
 
-	data->phys += granule;
 	new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
 	if (ctx->old == new)
 		return true;
@@ -576,7 +578,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
 }
 
 struct stage2_map_data {
-	u64				phys;
+	const u64			phys;
 	kvm_pte_t			attr;
 	u8				owner_id;
 
@@ -794,20 +796,43 @@ static bool stage2_pte_executable(kvm_pte_t pte)
 	return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
 }
 
+static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
+				       const struct stage2_map_data *data)
+{
+	u64 phys = data->phys;
+
+	/*
+	 * Stage-2 walks to update ownership data are communicated to the map
+	 * walker using an invalid PA. Avoid offsetting an already invalid PA,
+	 * which could overflow and make the address valid again.
+	 */
+	if (!kvm_phys_is_valid(phys))
+		return phys;
+
+	/*
+	 * Otherwise, work out the correct PA based on how far the walk has
+	 * gotten.
+	 */
+	return phys + (ctx->addr - ctx->start);
+}
+
 static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
 					struct stage2_map_data *data)
 {
+	u64 phys = stage2_map_walker_phys_addr(ctx, data);
+
 	if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
 		return false;
 
-	return kvm_block_mapping_supported(ctx, data->phys);
+	return kvm_block_mapping_supported(ctx, phys);
 }
 
 static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 				      struct stage2_map_data *data)
 {
 	kvm_pte_t new;
-	u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
+	u64 phys = stage2_map_walker_phys_addr(ctx, data);
+	u64 granule = kvm_granule_size(ctx->level);
 	struct kvm_pgtable *pgt = data->mmu->pgt;
 	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
 
@@ -841,8 +866,6 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 
 	stage2_make_pte(ctx, new);
 
-	if (kvm_phys_is_valid(phys))
-		data->phys += granule;
 	return 0;
 }
 
@@ -204,7 +204,7 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
 	 * Size Fault at level 0, as if exceeding PARange.
 	 *
 	 * Non-LPAE guests will only get the external abort, as there
-	 * is no way to to describe the ASF.
+	 * is no way to describe the ASF.
 	 */
 	if (vcpu_el1_is_32bit(vcpu) &&
 	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
@@ -616,6 +616,10 @@ static const struct midr_range broken_seis[] = {
 	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
 	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
 	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
 	{},
 };
 
@@ -47,7 +47,7 @@ static void flush_context(void)
 	int cpu;
 	u64 vmid;
 
-	bitmap_clear(vmid_map, 0, NUM_USER_VMIDS);
+	bitmap_zero(vmid_map, NUM_USER_VMIDS);
 
 	for_each_possible_cpu(cpu) {
 		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
@@ -182,8 +182,7 @@ int __init kvm_arm_vmid_alloc_init(void)
 	 */
 	WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
 	atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
-	vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS),
-			   sizeof(*vmid_map), GFP_KERNEL);
+	vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL);
 	if (!vmid_map)
 		return -ENOMEM;
 
@@ -192,5 +191,5 @@ int __init kvm_arm_vmid_alloc_init(void)
 
 void __init kvm_arm_vmid_alloc_free(void)
 {
-	kfree(vmid_map);
+	bitmap_free(vmid_map);
 }