iommu/arm-smmu: Add context init implementation hook

Allocating and initialising a context for a domain is another point
where certain implementations are known to want special behaviour.
Currently the other half of the Cavium workaround comes into play here,
so let's finish the job to get the whole thing right out of the way.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
commit ba7e4a08bb (parent 62b993a36e)
Author:    Robin Murphy <robin.murphy@arm.com>
Date:      2019-08-15 19:37:37 +01:00
Committer: Will Deacon

 3 files changed, 87 insertions(+), 48 deletions(-)
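To make the new hook concrete before reading the diff: an implementation fills in .init_context in its struct arm_smmu_impl, and the core driver invokes it once a context bank has been allocated but before the io-pgtable is set up; a non-zero return aborts domain initialisation. A minimal sketch of a hypothetical consumer (the example_* names are invented for illustration and are not part of this commit):

#include "arm-smmu.h"

/* Hypothetical hook: adjust the domain config chosen by the core driver. */
static int example_init_context(struct arm_smmu_domain *smmu_domain)
{
	/* e.g. bias stage 2 VMIDs into an implementation-private range */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		smmu_domain->cfg.vmid += 0x100;

	return 0;	/* non-zero would abort arm_smmu_init_domain_context() */
}

static const struct arm_smmu_impl example_impl = {
	.init_context = example_init_context,
};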

diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c

@@ -48,25 +48,60 @@ const struct arm_smmu_impl calxeda_impl = {
 };
 
+struct cavium_smmu {
+	struct arm_smmu_device smmu;
+	u32 id_base;
+};
+
 static int cavium_cfg_probe(struct arm_smmu_device *smmu)
 {
 	static atomic_t context_count = ATOMIC_INIT(0);
+	struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
+
 	/*
 	 * Cavium CN88xx erratum #27704.
 	 * Ensure ASID and VMID allocation is unique across all SMMUs in
 	 * the system.
 	 */
-	smmu->cavium_id_base = atomic_fetch_add(smmu->num_context_banks,
-					       &context_count);
+	cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
 	dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
 	return 0;
 }
 
+int cavium_init_context(struct arm_smmu_domain *smmu_domain)
+{
+	struct cavium_smmu *cs = container_of(smmu_domain->smmu,
+					      struct cavium_smmu, smmu);
+
+	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
+		smmu_domain->cfg.vmid += cs->id_base;
+	else
+		smmu_domain->cfg.asid += cs->id_base;
+
+	return 0;
+}
+
 const struct arm_smmu_impl cavium_impl = {
 	.cfg_probe = cavium_cfg_probe,
+	.init_context = cavium_init_context,
 };
 
+struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+	struct cavium_smmu *cs;
+
+	cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL);
+	if (!cs)
+		return ERR_PTR(-ENOMEM);
+
+	cs->smmu = *smmu;
+	cs->smmu.impl = &cavium_impl;
+	devm_kfree(smmu->dev, smmu);
+	return &cs->smmu;
+}
+
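Note the subclassing idiom in cavium_smmu_impl_init() above: rather than carrying Cavium-specific fields in struct arm_smmu_device itself, it allocates a larger cavium_smmu wrapper embedding the core structure, copies across the partially-probed state, installs the Cavium impl ops, and frees the original allocation. The private id_base is later recovered via container_of() in the callbacks, which is what lets cavium_id_base be dropped from the core structure in arm-smmu.h below.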
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
@@ -126,8 +161,7 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
 		smmu->impl = &arm_mmu500_impl;
 		break;
 	case CAVIUM_SMMUV2:
-		smmu->impl = &cavium_impl;
-		break;
+		return cavium_smmu_impl_init(smmu);
 	default:
 		break;
 	}
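Because the Cavium path now returns a pointer into a fresh allocation, arm_smmu_impl_init() can no longer be assumed to mutate smmu in place; its caller must continue with whichever device pointer it returns.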

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c

@@ -27,7 +27,6 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/io-64-nonatomic-hi-lo.h>
-#include <linux/io-pgtable.h>
 #include <linux/iopoll.h>
 #include <linux/init.h>
 #include <linux/moduleparam.h>
@@ -111,44 +110,6 @@ struct arm_smmu_master_cfg {
 #define for_each_cfg_sme(fw, i, idx) \
 	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
 
-enum arm_smmu_context_fmt {
-	ARM_SMMU_CTX_FMT_NONE,
-	ARM_SMMU_CTX_FMT_AARCH64,
-	ARM_SMMU_CTX_FMT_AARCH32_L,
-	ARM_SMMU_CTX_FMT_AARCH32_S,
-};
-
-struct arm_smmu_cfg {
-	u8				cbndx;
-	u8				irptndx;
-	union {
-		u16			asid;
-		u16			vmid;
-	};
-	enum arm_smmu_cbar_type		cbar;
-	enum arm_smmu_context_fmt	fmt;
-};
-#define INVALID_IRPTNDX			0xff
-
-enum arm_smmu_domain_stage {
-	ARM_SMMU_DOMAIN_S1 = 0,
-	ARM_SMMU_DOMAIN_S2,
-	ARM_SMMU_DOMAIN_NESTED,
-	ARM_SMMU_DOMAIN_BYPASS,
-};
-
-struct arm_smmu_domain {
-	struct arm_smmu_device		*smmu;
-	struct io_pgtable_ops		*pgtbl_ops;
-	const struct iommu_gather_ops	*tlb_ops;
-	struct arm_smmu_cfg		cfg;
-	enum arm_smmu_domain_stage	stage;
-	bool				non_strict;
-	struct mutex			init_mutex; /* Protects smmu pointer */
-	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
-	struct iommu_domain		domain;
-};
-
 static bool using_legacy_binding, using_generic_binding;
 
 static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
@@ -749,9 +710,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	}
 
 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
-		cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
+		cfg->vmid = cfg->cbndx + 1;
 	else
-		cfg->asid = cfg->cbndx + smmu->cavium_id_base;
+		cfg->asid = cfg->cbndx;
+
+	smmu_domain->smmu = smmu;
+	if (smmu->impl && smmu->impl->init_context) {
+		ret = smmu->impl->init_context(smmu_domain);
+		if (ret)
+			goto out_unlock;
+	}
 
 	pgtbl_cfg = (struct io_pgtable_cfg) {
 		.pgsize_bitmap	= smmu->pgsize_bitmap,
@@ -765,7 +733,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (smmu_domain->non_strict)
 		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
 
-	smmu_domain->smmu = smmu;
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 	if (!pgtbl_ops) {
 		ret = -ENOMEM;
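The smmu_domain->smmu assignment is deliberately moved ahead of the hook call (its old site after the page-table setup is deleted just above): cavium_init_context() recovers its private data via container_of() on smmu_domain->smmu, so the back-pointer must already be valid by the time the hook runs.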

diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h

@@ -14,6 +14,7 @@
 #include <linux/bits.h>
 #include <linux/clk.h>
 #include <linux/device.h>
+#include <linux/io-pgtable.h>
 #include <linux/iommu.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
@@ -270,14 +271,50 @@ struct arm_smmu_device {
 	struct clk_bulk_data		*clks;
 	int				num_clks;
 
-	u32				cavium_id_base; /* Specific to Cavium */
-
 	spinlock_t			global_sync_lock;
 
 	/* IOMMU core code handle */
 	struct iommu_device		iommu;
 };
 
+enum arm_smmu_context_fmt {
+	ARM_SMMU_CTX_FMT_NONE,
+	ARM_SMMU_CTX_FMT_AARCH64,
+	ARM_SMMU_CTX_FMT_AARCH32_L,
+	ARM_SMMU_CTX_FMT_AARCH32_S,
+};
+
+struct arm_smmu_cfg {
+	u8				cbndx;
+	u8				irptndx;
+	union {
+		u16			asid;
+		u16			vmid;
+	};
+	enum arm_smmu_cbar_type		cbar;
+	enum arm_smmu_context_fmt	fmt;
+};
+#define INVALID_IRPTNDX			0xff
+
+enum arm_smmu_domain_stage {
+	ARM_SMMU_DOMAIN_S1 = 0,
+	ARM_SMMU_DOMAIN_S2,
+	ARM_SMMU_DOMAIN_NESTED,
+	ARM_SMMU_DOMAIN_BYPASS,
+};
+
+struct arm_smmu_domain {
+	struct arm_smmu_device		*smmu;
+	struct io_pgtable_ops		*pgtbl_ops;
+	const struct iommu_gather_ops	*tlb_ops;
+	struct arm_smmu_cfg		cfg;
+	enum arm_smmu_domain_stage	stage;
+	bool				non_strict;
+	struct mutex			init_mutex; /* Protects smmu pointer */
+	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
+	struct iommu_domain		domain;
+};
+
 /* Implementation details, yay! */
 struct arm_smmu_impl {
@@ -289,6 +326,7 @@ struct arm_smmu_impl {
 			    u64 val);
 	int (*cfg_probe)(struct arm_smmu_device *smmu);
 	int (*reset)(struct arm_smmu_device *smmu);
+	int (*init_context)(struct arm_smmu_domain *smmu_domain);
 };
 
 static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)