iommu/qcom: Use domain rather than dev as tlb cookie
The device may be torn down, but the domain should still be valid. Let's
use that as the TLB flush ops cookie.
Fixes a problem reported in [1].
[1] https://lkml.org/lkml/2020/7/20/104
Reported-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Rob Clark <robdclark@chromium.org>
Tested-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Fixes: 09b5dfff9a
("iommu/qcom: Use accessor functions for iommu private data")
Link: https://lore.kernel.org/r/20200720155217.274994-1-robdclark@gmail.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
ba47d845d7
commit
1014a2f8d7
|
@ -65,6 +65,7 @@ struct qcom_iommu_domain {
|
|||
struct mutex init_mutex; /* Protects iommu pointer */
|
||||
struct iommu_domain domain;
|
||||
struct qcom_iommu_dev *iommu;
|
||||
struct iommu_fwspec *fwspec;
|
||||
};
|
||||
|
||||
static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
|
||||
|
@ -84,9 +85,9 @@ static struct qcom_iommu_dev * to_iommu(struct device *dev)
|
|||
return dev_iommu_priv_get(dev);
|
||||
}
|
||||
|
||||
static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
|
||||
static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
|
||||
{
|
||||
struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
|
||||
struct qcom_iommu_dev *qcom_iommu = d->iommu;
|
||||
if (!qcom_iommu)
|
||||
return NULL;
|
||||
return qcom_iommu->ctxs[asid - 1];
|
||||
|
@ -118,14 +119,12 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
|
|||
|
||||
static void qcom_iommu_tlb_sync(void *cookie)
|
||||
{
|
||||
struct iommu_fwspec *fwspec;
|
||||
struct device *dev = cookie;
|
||||
struct qcom_iommu_domain *qcom_domain = cookie;
|
||||
struct iommu_fwspec *fwspec = qcom_domain->fwspec;
|
||||
unsigned i;
|
||||
|
||||
fwspec = dev_iommu_fwspec_get(dev);
|
||||
|
||||
for (i = 0; i < fwspec->num_ids; i++) {
|
||||
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
|
||||
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
|
||||
unsigned int val, ret;
|
||||
|
||||
iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
|
||||
|
@ -139,14 +138,12 @@ static void qcom_iommu_tlb_sync(void *cookie)
|
|||
|
||||
static void qcom_iommu_tlb_inv_context(void *cookie)
|
||||
{
|
||||
struct device *dev = cookie;
|
||||
struct iommu_fwspec *fwspec;
|
||||
struct qcom_iommu_domain *qcom_domain = cookie;
|
||||
struct iommu_fwspec *fwspec = qcom_domain->fwspec;
|
||||
unsigned i;
|
||||
|
||||
fwspec = dev_iommu_fwspec_get(dev);
|
||||
|
||||
for (i = 0; i < fwspec->num_ids; i++) {
|
||||
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
|
||||
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
|
||||
iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
|
||||
}
|
||||
|
||||
|
@ -156,16 +153,14 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
|
|||
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
|
||||
size_t granule, bool leaf, void *cookie)
|
||||
{
|
||||
struct device *dev = cookie;
|
||||
struct iommu_fwspec *fwspec;
|
||||
struct qcom_iommu_domain *qcom_domain = cookie;
|
||||
struct iommu_fwspec *fwspec = qcom_domain->fwspec;
|
||||
unsigned i, reg;
|
||||
|
||||
reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
|
||||
|
||||
fwspec = dev_iommu_fwspec_get(dev);
|
||||
|
||||
for (i = 0; i < fwspec->num_ids; i++) {
|
||||
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
|
||||
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
|
||||
size_t s = size;
|
||||
|
||||
iova = (iova >> 12) << 12;
|
||||
|
@ -256,7 +251,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
|
|||
};
|
||||
|
||||
qcom_domain->iommu = qcom_iommu;
|
||||
pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
|
||||
qcom_domain->fwspec = fwspec;
|
||||
|
||||
pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
|
||||
if (!pgtbl_ops) {
|
||||
dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
|
||||
ret = -ENOMEM;
|
||||
|
@ -269,7 +266,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
|
|||
domain->geometry.force_aperture = true;
|
||||
|
||||
for (i = 0; i < fwspec->num_ids; i++) {
|
||||
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
|
||||
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
|
||||
|
||||
if (!ctx->secure_init) {
|
||||
ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
|
||||
|
@ -419,7 +416,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
|
|||
|
||||
pm_runtime_get_sync(qcom_iommu->dev);
|
||||
for (i = 0; i < fwspec->num_ids; i++) {
|
||||
struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
|
||||
struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
|
||||
|
||||
/* Disable the context bank: */
|
||||
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
|
||||
|
|
Loading…
Reference in New Issue