iommu/io-pgtable: Rename iommu_gather_ops to iommu_flush_ops
In preparation for TLB flush gathering in the IOMMU API, rename the
iommu_gather_ops structure in io-pgtable to iommu_flush_ops, which better
describes its purpose and avoids the potential for confusion between
different levels of the API.

    $ find linux/ -type f -name '*.[ch]' | xargs sed -i 's/gather_ops/flush_ops/g'

Signed-off-by: Will Deacon <will@kernel.org>
parent f71da46719
commit 298f78895b
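For context, here is a minimal sketch of what a driver-side user of the
renamed structure looks like after this patch. The my_* names and the empty
callback bodies are hypothetical, added purely for illustration; only struct
iommu_flush_ops, struct io_pgtable_cfg, their field names, and
alloc_io_pgtable_ops() come from io-pgtable itself.

	/* Hypothetical driver callbacks; bodies elided for illustration. */
	#include <linux/io-pgtable.h>

	static void my_tlb_flush_all(void *cookie)
	{
		/* Synchronously invalidate the entire TLB context for cookie. */
	}

	static void my_tlb_add_flush(unsigned long iova, size_t size,
				     size_t granule, bool leaf, void *cookie)
	{
		/* Queue up invalidation of [iova, iova + size) at this granule. */
	}

	static void my_tlb_sync(void *cookie)
	{
		/* Wait for all queued invalidations to complete. */
	}

	/* Before this patch: "static const struct iommu_gather_ops my_tlb_ops". */
	static const struct iommu_flush_ops my_tlb_ops = {
		.tlb_flush_all	= my_tlb_flush_all,
		.tlb_add_flush	= my_tlb_add_flush,
		.tlb_sync	= my_tlb_sync,
	};

	/* Wiring the ops into a page-table configuration, as the hunks below do. */
	static struct io_pgtable_ops *my_pgtbl_init(struct device *dev, void *cookie)
	{
		struct io_pgtable_cfg cfg = {
			.pgsize_bitmap	= SZ_4K | SZ_2M,
			.ias		= 32,
			.oas		= 32,
			.tlb		= &my_tlb_ops,	/* was &my_gather_ops */
			.iommu_dev	= dev,
		};

		return alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
	}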
@@ -257,7 +257,7 @@ static void mmu_tlb_sync_context(void *cookie)
 	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
 }
 
-static const struct iommu_gather_ops mmu_tlb_ops = {
+static const struct iommu_flush_ops mmu_tlb_ops = {
 	.tlb_flush_all = mmu_tlb_inv_context_s1,
 	.tlb_add_flush = mmu_tlb_inv_range_nosync,
 	.tlb_sync = mmu_tlb_sync_context,
@@ -1603,7 +1603,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 	} while (size -= granule);
 }
 
-static const struct iommu_gather_ops arm_smmu_gather_ops = {
+static const struct iommu_flush_ops arm_smmu_flush_ops = {
 	.tlb_flush_all = arm_smmu_tlb_inv_context,
 	.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
 	.tlb_sync = arm_smmu_tlb_sync,
@@ -1796,7 +1796,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
 		.ias = ias,
 		.oas = oas,
 		.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
-		.tlb = &arm_smmu_gather_ops,
+		.tlb = &arm_smmu_flush_ops,
 		.iommu_dev = smmu->dev,
 	};
 
@@ -251,7 +251,7 @@ enum arm_smmu_domain_stage {
 struct arm_smmu_domain {
 	struct arm_smmu_device *smmu;
 	struct io_pgtable_ops *pgtbl_ops;
-	const struct iommu_gather_ops *tlb_ops;
+	const struct iommu_flush_ops *tlb_ops;
 	struct arm_smmu_cfg cfg;
 	enum arm_smmu_domain_stage stage;
 	bool non_strict;
@@ -547,19 +547,19 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
 	writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
 }
 
-static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
+static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
 	.tlb_flush_all = arm_smmu_tlb_inv_context_s1,
 	.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
 	.tlb_sync = arm_smmu_tlb_sync_context,
 };
 
-static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
 	.tlb_flush_all = arm_smmu_tlb_inv_context_s2,
 	.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
 	.tlb_sync = arm_smmu_tlb_sync_context,
 };
 
-static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
 	.tlb_flush_all = arm_smmu_tlb_inv_context_s2,
 	.tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync,
 	.tlb_sync = arm_smmu_tlb_sync_vmid,
@@ -817,7 +817,7 @@ static void dummy_tlb_sync(void *cookie)
 	WARN_ON(cookie != cfg_cookie);
 }
 
-static const struct iommu_gather_ops dummy_tlb_ops = {
+static const struct iommu_flush_ops dummy_tlb_ops = {
 	.tlb_flush_all = dummy_tlb_flush_all,
 	.tlb_add_flush = dummy_tlb_add_flush,
 	.tlb_sync = dummy_tlb_sync,
@@ -1081,7 +1081,7 @@ static void dummy_tlb_sync(void *cookie)
 	WARN_ON(cookie != cfg_cookie);
 }
 
-static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
+static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
 	.tlb_flush_all = dummy_tlb_flush_all,
 	.tlb_add_flush = dummy_tlb_add_flush,
 	.tlb_sync = dummy_tlb_sync,
@@ -367,7 +367,7 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
 	/* The hardware doesn't support selective TLB flush. */
 }
 
-static const struct iommu_gather_ops ipmmu_gather_ops = {
+static const struct iommu_flush_ops ipmmu_flush_ops = {
 	.tlb_flush_all = ipmmu_tlb_flush_all,
 	.tlb_add_flush = ipmmu_tlb_add_flush,
 	.tlb_sync = ipmmu_tlb_flush_all,
@@ -480,7 +480,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
 	domain->cfg.ias = 32;
 	domain->cfg.oas = 40;
-	domain->cfg.tlb = &ipmmu_gather_ops;
+	domain->cfg.tlb = &ipmmu_flush_ops;
 	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
 	domain->io_domain.geometry.force_aperture = true;
 	/*
@@ -178,7 +178,7 @@ static void __flush_iotlb_sync(void *cookie)
 	 */
 }
 
-static const struct iommu_gather_ops msm_iommu_gather_ops = {
+static const struct iommu_flush_ops msm_iommu_flush_ops = {
 	.tlb_flush_all = __flush_iotlb,
 	.tlb_add_flush = __flush_iotlb_range,
 	.tlb_sync = __flush_iotlb_sync,
@@ -345,7 +345,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
 		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
 		.ias = 32,
 		.oas = 32,
-		.tlb = &msm_iommu_gather_ops,
+		.tlb = &msm_iommu_flush_ops,
 		.iommu_dev = priv->dev,
 	};
 
@@ -188,7 +188,7 @@ static void mtk_iommu_tlb_sync(void *cookie)
 	}
 }
 
-static const struct iommu_gather_ops mtk_iommu_gather_ops = {
+static const struct iommu_flush_ops mtk_iommu_flush_ops = {
 	.tlb_flush_all = mtk_iommu_tlb_flush_all,
 	.tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
 	.tlb_sync = mtk_iommu_tlb_sync,
@@ -267,7 +267,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
 		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
 		.ias = 32,
 		.oas = 32,
-		.tlb = &mtk_iommu_gather_ops,
+		.tlb = &mtk_iommu_flush_ops,
 		.iommu_dev = data->dev,
 	};
 
@@ -164,7 +164,7 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 	}
 }
 
-static const struct iommu_gather_ops qcom_gather_ops = {
+static const struct iommu_flush_ops qcom_flush_ops = {
 	.tlb_flush_all = qcom_iommu_tlb_inv_context,
 	.tlb_add_flush = qcom_iommu_tlb_inv_range_nosync,
 	.tlb_sync = qcom_iommu_tlb_sync,
@@ -215,7 +215,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
 		.pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap,
 		.ias = 32,
 		.oas = 40,
-		.tlb = &qcom_gather_ops,
+		.tlb = &qcom_flush_ops,
 		.iommu_dev = qcom_iommu->dev,
 	};
 
@@ -17,7 +17,7 @@ enum io_pgtable_fmt {
 };
 
 /**
- * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
+ * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
  *
  * @tlb_flush_all: Synchronously invalidate the entire TLB context.
  * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
@@ -28,7 +28,7 @@ enum io_pgtable_fmt {
  * Note that these can all be called in atomic context and must therefore
  * not block.
  */
-struct iommu_gather_ops {
+struct iommu_flush_ops {
 	void (*tlb_flush_all)(void *cookie);
 	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
 			      bool leaf, void *cookie);
@@ -84,7 +84,7 @@ struct io_pgtable_cfg {
 	unsigned int ias;
 	unsigned int oas;
 	bool coherent_walk;
-	const struct iommu_gather_ops *tlb;
+	const struct iommu_flush_ops *tlb;
 	struct device *iommu_dev;
 
 	/* Low-level data specific to the table format */
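Since the rename was done with a tree-wide sed rather than by hand, one way
to confirm no stragglers remain (assuming a git checkout of the patched
tree; this check is not part of the patch itself) is:

	$ git grep -nw iommu_gather_ops
	$ # no output expected after this commit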