iommu/io-pgtable: Remove tlb_flush_leaf

The only user of tlb_flush_leaf is a particularly hairy corner of the
Arm short-descriptor code, which wants a synchronous invalidation to
minimise the races inherent in trying to split a large page mapping.
This is already far enough into "here be dragons" territory that no
sensible caller should ever hit it, and thus it really doesn't need
optimising. Although using tlb_flush_walk there may technically be
more heavyweight than needed, it does the job and saves everyone else
having to carry around useless baggage.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://lore.kernel.org/r/9844ab0c5cb3da8b2f89c6c2da16941910702b41.1606324115.git.robin.murphy@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
Robin Murphy 2020-11-25 17:29:39 +00:00 committed by Will Deacon
parent c74009f529
commit fefe8527a1
11 changed files with 4 additions and 68 deletions
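For illustration only, here is a minimal sketch (not part of this commit) of what a driver's flush callbacks look like once tlb_flush_leaf is gone: struct iommu_flush_ops carries just tlb_flush_all, tlb_flush_walk and the optional tlb_add_page, as shown in the io-pgtable header hunk at the end of the diff. The foo_* names are hypothetical stand-ins for a real driver.

#include <linux/io-pgtable.h>

/* Hypothetical driver callbacks; a real driver issues hardware invalidations here. */
static void foo_tlb_flush_all(void *cookie)
{
	/* invalidate the whole TLB context for this domain */
}

static void foo_tlb_flush_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	/*
	 * Invalidate walk-cache and leaf entries for the range. After this
	 * commit, io-pgtable calls this even in the short-descriptor
	 * split-page path that previously used tlb_flush_leaf.
	 */
}

static void foo_tlb_add_page(struct iommu_iotlb_gather *gather,
			     unsigned long iova, size_t granule, void *cookie)
{
	/* queue up (or issue) a single-page leaf invalidation */
}

/* .tlb_flush_leaf no longer exists; only these three callbacks remain. */
static const struct iommu_flush_ops foo_flush_ops = {
	.tlb_flush_all = foo_tlb_flush_all,
	.tlb_flush_walk = foo_tlb_flush_walk,
	.tlb_add_page = foo_tlb_add_page,
};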

View File

@@ -139,7 +139,6 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops null_tlb_ops = {
 	.tlb_flush_all = msm_iommu_tlb_flush_all,
 	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
-	.tlb_flush_leaf = msm_iommu_tlb_flush_walk,
 	.tlb_add_page = msm_iommu_tlb_add_page,
 };

View File

@@ -347,16 +347,9 @@ static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
 	mmu_tlb_sync_context(cookie);
 }
 
-static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
-			       void *cookie)
-{
-	mmu_tlb_sync_context(cookie);
-}
-
 static const struct iommu_flush_ops mmu_tlb_ops = {
 	.tlb_flush_all = mmu_tlb_inv_context_s1,
 	.tlb_flush_walk = mmu_tlb_flush_walk,
-	.tlb_flush_leaf = mmu_tlb_flush_leaf,
 };
 
 int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)

View File

@@ -1760,16 +1760,9 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
 	arm_smmu_tlb_inv_range(iova, size, granule, false, cookie);
 }
 
-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
-				  size_t granule, void *cookie)
-{
-	arm_smmu_tlb_inv_range(iova, size, granule, true, cookie);
-}
-
 static const struct iommu_flush_ops arm_smmu_flush_ops = {
 	.tlb_flush_all = arm_smmu_tlb_inv_context,
 	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
-	.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
 	.tlb_add_page = arm_smmu_tlb_inv_page_nosync,
 };

View File

@@ -333,14 +333,6 @@ static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
 	arm_smmu_tlb_sync_context(cookie);
 }
 
-static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
-				     size_t granule, void *cookie)
-{
-	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
-				  ARM_SMMU_CB_S1_TLBIVAL);
-	arm_smmu_tlb_sync_context(cookie);
-}
-
 static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
 				     unsigned long iova, size_t granule,
 				     void *cookie)
@@ -357,14 +349,6 @@ static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
 	arm_smmu_tlb_sync_context(cookie);
 }
 
-static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
-				     size_t granule, void *cookie)
-{
-	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
-				  ARM_SMMU_CB_S2_TLBIIPAS2L);
-	arm_smmu_tlb_sync_context(cookie);
-}
-
 static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
 				     unsigned long iova, size_t granule,
 				     void *cookie)
@@ -373,8 +357,8 @@ static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
 				  ARM_SMMU_CB_S2_TLBIIPAS2L);
 }
 
-static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
-				       size_t granule, void *cookie)
+static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
+					size_t granule, void *cookie)
 {
 	arm_smmu_tlb_inv_context_s2(cookie);
 }
@@ -401,21 +385,18 @@ static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
 	.tlb_flush_all = arm_smmu_tlb_inv_context_s1,
 	.tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
-	.tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1,
 	.tlb_add_page = arm_smmu_tlb_add_page_s1,
 };
 
 static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
 	.tlb_flush_all = arm_smmu_tlb_inv_context_s2,
 	.tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
-	.tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2,
 	.tlb_add_page = arm_smmu_tlb_add_page_s2,
 };
 
 static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
 	.tlb_flush_all = arm_smmu_tlb_inv_context_s2,
-	.tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1,
-	.tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1,
+	.tlb_flush_walk = arm_smmu_tlb_inv_walk_s2_v1,
 	.tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
 };

View File

@@ -185,13 +185,6 @@ static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
 	qcom_iommu_tlb_sync(cookie);
 }
 
-static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
-				      size_t granule, void *cookie)
-{
-	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
-	qcom_iommu_tlb_sync(cookie);
-}
-
 static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
 				    unsigned long iova, size_t granule,
 				    void *cookie)
@@ -202,7 +195,6 @@ static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops qcom_flush_ops = {
 	.tlb_flush_all = qcom_iommu_tlb_inv_context,
 	.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
-	.tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
 	.tlb_add_page = qcom_iommu_tlb_add_page,
 };

View File

@@ -584,7 +584,7 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
 	__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
 
 	size *= ARM_V7S_CONT_PAGES;
-	io_pgtable_tlb_flush_leaf(iop, iova, size, size);
+	io_pgtable_tlb_flush_walk(iop, iova, size, size);
 	return pte;
 }
@@ -866,7 +866,6 @@ static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
 	.tlb_flush_all = dummy_tlb_flush_all,
 	.tlb_flush_walk = dummy_tlb_flush,
-	.tlb_flush_leaf = dummy_tlb_flush,
 	.tlb_add_page = dummy_tlb_add_page,
 };

View File

@@ -1085,7 +1085,6 @@ static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
 	.tlb_flush_all = dummy_tlb_flush_all,
 	.tlb_flush_walk = dummy_tlb_flush,
-	.tlb_flush_leaf = dummy_tlb_flush,
 	.tlb_add_page = dummy_tlb_add_page,
 };

View File

@@ -325,7 +325,6 @@ static void ipmmu_tlb_flush(unsigned long iova, size_t size,
 static const struct iommu_flush_ops ipmmu_flush_ops = {
 	.tlb_flush_all = ipmmu_tlb_flush_all,
 	.tlb_flush_walk = ipmmu_tlb_flush,
-	.tlb_flush_leaf = ipmmu_tlb_flush,
 };
 
 /* -----------------------------------------------------------------------------

View File

@@ -174,12 +174,6 @@ static void __flush_iotlb_walk(unsigned long iova, size_t size,
 	__flush_iotlb_range(iova, size, granule, false, cookie);
 }
 
-static void __flush_iotlb_leaf(unsigned long iova, size_t size,
-			       size_t granule, void *cookie)
-{
-	__flush_iotlb_range(iova, size, granule, true, cookie);
-}
-
 static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
 			       unsigned long iova, size_t granule, void *cookie)
 {
@@ -189,7 +183,6 @@ static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops msm_iommu_flush_ops = {
 	.tlb_flush_all = __flush_iotlb,
 	.tlb_flush_walk = __flush_iotlb_walk,
-	.tlb_flush_leaf = __flush_iotlb_leaf,
 	.tlb_add_page = __flush_iotlb_page,
 };

View File

@@ -240,7 +240,6 @@ static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops mtk_iommu_flush_ops = {
 	.tlb_flush_all = mtk_iommu_tlb_flush_all,
 	.tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
-	.tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
 	.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
 };

View File

@@ -25,8 +25,6 @@ enum io_pgtable_fmt {
  * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
  *                  (sometimes referred to as the "walk cache") for a virtual
  *                  address range.
- * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
- *                  address range.
  * @tlb_add_page:   Optional callback to queue up leaf TLB invalidation for a
  *                  single page. IOMMUs that cannot batch TLB invalidation
  *                  operations efficiently will typically issue them here, but
@@ -40,8 +38,6 @@ struct iommu_flush_ops {
 	void (*tlb_flush_all)(void *cookie);
 	void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
 			       void *cookie);
-	void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
-			       void *cookie);
 	void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
 			     unsigned long iova, size_t granule, void *cookie);
 };
@@ -228,13 +224,6 @@ io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
 	iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
 }
 
-static inline void
-io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
-			  size_t size, size_t granule)
-{
-	iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
-}
-
 static inline void
 io_pgtable_tlb_add_page(struct io_pgtable *iop,
 			struct iommu_iotlb_gather * gather, unsigned long iova,