iommu/arm-smmu-v3: Implement flush_iotlb_all hook
.flush_iotlb_all is currently stubbed to arm_smmu_iotlb_sync() since the
only time it would ever need to actually do anything is for callers doing
their own explicit batching, e.g.:

	iommu_unmap_fast(domain, ...);
	iommu_unmap_fast(domain, ...);
	iommu_iotlb_flush_all(domain, ...);

where since io-pgtable still issues the TLBI commands implicitly in the
unmap instead of implementing .iotlb_range_add, the "flush" only needs to
ensure completion of those already-in-flight invalidations. However, we're
about to start using it in anger with flush queues, so let's get a proper
implementation wired up.

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
[rm: document why it wasn't a bug]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
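For context, the explicit-batching flow described above looks roughly like
this from a caller's perspective. This is a hedged sketch, not code from
this commit: unmap_batch_example() and its parameters are made up for
illustration, and the exact core helper names and signatures vary by kernel
version (around this commit, iommu_unmap_fast() took (domain, iova, size)
and the core flush entry point was iommu_flush_tlb_all()).

#include <linux/iommu.h>

/*
 * Hypothetical caller illustrating explicit batching: tear down several
 * mappings without waiting on per-range TLB maintenance, then issue one
 * flush for the whole batch. Helper signatures are era-dependent.
 */
static void unmap_batch_example(struct iommu_domain *domain,
				unsigned long iova1, unsigned long iova2,
				size_t size)
{
	iommu_unmap_fast(domain, iova1, size);
	iommu_unmap_fast(domain, iova2, size);

	/* Dispatches to the domain's .flush_iotlb_all hook. */
	iommu_flush_tlb_all(domain);
}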
commit 07fdef34d2
parent 901510ee32
@@ -1781,6 +1781,14 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 	return ops->unmap(ops, iova, size);
 }
 
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	if (smmu_domain->smmu)
+		arm_smmu_tlb_inv_context(smmu_domain);
+}
+
 static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
 {
 	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
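For readers following the hunk above: the new hook simply delegates to
arm_smmu_tlb_inv_context(), guarded against domains that have not been
attached to an SMMU yet (smmu_domain->smmu is still NULL until then). As a
rough sketch of what that helper did in the driver of this era, reproduced
from memory and so possibly differing in detail from any given kernel tree:

/*
 * Rough sketch of arm_smmu_tlb_inv_context() circa this commit: one
 * whole-context invalidation command, then a sync to await completion.
 * Field and opcode names approximate the contemporary driver.
 */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		/* Stage 1: invalidate all entries tagged with the ASID. */
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		/* Stage 2: invalidate everything tagged with the VMID. */
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}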
@@ -2008,7 +2016,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.attach_dev		= arm_smmu_attach_dev,
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
-	.flush_iotlb_all	= arm_smmu_iotlb_sync,
+	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
 	.iotlb_sync		= arm_smmu_iotlb_sync,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.add_device		= arm_smmu_add_device,
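The second hunk is the functional change: .flush_iotlb_all previously
pointed at arm_smmu_iotlb_sync (a sync-only stub, per the commit message)
and now points at the real invalidation routine. The core IOMMU layer
reaches it through a thin wrapper along these lines (a sketch of the
contemporary include/linux/iommu.h inline, from memory; exact form may
differ by version):

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
	/* Dispatch to the driver's .flush_iotlb_all hook, if it has one. */
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}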