iommu: Split struct iommu_ops
Move the domain specific operations out of struct iommu_ops into a new
structure that only has domain specific operations. This solves the
problem of needing to know if the method vector for a given operation
needs to be retrieved from the device or the domain.

Logically the domain ops are the ones that make sense for external
subsystems and endpoint drivers to use, while device ops, with the sole
exception of domain_alloc, are IOMMU API internals.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20220216025249.3459465-10-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 41bb23e70b
commit 9a630a4b41
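Every driver conversion below follows the same pattern. As a minimal sketch (a hypothetical foo driver; the foo_* callbacks and the SZ_4K bitmap are placeholders, not part of this patch), device-scoped methods stay in struct iommu_ops while domain-scoped methods move into a compound-literal struct iommu_domain_ops referenced by default_domain_ops:

#include <linux/iommu.h>
#include <linux/sizes.h>

/* Hypothetical driver, for illustration only. */
static const struct iommu_ops foo_iommu_ops = {
	/* Device/instance ops: IOMMU API internals. */
	.capable	= foo_iommu_capable,
	.probe_device	= foo_iommu_probe_device,
	.release_device	= foo_iommu_release_device,
	.device_group	= generic_device_group,
	.pgsize_bitmap	= SZ_4K,
	/* The one domain op that stays here: allocation. */
	.domain_alloc	= foo_iommu_domain_alloc,
	/* Domain ops, reached through domain->ops after allocation. */
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= foo_iommu_attach_dev,
		.map		= foo_iommu_map,
		.unmap		= foo_iommu_unmap,
		.iova_to_phys	= foo_iommu_iova_to_phys,
		.free		= foo_iommu_domain_free,	/* formerly .domain_free */
	}
};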
drivers/iommu/amd/iommu.c

@@ -2268,13 +2268,6 @@ static int amd_iommu_def_domain_type(struct device *dev)
 const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
 	.domain_alloc = amd_iommu_domain_alloc,
-	.domain_free = amd_iommu_domain_free,
-	.attach_dev = amd_iommu_attach_device,
-	.detach_dev = amd_iommu_detach_device,
-	.map = amd_iommu_map,
-	.iotlb_sync_map = amd_iommu_iotlb_sync_map,
-	.unmap = amd_iommu_unmap,
-	.iova_to_phys = amd_iommu_iova_to_phys,
 	.probe_device = amd_iommu_probe_device,
 	.release_device = amd_iommu_release_device,
 	.probe_finalize = amd_iommu_probe_finalize,
@@ -2283,9 +2276,18 @@ const struct iommu_ops amd_iommu_ops = {
 	.put_resv_regions = generic_iommu_put_resv_regions,
 	.is_attach_deferred = amd_iommu_is_attach_deferred,
 	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
-	.flush_iotlb_all = amd_iommu_flush_iotlb_all,
-	.iotlb_sync = amd_iommu_iotlb_sync,
 	.def_domain_type = amd_iommu_def_domain_type,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = amd_iommu_attach_device,
+		.detach_dev = amd_iommu_detach_device,
+		.map = amd_iommu_map,
+		.unmap = amd_iommu_unmap,
+		.iotlb_sync_map = amd_iommu_iotlb_sync_map,
+		.iova_to_phys = amd_iommu_iova_to_phys,
+		.flush_iotlb_all = amd_iommu_flush_iotlb_all,
+		.iotlb_sync = amd_iommu_iotlb_sync,
+		.free = amd_iommu_domain_free,
+	}
 };
 
 /*****************************************************************************
drivers/iommu/apple-dart.c

@@ -765,15 +765,6 @@ static void apple_dart_get_resv_regions(struct device *dev,
 
 static const struct iommu_ops apple_dart_iommu_ops = {
 	.domain_alloc = apple_dart_domain_alloc,
-	.domain_free = apple_dart_domain_free,
-	.attach_dev = apple_dart_attach_dev,
-	.detach_dev = apple_dart_detach_dev,
-	.map_pages = apple_dart_map_pages,
-	.unmap_pages = apple_dart_unmap_pages,
-	.flush_iotlb_all = apple_dart_flush_iotlb_all,
-	.iotlb_sync = apple_dart_iotlb_sync,
-	.iotlb_sync_map = apple_dart_iotlb_sync_map,
-	.iova_to_phys = apple_dart_iova_to_phys,
 	.probe_device = apple_dart_probe_device,
 	.release_device = apple_dart_release_device,
 	.device_group = apple_dart_device_group,
@@ -782,6 +773,17 @@ static const struct iommu_ops apple_dart_iommu_ops = {
 	.get_resv_regions = apple_dart_get_resv_regions,
 	.put_resv_regions = generic_iommu_put_resv_regions,
 	.pgsize_bitmap = -1UL, /* Restricted during dart probe */
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = apple_dart_attach_dev,
+		.detach_dev = apple_dart_detach_dev,
+		.map_pages = apple_dart_map_pages,
+		.unmap_pages = apple_dart_unmap_pages,
+		.flush_iotlb_all = apple_dart_flush_iotlb_all,
+		.iotlb_sync = apple_dart_iotlb_sync,
+		.iotlb_sync_map = apple_dart_iotlb_sync_map,
+		.iova_to_phys = apple_dart_iova_to_phys,
+		.free = apple_dart_domain_free,
+	}
 };
 
 static irqreturn_t apple_dart_irq(int irq, void *dev)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

@@ -2841,17 +2841,9 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
 static struct iommu_ops arm_smmu_ops = {
 	.capable = arm_smmu_capable,
 	.domain_alloc = arm_smmu_domain_alloc,
-	.domain_free = arm_smmu_domain_free,
-	.attach_dev = arm_smmu_attach_dev,
-	.map_pages = arm_smmu_map_pages,
-	.unmap_pages = arm_smmu_unmap_pages,
-	.flush_iotlb_all = arm_smmu_flush_iotlb_all,
-	.iotlb_sync = arm_smmu_iotlb_sync,
-	.iova_to_phys = arm_smmu_iova_to_phys,
 	.probe_device = arm_smmu_probe_device,
 	.release_device = arm_smmu_release_device,
 	.device_group = arm_smmu_device_group,
-	.enable_nesting = arm_smmu_enable_nesting,
 	.of_xlate = arm_smmu_of_xlate,
 	.get_resv_regions = arm_smmu_get_resv_regions,
 	.put_resv_regions = generic_iommu_put_resv_regions,
@@ -2865,6 +2857,16 @@ static struct iommu_ops arm_smmu_ops = {
 	.page_response = arm_smmu_page_response,
 	.pgsize_bitmap = -1UL, /* Restricted during device attach */
 	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = arm_smmu_attach_dev,
+		.map_pages = arm_smmu_map_pages,
+		.unmap_pages = arm_smmu_unmap_pages,
+		.flush_iotlb_all = arm_smmu_flush_iotlb_all,
+		.iotlb_sync = arm_smmu_iotlb_sync,
+		.iova_to_phys = arm_smmu_iova_to_phys,
+		.enable_nesting = arm_smmu_enable_nesting,
+		.free = arm_smmu_domain_free,
+	}
 };
 
 /* Probing and initialisation functions */
drivers/iommu/arm/arm-smmu/arm-smmu.c

@@ -1583,25 +1583,27 @@ static int arm_smmu_def_domain_type(struct device *dev)
 static struct iommu_ops arm_smmu_ops = {
 	.capable = arm_smmu_capable,
 	.domain_alloc = arm_smmu_domain_alloc,
-	.domain_free = arm_smmu_domain_free,
-	.attach_dev = arm_smmu_attach_dev,
-	.map_pages = arm_smmu_map_pages,
-	.unmap_pages = arm_smmu_unmap_pages,
-	.flush_iotlb_all = arm_smmu_flush_iotlb_all,
-	.iotlb_sync = arm_smmu_iotlb_sync,
-	.iova_to_phys = arm_smmu_iova_to_phys,
 	.probe_device = arm_smmu_probe_device,
 	.release_device = arm_smmu_release_device,
 	.probe_finalize = arm_smmu_probe_finalize,
 	.device_group = arm_smmu_device_group,
-	.enable_nesting = arm_smmu_enable_nesting,
-	.set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
 	.of_xlate = arm_smmu_of_xlate,
 	.get_resv_regions = arm_smmu_get_resv_regions,
 	.put_resv_regions = generic_iommu_put_resv_regions,
 	.def_domain_type = arm_smmu_def_domain_type,
 	.pgsize_bitmap = -1UL, /* Restricted during device attach */
 	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = arm_smmu_attach_dev,
+		.map_pages = arm_smmu_map_pages,
+		.unmap_pages = arm_smmu_unmap_pages,
+		.flush_iotlb_all = arm_smmu_flush_iotlb_all,
+		.iotlb_sync = arm_smmu_iotlb_sync,
+		.iova_to_phys = arm_smmu_iova_to_phys,
+		.enable_nesting = arm_smmu_enable_nesting,
+		.set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
+		.free = arm_smmu_domain_free,
+	}
 };
 
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
drivers/iommu/arm/arm-smmu/qcom_iommu.c

@@ -590,19 +590,21 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 static const struct iommu_ops qcom_iommu_ops = {
 	.capable = qcom_iommu_capable,
 	.domain_alloc = qcom_iommu_domain_alloc,
-	.domain_free = qcom_iommu_domain_free,
-	.attach_dev = qcom_iommu_attach_dev,
-	.detach_dev = qcom_iommu_detach_dev,
-	.map = qcom_iommu_map,
-	.unmap = qcom_iommu_unmap,
-	.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
-	.iotlb_sync = qcom_iommu_iotlb_sync,
-	.iova_to_phys = qcom_iommu_iova_to_phys,
 	.probe_device = qcom_iommu_probe_device,
 	.release_device = qcom_iommu_release_device,
 	.device_group = generic_device_group,
 	.of_xlate = qcom_iommu_of_xlate,
 	.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = qcom_iommu_attach_dev,
+		.detach_dev = qcom_iommu_detach_dev,
+		.map = qcom_iommu_map,
+		.unmap = qcom_iommu_unmap,
+		.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
+		.iotlb_sync = qcom_iommu_iotlb_sync,
+		.iova_to_phys = qcom_iommu_iova_to_phys,
+		.free = qcom_iommu_domain_free,
+	}
 };
 
 static int qcom_iommu_sec_ptbl_init(struct device *dev)
drivers/iommu/exynos-iommu.c

@@ -1309,17 +1309,19 @@ static int exynos_iommu_of_xlate(struct device *dev,
 
 static const struct iommu_ops exynos_iommu_ops = {
 	.domain_alloc = exynos_iommu_domain_alloc,
-	.domain_free = exynos_iommu_domain_free,
-	.attach_dev = exynos_iommu_attach_device,
-	.detach_dev = exynos_iommu_detach_device,
-	.map = exynos_iommu_map,
-	.unmap = exynos_iommu_unmap,
-	.iova_to_phys = exynos_iommu_iova_to_phys,
 	.device_group = generic_device_group,
 	.probe_device = exynos_iommu_probe_device,
 	.release_device = exynos_iommu_release_device,
 	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
 	.of_xlate = exynos_iommu_of_xlate,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = exynos_iommu_attach_device,
+		.detach_dev = exynos_iommu_detach_device,
+		.map = exynos_iommu_map,
+		.unmap = exynos_iommu_unmap,
+		.iova_to_phys = exynos_iommu_iova_to_phys,
+		.free = exynos_iommu_domain_free,
+	}
 };
 
 static int __init exynos_iommu_init(void)
drivers/iommu/fsl_pamu_domain.c

@@ -453,13 +453,15 @@ static void fsl_pamu_release_device(struct device *dev)
 static const struct iommu_ops fsl_pamu_ops = {
 	.capable = fsl_pamu_capable,
 	.domain_alloc = fsl_pamu_domain_alloc,
-	.domain_free = fsl_pamu_domain_free,
-	.attach_dev = fsl_pamu_attach_device,
-	.detach_dev = fsl_pamu_detach_device,
-	.iova_to_phys = fsl_pamu_iova_to_phys,
 	.probe_device = fsl_pamu_probe_device,
 	.release_device = fsl_pamu_release_device,
 	.device_group = fsl_pamu_device_group,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = fsl_pamu_attach_device,
+		.detach_dev = fsl_pamu_detach_device,
+		.iova_to_phys = fsl_pamu_iova_to_phys,
+		.free = fsl_pamu_domain_free,
+	}
 };
 
 int __init pamu_domain_init(void)
drivers/iommu/intel/iommu.c

@@ -5092,15 +5092,6 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
 const struct iommu_ops intel_iommu_ops = {
 	.capable = intel_iommu_capable,
 	.domain_alloc = intel_iommu_domain_alloc,
-	.domain_free = intel_iommu_domain_free,
-	.attach_dev = intel_iommu_attach_device,
-	.detach_dev = intel_iommu_detach_device,
-	.map_pages = intel_iommu_map_pages,
-	.unmap_pages = intel_iommu_unmap_pages,
-	.iotlb_sync_map = intel_iommu_iotlb_sync_map,
-	.flush_iotlb_all = intel_flush_iotlb_all,
-	.iotlb_sync = intel_iommu_tlb_sync,
-	.iova_to_phys = intel_iommu_iova_to_phys,
 	.probe_device = intel_iommu_probe_device,
 	.probe_finalize = intel_iommu_probe_finalize,
 	.release_device = intel_iommu_release_device,
@@ -5118,6 +5109,17 @@ const struct iommu_ops intel_iommu_ops = {
 	.sva_get_pasid = intel_svm_get_pasid,
 	.page_response = intel_svm_page_response,
 #endif
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = intel_iommu_attach_device,
+		.detach_dev = intel_iommu_detach_device,
+		.map_pages = intel_iommu_map_pages,
+		.unmap_pages = intel_iommu_unmap_pages,
+		.iotlb_sync_map = intel_iommu_iotlb_sync_map,
+		.flush_iotlb_all = intel_flush_iotlb_all,
+		.iotlb_sync = intel_iommu_tlb_sync,
+		.iova_to_phys = intel_iommu_iova_to_phys,
+		.free = intel_iommu_domain_free,
+	}
 };
 
 static void quirk_iommu_igfx(struct pci_dev *dev)
drivers/iommu/iommu.c

@@ -1950,10 +1950,11 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
 	if (!domain)
 		return NULL;
 
-	domain->ops  = bus->iommu_ops;
 	domain->type = type;
 	/* Assume all sizes by default; the driver may override this later */
-	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
+	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+	if (!domain->ops)
+		domain->ops = bus->iommu_ops->default_domain_ops;
 
 	if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
 		iommu_domain_free(domain);
@@ -1971,7 +1972,7 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
 void iommu_domain_free(struct iommu_domain *domain)
 {
 	iommu_put_dma_cookie(domain);
-	domain->ops->domain_free(domain);
+	domain->ops->free(domain);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
 
@@ -2242,7 +2243,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
 			     phys_addr_t paddr, size_t size, int prot,
 			     gfp_t gfp, size_t *mapped)
 {
-	const struct iommu_ops *ops = domain->ops;
+	const struct iommu_domain_ops *ops = domain->ops;
 	size_t pgsize, count;
 	int ret;
 
@@ -2265,7 +2266,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
 static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
-	const struct iommu_ops *ops = domain->ops;
+	const struct iommu_domain_ops *ops = domain->ops;
 	unsigned long orig_iova = iova;
 	unsigned int min_pagesz;
 	size_t orig_size = size;
@@ -2325,7 +2326,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
 		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
-	const struct iommu_ops *ops = domain->ops;
+	const struct iommu_domain_ops *ops = domain->ops;
 	int ret;
 
 	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
@@ -2354,7 +2355,7 @@ static size_t __iommu_unmap_pages(struct iommu_domain *domain,
 				  unsigned long iova, size_t size,
 				  struct iommu_iotlb_gather *iotlb_gather)
 {
-	const struct iommu_ops *ops = domain->ops;
+	const struct iommu_domain_ops *ops = domain->ops;
 	size_t pgsize, count;
 
 	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
@@ -2367,7 +2368,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
 			    unsigned long iova, size_t size,
 			    struct iommu_iotlb_gather *iotlb_gather)
 {
-	const struct iommu_ops *ops = domain->ops;
+	const struct iommu_domain_ops *ops = domain->ops;
 	size_t unmapped_page, unmapped = 0;
 	unsigned long orig_iova = iova;
 	unsigned int min_pagesz;
@@ -2443,7 +2444,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			      struct scatterlist *sg, unsigned int nents, int prot,
 			      gfp_t gfp)
 {
-	const struct iommu_ops *ops = domain->ops;
+	const struct iommu_domain_ops *ops = domain->ops;
 	size_t len = 0, mapped = 0;
 	phys_addr_t start;
 	unsigned int i = 0;
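The core changes above are the pivot of the patch: the core installs the driver's default_domain_ops into a new domain unless the driver's domain_alloc already set a per-domain table, and the map/unmap helpers then dispatch through domain->ops. A condensed sketch of that flow (not literal kernel code; error handling trimmed):

#include <linux/iommu.h>

/* Condensed sketch of the new dispatch, paraphrasing the hunks above. */
static struct iommu_domain *alloc_and_map_sketch(struct bus_type *bus,
						 unsigned int type,
						 unsigned long iova,
						 phys_addr_t paddr,
						 size_t size, int prot)
{
	struct iommu_domain *domain = bus->iommu_ops->domain_alloc(type);

	if (!domain)
		return NULL;

	/* The driver's domain_alloc() may install specialized per-domain
	 * ops; otherwise fall back to the shared default table. */
	if (!domain->ops)
		domain->ops = bus->iommu_ops->default_domain_ops;

	/* Map/unmap now dispatch through the domain, not bus->iommu_ops. */
	if (domain->ops->map(domain, iova, paddr, size, prot, GFP_KERNEL))
		return NULL;

	return domain;
}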
drivers/iommu/ipmmu-vmsa.c

@@ -868,14 +868,6 @@ static struct iommu_group *ipmmu_find_group(struct device *dev)
 
 static const struct iommu_ops ipmmu_ops = {
 	.domain_alloc = ipmmu_domain_alloc,
-	.domain_free = ipmmu_domain_free,
-	.attach_dev = ipmmu_attach_device,
-	.detach_dev = ipmmu_detach_device,
-	.map = ipmmu_map,
-	.unmap = ipmmu_unmap,
-	.flush_iotlb_all = ipmmu_flush_iotlb_all,
-	.iotlb_sync = ipmmu_iotlb_sync,
-	.iova_to_phys = ipmmu_iova_to_phys,
 	.probe_device = ipmmu_probe_device,
 	.release_device = ipmmu_release_device,
 	.probe_finalize = ipmmu_probe_finalize,
@@ -883,6 +875,16 @@ static const struct iommu_ops ipmmu_ops = {
 			? generic_device_group : ipmmu_find_group,
 	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
 	.of_xlate = ipmmu_of_xlate,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = ipmmu_attach_device,
+		.detach_dev = ipmmu_detach_device,
+		.map = ipmmu_map,
+		.unmap = ipmmu_unmap,
+		.flush_iotlb_all = ipmmu_flush_iotlb_all,
+		.iotlb_sync = ipmmu_iotlb_sync,
+		.iova_to_phys = ipmmu_iova_to_phys,
+		.free = ipmmu_domain_free,
+	}
 };
 
 /* -----------------------------------------------------------------------------
drivers/iommu/msm_iommu.c

@@ -668,25 +668,27 @@ fail:
 
 static struct iommu_ops msm_iommu_ops = {
 	.domain_alloc = msm_iommu_domain_alloc,
-	.domain_free = msm_iommu_domain_free,
-	.attach_dev = msm_iommu_attach_dev,
-	.detach_dev = msm_iommu_detach_dev,
-	.map = msm_iommu_map,
-	.unmap = msm_iommu_unmap,
-	/*
-	 * Nothing is needed here, the barrier to guarantee
-	 * completion of the tlb sync operation is implicitly
-	 * taken care when the iommu client does a writel before
-	 * kick starting the other master.
-	 */
-	.iotlb_sync = NULL,
-	.iotlb_sync_map = msm_iommu_sync_map,
-	.iova_to_phys = msm_iommu_iova_to_phys,
 	.probe_device = msm_iommu_probe_device,
 	.release_device = msm_iommu_release_device,
 	.device_group = generic_device_group,
 	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 	.of_xlate = qcom_iommu_of_xlate,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = msm_iommu_attach_dev,
+		.detach_dev = msm_iommu_detach_dev,
+		.map = msm_iommu_map,
+		.unmap = msm_iommu_unmap,
+		/*
+		 * Nothing is needed here, the barrier to guarantee
+		 * completion of the tlb sync operation is implicitly
+		 * taken care when the iommu client does a writel before
+		 * kick starting the other master.
+		 */
+		.iotlb_sync = NULL,
+		.iotlb_sync_map = msm_iommu_sync_map,
+		.iova_to_phys = msm_iommu_iova_to_phys,
+		.free = msm_iommu_domain_free,
+	}
 };
 
 static int msm_iommu_probe(struct platform_device *pdev)
drivers/iommu/mtk_iommu.c

@@ -658,15 +658,6 @@ static void mtk_iommu_get_resv_regions(struct device *dev,
 
 static const struct iommu_ops mtk_iommu_ops = {
 	.domain_alloc = mtk_iommu_domain_alloc,
-	.domain_free = mtk_iommu_domain_free,
-	.attach_dev = mtk_iommu_attach_device,
-	.detach_dev = mtk_iommu_detach_device,
-	.map = mtk_iommu_map,
-	.unmap = mtk_iommu_unmap,
-	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
-	.iotlb_sync = mtk_iommu_iotlb_sync,
-	.iotlb_sync_map = mtk_iommu_sync_map,
-	.iova_to_phys = mtk_iommu_iova_to_phys,
 	.probe_device = mtk_iommu_probe_device,
 	.release_device = mtk_iommu_release_device,
 	.device_group = mtk_iommu_device_group,
@@ -675,6 +666,17 @@ static const struct iommu_ops mtk_iommu_ops = {
 	.put_resv_regions = generic_iommu_put_resv_regions,
 	.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
 	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = mtk_iommu_attach_device,
+		.detach_dev = mtk_iommu_detach_device,
+		.map = mtk_iommu_map,
+		.unmap = mtk_iommu_unmap,
+		.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
+		.iotlb_sync = mtk_iommu_iotlb_sync,
+		.iotlb_sync_map = mtk_iommu_sync_map,
+		.iova_to_phys = mtk_iommu_iova_to_phys,
+		.free = mtk_iommu_domain_free,
+	}
 };
 
 static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
drivers/iommu/mtk_iommu_v1.c

@@ -514,12 +514,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
 
 static const struct iommu_ops mtk_iommu_ops = {
 	.domain_alloc = mtk_iommu_domain_alloc,
-	.domain_free = mtk_iommu_domain_free,
-	.attach_dev = mtk_iommu_attach_device,
-	.detach_dev = mtk_iommu_detach_device,
-	.map = mtk_iommu_map,
-	.unmap = mtk_iommu_unmap,
-	.iova_to_phys = mtk_iommu_iova_to_phys,
 	.probe_device = mtk_iommu_probe_device,
 	.probe_finalize = mtk_iommu_probe_finalize,
 	.release_device = mtk_iommu_release_device,
@@ -527,6 +521,14 @@ static const struct iommu_ops mtk_iommu_ops = {
 	.device_group = generic_device_group,
 	.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
 	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = mtk_iommu_attach_device,
+		.detach_dev = mtk_iommu_detach_device,
+		.map = mtk_iommu_map,
+		.unmap = mtk_iommu_unmap,
+		.iova_to_phys = mtk_iommu_iova_to_phys,
+		.free = mtk_iommu_domain_free,
+	}
 };
 
 static const struct of_device_id mtk_iommu_of_ids[] = {
drivers/iommu/omap-iommu.c

@@ -1734,16 +1734,18 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev)
 
 static const struct iommu_ops omap_iommu_ops = {
 	.domain_alloc = omap_iommu_domain_alloc,
-	.domain_free = omap_iommu_domain_free,
-	.attach_dev = omap_iommu_attach_dev,
-	.detach_dev = omap_iommu_detach_dev,
-	.map = omap_iommu_map,
-	.unmap = omap_iommu_unmap,
-	.iova_to_phys = omap_iommu_iova_to_phys,
 	.probe_device = omap_iommu_probe_device,
 	.release_device = omap_iommu_release_device,
 	.device_group = omap_iommu_device_group,
 	.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = omap_iommu_attach_dev,
+		.detach_dev = omap_iommu_detach_dev,
+		.map = omap_iommu_map,
+		.unmap = omap_iommu_unmap,
+		.iova_to_phys = omap_iommu_iova_to_phys,
+		.free = omap_iommu_domain_free,
+	}
 };
 
 static int __init omap_iommu_init(void)
drivers/iommu/rockchip-iommu.c

@@ -1187,17 +1187,19 @@ static int rk_iommu_of_xlate(struct device *dev,
 
 static const struct iommu_ops rk_iommu_ops = {
 	.domain_alloc = rk_iommu_domain_alloc,
-	.domain_free = rk_iommu_domain_free,
-	.attach_dev = rk_iommu_attach_device,
-	.detach_dev = rk_iommu_detach_device,
-	.map = rk_iommu_map,
-	.unmap = rk_iommu_unmap,
 	.probe_device = rk_iommu_probe_device,
 	.release_device = rk_iommu_release_device,
-	.iova_to_phys = rk_iommu_iova_to_phys,
 	.device_group = rk_iommu_device_group,
 	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
 	.of_xlate = rk_iommu_of_xlate,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = rk_iommu_attach_device,
+		.detach_dev = rk_iommu_detach_device,
+		.map = rk_iommu_map,
+		.unmap = rk_iommu_unmap,
+		.iova_to_phys = rk_iommu_iova_to_phys,
+		.free = rk_iommu_domain_free,
+	}
 };
 
 static int rk_iommu_probe(struct platform_device *pdev)
drivers/iommu/s390-iommu.c

@@ -363,16 +363,18 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
 static const struct iommu_ops s390_iommu_ops = {
 	.capable = s390_iommu_capable,
 	.domain_alloc = s390_domain_alloc,
-	.domain_free = s390_domain_free,
-	.attach_dev = s390_iommu_attach_device,
-	.detach_dev = s390_iommu_detach_device,
-	.map = s390_iommu_map,
-	.unmap = s390_iommu_unmap,
-	.iova_to_phys = s390_iommu_iova_to_phys,
 	.probe_device = s390_iommu_probe_device,
 	.release_device = s390_iommu_release_device,
 	.device_group = generic_device_group,
 	.pgsize_bitmap = S390_IOMMU_PGSIZES,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = s390_iommu_attach_device,
+		.detach_dev = s390_iommu_detach_device,
+		.map = s390_iommu_map,
+		.unmap = s390_iommu_unmap,
+		.iova_to_phys = s390_iommu_iova_to_phys,
+		.free = s390_domain_free,
+	}
 };
 
 static int __init s390_iommu_init(void)
drivers/iommu/sprd-iommu.c

@@ -416,20 +416,22 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 
 static const struct iommu_ops sprd_iommu_ops = {
 	.domain_alloc = sprd_iommu_domain_alloc,
-	.domain_free = sprd_iommu_domain_free,
-	.attach_dev = sprd_iommu_attach_device,
-	.detach_dev = sprd_iommu_detach_device,
-	.map = sprd_iommu_map,
-	.unmap = sprd_iommu_unmap,
-	.iotlb_sync_map = sprd_iommu_sync_map,
-	.iotlb_sync = sprd_iommu_sync,
-	.iova_to_phys = sprd_iommu_iova_to_phys,
 	.probe_device = sprd_iommu_probe_device,
 	.release_device = sprd_iommu_release_device,
 	.device_group = sprd_iommu_device_group,
 	.of_xlate = sprd_iommu_of_xlate,
 	.pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
 	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = sprd_iommu_attach_device,
+		.detach_dev = sprd_iommu_detach_device,
+		.map = sprd_iommu_map,
+		.unmap = sprd_iommu_unmap,
+		.iotlb_sync_map = sprd_iommu_sync_map,
+		.iotlb_sync = sprd_iommu_sync,
+		.iova_to_phys = sprd_iommu_iova_to_phys,
+		.free = sprd_iommu_domain_free,
+	}
 };
 
 static const struct of_device_id sprd_iommu_of_match[] = {
drivers/iommu/sun50i-iommu.c

@@ -760,19 +760,21 @@ static int sun50i_iommu_of_xlate(struct device *dev,
 
 static const struct iommu_ops sun50i_iommu_ops = {
 	.pgsize_bitmap = SZ_4K,
-	.attach_dev = sun50i_iommu_attach_device,
-	.detach_dev = sun50i_iommu_detach_device,
 	.device_group = sun50i_iommu_device_group,
 	.domain_alloc = sun50i_iommu_domain_alloc,
-	.domain_free = sun50i_iommu_domain_free,
-	.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
-	.iotlb_sync = sun50i_iommu_iotlb_sync,
-	.iova_to_phys = sun50i_iommu_iova_to_phys,
-	.map = sun50i_iommu_map,
 	.of_xlate = sun50i_iommu_of_xlate,
 	.probe_device = sun50i_iommu_probe_device,
 	.release_device = sun50i_iommu_release_device,
-	.unmap = sun50i_iommu_unmap,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = sun50i_iommu_attach_device,
+		.detach_dev = sun50i_iommu_detach_device,
+		.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
+		.iotlb_sync = sun50i_iommu_iotlb_sync,
+		.iova_to_phys = sun50i_iommu_iova_to_phys,
+		.map = sun50i_iommu_map,
+		.unmap = sun50i_iommu_unmap,
+		.free = sun50i_iommu_domain_free,
+	}
 };
 
 static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
drivers/iommu/tegra-gart.c

@@ -272,19 +272,21 @@ static void gart_iommu_sync(struct iommu_domain *domain,
 
 static const struct iommu_ops gart_iommu_ops = {
 	.domain_alloc = gart_iommu_domain_alloc,
-	.domain_free = gart_iommu_domain_free,
-	.attach_dev = gart_iommu_attach_dev,
-	.detach_dev = gart_iommu_detach_dev,
 	.probe_device = gart_iommu_probe_device,
 	.release_device = gart_iommu_release_device,
 	.device_group = generic_device_group,
-	.map = gart_iommu_map,
-	.unmap = gart_iommu_unmap,
-	.iova_to_phys = gart_iommu_iova_to_phys,
 	.pgsize_bitmap = GART_IOMMU_PGSIZES,
 	.of_xlate = gart_iommu_of_xlate,
-	.iotlb_sync_map = gart_iommu_sync_map,
-	.iotlb_sync = gart_iommu_sync,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = gart_iommu_attach_dev,
+		.detach_dev = gart_iommu_detach_dev,
+		.map = gart_iommu_map,
+		.unmap = gart_iommu_unmap,
+		.iova_to_phys = gart_iommu_iova_to_phys,
+		.iotlb_sync_map = gart_iommu_sync_map,
+		.iotlb_sync = gart_iommu_sync,
+		.free = gart_iommu_domain_free,
+	}
 };
 
 int tegra_gart_suspend(struct gart_device *gart)
drivers/iommu/tegra-smmu.c

@@ -963,17 +963,19 @@ static int tegra_smmu_of_xlate(struct device *dev,
 
 static const struct iommu_ops tegra_smmu_ops = {
 	.domain_alloc = tegra_smmu_domain_alloc,
-	.domain_free = tegra_smmu_domain_free,
-	.attach_dev = tegra_smmu_attach_dev,
-	.detach_dev = tegra_smmu_detach_dev,
 	.probe_device = tegra_smmu_probe_device,
 	.release_device = tegra_smmu_release_device,
 	.device_group = tegra_smmu_device_group,
-	.map = tegra_smmu_map,
-	.unmap = tegra_smmu_unmap,
-	.iova_to_phys = tegra_smmu_iova_to_phys,
 	.of_xlate = tegra_smmu_of_xlate,
 	.pgsize_bitmap = SZ_4K,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = tegra_smmu_attach_dev,
+		.detach_dev = tegra_smmu_detach_dev,
+		.map = tegra_smmu_map,
+		.unmap = tegra_smmu_unmap,
+		.iova_to_phys = tegra_smmu_iova_to_phys,
+		.free = tegra_smmu_domain_free,
+	}
 };
 
 static void tegra_smmu_ahb_enable(void)
drivers/iommu/virtio-iommu.c

@@ -1008,12 +1008,6 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 
 static struct iommu_ops viommu_ops = {
 	.domain_alloc = viommu_domain_alloc,
-	.domain_free = viommu_domain_free,
-	.attach_dev = viommu_attach_dev,
-	.map = viommu_map,
-	.unmap = viommu_unmap,
-	.iova_to_phys = viommu_iova_to_phys,
-	.iotlb_sync = viommu_iotlb_sync,
 	.probe_device = viommu_probe_device,
 	.probe_finalize = viommu_probe_finalize,
 	.release_device = viommu_release_device,
@@ -1022,6 +1016,14 @@ static struct iommu_ops viommu_ops = {
 	.put_resv_regions = generic_iommu_put_resv_regions,
 	.of_xlate = viommu_of_xlate,
 	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = viommu_attach_dev,
+		.map = viommu_map,
+		.unmap = viommu_unmap,
+		.iova_to_phys = viommu_iova_to_phys,
+		.iotlb_sync = viommu_iotlb_sync,
+		.free = viommu_domain_free,
+	}
 };
 
 static int viommu_init_vqs(struct viommu_dev *viommu)
include/linux/iommu.h

@@ -37,6 +37,7 @@ struct iommu_group;
 struct bus_type;
 struct device;
 struct iommu_domain;
+struct iommu_domain_ops;
 struct notifier_block;
 struct iommu_sva;
 struct iommu_fault_event;
@@ -88,7 +89,7 @@ struct iommu_domain_geometry {
 
 struct iommu_domain {
 	unsigned type;
-	const struct iommu_ops *ops;
+	const struct iommu_domain_ops *ops;
 	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
 	iommu_fault_handler_t handler;
 	void *handler_token;
@@ -192,26 +193,11 @@ struct iommu_iotlb_gather {
  * struct iommu_ops - iommu ops and capabilities
  * @capable: check capability
  * @domain_alloc: allocate iommu domain
- * @domain_free: free iommu domain
- * @attach_dev: attach device to an iommu domain
- * @detach_dev: detach device from an iommu domain
- * @map: map a physically contiguous memory region to an iommu domain
- * @map_pages: map a physically contiguous set of pages of the same size to
- *             an iommu domain.
- * @unmap: unmap a physically contiguous memory region from an iommu domain
- * @unmap_pages: unmap a number of pages of the same size from an iommu domain
- * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
- * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
- * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
- *            queue
- * @iova_to_phys: translate iova to physical address
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the groups domain
 * @device_group: find iommu group for a particular device
- * @enable_nesting: Enable nesting
- * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
@@ -228,6 +214,7 @@ struct iommu_iotlb_gather {
 * - IOMMU_DOMAIN_IDENTITY: must use an identity domain
 * - IOMMU_DOMAIN_DMA: must use a dma domain
 * - 0: use the default setting
+ * @default_domain_ops: the default ops for domains
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
@@ -236,33 +223,11 @@ struct iommu_ops {
 	bool (*capable)(enum iommu_cap);
 
+	/* Domain allocation and freeing by the iommu driver */
 	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
-	void (*domain_free)(struct iommu_domain *);
 
-	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
-	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
-	int (*map)(struct iommu_domain *domain, unsigned long iova,
-		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
-	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
-			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
-			 int prot, gfp_t gfp, size_t *mapped);
-	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
-		     size_t size, struct iommu_iotlb_gather *iotlb_gather);
-	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
-			      size_t pgsize, size_t pgcount,
-			      struct iommu_iotlb_gather *iotlb_gather);
-	void (*flush_iotlb_all)(struct iommu_domain *domain);
-	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
-			       size_t size);
-	void (*iotlb_sync)(struct iommu_domain *domain,
-			   struct iommu_iotlb_gather *iotlb_gather);
-	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
 	struct iommu_device *(*probe_device)(struct device *dev);
 	void (*release_device)(struct device *dev);
 	void (*probe_finalize)(struct device *dev);
 	struct iommu_group *(*device_group)(struct device *dev);
-	int (*enable_nesting)(struct iommu_domain *domain);
-	int (*set_pgtable_quirks)(struct iommu_domain *domain,
-				  unsigned long quirks);
 
 	/* Request/Free a list of reserved regions for a device */
 	void (*get_resv_regions)(struct device *dev, struct list_head *list);
@@ -288,10 +253,60 @@ struct iommu_ops {
 
 	int (*def_domain_type)(struct device *dev);
 
+	const struct iommu_domain_ops *default_domain_ops;
 	unsigned long pgsize_bitmap;
 	struct module *owner;
 };
 
+/**
+ * struct iommu_domain_ops - domain specific operations
+ * @attach_dev: attach an iommu domain to a device
+ * @detach_dev: detach an iommu domain from a device
+ * @map: map a physically contiguous memory region to an iommu domain
+ * @map_pages: map a physically contiguous set of pages of the same size to
+ *             an iommu domain.
+ * @unmap: unmap a physically contiguous memory region from an iommu domain
+ * @unmap_pages: unmap a number of pages of the same size from an iommu domain
+ * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
+ * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
+ * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
+ *            queue
+ * @iova_to_phys: translate iova to physical address
+ * @enable_nesting: Enable nesting
+ * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
+ * @free: Release the domain after use.
+ */
+struct iommu_domain_ops {
+	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
+	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
+
+	int (*map)(struct iommu_domain *domain, unsigned long iova,
+		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
+			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			 int prot, gfp_t gfp, size_t *mapped);
+	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
+			size_t size, struct iommu_iotlb_gather *iotlb_gather);
+	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
+			      size_t pgsize, size_t pgcount,
+			      struct iommu_iotlb_gather *iotlb_gather);
+
+	void (*flush_iotlb_all)(struct iommu_domain *domain);
+	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+			       size_t size);
+	void (*iotlb_sync)(struct iommu_domain *domain,
+			   struct iommu_iotlb_gather *iotlb_gather);
+
+	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
+				    dma_addr_t iova);
+
+	int (*enable_nesting)(struct iommu_domain *domain);
+	int (*set_pgtable_quirks)(struct iommu_domain *domain,
+				  unsigned long quirks);
+
+	void (*free)(struct iommu_domain *domain);
+};
+
 /**
  * struct iommu_device - IOMMU core representation of one IOMMU hardware
  *			 instance
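For endpoint drivers and external subsystems the exported entry points are unchanged; only the internal dispatch now goes through struct iommu_domain_ops. A minimal consumer sketch under that assumption (the foo_* name, the 0x100000 IOVA, and SZ_4K size are illustrative values, not from this patch):

#include <linux/iommu.h>
#include <linux/sizes.h>

/* Illustrative consumer of the unchanged external API. */
static int foo_consumer_map_one(struct device *dev, phys_addr_t paddr)
{
	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
	int ret;

	if (!domain)
		return -ENOMEM;

	/* Internally ends up in domain->ops->map() (or ->map_pages()). */
	ret = iommu_map(domain, 0x100000, paddr, SZ_4K,
			IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		iommu_unmap(domain, 0x100000, SZ_4K);

	iommu_domain_free(domain);	/* now calls domain->ops->free() */
	return ret;
}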