iommu: Add iommu_map_sg() function
Mapping and unmapping are more often than not in the critical path. map_sg allows IOMMU driver implementations to optimize the process of mapping buffers into the IOMMU page tables.

Instead of mapping a buffer one page at a time and requiring potentially expensive TLB operations for each page, this function allows the driver to map all pages in one go and to defer TLB maintenance until after all pages have been mapped.

Additionally, the mapping operation is faster in general since clients do not have to call the map API over and over again for each physically contiguous chunk of memory that needs to be mapped to a virtually contiguous region.

Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
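For illustration, a minimal caller-side sketch of the new interface follows, assuming a domain that is already attached to a device and an IOVA chosen by the caller; the example_map_buffer() wrapper and its arguments are hypothetical and not part of this commit.

/*
 * Hypothetical caller-side sketch: map a whole scatterlist into an
 * already-attached IOMMU domain with a single call instead of one
 * iommu_map() call per physically contiguous chunk.
 */
#include <linux/iommu.h>
#include <linux/scatterlist.h>

static int example_map_buffer(struct iommu_domain *domain, unsigned long iova,
			      struct scatterlist *sgl, unsigned int nents)
{
	size_t mapped;

	/*
	 * iommu_map_sg() returns the number of bytes mapped; the default
	 * implementation returns 0 after undoing any partial mappings
	 * when one of the chunks fails to map.
	 */
	mapped = iommu_map_sg(domain, iova, sgl, nents,
			      IOMMU_READ | IOMMU_WRITE);
	if (!mapped)
		return -ENOMEM;

	return 0;
}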
commit 315786ebbf
parent 0df1f2487d
@@ -3424,6 +3424,7 @@ static const struct iommu_ops amd_iommu_ops = {
 	.detach_dev = amd_iommu_detach_device,
 	.map = amd_iommu_map,
 	.unmap = amd_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
 };
@@ -1652,6 +1652,7 @@ static const struct iommu_ops arm_smmu_ops = {
 	.detach_dev = arm_smmu_detach_dev,
 	.map = arm_smmu_map,
 	.unmap = arm_smmu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = arm_smmu_iova_to_phys,
 	.add_device = arm_smmu_add_device,
 	.remove_device = arm_smmu_remove_device,
@@ -1178,6 +1178,7 @@ static const struct iommu_ops exynos_iommu_ops = {
 	.detach_dev = exynos_iommu_detach_device,
 	.map = exynos_iommu_map,
 	.unmap = exynos_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = exynos_iommu_iova_to_phys,
 	.add_device = exynos_iommu_add_device,
 	.remove_device = exynos_iommu_remove_device,
@@ -4467,6 +4467,7 @@ static const struct iommu_ops intel_iommu_ops = {
 	.detach_dev = intel_iommu_detach_device,
 	.map = intel_iommu_map,
 	.unmap = intel_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = intel_iommu_iova_to_phys,
 	.add_device = intel_iommu_add_device,
 	.remove_device = intel_iommu_remove_device,
@@ -1124,6 +1124,31 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
 
+size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			    struct scatterlist *sg, unsigned int nents, int prot)
+{
+	int ret;
+	size_t mapped = 0;
+	unsigned int i;
+	struct scatterlist *s;
+
+	for_each_sg(sg, s, nents, i) {
+		phys_addr_t phys = page_to_phys(sg_page(s));
+		size_t page_len = s->offset + s->length;
+
+		ret = iommu_map(domain, iova + mapped, phys, page_len, prot);
+		if (ret) {
+			/* undo mappings already done */
+			iommu_unmap(domain, iova, mapped);
+			mapped = 0;
+			break;
+		}
+		mapped += page_len;
+	}
+
+	return mapped;
+}
+EXPORT_SYMBOL_GPL(default_iommu_map_sg);
+
 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
 			       phys_addr_t paddr, u64 size, int prot)
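Note that default_iommu_map_sg() above still calls iommu_map() once per scatterlist entry; the deferred TLB maintenance described in the commit message is realized only when a driver supplies its own ->map_sg callback. A hypothetical driver-side sketch of that pattern follows; my_iommu_map_page(), my_iommu_unmap() and my_iommu_tlb_sync() stand in for driver internals and are not real functions.

/*
 * Hypothetical driver-side sketch (not part of this commit): update the
 * page tables for every scatterlist entry first, then issue a single
 * deferred TLB sync for the whole buffer instead of one per page.
 */
static size_t my_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			      struct scatterlist *sg, unsigned int nents,
			      int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i;
	int ret;

	for_each_sg(sg, s, nents, i) {
		size_t len = s->offset + s->length;

		/* Page-table update only; no TLB operations here. */
		ret = my_iommu_map_page(domain, iova + mapped,
					page_to_phys(sg_page(s)), len, prot);
		if (ret) {
			my_iommu_unmap(domain, iova, mapped);
			mapped = 0;
			break;
		}
		mapped += len;
	}

	/* One TLB maintenance operation for the entire mapping. */
	my_iommu_tlb_sync(domain);

	return mapped;
}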
@@ -1127,6 +1127,7 @@ static const struct iommu_ops ipmmu_ops = {
 	.detach_dev = ipmmu_detach_device,
 	.map = ipmmu_map,
 	.unmap = ipmmu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = ipmmu_iova_to_phys,
 	.add_device = ipmmu_add_device,
 	.remove_device = ipmmu_remove_device,
@@ -681,6 +681,7 @@ static const struct iommu_ops msm_iommu_ops = {
 	.detach_dev = msm_iommu_detach_dev,
 	.map = msm_iommu_map,
 	.unmap = msm_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = msm_iommu_iova_to_phys,
 	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 };
@@ -1288,6 +1288,7 @@ static const struct iommu_ops omap_iommu_ops = {
 	.detach_dev = omap_iommu_detach_dev,
 	.map = omap_iommu_map,
 	.unmap = omap_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = omap_iommu_iova_to_phys,
 	.add_device = omap_iommu_add_device,
 	.remove_device = omap_iommu_remove_device,
@@ -361,6 +361,7 @@ static const struct iommu_ops shmobile_iommu_ops = {
 	.detach_dev = shmobile_iommu_detach_device,
 	.map = shmobile_iommu_map,
 	.unmap = shmobile_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = shmobile_iommu_iova_to_phys,
 	.add_device = shmobile_iommu_add_device,
 	.pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K,
@@ -955,6 +955,7 @@ static const struct iommu_ops smmu_iommu_ops = {
 	.detach_dev = smmu_iommu_detach_dev,
 	.map = smmu_iommu_map,
 	.unmap = smmu_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = smmu_iommu_iova_to_phys,
 	.pgsize_bitmap = SMMU_IOMMU_PGSIZES,
 };
@@ -22,6 +22,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/types.h>
+#include <linux/scatterlist.h>
 #include <trace/events/iommu.h>
 
 #define IOMMU_READ (1 << 0)
@@ -97,6 +98,8 @@ enum iommu_attr {
  * @detach_dev: detach device from an iommu domain
  * @map: map a physically contiguous memory region to an iommu domain
  * @unmap: unmap a physically contiguous memory region from an iommu domain
+ * @map_sg: map a scatter-gather list of physically contiguous memory chunks
+ *          to an iommu domain
  * @iova_to_phys: translate iova to physical address
  * @add_device: add device to iommu grouping
  * @remove_device: remove device from iommu grouping
@@ -114,6 +117,8 @@ struct iommu_ops {
 		   phys_addr_t paddr, size_t size, int prot);
 	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
 			size_t size);
+	size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
+			 struct scatterlist *sg, unsigned int nents, int prot);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
 	int (*add_device)(struct device *dev);
 	void (*remove_device)(struct device *dev);
@@ -156,6 +161,9 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
 		     phys_addr_t paddr, size_t size, int prot);
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 			  size_t size);
+extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+				   struct scatterlist *sg, unsigned int nents,
+				   int prot);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 				    iommu_fault_handler_t handler, void *token);
@@ -241,6 +249,13 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
 	return ret;
 }
 
+static inline size_t iommu_map_sg(struct iommu_domain *domain,
+				  unsigned long iova, struct scatterlist *sg,
+				  unsigned int nents, int prot)
+{
+	return domain->ops->map_sg(domain, iova, sg, nents, prot);
+}
+
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
@@ -293,6 +308,13 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 	return -ENODEV;
 }
 
+static inline size_t iommu_map_sg(struct iommu_domain *domain,
+				  unsigned long iova, struct scatterlist *sg,
+				  unsigned int nents, int prot)
+{
+	return -ENODEV;
+}
+
 static inline int iommu_domain_window_enable(struct iommu_domain *domain,
 					     u32 wnd_nr, phys_addr_t paddr,
 					     u64 size, int prot)