dma-mapping: Allow mixing bypass and mapped DMA operation
At the moment we allow bypassing DMA ops only when we can do this for the entire RAM. However there are configurations with mixed memory types where we could still bypass the IOMMU in most cases; POWERPC with persistent memory is one example.

This adds an arch hook to determine whether bypass still works for a given mapping, in which case the direct DMA API is invoked. The following patch checks the bus limit on POWERPC to allow or disallow direct mapping.

This also adds an ARCH_HAS_DMA_MAP_DIRECT config option that makes the arch_* hooks no-ops by default.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 418baf2c28
commit 8d8d53cf8f
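In code terms, the change is one extra per-mapping check beside the existing global bypass check. This is the dma_map_page_attrs() hot path from the diff below, shown without diff markers:

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		/* whole mapping is addressable without the IOMMU */
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		/* fall back to the IOMMU (or other) dma_map_ops */
		addr = ops->map_page(dev, page, offset, size, dir, attrs);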
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -314,6 +314,20 @@ static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
 void *arch_dma_set_uncached(void *addr, size_t size);
 void arch_dma_clear_uncached(void *addr, size_t size);
 
+#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
+bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
+bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
+bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
+		int nents);
+bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
+		int nents);
+#else
+#define arch_dma_map_page_direct(d, a)		(false)
+#define arch_dma_unmap_page_direct(d, a)	(false)
+#define arch_dma_map_sg_direct(d, s, n)		(false)
+#define arch_dma_unmap_sg_direct(d, s, n)	(false)
+#endif
+
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		const struct iommu_ops *iommu, bool coherent);
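For illustration, an arch implementation of the map hook might look like the sketch below. This is hypothetical, not the POWERPC follow-up patch itself: it assumes the platform records a limit in dev->bus_dma_limit and approves bypass only while the end of the mapping fits under that limit.

#include <linux/device.h>
#include <linux/types.h>

/*
 * Hypothetical sketch of an arch hook.  @addr is the physical address
 * of the end of the mapping (page_to_phys(page) + offset + size, as
 * passed from dma_map_page_attrs() below).  Returning true routes the
 * mapping through dma_direct_map_page() instead of the IOMMU ops.  A
 * real port would apply its own platform check, e.g. translating the
 * physical address to a bus/DMA address before comparing.
 */
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr)
{
	/* No bus limit recorded: nothing to decide here. */
	if (likely(!dev->bus_dma_limit))
		return false;

	/* Bypass only while the whole mapping fits under the limit. */
	return addr <= dev->bus_dma_limit;
}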
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -20,6 +20,10 @@ config DMA_OPS
 config DMA_OPS_BYPASS
 	bool
 
+# Lets platform IOMMU driver choose between bypass and IOMMU
+config ARCH_HAS_DMA_MAP_DIRECT
+	bool
+
 config NEED_SG_DMA_LENGTH
 	bool
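ARCH_HAS_DMA_MAP_DIRECT is not user-selectable; an architecture that implements the four hooks selects it from its own Kconfig. A hypothetical fragment (EXAMPLE_ARCH is illustrative, not taken from the POWERPC follow-up):

# Hypothetical arch Kconfig fragment: opt in to the per-mapping
# bypass hooks only where the arch_dma_*_direct() hooks are built.
config EXAMPLE_ARCH
	def_bool y
	select ARCH_HAS_DMA_MAP_DIRECT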
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -149,7 +149,8 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 	if (WARN_ON_ONCE(!dev->dma_mask))
 		return DMA_MAPPING_ERROR;
 
-	if (dma_map_direct(dev, ops))
+	if (dma_map_direct(dev, ops) ||
+	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
 		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
 	else
 		addr = ops->map_page(dev, page, offset, size, dir, attrs);
@@ -165,7 +166,8 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
-	if (dma_map_direct(dev, ops))
+	if (dma_map_direct(dev, ops) ||
+	    arch_dma_unmap_page_direct(dev, addr + size))
 		dma_direct_unmap_page(dev, addr, size, dir, attrs);
 	else if (ops->unmap_page)
 		ops->unmap_page(dev, addr, size, dir, attrs);
@@ -188,7 +190,8 @@ int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
 	if (WARN_ON_ONCE(!dev->dma_mask))
 		return 0;
 
-	if (dma_map_direct(dev, ops))
+	if (dma_map_direct(dev, ops) ||
+	    arch_dma_map_sg_direct(dev, sg, nents))
 		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
 	else
 		ents = ops->map_sg(dev, sg, nents, dir, attrs);
@@ -207,7 +210,8 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_unmap_sg(dev, sg, nents, dir);
-	if (dma_map_direct(dev, ops))
+	if (dma_map_direct(dev, ops) ||
+	    arch_dma_unmap_sg_direct(dev, sg, nents))
 		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
 	else if (ops->unmap_sg)
 		ops->unmap_sg(dev, sg, nents, dir, attrs);
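Nothing changes for drivers: the bypass decision is made per mapping inside the DMA API, and the unmap hooks exist so a mapping created via the direct path is also torn down via the direct path (which is why arch_dma_unmap_page_direct() is keyed on the dma_handle rather than a struct page). A minimal driver-side sketch using only the standard API, with a hypothetical example_dma_xfer() wrapper:

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: map one page, let the device DMA to it,
 * then unmap.  The per-mapping bypass decision above is invisible here;
 * dma_unmap_page() just has to see the same dma_addr and size so the
 * matching arch_dma_unmap_page_direct() check can be made. */
static int example_dma_xfer(struct device *dev, struct page *page)
{
	dma_addr_t dma;

	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the device with @dma and wait for completion ... */

	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}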