dma-direct: remove the mapping_error dma_map_ops method
The dma-direct code already returns (~(dma_addr_t)0x0) on mapping failures,
so we can switch over to returning DMA_MAPPING_ERROR and let the core
dma-mapping code handle the rest.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b0cbeae494
parent 42ee3cae0e
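The driver-visible contract is unchanged: callers still test the returned handle with dma_mapping_error(), which, once no ->mapping_error method is installed, reduces to a comparison against DMA_MAPPING_ERROR in the core. A minimal caller-side sketch follows; the device, buffer and length are hypothetical and not part of this patch:

/*
 * Sketch of the unchanged caller pattern. dma_map_single(),
 * dma_mapping_error() and dma_unmap_single() are the regular DMA API;
 * the surrounding function and its arguments are made up for illustration.
 */
#include <linux/dma-mapping.h>

static int example_map_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;	/* handle == DMA_MAPPING_ERROR for dma-direct */

	/* ... program the hardware with "handle" ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}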
@@ -59,7 +59,6 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = {
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.mapping_error = dma_direct_mapping_error,
 	.get_required_mask = swiotlb_powerpc_get_required,
 };
 
@@ -5,8 +5,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/mem_encrypt.h>
 
-#define DIRECT_MAPPING_ERROR (~(dma_addr_t)0)
-
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
 #else
@@ -76,5 +74,4 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 		enum dma_data_direction dir, unsigned long attrs);
 int dma_direct_supported(struct device *dev, u64 mask);
-int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);
 #endif /* _LINUX_DMA_DIRECT_H */
@@ -289,7 +289,7 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 	dma_addr_t dma_addr = phys_to_dma(dev, phys);
 
 	if (!check_addr(dev, dma_addr, size, __func__))
-		return DIRECT_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
@@ -336,11 +336,6 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	return mask >= phys_to_dma(dev, min_mask);
 }
 
-int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == DIRECT_MAPPING_ERROR;
-}
-
 const struct dma_map_ops dma_direct_ops = {
 	.alloc = dma_direct_alloc,
 	.free = dma_direct_free,
@@ -359,7 +354,6 @@ const struct dma_map_ops dma_direct_ops = {
 #endif
 	.get_required_mask = dma_direct_get_required_mask,
 	.dma_supported = dma_direct_supported,
-	.mapping_error = dma_direct_mapping_error,
 	.cache_sync = arch_dma_cache_sync,
 };
 EXPORT_SYMBOL(dma_direct_ops);
@@ -631,21 +631,21 @@ static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
 	if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
 		dev_warn_ratelimited(dev,
 			"Cannot do DMA to address %pa\n", phys);
-		return DIRECT_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 	}
 
 	/* Oh well, have to allocate and map a bounce buffer. */
 	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
 			*phys, size, dir, attrs);
 	if (*phys == SWIOTLB_MAP_ERROR)
-		return DIRECT_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 
 	/* Ensure that the address returned is DMA'ble */
 	dma_addr = __phys_to_dma(dev, *phys);
 	if (unlikely(!dma_capable(dev, dma_addr, size))) {
 		swiotlb_tbl_unmap_single(dev, *phys, size, dir,
 			attrs | DMA_ATTR_SKIP_CPU_SYNC);
-		return DIRECT_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 	}
 
 	return dma_addr;
@@ -680,7 +680,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 
 	if (!dev_is_dma_coherent(dev) &&
 	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0 &&
-	    dev_addr != DIRECT_MAPPING_ERROR)
+	    dev_addr != DMA_MAPPING_ERROR)
 		arch_sync_dma_for_device(dev, phys, size, dir);
 
 	return dev_addr;
@@ -789,7 +789,7 @@ swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems,
 	for_each_sg(sgl, sg, nelems, i) {
 		sg->dma_address = swiotlb_map_page(dev, sg_page(sg), sg->offset,
 				sg->length, dir, attrs);
-		if (sg->dma_address == DIRECT_MAPPING_ERROR)
+		if (sg->dma_address == DMA_MAPPING_ERROR)
 			goto out_error;
 		sg_dma_len(sg) = sg->length;
 	}
@@ -869,7 +869,6 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
 }
 
 const struct dma_map_ops swiotlb_dma_ops = {
-	.mapping_error = dma_direct_mapping_error,
 	.alloc = dma_direct_alloc,
 	.free = dma_direct_free,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
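For reference, the per-ops helper can go away because of the generic fallback added by the parent commit: when a dma_map_ops instance has no ->mapping_error method, dma_mapping_error() compares the handle directly against DMA_MAPPING_ERROR. The sketch below shows the approximate shape of that fallback; it is a simplification for illustration, not the verbatim include/linux/dma-mapping.h code:

/* Approximate shape of the generic check (simplified sketch, not verbatim). */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return dma_addr == DMA_MAPPING_ERROR;
}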