dma-direct: remove __dma_direct_free_pages
We can just call dma_free_contiguous directly instead of wrapping it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Filippov <jcmvbkbc@gmail.com>
parent b3d53f5fce
commit acaade1af3
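The removed helper was a one-line wrapper around dma_free_contiguous(), so every caller can make the call directly; the only thing to watch is that the argument order changes from (dev, size, page) to (dev, page, size). A minimal before/after sketch of the call pattern, assuming dma_free_contiguous() is declared in <linux/dma-contiguous.h> as in kernels of this era; example_free() is a hypothetical caller added only for illustration:

#include <linux/dma-contiguous.h>	/* dma_free_contiguous() */
#include <linux/dma-direct.h>
#include <linux/device.h>

/* Before this commit: a trivial wrapper, removed by the diff below. */
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
{
	dma_free_contiguous(dev, page, size);
}

/*
 * After: callers invoke dma_free_contiguous() directly.  Note the argument
 * order is (dev, page, size), not the wrapper's (dev, size, page).
 * example_free() is a hypothetical caller for illustration only.
 */
static void example_free(struct device *dev, struct page *page, size_t size)
{
	dma_free_contiguous(dev, page, size);
}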
@@ -68,6 +68,5 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
-void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
 int dma_direct_supported(struct device *dev, u64 mask);
 #endif /* _LINUX_DMA_DIRECT_H */
@@ -153,7 +153,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		 * so log an error and fail.
 		 */
 		dev_info(dev, "Rejecting highmem page from CMA.\n");
-		__dma_direct_free_pages(dev, size, page);
+		dma_free_contiguous(dev, page, size);
 		return NULL;
 	}
 
@@ -175,11 +175,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	return ret;
 }
 
-void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
-{
-	dma_free_contiguous(dev, page, size);
-}
-
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
@@ -188,7 +183,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
-		__dma_direct_free_pages(dev, size, cpu_addr);
+		dma_free_contiguous(dev, cpu_addr, size);
 		return;
 	}
 
@@ -198,7 +193,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
 	    dma_alloc_need_uncached(dev, attrs))
 		cpu_addr = cached_kernel_address(cpu_addr);
-	__dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
+	dma_free_contiguous(dev, virt_to_page(cpu_addr), size);
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size,
@@ -238,7 +238,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 			dma_pgprot(dev, PAGE_KERNEL, attrs),
 			__builtin_return_address(0));
 	if (!ret) {
-		__dma_direct_free_pages(dev, size, page);
+		dma_free_contiguous(dev, page, size);
 		return ret;
 	}
 
@@ -256,7 +256,7 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		struct page *page = pfn_to_page(__phys_to_pfn(phys));
 
 		vunmap(vaddr);
-		__dma_direct_free_pages(dev, size, page);
+		dma_free_contiguous(dev, page, size);
 	}
 }