dma-direct: rename and cleanup __phys_to_dma
commit 5ceda74093 upstream.
The __phys_to_dma vs phys_to_dma distinction isn't exactly obvious. Try
to improve the situation by renaming __phys_to_dma to
phys_to_dma_unencrypted, and by no longer forcing architectures that want
to override phys_to_dma to also provide __phys_to_dma.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Chen Zhuo <sagazchen@tencent.com>
Signed-off-by: Xinghui Li <korantli@tencent.com>
parent 6ab4586e14
commit b0d4520b4f
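For context, a minimal sketch (not part of this patch) of what an architecture override looks like after the rename. The arch name, header guard, and EXAMPLE_BUS_OFFSET below are hypothetical and purely illustrative:

/*
 * Sketch only: a hypothetical <asm/dma-direct.h> for an architecture that
 * selects CONFIG_ARCH_HAS_PHYS_TO_DMA.  With this patch it only has to
 * override phys_to_dma()/__dma_to_phys(); the generic <linux/dma-direct.h>
 * falls back to "#define phys_to_dma_unencrypted phys_to_dma" because no
 * separate unencrypted variant is provided here.
 */
#ifndef ASM_EXAMPLE_DMA_DIRECT_H		/* hypothetical guard */
#define ASM_EXAMPLE_DMA_DIRECT_H

#include <linux/types.h>

struct device;

#define EXAMPLE_BUS_OFFSET	0x40000000UL	/* made-up bus offset */

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	/* CPU physical address -> bus/DMA address for this fictional bus */
	return (dma_addr_t)paddr - EXAMPLE_BUS_OFFSET;
}

static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	/* inverse translation; still named __dma_to_phys at this point */
	return (phys_addr_t)dev_addr + EXAMPLE_BUS_OFFSET;
}

#endif /* ASM_EXAMPLE_DMA_DIRECT_H */

Before this patch such an architecture would also have had to spell its helper __phys_to_dma so that the generic phys_to_dma() wrapper could find it.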
@@ -2,7 +2,7 @@
 #ifndef ASM_ARM_DMA_DIRECT_H
 #define ASM_ARM_DMA_DIRECT_H 1
 
-static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	unsigned int offset = paddr & ~PAGE_MASK;
 	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
@@ -3784,7 +3784,7 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
 	 */
 	if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
 		tlb_addr = swiotlb_tbl_map_single(dev,
-				__phys_to_dma(dev, io_tlb_start),
+				phys_to_dma_unencrypted(dev, io_tlb_start),
 				paddr, size, aligned_size, dir, attrs);
 		if (tlb_addr == DMA_MAPPING_ERROR) {
 			goto swiotlb_error;
@@ -10,14 +10,29 @@ static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
 
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
+#ifndef phys_to_dma_unencrypted
+#define phys_to_dma_unencrypted	phys_to_dma
+#endif
 #else
-static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
+		phys_addr_t paddr)
 {
 	dma_addr_t dev_addr = (dma_addr_t)paddr;
 
 	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
 }
 
+/*
+ * If memory encryption is supported, phys_to_dma will set the memory encryption
+ * bit in the DMA address, and dma_to_phys will clear it.
+ * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
+ * buffers.
+ */
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return __sme_set(phys_to_dma_unencrypted(dev, paddr));
+}
+
 static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
 {
 	phys_addr_t paddr = (phys_addr_t)dev_addr;
@@ -49,17 +64,6 @@ static inline bool force_dma_unencrypted(struct device *dev)
 }
 #endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 
-/*
- * If memory encryption is supported, phys_to_dma will set the memory encryption
- * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
- * and __dma_to_phys versions should only be used on non-encrypted memory for
- * special occasions like DMA coherent buffers.
- */
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-	return __sme_set(__phys_to_dma(dev, paddr));
-}
-
 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
 	return __sme_clr(__dma_to_phys(dev, daddr));
@@ -39,7 +39,7 @@ static inline dma_addr_t phys_to_dma_direct(struct device *dev,
 		phys_addr_t phys)
 {
 	if (force_dma_unencrypted(dev))
-		return __phys_to_dma(dev, phys);
+		return phys_to_dma_unencrypted(dev, phys);
 	return phys_to_dma(dev, phys);
 }
 
@@ -161,7 +161,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	ret = page_address(page);
 	if (force_dma_unencrypted(dev)) {
 		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
-		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
+		*dma_handle = phys_to_dma_unencrypted(dev, page_to_phys(page));
 	} else {
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	}
@@ -403,11 +403,11 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
 
 	/*
-	 * This check needs to be against the actual bit mask value, so
-	 * use __phys_to_dma() here so that the SME encryption mask isn't
+	 * This check needs to be against the actual bit mask value, so use
+	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
 	 * part of the check.
 	 */
-	return mask >= __phys_to_dma(dev, min_mask);
+	return mask >= phys_to_dma_unencrypted(dev, min_mask);
 }
 
 size_t dma_direct_max_mapping_size(struct device *dev)
@@ -675,13 +675,13 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
 	}
 
 	/* Oh well, have to allocate and map a bounce buffer. */
-	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
+	*phys = swiotlb_tbl_map_single(dev, phys_to_dma_unencrypted(dev, io_tlb_start),
 			*phys, size, size, dir, attrs);
 	if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
 		return false;
 
 	/* Ensure that the address returned is DMA'ble */
-	*dma_addr = __phys_to_dma(dev, *phys);
+	*dma_addr = phys_to_dma_unencrypted(dev, *phys);
 	if (unlikely(!dma_capable(dev, *dma_addr, size))) {
 		swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
 				attrs | DMA_ATTR_SKIP_CPU_SYNC);