swiotlb-xen: add struct device * parameter to xen_phys_to_bus

No functional changes. The parameter is unused in this patch but will be
used by subsequent patches.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Tested-by: Corey Minyard <cminyard@mvista.com>
Tested-by: Roman Shaposhnik <roman@zededa.com>
Link: https://lore.kernel.org/r/20200710223427.6897-3-sstabellini@kernel.org
Signed-off-by: Juergen Gross <jgross@suse.com>
---
 drivers/xen/swiotlb-xen.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -57,7 +57,7 @@ static unsigned long xen_io_tlb_nslabs;
  * can be 32bit when dma_addr_t is 64bit leading to a loss in
  * information if the shift is done before casting to 64bit.
  */
-static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
+static inline dma_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
 {
 	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
 	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;
@@ -78,9 +78,9 @@ static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 	return paddr;
 }
 
-static inline dma_addr_t xen_virt_to_bus(void *address)
+static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
 {
-	return xen_phys_to_bus(virt_to_phys(address));
+	return xen_phys_to_bus(dev, virt_to_phys(address));
 }
 
 static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
@@ -309,7 +309,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
 	 * to *dma_handle. */
 	phys = *dma_handle;
-	dev_addr = xen_phys_to_bus(phys);
+	dev_addr = xen_phys_to_bus(hwdev, phys);
 	if (((dev_addr + size - 1 <= dma_mask)) &&
 	    !range_straddles_page_boundary(phys, size))
 		*dma_handle = dev_addr;
@@ -370,7 +370,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 				       unsigned long attrs)
 {
 	phys_addr_t map, phys = page_to_phys(page) + offset;
-	dma_addr_t dev_addr = xen_phys_to_bus(phys);
+	dma_addr_t dev_addr = xen_phys_to_bus(dev, phys);
 
 	BUG_ON(dir == DMA_NONE);
 	/*
@@ -395,7 +395,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	phys = map;
-	dev_addr = xen_phys_to_bus(map);
+	dev_addr = xen_phys_to_bus(dev, map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -539,7 +539,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 static int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
+	return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
 }
 
 const struct dma_map_ops xen_swiotlb_dma_ops = {
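
Editor's note: the point of threading a struct device * down to the lowest-level
translation helper is that later patches can make the physical-to-bus conversion
device-specific instead of global. The stand-alone C sketch below is purely
illustrative and is not part of this patch or of the kernel; struct demo_device,
its dma_offset field, and the demo_* helpers are hypothetical names used only to
show the pattern of forwarding a device pointer through a helper chain.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct device with a per-device bus offset. */
struct demo_device {
	uint64_t dma_offset;
};

typedef uint64_t demo_phys_addr_t;
typedef uint64_t demo_dma_addr_t;

/*
 * Once the device is available here, the translation can take per-device
 * state into account instead of relying on globals.
 */
static demo_dma_addr_t demo_phys_to_bus(struct demo_device *dev,
					demo_phys_addr_t paddr)
{
	return (demo_dma_addr_t)paddr + dev->dma_offset;
}

/* Callers simply forward the device pointer, as the patch does. */
static demo_dma_addr_t demo_virt_to_bus(struct demo_device *dev, void *addr)
{
	return demo_phys_to_bus(dev, (demo_phys_addr_t)(uintptr_t)addr);
}

int main(void)
{
	struct demo_device dev = { .dma_offset = 0x80000000ULL };
	int buf;

	printf("bus address: 0x%llx\n",
	       (unsigned long long)demo_virt_to_bus(&dev, &buf));
	return 0;
}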