#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

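/*
 * Allocate pages for the swiotlb bounce buffer.  If any RAM lies below the
 * 32-bit boundary, allocate with __GFP_DMA so the buffer remains addressable
 * by devices with a 32-bit DMA mask; otherwise plain pages are sufficient.
 */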
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	struct memblock_region *reg;
	gfp_t flags = __GFP_NOWARN;

	for_each_memblock(memory, reg) {
		if (reg->base < (phys_addr_t)0xffffffff) {
			flags |= __GFP_DMA;
			break;
		}
	}
	return __get_free_pages(flags, order);
}

enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};

static bool hypercall_cflush = false;

/* functions called by SWIOTLB */
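/*
 * Perform cache maintenance on [handle + offset, handle + offset + size),
 * one page at a time.  The buffer may be backed by foreign (grant-mapped)
 * or highmem pages, so maintenance is delegated to the hypervisor through
 * GNTTABOP_cache_flush: clean before DMA to the device, invalidate after
 * DMA from the device.
 */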
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	struct gnttab_cache_flush cflush;
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;

		/* buffers in highmem or foreign pages cannot cross page
		 * boundaries */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		cflush.op = 0;
		cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
		cflush.offset = offset;
		cflush.length = len;

		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
			cflush.op = GNTTAB_CACHE_INVAL;
		if (op == DMA_MAP) {
			if (dir == DMA_FROM_DEVICE)
				cflush.op = GNTTAB_CACHE_INVAL;
			else
				cflush.op = GNTTAB_CACHE_CLEAN;
		}
		if (cflush.op)
			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
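/*
 * Thin wrappers used by the map/unmap/sync helpers below: split the bus
 * address into page and offset and pick the maintenance direction.
 */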
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}
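/*
 * Entry points called by swiotlb-xen.  No cache maintenance is needed for
 * coherent devices; map/unmap additionally honour DMA_ATTR_SKIP_CPU_SYNC.
 */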
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}
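/*
 * A swiotlb bounce buffer is only required when the page is foreign (its
 * guest frame number differs from the bus/machine frame number), the device
 * is not cache coherent and the cache-flush hypercall is unavailable.
 */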
bool xen_arch_need_swiotlb(struct device *dev,
			   unsigned long pfn,
			   unsigned long bfn)
{
	return (!hypercall_cflush && (pfn != bfn) && !is_device_dma_coherent(dev));
}

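/*
 * Dom0 memory is mapped 1:1 (guest physical == machine), so no remapping is
 * needed to obtain a machine-contiguous region; report the physical start
 * address as the DMA handle.
 */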
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
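/* Nothing to undo: xen_create_contiguous_region() does not change mappings. */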
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
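/* DMA ops installed by xen_mm_init() when running as the initial domain. */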
struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

static struct dma_map_ops xen_swiotlb_dma_ops = {
	.mapping_error = xen_swiotlb_dma_mapping_error,
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.set_dma_mask = xen_swiotlb_set_dma_mask,
};
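/*
 * Dom0 only: set up the swiotlb bounce buffer, install the Xen DMA ops and
 * probe for the GNTTABOP_cache_flush hypercall with a zero-length flush.
 */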
int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;

	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);