OpenCloudOS-Kernel/include/linux/swiotlb.h

#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
struct device;
struct dma_attrs;
struct page;
struct scatterlist;
extern int swiotlb_force;
/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2. What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE 128
/*
 * log of the size of each IO TLB slab. The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
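/*
 * Illustrative arithmetic: with the values above, each IO TLB slab is
 * 1 << IO_TLB_SHIFT = 2048 bytes, so the largest contiguous bounce mapping
 * is
 *
 *	IO_TLB_SEGSIZE << IO_TLB_SHIFT == 128 * 2048 == 262144 bytes (256 KiB)
 */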
extern void swiotlb_init(int verbose);
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swiotlb_nr_tbl(void);
unsigned long swiotlb_size_or_default(void);
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
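/*
 * A minimal usage sketch: architecture boot code reserves the bounce pool
 * once, early in boot. The wrapper below is an assumed example, not the
 * actual call site of any particular architecture.
 *
 *	static void __init example_arch_swiotlb_init(void)	// hypothetical
 *	{
 *		swiotlb_init(1);	// default-sized pool, verbose info
 *	}
 *
 * swiotlb_init() does not panic if the early allocation fails; instead,
 * later mapping attempts fail because no IO TLB memory is available.
 */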
/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};
/* define the last possible byte of physical address space as a mapping error */
#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
					  dma_addr_t tbl_dma_addr,
					  phys_addr_t phys, size_t size,
					  enum dma_data_direction dir);
extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t size, enum dma_data_direction dir);
extern void swiotlb_tbl_sync_single(struct device *hwdev,
				    phys_addr_t tlb_addr,
				    size_t size, enum dma_data_direction dir,
				    enum dma_sync_target target);
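/*
 * Illustrative bounce-buffer lifecycle (a sketch; hwdev, tbl_dma_addr, phys
 * and size are assumed placeholders):
 *
 *	phys_addr_t tlb_addr;
 *
 *	tlb_addr = swiotlb_tbl_map_single(hwdev, tbl_dma_addr, phys, size,
 *					  DMA_TO_DEVICE);
 *	if (tlb_addr == SWIOTLB_MAP_ERROR)
 *		return -ENOMEM;		// bounce pool exhausted
 *	// map_single copied the original buffer into the bounce slot; the
 *	// device now performs DMA against the bounce address.
 *
 *	// if the CPU rewrites the buffer before another transfer, re-sync:
 *	swiotlb_tbl_sync_single(hwdev, tlb_addr, size, DMA_TO_DEVICE,
 *				SYNC_FOR_DEVICE);
 *
 *	swiotlb_tbl_unmap_single(hwdev, tlb_addr, size, DMA_TO_DEVICE);
 */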
/* Accessory functions. */
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			dma_addr_t *dma_handle, gfp_t flags);
extern void
swiotlb_free_coherent(struct device *hwdev, size_t size,
		      void *vaddr, dma_addr_t dma_handle);
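/*
 * Illustrative coherent allocation (a sketch; hwdev and size are assumed
 * placeholders):
 *
 *	dma_addr_t dma_handle;
 *	void *vaddr;
 *
 *	vaddr = swiotlb_alloc_coherent(hwdev, size, &dma_handle, GFP_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	// ... buffer is shared with the device at dma_handle ...
 *	swiotlb_free_coherent(hwdev, size, vaddr, dma_handle);
 */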
extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs);
extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       struct dma_attrs *attrs);
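/*
 * Illustrative single-page streaming mapping (a sketch; dev, page, offset
 * and len are assumed placeholders). Failure is detected with
 * swiotlb_dma_mapping_error(), declared further below.
 *
 *	dma_addr_t dev_addr;
 *
 *	dev_addr = swiotlb_map_page(dev, page, offset, len,
 *				    DMA_FROM_DEVICE, NULL);
 *	if (swiotlb_dma_mapping_error(dev, dev_addr))
 *		return -ENOMEM;
 *	// ... device writes into the mapped region ...
 *	swiotlb_unmap_page(dev, dev_addr, len, DMA_FROM_DEVICE, NULL);
 */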
extern int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	       enum dma_data_direction dir);
extern void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
		 enum dma_data_direction dir);
extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs);
extern void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
		       struct dma_attrs *attrs);
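/*
 * Illustrative scatter-gather mapping (a sketch; sgl and nents are assumed
 * to describe an already initialised scatterlist). A return value of 0 from
 * the map call indicates failure.
 *
 *	int mapped = swiotlb_map_sg_attrs(hwdev, sgl, nents,
 *					  DMA_TO_DEVICE, NULL);
 *	if (!mapped)
 *		return -ENOMEM;
 *	// ... device reads from each mapped segment ...
 *	swiotlb_unmap_sg_attrs(hwdev, sgl, nents, DMA_TO_DEVICE, NULL);
 */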
extern void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir);
extern void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir);
extern void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir);
extern void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir);
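/*
 * Illustrative ownership hand-off around CPU access (a sketch; hwdev,
 * dev_addr and len are assumed placeholders). For a bounced buffer the
 * _for_cpu call copies the device's data back from the bounce slot before
 * the CPU reads it; the _for_device call hands the buffer back for the next
 * transfer.
 *
 *	swiotlb_sync_single_for_cpu(hwdev, dev_addr, len, DMA_FROM_DEVICE);
 *	// ... CPU inspects the received data ...
 *	swiotlb_sync_single_for_device(hwdev, dev_addr, len, DMA_FROM_DEVICE);
 */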
extern int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);
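/*
 * Illustrative mask check (a sketch; hwdev is an assumed placeholder): a
 * probe path can verify that the bounce pool itself is reachable under the
 * device's DMA mask before relying on swiotlb.
 *
 *	if (!swiotlb_dma_supported(hwdev, DMA_BIT_MASK(32)))
 *		return -EIO;	// even the bounce buffer is unaddressable
 */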
#ifdef CONFIG_SWIOTLB
extern void __init swiotlb_free(void);
#else
static inline void swiotlb_free(void) { }
#endif
extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);
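/*
 * Illustrative caller (a sketch; dev, vaddr, size and dma_handle are assumed
 * placeholders inside a hypothetical dma_map_ops .free callback, and
 * dma_to_phys() is an arch helper): is_swiotlb_buffer() lets a free path
 * tell bounce-pool memory apart from memory obtained from the arch's generic
 * (possibly CMA-backed) allocator and release each through the matching
 * helper.
 *
 *	phys_addr_t paddr = dma_to_phys(dev, dma_handle);
 *
 *	if (is_swiotlb_buffer(paddr))
 *		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 *	else
 *		...	// free through the generic coherent allocator
 */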
#endif /* __LINUX_SWIOTLB_H */