Merge branch 'for-next-arm-dma' into for-linus

Conflicts:
	arch/arm/Kconfig
	arch/arm/mm/dma-mapping.c

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>

commit 0f51596bd3
@@ -4,6 +4,7 @@ config ARM
select HAVE_AOUT
select HAVE_DMA_API_DEBUG
select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_MEMBLOCK

@@ -47,6 +48,14 @@ config ARM
config ARM_HAS_SG_CHAIN
bool

config NEED_SG_DMA_LENGTH
bool

config ARM_DMA_USE_IOMMU
select NEED_SG_DMA_LENGTH
select ARM_HAS_SG_CHAIN
bool

config HAVE_PWM
bool
@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
read_lock_irqsave(&device_info->lock, flags);

list_for_each_entry(b, &device_info->safe_buffers, node)
if (b->safe_dma_addr == safe_dma_addr) {
if (b->safe_dma_addr <= safe_dma_addr &&
b->safe_dma_addr + b->size > safe_dma_addr) {
rb = b;
break;
}
@@ -254,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
if (buf == NULL) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
return ~0;
return DMA_ERROR_CODE;
}

dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -307,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
dma_addr_t __dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
dma_addr_t dma_addr;
int ret;
@@ -320,21 +322,20 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,

ret = needs_bounce(dev, dma_addr, size);
if (ret < 0)
return ~0;
return DMA_ERROR_CODE;

if (ret == 0) {
__dma_page_cpu_to_dev(page, offset, size, dir);
arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
return dma_addr;
}

if (PageHighMem(page)) {
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
return ~0;
return DMA_ERROR_CODE;
}

return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(__dma_map_page);

/*
* see if a mapped address was really a "safe" buffer and if so, copy
@@ -342,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
* the safe buffer. (basically return things back to the way they
* should be)
*/
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct safe_buffer *buf;

@@ -352,19 +353,18 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,

buf = find_safe_buffer_dev(dev, dma_addr, __func__);
if (!buf) {
__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
dma_addr & ~PAGE_MASK, size, dir);
arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
return;
}

unmap_single(dev, buf, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_page);

int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
unsigned long off, size_t sz, enum dma_data_direction dir)
static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
unsigned long off;

dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
__func__, addr, off, sz, dir);
@@ -373,6 +373,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
if (!buf)
return 1;

off = addr - buf->safe_dma_addr;

BUG_ON(buf->direction != dir);

dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -388,12 +390,21 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
}
return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
unsigned long off, size_t sz, enum dma_data_direction dir)
static void dmabounce_sync_for_cpu(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
return;

arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}

static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
unsigned long off;

dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
__func__, addr, off, sz, dir);
@@ -402,6 +413,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
if (!buf)
return 1;

off = addr - buf->safe_dma_addr;

BUG_ON(buf->direction != dir);

dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -417,7 +430,38 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
}
return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);

static void dmabounce_sync_for_device(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
if (!__dmabounce_sync_for_device(dev, handle, size, dir))
return;

arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}

static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
{
if (dev->archdata.dmabounce)
return 0;

return arm_dma_ops.set_dma_mask(dev, dma_mask);
}

static struct dma_map_ops dmabounce_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
.mmap = arm_dma_mmap,
.map_page = dmabounce_map_page,
.unmap_page = dmabounce_unmap_page,
.sync_single_for_cpu = dmabounce_sync_for_cpu,
.sync_single_for_device = dmabounce_sync_for_device,
.map_sg = arm_dma_map_sg,
.unmap_sg = arm_dma_unmap_sg,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
.set_dma_mask = dmabounce_set_mask,
};

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
const char *name, unsigned long size)
@@ -479,6 +523,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
#endif

dev->archdata.dmabounce = device_info;
set_dma_ops(dev, &dmabounce_ops);

dev_info(dev, "dmabounce: registered device\n");
@@ -497,6 +542,7 @@ void dmabounce_unregister_dev(struct device *dev)
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

dev->archdata.dmabounce = NULL;
set_dma_ops(dev, NULL);

if (!device_info) {
dev_warn(dev,
@@ -7,12 +7,16 @@
#define ASMARM_DEVICE_H

struct dev_archdata {
struct dma_map_ops *dma_ops;
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif
};

struct omap_device;
@@ -0,0 +1,34 @@
#ifndef ASMARM_DMA_IOMMU_H
#define ASMARM_DMA_IOMMU_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/kmemcheck.h>

struct dma_iommu_mapping {
/* iommu specific data */
struct iommu_domain *domain;

void *bitmap;
size_t bits;
unsigned int order;
dma_addr_t base;

spinlock_t lock;
struct kref kref;
};

struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
int order);

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);

int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);

#endif /* __KERNEL__ */
#endif
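For orientation, a minimal usage sketch of the dma-iommu interface added above. Only arm_iommu_create_mapping(), arm_iommu_attach_device() and arm_iommu_release_mapping() come from this header; the function names, the platform bus and the 128 MiB window at 0x80000000 are illustrative assumptions, not part of this commit.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>

/* Illustrative only: give one platform device a private IOVA window. */
static struct dma_iommu_mapping *example_mapping;

static int example_attach_iommu(struct device *dev)
{
	/* 128 MiB of IO virtual addresses starting at 0x80000000,
	 * allocation bitmap managed at page granularity (order 0). */
	example_mapping = arm_iommu_create_mapping(&platform_bus_type,
						   0x80000000, 0x08000000, 0);
	if (IS_ERR(example_mapping))
		return PTR_ERR(example_mapping);

	/* after this, dma_map_*() calls on @dev are translated by the IOMMU */
	return arm_iommu_attach_device(dev, example_mapping);
}

static void example_release_iommu(void)
{
	arm_iommu_release_mapping(example_mapping);
}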
@@ -5,11 +5,35 @@

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#define DMA_ERROR_CODE (~0)
extern struct dma_map_ops arm_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return &arm_dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
BUG_ON(!dev);
dev->archdata.dma_ops = ops;
}

#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
return get_dma_ops(dev)->set_dma_mask(dev, mask);
}

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
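The hunk above is the heart of the conversion: every dma-mapping call is now dispatched through the struct dma_map_ops returned by get_dma_ops() instead of one hard-coded implementation. A hedged illustration of that dispatch rule follows; example_map_page() is a made-up name standing in for the generic wrappers pulled in from <asm-generic/dma-mapping-common.h>.

/* Illustration only: how a generic wrapper picks its implementation.
 * Devices normally fall back to the global arm_dma_ops; dmabounce and
 * the IOMMU mapper install their own table with set_dma_ops(). */
static inline dma_addr_t example_map_page(struct device *dev, struct page *page,
					  unsigned long offset, size_t size,
					  enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	return ops->map_page(dev, page, offset, size, dir, NULL);
}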
@@ -61,69 +85,12 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
}
#endif

/*
* The DMA API is built upon the notion of "buffer ownership". A buffer
* is either exclusively owned by the CPU (and therefore may be accessed
* by it) or exclusively owned by the DMA device. These helper functions
* represent the transitions between these two ownership states.
*
* Note, however, that on later ARMs, this notion does not work due to
* speculative prefetches. We model our approach on the assumption that
* the CPU does do speculative prefetches, which means we clean caches
* before transfers and delay cache invalidation until transfer completion.
*
* Private support functions: these are not part of the API and are
* liable to change. Drivers must not use these.
*/
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
enum dma_data_direction dir)
{
extern void ___dma_single_cpu_to_dev(const void *, size_t,
enum dma_data_direction);

if (!arch_is_coherent())
___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
enum dma_data_direction dir)
{
extern void ___dma_single_dev_to_cpu(const void *, size_t,
enum dma_data_direction);

if (!arch_is_coherent())
___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
size_t, enum dma_data_direction);

if (!arch_is_coherent())
___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
size_t, enum dma_data_direction);

if (!arch_is_coherent())
___dma_page_dev_to_cpu(page, off, size, dir);
}

extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);

/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == ~0;
return dma_addr == DMA_ERROR_CODE;
}

/*
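The comment block removed above still describes the model the new code follows: a streaming buffer is owned either by the CPU or by the device, and cache maintenance happens on each ownership transition. A hedged driver-side sketch of those transitions, using only the standard streaming API that this header continues to provide; the function name, buffer and DMA_FROM_DEVICE scenario are illustrative.

#include <linux/dma-mapping.h>

/* Sketch of the ownership protocol described above (illustrative names). */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* CPU -> device: caches are cleaned, the device may now DMA into buf */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... start the transfer and wait for it to complete ... */

	/* device -> CPU: stale cache lines are invalidated before we read */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* the CPU may inspect buf here; hand it back before reusing the mapping */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}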
@@ -141,69 +108,118 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
{
}

extern int dma_supported(struct device *dev, u64 mask);

/**
* dma_alloc_coherent - allocate consistent memory for DMA
* arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: required memory size
* @handle: bus-specific DMA address
* @attrs: optinal attributes that specific mapping properties
*
* Allocate some uncached, unbuffered memory for a device for
* performing DMA. This function allocates pages, and will
* return the CPU-viewed address, and sets @handle to be the
* device-viewed address.
* Allocate some memory for a device for performing DMA. This function
* allocates pages, and will return the CPU-viewed address, and sets @handle
* to be the device-viewed address.
*/
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, struct dma_attrs *attrs);

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!ops);

cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}

/**
* dma_free_coherent - free memory allocated by dma_alloc_coherent
* arm_dma_free - free memory allocated by arm_dma_alloc
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: size of memory originally requested in dma_alloc_coherent
* @cpu_addr: CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
* @attrs: optinal attributes that specific mapping properties
*
* Free (and unmap) a DMA buffer previously allocated by
* dma_alloc_coherent().
* arm_dma_alloc().
*
* References to memory and mappings associated with cpu_addr/handle
* during and after this call executing are illegal.
*/
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs);

#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);

debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

/**
* dma_mmap_coherent - map a coherent DMA allocation into user space
* arm_dma_mmap - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @vma: vm_area_struct describing requested user mapping
* @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
* @size: size of memory originally requested in dma_alloc_coherent
* @attrs: optinal attributes that specific mapping properties
*
* Map a coherent DMA buffer previously allocated by dma_alloc_coherent
* into user space. The coherent DMA buffer must not be freed by the
* driver until the user space mapping has been released.
*/
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t);
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs);

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

/**
* dma_alloc_writecombine - allocate writecombining memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: required memory size
* @handle: bus-specific DMA address
*
* Allocate some uncached, buffered memory for a device for
* performing DMA. This function allocates pages, and will
* return the CPU-viewed address, and sets @handle to be the
* device-viewed address.
*/
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
gfp_t);
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr,
size_t size, struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
dma_free_coherent(dev,size,cpu_addr,handle)
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t);
static inline void dma_free_writecombine(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}

/*
* This can be called during boot to increase the size of the consistent
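With the hunk above, dma_alloc_writecombine() and friends are no longer separate entry points; they become thin wrappers that set DMA_ATTR_WRITE_COMBINE and call the attrs-based allocator, which in turn dispatches to arm_dma_alloc()/arm_dma_mmap()/arm_dma_free(). A hedged sketch of a caller, using a hypothetical frame-buffer driver; the example_fb_* names are made up, only the dma_*_writecombine() calls come from the header.

/* Illustrative only: a write-combined buffer allocated and mapped to userspace. */
static void *example_fb_virt;
static dma_addr_t example_fb_dma;

static int example_fb_probe(struct device *dev, size_t fb_size)
{
	example_fb_virt = dma_alloc_writecombine(dev, fb_size,
						 &example_fb_dma, GFP_KERNEL);
	return example_fb_virt ? 0 : -ENOMEM;
}

static int example_fb_mmap(struct device *dev, struct vm_area_struct *vma,
			   size_t fb_size)
{
	/* ends up in arm_dma_mmap() via dma_mmap_attrs() */
	return dma_mmap_writecombine(dev, vma, example_fb_virt,
				     example_fb_dma, fb_size);
}

static void example_fb_remove(struct device *dev, size_t fb_size)
{
	dma_free_writecombine(dev, fb_size, example_fb_virt, example_fb_dma);
}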
@@ -212,8 +228,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
*/
extern void __init init_consistent_dma_size(unsigned long size);


#ifdef CONFIG_DMABOUNCE
/*
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
* and utilize bounce buffers as needed to work around limited DMA windows.
@@ -253,222 +267,19 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
*/
extern void dmabounce_unregister_dev(struct device *);

/*
* The DMA API, implemented by dmabounce.c. See below for descriptions.
*/
extern dma_addr_t __dma_map_page(struct device *, struct page *,
unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
enum dma_data_direction);

/*
* Private functions
*/
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
return 1;
}


static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
__dma_page_cpu_to_dev(page, offset, size, dir);
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
* dma_map_single - map a single buffer for streaming DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @cpu_addr: CPU direct mapped address of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
* Ensure that any data held in the cache is appropriately discarded
* or written back.
*
* The device owns this memory once this call has completed. The CPU
* can regain ownership by calling dma_unmap_single() or
* dma_sync_single_for_cpu().
*/
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
size_t size, enum dma_data_direction dir)
{
unsigned long offset;
struct page *page;
dma_addr_t addr;

BUG_ON(!virt_addr_valid(cpu_addr));
BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
BUG_ON(!valid_dma_direction(dir));

page = virt_to_page(cpu_addr);
offset = (unsigned long)cpu_addr & ~PAGE_MASK;
addr = __dma_map_page(dev, page, offset, size, dir);
debug_dma_map_page(dev, page, offset, size, dir, addr, true);

return addr;
}

/**
* dma_map_page - map a portion of a page for streaming DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @page: page that buffer resides in
* @offset: offset into page for start of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
* Ensure that any data held in the cache is appropriately discarded
* or written back.
*
* The device owns this memory once this call has completed. The CPU
* can regain ownership by calling dma_unmap_page().
*/
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
dma_addr_t addr;

BUG_ON(!valid_dma_direction(dir));

addr = __dma_map_page(dev, page, offset, size, dir);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);

return addr;
}

/**
* dma_unmap_single - unmap a single buffer previously mapped
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
* @size: size of buffer (same as passed to dma_map_single)
* @dir: DMA transfer direction (same as passed to dma_map_single)
*
* Unmap a single streaming mode DMA translation. The handle and size
* must match what was provided in the previous dma_map_single() call.
* All other usages are undefined.
*
* After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there.
*/
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
debug_dma_unmap_page(dev, handle, size, dir, true);
__dma_unmap_page(dev, handle, size, dir);
}

/**
* dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
* @size: size of buffer (same as passed to dma_map_page)
* @dir: DMA transfer direction (same as passed to dma_map_page)
*
* Unmap a page streaming mode DMA translation. The handle and size
* must match what was provided in the previous dma_map_page() call.
* All other usages are undefined.
*
* After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there.
*/
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
debug_dma_unmap_page(dev, handle, size, dir, false);
__dma_unmap_page(dev, handle, size, dir);
}

/**
* dma_sync_single_range_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
* @offset: offset of region to start sync
* @size: size of region to sync
* @dir: DMA transfer direction (same as passed to dma_map_single)
*
* Make physical memory consistent for a single streaming mode DMA
* translation after a transfer.
*
* If you perform a dma_map_single() but wish to interrogate the
* buffer using the cpu, yet do not wish to teardown the PCI dma
* mapping, you must call this function before doing so. At the
* next point you give the PCI dma address back to the card, you
* must first the perform a dma_sync_for_device, and then the
* device again owns the buffer.
*/
static inline void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t handle, unsigned long offset, size_t size,
enum dma_data_direction dir)
{
BUG_ON(!valid_dma_direction(dir));

debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
return;

__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t handle, unsigned long offset, size_t size,
enum dma_data_direction dir)
{
BUG_ON(!valid_dma_direction(dir));

debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
return;

__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}

/*
* The scatter list versions of the above methods.
*/
extern int dma_map_sg(struct device *, struct scatterlist *, int,
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);

#endif /* __KERNEL__ */
#endif
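The scatterlist declarations at the end of this hunk are the other half of the conversion: the generic dma_map_sg()/dma_unmap_sg() wrappers from <asm-generic/dma-mapping-common.h> now reach arm_dma_map_sg() and friends through get_dma_ops(). A hedged caller-side sketch; the function name and the two-buffer layout are illustrative, the API calls themselves are standard.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative only: map two kernel buffers for a device-bound transfer. */
static int example_map_two(struct device *dev, void *a, void *b, size_t len)
{
	struct scatterlist sg[2];
	struct scatterlist *s;
	int nents, i;

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], a, len);
	sg_set_buf(&sg[1], b, len);

	/* dispatches to arm_dma_map_sg() (or dmabounce/IOMMU ops) on ARM */
	nents = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;

	for_each_sg(sg, s, nents, i)
		dev_dbg(dev, "seg %d: %#llx+%u\n", i,
			(unsigned long long)sg_dma_address(s), sg_dma_len(s));

	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
	return 0;
}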
File diff suppressed because it is too large
@@ -17,7 +17,7 @@ struct arm_vmregion {
struct list_head vm_list;
unsigned long vm_start;
unsigned long vm_end;
struct page *vm_pages;
void *priv;
int vm_active;
const void *caller;
};
@@ -10,6 +10,7 @@
struct dma_coherent_mem {
void *virt_base;
dma_addr_t device_base;
phys_addr_t pfn_base;
int size;
int flags;
unsigned long *bitmap;
@@ -44,6 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,

dev->dma_mem->virt_base = mem_base;
dev->dma_mem->device_base = device_addr;
dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
dev->dma_mem->size = pages;
dev->dma_mem->flags = flags;
@@ -176,3 +178,43 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);

/**
* dma_mmap_from_coherent() - try to mmap the memory allocated from
* per-device coherent memory pool to userspace
* @dev: device from which the memory was allocated
* @vma: vm_area for the userspace memory
* @vaddr: cpu address returned by dma_alloc_from_coherent
* @size: size of the memory buffer allocated by dma_alloc_from_coherent
*
* This checks whether the memory was allocated from the per-device
* coherent memory pool and if so, maps that memory to the provided vma.
*
* Returns 1 if we correctly mapped the memory, or 0 if
* dma_release_coherent() should proceed with mapping memory from
* generic pools.
*/
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
void *vaddr, size_t size, int *ret)
{
struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

if (mem && vaddr >= mem->virt_base && vaddr + size <=
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
unsigned long off = vma->vm_pgoff;
int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
int count = size >> PAGE_SHIFT;

*ret = -ENXIO;
if (off < count && user_count <= count - off) {
unsigned pfn = mem->pfn_base + start + off;
*ret = remap_pfn_range(vma, vma->vm_start, pfn,
user_count << PAGE_SHIFT,
vma->vm_page_prot);
}
return 1;
}
return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
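dma_mmap_from_coherent() is meant to be called from an architecture's dma_mmap implementation before it touches the generic allocator, mirroring how dma_alloc_from_coherent() and dma_release_from_coherent() are already used. A hedged sketch of that calling pattern; the function name and the fallback comment are illustrative, not the code this commit adds to arch/arm.

/* Illustrative arch-side pattern: let the per-device coherent pool claim
 * the mapping first, otherwise fall back to the normal mapping path. */
static int example_arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
				 void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;	/* handled (or rejected) by the device's pool */

	/* ... otherwise remap the generically allocated buffer here ... */
	return ret;
}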
@@ -3,13 +3,15 @@

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
* These two functions are only for dma allocator.
* These three functions are only for dma allocator.
* Don't use them in device drivers.
*/
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
/*
* Standard interface
*/