/*
 *  Copyright (C) 2011 Texas Instruments Incorporated
 *  Author: Mark Salter <msalter@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

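/*
 * C6X DMA is not cache-coherent, so streaming DMA needs explicit L2 cache
 * maintenance keyed on the transfer direction: writeback before the device
 * reads the buffer, invalidate before the CPU reads what the device wrote,
 * and both for bidirectional transfers.
 */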
static void c6x_dma_sync(dma_addr_t handle, size_t size,
			 enum dma_data_direction dir)
{
	unsigned long paddr = handle;

	BUG_ON(!valid_dma_direction(dir));

	switch (dir) {
	case DMA_FROM_DEVICE:
		L2_cache_block_invalidate(paddr, paddr + size);
		break;
	case DMA_TO_DEVICE:
		L2_cache_block_writeback(paddr, paddr + size);
		break;
	case DMA_BIDIRECTIONAL:
		L2_cache_block_writeback_invalidate(paddr, paddr + size);
		break;
	default:
		break;
	}
}

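/*
 * Map a single page for streaming DMA.  The DMA address is simply the
 * physical address, so the only real work is the cache maintenance,
 * which DMA_ATTR_SKIP_CPU_SYNC lets callers opt out of.
 */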
static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t handle = virt_to_phys(page_address(page) + offset);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		c6x_dma_sync(handle, size, dir);

	return handle;
}

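/* Undo a page mapping: sync the buffer back to the CPU unless told not to. */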
static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		c6x_dma_sync(handle, size, dir);
}

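/*
 * Map a scatterlist for streaming DMA: each segment's DMA address is its
 * physical address, with the same per-direction cache maintenance as a
 * single page mapping.
 */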
static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			c6x_dma_sync(sg->dma_address, sg->length, dir);
	}

	return nents;
}

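/* Undo a scatterlist mapping, syncing each segment unless asked to skip. */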
static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	for_each_sg(sglist, sg, nents, i)
		c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
}

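/* Cache maintenance before the CPU touches a buffer still mapped for DMA. */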
static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	c6x_dma_sync(handle, size, dir);
}

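/* Cache maintenance before handing a buffer back to the device. */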
static void c6x_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	c6x_dma_sync(handle, size, dir);
}

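/* Scatterlist variant: sync every segment for CPU access. */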
static void c6x_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		c6x_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
					    sg->length, dir);
}

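/* Scatterlist variant: sync every segment for device access. */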
static void c6x_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i)
		c6x_dma_sync_single_for_device(dev, sg_dma_address(sg),
					       sg->length, dir);
}

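/*
 * The coherent .alloc/.free handlers are implemented elsewhere in the
 * port (presumably the coherent-memory allocator); this file provides
 * only the streaming operations.
 */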
const struct dma_map_ops c6x_dma_ops = {
	.alloc			= c6x_dma_alloc,
	.free			= c6x_dma_free,
	.map_page		= c6x_dma_map_page,
	.unmap_page		= c6x_dma_unmap_page,
	.map_sg			= c6x_dma_map_sg,
	.unmap_sg		= c6x_dma_unmap_sg,
	.sync_single_for_device	= c6x_dma_sync_single_for_device,
	.sync_single_for_cpu	= c6x_dma_sync_single_for_cpu,
	.sync_sg_for_device	= c6x_dma_sync_sg_for_device,
	.sync_sg_for_cpu	= c6x_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(c6x_dma_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

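/* Hook the preallocated entry pool into the DMA-API debugging core. */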
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);