OpenCloudOS-Kernel/arch/mips/mm/dma-default.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>
#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <dma-coherence.h>

#ifdef CONFIG_DMA_MAYBE_COHERENT
int coherentio = 0;	/* User defined DMA coherency from command line. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = 1;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = 0;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif
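
/*
 * Usage sketch (illustrative, not part of the logic above): on a
 * CONFIG_DMA_MAYBE_COHERENT platform the coherency mode can be forced
 * from the kernel command line, e.g.
 *
 *	coherentio	-> coherentio = 1 (hardware-maintained coherency)
 *	nocoherentio	-> coherentio = 0 (software-maintained coherency)
 *
 * Platform code is assumed to consult 'coherentio' when deciding what
 * plat_device_is_coherent() reports for its devices.
 */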

static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems and only the R10000 and R12000 are used in such systems: the
 * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (boot_cpu_type() == CPU_R10000 ||
		boot_cpu_type() == CPU_R12000 ||
		boot_cpu_type() == CPU_BMIPS5000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}
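
/*
 * Worked example (a sketch, assuming both CONFIG_ZONE_DMA and
 * CONFIG_ZONE_DMA32 are enabled): a device with coherent_dma_mask =
 * DMA_BIT_MASK(30) satisfies mask < DMA_BIT_MASK(32) and is steered to
 * ZONE_DMA via __GFP_DMA; a 32-bit mask fails that test but satisfies
 * mask < DMA_BIT_MASK(64), yielding __GFP_DMA32; a full 64-bit mask
 * falls through to dma_flag = 0 and may be served from any zone.
 */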

static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	/*
	 * XXX: seems like the coherent and non-coherent implementations could
	 * be consolidated.
	 */
	if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);

	gfp = massage_gfp_flags(dev, gfp);

	if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
		page = dma_alloc_from_contiguous(dev,
						 count, get_order(size));
	if (!page)
		page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;

	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = plat_map_dma_mem(dev, ret, size);
	if (!plat_device_is_coherent(dev)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		if (!hw_coherentio)
			ret = UNCAC_ADDR(ret);
	}

	return ret;
}
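
/*
 * Caller-side sketch (illustrative; 'pdev' is a hypothetical platform
 * device): drivers reach mips_dma_alloc_coherent() through the generic
 * DMA API, dispatched via mips_dma_map_ops:
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus,
 *				       GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu, bus);
 *
 * On a non-coherent device the returned pointer is the UNCAC_ADDR()
 * alias, so CPU accesses bypass the cache entirely.
 */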

static void mips_dma_free_noncoherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;
	if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
		mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
		return;
	}

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	page = virt_to_page((void *) addr);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}

static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	void *cpu_addr, dma_addr_t dma_addr, size_t size,
	struct dma_attrs *attrs)
{
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)cpu_addr;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	pfn = page_to_pfn(virt_to_page((void *)addr));

	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}
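
/*
 * Sketch of how this hook is reached (the 'foo' names are hypothetical):
 * a driver's .mmap file operation typically forwards to the DMA API,
 * which lands in mips_dma_mmap() through the dma_map_ops:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(foo_dev, vma, foo_cpu_addr,
 *					 foo_dma_handle, foo_size);
 *	}
 *
 * The remap is refused with -ENXIO unless the requested offset and
 * length fit entirely within the allocated buffer.
 */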

static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages.  But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			__dma_sync_virtual(page_address(page) + offset,
					   size, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}
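
/*
 * Streaming-DMA sketch (illustrative): on a non-coherent device,
 * mapping writes dirty cachelines back (DMA_TO_DEVICE), invalidates
 * them (DMA_FROM_DEVICE), or both (DMA_BIDIRECTIONAL) before the
 * hardware touches the buffer:
 *
 *	dma_addr_t bus = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	... start the transfer, wait for completion ...
 *	dma_unmap_page(dev, bus, len, DMA_TO_DEVICE);
 */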

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nhwentries, i) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
}

static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}
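
/*
 * Ownership hand-off sketch (illustrative): for a long-lived streaming
 * mapping the CPU and device alternate via the two sync helpers above:
 *
 *	dma_sync_single_for_device(dev, bus, len, DMA_BIDIRECTIONAL);
 *	... device owns the buffer ...
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_BIDIRECTIONAL);
 *	... CPU owns the buffer; on speculating CPUs (see
 *	    cpu_needs_post_dma_flush()) the _for_cpu side re-invalidates ...
 */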

static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sglist, int nelems,
	enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	if (cpu_needs_post_dma_flush(dev)) {
		for_each_sg(sglist, sg, nelems, i) {
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		}
	}
	plat_post_dma_flush(dev);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sglist, int nelems,
	enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	if (!plat_device_is_coherent(dev)) {
		for_each_sg(sglist, sg, nelems, i) {
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		}
	}
}

int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);
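
/*
 * Usage sketch (illustrative): dma_cache_sync() pairs with memory
 * obtained via dma_alloc_noncoherent(), where the caller maintains
 * coherency by hand:
 *
 *	dma_addr_t bus;
 *	void *buf = dma_alloc_noncoherent(dev, len, &bus, GFP_KERNEL);
 *	... CPU fills buf ...
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 *	... device may now read the buffer ...
 */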

static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.mmap = mips_dma_mmap,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);
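
/*
 * Note (a sketch of the surrounding machinery): dma_debug_init() only
 * has an effect when CONFIG_DMA_API_DEBUG is enabled.
 * PREALLOC_DMA_DEBUG_ENTRIES (1 << 16 = 65536) sizes the table used to
 * track active mappings and report missing or mismatched unmaps, e.g.
 * via /sys/kernel/debug/dma-api/.
 */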