csky: Optimize arch_sync_dma_for_cpu/device with dma_inv_range

DMA_FROM_DEVICE only needs to read the device-written data from memory
into the CPU cache, so there is no need to clean (write back) the cache
beforehand. A clean + invalidate for DMA_FROM_DEVICE also causes no
problem, because software must not touch the DMA memory range while the
transfer is in flight.

Changes for V2:
 - Remove the cache clean and ignore DMA_TO_DEVICE in _for_cpu.
 - Change inv to wbinv for DMA_FROM_DEVICE in _for_device.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>

commit ae76f635d4
parent 4af9027d3f
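
For context, these arch hooks sit underneath the kernel's streaming-DMA
API. Below is a minimal sketch of a receive path that exercises them;
the function name, buffer size, and comments are invented for
illustration, while dma_map_single()/dma_unmap_single() and
DMA_FROM_DEVICE are the real API:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	#define EXAMPLE_RX_LEN	2048	/* illustrative buffer size */

	/* Hypothetical RX path: dma_map_single() reaches
	 * arch_sync_dma_for_device(); dma_unmap_single() reaches
	 * arch_sync_dma_for_cpu() -- where, after this patch, a plain
	 * invalidate is enough for the CPU to see device-written data. */
	static int example_rx(struct device *dev, void *buf)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, EXAMPLE_RX_LEN,
					DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... point the device at 'handle', wait for completion ... */

		dma_unmap_single(dev, handle, EXAMPLE_RX_LEN, DMA_FROM_DEVICE);
		return 0;	/* CPU may now read 'buf' coherently */
	}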

--- a/arch/csky/include/asm/cache.h
+++ b/arch/csky/include/asm/cache.h
@@ -24,6 +24,7 @@ void cache_wbinv_range(unsigned long start, unsigned long end);
 void cache_wbinv_all(void);
 
 void dma_wbinv_range(unsigned long start, unsigned long end);
+void dma_inv_range(unsigned long start, unsigned long end);
 void dma_wb_range(unsigned long start, unsigned long end);
 
 #endif

--- a/arch/csky/mm/cachev1.c
+++ b/arch/csky/mm/cachev1.c
@@ -120,7 +120,12 @@ void dma_wbinv_range(unsigned long start, unsigned long end)
 	cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
 }
 
+void dma_inv_range(unsigned long start, unsigned long end)
+{
+	cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
+}
+
 void dma_wb_range(unsigned long start, unsigned long end)
 {
-	cache_op_range(start, end, DATA_CACHE|CACHE_INV, 1);
+	cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
 }
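
Note that on cache-v1 the new dma_inv_range ends up identical to
dma_wbinv_range: both issue clean + invalidate. An annotated copy, with
the reasoning from the commit message attached as comments:

	void dma_inv_range(unsigned long start, unsigned long end)
	{
		/*
		 * CACHE_CLR writes any dirty lines back before
		 * CACHE_INV drops them.  The extra clean is harmless
		 * for DMA_FROM_DEVICE: the CPU does not write the
		 * buffer while the device owns it, so nothing stale
		 * can be written back over the device's data.
		 */
		cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1);
	}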

--- a/arch/csky/mm/cachev2.c
+++ b/arch/csky/mm/cachev2.c
@@ -69,11 +69,20 @@ void dma_wbinv_range(unsigned long start, unsigned long end)
 	sync_is();
 }
 
+void dma_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
+
+	for (; i < end; i += L1_CACHE_BYTES)
+		asm volatile("dcache.iva %0\n"::"r"(i):"memory");
+	sync_is();
+}
+
 void dma_wb_range(unsigned long start, unsigned long end)
 {
 	unsigned long i = start & ~(L1_CACHE_BYTES - 1);
 
 	for (; i < end; i += L1_CACHE_BYTES)
-		asm volatile("dcache.civa %0\n"::"r"(i):"memory");
+		asm volatile("dcache.cva %0\n"::"r"(i):"memory");
 	sync_is();
 }
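
The cache-v2 mnemonics follow the pattern <op>va, "by virtual address":
dcache.iva invalidates, dcache.cva cleans (writes back), and dcache.civa
does both, which is why dma_wb_range above drops the invalidate by
switching from civa to cva. The three range functions share the same
loop shape; a sketch of a common walker, with a macro name invented for
illustration (the real cachev2.c open-codes the loop in each function):

	/* Hypothetical helper; 'insn' must be a string literal naming
	 * the cache-operation instruction to issue per line. */
	#define dma_cache_walk(start, end, insn)			\
	do {								\
		unsigned long _i = (start) & ~(L1_CACHE_BYTES - 1);	\
									\
		for (; _i < (end); _i += L1_CACHE_BYTES)		\
			asm volatile(insn " %0\n" :: "r"(_i) : "memory"); \
		sync_is();						\
	} while (0)

	/* e.g. dma_inv_range(start, end) would reduce to:
	 *	dma_cache_walk(start, end, "dcache.iva");
	 */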

--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
@@ -85,11 +85,10 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
-		cache_op(paddr, size, dma_wb_range);
-		break;
+		return;
 	case DMA_FROM_DEVICE:
 	case DMA_BIDIRECTIONAL:
-		cache_op(paddr, size, dma_wbinv_range);
+		cache_op(paddr, size, dma_inv_range);
 		break;
 	default:
 		BUG();
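
For contrast, the V2 note above ("Change inv to wbinv ... in
_for_device") implies the counterpart hook cleans the cache before the
device takes ownership of the buffer. A reconstruction under that
assumption; this function is not part of the diff above:

	void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
				      size_t size, enum dma_data_direction dir)
	{
		switch (dir) {
		case DMA_TO_DEVICE:
			cache_op(paddr, size, dma_wb_range);	/* clean only */
			break;
		case DMA_FROM_DEVICE:
		case DMA_BIDIRECTIONAL:
			cache_op(paddr, size, dma_wbinv_range);	/* clean + inv */
			break;
		default:
			BUG();
		}
	}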