From d1cab34c039584ebe76b04d2f2109e0d87d344e1 Mon Sep 17 00:00:00 2001
From: Bartlomiej Zolnierkiewicz
Date: Fri, 18 Oct 2013 19:35:21 +0200
Subject: [PATCH 01/33] dmatest: make driver unmap also source buffers by itself

Make the driver DMA unmap also source buffers by itself (currently it
DMA unmaps only destination buffers) as a preparation for introducing
generic 'unmap' data.

Cc: Dan Williams
Cc: Vinod Koul
Cc: Tomasz Figa
Cc: Dave Jiang
Cc: Andy Shevchenko
Signed-off-by: Bartlomiej Zolnierkiewicz
Signed-off-by: Kyungmin Park
Acked-by: Andy Shevchenko
Signed-off-by: Dan Williams
---
 drivers/dma/dmatest.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 92f796cdc6ab..f4a2a25fae31 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -597,11 +597,10 @@ static int dmatest_func(void *data)
 	set_user_nice(current, 10);
 
 	/*
-	 * src buffers are freed by the DMAEngine code with dma_unmap_single()
-	 * dst buffers are freed by ourselves below
+	 * src and dst buffers are freed by ourselves below
 	 */
-	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
-		| DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
+	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
+		DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
 
 	while (!kthread_should_stop()
 	       && !(params->iterations && total_tests >= params->iterations)) {
@@ -750,7 +749,8 @@ static int dmatest_func(void *data)
 			continue;
 		}
 
-		/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
+		/* Unmap by myself */
+		unmap_src(dev->dev, dma_srcs, len, src_cnt);
 		unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
 
 		error_count = 0;

From 56ea27fd61f546117a35236113be72c8aaec382d Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 18 Oct 2013 19:35:22 +0200
Subject: [PATCH 02/33] dmaengine: consolidate memcpy apis

Copying from page to page (dma_async_memcpy_pg_to_pg) is the superset,
make the other two apis use that one in preparation for providing a
common dma unmap implementation. The common implementation just wants
to assume all buffers are mapped with dma_map_page().

Cc: Vinod Koul
Cc: Tomasz Figa
Cc: Dave Jiang
Signed-off-by: Bartlomiej Zolnierkiewicz
Signed-off-by: Kyungmin Park
Signed-off-by: Dan Williams
---
 drivers/dma/dmaengine.c | 139 +++++++++++++--------------------------
 1 file changed, 46 insertions(+), 93 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9162ac80c18f..bbc89df6bc56 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -901,99 +901,6 @@ void dma_async_device_unregister(struct dma_device *device)
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
-/**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
- * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
- * @len: length
- *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
- */ -dma_cookie_t -dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, - void *src, size_t len) -{ - struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; - dma_cookie_t cookie; - unsigned long flags; - - dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); - dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); - flags = DMA_CTRL_ACK | - DMA_COMPL_SRC_UNMAP_SINGLE | - DMA_COMPL_DEST_UNMAP_SINGLE; - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); - - if (!tx) { - dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE); - return -ENOMEM; - } - - tx->callback = NULL; - cookie = tx->tx_submit(tx); - - preempt_disable(); - __this_cpu_add(chan->local->bytes_transferred, len); - __this_cpu_inc(chan->local->memcpy_count); - preempt_enable(); - - return cookie; -} -EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); - -/** - * dma_async_memcpy_buf_to_pg - offloaded copy from address to page - * @chan: DMA channel to offload copy to - * @page: destination page - * @offset: offset in page to copy to - * @kdata: source address (virtual) - * @len: length - * - * Both @page/@offset and @kdata must be mappable to a bus address according - * to the DMA mapping API rules for streaming mappings. - * Both @page/@offset and @kdata must stay memory resident (kernel memory or - * locked user space pages) - */ -dma_cookie_t -dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, - unsigned int offset, void *kdata, size_t len) -{ - struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; - dma_cookie_t cookie; - unsigned long flags; - - dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); - dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); - flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE; - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); - - if (!tx) { - dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); - return -ENOMEM; - } - - tx->callback = NULL; - cookie = tx->tx_submit(tx); - - preempt_disable(); - __this_cpu_add(chan->local->bytes_transferred, len); - __this_cpu_inc(chan->local->memcpy_count); - preempt_enable(); - - return cookie; -} -EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); - /** * dma_async_memcpy_pg_to_pg - offloaded copy from page to page * @chan: DMA channel to offload copy to @@ -1043,6 +950,52 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, } EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); +/** + * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses + * @chan: DMA channel to offload copy to + * @dest: destination address (virtual) + * @src: source address (virtual) + * @len: length + * + * Both @dest and @src must be mappable to a bus address according to the + * DMA mapping API rules for streaming mappings. + * Both @dest and @src must stay memory resident (kernel memory or locked + * user space pages). 
+ */ +dma_cookie_t +dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, + void *src, size_t len) +{ + return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest), + (unsigned long) dest & ~PAGE_MASK, + virt_to_page(src), + (unsigned long) src & ~PAGE_MASK, len); +} +EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); + +/** + * dma_async_memcpy_buf_to_pg - offloaded copy from address to page + * @chan: DMA channel to offload copy to + * @page: destination page + * @offset: offset in page to copy to + * @kdata: source address (virtual) + * @len: length + * + * Both @page/@offset and @kdata must be mappable to a bus address according + * to the DMA mapping API rules for streaming mappings. + * Both @page/@offset and @kdata must stay memory resident (kernel memory or + * locked user space pages) + */ +dma_cookie_t +dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, + unsigned int offset, void *kdata, size_t len) +{ + return dma_async_memcpy_pg_to_pg(chan, page, offset, + virt_to_page(kdata), + (unsigned long) kdata & ~PAGE_MASK, len); +} +EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); + void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, struct dma_chan *chan) { From d38a8c622a1b382336c3e152c6caf4e11d1f1b2a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 18 Oct 2013 19:35:23 +0200 Subject: [PATCH 03/33] dmaengine: prepare for generic 'unmap' data Add a hook for a common dma unmap implementation to enable removal of the per driver custom unmap code. (A reworked version of Bartlomiej Zolnierkiewicz's patches to remove the custom callbacks and the size increase of dma_async_tx_descriptor for drivers that don't care about raid). Cc: Vinod Koul Cc: Tomasz Figa Cc: Dave Jiang [bzolnier: prepare pl330 driver for adding missing unmap while at it] Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Signed-off-by: Dan Williams --- drivers/dma/amba-pl08x.c | 1 + drivers/dma/at_hdmac.c | 1 + drivers/dma/dw/core.c | 1 + drivers/dma/ep93xx_dma.c | 1 + drivers/dma/fsldma.c | 1 + drivers/dma/ioat/dma.c | 1 + drivers/dma/ioat/dma_v2.c | 1 + drivers/dma/ioat/dma_v3.c | 1 + drivers/dma/iop-adma.c | 1 + drivers/dma/mv_xor.c | 1 + drivers/dma/pl330.c | 2 ++ drivers/dma/ppc4xx/adma.c | 1 + drivers/dma/timb_dma.c | 1 + drivers/dma/txx9dmac.c | 1 + include/linux/dmaengine.h | 26 ++++++++++++++++++++++++++ 15 files changed, 41 insertions(+) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index fce46c5bf1c7..7f9846464b77 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1197,6 +1197,7 @@ static void pl08x_desc_free(struct virt_dma_desc *vd) struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); + dma_descriptor_unmap(txd); if (!plchan->slave) pl08x_unmap_buffers(txd); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index c787f38a186a..cc7098ddf9d4 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -345,6 +345,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) list_move(&desc->desc_node, &atchan->free_list); /* unmap dma addresses (not on slave channels) */ + dma_descriptor_unmap(txd); if (!atchan->chan_common.private) { struct device *parent = chan2parent(&atchan->chan_common); if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 89eb89f22284..e3fe1b1a73b1 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -311,6 +311,7 @@ 
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, list_splice_init(&desc->tx_list, &dwc->free_list); list_move(&desc->desc_node, &dwc->free_list); + dma_descriptor_unmap(txd); if (!is_slave_direction(dwc->direction)) { struct device *parent = chan2parent(&dwc->chan); if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 591cd8c63abb..dcd6bf5d3091 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -791,6 +791,7 @@ static void ep93xx_dma_tasklet(unsigned long data) * For the memcpy channels the API requires us to unmap the * buffers unless requested otherwise. */ + dma_descriptor_unmap(&desc->txd); if (!edmac->chan.private) ep93xx_dma_unmap_buffers(desc); diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index b3f3e90054f2..66c4052a1f34 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -868,6 +868,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, /* Run any dependencies */ dma_run_dependencies(txd); + dma_descriptor_unmap(txd); /* Unmap the dst buffer, if requested */ if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 5ff6fc1819dc..26f8cfd6bc3f 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -602,6 +602,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) dump_desc_dbg(ioat, desc); if (tx->cookie) { dma_cookie_complete(tx); + dma_descriptor_unmap(tx); ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); ioat->active -= desc->hw->tx_cnt; if (tx->callback) { diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index b925e1b1d139..fc7b50a813cc 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -148,6 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) tx = &desc->txd; dump_desc_dbg(ioat, desc); if (tx->cookie) { + dma_descriptor_unmap(tx); ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); dma_cookie_complete(tx); if (tx->callback) { diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index d8ececaf1b57..57a2901b917a 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -577,6 +577,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) tx = &desc->txd; if (tx->cookie) { dma_cookie_complete(tx); + dma_descriptor_unmap(tx); ioat3_dma_unmap(ioat, desc, idx + i); if (tx->callback) { tx->callback(tx->callback_param); diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index dd8b44a56e5d..8f6e426590eb 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -152,6 +152,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, if (tx->callback) tx->callback(tx->callback_param); + dma_descriptor_unmap(tx); /* unmap dma addresses * (unmap_single vs unmap_page?) */ diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 536dcb8ba5fd..ed1ab1d0875e 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -278,6 +278,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, desc->async_tx.callback( desc->async_tx.callback_param); + dma_descriptor_unmap(&desc->async_tx); /* unmap dma addresses * (unmap_single vs unmap_page?) 
*/ diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index a562d24d20bf..ab25e52cd43b 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data) list_move_tail(&desc->node, &pch->dmac->desc_pool); } + dma_descriptor_unmap(&desc->txd); + if (callback) { spin_unlock_irqrestore(&pch->lock, flags); callback(callback_param); diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 370ff8265630..442492da7415 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -1765,6 +1765,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( desc->async_tx.callback( desc->async_tx.callback_param); + dma_descriptor_unmap(&desc->async_tx); /* unmap dma addresses * (unmap_single vs unmap_page?) * diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 28af214fce04..1d0c98839087 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -293,6 +293,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) list_move(&td_desc->desc_node, &td_chan->free_list); + dma_descriptor_unmap(txd); if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) __td_unmap_descs(td_desc, txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE); diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 71e8e775189e..22a0b6c78c77 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -419,6 +419,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, list_splice_init(&desc->tx_list, &dc->free_list); list_move(&desc->desc_node, &dc->free_list); + dma_descriptor_unmap(txd); if (!ds) { dma_addr_t dmaaddr; if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 0bc727534108..9070050fbcd8 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -413,6 +413,17 @@ void dma_chan_cleanup(struct kref *kref); typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); typedef void (*dma_async_tx_callback)(void *dma_async_param); + +struct dmaengine_unmap_data { + u8 to_cnt; + u8 from_cnt; + u8 bidi_cnt; + struct device *dev; + struct kref kref; + size_t len; + dma_addr_t addr[0]; +}; + /** * struct dma_async_tx_descriptor - async transaction descriptor * ---dma generic offload fields--- @@ -438,6 +449,7 @@ struct dma_async_tx_descriptor { dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); dma_async_tx_callback callback; void *callback_param; + struct dmaengine_unmap_data *unmap; #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *parent; @@ -445,6 +457,20 @@ struct dma_async_tx_descriptor { #endif }; +static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, + struct dmaengine_unmap_data *unmap) +{ + kref_get(&unmap->kref); + tx->unmap = unmap; +} + +static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) +{ + if (tx->unmap) { + tx->unmap = NULL; + } +} + #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH static inline void txd_lock(struct dma_async_tx_descriptor *txd) { From 45c463ae924c62af4aa64ded1ca831f334a1db65 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 18 Oct 2013 19:35:24 +0200 Subject: [PATCH 04/33] dmaengine: reference counted unmap data Hang a common 'unmap' object off of dma descriptors for the purpose of providing a unified unmapping interface. The lifetime of a mapping may span multiple descriptors, so these unmap objects are reference counted by related descriptor. 
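For illustration, the whole lifecycle from a client's point of view now
looks roughly like this (a minimal sketch modeled on the
dma_async_memcpy_pg_to_pg() conversion in this patch; error handling
trimmed, and the DMA_COMPL_SKIP_* flags are still required at this point
in the series because drivers keep their legacy unmap code until patch 12):

    struct dmaengine_unmap_data *unmap;
    struct dma_async_tx_descriptor *tx;

    /* two slots: addr[0] for the source, addr[1] for the destination */
    unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
    unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
                                  DMA_TO_DEVICE);
    unmap->to_cnt = 1;
    unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                  DMA_FROM_DEVICE);
    unmap->from_cnt = 1;
    unmap->len = len;

    tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
                                     len, DMA_CTRL_ACK |
                                     DMA_COMPL_SKIP_SRC_UNMAP |
                                     DMA_COMPL_SKIP_DEST_UNMAP);
    dma_set_unmap(tx, unmap);   /* descriptor takes its own reference */
    tx->tx_submit(tx);
    dmaengine_unmap_put(unmap); /* drop the submitter's reference */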
Cc: Vinod Koul Cc: Tomasz Figa Cc: Dave Jiang [bzolnier: fix IS_ENABLED() check] [bzolnier: fix release ordering in dmaengine_destroy_unmap_pool()] [bzolnier: fix check for success in dmaengine_init_unmap_pool()] [bzolnier: use mempool_free() instead of kmem_cache_free()] [bzolnier: add missing unmap->len initializations] [bzolnier: add __init tag to dmaengine_init_unmap_pool()] Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park [djbw: move DMAENGINE=n support to this patch for async_tx] Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 156 +++++++++++++++++++++++++++++++++++--- include/linux/dmaengine.h | 3 + 2 files changed, 150 insertions(+), 9 deletions(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index bbc89df6bc56..e721a1caff7f 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -65,6 +65,7 @@ #include #include #include +#include static DEFINE_MUTEX(dma_list_mutex); static DEFINE_IDR(dma_idr); @@ -901,6 +902,129 @@ void dma_async_device_unregister(struct dma_device *device) } EXPORT_SYMBOL(dma_async_device_unregister); +struct dmaengine_unmap_pool { + struct kmem_cache *cache; + const char *name; + mempool_t *pool; + size_t size; +}; + +#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } +static struct dmaengine_unmap_pool unmap_pool[] = { + __UNMAP_POOL(2), + #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) + __UNMAP_POOL(16), + __UNMAP_POOL(128), + __UNMAP_POOL(256), + #endif +}; + +static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) +{ + int order = get_count_order(nr); + + switch (order) { + case 0 ... 1: + return &unmap_pool[0]; + case 2 ... 4: + return &unmap_pool[1]; + case 5 ... 7: + return &unmap_pool[2]; + case 8: + return &unmap_pool[3]; + default: + BUG(); + return NULL; + } +} + +static void dmaengine_unmap(struct kref *kref) +{ + struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); + struct device *dev = unmap->dev; + int cnt, i; + + cnt = unmap->to_cnt; + for (i = 0; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_TO_DEVICE); + cnt += unmap->from_cnt; + for (; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_FROM_DEVICE); + cnt += unmap->bidi_cnt; + for (; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_BIDIRECTIONAL); + mempool_free(unmap, __get_unmap_pool(cnt)->pool); +} + +void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) +{ + if (unmap) + kref_put(&unmap->kref, dmaengine_unmap); +} +EXPORT_SYMBOL_GPL(dmaengine_unmap_put); + +static void dmaengine_destroy_unmap_pool(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { + struct dmaengine_unmap_pool *p = &unmap_pool[i]; + + if (p->pool) + mempool_destroy(p->pool); + p->pool = NULL; + if (p->cache) + kmem_cache_destroy(p->cache); + p->cache = NULL; + } +} + +static int __init dmaengine_init_unmap_pool(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { + struct dmaengine_unmap_pool *p = &unmap_pool[i]; + size_t size; + + size = sizeof(struct dmaengine_unmap_data) + + sizeof(dma_addr_t) * p->size; + + p->cache = kmem_cache_create(p->name, size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!p->cache) + break; + p->pool = mempool_create_slab_pool(1, p->cache); + if (!p->pool) + break; + } + + if (i == ARRAY_SIZE(unmap_pool)) + return 0; + + dmaengine_destroy_unmap_pool(); + return -ENOMEM; +} + +static struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) +{ + 
struct dmaengine_unmap_data *unmap; + + unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); + if (!unmap) + return NULL; + + memset(unmap, 0, sizeof(*unmap)); + kref_init(&unmap->kref); + unmap->dev = dev; + + return unmap; +} + /** * dma_async_memcpy_pg_to_pg - offloaded copy from page to page * @chan: DMA channel to offload copy to @@ -922,24 +1046,34 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, { struct dma_device *dev = chan->device; struct dma_async_tx_descriptor *tx; - dma_addr_t dma_dest, dma_src; + struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; unsigned long flags; - dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); - dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, - DMA_FROM_DEVICE); - flags = DMA_CTRL_ACK; - tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); + unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); + if (!unmap) + return -ENOMEM; + + unmap->to_cnt = 1; + unmap->from_cnt = 1; + unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len, + DMA_TO_DEVICE); + unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len, + DMA_FROM_DEVICE); + unmap->len = len; + flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP | + DMA_COMPL_SKIP_DEST_UNMAP; + tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0], + len, flags); if (!tx) { - dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); - dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); + dmaengine_unmap_put(unmap); return -ENOMEM; } - tx->callback = NULL; + dma_set_unmap(tx, unmap); cookie = tx->tx_submit(tx); + dmaengine_unmap_put(unmap); preempt_disable(); __this_cpu_add(chan->local->bytes_transferred, len); @@ -1069,6 +1203,10 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies); static int __init dma_bus_init(void) { + int err = dmaengine_init_unmap_pool(); + + if (err) + return err; return class_register(&dma_devclass); } arch_initcall(dma_bus_init); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 9070050fbcd8..2fe855a7cab1 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -464,9 +464,12 @@ static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, tx->unmap = unmap; } +void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap); + static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) { if (tx->unmap) { + dmaengine_unmap_put(tx->unmap); tx->unmap = NULL; } } From 8971646294bda65f8666b60cb2cb3d5e172c99bf Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 18 Oct 2013 19:35:25 +0200 Subject: [PATCH 05/33] async_memcpy: convert to dmaengine_unmap_data Use the generic unmap object to unmap dma buffers. 
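The reference counting is what lets async_memcpy() drop its reference
unconditionally at the end of the function: dmaengine_get_unmap_data()
starts the kref at one for the submitter, dma_set_unmap() takes a second
reference for the descriptor, and the buffers are only unmapped once both
sides have let go, regardless of whether completion or the submitter's put
happens first. A sketch of the two release paths:

    /* submitter side, once the descriptor owns the unmap data */
    dmaengine_unmap_put(unmap);         /* refcount 2 -> 1 */

    /* completion side, called from the driver's cleanup path
     * (hook added in patch 3, put added in patch 4):
     */
    static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
    {
            if (tx->unmap) {
                    dmaengine_unmap_put(tx->unmap); /* 1 -> 0: dmaengine_unmap() runs */
                    tx->unmap = NULL;
            }
    }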
Cc: Vinod Koul Cc: Tomasz Figa Cc: Dave Jiang Reported-by: Bartlomiej Zolnierkiewicz [bzolnier: add missing unmap->len initialization] [bzolnier: fix whitespace damage] Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park [djbw: add DMA_ENGINE=n support] Signed-off-by: Dan Williams --- crypto/async_tx/async_memcpy.c | 36 ++++++++++++++++++++-------------- drivers/dma/dmaengine.c | 3 ++- include/linux/dmaengine.h | 17 ++++++++++++++++ 3 files changed, 40 insertions(+), 16 deletions(-) diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 9e62feffb374..72750214f779 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -50,33 +50,37 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, &dest, 1, &src, 1, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; + struct dmaengine_unmap_data *unmap = NULL; - if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { - dma_addr_t dma_dest, dma_src; - unsigned long dma_prep_flags = 0; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); + + if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { + unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP | + DMA_COMPL_SKIP_DEST_UNMAP; if (submit->cb_fn) dma_prep_flags |= DMA_PREP_INTERRUPT; if (submit->flags & ASYNC_TX_FENCE) dma_prep_flags |= DMA_PREP_FENCE; - dma_dest = dma_map_page(device->dev, dest, dest_offset, len, - DMA_FROM_DEVICE); - dma_src = dma_map_page(device->dev, src, src_offset, len, - DMA_TO_DEVICE); + unmap->to_cnt = 1; + unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len, + DMA_TO_DEVICE); + unmap->from_cnt = 1; + unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len, + DMA_FROM_DEVICE); + unmap->len = len; - tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, - len, dma_prep_flags); - if (!tx) { - dma_unmap_page(device->dev, dma_dest, len, - DMA_FROM_DEVICE); - dma_unmap_page(device->dev, dma_src, len, - DMA_TO_DEVICE); - } + tx = device->device_prep_dma_memcpy(chan, unmap->addr[1], + unmap->addr[0], len, + dma_prep_flags); } if (tx) { pr_debug("%s: (async) len: %zu\n", __func__, len); + + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); } else { void *dest_buf, *src_buf; @@ -96,6 +100,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, async_tx_sync_epilog(submit); } + dmaengine_unmap_put(unmap); + return tx; } EXPORT_SYMBOL_GPL(async_memcpy); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index e721a1caff7f..54138b57b37c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1009,7 +1009,7 @@ static int __init dmaengine_init_unmap_pool(void) return -ENOMEM; } -static struct dmaengine_unmap_data * +struct dmaengine_unmap_data * dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) { struct dmaengine_unmap_data *unmap; @@ -1024,6 +1024,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) return unmap; } +EXPORT_SYMBOL(dmaengine_get_unmap_data); /** * dma_async_memcpy_pg_to_pg - offloaded copy from page to page diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 2fe855a7cab1..3782cdb782a8 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -457,6 +457,7 @@ struct dma_async_tx_descriptor { #endif }; +#ifdef CONFIG_DMA_ENGINE static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, struct 
dmaengine_unmap_data *unmap) { @@ -464,7 +465,23 @@ static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, tx->unmap = unmap; } +struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags); void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap); +#else +static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, + struct dmaengine_unmap_data *unmap) +{ +} +static inline struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) +{ + return NULL; +} +static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) +{ +} +#endif static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) { From fb36ab142b2f1dc8c8ad3750413efa7a5cc1c07b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 18 Oct 2013 19:35:26 +0200 Subject: [PATCH 06/33] async_xor: convert to dmaengine_unmap_data Use the generic unmap object to unmap dma buffers. Later we can push this unmap object up to the raid layer and get rid of the 'scribble' parameter. Cc: Vinod Koul Cc: Tomasz Figa Cc: Dave Jiang Reported-by: Bartlomiej Zolnierkiewicz [bzolnier: minor cleanups] Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Signed-off-by: Dan Williams --- crypto/async_tx/async_xor.c | 95 ++++++++++++++++++++----------------- 1 file changed, 51 insertions(+), 44 deletions(-) diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 8ade0a0481c6..f092fa14a745 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -33,48 +33,32 @@ /* do_async_xor - dma map the pages and perform the xor with an engine */ static __async_inline struct dma_async_tx_descriptor * -do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, - unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src, +do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap, struct async_submit_ctl *submit) { struct dma_device *dma = chan->device; struct dma_async_tx_descriptor *tx = NULL; - int src_off = 0; - int i; dma_async_tx_callback cb_fn_orig = submit->cb_fn; void *cb_param_orig = submit->cb_param; enum async_tx_flags flags_orig = submit->flags; enum dma_ctrl_flags dma_flags; - int xor_src_cnt = 0; - dma_addr_t dma_dest; - - /* map the dest bidrectional in case it is re-used as a source */ - dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL); - for (i = 0; i < src_cnt; i++) { - /* only map the dest once */ - if (!src_list[i]) - continue; - if (unlikely(src_list[i] == dest)) { - dma_src[xor_src_cnt++] = dma_dest; - continue; - } - dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset, - len, DMA_TO_DEVICE); - } - src_cnt = xor_src_cnt; + int src_cnt = unmap->to_cnt; + int xor_src_cnt; + dma_addr_t dma_dest = unmap->addr[unmap->to_cnt]; + dma_addr_t *src_list = unmap->addr; while (src_cnt) { + dma_addr_t tmp; + submit->flags = flags_orig; - dma_flags = 0; xor_src_cnt = min(src_cnt, (int)dma->max_xor); - /* if we are submitting additional xors, leave the chain open, - * clear the callback parameters, and leave the destination - * buffer mapped + /* if we are submitting additional xors, leave the chain open + * and clear the callback parameters */ + dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; if (src_cnt > xor_src_cnt) { submit->flags &= ~ASYNC_TX_ACK; submit->flags |= ASYNC_TX_FENCE; - dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = NULL; submit->cb_param = NULL; } else { @@ 
-85,12 +69,18 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, dma_flags |= DMA_PREP_INTERRUPT; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - /* Since we have clobbered the src_list we are committed - * to doing this asynchronously. Drivers force forward progress - * in case they can not provide a descriptor + + /* Drivers force forward progress in case they can not provide a + * descriptor */ - tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off], - xor_src_cnt, len, dma_flags); + tmp = src_list[0]; + if (src_list > unmap->addr) + src_list[0] = dma_dest; + tx = dma->device_prep_dma_xor(chan, dma_dest, src_list, + xor_src_cnt, unmap->len, + dma_flags); + src_list[0] = tmp; + if (unlikely(!tx)) async_tx_quiesce(&submit->depend_tx); @@ -99,22 +89,21 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, while (unlikely(!tx)) { dma_async_issue_pending(chan); tx = dma->device_prep_dma_xor(chan, dma_dest, - &dma_src[src_off], - xor_src_cnt, len, + src_list, + xor_src_cnt, unmap->len, dma_flags); } + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); submit->depend_tx = tx; if (src_cnt > xor_src_cnt) { /* drop completed sources */ src_cnt -= xor_src_cnt; - src_off += xor_src_cnt; - /* use the intermediate result a source */ - dma_src[--src_off] = dma_dest; src_cnt++; + src_list += xor_src_cnt - 1; } else break; } @@ -189,22 +178,40 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR, &dest, 1, src_list, src_cnt, len); - dma_addr_t *dma_src = NULL; + struct dma_device *device = chan ? chan->device : NULL; + struct dmaengine_unmap_data *unmap = NULL; BUG_ON(src_cnt <= 1); - if (submit->scribble) - dma_src = submit->scribble; - else if (sizeof(dma_addr_t) <= sizeof(struct page *)) - dma_src = (dma_addr_t *) src_list; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO); + + if (unmap && is_dma_xor_aligned(device, offset, 0, len)) { + struct dma_async_tx_descriptor *tx; + int i, j; - if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) { /* run the xor asynchronously */ pr_debug("%s (async): len: %zu\n", __func__, len); - return do_async_xor(chan, dest, src_list, offset, src_cnt, len, - dma_src, submit); + unmap->len = len; + for (i = 0, j = 0; i < src_cnt; i++) { + if (!src_list[i]) + continue; + unmap->to_cnt++; + unmap->addr[j++] = dma_map_page(device->dev, src_list[i], + offset, len, DMA_TO_DEVICE); + } + + /* map it bidirectional as it may be re-used as a source */ + unmap->addr[j] = dma_map_page(device->dev, dest, offset, len, + DMA_BIDIRECTIONAL); + unmap->bidi_cnt = 1; + + tx = do_async_xor(chan, unmap, submit); + dmaengine_unmap_put(unmap); + return tx; } else { + dmaengine_unmap_put(unmap); /* run the xor synchronously */ pr_debug("%s (sync): len: %zu\n", __func__, len); WARN_ONCE(chan, "%s: no space for dma address conversion\n", From 173e86b2809234cb5f2a50e9a8c159b70e23da1c Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 18 Oct 2013 19:35:27 +0200 Subject: [PATCH 07/33] async_xor_val: convert to dmaengine_unmap_data Use the generic unmap object to unmap dma buffers. 
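Note the layout contract all of these conversions rely on:
dmaengine_unmap() walks unmap->addr[] in fixed segments, entries
[0, to_cnt) as DMA_TO_DEVICE, then from_cnt DMA_FROM_DEVICE entries, then
bidi_cnt DMA_BIDIRECTIONAL entries, so addresses must be packed in that
order. xor_val is the degenerate case, all sources and no separate
destination; a sketch of the mapping loop used below:

    unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
    for (i = 0; i < src_cnt; i++) {
            /* sources first; to_cnt tells dmaengine_unmap() how many */
            unmap->addr[i] = dma_map_page(device->dev, src_list[i],
                                          offset, len, DMA_TO_DEVICE);
            unmap->to_cnt++;
    }
    unmap->len = len;   /* one common length covers every entry */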
Cc: Vinod Koul Cc: Tomasz Figa Cc: Dave Jiang Reported-by: Bartlomiej Zolnierkiewicz [bzolnier: minor cleanups] Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Signed-off-by: Dan Williams --- crypto/async_tx/async_xor.c | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index f092fa14a745..d2cc77d501c7 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -275,18 +275,17 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; - dma_addr_t *dma_src = NULL; + struct dmaengine_unmap_data *unmap = NULL; BUG_ON(src_cnt <= 1); - if (submit->scribble) - dma_src = submit->scribble; - else if (sizeof(dma_addr_t) <= sizeof(struct page *)) - dma_src = (dma_addr_t *) src_list; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO); - if (dma_src && device && src_cnt <= device->max_xor && + if (unmap && src_cnt <= device->max_xor && is_dma_xor_aligned(device, offset, 0, len)) { - unsigned long dma_prep_flags = 0; + unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP | + DMA_COMPL_SKIP_DEST_UNMAP; int i; pr_debug("%s: (async) len: %zu\n", __func__, len); @@ -295,11 +294,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, dma_prep_flags |= DMA_PREP_INTERRUPT; if (submit->flags & ASYNC_TX_FENCE) dma_prep_flags |= DMA_PREP_FENCE; - for (i = 0; i < src_cnt; i++) - dma_src[i] = dma_map_page(device->dev, src_list[i], - offset, len, DMA_TO_DEVICE); - tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt, + for (i = 0; i < src_cnt; i++) { + unmap->addr[i] = dma_map_page(device->dev, src_list[i], + offset, len, DMA_TO_DEVICE); + unmap->to_cnt++; + } + unmap->len = len; + + tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt, len, result, dma_prep_flags); if (unlikely(!tx)) { @@ -308,11 +311,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, while (!tx) { dma_async_issue_pending(chan); tx = device->device_prep_dma_xor_val(chan, - dma_src, src_cnt, len, result, + unmap->addr, src_cnt, len, result, dma_prep_flags); } } - + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); } else { enum async_tx_flags flags_orig = submit->flags; @@ -334,6 +337,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, async_tx_sync_epilog(submit); submit->flags = flags_orig; } + dmaengine_unmap_put(unmap); return tx; } From 3bbdd49872931b8c4282aeb1cab5af7cce2cfb0d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 18 Oct 2013 19:35:28 +0200 Subject: [PATCH 08/33] async_raid6_recov: convert to dmaengine_unmap_data Use the generic unmap object to unmap dma buffers. 
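The recovery helpers also show the P+Q destination convention:
device_prep_dma_pq() always takes a two-entry destination array, and even
with DMA_PREP_PQ_DISABLE_P the engine expects Q to sit in slot 1. Roughly,
matching the async_sum_product() conversion below (a sketch; the real code
leaves pq[0] untouched since P is disabled):

    dma_addr_t pq[2];

    unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
    unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
    unmap->to_cnt = 2;
    /* the dest is re-read as a source, so map it bidirectionally */
    unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
    unmap->bidi_cnt = 1;
    unmap->len = len;

    pq[0] = 0;                  /* ignored: DMA_PREP_PQ_DISABLE_P */
    pq[1] = unmap->addr[2];     /* engine looks for Q right after P */

    tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef, len,
                                 DMA_COMPL_SKIP_SRC_UNMAP |
                                 DMA_COMPL_SKIP_DEST_UNMAP |
                                 DMA_PREP_PQ_DISABLE_P);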
Cc: Vinod Koul Cc: Tomasz Figa Cc: Dave Jiang Reported-by: Bartlomiej Zolnierkiewicz [bzolnier: keep temporary dma_dest array in async_mult()] Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Signed-off-by: Dan Williams --- crypto/async_tx/async_raid6_recov.c | 69 ++++++++++++++++++++--------- 1 file changed, 49 insertions(+), 20 deletions(-) diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index a9f08a6a582e..a3a72a784421 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c @@ -26,6 +26,7 @@ #include #include #include +#include static struct dma_async_tx_descriptor * async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, @@ -34,35 +35,47 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, &dest, 1, srcs, 2, len); struct dma_device *dma = chan ? chan->device : NULL; + struct dmaengine_unmap_data *unmap = NULL; const u8 *amul, *bmul; u8 ax, bx; u8 *a, *b, *c; - if (dma) { - dma_addr_t dma_dest[2]; - dma_addr_t dma_src[2]; + if (dma) + unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); + + if (unmap) { struct device *dev = dma->dev; + dma_addr_t pq[2]; struct dma_async_tx_descriptor *tx; - enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; + enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | + DMA_COMPL_SKIP_DEST_UNMAP | + DMA_PREP_PQ_DISABLE_P; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); - dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); - dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); - tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, + unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); + unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); + unmap->to_cnt = 2; + + unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); + unmap->bidi_cnt = 1; + /* engine only looks at Q, but expects it to follow P */ + pq[1] = unmap->addr[2]; + + unmap->len = len; + tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef, len, dma_flags); if (tx) { + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); + dmaengine_unmap_put(unmap); return tx; } /* could not get a descriptor, unmap and fall through to * the synchronous path */ - dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); - dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); - dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE); + dmaengine_unmap_put(unmap); } /* run the operation synchronously */ @@ -89,23 +102,40 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, &dest, 1, &src, 1, len); struct dma_device *dma = chan ? 
chan->device : NULL; + struct dmaengine_unmap_data *unmap = NULL; const u8 *qmul; /* Q multiplier table */ u8 *d, *s; - if (dma) { + if (dma) + unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); + + if (unmap) { dma_addr_t dma_dest[2]; - dma_addr_t dma_src[1]; struct device *dev = dma->dev; struct dma_async_tx_descriptor *tx; - enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; + enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | + DMA_COMPL_SKIP_DEST_UNMAP | + DMA_PREP_PQ_DISABLE_P; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); - dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); - tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, - len, dma_flags); + unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); + unmap->to_cnt++; + unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); + dma_dest[1] = unmap->addr[1]; + unmap->bidi_cnt++; + unmap->len = len; + + /* this looks funny, but the engine looks for Q at + * dma_dest[1] and ignores dma_dest[0] as a dest + * due to DMA_PREP_PQ_DISABLE_P + */ + tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr, + 1, &coef, len, dma_flags); + if (tx) { + dma_set_unmap(tx, unmap); + dmaengine_unmap_put(unmap); async_tx_submit(chan, tx, submit); return tx; } @@ -113,8 +143,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, /* could not get a descriptor, unmap and fall through to * the synchronous path */ - dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); - dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); + dmaengine_unmap_put(unmap); } /* no channel available, or failed to allocate a descriptor, so From 7476bd79fc019dd9a8361de6696627a4eae3ef05 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 18 Oct 2013 19:35:29 +0200 Subject: [PATCH 09/33] async_pq: convert to dmaengine_unmap_data Use the generic unmap object to unmap dma buffers. 
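For sizing, the unmap object here gets one slot per disk: disks - 2 source
slots plus the two bidirectional P/Q slots, and a missing P or Q still
consumes its slot as a 0 bus address, which dmaengine_unmap() (taught to
skip zero entries at the end of this patch) ignores. The allocation lands
in the smallest pool whose power-of-two class fits; a worked example
against the pools from patch 4 (the three larger pools exist only with
CONFIG_ASYNC_TX_DMA):

    /* __get_unmap_pool(nr) selects by get_count_order(nr), i.e. ceil(log2(nr)) */
    dmaengine_get_unmap_data(dev, 2, GFP_NOIO);   /* order 1 -> dmaengine-unmap-2   */
    dmaengine_get_unmap_data(dev, 6, GFP_NOIO);   /* order 3 -> dmaengine-unmap-16  */
    dmaengine_get_unmap_data(dev, 18, GFP_NOIO);  /* order 5 -> dmaengine-unmap-128 */
    dmaengine_get_unmap_data(dev, 256, GFP_NOIO); /* order 8 -> dmaengine-unmap-256 */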
Cc: Vinod Koul Cc: Tomasz Figa Cc: Dave Jiang Reported-by: Bartlomiej Zolnierkiewicz [bzolnier: keep temporary dma_dest array in do_async_gen_syndrome()] Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Signed-off-by: Dan Williams --- crypto/async_tx/async_pq.c | 117 ++++++++++++++++++++----------------- drivers/dma/dmaengine.c | 5 +- 2 files changed, 69 insertions(+), 53 deletions(-) diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index 91d5d385899e..8cdbf33bd046 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -46,49 +46,25 @@ static struct page *pq_scribble_page; * do_async_gen_syndrome - asynchronously calculate P and/or Q */ static __async_inline struct dma_async_tx_descriptor * -do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, - const unsigned char *scfs, unsigned int offset, int disks, - size_t len, dma_addr_t *dma_src, +do_async_gen_syndrome(struct dma_chan *chan, + const unsigned char *scfs, int disks, + struct dmaengine_unmap_data *unmap, + enum dma_ctrl_flags dma_flags, struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct dma_device *dma = chan->device; - enum dma_ctrl_flags dma_flags = 0; enum async_tx_flags flags_orig = submit->flags; dma_async_tx_callback cb_fn_orig = submit->cb_fn; dma_async_tx_callback cb_param_orig = submit->cb_param; int src_cnt = disks - 2; - unsigned char coefs[src_cnt]; unsigned short pq_src_cnt; dma_addr_t dma_dest[2]; int src_off = 0; - int idx; - int i; - /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */ - if (P(blocks, disks)) - dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset, - len, DMA_BIDIRECTIONAL); - else - dma_flags |= DMA_PREP_PQ_DISABLE_P; - if (Q(blocks, disks)) - dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset, - len, DMA_BIDIRECTIONAL); - else - dma_flags |= DMA_PREP_PQ_DISABLE_Q; - - /* convert source addresses being careful to collapse 'empty' - * sources and update the coefficients accordingly - */ - for (i = 0, idx = 0; i < src_cnt; i++) { - if (blocks[i] == NULL) - continue; - dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, - DMA_TO_DEVICE); - coefs[idx] = scfs[i]; - idx++; - } - src_cnt = idx; + dma_flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; while (src_cnt > 0) { submit->flags = flags_orig; @@ -100,28 +76,25 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, if (src_cnt > pq_src_cnt) { submit->flags &= ~ASYNC_TX_ACK; submit->flags |= ASYNC_TX_FENCE; - dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = NULL; submit->cb_param = NULL; } else { - dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = cb_fn_orig; submit->cb_param = cb_param_orig; if (cb_fn_orig) dma_flags |= DMA_PREP_INTERRUPT; } - if (submit->flags & ASYNC_TX_FENCE) - dma_flags |= DMA_PREP_FENCE; - /* Since we have clobbered the src_list we are committed - * to doing this asynchronously. 
Drivers force forward - * progress in case they can not provide a descriptor + /* Drivers force forward progress in case they can not provide + * a descriptor */ for (;;) { + dma_dest[0] = unmap->addr[disks - 2]; + dma_dest[1] = unmap->addr[disks - 1]; tx = dma->device_prep_dma_pq(chan, dma_dest, - &dma_src[src_off], + &unmap->addr[src_off], pq_src_cnt, - &coefs[src_off], len, + &scfs[src_off], unmap->len, dma_flags); if (likely(tx)) break; @@ -129,6 +102,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, dma_async_issue_pending(chan); } + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); submit->depend_tx = tx; @@ -188,10 +162,6 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, * set to NULL those buffers will be replaced with the raid6_zero_page * in the synchronous path and omitted in the hardware-asynchronous * path. - * - * 'blocks' note: if submit->scribble is NULL then the contents of - * 'blocks' may be overwritten to perform address conversions - * (dma_map_page() or page_address()). */ struct dma_async_tx_descriptor * async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, @@ -202,26 +172,69 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, &P(blocks, disks), 2, blocks, src_cnt, len); struct dma_device *device = chan ? chan->device : NULL; - dma_addr_t *dma_src = NULL; + struct dmaengine_unmap_data *unmap = NULL; BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); - if (submit->scribble) - dma_src = submit->scribble; - else if (sizeof(dma_addr_t) <= sizeof(struct page *)) - dma_src = (dma_addr_t *) blocks; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); - if (dma_src && device && + if (unmap && (src_cnt <= dma_maxpq(device, 0) || dma_maxpq(device, DMA_PREP_CONTINUE) > 0) && is_dma_pq_aligned(device, offset, 0, len)) { + struct dma_async_tx_descriptor *tx; + enum dma_ctrl_flags dma_flags = 0; + unsigned char coefs[src_cnt]; + int i, j; + /* run the p+q asynchronously */ pr_debug("%s: (async) disks: %d len: %zu\n", __func__, disks, len); - return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset, - disks, len, dma_src, submit); + + /* convert source addresses being careful to collapse 'empty' + * sources and update the coefficients accordingly + */ + unmap->len = len; + for (i = 0, j = 0; i < src_cnt; i++) { + if (blocks[i] == NULL) + continue; + unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset, + len, DMA_TO_DEVICE); + coefs[j] = raid6_gfexp[i]; + unmap->to_cnt++; + j++; + } + + /* + * DMAs use destinations as sources, + * so use BIDIRECTIONAL mapping + */ + unmap->bidi_cnt++; + if (P(blocks, disks)) + unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks), + offset, len, DMA_BIDIRECTIONAL); + else { + unmap->addr[j++] = 0; + dma_flags |= DMA_PREP_PQ_DISABLE_P; + } + + unmap->bidi_cnt++; + if (Q(blocks, disks)) + unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks), + offset, len, DMA_BIDIRECTIONAL); + else { + unmap->addr[j++] = 0; + dma_flags |= DMA_PREP_PQ_DISABLE_Q; + } + + tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit); + dmaengine_unmap_put(unmap); + return tx; } + dmaengine_unmap_put(unmap); + /* run the pq synchronously */ pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 54138b57b37c..f878c808466e 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -953,9 +953,12 @@ 
static void dmaengine_unmap(struct kref *kref) dma_unmap_page(dev, unmap->addr[i], unmap->len, DMA_FROM_DEVICE); cnt += unmap->bidi_cnt; - for (; i < cnt; i++) + for (; i < cnt; i++) { + if (unmap->addr[i] == 0) + continue; dma_unmap_page(dev, unmap->addr[i], unmap->len, DMA_BIDIRECTIONAL); + } mempool_free(unmap, __get_unmap_pool(cnt)->pool); } From 1786b943dad0b2f655e69b3ad5187f7e39ef32e6 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 18 Oct 2013 19:35:30 +0200 Subject: [PATCH 10/33] async_pq_val: convert to dmaengine_unmap_data Use the generic unmap object to unmap dma buffers. Cc: Vinod Koul Cc: Tomasz Figa Cc: Dave Jiang Reported-by: Bartlomiej Zolnierkiewicz Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Signed-off-by: Dan Williams --- crypto/async_tx/async_pq.c | 58 +++++++++++++++++++++++--------------- 1 file changed, 35 insertions(+), 23 deletions(-) diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index 8cdbf33bd046..4126b56fbc01 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -290,50 +290,60 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, struct dma_async_tx_descriptor *tx; unsigned char coefs[disks-2]; enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; - dma_addr_t *dma_src = NULL; - int src_cnt = 0; + struct dmaengine_unmap_data *unmap = NULL; BUG_ON(disks < 4); - if (submit->scribble) - dma_src = submit->scribble; - else if (sizeof(dma_addr_t) <= sizeof(struct page *)) - dma_src = (dma_addr_t *) blocks; + if (device) + unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); - if (dma_src && device && disks <= dma_maxpq(device, 0) && + if (unmap && disks <= dma_maxpq(device, 0) && is_dma_pq_aligned(device, offset, 0, len)) { struct device *dev = device->dev; - dma_addr_t *pq = &dma_src[disks-2]; - int i; + dma_addr_t pq[2]; + int i, j = 0, src_cnt = 0; pr_debug("%s: (async) disks: %d len: %zu\n", __func__, disks, len); - if (!P(blocks, disks)) + + unmap->len = len; + for (i = 0; i < disks-2; i++) + if (likely(blocks[i])) { + unmap->addr[j] = dma_map_page(dev, blocks[i], + offset, len, + DMA_TO_DEVICE); + coefs[j] = raid6_gfexp[i]; + unmap->to_cnt++; + src_cnt++; + j++; + } + + if (!P(blocks, disks)) { + pq[0] = 0; dma_flags |= DMA_PREP_PQ_DISABLE_P; - else + } else { pq[0] = dma_map_page(dev, P(blocks, disks), offset, len, DMA_TO_DEVICE); - if (!Q(blocks, disks)) + unmap->addr[j++] = pq[0]; + unmap->to_cnt++; + } + if (!Q(blocks, disks)) { + pq[1] = 0; dma_flags |= DMA_PREP_PQ_DISABLE_Q; - else + } else { pq[1] = dma_map_page(dev, Q(blocks, disks), offset, len, DMA_TO_DEVICE); + unmap->addr[j++] = pq[1]; + unmap->to_cnt++; + } if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; - for (i = 0; i < disks-2; i++) - if (likely(blocks[i])) { - dma_src[src_cnt] = dma_map_page(dev, blocks[i], - offset, len, - DMA_TO_DEVICE); - coefs[src_cnt] = raid6_gfexp[i]; - src_cnt++; - } - for (;;) { - tx = device->device_prep_dma_pq_val(chan, pq, dma_src, + tx = device->device_prep_dma_pq_val(chan, pq, + unmap->addr, src_cnt, coefs, len, pqres, @@ -343,6 +353,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, async_tx_quiesce(&submit->depend_tx); dma_async_issue_pending(chan); } + + dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); return tx; From 6f57fd0578dff23a4bd16118f0cb4201bcec91f1 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Fri, 18 Oct 2013 19:35:31 +0200 Subject: [PATCH 11/33] NTB: 
convert to dmaengine_unmap_data Use the generic unmap object to unmap dma buffers. Cc: Vinod Koul Cc: Tomasz Figa Cc: Dave Jiang Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Acked-by: Jon Mason [djbw: fix up unmap len, and GFP flags] Signed-off-by: Dan Williams --- drivers/ntb/ntb_transport.c | 85 +++++++++++++++++++++++++------------ 1 file changed, 58 insertions(+), 27 deletions(-) diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 12a9e83c008b..222c2baa3a4b 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -1034,7 +1034,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, struct dma_chan *chan = qp->dma_chan; struct dma_device *device; size_t pay_off, buff_off; - dma_addr_t src, dest; + struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; void *buf = entry->buf; unsigned long flags; @@ -1045,35 +1045,50 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, goto err; if (len < copy_bytes) - goto err1; + goto err_wait; device = chan->device; pay_off = (size_t) offset & ~PAGE_MASK; buff_off = (size_t) buf & ~PAGE_MASK; if (!is_dma_copy_aligned(device, pay_off, buff_off, len)) - goto err1; + goto err_wait; - dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE); - if (dma_mapping_error(device->dev, dest)) - goto err1; + unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); + if (!unmap) + goto err_wait; - src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE); - if (dma_mapping_error(device->dev, src)) - goto err2; + unmap->len = len; + unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset), + pay_off, len, DMA_TO_DEVICE); + if (dma_mapping_error(device->dev, unmap->addr[0])) + goto err_get_unmap; - flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE | + unmap->to_cnt = 1; + + unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf), + buff_off, len, DMA_FROM_DEVICE); + if (dma_mapping_error(device->dev, unmap->addr[1])) + goto err_get_unmap; + + unmap->from_cnt = 1; + + flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; - txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); + txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], + unmap->addr[0], len, flags); if (!txd) - goto err3; + goto err_get_unmap; txd->callback = ntb_rx_copy_callback; txd->callback_param = entry; + dma_set_unmap(txd, unmap); cookie = dmaengine_submit(txd); if (dma_submit_error(cookie)) - goto err3; + goto err_set_unmap; + + dmaengine_unmap_put(unmap); qp->last_cookie = cookie; @@ -1081,11 +1096,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, return; -err3: - dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); -err2: - dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE); -err1: +err_set_unmap: + dmaengine_unmap_put(unmap); +err_get_unmap: + dmaengine_unmap_put(unmap); +err_wait: /* If the callbacks come out of order, the writing of the index to the * last completed will be out of order. This may result in the * receive stalling forever. 
@@ -1245,7 +1260,8 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, struct dma_chan *chan = qp->dma_chan; struct dma_device *device; size_t dest_off, buff_off; - dma_addr_t src, dest; + struct dmaengine_unmap_data *unmap; + dma_addr_t dest; dma_cookie_t cookie; void __iomem *offset; size_t len = entry->len; @@ -1273,28 +1289,43 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, if (!is_dma_copy_aligned(device, buff_off, dest_off, len)) goto err; - src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE); - if (dma_mapping_error(device->dev, src)) + unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT); + if (!unmap) goto err; - flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT; - txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); + unmap->len = len; + unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf), + buff_off, len, DMA_TO_DEVICE); + if (dma_mapping_error(device->dev, unmap->addr[0])) + goto err_get_unmap; + + unmap->to_cnt = 1; + + flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | + DMA_PREP_INTERRUPT; + txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, + flags); if (!txd) - goto err1; + goto err_get_unmap; txd->callback = ntb_tx_copy_callback; txd->callback_param = entry; + dma_set_unmap(txd, unmap); cookie = dmaengine_submit(txd); if (dma_submit_error(cookie)) - goto err1; + goto err_set_unmap; + + dmaengine_unmap_put(unmap); dma_async_issue_pending(chan); qp->tx_async++; return; -err1: - dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); +err_set_unmap: + dmaengine_unmap_put(unmap); +err_get_unmap: + dmaengine_unmap_put(unmap); err: ntb_memcpy_tx(entry, offset); qp->tx_memcpy++; From 54f8d501e842879143e867e70996574a54d1e130 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Fri, 18 Oct 2013 19:35:32 +0200 Subject: [PATCH 12/33] dmaengine: remove DMA unmap from drivers Remove support for DMA unmapping from drivers as it is no longer needed (DMA core code is now handling it). 
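With the legacy paths gone, a driver's obligation shrinks to a single core
call in its completion routine. The resulting pattern looks roughly like
this (hypothetical driver, names invented for illustration):

    static void foo_dma_descriptor_complete(struct foo_desc *desc)
    {
            struct dma_async_tx_descriptor *txd = &desc->txd;

            dma_cookie_complete(txd);
            dma_descriptor_unmap(txd);  /* the core now does all unmapping */

            if (txd->callback)
                    txd->callback(txd->callback_param);
            dma_run_dependencies(txd);
    }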
Cc: Vinod Koul Cc: Tomasz Figa Cc: Dave Jiang Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park [djbw: fix up chan2parent() unused warning in drivers/dma/dw/core.c] Signed-off-by: Dan Williams --- arch/arm/include/asm/hardware/iop3xx-adma.h | 30 --- arch/arm/include/asm/hardware/iop_adma.h | 4 - arch/arm/mach-iop13xx/include/mach/adma.h | 26 -- drivers/dma/amba-pl08x.c | 31 --- drivers/dma/at_hdmac.c | 25 -- drivers/dma/dw/core.c | 24 -- drivers/dma/ep93xx_dma.c | 29 --- drivers/dma/fsldma.c | 16 -- drivers/dma/ioat/dma.c | 16 -- drivers/dma/ioat/dma.h | 12 - drivers/dma/ioat/dma_v2.c | 1 - drivers/dma/ioat/dma_v3.c | 166 ------------ drivers/dma/iop-adma.c | 96 +------ drivers/dma/mv_xor.c | 44 +--- drivers/dma/ppc4xx/adma.c | 269 -------------------- drivers/dma/timb_dma.c | 36 --- drivers/dma/txx9dmac.c | 24 -- 17 files changed, 3 insertions(+), 846 deletions(-) diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h index 9b28f1243bdc..240b29ef17db 100644 --- a/arch/arm/include/asm/hardware/iop3xx-adma.h +++ b/arch/arm/include/asm/hardware/iop3xx-adma.h @@ -393,36 +393,6 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt, return slot_cnt; } -static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc) -{ - return 0; -} - -static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan) -{ - union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - - switch (chan->device->id) { - case DMA0_ID: - case DMA1_ID: - return hw_desc.dma->dest_addr; - case AAU_ID: - return hw_desc.aau->dest_addr; - default: - BUG(); - } - return 0; -} - - -static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan) -{ - BUG(); - return 0; -} - static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan) { diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h index 122f86d8c991..250760e08103 100644 --- a/arch/arm/include/asm/hardware/iop_adma.h +++ b/arch/arm/include/asm/hardware/iop_adma.h @@ -82,8 +82,6 @@ struct iop_adma_chan { * @slot_cnt: total slots used in an transaction (group of operations) * @slots_per_op: number of slots per operation * @idx: pool index - * @unmap_src_cnt: number of xor sources - * @unmap_len: transaction bytecount * @tx_list: list of descriptors that are associated with one operation * @async_tx: support for the async_tx api * @group_list: list of slots that make up a multi-descriptor transaction @@ -99,8 +97,6 @@ struct iop_adma_desc_slot { u16 slot_cnt; u16 slots_per_op; u16 idx; - u16 unmap_src_cnt; - size_t unmap_len; struct list_head tx_list; struct dma_async_tx_descriptor async_tx; union { diff --git a/arch/arm/mach-iop13xx/include/mach/adma.h b/arch/arm/mach-iop13xx/include/mach/adma.h index 6d3782d85a9f..a86fd0ed7757 100644 --- a/arch/arm/mach-iop13xx/include/mach/adma.h +++ b/arch/arm/mach-iop13xx/include/mach/adma.h @@ -218,20 +218,6 @@ iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op) #define iop_chan_pq_slot_count iop_chan_xor_slot_count #define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count -static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, - struct iop_adma_chan *chan) -{ - struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; - return hw_desc->dest_addr; -} - -static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc, - struct iop_adma_chan 
*chan) -{ - struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; - return hw_desc->q_dest_addr; -} - static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan) { @@ -350,18 +336,6 @@ iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt, hw_desc->desc_ctrl = u_desc_ctrl.value; } -static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc) -{ - struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; - union { - u32 value; - struct iop13xx_adma_desc_ctrl field; - } u_desc_ctrl; - - u_desc_ctrl.value = hw_desc->desc_ctrl; - return u_desc_ctrl.field.pq_xfer_en; -} - static inline void iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, unsigned long flags) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 7f9846464b77..6a5f782ec7eb 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1164,43 +1164,12 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, kfree(txd); } -static void pl08x_unmap_buffers(struct pl08x_txd *txd) -{ - struct device *dev = txd->vd.tx.chan->device->dev; - struct pl08x_sg *dsg; - - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - else { - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - } - } - if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - else - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - } -} - static void pl08x_desc_free(struct virt_dma_desc *vd) { struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); dma_descriptor_unmap(txd); - if (!plchan->slave) - pl08x_unmap_buffers(txd); - if (!txd->done) pl08x_release_mux(plchan); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index cc7098ddf9d4..6deaefbec0b0 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -344,32 +344,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) /* move myself to free_list */ list_move(&desc->desc_node, &atchan->free_list); - /* unmap dma addresses (not on slave channels) */ dma_descriptor_unmap(txd); - if (!atchan->chan_common.private) { - struct device *parent = chan2parent(&atchan->chan_common); - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(parent, - desc->lli.daddr, - desc->len, DMA_FROM_DEVICE); - else - dma_unmap_page(parent, - desc->lli.daddr, - desc->len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(parent, - desc->lli.saddr, - desc->len, DMA_TO_DEVICE); - else - dma_unmap_page(parent, - desc->lli.saddr, - desc->len, DMA_TO_DEVICE); - } - } - /* for cyclic transfers, * no need to replay callback function while stopping */ if (!atc_chan_is_cyclic(atchan)) { diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index e3fe1b1a73b1..1f39ccce2727 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -85,10 +85,6 @@ static struct device *chan2dev(struct dma_chan *chan) { return &chan->dev->device; } 
-static struct device *chan2parent(struct dma_chan *chan) -{ - return chan->dev->device.parent; -} static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) { @@ -312,26 +308,6 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, list_move(&desc->desc_node, &dwc->free_list); dma_descriptor_unmap(txd); - if (!is_slave_direction(dwc->direction)) { - struct device *parent = chan2parent(&dwc->chan); - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(parent, desc->lli.dar, - desc->total_len, DMA_FROM_DEVICE); - else - dma_unmap_page(parent, desc->lli.dar, - desc->total_len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(parent, desc->lli.sar, - desc->total_len, DMA_TO_DEVICE); - else - dma_unmap_page(parent, desc->lli.sar, - desc->total_len, DMA_TO_DEVICE); - } - } - spin_unlock_irqrestore(&dwc->lock, flags); if (callback) diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index dcd6bf5d3091..cb4bf682a708 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -733,28 +733,6 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac) spin_unlock_irqrestore(&edmac->lock, flags); } -static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc) -{ - struct device *dev = desc->txd.chan->device->dev; - - if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(dev, desc->src_addr, desc->size, - DMA_TO_DEVICE); - else - dma_unmap_page(dev, desc->src_addr, desc->size, - DMA_TO_DEVICE); - } - if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(dev, desc->dst_addr, desc->size, - DMA_FROM_DEVICE); - else - dma_unmap_page(dev, desc->dst_addr, desc->size, - DMA_FROM_DEVICE); - } -} - static void ep93xx_dma_tasklet(unsigned long data) { struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; @@ -787,14 +765,7 @@ static void ep93xx_dma_tasklet(unsigned long data) /* Now we can release all the chained descriptors */ list_for_each_entry_safe(desc, d, &list, node) { - /* - * For the memcpy channels the API requires us to unmap the - * buffers unless requested otherwise. 
- */ dma_descriptor_unmap(&desc->txd); - if (!edmac->chan.private) - ep93xx_dma_unmap_buffers(desc); - ep93xx_dma_desc_put(edmac, desc); } diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 66c4052a1f34..d9e6381b2b16 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -869,22 +869,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, dma_run_dependencies(txd); dma_descriptor_unmap(txd); - /* Unmap the dst buffer, if requested */ - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE); - else - dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE); - } - - /* Unmap the src buffer, if requested */ - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(dev, src, len, DMA_TO_DEVICE); - else - dma_unmap_page(dev, src, len, DMA_TO_DEVICE); - } - #ifdef FSL_DMA_LD_DEBUG chan_dbg(chan, "LD %p free\n", desc); #endif diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 26f8cfd6bc3f..c123e32dbbb0 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data) writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } -void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, - size_t len, struct ioat_dma_descriptor *hw) -{ - struct pci_dev *pdev = chan->device->pdev; - size_t offset = len - hw->size; - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) - ioat_unmap(pdev, hw->dst_addr - offset, len, - PCI_DMA_FROMDEVICE, flags, 1); - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) - ioat_unmap(pdev, hw->src_addr - offset, len, - PCI_DMA_TODEVICE, flags, 0); -} - dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) { dma_addr_t phys_complete; @@ -603,7 +588,6 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) if (tx->cookie) { dma_cookie_complete(tx); dma_descriptor_unmap(tx); - ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); ioat->active -= desc->hw->tx_cnt; if (tx->callback) { tx->callback(tx->callback_param); diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 54fb7b9ff9aa..4300d5af188f 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -342,16 +342,6 @@ static inline bool is_ioat_bug(unsigned long err) return !!err; } -static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, - int direction, enum dma_ctrl_flags flags, bool dst) -{ - if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) || - (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE))) - pci_unmap_single(pdev, addr, len, direction); - else - pci_unmap_page(pdev, addr, len, direction); -} - int ioat_probe(struct ioatdma_device *device); int ioat_register(struct ioatdma_device *device); int ioat1_dma_probe(struct ioatdma_device *dev, int dca); @@ -363,8 +353,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx); enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate); -void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, - size_t len, struct ioat_dma_descriptor *hw); bool ioat_cleanup_preamble(struct ioat_chan_common *chan, dma_addr_t *phys_complete); void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index fc7b50a813cc..5d3affe7e976 100644 --- 
a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -149,7 +149,6 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) dump_desc_dbg(ioat, desc); if (tx->cookie) { dma_descriptor_unmap(tx); - ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); dma_cookie_complete(tx); if (tx->callback) { tx->callback(tx->callback_param); diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 57a2901b917a..43386c171bba 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -96,13 +96,6 @@ static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, static void ioat3_eh(struct ioat2_dma_chan *ioat); -static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) -{ - struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; - - return raw->field[xor_idx_to_field[idx]]; -} - static void xor_set_src(struct ioat_raw_descriptor *descs[2], dma_addr_t addr, u32 offset, int idx) { @@ -296,164 +289,6 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *s kmem_cache_free(device->sed_pool, sed); } -static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, - struct ioat_ring_ent *desc, int idx) -{ - struct ioat_chan_common *chan = &ioat->base; - struct pci_dev *pdev = chan->device->pdev; - size_t len = desc->len; - size_t offset = len - desc->hw->size; - struct dma_async_tx_descriptor *tx = &desc->txd; - enum dma_ctrl_flags flags = tx->flags; - - switch (desc->hw->ctl_f.op) { - case IOAT_OP_COPY: - if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ - ioat_dma_unmap(chan, flags, len, desc->hw); - break; - case IOAT_OP_XOR_VAL: - case IOAT_OP_XOR: { - struct ioat_xor_descriptor *xor = desc->xor; - struct ioat_ring_ent *ext; - struct ioat_xor_ext_descriptor *xor_ex = NULL; - int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt); - struct ioat_raw_descriptor *descs[2]; - int i; - - if (src_cnt > 5) { - ext = ioat2_get_ring_ent(ioat, idx + 1); - xor_ex = ext->xor_ex; - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - descs[0] = (struct ioat_raw_descriptor *) xor; - descs[1] = (struct ioat_raw_descriptor *) xor_ex; - for (i = 0; i < src_cnt; i++) { - dma_addr_t src = xor_get_src(descs, i); - - ioat_unmap(pdev, src - offset, len, - PCI_DMA_TODEVICE, flags, 0); - } - - /* dest is a source in xor validate operations */ - if (xor->ctl_f.op == IOAT_OP_XOR_VAL) { - ioat_unmap(pdev, xor->dst_addr - offset, len, - PCI_DMA_TODEVICE, flags, 1); - break; - } - } - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) - ioat_unmap(pdev, xor->dst_addr - offset, len, - PCI_DMA_FROMDEVICE, flags, 1); - break; - } - case IOAT_OP_PQ_VAL: - case IOAT_OP_PQ: { - struct ioat_pq_descriptor *pq = desc->pq; - struct ioat_ring_ent *ext; - struct ioat_pq_ext_descriptor *pq_ex = NULL; - int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); - struct ioat_raw_descriptor *descs[2]; - int i; - - if (src_cnt > 3) { - ext = ioat2_get_ring_ent(ioat, idx + 1); - pq_ex = ext->pq_ex; - } - - /* in the 'continue' case don't unmap the dests as sources */ - if (dmaf_p_disabled_continue(flags)) - src_cnt--; - else if (dmaf_continue(flags)) - src_cnt -= 3; - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - descs[0] = (struct ioat_raw_descriptor *) pq; - descs[1] = (struct ioat_raw_descriptor *) pq_ex; - for (i = 0; i < src_cnt; i++) { - dma_addr_t src = pq_get_src(descs, i); - - ioat_unmap(pdev, src - offset, len, - PCI_DMA_TODEVICE, flags, 0); - } - - /* the dests are sources in pq validate operations */ - if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { - if 
(!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, - len, PCI_DMA_TODEVICE, flags, 0); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, - len, PCI_DMA_TODEVICE, flags, 0); - break; - } - } - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - } - break; - } - case IOAT_OP_PQ_16S: - case IOAT_OP_PQ_VAL_16S: { - struct ioat_pq_descriptor *pq = desc->pq; - int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); - struct ioat_raw_descriptor *descs[4]; - int i; - - /* in the 'continue' case don't unmap the dests as sources */ - if (dmaf_p_disabled_continue(flags)) - src_cnt--; - else if (dmaf_continue(flags)) - src_cnt -= 3; - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - descs[0] = (struct ioat_raw_descriptor *)pq; - descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw); - descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]); - for (i = 0; i < src_cnt; i++) { - dma_addr_t src = pq16_get_src(descs, i); - - ioat_unmap(pdev, src - offset, len, - PCI_DMA_TODEVICE, flags, 0); - } - - /* the dests are sources in pq validate operations */ - if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, - len, PCI_DMA_TODEVICE, - flags, 0); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, - len, PCI_DMA_TODEVICE, - flags, 0); - break; - } - } - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (!(flags & DMA_PREP_PQ_DISABLE_P)) - ioat_unmap(pdev, pq->p_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - if (!(flags & DMA_PREP_PQ_DISABLE_Q)) - ioat_unmap(pdev, pq->q_addr - offset, len, - PCI_DMA_BIDIRECTIONAL, flags, 1); - } - break; - } - default: - dev_err(&pdev->dev, "%s: unknown op type: %#x\n", - __func__, desc->hw->ctl_f.op); - } -} - static bool desc_has_ext(struct ioat_ring_ent *desc) { struct ioat_dma_descriptor *hw = desc->hw; @@ -578,7 +413,6 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) if (tx->cookie) { dma_cookie_complete(tx); dma_descriptor_unmap(tx); - ioat3_dma_unmap(ioat, desc, idx + i); if (tx->callback) { tx->callback(tx->callback_param); tx->callback = NULL; diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 8f6e426590eb..173e26ff18f8 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) } } -static void -iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) -{ - struct dma_async_tx_descriptor *tx = &desc->async_tx; - struct iop_adma_desc_slot *unmap = desc->group_head; - struct device *dev = &iop_chan->device->pdev->dev; - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = tx->flags; - u32 src_cnt; - dma_addr_t addr; - dma_addr_t dest; - - src_cnt = unmap->unmap_src_cnt; - dest = iop_desc_get_dest_addr(unmap, iop_chan); - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - enum dma_data_direction dir; - - if (src_cnt > 1) /* is xor? 
*/ - dir = DMA_BIDIRECTIONAL; - else - dir = DMA_FROM_DEVICE; - - dma_unmap_page(dev, dest, len, dir); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt); - if (addr == dest) - continue; - dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); - } - } - desc->group_head = NULL; -} - -static void -iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) -{ - struct dma_async_tx_descriptor *tx = &desc->async_tx; - struct iop_adma_desc_slot *unmap = desc->group_head; - struct device *dev = &iop_chan->device->pdev->dev; - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = tx->flags; - u32 src_cnt = unmap->unmap_src_cnt; - dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan); - dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan); - int i; - - if (tx->flags & DMA_PREP_CONTINUE) - src_cnt -= 3; - - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) { - dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL); - dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - dma_addr_t addr; - - for (i = 0; i < src_cnt; i++) { - addr = iop_desc_get_src_addr(unmap, iop_chan, i); - dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); - } - if (desc->pq_check_result) { - dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE); - dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE); - } - } - - desc->group_head = NULL; -} - - static dma_cookie_t iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, struct iop_adma_chan *iop_chan, dma_cookie_t cookie) @@ -153,15 +79,8 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, tx->callback(tx->callback_param); dma_descriptor_unmap(tx); - /* unmap dma addresses - * (unmap_single vs unmap_page?) 
- */ - if (desc->group_head && desc->unmap_len) { - if (iop_desc_is_pq(desc)) - iop_desc_unmap_pq(iop_chan, desc); - else - iop_desc_unmap(iop_chan, desc); - } + if (desc->group_head) + desc->group_head = NULL; } /* run dependent operations */ @@ -592,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) if (sw_desc) { grp_start = sw_desc->group_head; iop_desc_init_interrupt(grp_start, iop_chan); - grp_start->unmap_len = 0; sw_desc->async_tx.flags = flags; } spin_unlock_bh(&iop_chan->lock); @@ -624,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, iop_desc_set_byte_count(grp_start, iop_chan, len); iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); iop_desc_set_memcpy_src_addr(grp_start, dma_src); - sw_desc->unmap_src_cnt = 1; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; } spin_unlock_bh(&iop_chan->lock); @@ -658,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, iop_desc_init_xor(grp_start, src_cnt, flags); iop_desc_set_byte_count(grp_start, iop_chan, len); iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); - sw_desc->unmap_src_cnt = src_cnt; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; while (src_cnt--) iop_desc_set_xor_src_addr(grp_start, src_cnt, @@ -695,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, grp_start->xor_check_result = result; pr_debug("\t%s: grp_start->xor_check_result: %p\n", __func__, grp_start->xor_check_result); - sw_desc->unmap_src_cnt = src_cnt; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; while (src_cnt--) iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, @@ -749,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, dst[0] = dst[1] & 0x7; iop_desc_set_pq_addr(g, dst); - sw_desc->unmap_src_cnt = src_cnt; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; for (i = 0; i < src_cnt; i++) iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); @@ -805,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, g->pq_check_result = pqres; pr_debug("\t%s: g->pq_check_result: %p\n", __func__, g->pq_check_result); - sw_desc->unmap_src_cnt = src_cnt+2; - sw_desc->unmap_len = len; sw_desc->async_tx.flags = flags; while (src_cnt--) iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index ed1ab1d0875e..17326e780e23 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -60,14 +60,6 @@ static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) return hw_desc->phy_dest_addr; } -static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, - int src_idx) -{ - struct mv_xor_desc *hw_desc = desc->hw_desc; - return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)]; -} - - static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, u32 byte_count) { @@ -279,42 +271,8 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, desc->async_tx.callback_param); dma_descriptor_unmap(&desc->async_tx); - /* unmap dma addresses - * (unmap_single vs unmap_page?) 
- */ - if (desc->group_head && desc->unmap_len) { - struct mv_xor_desc_slot *unmap = desc->group_head; - struct device *dev = mv_chan_to_devp(mv_chan); - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = desc->async_tx.flags; - u32 src_cnt; - dma_addr_t addr; - dma_addr_t dest; - - src_cnt = unmap->unmap_src_cnt; - dest = mv_desc_get_dest_addr(unmap); - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - enum dma_data_direction dir; - - if (src_cnt > 1) /* is xor ? */ - dir = DMA_BIDIRECTIONAL; - else - dir = DMA_FROM_DEVICE; - dma_unmap_page(dev, dest, len, dir); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = mv_desc_get_src_addr(unmap, - src_cnt); - if (addr == dest) - continue; - dma_unmap_page(dev, addr, len, - DMA_TO_DEVICE); - } - } + if (desc->group_head) desc->group_head = NULL; - } } /* run dependent operations */ diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 442492da7415..429be432ab7e 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -801,218 +801,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan, local_irq_restore(flags); } -/** - * ppc440spe_desc_get_src_addr - extract the source address from the descriptor - */ -static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan, int src_idx) -{ - struct dma_cdb *dma_hw_desc; - struct xor_cb *xor_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - dma_hw_desc = desc->hw_desc; - /* May have 0, 1, 2, or 3 sources */ - switch (dma_hw_desc->opc) { - case DMA_CDB_OPC_NO_OP: - case DMA_CDB_OPC_DFILL128: - return 0; - case DMA_CDB_OPC_DCHECK128: - if (unlikely(src_idx)) { - printk(KERN_ERR "%s: try to get %d source for" - " DCHECK128\n", __func__, src_idx); - BUG(); - } - return le32_to_cpu(dma_hw_desc->sg1l); - case DMA_CDB_OPC_MULTICAST: - case DMA_CDB_OPC_MV_SG1_SG2: - if (unlikely(src_idx > 2)) { - printk(KERN_ERR "%s: try to get %d source from" - " DMA descr\n", __func__, src_idx); - BUG(); - } - if (src_idx) { - if (le32_to_cpu(dma_hw_desc->sg1u) & - DMA_CUED_XOR_WIN_MSK) { - u8 region; - - if (src_idx == 1) - return le32_to_cpu( - dma_hw_desc->sg1l) + - desc->unmap_len; - - region = (le32_to_cpu( - dma_hw_desc->sg1u)) >> - DMA_CUED_REGION_OFF; - - region &= DMA_CUED_REGION_MSK; - switch (region) { - case DMA_RXOR123: - return le32_to_cpu( - dma_hw_desc->sg1l) + - (desc->unmap_len << 1); - case DMA_RXOR124: - return le32_to_cpu( - dma_hw_desc->sg1l) + - (desc->unmap_len * 3); - case DMA_RXOR125: - return le32_to_cpu( - dma_hw_desc->sg1l) + - (desc->unmap_len << 2); - default: - printk(KERN_ERR - "%s: try to" - " get src3 for region %02x" - "PPC440SPE_DESC_RXOR12?\n", - __func__, region); - BUG(); - } - } else { - printk(KERN_ERR - "%s: try to get %d" - " source for non-cued descr\n", - __func__, src_idx); - BUG(); - } - } - return le32_to_cpu(dma_hw_desc->sg1l); - default: - printk(KERN_ERR "%s: unknown OPC 0x%02x\n", - __func__, dma_hw_desc->opc); - BUG(); - } - return le32_to_cpu(dma_hw_desc->sg1l); - case PPC440SPE_XOR_ID: - /* May have up to 16 sources */ - xor_hw_desc = desc->hw_desc; - return xor_hw_desc->ops[src_idx].l; - } - return 0; -} - -/** - * ppc440spe_desc_get_dest_addr - extract the destination address from the - * descriptor - */ -static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan, int idx) -{ - struct dma_cdb *dma_hw_desc; - struct xor_cb *xor_hw_desc; 
- - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - dma_hw_desc = desc->hw_desc; - - if (likely(!idx)) - return le32_to_cpu(dma_hw_desc->sg2l); - return le32_to_cpu(dma_hw_desc->sg3l); - case PPC440SPE_XOR_ID: - xor_hw_desc = desc->hw_desc; - return xor_hw_desc->cbtal; - } - return 0; -} - -/** - * ppc440spe_desc_get_src_num - extract the number of source addresses from - * the descriptor - */ -static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan) -{ - struct dma_cdb *dma_hw_desc; - struct xor_cb *xor_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - dma_hw_desc = desc->hw_desc; - - switch (dma_hw_desc->opc) { - case DMA_CDB_OPC_NO_OP: - case DMA_CDB_OPC_DFILL128: - return 0; - case DMA_CDB_OPC_DCHECK128: - return 1; - case DMA_CDB_OPC_MV_SG1_SG2: - case DMA_CDB_OPC_MULTICAST: - /* - * Only for RXOR operations we have more than - * one source - */ - if (le32_to_cpu(dma_hw_desc->sg1u) & - DMA_CUED_XOR_WIN_MSK) { - /* RXOR op, there are 2 or 3 sources */ - if (((le32_to_cpu(dma_hw_desc->sg1u) >> - DMA_CUED_REGION_OFF) & - DMA_CUED_REGION_MSK) == DMA_RXOR12) { - /* RXOR 1-2 */ - return 2; - } else { - /* RXOR 1-2-3/1-2-4/1-2-5 */ - return 3; - } - } - return 1; - default: - printk(KERN_ERR "%s: unknown OPC 0x%02x\n", - __func__, dma_hw_desc->opc); - BUG(); - } - case PPC440SPE_XOR_ID: - /* up to 16 sources */ - xor_hw_desc = desc->hw_desc; - return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK; - default: - BUG(); - } - return 0; -} - -/** - * ppc440spe_desc_get_dst_num - get the number of destination addresses in - * this descriptor - */ -static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc, - struct ppc440spe_adma_chan *chan) -{ - struct dma_cdb *dma_hw_desc; - - switch (chan->device->id) { - case PPC440SPE_DMA0_ID: - case PPC440SPE_DMA1_ID: - /* May be 1 or 2 destinations */ - dma_hw_desc = desc->hw_desc; - switch (dma_hw_desc->opc) { - case DMA_CDB_OPC_NO_OP: - case DMA_CDB_OPC_DCHECK128: - return 0; - case DMA_CDB_OPC_MV_SG1_SG2: - case DMA_CDB_OPC_DFILL128: - return 1; - case DMA_CDB_OPC_MULTICAST: - if (desc->dst_cnt == 2) - return 2; - else - return 1; - default: - printk(KERN_ERR "%s: unknown OPC 0x%02x\n", - __func__, dma_hw_desc->opc); - BUG(); - } - case PPC440SPE_XOR_ID: - /* Always only 1 destination */ - return 1; - default: - BUG(); - } - return 0; -} - /** * ppc440spe_desc_get_link - get the address of the descriptor that * follows this one @@ -1705,43 +1493,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot, } } -static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan, - struct ppc440spe_adma_desc_slot *desc) -{ - u32 src_cnt, dst_cnt; - dma_addr_t addr; - - /* - * get the number of sources & destination - * included in this descriptor and unmap - * them all - */ - src_cnt = ppc440spe_desc_get_src_num(desc, chan); - dst_cnt = ppc440spe_desc_get_dst_num(desc, chan); - - /* unmap destinations */ - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - while (dst_cnt--) { - addr = ppc440spe_desc_get_dest_addr( - desc, chan, dst_cnt); - dma_unmap_page(chan->device->dev, - addr, desc->unmap_len, - DMA_FROM_DEVICE); - } - } - - /* unmap sources */ - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = ppc440spe_desc_get_src_addr( - desc, chan, src_cnt); - dma_unmap_page(chan->device->dev, - addr, desc->unmap_len, - DMA_TO_DEVICE); - } - } -} - /** * 
ppc440spe_adma_run_tx_complete_actions - call functions to be called * upon completion @@ -1766,26 +1517,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( desc->async_tx.callback_param); dma_descriptor_unmap(&desc->async_tx); - /* unmap dma addresses - * (unmap_single vs unmap_page?) - * - * actually, ppc's dma_unmap_page() functions are empty, so - * the following code is just for the sake of completeness - */ - if (chan && chan->needs_unmap && desc->group_head && - desc->unmap_len) { - struct ppc440spe_adma_desc_slot *unmap = - desc->group_head; - /* assume 1 slot per op always */ - u32 slot_count = unmap->slot_cnt; - - /* Run through the group list and unmap addresses */ - for (i = 0; i < slot_count; i++) { - BUG_ON(!unmap); - ppc440spe_adma_unmap(chan, unmap); - unmap = unmap->hw_next; - } - } } /* run dependent operations */ diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 1d0c98839087..4506a7b4f972 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -154,38 +154,6 @@ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan) return done; } -static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, - bool single) -{ - dma_addr_t addr; - int len; - - addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) | - dma_desc[4]; - - len = (dma_desc[3] << 8) | dma_desc[2]; - - if (single) - dma_unmap_single(chan2dev(&td_chan->chan), addr, len, - DMA_TO_DEVICE); - else - dma_unmap_page(chan2dev(&td_chan->chan), addr, len, - DMA_TO_DEVICE); -} - -static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) -{ - struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan, - struct timb_dma_chan, chan); - u8 *descs; - - for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) { - __td_unmap_desc(td_chan, descs, single); - if (descs[0] & 0x02) - break; - } -} - static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, struct scatterlist *sg, bool last) { @@ -294,10 +262,6 @@ static void __td_finish(struct timb_dma_chan *td_chan) list_move(&td_desc->desc_node, &td_chan->free_list); dma_descriptor_unmap(txd); - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) - __td_unmap_descs(td_desc, - txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE); - /* * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 22a0b6c78c77..6f2729874016 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -420,30 +420,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, list_move(&desc->desc_node, &dc->free_list); dma_descriptor_unmap(txd); - if (!ds) { - dma_addr_t dmaaddr; - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - dmaaddr = is_dmac64(dc) ? - desc->hwdesc.DAR : desc->hwdesc32.DAR; - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_FROM_DEVICE); - else - dma_unmap_page(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - dmaaddr = is_dmac64(dc) ? 
- desc->hwdesc.SAR : desc->hwdesc32.SAR; - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_TO_DEVICE); - else - dma_unmap_page(chan2parent(&dc->chan), - dmaaddr, desc->len, DMA_TO_DEVICE); - } - } - /* * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here From 0776ae7b89782124ddd72eafe0b1e0fdcdabe32e Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Fri, 18 Oct 2013 19:35:33 +0200 Subject: [PATCH 13/33] dmaengine: remove DMA unmap flags Remove no longer needed DMA unmap flags: - DMA_COMPL_SKIP_SRC_UNMAP - DMA_COMPL_SKIP_DEST_UNMAP - DMA_COMPL_SRC_UNMAP_SINGLE - DMA_COMPL_DEST_UNMAP_SINGLE Cc: Vinod Koul Cc: Tomasz Figa Cc: Dave Jiang Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Acked-by: Jon Mason Acked-by: Mark Brown [djbw: clean up straggling skip unmap flags in ntb] Signed-off-by: Dan Williams --- crypto/async_tx/async_memcpy.c | 3 +-- crypto/async_tx/async_pq.c | 1 - crypto/async_tx/async_raid6_recov.c | 8 ++------ crypto/async_tx/async_xor.c | 6 ++---- drivers/ata/pata_arasan_cf.c | 3 +-- drivers/dma/dmaengine.c | 3 +-- drivers/dma/dmatest.c | 3 +-- drivers/dma/ioat/dma.c | 3 +-- drivers/dma/ioat/dma_v3.c | 12 +++--------- drivers/media/platform/m2m-deinterlace.c | 3 +-- drivers/media/platform/timblogiw.c | 2 +- drivers/misc/carma/carma-fpga.c | 3 +-- drivers/mtd/nand/atmel_nand.c | 3 +-- drivers/mtd/nand/fsmc_nand.c | 2 -- drivers/net/ethernet/micrel/ks8842.c | 6 ++---- drivers/ntb/ntb_transport.c | 11 +++-------- drivers/spi/spi-dw-mid.c | 4 ++-- include/linux/dmaengine.h | 18 ++++-------------- 18 files changed, 27 insertions(+), 67 deletions(-) diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 72750214f779..f8c0b8dbeb75 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -56,8 +56,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { - unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP; + unsigned long dma_prep_flags = 0; if (submit->cb_fn) dma_prep_flags |= DMA_PREP_INTERRUPT; diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index 4126b56fbc01..d05327caf69d 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -62,7 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan, dma_addr_t dma_dest[2]; int src_off = 0; - dma_flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index a3a72a784421..934a84981495 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c @@ -47,9 +47,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, struct device *dev = dma->dev; dma_addr_t pq[2]; struct dma_async_tx_descriptor *tx; - enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP | - DMA_PREP_PQ_DISABLE_P; + enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; @@ -113,9 +111,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, dma_addr_t dma_dest[2]; struct device *dev = dma->dev; struct dma_async_tx_descriptor 
*tx; - enum dma_ctrl_flags dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP | - DMA_PREP_PQ_DISABLE_P; + enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index d2cc77d501c7..3c562f5a60bb 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -41,7 +41,7 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap, dma_async_tx_callback cb_fn_orig = submit->cb_fn; void *cb_param_orig = submit->cb_param; enum async_tx_flags flags_orig = submit->flags; - enum dma_ctrl_flags dma_flags; + enum dma_ctrl_flags dma_flags = 0; int src_cnt = unmap->to_cnt; int xor_src_cnt; dma_addr_t dma_dest = unmap->addr[unmap->to_cnt]; @@ -55,7 +55,6 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap, /* if we are submitting additional xors, leave the chain open * and clear the callback parameters */ - dma_flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; if (src_cnt > xor_src_cnt) { submit->flags &= ~ASYNC_TX_ACK; submit->flags |= ASYNC_TX_FENCE; @@ -284,8 +283,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, if (unmap && src_cnt <= device->max_xor && is_dma_xor_aligned(device, offset, 0, len)) { - unsigned long dma_prep_flags = DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP; + unsigned long dma_prep_flags = 0; int i; pr_debug("%s: (async) len: %zu\n", __func__, len); diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 853f610af28f..e88690ebfd82 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -396,8 +396,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len) struct dma_async_tx_descriptor *tx; struct dma_chan *chan = acdev->dma_chan; dma_cookie_t cookie; - unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP; + unsigned long flags = DMA_PREP_INTERRUPT; int ret = 0; tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index f878c808466e..b69ac3892b86 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1065,8 +1065,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE); unmap->len = len; - flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP; + flags = DMA_CTRL_ACK; tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0], len, flags); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index f4a2a25fae31..5791091c13ca 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -599,8 +599,7 @@ static int dmatest_func(void *data) /* * src and dst buffers are freed by ourselves below */ - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; while (!kthread_should_stop() && !(params->iterations && total_tests >= params->iterations)) { diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index c123e32dbbb0..6fcf741ad91b 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -818,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); dma_dest = dma_map_single(dev, dest, 
IOAT_TEST_SIZE, DMA_FROM_DEVICE); - flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | - DMA_PREP_INTERRUPT; + flags = DMA_PREP_INTERRUPT; tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, IOAT_TEST_SIZE, flags); if (!tx) { diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 43386c171bba..a4798f0cc225 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -1279,9 +1279,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, IOAT_NUM_SRC_TEST, PAGE_SIZE, - DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); + DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test xor prep failed\n"); @@ -1342,9 +1340,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); + &xor_val_result, DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test zero prep failed\n"); err = -ENODEV; @@ -1389,9 +1385,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) DMA_TO_DEVICE); tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, - &xor_val_result, DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP); + &xor_val_result, DMA_PREP_INTERRUPT); if (!tx) { dev_err(dev, "Self-test 2nd zero prep failed\n"); err = -ENODEV; diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c index 540516ca872c..879ea6fdd1be 100644 --- a/drivers/media/platform/m2m-deinterlace.c +++ b/drivers/media/platform/m2m-deinterlace.c @@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op, ctx->xt->dir = DMA_MEM_TO_MEM; ctx->xt->src_sgl = false; ctx->xt->dst_sgl = true; - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | - DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP; + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags); if (tx == NULL) { diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c index b557caf5b1a4..59a95e3ab0e3 100644 --- a/drivers/media/platform/timblogiw.c +++ b/drivers/media/platform/timblogiw.c @@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) desc = dmaengine_prep_slave_sg(fh->chan, buf->sg, sg_elems, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + DMA_PREP_INTERRUPT); if (!desc) { spin_lock_irq(&fh->queue_lock); list_del_init(&vb->queue); diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c index 7b56563f8b74..5335104e7c84 100644 --- a/drivers/misc/carma/carma-fpga.c +++ b/drivers/misc/carma/carma-fpga.c @@ -631,8 +631,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf) struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; dma_addr_t dst, src; - unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP | - DMA_COMPL_SKIP_SRC_UNMAP; + unsigned long dma_flags = 0; dst_sg = buf->vb.sglist; dst_nents = buf->vb.sglen; diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 060feeaf6b3e..2a837cb425d7 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void 
*buf, int len, dma_dev = host->dma_chan->device; - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | - DMA_COMPL_SKIP_DEST_UNMAP; + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; phys_addr = dma_map_single(dma_dev->dev, p, len, dir); if (dma_mapping_error(dma_dev->dev, phys_addr)) { diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 3dc1a7564d87..8b2752263db9 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, dma_dev = chan->device; dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); - flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; - if (direction == DMA_TO_DEVICE) { dma_src = dma_addr; dma_dst = host->data_pa; diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 0951f7aca1ef..822616e3c375 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, - &ctl->sg, 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!ctl->adesc) return NETDEV_TX_BUSY; @@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) sg_dma_len(sg) = DMA_BUFFER_SIZE; ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, - sg, 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); + sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (!ctl->adesc) goto out; diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 222c2baa3a4b..d0222f13d154 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -1037,7 +1037,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, struct dmaengine_unmap_data *unmap; dma_cookie_t cookie; void *buf = entry->buf; - unsigned long flags; entry->len = len; @@ -1073,10 +1072,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, unmap->from_cnt = 1; - flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | - DMA_PREP_INTERRUPT; txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], - unmap->addr[0], len, flags); + unmap->addr[0], len, + DMA_PREP_INTERRUPT); if (!txd) goto err_get_unmap; @@ -1266,7 +1264,6 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, void __iomem *offset; size_t len = entry->len; void *buf = entry->buf; - unsigned long flags; offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); @@ -1301,10 +1298,8 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, unmap->to_cnt = 1; - flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | - DMA_PREP_INTERRUPT; txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, - flags); + DMA_PREP_INTERRUPT); if (!txd) goto err_get_unmap; diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index b9f0192758d6..6d207afec8cb 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) &dws->tx_sgl, 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); + DMA_PREP_INTERRUPT); txdesc->callback = dw_spi_dma_done; txdesc->callback_param = dws; @@ -173,7 +173,7 @@ static int 
mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) &dws->rx_sgl, 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); + DMA_PREP_INTERRUPT); rxdesc->callback = dw_spi_dma_done; rxdesc->callback_param = dws; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 3782cdb782a8..491072cb5ba0 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -171,12 +171,6 @@ struct dma_interleaved_template { * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client * acknowledges receipt, i.e. has has a chance to establish any dependency * chains - * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) - * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) - * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single - * (if not set, do the source dma-unmapping as page) - * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single - * (if not set, do the destination dma-unmapping as page) * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as @@ -188,14 +182,10 @@ struct dma_interleaved_template { enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), DMA_CTRL_ACK = (1 << 1), - DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), - DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), - DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), - DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), - DMA_PREP_PQ_DISABLE_P = (1 << 6), - DMA_PREP_PQ_DISABLE_Q = (1 << 7), - DMA_PREP_CONTINUE = (1 << 8), - DMA_PREP_FENCE = (1 << 9), + DMA_PREP_PQ_DISABLE_P = (1 << 2), + DMA_PREP_PQ_DISABLE_Q = (1 << 3), + DMA_PREP_CONTINUE = (1 << 4), + DMA_PREP_FENCE = (1 << 5), }; /** From 7b61017822cdff9c18ae70005cf52d84e8dafe5d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 6 Nov 2013 16:29:57 -0800 Subject: [PATCH 14/33] Revert "dmatest: append verify result to results" This reverts commit d86b2f298e6de124984f5d5817ed1e6e759b3ada. The kernel log buffer is sufficient for collecting test results. The current logging OOMs the machine on long running tests, and usually only the first error is relevant. It is better to stop on error and parse the kernel output. If output volume becomes an issue we can always investigate using trace messages. Cc: Andy Shevchenko Acked-by: Andy Shevchenko Signed-off-by: Dan Williams --- Documentation/dmatest.txt | 6 +- drivers/dma/dmatest.c | 178 +++++++++++--------------------------- 2 files changed, 52 insertions(+), 132 deletions(-) diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt index a2b5663eae26..8b7a5c879df9 100644 --- a/Documentation/dmatest.txt +++ b/Documentation/dmatest.txt @@ -76,7 +76,5 @@ The message format is unified across the different types of errors. A number in the parens represents additional information, e.g. error code, error counter, or status. -Comparison between buffers is stored to the dedicated structure. - -Note that the verify result is now accessible only via file 'results' in the -debugfs. +Note that the buffer comparison is done in the old way, i.e. data is not +collected and just printed out. 
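Here "the old way" means verifying and reporting in one pass: the comparison loop rates each miscompare against the expected pattern and warns immediately instead of storing it, capping log noise after a fixed number of hits. A condensed sketch of that shape, assuming dmatest.c's PATTERN_COUNT_MASK and MAX_ERROR_COUNT (verify_one_buf is an illustrative name; the hunk below restores the real dmatest_mismatch()/dmatest_verify() pair):

	static unsigned int verify_one_buf(u8 *buf, unsigned int start,
					   unsigned int end,
					   unsigned int counter, u8 pattern)
	{
		unsigned int i, error_count = 0;

		for (i = start; i < end; i++, counter++) {
			u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);

			if (buf[i] != expected) {
				/* warn for at most MAX_ERROR_COUNT bytes... */
				if (error_count < MAX_ERROR_COUNT)
					pr_warn("%s: buf[0x%x] mismatch! Expected %02x, got %02x\n",
						current->comm, i, expected,
						buf[i]);
				error_count++;	/* ...but keep counting */
			}
		}
		return error_count;
	}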
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 5791091c13ca..dcb38d86550e 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -98,20 +98,6 @@ enum dmatest_error_type { DMATEST_ET_DMA_ERROR, DMATEST_ET_DMA_IN_PROGRESS, DMATEST_ET_VERIFY, - DMATEST_ET_VERIFY_BUF, -}; - -struct dmatest_verify_buffer { - unsigned int index; - u8 expected; - u8 actual; -}; - -struct dmatest_verify_result { - unsigned int error_count; - struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; - u8 pattern; - bool is_srcbuf; }; struct dmatest_thread_result { @@ -122,11 +108,10 @@ struct dmatest_thread_result { unsigned int len; enum dmatest_error_type type; union { - unsigned long data; - dma_cookie_t cookie; - enum dma_status status; - int error; - struct dmatest_verify_result *vr; + unsigned long data; + dma_cookie_t cookie; + enum dma_status status; + int error; }; }; @@ -262,9 +247,31 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, } } -static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, - unsigned int start, unsigned int end, unsigned int counter, - u8 pattern, bool is_srcbuf) +static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, + unsigned int counter, bool is_srcbuf) +{ + u8 diff = actual ^ pattern; + u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); + const char *thread_name = current->comm; + + if (is_srcbuf) + pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else if ((pattern & PATTERN_COPY) + && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) + pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else if (diff & PATTERN_SRC) + pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n", + thread_name, index, expected, actual); + else + pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n", + thread_name, index, expected, actual); +} + +static unsigned int dmatest_verify(u8 **bufs, unsigned int start, + unsigned int end, unsigned int counter, u8 pattern, + bool is_srcbuf) { unsigned int i; unsigned int error_count = 0; @@ -272,7 +279,6 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, u8 expected; u8 *buf; unsigned int counter_orig = counter; - struct dmatest_verify_buffer *vb; for (; (buf = *bufs); bufs++) { counter = counter_orig; @@ -280,12 +286,9 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, actual = buf[i]; expected = pattern | (~counter & PATTERN_COUNT_MASK); if (actual != expected) { - if (error_count < MAX_ERROR_COUNT && vr) { - vb = &vr->data[error_count]; - vb->index = i; - vb->expected = expected; - vb->actual = actual; - } + if (error_count < MAX_ERROR_COUNT) + dmatest_mismatch(actual, pattern, i, + counter, is_srcbuf); error_count++; } counter++; @@ -293,7 +296,7 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, } if (error_count > MAX_ERROR_COUNT) - pr_warning("%s: %u errors suppressed\n", + pr_warn("%s: %u errors suppressed\n", current->comm, error_count - MAX_ERROR_COUNT); return error_count; @@ -334,30 +337,6 @@ static unsigned int min_odd(unsigned int x, unsigned int y) return val % 2 ? 
val : val - 1; } -static char *verify_result_get_one(struct dmatest_verify_result *vr, - unsigned int i) -{ - struct dmatest_verify_buffer *vb = &vr->data[i]; - u8 diff = vb->actual ^ vr->pattern; - static char buf[512]; - char *msg; - - if (vr->is_srcbuf) - msg = "srcbuf overwritten!"; - else if ((vr->pattern & PATTERN_COPY) - && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) - msg = "dstbuf not copied!"; - else if (diff & PATTERN_SRC) - msg = "dstbuf was copied!"; - else - msg = "dstbuf mismatch!"; - - snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg, - vb->index, vb->expected, vb->actual); - - return buf; -} - static char *thread_result_get(const char *name, struct dmatest_thread_result *tr) { @@ -373,7 +352,6 @@ static char *thread_result_get(const char *name, [DMATEST_ET_DMA_IN_PROGRESS] = "got completion callback (DMA_IN_PROGRESS)", [DMATEST_ET_VERIFY] = "errors", - [DMATEST_ET_VERIFY_BUF] = "verify errors", }; static char buf[512]; @@ -415,51 +393,6 @@ static int thread_result_add(struct dmatest_info *info, return 0; } -static unsigned int verify_result_add(struct dmatest_info *info, - struct dmatest_result *r, unsigned int n, - unsigned int src_off, unsigned int dst_off, unsigned int len, - u8 **bufs, int whence, unsigned int counter, u8 pattern, - bool is_srcbuf) -{ - struct dmatest_verify_result *vr; - unsigned int error_count; - unsigned int buf_off = is_srcbuf ? src_off : dst_off; - unsigned int start, end; - - if (whence < 0) { - start = 0; - end = buf_off; - } else if (whence > 0) { - start = buf_off + len; - end = info->params.buf_size; - } else { - start = buf_off; - end = buf_off + len; - } - - vr = kmalloc(sizeof(*vr), GFP_KERNEL); - if (!vr) { - pr_warn("dmatest: No memory to store verify result\n"); - return dmatest_verify(NULL, bufs, start, end, counter, pattern, - is_srcbuf); - } - - vr->pattern = pattern; - vr->is_srcbuf = is_srcbuf; - - error_count = dmatest_verify(vr, bufs, start, end, counter, pattern, - is_srcbuf); - if (error_count) { - vr->error_count = error_count; - thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off, - dst_off, len, (unsigned long)vr); - return error_count; - } - - kfree(vr); - return 0; -} - static void result_free(struct dmatest_info *info, const char *name) { struct dmatest_result *r, *_r; @@ -472,8 +405,6 @@ static void result_free(struct dmatest_info *info, const char *name) continue; list_for_each_entry_safe(tr, _tr, &r->results, node) { - if (tr->type == DMATEST_ET_VERIFY_BUF) - kfree(tr->vr); list_del(&tr->node); kfree(tr); } @@ -755,26 +686,25 @@ static int dmatest_func(void *data) error_count = 0; pr_debug("%s: verifying source buffer...\n", thread_name); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->srcs, -1, + error_count += dmatest_verify(thread->srcs, 0, src_off, 0, PATTERN_SRC, true); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->srcs, 0, - src_off, PATTERN_SRC | PATTERN_COPY, true); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->srcs, 1, - src_off + len, PATTERN_SRC, true); + error_count += dmatest_verify(thread->srcs, src_off, + src_off + len, src_off, + PATTERN_SRC | PATTERN_COPY, true); + error_count += dmatest_verify(thread->srcs, src_off + len, + params->buf_size, src_off + len, + PATTERN_SRC, true); - pr_debug("%s: verifying dest buffer...\n", thread_name); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, 
thread->dsts, -1, + pr_debug("%s: verifying dest buffer...\n", + thread->task->comm); + error_count += dmatest_verify(thread->dsts, 0, dst_off, 0, PATTERN_DST, false); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->dsts, 0, - src_off, PATTERN_SRC | PATTERN_COPY, false); - error_count += verify_result_add(info, result, total_tests, - src_off, dst_off, len, thread->dsts, 1, - dst_off + len, PATTERN_DST, false); + error_count += dmatest_verify(thread->dsts, dst_off, + dst_off + len, src_off, + PATTERN_SRC | PATTERN_COPY, false); + error_count += dmatest_verify(thread->dsts, dst_off + len, + params->buf_size, dst_off + len, + PATTERN_DST, false); if (error_count) { thread_result_add(info, result, DMATEST_ET_VERIFY, @@ -1099,20 +1029,12 @@ static int dtf_results_show(struct seq_file *sf, void *data) struct dmatest_info *info = sf->private; struct dmatest_result *result; struct dmatest_thread_result *tr; - unsigned int i; mutex_lock(&info->results_lock); list_for_each_entry(result, &info->results, node) { - list_for_each_entry(tr, &result->results, node) { + list_for_each_entry(tr, &result->results, node) seq_printf(sf, "%s\n", thread_result_get(result->name, tr)); - if (tr->type == DMATEST_ET_VERIFY_BUF) { - for (i = 0; i < tr->vr->error_count; i++) { - seq_printf(sf, "\t%s\n", - verify_result_get_one(tr->vr, i)); - } - } - } } mutex_unlock(&info->results_lock); From 872f05c6e9a37e9358fd58eb54deee7337863496 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 6 Nov 2013 16:29:58 -0800 Subject: [PATCH 15/33] dmatest: replace stored results mechanism, with uniform messages For long running tests the tracking results in a memory leak for the "ok" results, and for the failures the kernel log should be sufficient. Provide a uniform format for error messages so they can be easily parsed and remove the debugfs file. Cc: Andy Shevchenko Signed-off-by: Dan Williams --- Documentation/dmatest.txt | 27 +++-- drivers/dma/dmatest.c | 240 ++++++-------------------------------- 2 files changed, 46 insertions(+), 221 deletions(-) diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt index 8b7a5c879df9..45b8c95f1a21 100644 --- a/Documentation/dmatest.txt +++ b/Documentation/dmatest.txt @@ -16,9 +16,8 @@ be built as module or inside kernel. Let's consider those cases. Part 2 - When dmatest is built as a module... After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest -folder with nodes will be created. There are two important files located. First -is the 'run' node that controls run and stop phases of the test, and the second -one, 'results', is used to get the test case results. +folder with a file named 'run' will be created. 'run' controls run and +stop phases of the test. Note that in this case test will not run on load automatically. @@ -32,8 +31,9 @@ Hint: available channel list could be extracted by running the following command: % ls -1 /sys/class/dma/ -After a while you will start to get messages about current status or error like -in the original code. +Once started, a message like "dmatest: Started 1 threads using dma0chan0" is +emitted. After that only test failure messages are reported until the test +stops. Note that running a new test will not stop any in progress test. @@ -62,19 +62,18 @@ case. You always could check them at run-time by running Part 4 - Gathering the test results -The module provides a storage for the test results in the memory. The gathered -data could be used after test is done.
+Test results are printed to the kernel log buffer with the format: -The special file 'results' in the debugfs represents gathered data of the in -progress test. The messages collected are printed to the kernel log as well. +"dmatest: result : : '' with src_off= dst_off= len= ()" Example of output: - % cat /sys/kernel/debug/dmatest/results - dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0) + % dmesg | tail -n 1 + dmatest: result dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0) The message format is unified across the different types of errors. A number in the parens represents additional information, e.g. error code, error counter, -or status. +or status. A test thread also emits a summary line at completion listing the +number of tests executed, number that failed, and a result code. -Note that the buffer comparison is done in the old way, i.e. data is not -collected and just printed out. +The details of a data miscompare error are also emitted, but do not follow the +above format. diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index dcb38d86550e..58b195f9d03c 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -8,6 +8,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -88,39 +90,6 @@ MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " #define PATTERN_OVERWRITE 0x20 #define PATTERN_COUNT_MASK 0x1f -enum dmatest_error_type { - DMATEST_ET_OK, - DMATEST_ET_MAP_SRC, - DMATEST_ET_MAP_DST, - DMATEST_ET_PREP, - DMATEST_ET_SUBMIT, - DMATEST_ET_TIMEOUT, - DMATEST_ET_DMA_ERROR, - DMATEST_ET_DMA_IN_PROGRESS, - DMATEST_ET_VERIFY, -}; - -struct dmatest_thread_result { - struct list_head node; - unsigned int n; - unsigned int src_off; - unsigned int dst_off; - unsigned int len; - enum dmatest_error_type type; - union { - unsigned long data; - dma_cookie_t cookie; - enum dma_status status; - int error; - }; -}; - -struct dmatest_result { - struct list_head node; - char *name; - struct list_head results; -}; - struct dmatest_info; struct dmatest_thread { @@ -180,10 +149,6 @@ struct dmatest_info { /* debugfs related stuff */ struct dentry *root; - - /* Test results */ - struct list_head results; - struct mutex results_lock; }; static struct dmatest_info test_info; @@ -337,100 +302,19 @@ static unsigned int min_odd(unsigned int x, unsigned int y) return val % 2 ? 
val : val - 1; } -static char *thread_result_get(const char *name, - struct dmatest_thread_result *tr) +static void result(const char *err, unsigned int n, unsigned int src_off, + unsigned int dst_off, unsigned int len, unsigned long data) { - static const char * const messages[] = { - [DMATEST_ET_OK] = "No errors", - [DMATEST_ET_MAP_SRC] = "src mapping error", - [DMATEST_ET_MAP_DST] = "dst mapping error", - [DMATEST_ET_PREP] = "prep error", - [DMATEST_ET_SUBMIT] = "submit error", - [DMATEST_ET_TIMEOUT] = "test timed out", - [DMATEST_ET_DMA_ERROR] = - "got completion callback (DMA_ERROR)", - [DMATEST_ET_DMA_IN_PROGRESS] = - "got completion callback (DMA_IN_PROGRESS)", - [DMATEST_ET_VERIFY] = "errors", - }; - static char buf[512]; - - snprintf(buf, sizeof(buf) - 1, - "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", - name, tr->n, messages[tr->type], tr->src_off, tr->dst_off, - tr->len, tr->data); - - return buf; + pr_info("%s: result #%u: '%s' with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", + current->comm, n, err, src_off, dst_off, len, data); } -static int thread_result_add(struct dmatest_info *info, - struct dmatest_result *r, enum dmatest_error_type type, - unsigned int n, unsigned int src_off, unsigned int dst_off, - unsigned int len, unsigned long data) +static void dbg_result(const char *err, unsigned int n, unsigned int src_off, + unsigned int dst_off, unsigned int len, + unsigned long data) { - struct dmatest_thread_result *tr; - - tr = kzalloc(sizeof(*tr), GFP_KERNEL); - if (!tr) - return -ENOMEM; - - tr->type = type; - tr->n = n; - tr->src_off = src_off; - tr->dst_off = dst_off; - tr->len = len; - tr->data = data; - - mutex_lock(&info->results_lock); - list_add_tail(&tr->node, &r->results); - mutex_unlock(&info->results_lock); - - if (tr->type == DMATEST_ET_OK) - pr_debug("%s\n", thread_result_get(r->name, tr)); - else - pr_warn("%s\n", thread_result_get(r->name, tr)); - - return 0; -} - -static void result_free(struct dmatest_info *info, const char *name) -{ - struct dmatest_result *r, *_r; - - mutex_lock(&info->results_lock); - list_for_each_entry_safe(r, _r, &info->results, node) { - struct dmatest_thread_result *tr, *_tr; - - if (name && strcmp(r->name, name)) - continue; - - list_for_each_entry_safe(tr, _tr, &r->results, node) { - list_del(&tr->node); - kfree(tr); - } - - kfree(r->name); - list_del(&r->node); - kfree(r); - } - - mutex_unlock(&info->results_lock); -} - -static struct dmatest_result *result_init(struct dmatest_info *info, - const char *name) -{ - struct dmatest_result *r; - - r = kzalloc(sizeof(*r), GFP_KERNEL); - if (r) { - r->name = kstrdup(name, GFP_KERNEL); - INIT_LIST_HEAD(&r->results); - mutex_lock(&info->results_lock); - list_add_tail(&r->node, &info->results); - mutex_unlock(&info->results_lock); - } - return r; + pr_debug("%s: result #%u: '%s' with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", + current->comm, n, err, src_off, dst_off, len, data); } /* @@ -456,7 +340,6 @@ static int dmatest_func(void *data) struct dmatest_params *params; struct dma_chan *chan; struct dma_device *dev; - const char *thread_name; unsigned int src_off, dst_off, len; unsigned int error_count; unsigned int failed_tests = 0; @@ -469,9 +352,7 @@ static int dmatest_func(void *data) int src_cnt; int dst_cnt; int i; - struct dmatest_result *result; - thread_name = current->comm; set_freezable(); ret = -ENOMEM; @@ -501,10 +382,6 @@ static int dmatest_func(void *data) } else goto err_thread_type; - result = result_init(info, thread_name); - if (!result) - goto 
err_srcs; - thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); if (!thread->srcs) goto err_srcs; @@ -576,10 +453,8 @@ static int dmatest_func(void *data) ret = dma_mapping_error(dev->dev, dma_srcs[i]); if (ret) { unmap_src(dev->dev, dma_srcs, len, i); - thread_result_add(info, result, - DMATEST_ET_MAP_SRC, - total_tests, src_off, dst_off, - len, ret); + result("src mapping error", total_tests, + src_off, dst_off, len, ret); failed_tests++; continue; } @@ -594,10 +469,8 @@ static int dmatest_func(void *data) unmap_src(dev->dev, dma_srcs, len, src_cnt); unmap_dst(dev->dev, dma_dsts, params->buf_size, i); - thread_result_add(info, result, - DMATEST_ET_MAP_DST, - total_tests, src_off, dst_off, - len, ret); + result("dst mapping error", total_tests, + src_off, dst_off, len, ret); failed_tests++; continue; } @@ -627,9 +500,8 @@ static int dmatest_func(void *data) unmap_src(dev->dev, dma_srcs, len, src_cnt); unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); - thread_result_add(info, result, DMATEST_ET_PREP, - total_tests, src_off, dst_off, - len, 0); + result("prep error", total_tests, src_off, + dst_off, len, ret); msleep(100); failed_tests++; continue; @@ -641,9 +513,8 @@ static int dmatest_func(void *data) cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { - thread_result_add(info, result, DMATEST_ET_SUBMIT, - total_tests, src_off, dst_off, - len, cookie); + result("submit error", total_tests, src_off, + dst_off, len, ret); msleep(100); failed_tests++; continue; @@ -664,17 +535,15 @@ static int dmatest_func(void *data) * free it this time?" dancing. For now, just * leave it dangling. */ - thread_result_add(info, result, DMATEST_ET_TIMEOUT, - total_tests, src_off, dst_off, - len, 0); + result("test timed out", total_tests, src_off, dst_off, + len, 0); failed_tests++; continue; } else if (status != DMA_SUCCESS) { - enum dmatest_error_type type = (status == DMA_ERROR) ? - DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; - thread_result_add(info, result, type, - total_tests, src_off, dst_off, - len, status); + result(status == DMA_ERROR ? 
+ "completion error status" : + "completion busy status", total_tests, src_off, + dst_off, len, ret); failed_tests++; continue; } @@ -685,7 +554,7 @@ static int dmatest_func(void *data) error_count = 0; - pr_debug("%s: verifying source buffer...\n", thread_name); + pr_debug("%s: verifying source buffer...\n", current->comm); error_count += dmatest_verify(thread->srcs, 0, src_off, 0, PATTERN_SRC, true); error_count += dmatest_verify(thread->srcs, src_off, @@ -695,8 +564,7 @@ static int dmatest_func(void *data) params->buf_size, src_off + len, PATTERN_SRC, true); - pr_debug("%s: verifying dest buffer...\n", - thread->task->comm); + pr_debug("%s: verifying dest buffer...\n", current->comm); error_count += dmatest_verify(thread->dsts, 0, dst_off, 0, PATTERN_DST, false); error_count += dmatest_verify(thread->dsts, dst_off, @@ -707,14 +575,12 @@ static int dmatest_func(void *data) PATTERN_DST, false); if (error_count) { - thread_result_add(info, result, DMATEST_ET_VERIFY, - total_tests, src_off, dst_off, - len, error_count); + result("data error", total_tests, src_off, dst_off, + len, error_count); failed_tests++; } else { - thread_result_add(info, result, DMATEST_ET_OK, - total_tests, src_off, dst_off, - len, 0); + dbg_result("test passed", total_tests, src_off, dst_off, + len, 0); } } @@ -731,8 +597,8 @@ err_srcbuf: err_srcs: kfree(pq_coefs); err_thread_type: - pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", - thread_name, total_tests, failed_tests, ret); + pr_info("%s: terminating after %u tests, %u failures (status %d)\n", + current->comm, total_tests, failed_tests, ret); /* terminate all transfers on specified channels */ if (ret) @@ -937,9 +803,6 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run) if (run == false) return 0; - /* Clear results from previous run */ - result_free(info, NULL); - /* Copy test parameters */ params->buf_size = test_buf_size; strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); @@ -1024,35 +887,6 @@ static const struct file_operations dtf_run_fops = { .llseek = default_llseek, }; -static int dtf_results_show(struct seq_file *sf, void *data) -{ - struct dmatest_info *info = sf->private; - struct dmatest_result *result; - struct dmatest_thread_result *tr; - - mutex_lock(&info->results_lock); - list_for_each_entry(result, &info->results, node) { - list_for_each_entry(tr, &result->results, node) - seq_printf(sf, "%s\n", - thread_result_get(result->name, tr)); - } - - mutex_unlock(&info->results_lock); - return 0; -} - -static int dtf_results_open(struct inode *inode, struct file *file) -{ - return single_open(file, dtf_results_show, inode->i_private); -} - -static const struct file_operations dtf_results_fops = { - .open = dtf_results_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - static int dmatest_register_dbgfs(struct dmatest_info *info) { struct dentry *d; @@ -1069,10 +903,6 @@ static int dmatest_register_dbgfs(struct dmatest_info *info) debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info, &dtf_run_fops); - /* Results of test in progress */ - debugfs_create_file("results", S_IRUGO, info->root, info, - &dtf_results_fops); - return 0; err_root: @@ -1090,9 +920,6 @@ static int __init dmatest_init(void) mutex_init(&info->lock); INIT_LIST_HEAD(&info->channels); - mutex_init(&info->results_lock); - INIT_LIST_HEAD(&info->results); - ret = dmatest_register_dbgfs(info); if (ret) return ret; @@ -1112,7 +939,6 @@ static void __exit dmatest_exit(void) 
debugfs_remove_recursive(info->root); stop_threaded_test(info); - result_free(info, NULL); } module_exit(dmatest_exit); From 0adff800662f52d0ffc3e420db231769cb3fff13 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 6 Nov 2013 16:30:00 -0800 Subject: [PATCH 16/33] dmatest: cleanup redundant "dmatest: " prefixes ...now that we have a common pr_fmt. Acked-by: Andy Shevchenko Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 58b195f9d03c..15199edcc366 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -623,8 +623,8 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { ret = kthread_stop(thread->task); - pr_debug("dmatest: thread %s exited with status %d\n", - thread->task->comm, ret); + pr_debug("thread %s exited with status %d\n", + thread->task->comm, ret); list_del(&thread->node); kfree(thread); } @@ -656,9 +656,8 @@ static int dmatest_add_threads(struct dmatest_info *info, for (i = 0; i < params->threads_per_chan; i++) { thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); if (!thread) { - pr_warning("dmatest: No memory for %s-%s%u\n", - dma_chan_name(chan), op, i); - + pr_warn("No memory for %s-%s%u\n", + dma_chan_name(chan), op, i); break; } thread->info = info; @@ -668,8 +667,8 @@ static int dmatest_add_threads(struct dmatest_info *info, thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", dma_chan_name(chan), op, i); if (IS_ERR(thread->task)) { - pr_warning("dmatest: Failed to run thread %s-%s%u\n", - dma_chan_name(chan), op, i); + pr_warn("Failed to run thread %s-%s%u\n", + dma_chan_name(chan), op, i); kfree(thread); break; } @@ -692,7 +691,7 @@ static int dmatest_add_channel(struct dmatest_info *info, dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); if (!dtc) { - pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); + pr_warn("No memory for %s\n", dma_chan_name(chan)); return -ENOMEM; } @@ -712,7 +711,7 @@ static int dmatest_add_channel(struct dmatest_info *info, thread_count += cnt > 0 ? cnt : 0; } - pr_info("dmatest: Started %u threads using %s\n", + pr_info("Started %u threads using %s\n", thread_count, dma_chan_name(chan)); list_add_tail(&dtc->node, &info->channels); @@ -779,7 +778,7 @@ static void __stop_threaded_test(struct dmatest_info *info) list_del(&dtc->node); chan = dtc->chan; dmatest_cleanup_channel(dtc); - pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); + pr_debug("dropped channel %s\n", dma_chan_name(chan)); dma_release_channel(chan); } @@ -906,7 +905,7 @@ static int dmatest_register_dbgfs(struct dmatest_info *info) return 0; err_root: - pr_err("dmatest: Failed to initialize debugfs\n"); + pr_err("Failed to initialize debugfs\n"); return -ENOMEM; } From a310d037b8d06755c62bb4878c00d19490af5550 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 6 Nov 2013 16:30:01 -0800 Subject: [PATCH 17/33] dmatest: restore ability to start test at module load and init 1/ move 'run' control to a module parameter so we can do: modprobe dmatest run=1. With this moved the rest of the debugfs boilerplate can go. 2/ Fix parameter initialization. Previously the test was being started without taking the parameters into account in the built-in case. Also killed off the '__' version of some routines. The new rule is just hold the lock when calling a *threaded_test() routine. 
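For illustration, the new convention reduces to the following pattern (a sketch using the names introduced below; callers take info->lock and the *threaded_test() routines themselves stay lock-free):

    mutex_lock(&info->lock);
    ret = run_threaded_test(info);  /* or stop/restart_threaded_test() */
    mutex_unlock(&info->lock);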
Acked-by: Linus Walleij Cc: Andy Shevchenko Acked-by: Andy Shevchenko Signed-off-by: Dan Williams --- Documentation/dmatest.txt | 18 +-- drivers/dma/dmatest.c | 281 ++++++++++++++++---------------------- 2 files changed, 124 insertions(+), 175 deletions(-) diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt index 45b8c95f1a21..e6e16a7f3706 100644 --- a/Documentation/dmatest.txt +++ b/Documentation/dmatest.txt @@ -15,17 +15,19 @@ be built as module or inside kernel. Let's consider those cases. Part 2 - When dmatest is built as a module... -After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest -folder with a file named 'run' nodes will be created. 'run' controls run and -stop phases of the test. - -Note that in this case test will not run on load automatically. - Example of usage: + % modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1 + +...or: + % modprobe dmatest % echo dma0chan0 > /sys/module/dmatest/parameters/channel % echo 2000 > /sys/module/dmatest/parameters/timeout % echo 1 > /sys/module/dmatest/parameters/iterations - % echo 1 > /sys/kernel/debug/dmatest/run + % echo 1 > /sys/module/dmatest/parameters/run + +...or on the kernel command line: + + dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=1 dmatest.run=1 Hint: available channel list could be extracted by running the following command: @@ -42,7 +44,7 @@ The following command should return actual state of the test. To wait for test done the user may perform a busy loop that checks the state. - % while [ $(cat /sys/kernel/debug/dmatest/run) = "Y" ] + % while [ $(cat /sys/module/dmatest/parameters/run) = "Y" ] > do > echo -n "." > sleep 1 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 15199edcc366..c5048671daf7 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -21,10 +21,6 @@ #include #include #include -#include -#include -#include -#include static unsigned int test_buf_size = 16384; module_param(test_buf_size, uint, S_IRUGO | S_IWUSR); @@ -70,45 +66,6 @@ module_param(timeout, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " "Pass -1 for infinite timeout"); -/* Maximum amount of mismatched bytes in buffer to print */ -#define MAX_ERROR_COUNT 32 - -/* - * Initialization patterns. All bytes in the source buffer has bit 7 - * set, all bytes in the destination buffer has bit 7 cleared. - * - * Bit 6 is set for all bytes which are to be copied by the DMA - * engine. Bit 5 is set for all bytes which are to be overwritten by - * the DMA engine. - * - * The remaining bits are the inverse of a counter which increments by - * one for each byte address. - */ -#define PATTERN_SRC 0x80 -#define PATTERN_DST 0x00 -#define PATTERN_COPY 0x40 -#define PATTERN_OVERWRITE 0x20 -#define PATTERN_COUNT_MASK 0x1f - -struct dmatest_info; - -struct dmatest_thread { - struct list_head node; - struct dmatest_info *info; - struct task_struct *task; - struct dma_chan *chan; - u8 **srcs; - u8 **dsts; - enum dma_transaction_type type; - bool done; -}; - -struct dmatest_chan { - struct list_head node; - struct dma_chan *chan; - struct list_head threads; -}; - /** * struct dmatest_params - test parameters. 
* @buf_size: size of the memcpy test buffer @@ -138,7 +95,7 @@ struct dmatest_params { * @params: test parameters * @lock: access protection to the fields of this structure */ -struct dmatest_info { +static struct dmatest_info { /* Test parameters */ struct dmatest_params params; @@ -146,12 +103,58 @@ struct dmatest_info { struct list_head channels; unsigned int nr_channels; struct mutex lock; - - /* debugfs related stuff */ - struct dentry *root; + bool did_init; +} test_info = { + .channels = LIST_HEAD_INIT(test_info.channels), + .lock = __MUTEX_INITIALIZER(test_info.lock), }; -static struct dmatest_info test_info; +static int dmatest_run_set(const char *val, const struct kernel_param *kp); +static int dmatest_run_get(char *val, const struct kernel_param *kp); +static struct kernel_param_ops run_ops = { + .set = dmatest_run_set, + .get = dmatest_run_get, +}; +static bool dmatest_run; +module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(run, "Run the test (default: false)"); + +/* Maximum amount of mismatched bytes in buffer to print */ +#define MAX_ERROR_COUNT 32 + +/* + * Initialization patterns. All bytes in the source buffer has bit 7 + * set, all bytes in the destination buffer has bit 7 cleared. + * + * Bit 6 is set for all bytes which are to be copied by the DMA + * engine. Bit 5 is set for all bytes which are to be overwritten by + * the DMA engine. + * + * The remaining bits are the inverse of a counter which increments by + * one for each byte address. + */ +#define PATTERN_SRC 0x80 +#define PATTERN_DST 0x00 +#define PATTERN_COPY 0x40 +#define PATTERN_OVERWRITE 0x20 +#define PATTERN_COUNT_MASK 0x1f + +struct dmatest_thread { + struct list_head node; + struct dmatest_info *info; + struct task_struct *task; + struct dma_chan *chan; + u8 **srcs; + u8 **dsts; + enum dma_transaction_type type; + bool done; +}; + +struct dmatest_chan { + struct list_head node; + struct dma_chan *chan; + struct list_head threads; +}; static bool dmatest_match_channel(struct dmatest_params *params, struct dma_chan *chan) @@ -731,13 +734,24 @@ static bool filter(struct dma_chan *chan, void *param) return true; } -static int __run_threaded_test(struct dmatest_info *info) +static int run_threaded_test(struct dmatest_info *info) { dma_cap_mask_t mask; struct dma_chan *chan; struct dmatest_params *params = &info->params; int err = 0; + /* Copy test parameters */ + params->buf_size = test_buf_size; + strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); + strlcpy(params->device, strim(test_device), sizeof(params->device)); + params->threads_per_chan = threads_per_chan; + params->max_channels = max_channels; + params->iterations = iterations; + params->xor_sources = xor_sources; + params->pq_sources = pq_sources; + params->timeout = timeout; + dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); for (;;) { @@ -757,19 +771,8 @@ static int __run_threaded_test(struct dmatest_info *info) return err; } -#ifndef MODULE -static int run_threaded_test(struct dmatest_info *info) -{ - int ret; - mutex_lock(&info->lock); - ret = __run_threaded_test(info); - mutex_unlock(&info->lock); - return ret; -} -#endif - -static void __stop_threaded_test(struct dmatest_info *info) +static void stop_threaded_test(struct dmatest_info *info) { struct dmatest_chan *dtc, *_dtc; struct dma_chan *chan; @@ -785,39 +788,22 @@ static void __stop_threaded_test(struct dmatest_info *info) info->nr_channels = 0; } -static void stop_threaded_test(struct dmatest_info *info) +static int 
restart_threaded_test(struct dmatest_info *info, bool run) { - mutex_lock(&info->lock); - __stop_threaded_test(info); - mutex_unlock(&info->lock); -} - -static int __restart_threaded_test(struct dmatest_info *info, bool run) -{ - struct dmatest_params *params = &info->params; - - /* Stop any running test first */ - __stop_threaded_test(info); - - if (run == false) + /* we might be called early to set run=, defer running until all + * parameters have been evaluated + */ + if (!info->did_init) return 0; - /* Copy test parameters */ - params->buf_size = test_buf_size; - strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); - strlcpy(params->device, strim(test_device), sizeof(params->device)); - params->threads_per_chan = threads_per_chan; - params->max_channels = max_channels; - params->iterations = iterations; - params->xor_sources = xor_sources; - params->pq_sources = pq_sources; - params->timeout = timeout; + /* Stop any running test first */ + stop_threaded_test(info); /* Run test with new parameters */ - return __run_threaded_test(info); + return run_threaded_test(info); } -static bool __is_threaded_test_run(struct dmatest_info *info) +static bool is_threaded_test_run(struct dmatest_info *info) { struct dmatest_chan *dtc; @@ -833,101 +819,61 @@ static bool __is_threaded_test_run(struct dmatest_info *info) return false; } -static ssize_t dtf_read_run(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) +static int dmatest_run_get(char *val, const struct kernel_param *kp) { - struct dmatest_info *info = file->private_data; - char buf[3]; + struct dmatest_info *info = &test_info; mutex_lock(&info->lock); - - if (__is_threaded_test_run(info)) { - buf[0] = 'Y'; + if (is_threaded_test_run(info)) { + dmatest_run = true; } else { - __stop_threaded_test(info); - buf[0] = 'N'; + stop_threaded_test(info); + dmatest_run = false; } + mutex_unlock(&info->lock); + + return param_get_bool(val, kp); +} + +static int dmatest_run_set(const char *val, const struct kernel_param *kp) +{ + struct dmatest_info *info = &test_info; + int ret; + + mutex_lock(&info->lock); + ret = param_set_bool(val, kp); + if (ret) { + mutex_unlock(&info->lock); + return ret; + } + + if (is_threaded_test_run(info)) + ret = -EBUSY; + else if (dmatest_run) + ret = restart_threaded_test(info, dmatest_run); mutex_unlock(&info->lock); - buf[1] = '\n'; - buf[2] = 0x00; - return simple_read_from_buffer(user_buf, count, ppos, buf, 2); -} -static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct dmatest_info *info = file->private_data; - char buf[16]; - bool bv; - int ret = 0; - - if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) - return -EFAULT; - - if (strtobool(buf, &bv) == 0) { - mutex_lock(&info->lock); - - if (__is_threaded_test_run(info)) - ret = -EBUSY; - else - ret = __restart_threaded_test(info, bv); - - mutex_unlock(&info->lock); - } - - return ret ? 
ret : count; -} - -static const struct file_operations dtf_run_fops = { - .read = dtf_read_run, - .write = dtf_write_run, - .open = simple_open, - .llseek = default_llseek, -}; - -static int dmatest_register_dbgfs(struct dmatest_info *info) -{ - struct dentry *d; - - d = debugfs_create_dir("dmatest", NULL); - if (IS_ERR(d)) - return PTR_ERR(d); - if (!d) - goto err_root; - - info->root = d; - - /* Run or stop threaded test */ - debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info, - &dtf_run_fops); - - return 0; - -err_root: - pr_err("Failed to initialize debugfs\n"); - return -ENOMEM; + return ret; } static int __init dmatest_init(void) { struct dmatest_info *info = &test_info; - int ret; + int ret = 0; - memset(info, 0, sizeof(*info)); + if (dmatest_run) { + mutex_lock(&info->lock); + ret = run_threaded_test(info); + mutex_unlock(&info->lock); + } - mutex_init(&info->lock); - INIT_LIST_HEAD(&info->channels); + /* module parameters are stable, inittime tests are started, + * let userspace take over 'run' control + */ + info->did_init = true; - ret = dmatest_register_dbgfs(info); - if (ret) - return ret; - -#ifdef MODULE - return 0; -#else - return run_threaded_test(info); -#endif + return ret; } /* when compiled-in wait for drivers to load first */ late_initcall(dmatest_init); @@ -936,8 +882,9 @@ static void __exit dmatest_exit(void) { struct dmatest_info *info = &test_info; - debugfs_remove_recursive(info->root); + mutex_lock(&info->lock); stop_threaded_test(info); + mutex_unlock(&info->lock); } module_exit(dmatest_exit); From a9e554957de406d6adc581731f571b8a1503f6b0 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 6 Nov 2013 16:30:02 -0800 Subject: [PATCH 18/33] dmatest: support xor-only, or pq-only channels in tests Currently we only test raid channels that happen to also have 'copy' capability. Search for capable channels that do not have DMA_MEMCPY. Note the return value from run_threaded_test never really made sense because it could return errors after successfully starting tests. We already have the test results per channel so missing channels can be detected at that time. 
Acked-by: Andy Shevchenko Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 62 ++++++++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index c5048671daf7..ea829659149a 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -734,12 +734,34 @@ static bool filter(struct dma_chan *chan, void *param) return true; } -static int run_threaded_test(struct dmatest_info *info) +static void request_channels(struct dmatest_info *info, + enum dma_transaction_type type) { dma_cap_mask_t mask; - struct dma_chan *chan; + + dma_cap_zero(mask); + dma_cap_set(type, mask); + for (;;) { + struct dmatest_params *params = &info->params; + struct dma_chan *chan; + + chan = dma_request_channel(mask, filter, params); + if (chan) { + if (dmatest_add_channel(info, chan)) { + dma_release_channel(chan); + break; /* add_channel failed, punt */ + } + } else + break; /* no more channels available */ + if (params->max_channels && + info->nr_channels >= params->max_channels) + break; /* we have all we need */ + } +} + +static void run_threaded_test(struct dmatest_info *info) +{ struct dmatest_params *params = &info->params; - int err = 0; /* Copy test parameters */ params->buf_size = test_buf_size; @@ -752,26 +774,11 @@ static int run_threaded_test(struct dmatest_info *info) params->pq_sources = pq_sources; params->timeout = timeout; - dma_cap_zero(mask); - dma_cap_set(DMA_MEMCPY, mask); - for (;;) { - chan = dma_request_channel(mask, filter, params); - if (chan) { - err = dmatest_add_channel(info, chan); - if (err) { - dma_release_channel(chan); - break; /* add_channel failed, punt */ - } - } else - break; /* no more channels available */ - if (params->max_channels && - info->nr_channels >= params->max_channels) - break; /* we have all we need */ - } - return err; + request_channels(info, DMA_MEMCPY); + request_channels(info, DMA_XOR); + request_channels(info, DMA_PQ); } - static void stop_threaded_test(struct dmatest_info *info) { struct dmatest_chan *dtc, *_dtc; @@ -788,19 +795,19 @@ static void stop_threaded_test(struct dmatest_info *info) info->nr_channels = 0; } -static int restart_threaded_test(struct dmatest_info *info, bool run) +static void restart_threaded_test(struct dmatest_info *info, bool run) { /* we might be called early to set run=, defer running until all * parameters have been evaluated */ if (!info->did_init) - return 0; + return; /* Stop any running test first */ stop_threaded_test(info); /* Run test with new parameters */ - return run_threaded_test(info); + run_threaded_test(info); } static bool is_threaded_test_run(struct dmatest_info *info) @@ -850,7 +857,7 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) if (is_threaded_test_run(info)) ret = -EBUSY; else if (dmatest_run) - ret = restart_threaded_test(info, dmatest_run); + restart_threaded_test(info, dmatest_run); mutex_unlock(&info->lock); @@ -860,11 +867,10 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) static int __init dmatest_init(void) { struct dmatest_info *info = &test_info; - int ret = 0; if (dmatest_run) { mutex_lock(&info->lock); - ret = run_threaded_test(info); + run_threaded_test(info); mutex_unlock(&info->lock); } @@ -873,7 +879,7 @@ static int __init dmatest_init(void) */ info->did_init = true; - return ret; + return 0; } /* when compiled-in wait for drivers to load first */ late_initcall(dmatest_init); From be9fa5a43641103bf13cd1bb8101a1453da03616 Mon Sep 17 
00:00:00 2001 From: Dan Williams Date: Wed, 6 Nov 2013 16:30:03 -0800 Subject: [PATCH 19/33] dmatest: use pseudo random numbers There is no need for dmatest to drain the entropy pool. It would be nice to one day have repeatable runs, but would need a larger rework to synchronize and order calls to the rng across test threads. Acked-by: Andy Shevchenko Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index ea829659149a..01ac7112b5fd 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -176,7 +176,7 @@ static unsigned long dmatest_random(void) { unsigned long buf; - get_random_bytes(&buf, sizeof(buf)); + prandom_bytes(&buf, sizeof(buf)); return buf; } From e3b9c347316fe243bea6abd08681050c43ca22ee Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 6 Nov 2013 16:30:05 -0800 Subject: [PATCH 20/33] dmatest: add support for skipping verification and random data setup Towards enabling dmatest to checkout performance add a 'noverify' mode. Acked-by: Andy Shevchenko Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 44 ++++++++++++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 11 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 01ac7112b5fd..d07b73275d0f 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -66,6 +66,10 @@ module_param(timeout, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " "Pass -1 for infinite timeout"); +static bool noverify; +module_param(noverify, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(noverify, "Disable random data setup and verification"); + /** * struct dmatest_params - test parameters. 
* @buf_size: size of the memcpy test buffer @@ -88,6 +92,7 @@ struct dmatest_params { unsigned int xor_sources; unsigned int pq_sources; int timeout; + bool noverify; }; /** @@ -435,18 +440,30 @@ static int dmatest_func(void *data) break; } - len = dmatest_random() % params->buf_size + 1; + if (params->noverify) { + len = params->buf_size; + src_off = 0; + dst_off = 0; + } else { + len = dmatest_random() % params->buf_size + 1; + len = (len >> align) << align; + if (!len) + len = 1 << align; + src_off = dmatest_random() % (params->buf_size - len + 1); + dst_off = dmatest_random() % (params->buf_size - len + 1); + + src_off = (src_off >> align) << align; + dst_off = (dst_off >> align) << align; + + dmatest_init_srcs(thread->srcs, src_off, len, + params->buf_size); + dmatest_init_dsts(thread->dsts, dst_off, len, + params->buf_size); + } + len = (len >> align) << align; if (!len) len = 1 << align; - src_off = dmatest_random() % (params->buf_size - len + 1); - dst_off = dmatest_random() % (params->buf_size - len + 1); - - src_off = (src_off >> align) << align; - dst_off = (dst_off >> align) << align; - - dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); - dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); for (i = 0; i < src_cnt; i++) { u8 *buf = thread->srcs[i] + src_off; @@ -555,10 +572,14 @@ static int dmatest_func(void *data) unmap_src(dev->dev, dma_srcs, len, src_cnt); unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); - error_count = 0; + if (params->noverify) { + dbg_result("test passed", total_tests, src_off, dst_off, + len, 0); + continue; + } pr_debug("%s: verifying source buffer...\n", current->comm); - error_count += dmatest_verify(thread->srcs, 0, src_off, + error_count = dmatest_verify(thread->srcs, 0, src_off, 0, PATTERN_SRC, true); error_count += dmatest_verify(thread->srcs, src_off, src_off + len, src_off, @@ -773,6 +794,7 @@ static void run_threaded_test(struct dmatest_info *info) params->xor_sources = xor_sources; params->pq_sources = pq_sources; params->timeout = timeout; + params->noverify = noverify; request_channels(info, DMA_MEMCPY); request_channels(info, DMA_XOR); From 86727443a04fdb25397041188efd2527f2b7237b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 6 Nov 2013 16:30:07 -0800 Subject: [PATCH 21/33] dmatest: add basic performance metrics Add iops and throughput to the summary output. Acked-by: Andy Shevchenko Signed-off-by: Dan Williams --- Documentation/dmatest.txt | 4 ++++ drivers/dma/dmatest.c | 35 +++++++++++++++++++++++++++++++++-- 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt index e6e16a7f3706..0beb4b68d81f 100644 --- a/Documentation/dmatest.txt +++ b/Documentation/dmatest.txt @@ -77,5 +77,9 @@ the parens represents additional information, e.g. error code, error counter, or status. A test thread also emits a summary line at completion listing the number of tests executed, number that failed, and a result code. +Example: + % dmesg | tail -n 1 + dmatest: dma3chan0-copy0: summary 400000 tests, 0 failures iops: 61524 KB/s 246098 (0) + The details of a data miscompare error are also emitted, but do not follow the above format. 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index d07b73275d0f..26b502069638 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -325,6 +325,29 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off, current->comm, n, err, src_off, dst_off, len, data); } +static unsigned long long dmatest_persec(s64 runtime, unsigned int val) +{ + unsigned long long per_sec = 1000000; + + if (runtime <= 0) + return 0; + + /* drop precision until runtime is 32-bits */ + while (runtime > UINT_MAX) { + runtime >>= 1; + per_sec <<= 1; + } + + per_sec *= val; + do_div(per_sec, runtime); + return per_sec; +} + +static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) +{ + return dmatest_persec(runtime, len >> 10); +} + /* * This function repeatedly tests DMA transfers of various lengths and * offsets for a given operation type until it is told to exit by @@ -360,6 +383,9 @@ static int dmatest_func(void *data) int src_cnt; int dst_cnt; int i; + ktime_t ktime; + s64 runtime = 0; + unsigned long long total_len = 0; set_freezable(); @@ -417,6 +443,7 @@ static int dmatest_func(void *data) */ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; + ktime = ktime_get(); while (!kthread_should_stop() && !(params->iterations && total_tests >= params->iterations)) { struct dma_async_tx_descriptor *tx = NULL; @@ -464,6 +491,7 @@ static int dmatest_func(void *data) len = (len >> align) << align; if (!len) len = 1 << align; + total_len += len; for (i = 0; i < src_cnt; i++) { u8 *buf = thread->srcs[i] + src_off; @@ -607,6 +635,7 @@ static int dmatest_func(void *data) len, 0); } } + runtime = ktime_us_delta(ktime_get(), ktime); ret = 0; for (i = 0; thread->dsts[i]; i++) @@ -621,8 +650,10 @@ err_srcbuf: err_srcs: kfree(pq_coefs); err_thread_type: - pr_info("%s: terminating after %u tests, %u failures (status %d)\n", - current->comm, total_tests, failed_tests, ret); + pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n", + current->comm, total_tests, failed_tests, + dmatest_persec(runtime, total_tests), + dmatest_KBs(runtime, total_len), ret); /* terminate all transfers on specified channels */ if (ret) From 2d88ce76eb98c4ac4411dcb299cf61ca8999d2b9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 6 Nov 2013 16:30:09 -0800 Subject: [PATCH 22/33] dmatest: add a 'wait' parameter Allows for scripting test runs by module load / unload. Prevent module load from returning until 'iterations' (finite) tests have completed, or cause reads of the 'wait' parameter in sysfs to pause until the tests are done. Also killed the local waitqueue since we can just let the thread exit naturally as long as we hold a reference. Cc: Nicolas Ferre Signed-off-by: Dan Williams --- Documentation/dmatest.txt | 27 +++++++++------ drivers/dma/dmatest.c | 72 +++++++++++++++++++++++++-------------- 2 files changed, 64 insertions(+), 35 deletions(-) diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt index 0beb4b68d81f..dd77a81bdb80 100644 --- a/Documentation/dmatest.txt +++ b/Documentation/dmatest.txt @@ -39,17 +39,24 @@ stops. Note that running a new test will not stop any in progress test. -The following command should return actual state of the test. - % cat /sys/kernel/debug/dmatest/run +The following command returns the state of the test. + % cat /sys/module/dmatest/parameters/run -To wait for test done the user may perform a busy loop that checks the state. +To wait for test completion userpace can poll 'run' until it is false, or use +the wait parameter. 
Specifying 'wait=1' when loading the module causes module +initialization to pause until a test run has completed, while reading +/sys/module/dmatest/parameters/wait waits for any running test to complete +before returning. For example, the following scripts wait for 42 tests +to complete before exiting. Note that if 'iterations' is set to 'infinite' then +waiting is disabled. - % while [ $(cat /sys/module/dmatest/parameters/run) = "Y" ] - > do - > echo -n "." - > sleep 1 - > done - > echo +Example: + % modprobe dmatest run=1 iterations=42 wait=1 + % modprobe -r dmatest +...or: + % modprobe dmatest run=1 iterations=42 + % cat /sys/module/dmatest/parameters/wait + % modprobe -r dmatest Part 3 - When built-in in the kernel... @@ -79,7 +86,7 @@ number of tests executed, number that failed, and a result code. Example: % dmesg | tail -n 1 - dmatest: dma3chan0-copy0: summary 400000 tests, 0 failures iops: 61524 KB/s 246098 (0) + dmatest: dma0chan0-copy0: summary 1 test, 0 failures 1000 iops 100000 KB/s (0) The details of a data miscompare error are also emitted, but do not follow the above format. diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 26b502069638..dd4d84d556d5 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -161,6 +161,43 @@ struct dmatest_chan { struct list_head threads; }; +static DECLARE_WAIT_QUEUE_HEAD(thread_wait); +static bool wait; + +static bool is_threaded_test_run(struct dmatest_info *info) +{ + struct dmatest_chan *dtc; + + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + list_for_each_entry(thread, &dtc->threads, node) { + if (!thread->done) + return true; + } + } + + return false; +} + +static int dmatest_wait_get(char *val, const struct kernel_param *kp) +{ + struct dmatest_info *info = &test_info; + struct dmatest_params *params = &info->params; + + if (params->iterations) + wait_event(thread_wait, !is_threaded_test_run(info)); + wait = true; + return param_get_bool(val, kp); +} + +static struct kernel_param_ops wait_ops = { + .get = dmatest_wait_get, + .set = param_set_bool, +}; +module_param_cb(wait, &wait_ops, &wait, S_IRUGO); +MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)"); + static bool dmatest_match_channel(struct dmatest_params *params, struct dma_chan *chan) { @@ -660,12 +697,7 @@ err_thread_type: dmaengine_terminate_all(chan); thread->done = true; - - if (params->iterations > 0) - while (!kthread_should_stop()) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); - interruptible_sleep_on(&wait_dmatest_exit); - } + wake_up(&thread_wait); return ret; } @@ -681,6 +713,7 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) pr_debug("thread %s exited with status %d\n", thread->task->comm, ret); list_del(&thread->node); + put_task_struct(thread->task); kfree(thread); } @@ -719,18 +752,19 @@ static int dmatest_add_threads(struct dmatest_info *info, thread->chan = dtc->chan; thread->type = type; smp_wmb(); - thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", + thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", dma_chan_name(chan), op, i); if (IS_ERR(thread->task)) { - pr_warn("Failed to run thread %s-%s%u\n", + pr_warn("Failed to create thread %s-%s%u\n", dma_chan_name(chan), op, i); kfree(thread); break; } /* srcbuf and dstbuf are allocated by the thread itself */ - + get_task_struct(thread->task); list_add_tail(&thread->node, &dtc->threads); + wake_up_process(thread->task); } return i; @@ -863,22 +897,6 @@ static void 
restart_threaded_test(struct dmatest_info *info, bool run) run_threaded_test(info); } -static bool is_threaded_test_run(struct dmatest_info *info) -{ - struct dmatest_chan *dtc; - - list_for_each_entry(dtc, &info->channels, node) { - struct dmatest_thread *thread; - - list_for_each_entry(thread, &dtc->threads, node) { - if (!thread->done) - return true; - } - } - - return false; -} - static int dmatest_run_get(char *val, const struct kernel_param *kp) { struct dmatest_info *info = &test_info; @@ -920,6 +938,7 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) static int __init dmatest_init(void) { struct dmatest_info *info = &test_info; + struct dmatest_params *params = &info->params; if (dmatest_run) { mutex_lock(&info->lock); @@ -927,6 +946,9 @@ static int __init dmatest_init(void) mutex_unlock(&info->lock); } + if (params->iterations && wait) + wait_event(thread_wait, !is_threaded_test_run(info)); + /* module parameters are stable, inittime tests are started, * let userspace take over 'run' control */ From 4076e755dbec078c85352a8f77cec4c10181da4e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 6 Nov 2013 16:30:10 -0800 Subject: [PATCH 23/33] dmatest: convert to dmaengine_unmap_data Remove the open coded unmap and add coverage for this core functionality to dmatest. Also fixes up a couple places where we leaked dma mappings. Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 86 ++++++++++++++++++++++--------------------- 1 file changed, 44 insertions(+), 42 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index dd4d84d556d5..0d050d2324e3 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -326,20 +326,6 @@ static void dmatest_callback(void *arg) wake_up_all(done->wait); } -static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len, - unsigned int count) -{ - while (count--) - dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE); -} - -static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, - unsigned int count) -{ - while (count--) - dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); -} - static unsigned int min_odd(unsigned int x, unsigned int y) { unsigned int val = min(x, y); @@ -484,8 +470,9 @@ static int dmatest_func(void *data) while (!kthread_should_stop() && !(params->iterations && total_tests >= params->iterations)) { struct dma_async_tx_descriptor *tx = NULL; - dma_addr_t dma_srcs[src_cnt]; - dma_addr_t dma_dsts[dst_cnt]; + struct dmaengine_unmap_data *um; + dma_addr_t srcs[src_cnt]; + dma_addr_t *dsts; u8 align = 0; total_tests++; @@ -530,61 +517,75 @@ static int dmatest_func(void *data) len = 1 << align; total_len += len; - for (i = 0; i < src_cnt; i++) { - u8 *buf = thread->srcs[i] + src_off; + um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt, + GFP_KERNEL); + if (!um) { + failed_tests++; + result("unmap data NULL", total_tests, + src_off, dst_off, len, ret); + continue; + } - dma_srcs[i] = dma_map_single(dev->dev, buf, len, - DMA_TO_DEVICE); - ret = dma_mapping_error(dev->dev, dma_srcs[i]); + um->len = params->buf_size; + for (i = 0; i < src_cnt; i++) { + unsigned long buf = (unsigned long) thread->srcs[i]; + struct page *pg = virt_to_page(buf); + unsigned pg_off = buf & ~PAGE_MASK; + + um->addr[i] = dma_map_page(dev->dev, pg, pg_off, + um->len, DMA_TO_DEVICE); + srcs[i] = um->addr[i] + src_off; + ret = dma_mapping_error(dev->dev, um->addr[i]); if (ret) { - unmap_src(dev->dev, dma_srcs, len, i); + dmaengine_unmap_put(um); result("src 
mapping error", total_tests, src_off, dst_off, len, ret); failed_tests++; continue; } + um->to_cnt++; } /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ + dsts = &um->addr[src_cnt]; for (i = 0; i < dst_cnt; i++) { - dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], - params->buf_size, - DMA_BIDIRECTIONAL); - ret = dma_mapping_error(dev->dev, dma_dsts[i]); + unsigned long buf = (unsigned long) thread->dsts[i]; + struct page *pg = virt_to_page(buf); + unsigned pg_off = buf & ~PAGE_MASK; + + dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, + DMA_BIDIRECTIONAL); + ret = dma_mapping_error(dev->dev, dsts[i]); if (ret) { - unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, params->buf_size, - i); + dmaengine_unmap_put(um); result("dst mapping error", total_tests, src_off, dst_off, len, ret); failed_tests++; continue; } + um->bidi_cnt++; } if (thread->type == DMA_MEMCPY) tx = dev->device_prep_dma_memcpy(chan, - dma_dsts[0] + dst_off, - dma_srcs[0], len, - flags); + dsts[0] + dst_off, + srcs[0], len, flags); else if (thread->type == DMA_XOR) tx = dev->device_prep_dma_xor(chan, - dma_dsts[0] + dst_off, - dma_srcs, src_cnt, + dsts[0] + dst_off, + srcs, src_cnt, len, flags); else if (thread->type == DMA_PQ) { dma_addr_t dma_pq[dst_cnt]; for (i = 0; i < dst_cnt; i++) - dma_pq[i] = dma_dsts[i] + dst_off; - tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, + dma_pq[i] = dsts[i] + dst_off; + tx = dev->device_prep_dma_pq(chan, dma_pq, srcs, src_cnt, pq_coefs, len, flags); } if (!tx) { - unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, params->buf_size, - dst_cnt); + dmaengine_unmap_put(um); result("prep error", total_tests, src_off, dst_off, len, ret); msleep(100); @@ -598,6 +599,7 @@ static int dmatest_func(void *data) cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { + dmaengine_unmap_put(um); result("submit error", total_tests, src_off, dst_off, len, ret); msleep(100); @@ -620,11 +622,13 @@ static int dmatest_func(void *data) * free it this time?" dancing. For now, just * leave it dangling. */ + dmaengine_unmap_put(um); result("test timed out", total_tests, src_off, dst_off, len, 0); failed_tests++; continue; } else if (status != DMA_SUCCESS) { + dmaengine_unmap_put(um); result(status == DMA_ERROR ? "completion error status" : "completion busy status", total_tests, src_off, @@ -633,9 +637,7 @@ static int dmatest_func(void *data) continue; } - /* Unmap by myself */ - unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); + dmaengine_unmap_put(um); if (params->noverify) { dbg_result("test passed", total_tests, src_off, dst_off, From 50137a7df982f3767fe0b3b0cd0b9cfaf09c2cd9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 8 Nov 2013 12:26:26 -0800 Subject: [PATCH 24/33] dmatest: verbose mode Verbose mode turns on test success messages, by default we only output test summaries and failure results. Also cleaned up some stray quotes, leftover from putting the result message format string all on one line. 
Cc: Andy Shevchenko Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 0d050d2324e3..329b7cf02f8e 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -70,6 +70,10 @@ static bool noverify; module_param(noverify, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(noverify, "Disable random data setup and verification"); +static bool verbose; +module_param(verbose, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)"); + /** * struct dmatest_params - test parameters. * @buf_size: size of the memcpy test buffer @@ -336,7 +340,7 @@ static unsigned int min_odd(unsigned int x, unsigned int y) static void result(const char *err, unsigned int n, unsigned int src_off, unsigned int dst_off, unsigned int len, unsigned long data) { - pr_info("%s: result #%u: '%s' with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", + pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)", current->comm, n, err, src_off, dst_off, len, data); } @@ -344,10 +348,17 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off, unsigned int dst_off, unsigned int len, unsigned long data) { - pr_debug("%s: result #%u: '%s' with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", - current->comm, n, err, src_off, dst_off, len, data); + pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)", + current->comm, n, err, src_off, dst_off, len, data); } +#define verbose_result(err, n, src_off, dst_off, len, data) ({ \ + if (verbose) \ + result(err, n, src_off, dst_off, len, data); \ + else \ + dbg_result(err, n, src_off, dst_off, len, data); \ +}) + static unsigned long long dmatest_persec(s64 runtime, unsigned int val) { unsigned long long per_sec = 1000000; @@ -640,8 +651,8 @@ static int dmatest_func(void *data) dmaengine_unmap_put(um); if (params->noverify) { - dbg_result("test passed", total_tests, src_off, dst_off, - len, 0); + verbose_result("test passed", total_tests, src_off, + dst_off, len, 0); continue; } @@ -670,8 +681,8 @@ static int dmatest_func(void *data) len, error_count); failed_tests++; } else { - dbg_result("test passed", total_tests, src_off, dst_off, - len, 0); + verbose_result("test passed", total_tests, src_off, + dst_off, len, 0); } } runtime = ktime_us_delta(ktime_get(), ktime); From ac7d631f7d9f9e4e6116c4a72b6308067d0a2226 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 6 Nov 2013 08:50:09 -0700 Subject: [PATCH 25/33] ioatdma: Fix bug in selftest after removal of DMA_MEMSET. Commit 48a9db4 (3.11) removed the memset op in the xor selftest for ioatdma. The issue is that with the removal of that op, it never replaced the memset with a CPU memset. The memory being operated on is expected to be zeroes but was not. This is causing the xor selftest to fail. 
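In other words, the zero-sum check in the xor-validate selftest assumes the destination page still holds zeroes when the validate operation runs; with the DMA_MEMSET op gone, a CPU-side clear of the page (see the hunk below) restores that precondition.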
Cc: Signed-off-by: Dave Jiang Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v3.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index a4798f0cc225..44786ad2a4cf 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -1376,6 +1376,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) goto free_resources; } + memset(page_address(dest), 0, PAGE_SIZE); + /* test for non-zero parity sum */ op = IOAT_OP_XOR_VAL; From 5d48b9b5d80e3aa38a5161565398b1e48a650573 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 13 Nov 2013 10:15:42 -0800 Subject: [PATCH 26/33] ioatdma: fix sed pool selection The array to lookup the sed pool based on the number of sources (pq16_idx_to_sedi) is 16 entries and expects a max source index. However, we pass the total source count which runs off the end of the array when src_cnt == 16. The minimal fix is to just pass src_cnt-1, but given we know the source count is > 8 we can just calculate the sed pool by (src_cnt - 2) >> 3. Cc: Dave Jiang Cc: Acked-by: Dave Jiang Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v3.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 44786ad2a4cf..f26a35d43ba1 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -87,13 +87,6 @@ static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6 }; -/* - * technically sources 1 and 2 do not require SED, but the op will have - * at least 9 descriptors so that's irrelevant. - */ -static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 1, 1, 1 }; - static void ioat3_eh(struct ioat2_dma_chan *ioat); static void xor_set_src(struct ioat_raw_descriptor *descs[2], @@ -128,12 +121,6 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2], pq->coef[idx] = coef; } -static int sed_get_pq16_pool_idx(int src_cnt) -{ - - return pq16_idx_to_sed[src_cnt]; -} - static bool is_jf_ioat(struct pci_dev *pdev) { switch (pdev->device) { @@ -994,8 +981,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, descs[0] = (struct ioat_raw_descriptor *) pq; - desc->sed = ioat3_alloc_sed(device, - sed_get_pq16_pool_idx(src_cnt)); + desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3); if (!desc->sed) { dev_err(to_dev(chan), "%s: no free sed entries\n", __func__); From 21e96c7313486390c694919522a76dfea0a86c59 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 13 Nov 2013 10:37:36 -0800 Subject: [PATCH 27/33] ioatdma: fix selection of 16 vs 8 source path When performing continuations there are implied sources that need to be added to the source count. Quoting dma_set_maxpq: /* dma_maxpq - reduce maxpq in the face of continued operations * @dma - dma device with PQ capability * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set * * When an engine does not support native continuation we need 3 extra * source slots to reuse P and Q with the following coefficients: * 1/ {00} * P : remove P from Q', but use it as a source for P' * 2/ {01} * Q : use Q to continue Q' calculation * 3/ {00} * Q : subtract Q from P' to cancel (2) * * In the case where P is disabled we only need 1 extra source: * 1/ {01} * Q : use Q to continue Q' calculation */ ...fix the selection of the 16 source path to take these implied sources into account. 
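As a worked example (illustrative numbers): a continued P+Q operation with 7 explicit sources carries 3 implied sources ({00}*P, {01}*Q, {00}*Q), 10 in total, so it must take the 16-source path even though the bare source count would fit the 8-source path. The src_cnt_flags() helper added below captures this accounting.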
Note this also kills the BUG_ON(src_cnt < 9) check in __ioat3_prep_pq16_lock(). Besides not accounting for implied sources the check is redundant given we already made the path selection. Cc: Cc: Dave Jiang Acked-by: Dave Jiang Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v3.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index f26a35d43ba1..0284908997f8 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -951,9 +951,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, u8 op; int i, s, idx, num_descs; - /* this function only handles src_cnt 9 - 16 */ - BUG_ON(src_cnt < 9); - /* this function is only called with 9-16 sources */ op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; @@ -1039,13 +1036,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, return &desc->txd; } +static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) +{ + if (dmaf_p_disabled_continue(flags)) + return src_cnt + 1; + else if (dmaf_continue(flags)) + return src_cnt + 3; + else + return src_cnt; +} + static struct dma_async_tx_descriptor * ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, unsigned long flags) { - struct dma_device *dma = chan->device; - /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) dst[0] = dst[1]; @@ -1065,7 +1070,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, single_source_coef[0] = scf[0]; single_source_coef[1] = 0; - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, 2, single_source_coef, len, flags) : @@ -1073,7 +1078,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, single_source_coef, len, flags); } else { - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, scf, len, flags) : __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, @@ -1086,8 +1091,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, enum sum_check_flags *pqres, unsigned long flags) { - struct dma_device *dma = chan->device; - /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) pq[0] = pq[1]; @@ -1099,7 +1102,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, */ *pqres = 0; - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, flags) : __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, @@ -1110,7 +1113,6 @@ static struct dma_async_tx_descriptor * ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { - struct dma_device *dma = chan->device; unsigned char scf[src_cnt]; dma_addr_t pq[2]; @@ -1119,7 +1121,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = dst; /* specify valid address for disabled result */ - return (src_cnt > 8) && (dma->max_pq > 8) ? + return src_cnt_flags(src_cnt, flags) > 8 ? 
Cc:
Cc: Dave Jiang
Acked-by: Dave Jiang
Signed-off-by: Dan Williams
---
 drivers/dma/ioat/dma_v3.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index f26a35d43ba1..0284908997f8 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -951,9 +951,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
 	u8 op;
 	int i, s, idx, num_descs;

-	/* this function only handles src_cnt 9 - 16 */
-	BUG_ON(src_cnt < 9);
-
 	/* this function is only called with 9-16 sources */
 	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;

@@ -1039,13 +1036,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
 	return &desc->txd;
 }

+static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
+{
+	if (dmaf_p_disabled_continue(flags))
+		return src_cnt + 1;
+	else if (dmaf_continue(flags))
+		return src_cnt + 3;
+	else
+		return src_cnt;
+}
+
 static struct dma_async_tx_descriptor *
 ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 	      unsigned int src_cnt, const unsigned char *scf, size_t len,
 	      unsigned long flags)
 {
-	struct dma_device *dma = chan->device;
-
 	/* specify valid address for disabled result */
 	if (flags & DMA_PREP_PQ_DISABLE_P)
 		dst[0] = dst[1];
@@ -1065,7 +1070,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 		single_source_coef[0] = scf[0];
 		single_source_coef[1] = 0;

-		return (src_cnt > 8) && (dma->max_pq > 8) ?
+		return src_cnt_flags(src_cnt, flags) > 8 ?
 			__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
 					       2, single_source_coef, len,
 					       flags) :
@@ -1073,7 +1078,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 					     single_source_coef, len, flags);

 	} else {
-		return (src_cnt > 8) && (dma->max_pq > 8) ?
+		return src_cnt_flags(src_cnt, flags) > 8 ?
 			__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
 					       scf, len, flags) :
 			__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
@@ -1086,8 +1091,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		  unsigned int src_cnt, const unsigned char *scf, size_t len,
 		  enum sum_check_flags *pqres, unsigned long flags)
 {
-	struct dma_device *dma = chan->device;
-
 	/* specify valid address for disabled result */
 	if (flags & DMA_PREP_PQ_DISABLE_P)
 		pq[0] = pq[1];
@@ -1099,7 +1102,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 	 */
 	*pqres = 0;

-	return (src_cnt > 8) && (dma->max_pq > 8) ?
+	return src_cnt_flags(src_cnt, flags) > 8 ?
 		__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
 				       flags) :
 		__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
@@ -1110,7 +1113,6 @@ static struct dma_async_tx_descriptor *
 ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 		 unsigned int src_cnt, size_t len, unsigned long flags)
 {
-	struct dma_device *dma = chan->device;
 	unsigned char scf[src_cnt];
 	dma_addr_t pq[2];

@@ -1119,7 +1121,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 	flags |= DMA_PREP_PQ_DISABLE_Q;
 	pq[1] = dst; /* specify valid address for disabled result */

-	return (src_cnt > 8) && (dma->max_pq > 8) ?
+	return src_cnt_flags(src_cnt, flags) > 8 ?
 		__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
 				       flags) :
 		__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
@@ -1131,7 +1133,6 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 		     unsigned int src_cnt, size_t len,
 		     enum sum_check_flags *result, unsigned long flags)
 {
-	struct dma_device *dma = chan->device;
 	unsigned char scf[src_cnt];
 	dma_addr_t pq[2];

@@ -1145,8 +1146,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 	flags |= DMA_PREP_PQ_DISABLE_Q;
 	pq[1] = pq[0]; /* specify valid address for disabled result */

-
-	return (src_cnt > 8) && (dma->max_pq > 8) ?
+	return src_cnt_flags(src_cnt, flags) > 8 ?
 		__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
 				       scf, len, flags) :
 		__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,

From 59056e85d7dd337674c65d9dac65008cb46a98cd Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Wed, 13 Nov 2013 10:57:18 -0800
Subject: [PATCH 28/33] ioatdma: clean up sed pool kmem_cache

Use a single cache for all sed allocations.  There is no need to make
it per channel.  This also avoids the slub_debug warnings for multiple
caches with the same name.

Switching to dmam_pool_create() fixes leaking the dma pools on
initialization failure and lets us kill ioat3_dma_remove().
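The devres idiom this switches to, as a minimal kernel-context sketch
(the probe function and the pool name/size/align parameters here are
hypothetical, not the driver's):

	#include <linux/dmapool.h>

	static int example_probe(struct device *dev)	/* hypothetical */
	{
		struct dma_pool *pool;

		/* managed: devres destroys the pool on probe failure or unbind */
		pool = dmam_pool_create("example_sed", dev, 64, 64, 0);
		if (!pool)
			return -ENOMEM;	/* no unwind needed; devres owns it */

		/* ... allocate from 'pool'; never call dma_pool_destroy() ... */
		return 0;
	}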
Cc: Dave Jiang
Acked-by: Dave Jiang
Signed-off-by: Dan Williams
---
 drivers/dma/ioat/dma.h    |  1 -
 drivers/dma/ioat/dma_v2.h |  1 -
 drivers/dma/ioat/dma_v3.c | 42 +++++++--------------------------------
 drivers/dma/ioat/pci.c    | 20 ++++++++++++++-----
 4 files changed, 22 insertions(+), 42 deletions(-)

diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 4300d5af188f..df552d841481 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -83,7 +83,6 @@ struct ioatdma_device {
 	struct pci_pool *completion_pool;
 #define MAX_SED_POOLS	5
 	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
-	struct kmem_cache *sed_pool;
 	struct dma_device common;
 	u8 version;
 	struct msix_entry msix_entries[4];

diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 212d584fe427..470292767e68 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -157,7 +157,6 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)

 int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
 int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
-void ioat3_dma_remove(struct ioatdma_device *dev);
 struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);

diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 0284908997f8..f8170de1d6a4 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -67,6 +67,8 @@
 #include "dma.h"
 #include "dma_v2.h"

+extern struct kmem_cache *ioat3_sed_cache;
+
 /* ioat hardware assumes at least two sources for raid operations */
 #define src_cnt_to_sw(x) ((x) + 2)
 #define src_cnt_to_hw(x) ((x) - 2)
@@ -252,7 +254,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
 	struct ioat_sed_ent *sed;
 	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;

-	sed = kmem_cache_alloc(device->sed_pool, flags);
+	sed = kmem_cache_alloc(ioat3_sed_cache, flags);
 	if (!sed)
 		return NULL;

@@ -260,7 +262,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
 	sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
 				 flags, &sed->dma);
 	if (!sed->hw) {
-		kmem_cache_free(device->sed_pool, sed);
+		kmem_cache_free(ioat3_sed_cache, sed);
 		return NULL;
 	}

@@ -273,7 +275,7 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *s
 		return;

 	dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
-	kmem_cache_free(device->sed_pool, sed);
+	kmem_cache_free(ioat3_sed_cache, sed);
 }

 static bool desc_has_ext(struct ioat_ring_ent *desc)
@@ -1652,21 +1654,15 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		char pool_name[14];
 		int i;

-		/* allocate sw descriptor pool for SED */
-		device->sed_pool = kmem_cache_create("ioat_sed",
-				sizeof(struct ioat_sed_ent), 0, 0, NULL);
-		if (!device->sed_pool)
-			return -ENOMEM;
-
 		for (i = 0; i < MAX_SED_POOLS; i++) {
 			snprintf(pool_name, 14, "ioat_hw%d_sed", i);

 			/* allocate SED DMA pool */
-			device->sed_hw_pool[i] = dma_pool_create(pool_name,
+			device->sed_hw_pool[i] = dmam_pool_create(pool_name,
 					&pdev->dev,
 					SED_SIZE * (i + 1), 64, 0);
 			if (!device->sed_hw_pool[i])
-				goto sed_pool_cleanup;
+				return -ENOMEM;

 		}
 	}
@@ -1692,28 +1688,4 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		device->dca = ioat3_dca_init(pdev, device->reg_base);

 	return 0;
-
-sed_pool_cleanup:
-	if (device->sed_pool) {
-		int i;
-		kmem_cache_destroy(device->sed_pool);
-
-		for (i = 0; i < MAX_SED_POOLS; i++)
-			if (device->sed_hw_pool[i])
-				dma_pool_destroy(device->sed_hw_pool[i]);
-	}
-
-	return -ENOMEM;
-}
-
-void ioat3_dma_remove(struct ioatdma_device *device)
-{
-	if (device->sed_pool) {
-		int i;
-		kmem_cache_destroy(device->sed_pool);
-
-		for (i = 0; i < MAX_SED_POOLS; i++)
-			if (device->sed_hw_pool[i])
-				dma_pool_destroy(device->sed_hw_pool[i]);
-	}
 }

diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 2c8d560e6334..1d051cd045db 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -123,6 +123,7 @@ module_param(ioat_dca_enabled, int, 0644);
 MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");

 struct kmem_cache *ioat2_cache;
+struct kmem_cache *ioat3_sed_cache;

 #define DRV_NAME "ioatdma"

@@ -207,9 +208,6 @@ static void ioat_remove(struct pci_dev *pdev)
 	if (!device)
 		return;

-	if (device->version >= IOAT_VER_3_0)
-		ioat3_dma_remove(device);
-
 	dev_err(&pdev->dev, "Removing dma and dca services\n");
 	if (device->dca) {
 		unregister_dca_provider(device->dca, &pdev->dev);
@@ -221,7 +219,7 @@ static void ioat_remove(struct pci_dev *pdev)

 static int __init ioat_init_module(void)
 {
-	int err;
+	int err = -ENOMEM;

 	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
 		DRV_NAME, IOAT_DMA_VERSION);
@@ -231,9 +229,21 @@ static int __init ioat_init_module(void)
 	if (!ioat2_cache)
 		return -ENOMEM;

+	ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
+	if (!ioat3_sed_cache)
+		goto err_ioat2_cache;
+
 	err = pci_register_driver(&ioat_pci_driver);
 	if (err)
-		kmem_cache_destroy(ioat2_cache);
+		goto err_ioat3_cache;
+
+	return 0;
+
+ err_ioat3_cache:
+	kmem_cache_destroy(ioat3_sed_cache);
+
+ err_ioat2_cache:
+	kmem_cache_destroy(ioat2_cache);

 	return err;
 }

From 09ec0f583f40bbecdf011b504dda9c1160fe0277 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Wed, 13 Nov 2013 11:22:26 -0800
Subject: [PATCH 29/33] raid6test: add new corner case for ioatdma driver

With 24 disks and an ioatdma instance with 16 source support there is a
corner case where the driver needs to be careful to account for the
number of implied sources in the continuation case.

Also bump the default case to test more than 16 sources now that it
triggers different paths in offload drivers.
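The arithmetic behind the 24-disk boundary, as a back-of-the-envelope
check (assumes maxpq = 16, no native continuation, and that async_tx
fills the first operation with a full 16 sources):

	/*
	 *   NDISKS = 24      ->  22 data sources (P and Q excluded)
	 *   first op         ->  16 explicit sources, no continuation flags
	 *   continuation op  ->  22 - 16 = 6 explicit sources
	 *                        + 3 implied sources = 9 > 8
	 */

So the continuation itself must take the 16-source path, which is
exactly the case the two preceding patches fixed.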
Cc: Dave Jiang
Acked-by: Dave Jiang
Signed-off-by: Dan Williams
---
 crypto/async_tx/raid6test.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index 4a92bac744dc..dad95f45b88f 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -28,7 +28,7 @@
 #undef pr
 #define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)

-#define NDISKS 16 /* Including P and Q */
+#define NDISKS 64 /* Including P and Q */

 static struct page *dataptrs[NDISKS];
 static addr_conv_t addr_conv[NDISKS];
@@ -219,6 +219,14 @@ static int raid6_test(void)
 		err += test(11, &tests);
 		err += test(12, &tests);
 	}
+
+	/* the 24 disk case is special for ioatdma as it is the boundary point
+	 * at which it needs to switch from 8-source ops to 16-source
+	 * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
+	 */
+	if (NDISKS > 24)
+		err += test(24, &tests);
+
 	err += test(NDISKS, &tests);

 	pr("\n");

From 4c5d9619e06b960d14f5640341f40e71f78801c2 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Wed, 13 Nov 2013 16:29:52 -0800
Subject: [PATCH 30/33] ioat: kill msix_single_vector support

Once we have determined that we will not get all of our desired msix
vectors there is no point in attempting a single msix allocation. The
driver will already need to read registers to determine the source of
the interrupt, so the fact that it is msix is moot. Fall back directly
to msi.
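For reference, the probe-time fallback is now strictly linear (behavior
as described above, sketched, not new code):

	msix (one vector per channel)
	  -> msi  (single vector; the handler reads device registers to
	           find the interrupting channel, so nothing is lost)
	  -> intx (legacy line interrupt)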
Reported-by: Alexander Gordeev
Cc: Dave Jiang
Signed-off-by: Dan Williams
---
 drivers/dma/ioat/dma.c    | 27 +++------------------------
 drivers/dma/ioat/dma.h    |  1 -
 drivers/dma/ioat/dma_v3.c |  7 -------
 3 files changed, 3 insertions(+), 32 deletions(-)

diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 6fcf741ad91b..fb879d9f026f 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -869,8 +869,7 @@ static char ioat_interrupt_style[32] = "msix";
 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
 		    sizeof(ioat_interrupt_style), 0644);
 MODULE_PARM_DESC(ioat_interrupt_style,
-		 "set ioat interrupt style: msix (default), "
-		 "msix-single-vector, msi, intx)");
+		 "set ioat interrupt style: msix (default), msi, intx");

 /**
  * ioat_dma_setup_interrupts - setup interrupt handler
@@ -888,8 +887,6 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device)

 	if (!strcmp(ioat_interrupt_style, "msix"))
 		goto msix;
-	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
-		goto msix_single_vector;
 	if (!strcmp(ioat_interrupt_style, "msi"))
 		goto msi;
 	if (!strcmp(ioat_interrupt_style, "intx"))
@@ -904,10 +901,8 @@ msix:
 		device->msix_entries[i].entry = i;

 	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
-	if (err < 0)
+	if (err)
 		goto msi;
-	if (err > 0)
-		goto msix_single_vector;

 	for (i = 0; i < msixcnt; i++) {
 		msix = &device->msix_entries[i];
@@ -921,29 +916,13 @@ msix:
 				chan = ioat_chan_by_index(device, j);
 				devm_free_irq(dev, msix->vector, chan);
 			}
-			goto msix_single_vector;
+			goto msi;
 		}
 	}
 	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
 	device->irq_mode = IOAT_MSIX;
 	goto done;

-msix_single_vector:
-	msix = &device->msix_entries[0];
-	msix->entry = 0;
-	err = pci_enable_msix(pdev, device->msix_entries, 1);
-	if (err)
-		goto msi;
-
-	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
-			       "ioat-msix", device);
-	if (err) {
-		pci_disable_msix(pdev);
-		goto msi;
-	}
-	device->irq_mode = IOAT_MSIX_SINGLE;
-	goto done;
-
 msi:
 	err = pci_enable_msi(pdev);
 	if (err)

diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index df552d841481..11fb877ddca9 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -52,7 +52,6 @@
 enum ioat_irq_mode {
 	IOAT_NOIRQ = 0,
 	IOAT_MSIX,
-	IOAT_MSIX_SINGLE,
 	IOAT_MSI,
 	IOAT_INTX
 };

diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index f8170de1d6a4..38616634780f 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1468,13 +1468,6 @@ static int ioat3_irq_reinit(struct ioatdma_device *device)
 		pci_disable_msix(pdev);
 		break;

-	case IOAT_MSIX_SINGLE:
-		msix = &device->msix_entries[0];
-		chan = ioat_chan_by_index(device, 0);
-		devm_free_irq(&pdev->dev, msix->vector, chan);
-		pci_disable_msix(pdev);
-		break;
-
 	case IOAT_MSI:
 		chan = ioat_chan_by_index(device, 0);
 		devm_free_irq(&pdev->dev, pdev->irq, chan);

From 779e561ae2627727ea3d797a7db2496e8bae3430 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Wed, 13 Nov 2013 16:30:43 -0800
Subject: [PATCH 31/33] ioat: fix ioat3_irq_reinit

The implementation of ioat3_irq_reinit has two bugs:
1/ The mode is incorrectly set to MSIX for the MSI case
2/ The 'dev_id' parameter to free_irq is the ioatdma_device, not the
   channel, in the msi and intx case

Include a small cleanup to clarify that ioat3_irq_reinit is only for
bwd hardware.
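On bug 2/: devm_free_irq() must be handed the same 'dev_id' cookie that
was passed to devm_request_irq(), since that cookie is how the irq
action is looked up. A minimal fragment (context and error handling
elided; it mirrors the driver's msi/intx request, which uses the
ioatdma_device as the cookie):

	/* request: the cookie is 'device', not a channel */
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       0 /* flags */, "ioat-msi", device);
	/* ... */
	/* release: must pass the same cookie, hence free with 'device' */
	devm_free_irq(dev, pdev->irq, device);

Freeing with a channel pointer, as the old code did, silently fails to
release the handler.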
Cc: Dave Jiang
Signed-off-by: Dan Williams
---
 drivers/dma/ioat/dma.c    |  2 +-
 drivers/dma/ioat/dma_v3.c | 39 ++++++++++++++-------------------------
 2 files changed, 15 insertions(+), 26 deletions(-)

diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index fb879d9f026f..4f67473f3b81 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -934,7 +934,7 @@ msi:
 		pci_disable_msi(pdev);
 		goto intx;
 	}
-	device->irq_mode = IOAT_MSIX;
+	device->irq_mode = IOAT_MSI;
 	goto done;

 intx:

diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 38616634780f..915b1f66f2da 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1449,45 +1449,36 @@ static int ioat3_dma_self_test(struct ioatdma_device *device)

 static int ioat3_irq_reinit(struct ioatdma_device *device)
 {
-	int msixcnt = device->common.chancnt;
 	struct pci_dev *pdev = device->pdev;
-	int i;
-	struct msix_entry *msix;
-	struct ioat_chan_common *chan;
-	int err = 0;
+	int irq = pdev->irq, i;
+
+	if (!is_bwd_ioat(pdev))
+		return 0;

 	switch (device->irq_mode) {
 	case IOAT_MSIX:
+		for (i = 0; i < device->common.chancnt; i++) {
+			struct msix_entry *msix = &device->msix_entries[i];
+			struct ioat_chan_common *chan;

-		for (i = 0; i < msixcnt; i++) {
-			msix = &device->msix_entries[i];
 			chan = ioat_chan_by_index(device, i);
 			devm_free_irq(&pdev->dev, msix->vector, chan);
 		}

 		pci_disable_msix(pdev);
 		break;
-
 	case IOAT_MSI:
-		chan = ioat_chan_by_index(device, 0);
-		devm_free_irq(&pdev->dev, pdev->irq, chan);
 		pci_disable_msi(pdev);
-		break;
-
+		/* fall through */
 	case IOAT_INTX:
-		chan = ioat_chan_by_index(device, 0);
-		devm_free_irq(&pdev->dev, pdev->irq, chan);
+		devm_free_irq(&pdev->dev, irq, device);
 		break;
-
 	default:
 		return 0;
 	}
-
 	device->irq_mode = IOAT_NOIRQ;

-	err = ioat_dma_setup_interrupts(device);
-
-	return err;
+	return ioat_dma_setup_interrupts(device);
 }

 static int ioat3_reset_hw(struct ioat_chan_common *chan)
@@ -1530,14 +1521,12 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
 	}

 	err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
-	if (err) {
-		dev_err(&pdev->dev, "Failed to reset!\n");
-		return err;
-	}
-
-	if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
+	if (!err)
 		err = ioat3_irq_reinit(device);

+	if (err)
+		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
+
 	return err;
 }

From a911ddc9a0ecbf77a8b2e78dc5c40e5b7bb40d24 Mon Sep 17 00:00:00 2001
From: Ezequiel Garcia
Date: Wed, 30 Oct 2013 12:01:42 -0300
Subject: [PATCH 32/33] dma: mv_xor: Remove unneeded NULL address check

This mmio address is checked at probe-time, which makes this test
redundant. Let's just remove it.

Acked-by: Thomas Petazzoni
Signed-off-by: Ezequiel Garcia
Signed-off-by: Dan Williams
---
 drivers/dma/mv_xor.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 17326e780e23..8ff21b15ad1c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1035,10 +1035,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	}

 	mv_chan->mmr_base = xordev->xor_base;
-	if (!mv_chan->mmr_base) {
-		ret = -ENOMEM;
-		goto err_free_dma;
-	}

 	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
 		     mv_chan);

From 82a1402eaee5dab1f3ab2d5aa4c316451374c5af Mon Sep 17 00:00:00 2001
From: Ezequiel Garcia
Date: Wed, 30 Oct 2013 12:01:43 -0300
Subject: [PATCH 33/33] dma: mv_xor: Fix mis-usage of mmio 'base' and 'high_base' registers

Despite requesting two memory resources, called 'base' and 'high_base',
the driver explicitly uses only the former. The latter is used
implicitly by addressing at offset +0x200, which in practice accesses
high_base. In other words, the current driver breaks if the second
memory resource is ever placed at an offset other than +0x200.

This patch fixes the above by defining the registers with their offset
from high_base, and by using high_base explicitly where appropriate.
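To make the equivalence concrete, compare one register definition before
and after (both lines taken from the diff below; illustration only):

	/* before: reaches high_base only because it happens to be base + 0x200 */
	#define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))
	/* after: addresses the second resource explicitly, at its own offset 0 */
	#define XOR_NEXT_DESC(chan)	(chan->mmr_high_base + 0x00 + (chan->idx * 4))

The two expand to the same address precisely when mmr_high_base equals
mmr_base + 0x200, which is the hidden assumption this patch removes.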
Signed-off-by: Ezequiel Garcia
Acked-by: Thomas Petazzoni
Signed-off-by: Dan Williams
---
 drivers/dma/mv_xor.c |  3 ++-
 drivers/dma/mv_xor.h | 25 +++++++++++++------------
 2 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 8ff21b15ad1c..1b846d5d8408 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1035,6 +1035,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	}

 	mv_chan->mmr_base = xordev->xor_base;
+	mv_chan->mmr_high_base = xordev->xor_high_base;

 	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
 		     mv_chan);
@@ -1093,7 +1094,7 @@ static void
 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
 			 const struct mbus_dram_target_info *dram)
 {
-	void __iomem *base = xordev->xor_base;
+	void __iomem *base = xordev->xor_high_base;
 	u32 win_enable = 0;
 	int i;

diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 06b067f24c9b..d0749229c875 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -34,13 +34,13 @@
 #define XOR_OPERATION_MODE_MEMCPY	2
 #define XOR_DESCRIPTOR_SWAP		BIT(14)

-#define XOR_CURR_DESC(chan)	(chan->mmr_base + 0x210 + (chan->idx * 4))
-#define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))
-#define XOR_BYTE_COUNT(chan)	(chan->mmr_base + 0x220 + (chan->idx * 4))
-#define XOR_DEST_POINTER(chan)	(chan->mmr_base + 0x2B0 + (chan->idx * 4))
-#define XOR_BLOCK_SIZE(chan)	(chan->mmr_base + 0x2C0 + (chan->idx * 4))
-#define XOR_INIT_VALUE_LOW(chan)	(chan->mmr_base + 0x2E0)
-#define XOR_INIT_VALUE_HIGH(chan)	(chan->mmr_base + 0x2E4)
+#define XOR_CURR_DESC(chan)	(chan->mmr_high_base + 0x10 + (chan->idx * 4))
+#define XOR_NEXT_DESC(chan)	(chan->mmr_high_base + 0x00 + (chan->idx * 4))
+#define XOR_BYTE_COUNT(chan)	(chan->mmr_high_base + 0x20 + (chan->idx * 4))
+#define XOR_DEST_POINTER(chan)	(chan->mmr_high_base + 0xB0 + (chan->idx * 4))
+#define XOR_BLOCK_SIZE(chan)	(chan->mmr_high_base + 0xC0 + (chan->idx * 4))
+#define XOR_INIT_VALUE_LOW(chan)	(chan->mmr_high_base + 0xE0)
+#define XOR_INIT_VALUE_HIGH(chan)	(chan->mmr_high_base + 0xE4)

 #define XOR_CONFIG(chan)	(chan->mmr_base + 0x10 + (chan->idx * 4))
 #define XOR_ACTIVATION(chan)	(chan->mmr_base + 0x20 + (chan->idx * 4))
@@ -50,11 +50,11 @@
 #define XOR_ERROR_ADDR(chan)	(chan->mmr_base + 0x60)
 #define XOR_INTR_MASK_VALUE	0x3F5

-#define WINDOW_BASE(w)		(0x250 + ((w) << 2))
-#define WINDOW_SIZE(w)		(0x270 + ((w) << 2))
-#define WINDOW_REMAP_HIGH(w)	(0x290 + ((w) << 2))
-#define WINDOW_BAR_ENABLE(chan)	(0x240 + ((chan) << 2))
-#define WINDOW_OVERRIDE_CTRL(chan)	(0x2A0 + ((chan) << 2))
+#define WINDOW_BASE(w)		(0x50 + ((w) << 2))
+#define WINDOW_SIZE(w)		(0x70 + ((w) << 2))
+#define WINDOW_REMAP_HIGH(w)	(0x90 + ((w) << 2))
+#define WINDOW_BAR_ENABLE(chan)	(0x40 + ((chan) << 2))
+#define WINDOW_OVERRIDE_CTRL(chan)	(0xA0 + ((chan) << 2))

 struct mv_xor_device {
 	void __iomem	     *xor_base;
@@ -82,6 +82,7 @@ struct mv_xor_chan {
 	int			pending;
 	spinlock_t		lock; /* protects the descriptor slot pool */
 	void __iomem		*mmr_base;
+	void __iomem		*mmr_high_base;
 	unsigned int		idx;
 	int			irq;
 	enum dma_transaction_type	current_type;