dmaengine: imx-sdma - reduce transfer latency for DMA cyclic clients

Having the SDMA driver use a tasklet for running the client
callbacks introduces some issues:
  - a race condition that can result in desynchronized data:
    the DMA transaction status is retrieved only when the
    callback is eventually executed, leaving plenty of time
    for the status to be altered in the meantime.
  - inter-transfer latency, which can leave channels idle.

Move the callback execution, for cyclic channels, into the SDMA
interrupt handler (as advised in `Documentation/dmaengine/provider.txt`)
to (a) reduce the inter-transfer latency and (b) eliminate the
race condition where the DMA transaction status might have
changed by the time it is read.

The responsibility for the SDMA interrupt latency is thereby
moved to the SDMA clients, which, on a case-by-case basis, should
defer their work to bottom halves when needed.
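
For illustration only (not part of this patch), a cyclic client's
DMA callback could defer its heavy lifting roughly as sketched
below; the names my_dev, my_dev_work and my_dev_process_period are
hypothetical, and a workqueue is assumed as the bottom half:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct work;	/* set up with INIT_WORK() at probe time */
	/* ... client state ... */
};

/* Hypothetical heavy per-period processing, runs in process context. */
static void my_dev_process_period(struct my_dev *dev);

static void my_dev_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	my_dev_process_period(dev);
}

/* dma_async_tx_callback: now called directly from the SDMA interrupt. */
static void my_dev_dma_callback(void *param)
{
	struct my_dev *dev = param;

	/* Keep the hard-IRQ path short: just kick the bottom half. */
	schedule_work(&dev->work);
}

The same pattern applies with a threaded IRQ or a client-side
tasklet; the point is simply that anything time-consuming no
longer runs in the SDMA interrupt itself.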

Tested-by: Peter Senna Tschudin <peter.senna@collabora.com>
Acked-by: Peter Senna Tschudin <peter.senna@collabora.com>
Reviewed-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Nandor Han <nandor.han@ge.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Nandor Han <nandor.han@ge.com>, 2016-08-08 15:38:25 +03:00; committed by Greg Kroah-Hartman
parent fecdef932b
commit 15f30f5131
1 changed file with 16 additions and 20 deletions


@@ -648,12 +648,6 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 	writel_relaxed(val, sdma->regs + chnenbl);
 }
 
-static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
-{
-	if (sdmac->desc.callback)
-		sdmac->desc.callback(sdmac->desc.callback_param);
-}
-
 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 {
 	struct sdma_buffer_descriptor *bd;
@@ -672,13 +666,25 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 			sdmac->status = DMA_ERROR;
 
 		bd->mode.status |= BD_DONE;
+
+		/*
+		 * The callback is called from the interrupt context in order
+		 * to reduce latency and to avoid the risk of altering the
+		 * SDMA transaction status by the time the client tasklet is
+		 * executed.
+		 */
+
+		if (sdmac->desc.callback)
+			sdmac->desc.callback(sdmac->desc.callback_param);
+
 		sdmac->buf_tail++;
 		sdmac->buf_tail %= sdmac->num_bd;
 	}
 }
 
-static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
+static void mxc_sdma_handle_channel_normal(unsigned long data)
 {
+	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 	struct sdma_buffer_descriptor *bd;
 	int i, error = 0;
 
@@ -705,16 +711,6 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 		sdmac->desc.callback(sdmac->desc.callback_param);
 }
 
-static void sdma_tasklet(unsigned long data)
-{
-	struct sdma_channel *sdmac = (struct sdma_channel *) data;
-
-	if (sdmac->flags & IMX_DMA_SG_LOOP)
-		sdma_handle_channel_loop(sdmac);
-	else
-		mxc_sdma_handle_channel_normal(sdmac);
-}
-
 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 {
 	struct sdma_engine *sdma = dev_id;
@@ -731,8 +727,8 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 
 		if (sdmac->flags & IMX_DMA_SG_LOOP)
 			sdma_update_channel_loop(sdmac);
-
-		tasklet_schedule(&sdmac->tasklet);
+		else
+			tasklet_schedule(&sdmac->tasklet);
 
 		__clear_bit(channel, &stat);
 	}
@@ -1732,7 +1728,7 @@ static int sdma_probe(struct platform_device *pdev)
 		dma_cookie_init(&sdmac->chan);
 		sdmac->channel = i;
 
-		tasklet_init(&sdmac->tasklet, sdma_tasklet,
+		tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
 			     (unsigned long) sdmac);
 		/*
 		 * Add the channel to the DMAC list. Do not add channel 0 though