rapidio/tsi721_dma: update error reporting from prep_sg callback

Switch to returning an error-valued pointer instead of a plain NULL pointer.
This allows the caller to properly identify the situation when the request
queue is full, and therefore gives the upper layer an option to retry the
operation later.

Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Andre van Herk <andre.van.herk@prodrive-technologies.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Alexandre Bounine 2016-03-22 14:26:59 -07:00 committed by Linus Torvalds
parent 72d8a0d230
commit 8347245750
1 changed file with 21 additions and 16 deletions

View File

@ -767,7 +767,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
void *tinfo) void *tinfo)
{ {
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
struct tsi721_tx_desc *desc, *_d; struct tsi721_tx_desc *desc;
struct rio_dma_ext *rext = tinfo; struct rio_dma_ext *rext = tinfo;
enum dma_rtype rtype; enum dma_rtype rtype;
struct dma_async_tx_descriptor *txd = NULL; struct dma_async_tx_descriptor *txd = NULL;
@ -775,7 +775,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
if (!sgl || !sg_len) { if (!sgl || !sg_len) {
tsi_err(&dchan->dev->device, "DMAC%d No SG list", tsi_err(&dchan->dev->device, "DMAC%d No SG list",
bdma_chan->id); bdma_chan->id);
return NULL; return ERR_PTR(-EINVAL);
} }
tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id, tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
@ -800,28 +800,33 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
tsi_err(&dchan->dev->device, tsi_err(&dchan->dev->device,
"DMAC%d Unsupported DMA direction option", "DMAC%d Unsupported DMA direction option",
bdma_chan->id); bdma_chan->id);
return NULL; return ERR_PTR(-EINVAL);
} }
spin_lock_bh(&bdma_chan->lock); spin_lock_bh(&bdma_chan->lock);
list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) { if (!list_empty(&bdma_chan->free_list)) {
if (async_tx_test_ack(&desc->txd)) { desc = list_first_entry(&bdma_chan->free_list,
list_del_init(&desc->desc_node); struct tsi721_tx_desc, desc_node);
desc->destid = rext->destid; list_del_init(&desc->desc_node);
desc->rio_addr = rext->rio_addr; desc->destid = rext->destid;
desc->rio_addr_u = 0; desc->rio_addr = rext->rio_addr;
desc->rtype = rtype; desc->rio_addr_u = 0;
desc->sg_len = sg_len; desc->rtype = rtype;
desc->sg = sgl; desc->sg_len = sg_len;
txd = &desc->txd; desc->sg = sgl;
txd->flags = flags; txd = &desc->txd;
break; txd->flags = flags;
}
} }
spin_unlock_bh(&bdma_chan->lock); spin_unlock_bh(&bdma_chan->lock);
if (!txd) {
tsi_debug(DMA, &dchan->dev->device,
"DMAC%d free TXD is not available", bdma_chan->id);
return ERR_PTR(-EBUSY);
}
return txd; return txd;
} }