dmaengine: vdma: Fix race condition in Non-SG mode

When VDMA is configured in Non-SG mode, users can queue more
descriptors than the number of frames configured in hardware.

The current driver clamps the number of queued descriptors to the h/w
configured frame count in all modes. That is correct for SG mode but
wrong for the Non-SG configuration, where each descriptor occupies one
frame-store register and extra descriptors must instead be handed to
the hardware one frame at a time.

Fix this by applying the pending-count clamp only when scatter-gather
is in use, and by adding a desc_submitcount counter that tracks how
many descriptors have been submitted to the h/w frame stores, wrapping
around after num_frms frames.
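
Because the frame stores are reused cyclically in Non-SG mode, the new
submit counter is just a wrapping index. A minimal standalone sketch of
that bookkeeping (illustrative only; NUM_FRMS and the loop are invented
for the example and are not driver code):

	#include <stdio.h>

	#define NUM_FRMS 3	/* stands in for chan->num_frms */

	int main(void)
	{
		unsigned int desc_submitcount = 0;

		/* Hand 7 descriptors to 3 frame-store slots. */
		for (int desc = 0; desc < 7; desc++) {
			printf("descriptor %d -> frame store %u\n",
			       desc, desc_submitcount);
			desc_submitcount++;
			if (desc_submitcount == NUM_FRMS)
				desc_submitcount = 0;	/* wrap */
		}
		return 0;
	}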

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
---
 1 file changed, 19 insertions(+), 6 deletions(-)

@@ -209,6 +209,7 @@ struct xilinx_vdma_tx_descriptor {
  * @flush_on_fsync: Flush on Frame sync
  * @desc_pendingcount: Descriptor pending count
  * @ext_addr: Indicates 64 bit addressing is supported by dma channel
+ * @desc_submitcount: Descriptor h/w submitted count
  */
 struct xilinx_vdma_chan {
 	struct xilinx_vdma_device *xdev;
@@ -233,6 +234,7 @@ struct xilinx_vdma_chan {
 	bool flush_on_fsync;
 	u32 desc_pendingcount;
 	bool ext_addr;
+	u32 desc_submitcount;
 };
 
 /**
@@ -716,9 +718,10 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
 		int i = 0;
 
-		list_for_each_entry(desc, &chan->pending_list, node) {
-			segment = list_first_entry(&desc->segments,
-					struct xilinx_vdma_tx_segment, node);
+		if (chan->desc_submitcount < chan->num_frms)
+			i = chan->desc_submitcount;
+
+		list_for_each_entry(segment, &desc->segments, node) {
 			if (chan->ext_addr)
 				vdma_desc_write_64(chan,
 					XILINX_VDMA_REG_START_ADDRESS_64(i++),
@@ -742,9 +745,18 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 		vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
 	}
 
-	list_splice_tail_init(&chan->pending_list, &chan->active_list);
-	chan->desc_pendingcount = 0;
+	if (!chan->has_sg) {
+		list_del(&desc->node);
+		list_add_tail(&desc->node, &chan->active_list);
+		chan->desc_submitcount++;
+		chan->desc_pendingcount--;
+		if (chan->desc_submitcount == chan->num_frms)
+			chan->desc_submitcount = 0;
+	} else {
+		list_splice_tail_init(&chan->pending_list, &chan->active_list);
+		chan->desc_pendingcount = 0;
+	}
 }
 
 /**
  * xilinx_vdma_issue_pending - Issue pending transactions
@@ -927,7 +939,8 @@ append:
 	list_add_tail(&desc->node, &chan->pending_list);
 	chan->desc_pendingcount++;
 
-	if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
+	if (chan->has_sg &&
+	    unlikely(chan->desc_pendingcount > chan->num_frms)) {
 		dev_dbg(chan->dev, "desc pendingcount is too high\n");
 		chan->desc_pendingcount = chan->num_frms;
 	}