dmaengine: xilinx_dma: Fix read-after-free bug when terminating transfers

When a user calls dmaengine_terminate_sync(), the driver cleans up any
remaining descriptors for all the pending or active transfers that had
previously been submitted. However, this might happen whilst the tasklet is
invoking the DMA callback for the last finished transfer, so by the time
the tasklet re-takes the channel's spinlock, the list of completed
descriptors it was traversing is no longer valid. This leads to a
read-after-free.

Fix it by signalling a user-triggered termination through a boolean flag,
which the descriptor cleanup loop checks so that it stops traversing a
list that has already been freed.

Signed-off-by: Adrian Larumbe <adrian.martinezlarumbe@imgtec.com>
Link: https://lore.kernel.org/r/20210706234338.7696-3-adrian.martinezlarumbe@imgtec.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
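
To make the race easier to see, below is a minimal userspace sketch of the
bug and of the flag-based fix. This is not the driver code: pthread mutexes
stand in for the channel spinlock, a hand-rolled singly linked list replaces
the kernel list helpers, and every name in it (struct chan, desc_cleanup,
terminate_all) is invented for the example.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct desc {
	struct desc *next;
	void (*callback)(void);
};

struct chan {
	pthread_mutex_t lock;
	struct desc *done_list;
	bool terminating;
};

/* Free every descriptor still on the done list; caller holds the lock. */
static void free_descriptors(struct chan *c)
{
	while (c->done_list) {
		struct desc *d = c->done_list;

		c->done_list = d->next;
		free(d);
	}
}

/* Tasklet-side cleanup, modelled on xilinx_dma_chan_desc_cleanup(). */
static void desc_cleanup(struct chan *c)
{
	struct desc *d, *next;

	pthread_mutex_lock(&c->lock);
	for (d = c->done_list; d; d = next) {
		/*
		 * 'next' is cached before the lock is dropped; if a
		 * terminate frees the rest of the list meanwhile, it
		 * becomes a dangling pointer.
		 */
		next = d->next;
		c->done_list = next;

		/* Callbacks run without the channel lock held. */
		pthread_mutex_unlock(&c->lock);
		if (d->callback)
			d->callback();
		pthread_mutex_lock(&c->lock);

		free(d);

		/*
		 * The fix: a concurrent terminate_all() already freed the
		 * remaining descriptors, so stop before touching 'next'.
		 */
		if (c->terminating)
			break;
	}
	pthread_mutex_unlock(&c->lock);
}

/* User-side terminate, modelled on xilinx_dma_terminate_all(). */
static void terminate_all(struct chan *c)
{
	pthread_mutex_lock(&c->lock);
	c->terminating = true;
	free_descriptors(c);
	pthread_mutex_unlock(&c->lock);
}

static void done(void) { puts("transfer done"); }

int main(void)
{
	struct chan c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct desc *d = calloc(1, sizeof(*d));

	d->callback = done;
	c.done_list = d;

	/* Single-threaded demo; in the driver, desc_cleanup() runs in
	 * the tasklet while terminate_all() runs in the caller. */
	desc_cleanup(&c);
	terminate_all(&c);
	return 0;
}

The break is the essence of the patch: without it, the loop would
dereference the cached next pointer after terminate_all() had freed it.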
---
 drivers/dma/xilinx/xilinx_dma.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -394,6 +394,7 @@ struct xilinx_dma_tx_descriptor {
  * @genlock: Support genlock mode
  * @err: Channel has errors
  * @idle: Check for channel idle
+ * @terminating: Check for channel being synchronized by user
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
@@ -431,6 +432,7 @@ struct xilinx_dma_chan {
 	bool genlock;
 	bool err;
 	bool idle;
+	bool terminating;
 	struct tasklet_struct tasklet;
 	struct xilinx_vdma_config config;
 	bool flush_on_fsync;
@@ -1049,6 +1051,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
 		/* Run any dependencies, then free the descriptor */
 		dma_run_dependencies(&desc->async_tx);
 		xilinx_dma_free_tx_descriptor(chan, desc);
+
+		/*
+		 * While we ran a callback the user called a terminate function,
+		 * which takes care of cleaning up any remaining descriptors
+		 */
+		if (chan->terminating)
+			break;
 	}

 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -1965,6 +1974,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (desc->cyclic)
 		chan->cyclic = true;

+	chan->terminating = false;
+
 	spin_unlock_irqrestore(&chan->lock, flags);

 	return cookie;
@@ -2436,6 +2447,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 		xilinx_dma_chan_reset(chan);

 	/* Remove and free all of the descriptors in the lists */
+	chan->terminating = true;
 	xilinx_dma_free_descriptors(chan);

 	chan->idle = true;
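
For completeness, this is roughly how a DMA client reaches the terminate
path above. dmaengine_terminate_sync() is the real dmaengine API the commit
message refers to; struct my_dev and my_dev_stop_dma() are hypothetical
names used only for the sketch.

#include <linux/dmaengine.h>

/* Hypothetical client device; only the channel pointer matters here. */
struct my_dev {
	struct dma_chan *dma_chan;
};

static void my_dev_stop_dma(struct my_dev *md)
{
	/*
	 * Quiesces the channel and reaps all descriptors. With this fix,
	 * a cleanup tasklet racing with the terminate sees
	 * chan->terminating and stops walking the done list instead of
	 * dereferencing freed descriptors.
	 */
	dmaengine_terminate_sync(md->dma_chan);
}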