serial: sh-sci: Fix race condition between RX worker and cleanup

During serial port shutdown, the DMA receive worker function may still
be called after the receive DMA cleanup function has been called.
Fix this race condition between work_fn_rx() and sci_rx_dma_release() by
acquiring the port's spinlock in sci_rx_dma_release().
This requires releasing the spinlock in work_fn_rx() before calling (any
function that may call) sci_rx_dma_release().

Terminate all active receive DMA descriptors to release them, and to
make sure no more completions come in.

Do the same in sci_tx_dma_release() for symmetry, although the serial
upper layer will no longer submit more data at this point of time.

Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Geert Uytterhoeven 2015-08-21 20:02:50 +02:00 committed by Greg Kroah-Hartman
parent 0907c1004f
commit 04928b79d2
1 changed file with 14 additions and 5 deletions

View File

@@ -1362,9 +1362,13 @@ static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
{ {
struct dma_chan *chan = s->chan_rx; struct dma_chan *chan = s->chan_rx;
struct uart_port *port = &s->port; struct uart_port *port = &s->port;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
s->chan_rx = NULL; s->chan_rx = NULL;
s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL; s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
spin_unlock_irqrestore(&port->lock, flags);
dmaengine_terminate_all(chan);
dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, dma_free_coherent(chan->device->dev, s->buf_len_rx * 2,
sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0])); sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
dma_release_channel(chan); dma_release_channel(chan);
@@ -1376,9 +1380,13 @@ static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
{ {
struct dma_chan *chan = s->chan_tx; struct dma_chan *chan = s->chan_tx;
struct uart_port *port = &s->port; struct uart_port *port = &s->port;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
s->chan_tx = NULL; s->chan_tx = NULL;
s->cookie_tx = -EINVAL; s->cookie_tx = -EINVAL;
spin_unlock_irqrestore(&port->lock, flags);
dmaengine_terminate_all(chan);
dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE, dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE,
DMA_TO_DEVICE); DMA_TO_DEVICE);
dma_release_channel(chan); dma_release_channel(chan);
@@ -1444,7 +1452,8 @@ static void work_fn_rx(struct work_struct *work)
} else { } else {
dev_err(port->dev, "%s: Rx cookie %d not found!\n", __func__, dev_err(port->dev, "%s: Rx cookie %d not found!\n", __func__,
s->active_rx); s->active_rx);
goto out; spin_unlock_irqrestore(&port->lock, flags);
return;
} }
status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state); status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
@@ -1464,9 +1473,10 @@ static void work_fn_rx(struct work_struct *work)
if (count) if (count)
tty_flip_buffer_push(&port->state->port); tty_flip_buffer_push(&port->state->port);
sci_submit_rx(s); spin_unlock_irqrestore(&port->lock, flags);
goto out; sci_submit_rx(s);
return;
} }
desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[new], 1, desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[new], 1,
@@ -1485,14 +1495,13 @@ static void work_fn_rx(struct work_struct *work)
dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n", dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
__func__, s->cookie_rx[new], new, s->active_rx); __func__, s->cookie_rx[new], new, s->active_rx);
out:
spin_unlock_irqrestore(&port->lock, flags); spin_unlock_irqrestore(&port->lock, flags);
return; return;
fail: fail:
spin_unlock_irqrestore(&port->lock, flags);
dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n"); dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
sci_rx_dma_release(s, true); sci_rx_dma_release(s, true);
spin_unlock_irqrestore(&port->lock, flags);
} }
static void work_fn_tx(struct work_struct *work) static void work_fn_tx(struct work_struct *work)