tty: serial: 8250: omap: synchronize rx_running
The rx_running flag should show if DMA is currently active. However there is a window between when the flag is set/cleared and when the DMA is started/stopped. Because the flag is queried from both hard and soft irq contexts, the driver can make incorrect decisions and do things like start a DMA transfer using a buffer that is already set up to be used for a DMA transfer.

This patch adds a spinlock to synchronize the rx_running flag and close the above-mentioned window.

Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
02ec6041a8
commit
eda0cd3546
|
@ -98,6 +98,7 @@ struct omap8250_priv {
|
||||||
struct pm_qos_request pm_qos_request;
|
struct pm_qos_request pm_qos_request;
|
||||||
struct work_struct qos_work;
|
struct work_struct qos_work;
|
||||||
struct uart_8250_dma omap8250_dma;
|
struct uart_8250_dma omap8250_dma;
|
||||||
|
spinlock_t rx_dma_lock;
|
||||||
};
|
};
|
||||||
|
|
||||||
static u32 uart_read(struct uart_8250_port *up, u32 reg)
|
static u32 uart_read(struct uart_8250_port *up, u32 reg)
|
||||||
|
@ -669,14 +670,21 @@ static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir);
|
||||||
|
|
||||||
static void __dma_rx_do_complete(struct uart_8250_port *p, bool error)
|
static void __dma_rx_do_complete(struct uart_8250_port *p, bool error)
|
||||||
{
|
{
|
||||||
|
struct omap8250_priv *priv = p->port.private_data;
|
||||||
struct uart_8250_dma *dma = p->dma;
|
struct uart_8250_dma *dma = p->dma;
|
||||||
struct tty_port *tty_port = &p->port.state->port;
|
struct tty_port *tty_port = &p->port.state->port;
|
||||||
struct dma_tx_state state;
|
struct dma_tx_state state;
|
||||||
int count;
|
int count;
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
|
dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
|
||||||
dma->rx_size, DMA_FROM_DEVICE);
|
dma->rx_size, DMA_FROM_DEVICE);
|
||||||
|
|
||||||
|
spin_lock_irqsave(&priv->rx_dma_lock, flags);
|
||||||
|
|
||||||
|
if (!dma->rx_running)
|
||||||
|
goto unlock;
|
||||||
|
|
||||||
dma->rx_running = 0;
|
dma->rx_running = 0;
|
||||||
dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
|
dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
|
||||||
dmaengine_terminate_all(dma->rxchan);
|
dmaengine_terminate_all(dma->rxchan);
|
||||||
|
@ -685,6 +693,9 @@ static void __dma_rx_do_complete(struct uart_8250_port *p, bool error)
|
||||||
|
|
||||||
tty_insert_flip_string(tty_port, dma->rx_buf, count);
|
tty_insert_flip_string(tty_port, dma->rx_buf, count);
|
||||||
p->port.icount.rx += count;
|
p->port.icount.rx += count;
|
||||||
|
unlock:
|
||||||
|
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
|
||||||
|
|
||||||
if (!error)
|
if (!error)
|
||||||
omap_8250_rx_dma(p, 0);
|
omap_8250_rx_dma(p, 0);
|
||||||
|
|
||||||
|
@ -696,28 +707,45 @@ static void __dma_rx_complete(void *param)
|
||||||
__dma_rx_do_complete(param, false);
|
__dma_rx_do_complete(param, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
|
||||||
|
{
|
||||||
|
struct omap8250_priv *priv = p->port.private_data;
|
||||||
|
struct uart_8250_dma *dma = p->dma;
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
|
spin_lock_irqsave(&priv->rx_dma_lock, flags);
|
||||||
|
|
||||||
|
if (!dma->rx_running) {
|
||||||
|
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
dmaengine_pause(dma->rxchan);
|
||||||
|
|
||||||
|
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
|
||||||
|
|
||||||
|
__dma_rx_do_complete(p, true);
|
||||||
|
}
|
||||||
|
|
||||||
static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
|
static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
|
||||||
{
|
{
|
||||||
|
struct omap8250_priv *priv = p->port.private_data;
|
||||||
struct uart_8250_dma *dma = p->dma;
|
struct uart_8250_dma *dma = p->dma;
|
||||||
|
int err = 0;
|
||||||
struct dma_async_tx_descriptor *desc;
|
struct dma_async_tx_descriptor *desc;
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
switch (iir & 0x3f) {
|
switch (iir & 0x3f) {
|
||||||
case UART_IIR_RLSI:
|
case UART_IIR_RLSI:
|
||||||
/* 8250_core handles errors and break interrupts */
|
/* 8250_core handles errors and break interrupts */
|
||||||
if (dma->rx_running) {
|
omap_8250_rx_dma_flush(p);
|
||||||
dmaengine_pause(dma->rxchan);
|
|
||||||
__dma_rx_do_complete(p, true);
|
|
||||||
}
|
|
||||||
return -EIO;
|
return -EIO;
|
||||||
case UART_IIR_RX_TIMEOUT:
|
case UART_IIR_RX_TIMEOUT:
|
||||||
/*
|
/*
|
||||||
* If RCVR FIFO trigger level was not reached, complete the
|
* If RCVR FIFO trigger level was not reached, complete the
|
||||||
* transfer and let 8250_core copy the remaining data.
|
* transfer and let 8250_core copy the remaining data.
|
||||||
*/
|
*/
|
||||||
if (dma->rx_running) {
|
omap_8250_rx_dma_flush(p);
|
||||||
dmaengine_pause(dma->rxchan);
|
|
||||||
__dma_rx_do_complete(p, true);
|
|
||||||
}
|
|
||||||
return -ETIMEDOUT;
|
return -ETIMEDOUT;
|
||||||
case UART_IIR_RDI:
|
case UART_IIR_RDI:
|
||||||
/*
|
/*
|
||||||
|
@ -729,24 +757,25 @@ static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
|
||||||
* the DMA won't do anything soon so we have to cancel the DMA
|
* the DMA won't do anything soon so we have to cancel the DMA
|
||||||
* transfer and purge the FIFO manually.
|
* transfer and purge the FIFO manually.
|
||||||
*/
|
*/
|
||||||
if (dma->rx_running) {
|
omap_8250_rx_dma_flush(p);
|
||||||
dmaengine_pause(dma->rxchan);
|
|
||||||
__dma_rx_do_complete(p, true);
|
|
||||||
}
|
|
||||||
return -ETIMEDOUT;
|
return -ETIMEDOUT;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
spin_lock_irqsave(&priv->rx_dma_lock, flags);
|
||||||
|
|
||||||
if (dma->rx_running)
|
if (dma->rx_running)
|
||||||
return 0;
|
goto out;
|
||||||
|
|
||||||
desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
|
desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
|
||||||
dma->rx_size, DMA_DEV_TO_MEM,
|
dma->rx_size, DMA_DEV_TO_MEM,
|
||||||
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||||
if (!desc)
|
if (!desc) {
|
||||||
return -EBUSY;
|
err = -EBUSY;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
dma->rx_running = 1;
|
dma->rx_running = 1;
|
||||||
desc->callback = __dma_rx_complete;
|
desc->callback = __dma_rx_complete;
|
||||||
|
@ -758,7 +787,9 @@ static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
|
||||||
dma->rx_size, DMA_FROM_DEVICE);
|
dma->rx_size, DMA_FROM_DEVICE);
|
||||||
|
|
||||||
dma_async_issue_pending(dma->rxchan);
|
dma_async_issue_pending(dma->rxchan);
|
||||||
return 0;
|
out:
|
||||||
|
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
|
||||||
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int omap_8250_tx_dma(struct uart_8250_port *p);
|
static int omap_8250_tx_dma(struct uart_8250_port *p);
|
||||||
|
@ -1065,6 +1096,8 @@ static int omap8250_probe(struct platform_device *pdev)
|
||||||
priv->latency);
|
priv->latency);
|
||||||
INIT_WORK(&priv->qos_work, omap8250_uart_qos_work);
|
INIT_WORK(&priv->qos_work, omap8250_uart_qos_work);
|
||||||
|
|
||||||
|
spin_lock_init(&priv->rx_dma_lock);
|
||||||
|
|
||||||
device_init_wakeup(&pdev->dev, true);
|
device_init_wakeup(&pdev->dev, true);
|
||||||
pm_runtime_use_autosuspend(&pdev->dev);
|
pm_runtime_use_autosuspend(&pdev->dev);
|
||||||
pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
|
pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
|
||||||
|
|
Loading…
Reference in New Issue