Merge branch 'topic/err_reporting' into for-linus

Signed-off-by: Vinod Koul <vinod.koul@intel.com>

Conflicts:
	drivers/dma/cppi41.c
Vinod Koul 2016-10-03 09:17:33 +05:30
commit 11bfedff55
41 changed files with 597 additions and 286 deletions


@@ -282,6 +282,17 @@ supported.
    that is supposed to push the current
    transaction descriptor to a pending queue, waiting
    for issue_pending to be called.
  - In this structure the function pointer callback_result can be
    initialized so that the submitter is notified when a transaction
    completes. Earlier code used the function pointer callback, which
    reports no status for the transaction and will be deprecated. The
    dmaengine_result structure passed to callback_result has two fields
    (see the sketch after this hunk):
    + result: the transfer result, as defined by dmaengine_tx_result;
      either success or some error condition.
    + residue: the residue bytes of the transfer, for engines that
      support residue reporting.

 * device_issue_pending
   - Takes the first transaction descriptor in the pending queue,
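A hedged client-side sketch of the new reporting path (the request structure and my_* helpers are hypothetical; dmaengine_prep_dma_memcpy(), dmaengine_submit() and the dmaengine_result/DMA_TRANS_* types are the interfaces touched by this commit):

static void my_dma_done(void *param, const struct dmaengine_result *result)
{
	struct my_request *req = param;	/* hypothetical client context */

	if (result->result != DMA_TRANS_NOERROR) {
		/* failed or aborted; result->residue bytes were not moved
		 * on engines that report residue */
		my_request_fail(req, result->residue);
		return;
	}
	my_request_complete(req);
}

	/* submission side: install callback_result instead of callback */
	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;
	txd->callback_result = my_dma_done;
	txd->callback_param = req;
	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(chan);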


@ -473,15 +473,11 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* for cyclic transfers, /* for cyclic transfers,
* no need to replay callback function while stopping */ * no need to replay callback function while stopping */
if (!atc_chan_is_cyclic(atchan)) { if (!atc_chan_is_cyclic(atchan)) {
dma_async_tx_callback callback = txd->callback;
void *param = txd->callback_param;
/* /*
* The API requires that no submissions are done from a * The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here * callback, so we don't need to drop the lock here
*/ */
if (callback) dmaengine_desc_get_callback_invoke(txd, NULL);
callback(param);
} }
dma_run_dependencies(txd); dma_run_dependencies(txd);
@ -598,15 +594,12 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
{ {
struct at_desc *first = atc_first_active(atchan); struct at_desc *first = atc_first_active(atchan);
struct dma_async_tx_descriptor *txd = &first->txd; struct dma_async_tx_descriptor *txd = &first->txd;
dma_async_tx_callback callback = txd->callback;
void *param = txd->callback_param;
dev_vdbg(chan2dev(&atchan->chan_common), dev_vdbg(chan2dev(&atchan->chan_common),
"new cyclic period llp 0x%08x\n", "new cyclic period llp 0x%08x\n",
channel_readl(atchan, DSCR)); channel_readl(atchan, DSCR));
if (callback) dmaengine_desc_get_callback_invoke(txd, NULL);
callback(param);
} }
/*-- IRQ & Tasklet ---------------------------------------------------*/ /*-- IRQ & Tasklet ---------------------------------------------------*/


@ -1572,8 +1572,8 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
txd = &desc->tx_dma_desc; txd = &desc->tx_dma_desc;
if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT)) if (txd->flags & DMA_PREP_INTERRUPT)
txd->callback(txd->callback_param); dmaengine_desc_get_callback_invoke(txd, NULL);
} }
static void at_xdmac_tasklet(unsigned long data) static void at_xdmac_tasklet(unsigned long data)
@ -1616,8 +1616,8 @@ static void at_xdmac_tasklet(unsigned long data)
if (!at_xdmac_chan_is_cyclic(atchan)) { if (!at_xdmac_chan_is_cyclic(atchan)) {
dma_cookie_complete(txd); dma_cookie_complete(txd);
if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT)) if (txd->flags & DMA_PREP_INTERRUPT)
txd->callback(txd->callback_param); dmaengine_desc_get_callback_invoke(txd, NULL);
} }
dma_run_dependencies(txd); dma_run_dependencies(txd);


@ -1875,8 +1875,7 @@ static void dma_tasklet(unsigned long data)
struct coh901318_chan *cohc = (struct coh901318_chan *) data; struct coh901318_chan *cohc = (struct coh901318_chan *) data;
struct coh901318_desc *cohd_fin; struct coh901318_desc *cohd_fin;
unsigned long flags; unsigned long flags;
dma_async_tx_callback callback; struct dmaengine_desc_callback cb;
void *callback_param;
dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d" dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
" nbr_active_done %ld\n", __func__, " nbr_active_done %ld\n", __func__,
@ -1891,8 +1890,7 @@ static void dma_tasklet(unsigned long data)
goto err; goto err;
/* locate callback to client */ /* locate callback to client */
callback = cohd_fin->desc.callback; dmaengine_desc_get_callback(&cohd_fin->desc, &cb);
callback_param = cohd_fin->desc.callback_param;
/* sign this job as completed on the channel */ /* sign this job as completed on the channel */
dma_cookie_complete(&cohd_fin->desc); dma_cookie_complete(&cohd_fin->desc);
@ -1907,8 +1905,7 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&cohc->lock, flags); spin_unlock_irqrestore(&cohc->lock, flags);
/* Call the callback when we're done */ /* Call the callback when we're done */
if (callback) dmaengine_desc_callback_invoke(&cb, NULL);
callback(callback_param);
spin_lock_irqsave(&cohc->lock, flags); spin_lock_irqsave(&cohc->lock, flags);


@@ -336,7 +336,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 	c->residue = pd_trans_len(c->desc->pd6) - len;
 	dma_cookie_complete(&c->txd);
-	c->txd.callback(c->txd.callback_param);
+	dmaengine_desc_get_callback_invoke(&c->txd, NULL);
 	/* Paired with cppi41_dma_issue_pending */
 	pm_runtime_mark_last_busy(cdd->ddev.dev);


@ -86,4 +86,88 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
state->residue = residue; state->residue = residue;
} }
struct dmaengine_desc_callback {
dma_async_tx_callback callback;
dma_async_tx_callback_result callback_result;
void *callback_param;
};
/**
* dmaengine_desc_get_callback - get the passed in callback function
* @tx: tx descriptor
* @cb: temp struct to hold the callback info
*
* Fill the passed in cb struct with what's available in the passed in
* tx descriptor struct
* No locking is required.
*/
static inline void
dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
struct dmaengine_desc_callback *cb)
{
cb->callback = tx->callback;
cb->callback_result = tx->callback_result;
cb->callback_param = tx->callback_param;
}
/**
* dmaengine_desc_callback_invoke - call the callback function in cb struct
* @cb: temp struct that is holding the callback info
* @result: transaction result
*
* Call the callback function provided in the cb struct with the parameter
* in the cb struct.
* Locking is dependent on the driver.
*/
static inline void
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
const struct dmaengine_result *result)
{
struct dmaengine_result dummy_result = {
.result = DMA_TRANS_NOERROR,
.residue = 0
};
if (cb->callback_result) {
if (!result)
result = &dummy_result;
cb->callback_result(cb->callback_param, result);
} else if (cb->callback) {
cb->callback(cb->callback_param);
}
}
/**
* dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
* then immediately call the callback.
* @tx: dma async tx descriptor
* @result: transaction result
*
* Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
* in a single function since no work is necessary in between for the driver.
* Locking is dependent on the driver.
*/
static inline void
dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
const struct dmaengine_result *result)
{
struct dmaengine_desc_callback cb;
dmaengine_desc_get_callback(tx, &cb);
dmaengine_desc_callback_invoke(&cb, result);
}
/**
* dmaengine_desc_callback_valid - verify the callback is valid in cb
* @cb: callback info struct
*
* Return a bool that verifies whether callback in cb is valid or not.
* No locking is required.
*/
static inline bool
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
{
return (cb->callback) ? true : false;
}
#endif
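A minimal sketch of how a driver's completion tasklet uses these helpers (the foo_* channel type and lookup function are placeholders, not a real driver):

static void foo_dma_tasklet(unsigned long data)
{
	struct foo_chan *fc = (struct foo_chan *)data;	/* placeholder channel */
	struct dma_async_tx_descriptor *txd = foo_first_completed_txd(fc);
	struct dmaengine_desc_callback cb;
	struct dmaengine_result res;

	spin_lock(&fc->lock);
	dma_cookie_complete(txd);
	/* snapshot the callback while still holding the channel lock */
	dmaengine_desc_get_callback(txd, &cb);
	spin_unlock(&fc->lock);

	/* invoke outside the lock; passing NULL here instead of &res makes
	 * dmaengine_desc_callback_invoke() substitute a DMA_TRANS_NOERROR dummy */
	res.result = DMA_TRANS_NOERROR;
	res.residue = 0;
	dmaengine_desc_callback_invoke(&cb, &res);
}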


@ -270,20 +270,19 @@ static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
bool callback_required) bool callback_required)
{ {
dma_async_tx_callback callback = NULL;
void *param = NULL;
struct dma_async_tx_descriptor *txd = &desc->txd; struct dma_async_tx_descriptor *txd = &desc->txd;
struct dw_desc *child; struct dw_desc *child;
unsigned long flags; unsigned long flags;
struct dmaengine_desc_callback cb;
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
spin_lock_irqsave(&dwc->lock, flags); spin_lock_irqsave(&dwc->lock, flags);
dma_cookie_complete(txd); dma_cookie_complete(txd);
if (callback_required) { if (callback_required)
callback = txd->callback; dmaengine_desc_get_callback(txd, &cb);
param = txd->callback_param; else
} memset(&cb, 0, sizeof(cb));
/* async_tx_ack */ /* async_tx_ack */
list_for_each_entry(child, &desc->tx_list, desc_node) list_for_each_entry(child, &desc->tx_list, desc_node)
@ -292,8 +291,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
dwc_desc_put(dwc, desc); dwc_desc_put(dwc, desc);
spin_unlock_irqrestore(&dwc->lock, flags); spin_unlock_irqrestore(&dwc->lock, flags);
if (callback) dmaengine_desc_callback_invoke(&cb, NULL);
callback(param);
} }
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)


@ -737,10 +737,10 @@ static void ep93xx_dma_tasklet(unsigned long data)
{ {
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
struct ep93xx_dma_desc *desc, *d; struct ep93xx_dma_desc *desc, *d;
dma_async_tx_callback callback = NULL; struct dmaengine_desc_callback cb;
void *callback_param = NULL;
LIST_HEAD(list); LIST_HEAD(list);
memset(&cb, 0, sizeof(cb));
spin_lock_irq(&edmac->lock); spin_lock_irq(&edmac->lock);
/* /*
* If dma_terminate_all() was called before we get to run, the active * If dma_terminate_all() was called before we get to run, the active
@ -755,8 +755,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
dma_cookie_complete(&desc->txd); dma_cookie_complete(&desc->txd);
list_splice_init(&edmac->active, &list); list_splice_init(&edmac->active, &list);
} }
callback = desc->txd.callback; dmaengine_desc_get_callback(&desc->txd, &cb);
callback_param = desc->txd.callback_param;
} }
spin_unlock_irq(&edmac->lock); spin_unlock_irq(&edmac->lock);
@ -769,8 +768,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
ep93xx_dma_desc_put(edmac, desc); ep93xx_dma_desc_put(edmac, desc);
} }
if (callback) dmaengine_desc_callback_invoke(&cb, NULL);
callback(callback_param);
} }
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)


@ -134,16 +134,8 @@ static void fsl_re_issue_pending(struct dma_chan *chan)
static void fsl_re_desc_done(struct fsl_re_desc *desc) static void fsl_re_desc_done(struct fsl_re_desc *desc)
{ {
dma_async_tx_callback callback;
void *callback_param;
dma_cookie_complete(&desc->async_tx); dma_cookie_complete(&desc->async_tx);
dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
callback = desc->async_tx.callback;
callback_param = desc->async_tx.callback_param;
if (callback)
callback(callback_param);
dma_descriptor_unmap(&desc->async_tx); dma_descriptor_unmap(&desc->async_tx);
} }


@ -517,11 +517,7 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
ret = txd->cookie; ret = txd->cookie;
/* Run the link descriptor callback function */ /* Run the link descriptor callback function */
if (txd->callback) { dmaengine_desc_get_callback_invoke(txd, NULL);
chan_dbg(chan, "LD %p callback\n", desc);
txd->callback(txd->callback_param);
}
dma_descriptor_unmap(txd); dma_descriptor_unmap(txd);
} }


@ -663,9 +663,7 @@ static void imxdma_tasklet(unsigned long data)
out: out:
spin_unlock_irqrestore(&imxdma->lock, flags); spin_unlock_irqrestore(&imxdma->lock, flags);
if (desc->desc.callback) dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
desc->desc.callback(desc->desc.callback_param);
} }
static int imxdma_terminate_all(struct dma_chan *chan) static int imxdma_terminate_all(struct dma_chan *chan)


@ -650,8 +650,7 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
static void sdma_handle_channel_loop(struct sdma_channel *sdmac) static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
{ {
if (sdmac->desc.callback) dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
sdmac->desc.callback(sdmac->desc.callback_param);
} }
static void sdma_update_channel_loop(struct sdma_channel *sdmac) static void sdma_update_channel_loop(struct sdma_channel *sdmac)
@ -701,8 +700,8 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
sdmac->status = DMA_COMPLETE; sdmac->status = DMA_COMPLETE;
dma_cookie_complete(&sdmac->desc); dma_cookie_complete(&sdmac->desc);
if (sdmac->desc.callback)
sdmac->desc.callback(sdmac->desc.callback_param); dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
} }
static void sdma_tasklet(unsigned long data) static void sdma_tasklet(unsigned long data)


@ -38,8 +38,54 @@
#include "../dmaengine.h" #include "../dmaengine.h"
static char *chanerr_str[] = {
"DMA Transfer Destination Address Error",
"Next Descriptor Address Error",
"Descriptor Error",
"Chan Address Value Error",
"CHANCMD Error",
"Chipset Uncorrectable Data Integrity Error",
"DMA Uncorrectable Data Integrity Error",
"Read Data Error",
"Write Data Error",
"Descriptor Control Error",
"Descriptor Transfer Size Error",
"Completion Address Error",
"Interrupt Configuration Error",
"Super extended descriptor Address Error",
"Unaffiliated Error",
"CRC or XOR P Error",
"XOR Q Error",
"Descriptor Count Error",
"DIF All F detect Error",
"Guard Tag verification Error",
"Application Tag verification Error",
"Reference Tag verification Error",
"Bundle Bit Error",
"Result DIF All F detect Error",
"Result Guard Tag verification Error",
"Result Application Tag verification Error",
"Result Reference Tag verification Error",
NULL
};
static void ioat_eh(struct ioatdma_chan *ioat_chan); static void ioat_eh(struct ioatdma_chan *ioat_chan);
static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
int i;
for (i = 0; i < 32; i++) {
if ((chanerr >> i) & 1) {
if (chanerr_str[i]) {
dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
i, chanerr_str[i]);
} else
break;
}
}
}
/** /**
* ioat_dma_do_interrupt - handler used for single vector interrupt mode * ioat_dma_do_interrupt - handler used for single vector interrupt mode
* @irq: interrupt id * @irq: interrupt id
@ -568,12 +614,14 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
tx = &desc->txd; tx = &desc->txd;
if (tx->cookie) { if (tx->cookie) {
struct dmaengine_result res;
dma_cookie_complete(tx); dma_cookie_complete(tx);
dma_descriptor_unmap(tx); dma_descriptor_unmap(tx);
if (tx->callback) { res.result = DMA_TRANS_NOERROR;
tx->callback(tx->callback_param); dmaengine_desc_get_callback_invoke(tx, NULL);
tx->callback = NULL; tx->callback = NULL;
} tx->callback_result = NULL;
} }
if (tx->phys == phys_complete) if (tx->phys == phys_complete)
@ -622,7 +670,8 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
if (is_ioat_halted(*ioat_chan->completion)) { if (is_ioat_halted(*ioat_chan->completion)) {
u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
if (chanerr & IOAT_CHANERR_HANDLE_MASK) { if (chanerr &
(IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
ioat_eh(ioat_chan); ioat_eh(ioat_chan);
} }
@ -652,6 +701,61 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
__ioat_restart_chan(ioat_chan); __ioat_restart_chan(ioat_chan);
} }
static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
struct ioat_ring_ent *desc;
u16 active;
int idx = ioat_chan->tail, i;
/*
* We assume that the failed descriptor has been processed.
* Now we are just returning all the remaining submitted
* descriptors to abort.
*/
active = ioat_ring_active(ioat_chan);
/* we skip the failed descriptor that tail points to */
for (i = 1; i < active; i++) {
struct dma_async_tx_descriptor *tx;
smp_read_barrier_depends();
prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
desc = ioat_get_ring_ent(ioat_chan, idx + i);
tx = &desc->txd;
if (tx->cookie) {
struct dmaengine_result res;
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
res.result = DMA_TRANS_ABORTED;
dmaengine_desc_get_callback_invoke(tx, &res);
tx->callback = NULL;
tx->callback_result = NULL;
}
/* skip extended descriptors */
if (desc_has_ext(desc)) {
WARN_ON(i + 1 >= active);
i++;
}
/* cleanup super extended descriptors */
if (desc->sed) {
ioat_free_sed(ioat_dma, desc->sed);
desc->sed = NULL;
}
}
smp_mb(); /* finish all descriptor reads before incrementing tail */
ioat_chan->tail = idx + active;
desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}
static void ioat_eh(struct ioatdma_chan *ioat_chan) static void ioat_eh(struct ioatdma_chan *ioat_chan)
{ {
struct pci_dev *pdev = to_pdev(ioat_chan); struct pci_dev *pdev = to_pdev(ioat_chan);
@ -662,6 +766,8 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
u32 err_handled = 0; u32 err_handled = 0;
u32 chanerr_int; u32 chanerr_int;
u32 chanerr; u32 chanerr;
bool abort = false;
struct dmaengine_result res;
/* cleanup so tail points to descriptor that caused the error */ /* cleanup so tail points to descriptor that caused the error */
if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
@ -697,30 +803,55 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
break; break;
} }
if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
res.result = DMA_TRANS_READ_FAILED;
err_handled |= IOAT_CHANERR_READ_DATA_ERR;
} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
res.result = DMA_TRANS_WRITE_FAILED;
err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
}
abort = true;
} else
res.result = DMA_TRANS_NOERROR;
/* fault on unhandled error or spurious halt */ /* fault on unhandled error or spurious halt */
if (chanerr ^ err_handled || chanerr == 0) { if (chanerr ^ err_handled || chanerr == 0) {
dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n", dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
__func__, chanerr, err_handled); __func__, chanerr, err_handled);
dev_err(to_dev(ioat_chan), "Errors handled:\n");
ioat_print_chanerrs(ioat_chan, err_handled);
dev_err(to_dev(ioat_chan), "Errors not handled:\n");
ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
BUG(); BUG();
} else { /* cleanup the faulty descriptor */
tx = &desc->txd;
if (tx->cookie) {
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
if (tx->callback) {
tx->callback(tx->callback_param);
tx->callback = NULL;
}
}
} }
writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); /* cleanup the faulty descriptor since we are continuing */
pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); tx = &desc->txd;
if (tx->cookie) {
dma_cookie_complete(tx);
dma_descriptor_unmap(tx);
dmaengine_desc_get_callback_invoke(tx, &res);
tx->callback = NULL;
tx->callback_result = NULL;
}
/* mark faulting descriptor as complete */ /* mark faulting descriptor as complete */
*ioat_chan->completion = desc->txd.phys; *ioat_chan->completion = desc->txd.phys;
spin_lock_bh(&ioat_chan->prep_lock); spin_lock_bh(&ioat_chan->prep_lock);
/* we need abort all descriptors */
if (abort) {
ioat_abort_descs(ioat_chan);
/* clean up the channel, we could be in weird state */
ioat_reset_hw(ioat_chan);
}
writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
ioat_restart_channel(ioat_chan); ioat_restart_channel(ioat_chan);
spin_unlock_bh(&ioat_chan->prep_lock); spin_unlock_bh(&ioat_chan->prep_lock);
} }
@ -753,10 +884,28 @@ void ioat_timer_event(unsigned long data)
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n", dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
__func__, chanerr); __func__, chanerr);
if (test_bit(IOAT_RUN, &ioat_chan->state)) dev_err(to_dev(ioat_chan), "Errors:\n");
BUG_ON(is_ioat_bug(chanerr)); ioat_print_chanerrs(ioat_chan, chanerr);
else /* we never got off the ground */
return; if (test_bit(IOAT_RUN, &ioat_chan->state)) {
spin_lock_bh(&ioat_chan->cleanup_lock);
spin_lock_bh(&ioat_chan->prep_lock);
set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
spin_unlock_bh(&ioat_chan->prep_lock);
ioat_abort_descs(ioat_chan);
dev_warn(to_dev(ioat_chan), "Reset channel...\n");
ioat_reset_hw(ioat_chan);
dev_warn(to_dev(ioat_chan), "Restart channel...\n");
ioat_restart_channel(ioat_chan);
spin_lock_bh(&ioat_chan->prep_lock);
clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock);
}
return;
} }
spin_lock_bh(&ioat_chan->cleanup_lock); spin_lock_bh(&ioat_chan->cleanup_lock);
@ -780,14 +929,26 @@ void ioat_timer_event(unsigned long data)
u32 chanerr; u32 chanerr;
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
dev_warn(to_dev(ioat_chan), "Restarting channel...\n"); dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n", status, chanerr);
status, chanerr); dev_err(to_dev(ioat_chan), "Errors:\n");
dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n", ioat_print_chanerrs(ioat_chan, chanerr);
ioat_ring_active(ioat_chan));
dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
ioat_ring_active(ioat_chan));
spin_lock_bh(&ioat_chan->prep_lock); spin_lock_bh(&ioat_chan->prep_lock);
set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
spin_unlock_bh(&ioat_chan->prep_lock);
ioat_abort_descs(ioat_chan);
dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
ioat_reset_hw(ioat_chan);
dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
ioat_restart_channel(ioat_chan); ioat_restart_channel(ioat_chan);
spin_lock_bh(&ioat_chan->prep_lock);
clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
spin_unlock_bh(&ioat_chan->prep_lock); spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock); spin_unlock_bh(&ioat_chan->cleanup_lock);
return; return;


@ -240,6 +240,8 @@
#define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR 0x40000 #define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR 0x40000
#define IOAT_CHANERR_HANDLE_MASK (IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR) #define IOAT_CHANERR_HANDLE_MASK (IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR)
#define IOAT_CHANERR_RECOVER_MASK (IOAT_CHANERR_READ_DATA_ERR | \
IOAT_CHANERR_WRITE_DATA_ERR)
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */


@ -71,8 +71,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
/* call the callback (must not sleep or submit new /* call the callback (must not sleep or submit new
* operations to this channel) * operations to this channel)
*/ */
if (tx->callback) dmaengine_desc_get_callback_invoke(tx, NULL);
tx->callback(tx->callback_param);
dma_descriptor_unmap(tx); dma_descriptor_unmap(tx);
if (desc->group_head) if (desc->group_head)


@ -1160,11 +1160,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
struct scatterlist **sg, *sgnext, *sgnew = NULL; struct scatterlist **sg, *sgnext, *sgnew = NULL;
/* Next transfer descriptor */ /* Next transfer descriptor */
struct idmac_tx_desc *desc, *descnew; struct idmac_tx_desc *desc, *descnew;
dma_async_tx_callback callback;
void *callback_param;
bool done = false; bool done = false;
u32 ready0, ready1, curbuf, err; u32 ready0, ready1, curbuf, err;
unsigned long flags; unsigned long flags;
struct dmaengine_desc_callback cb;
/* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */ /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
@ -1278,12 +1277,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (likely(sgnew) && if (likely(sgnew) &&
ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
callback = descnew->txd.callback; dmaengine_desc_get_callback(&descnew->txd, &cb);
callback_param = descnew->txd.callback_param;
list_del_init(&descnew->list); list_del_init(&descnew->list);
spin_unlock(&ichan->lock); spin_unlock(&ichan->lock);
if (callback)
callback(callback_param); dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock(&ichan->lock); spin_lock(&ichan->lock);
} }
@ -1292,13 +1291,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (done) if (done)
dma_cookie_complete(&desc->txd); dma_cookie_complete(&desc->txd);
callback = desc->txd.callback; dmaengine_desc_get_callback(&desc->txd, &cb);
callback_param = desc->txd.callback_param;
spin_unlock(&ichan->lock); spin_unlock(&ichan->lock);
if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback) if (done && (desc->txd.flags & DMA_PREP_INTERRUPT))
callback(callback_param); dmaengine_desc_callback_invoke(&cb, NULL);
return IRQ_HANDLED; return IRQ_HANDLED;
} }


@ -104,10 +104,8 @@ static void mic_dma_cleanup(struct mic_dma_chan *ch)
tx = &ch->tx_array[last_tail]; tx = &ch->tx_array[last_tail];
if (tx->cookie) { if (tx->cookie) {
dma_cookie_complete(tx); dma_cookie_complete(tx);
if (tx->callback) { dmaengine_desc_get_callback_invoke(tx, NULL);
tx->callback(tx->callback_param); tx->callback = NULL;
tx->callback = NULL;
}
} }
last_tail = mic_dma_hw_ring_inc(last_tail); last_tail = mic_dma_hw_ring_inc(last_tail);
} }


@ -864,19 +864,15 @@ static void dma_do_tasklet(unsigned long data)
struct mmp_pdma_desc_sw *desc, *_desc; struct mmp_pdma_desc_sw *desc, *_desc;
LIST_HEAD(chain_cleanup); LIST_HEAD(chain_cleanup);
unsigned long flags; unsigned long flags;
struct dmaengine_desc_callback cb;
if (chan->cyclic_first) { if (chan->cyclic_first) {
dma_async_tx_callback cb = NULL;
void *cb_data = NULL;
spin_lock_irqsave(&chan->desc_lock, flags); spin_lock_irqsave(&chan->desc_lock, flags);
desc = chan->cyclic_first; desc = chan->cyclic_first;
cb = desc->async_tx.callback; dmaengine_desc_get_callback(&desc->async_tx, &cb);
cb_data = desc->async_tx.callback_param;
spin_unlock_irqrestore(&chan->desc_lock, flags); spin_unlock_irqrestore(&chan->desc_lock, flags);
if (cb) dmaengine_desc_callback_invoke(&cb, NULL);
cb(cb_data);
return; return;
} }
@ -921,8 +917,8 @@ static void dma_do_tasklet(unsigned long data)
/* Remove from the list of transactions */ /* Remove from the list of transactions */
list_del(&desc->node); list_del(&desc->node);
/* Run the link descriptor callback function */ /* Run the link descriptor callback function */
if (txd->callback) dmaengine_desc_get_callback(txd, &cb);
txd->callback(txd->callback_param); dmaengine_desc_callback_invoke(&cb, NULL);
dma_pool_free(chan->desc_pool, desc, txd->phys); dma_pool_free(chan->desc_pool, desc, txd->phys);
} }


@ -349,9 +349,7 @@ static void dma_do_tasklet(unsigned long data)
{ {
struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data; struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
if (tdmac->desc.callback) dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
tdmac->desc.callback(tdmac->desc.callback_param);
} }
static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)


@ -411,8 +411,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma)
list_for_each_entry(mdesc, &list, node) { list_for_each_entry(mdesc, &list, node) {
desc = &mdesc->desc; desc = &mdesc->desc;
if (desc->callback) dmaengine_desc_get_callback_invoke(desc, NULL);
desc->callback(desc->callback_param);
last_cookie = desc->cookie; last_cookie = desc->cookie;
dma_run_dependencies(desc); dma_run_dependencies(desc);


@ -209,10 +209,7 @@ mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
/* call the callback (must not sleep or submit new /* call the callback (must not sleep or submit new
* operations to this channel) * operations to this channel)
*/ */
if (desc->async_tx.callback) dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
desc->async_tx.callback(
desc->async_tx.callback_param);
dma_descriptor_unmap(&desc->async_tx); dma_descriptor_unmap(&desc->async_tx);
} }


@ -326,8 +326,7 @@ static void mxs_dma_tasklet(unsigned long data)
{ {
struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data; struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
if (mxs_chan->desc.callback) dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL);
mxs_chan->desc.callback(mxs_chan->desc.callback_param);
} }
static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq) static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)


@ -1102,8 +1102,7 @@ static void nbpf_chan_tasklet(unsigned long data)
{ {
struct nbpf_channel *chan = (struct nbpf_channel *)data; struct nbpf_channel *chan = (struct nbpf_channel *)data;
struct nbpf_desc *desc, *tmp; struct nbpf_desc *desc, *tmp;
dma_async_tx_callback callback; struct dmaengine_desc_callback cb;
void *param;
while (!list_empty(&chan->done)) { while (!list_empty(&chan->done)) {
bool found = false, must_put, recycling = false; bool found = false, must_put, recycling = false;
@ -1151,14 +1150,12 @@ static void nbpf_chan_tasklet(unsigned long data)
must_put = false; must_put = false;
} }
callback = desc->async_tx.callback; dmaengine_desc_get_callback(&desc->async_tx, &cb);
param = desc->async_tx.callback_param;
/* ack and callback completed descriptor */ /* ack and callback completed descriptor */
spin_unlock_irq(&chan->lock); spin_unlock_irq(&chan->lock);
if (callback) dmaengine_desc_callback_invoke(&cb, NULL);
callback(param);
if (must_put) if (must_put)
nbpf_desc_put(desc); nbpf_desc_put(desc);


@ -357,14 +357,13 @@ static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
struct pch_dma_desc *desc) struct pch_dma_desc *desc)
{ {
struct dma_async_tx_descriptor *txd = &desc->txd; struct dma_async_tx_descriptor *txd = &desc->txd;
dma_async_tx_callback callback = txd->callback; struct dmaengine_desc_callback cb;
void *param = txd->callback_param;
dmaengine_desc_get_callback(txd, &cb);
list_splice_init(&desc->tx_list, &pd_chan->free_list); list_splice_init(&desc->tx_list, &pd_chan->free_list);
list_move(&desc->desc_node, &pd_chan->free_list); list_move(&desc->desc_node, &pd_chan->free_list);
if (callback) dmaengine_desc_callback_invoke(&cb, NULL);
callback(param);
} }
static void pdc_complete_all(struct pch_dma_chan *pd_chan) static void pdc_complete_all(struct pch_dma_chan *pd_chan)


@ -2039,14 +2039,12 @@ static void pl330_tasklet(unsigned long data)
} }
while (!list_empty(&pch->completed_list)) { while (!list_empty(&pch->completed_list)) {
dma_async_tx_callback callback; struct dmaengine_desc_callback cb;
void *callback_param;
desc = list_first_entry(&pch->completed_list, desc = list_first_entry(&pch->completed_list,
struct dma_pl330_desc, node); struct dma_pl330_desc, node);
callback = desc->txd.callback; dmaengine_desc_get_callback(&desc->txd, &cb);
callback_param = desc->txd.callback_param;
if (pch->cyclic) { if (pch->cyclic) {
desc->status = PREP; desc->status = PREP;
@ -2064,9 +2062,9 @@ static void pl330_tasklet(unsigned long data)
dma_descriptor_unmap(&desc->txd); dma_descriptor_unmap(&desc->txd);
if (callback) { if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irqrestore(&pch->lock, flags); spin_unlock_irqrestore(&pch->lock, flags);
callback(callback_param); dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&pch->lock, flags); spin_lock_irqsave(&pch->lock, flags);
} }
} }


@ -1485,10 +1485,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
/* call the callback (must not sleep or submit new /* call the callback (must not sleep or submit new
* operations to this channel) * operations to this channel)
*/ */
if (desc->async_tx.callback) dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
desc->async_tx.callback(
desc->async_tx.callback_param);
dma_descriptor_unmap(&desc->async_tx); dma_descriptor_unmap(&desc->async_tx);
} }


@ -111,6 +111,7 @@ static void hidma_process_completed(struct hidma_chan *mchan)
struct dma_async_tx_descriptor *desc; struct dma_async_tx_descriptor *desc;
dma_cookie_t last_cookie; dma_cookie_t last_cookie;
struct hidma_desc *mdesc; struct hidma_desc *mdesc;
struct hidma_desc *next;
unsigned long irqflags; unsigned long irqflags;
struct list_head list; struct list_head list;
@ -122,28 +123,36 @@ static void hidma_process_completed(struct hidma_chan *mchan)
spin_unlock_irqrestore(&mchan->lock, irqflags); spin_unlock_irqrestore(&mchan->lock, irqflags);
/* Execute callbacks and run dependencies */ /* Execute callbacks and run dependencies */
list_for_each_entry(mdesc, &list, node) { list_for_each_entry_safe(mdesc, next, &list, node) {
enum dma_status llstat; enum dma_status llstat;
struct dmaengine_desc_callback cb;
struct dmaengine_result result;
desc = &mdesc->desc; desc = &mdesc->desc;
last_cookie = desc->cookie;
spin_lock_irqsave(&mchan->lock, irqflags); spin_lock_irqsave(&mchan->lock, irqflags);
dma_cookie_complete(desc); dma_cookie_complete(desc);
spin_unlock_irqrestore(&mchan->lock, irqflags); spin_unlock_irqrestore(&mchan->lock, irqflags);
llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
if (desc->callback && (llstat == DMA_COMPLETE)) dmaengine_desc_get_callback(desc, &cb);
desc->callback(desc->callback_param);
last_cookie = desc->cookie;
dma_run_dependencies(desc); dma_run_dependencies(desc);
spin_lock_irqsave(&mchan->lock, irqflags);
list_move(&mdesc->node, &mchan->free);
if (llstat == DMA_COMPLETE) {
mchan->last_success = last_cookie;
result.result = DMA_TRANS_NOERROR;
} else
result.result = DMA_TRANS_ABORTED;
spin_unlock_irqrestore(&mchan->lock, irqflags);
dmaengine_desc_callback_invoke(&cb, &result);
} }
/* Free descriptors */
spin_lock_irqsave(&mchan->lock, irqflags);
list_splice_tail_init(&list, &mchan->free);
spin_unlock_irqrestore(&mchan->lock, irqflags);
} }
/* /*
@ -238,6 +247,19 @@ static void hidma_issue_pending(struct dma_chan *dmach)
hidma_ll_start(dmadev->lldev); hidma_ll_start(dmadev->lldev);
} }
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
dma_cookie_t last_success, dma_cookie_t last_used)
{
if (last_success <= last_used) {
if ((cookie <= last_success) || (cookie > last_used))
return true;
} else {
if ((cookie <= last_success) && (cookie > last_used))
return true;
}
return false;
}
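hidma_txn_is_success() above treats the half-open window (last_success, last_used] as "not known good", with the second branch covering wraparound of the cookie counter. A standalone sketch with made-up cookie values (not driver code) showing both cases:

#include <assert.h>
#include <stdbool.h>

typedef int dma_cookie_t;

/* same check as hidma_txn_is_success(): successful unless the cookie lies
 * in the (last_success, last_used] window, accounting for wraparound */
static bool txn_is_success(dma_cookie_t cookie, dma_cookie_t last_success,
			   dma_cookie_t last_used)
{
	if (last_success <= last_used)
		return cookie <= last_success || cookie > last_used;
	return cookie <= last_success && cookie > last_used;
}

int main(void)
{
	/* no wraparound: cookies 11..15 are outstanding or failed */
	assert(txn_is_success(10, 10, 15));
	assert(!txn_is_success(12, 10, 15));

	/* wrapped counter: last_used already restarted at a small value */
	assert(txn_is_success(100, 120, 5));
	assert(!txn_is_success(3, 120, 5));
	return 0;
}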
static enum dma_status hidma_tx_status(struct dma_chan *dmach, static enum dma_status hidma_tx_status(struct dma_chan *dmach,
dma_cookie_t cookie, dma_cookie_t cookie,
struct dma_tx_state *txstate) struct dma_tx_state *txstate)
@ -246,8 +268,13 @@ static enum dma_status hidma_tx_status(struct dma_chan *dmach,
enum dma_status ret; enum dma_status ret;
ret = dma_cookie_status(dmach, cookie, txstate); ret = dma_cookie_status(dmach, cookie, txstate);
if (ret == DMA_COMPLETE) if (ret == DMA_COMPLETE) {
return ret; bool is_success;
is_success = hidma_txn_is_success(cookie, mchan->last_success,
dmach->cookie);
return is_success ? ret : DMA_ERROR;
}
if (mchan->paused && (ret == DMA_IN_PROGRESS)) { if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
unsigned long flags; unsigned long flags;
@ -398,6 +425,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
hidma_process_completed(mchan); hidma_process_completed(mchan);
spin_lock_irqsave(&mchan->lock, irqflags); spin_lock_irqsave(&mchan->lock, irqflags);
mchan->last_success = 0;
list_splice_init(&mchan->active, &list); list_splice_init(&mchan->active, &list);
list_splice_init(&mchan->prepared, &list); list_splice_init(&mchan->prepared, &list);
list_splice_init(&mchan->completed, &list); list_splice_init(&mchan->completed, &list);
@ -413,14 +441,9 @@ static int hidma_terminate_channel(struct dma_chan *chan)
/* return all user requests */ /* return all user requests */
list_for_each_entry_safe(mdesc, tmp, &list, node) { list_for_each_entry_safe(mdesc, tmp, &list, node) {
struct dma_async_tx_descriptor *txd = &mdesc->desc; struct dma_async_tx_descriptor *txd = &mdesc->desc;
dma_async_tx_callback callback = mdesc->desc.callback;
void *param = mdesc->desc.callback_param;
dma_descriptor_unmap(txd); dma_descriptor_unmap(txd);
dmaengine_desc_get_callback_invoke(txd, NULL);
if (callback)
callback(param);
dma_run_dependencies(txd); dma_run_dependencies(txd);
/* move myself to free_list */ /* move myself to free_list */


@ -72,7 +72,6 @@ struct hidma_lldev {
u32 tre_write_offset; /* TRE write location */ u32 tre_write_offset; /* TRE write location */
struct tasklet_struct task; /* task delivering notifications */ struct tasklet_struct task; /* task delivering notifications */
struct tasklet_struct rst_task; /* task to reset HW */
DECLARE_KFIFO_PTR(handoff_fifo, DECLARE_KFIFO_PTR(handoff_fifo,
struct hidma_tre *); /* pending TREs FIFO */ struct hidma_tre *); /* pending TREs FIFO */
}; };
@ -89,6 +88,7 @@ struct hidma_chan {
bool allocated; bool allocated;
char dbg_name[16]; char dbg_name[16];
u32 dma_sig; u32 dma_sig;
dma_cookie_t last_success;
/* /*
* active descriptor on this channel * active descriptor on this channel


@ -380,27 +380,6 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
return 0; return 0;
} }
/*
* Abort all transactions and perform a reset.
*/
static void hidma_ll_abort(unsigned long arg)
{
struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
u8 err_code = HIDMA_EVRE_STATUS_ERROR;
u8 err_info = 0xFF;
int rc;
hidma_cleanup_pending_tre(lldev, err_info, err_code);
/* reset the channel for recovery */
rc = hidma_ll_setup(lldev);
if (rc) {
dev_err(lldev->dev, "channel reinitialize failed after error\n");
return;
}
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
}
/* /*
* The interrupt handler for HIDMA will try to consume as many pending * The interrupt handler for HIDMA will try to consume as many pending
* EVRE from the event queue as possible. Each EVRE has an associated * EVRE from the event queue as possible. Each EVRE has an associated
@ -454,13 +433,18 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
while (cause) { while (cause) {
if (cause & HIDMA_ERR_INT_MASK) { if (cause & HIDMA_ERR_INT_MASK) {
dev_err(lldev->dev, "error 0x%x, resetting...\n", dev_err(lldev->dev, "error 0x%x, disabling...\n",
cause); cause);
/* Clear out pending interrupts */ /* Clear out pending interrupts */
writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
tasklet_schedule(&lldev->rst_task); /* No further submissions. */
hidma_ll_disable(lldev);
/* Driver completes the txn and intimates the client.*/
hidma_cleanup_pending_tre(lldev, 0xFF,
HIDMA_EVRE_STATUS_ERROR);
goto out; goto out;
} }
@ -808,7 +792,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
return NULL; return NULL;
spin_lock_init(&lldev->lock); spin_lock_init(&lldev->lock);
tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev);
tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev); tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
lldev->initialized = 1; lldev->initialized = 1;
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
@ -831,7 +814,6 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres; required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
tasklet_kill(&lldev->task); tasklet_kill(&lldev->task);
tasklet_kill(&lldev->rst_task);
memset(lldev->trepool, 0, required_bytes); memset(lldev->trepool, 0, required_bytes);
lldev->trepool = NULL; lldev->trepool = NULL;
lldev->pending_tre_count = 0; lldev->pending_tre_count = 0;


@ -1389,21 +1389,18 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{ {
struct rcar_dmac_chan *chan = dev; struct rcar_dmac_chan *chan = dev;
struct rcar_dmac_desc *desc; struct rcar_dmac_desc *desc;
struct dmaengine_desc_callback cb;
spin_lock_irq(&chan->lock); spin_lock_irq(&chan->lock);
/* For cyclic transfers notify the user after every chunk. */ /* For cyclic transfers notify the user after every chunk. */
if (chan->desc.running && chan->desc.running->cyclic) { if (chan->desc.running && chan->desc.running->cyclic) {
dma_async_tx_callback callback;
void *callback_param;
desc = chan->desc.running; desc = chan->desc.running;
callback = desc->async_tx.callback; dmaengine_desc_get_callback(&desc->async_tx, &cb);
callback_param = desc->async_tx.callback_param;
if (callback) { if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irq(&chan->lock); spin_unlock_irq(&chan->lock);
callback(callback_param); dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irq(&chan->lock); spin_lock_irq(&chan->lock);
} }
} }
@ -1418,14 +1415,15 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
dma_cookie_complete(&desc->async_tx); dma_cookie_complete(&desc->async_tx);
list_del(&desc->node); list_del(&desc->node);
if (desc->async_tx.callback) { dmaengine_desc_get_callback(&desc->async_tx, &cb);
if (dmaengine_desc_callback_valid(&cb)) {
spin_unlock_irq(&chan->lock); spin_unlock_irq(&chan->lock);
/* /*
* We own the only reference to this descriptor, we can * We own the only reference to this descriptor, we can
* safely dereference it without holding the channel * safely dereference it without holding the channel
* lock. * lock.
*/ */
desc->async_tx.callback(desc->async_tx.callback_param); dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irq(&chan->lock); spin_lock_irq(&chan->lock);
} }


@ -330,10 +330,11 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
bool head_acked = false; bool head_acked = false;
dma_cookie_t cookie = 0; dma_cookie_t cookie = 0;
dma_async_tx_callback callback = NULL; dma_async_tx_callback callback = NULL;
void *param = NULL; struct dmaengine_desc_callback cb;
unsigned long flags; unsigned long flags;
LIST_HEAD(cyclic_list); LIST_HEAD(cyclic_list);
memset(&cb, 0, sizeof(cb));
spin_lock_irqsave(&schan->chan_lock, flags); spin_lock_irqsave(&schan->chan_lock, flags);
list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
struct dma_async_tx_descriptor *tx = &desc->async_tx; struct dma_async_tx_descriptor *tx = &desc->async_tx;
@ -367,8 +368,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
/* Call callback on the last chunk */ /* Call callback on the last chunk */
if (desc->mark == DESC_COMPLETED && tx->callback) { if (desc->mark == DESC_COMPLETED && tx->callback) {
desc->mark = DESC_WAITING; desc->mark = DESC_WAITING;
dmaengine_desc_get_callback(tx, &cb);
callback = tx->callback; callback = tx->callback;
param = tx->callback_param;
dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n", dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
tx->cookie, tx, schan->id); tx->cookie, tx, schan->id);
BUG_ON(desc->chunks != 1); BUG_ON(desc->chunks != 1);
@ -430,8 +431,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
spin_unlock_irqrestore(&schan->chan_lock, flags); spin_unlock_irqrestore(&schan->chan_lock, flags);
if (callback) dmaengine_desc_callback_invoke(&cb, NULL);
callback(param);
return callback; return callback;
} }
@ -885,9 +885,9 @@ bool shdma_reset(struct shdma_dev *sdev)
/* Complete all */ /* Complete all */
list_for_each_entry(sdesc, &dl, node) { list_for_each_entry(sdesc, &dl, node) {
struct dma_async_tx_descriptor *tx = &sdesc->async_tx; struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
sdesc->mark = DESC_IDLE; sdesc->mark = DESC_IDLE;
if (tx->callback) dmaengine_desc_get_callback_invoke(tx, NULL);
tx->callback(tx->callback_param);
} }
spin_lock(&schan->chan_lock); spin_lock(&schan->chan_lock);


@ -360,9 +360,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
list_for_each_entry(sdesc, &list, node) { list_for_each_entry(sdesc, &list, node) {
desc = &sdesc->desc; desc = &sdesc->desc;
if (desc->callback) dmaengine_desc_get_callback_invoke(desc, NULL);
desc->callback(desc->callback_param);
last_cookie = desc->cookie; last_cookie = desc->cookie;
dma_run_dependencies(desc); dma_run_dependencies(desc);
} }
@ -388,8 +386,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
desc = &sdesc->desc; desc = &sdesc->desc;
while (happened_cyclic != schan->completed_cyclic) { while (happened_cyclic != schan->completed_cyclic) {
if (desc->callback) dmaengine_desc_get_callback_invoke(desc, NULL);
desc->callback(desc->callback_param);
schan->completed_cyclic++; schan->completed_cyclic++;
} }
} }


@ -1570,8 +1570,7 @@ static void dma_tasklet(unsigned long data)
struct d40_desc *d40d; struct d40_desc *d40d;
unsigned long flags; unsigned long flags;
bool callback_active; bool callback_active;
dma_async_tx_callback callback; struct dmaengine_desc_callback cb;
void *callback_param;
spin_lock_irqsave(&d40c->lock, flags); spin_lock_irqsave(&d40c->lock, flags);
@ -1598,8 +1597,7 @@ static void dma_tasklet(unsigned long data)
/* Callback to client */ /* Callback to client */
callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
callback = d40d->txd.callback; dmaengine_desc_get_callback(&d40d->txd, &cb);
callback_param = d40d->txd.callback_param;
if (!d40d->cyclic) { if (!d40d->cyclic) {
if (async_tx_test_ack(&d40d->txd)) { if (async_tx_test_ack(&d40d->txd)) {
@ -1620,8 +1618,8 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&d40c->lock, flags); spin_unlock_irqrestore(&d40c->lock, flags);
if (callback_active && callback) if (callback_active)
callback(callback_param); dmaengine_desc_callback_invoke(&cb, NULL);
return; return;


@ -655,8 +655,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
static void tegra_dma_tasklet(unsigned long data) static void tegra_dma_tasklet(unsigned long data)
{ {
struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data; struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
dma_async_tx_callback callback = NULL; struct dmaengine_desc_callback cb;
void *callback_param = NULL;
struct tegra_dma_desc *dma_desc; struct tegra_dma_desc *dma_desc;
unsigned long flags; unsigned long flags;
int cb_count; int cb_count;
@ -666,13 +665,12 @@ static void tegra_dma_tasklet(unsigned long data)
dma_desc = list_first_entry(&tdc->cb_desc, dma_desc = list_first_entry(&tdc->cb_desc,
typeof(*dma_desc), cb_node); typeof(*dma_desc), cb_node);
list_del(&dma_desc->cb_node); list_del(&dma_desc->cb_node);
callback = dma_desc->txd.callback; dmaengine_desc_get_callback(&dma_desc->txd, &cb);
callback_param = dma_desc->txd.callback_param;
cb_count = dma_desc->cb_count; cb_count = dma_desc->cb_count;
dma_desc->cb_count = 0; dma_desc->cb_count = 0;
spin_unlock_irqrestore(&tdc->lock, flags); spin_unlock_irqrestore(&tdc->lock, flags);
while (cb_count-- && callback) while (cb_count--)
callback(callback_param); dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&tdc->lock, flags); spin_lock_irqsave(&tdc->lock, flags);
} }
spin_unlock_irqrestore(&tdc->lock, flags); spin_unlock_irqrestore(&tdc->lock, flags);


@ -226,8 +226,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
static void __td_finish(struct timb_dma_chan *td_chan) static void __td_finish(struct timb_dma_chan *td_chan)
{ {
dma_async_tx_callback callback; struct dmaengine_desc_callback cb;
void *param;
struct dma_async_tx_descriptor *txd; struct dma_async_tx_descriptor *txd;
struct timb_dma_desc *td_desc; struct timb_dma_desc *td_desc;
@ -252,8 +251,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
dma_cookie_complete(txd); dma_cookie_complete(txd);
td_chan->ongoing = false; td_chan->ongoing = false;
callback = txd->callback; dmaengine_desc_get_callback(txd, &cb);
param = txd->callback_param;
list_move(&td_desc->desc_node, &td_chan->free_list); list_move(&td_desc->desc_node, &td_chan->free_list);
@ -262,8 +260,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
* The API requires that no submissions are done from a * The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here * callback, so we don't need to drop the lock here
*/ */
if (callback) dmaengine_desc_callback_invoke(&cb, NULL);
callback(param);
} }
static u32 __td_ier_mask(struct timb_dma *td) static u32 __td_ier_mask(struct timb_dma *td)


@ -403,16 +403,14 @@ static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
struct txx9dmac_desc *desc) struct txx9dmac_desc *desc)
{ {
dma_async_tx_callback callback; struct dmaengine_desc_callback cb;
void *param;
struct dma_async_tx_descriptor *txd = &desc->txd; struct dma_async_tx_descriptor *txd = &desc->txd;
dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
txd->cookie, desc); txd->cookie, desc);
dma_cookie_complete(txd); dma_cookie_complete(txd);
callback = txd->callback; dmaengine_desc_get_callback(txd, &cb);
param = txd->callback_param;
txx9dmac_sync_desc_for_cpu(dc, desc); txx9dmac_sync_desc_for_cpu(dc, desc);
list_splice_init(&desc->tx_list, &dc->free_list); list_splice_init(&desc->tx_list, &dc->free_list);
@ -423,8 +421,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
* The API requires that no submissions are done from a * The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here * callback, so we don't need to drop the lock here
*/ */
if (callback) dmaengine_desc_callback_invoke(&cb, NULL);
callback(param);
dma_run_dependencies(txd); dma_run_dependencies(txd);
} }


@ -87,8 +87,7 @@ static void vchan_complete(unsigned long arg)
{ {
struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
struct virt_dma_desc *vd; struct virt_dma_desc *vd;
dma_async_tx_callback cb = NULL; struct dmaengine_desc_callback cb;
void *cb_data = NULL;
LIST_HEAD(head); LIST_HEAD(head);
spin_lock_irq(&vc->lock); spin_lock_irq(&vc->lock);
@ -96,18 +95,17 @@ static void vchan_complete(unsigned long arg)
vd = vc->cyclic; vd = vc->cyclic;
if (vd) { if (vd) {
vc->cyclic = NULL; vc->cyclic = NULL;
cb = vd->tx.callback; dmaengine_desc_get_callback(&vd->tx, &cb);
cb_data = vd->tx.callback_param; } else {
memset(&cb, 0, sizeof(cb));
} }
spin_unlock_irq(&vc->lock); spin_unlock_irq(&vc->lock);
if (cb) dmaengine_desc_callback_invoke(&cb, NULL);
cb(cb_data);
while (!list_empty(&head)) { while (!list_empty(&head)) {
vd = list_first_entry(&head, struct virt_dma_desc, node); vd = list_first_entry(&head, struct virt_dma_desc, node);
cb = vd->tx.callback; dmaengine_desc_get_callback(&vd->tx, &cb);
cb_data = vd->tx.callback_param;
list_del(&vd->node); list_del(&vd->node);
if (dmaengine_desc_test_reuse(&vd->tx)) if (dmaengine_desc_test_reuse(&vd->tx))
@ -115,8 +113,7 @@ static void vchan_complete(unsigned long arg)
else else
vc->desc_free(vd); vc->desc_free(vd);
if (cb) dmaengine_desc_callback_invoke(&cb, NULL);
cb(cb_data);
} }
} }


@ -608,8 +608,7 @@ static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
dma_cookie_complete(tx); dma_cookie_complete(tx);
/* Run the link descriptor callback function */ /* Run the link descriptor callback function */
if (tx->callback) dmaengine_desc_get_callback_invoke(tx, NULL);
tx->callback(tx->callback_param);
dma_descriptor_unmap(tx); dma_descriptor_unmap(tx);


@ -755,8 +755,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
spin_lock_irqsave(&chan->lock, flags); spin_lock_irqsave(&chan->lock, flags);
list_for_each_entry_safe(desc, next, &chan->done_list, node) { list_for_each_entry_safe(desc, next, &chan->done_list, node) {
dma_async_tx_callback callback; struct dmaengine_desc_callback cb;
void *callback_param;
if (desc->cyclic) { if (desc->cyclic) {
xilinx_dma_chan_handle_cyclic(chan, desc, &flags); xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
@ -767,11 +766,10 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
list_del(&desc->node); list_del(&desc->node);
/* Run the link descriptor callback function */ /* Run the link descriptor callback function */
callback = desc->async_tx.callback; dmaengine_desc_get_callback(&desc->async_tx, &cb);
callback_param = desc->async_tx.callback_param; if (dmaengine_desc_callback_valid(&cb)) {
if (callback) {
spin_unlock_irqrestore(&chan->lock, flags); spin_unlock_irqrestore(&chan->lock, flags);
callback(callback_param); dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&chan->lock, flags); spin_lock_irqsave(&chan->lock, flags);
} }


@ -102,13 +102,16 @@ struct ntb_queue_entry {
void *buf; void *buf;
unsigned int len; unsigned int len;
unsigned int flags; unsigned int flags;
int retries;
int errors;
unsigned int tx_index;
unsigned int rx_index;
struct ntb_transport_qp *qp; struct ntb_transport_qp *qp;
union { union {
struct ntb_payload_header __iomem *tx_hdr; struct ntb_payload_header __iomem *tx_hdr;
struct ntb_payload_header *rx_hdr; struct ntb_payload_header *rx_hdr;
}; };
unsigned int index;
}; };
struct ntb_rx_info { struct ntb_rx_info {
@ -259,6 +262,12 @@ enum {
static void ntb_transport_rxc_db(unsigned long data); static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops; static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client; static struct ntb_client ntb_transport_client;
static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
struct ntb_queue_entry *entry);
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
static int ntb_transport_bus_match(struct device *dev, static int ntb_transport_bus_match(struct device *dev,
struct device_driver *drv) struct device_driver *drv)
@ -1229,7 +1238,7 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
break; break;
entry->rx_hdr->flags = 0; entry->rx_hdr->flags = 0;
iowrite32(entry->index, &qp->rx_info->entry); iowrite32(entry->rx_index, &qp->rx_info->entry);
cb_data = entry->cb_data; cb_data = entry->cb_data;
len = entry->len; len = entry->len;
@ -1247,10 +1256,36 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
} }
static void ntb_rx_copy_callback(void *data) static void ntb_rx_copy_callback(void *data,
const struct dmaengine_result *res)
{ {
struct ntb_queue_entry *entry = data; struct ntb_queue_entry *entry = data;
/* we need to check DMA results if we are using DMA */
if (res) {
enum dmaengine_tx_result dma_err = res->result;
switch (dma_err) {
case DMA_TRANS_READ_FAILED:
case DMA_TRANS_WRITE_FAILED:
entry->errors++;
case DMA_TRANS_ABORTED:
{
struct ntb_transport_qp *qp = entry->qp;
void *offset = qp->rx_buff + qp->rx_max_frame *
qp->rx_index;
ntb_memcpy_rx(entry, offset);
qp->rx_memcpy++;
return;
}
case DMA_TRANS_NOERROR:
default:
break;
}
}
entry->flags |= DESC_DONE_FLAG; entry->flags |= DESC_DONE_FLAG;
ntb_complete_rxc(entry->qp); ntb_complete_rxc(entry->qp);
@ -1266,10 +1301,10 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
/* Ensure that the data is fully copied out before clearing the flag */ /* Ensure that the data is fully copied out before clearing the flag */
wmb(); wmb();
ntb_rx_copy_callback(entry); ntb_rx_copy_callback(entry, NULL);
} }
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
{ {
struct dma_async_tx_descriptor *txd; struct dma_async_tx_descriptor *txd;
struct ntb_transport_qp *qp = entry->qp; struct ntb_transport_qp *qp = entry->qp;
@ -1282,13 +1317,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
int retries = 0; int retries = 0;
len = entry->len; len = entry->len;
if (!chan)
goto err;
if (len < copy_bytes)
goto err;
device = chan->device; device = chan->device;
pay_off = (size_t)offset & ~PAGE_MASK; pay_off = (size_t)offset & ~PAGE_MASK;
buff_off = (size_t)buf & ~PAGE_MASK; buff_off = (size_t)buf & ~PAGE_MASK;
@ -1316,7 +1344,8 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
unmap->from_cnt = 1; unmap->from_cnt = 1;
for (retries = 0; retries < DMA_RETRIES; retries++) { for (retries = 0; retries < DMA_RETRIES; retries++) {
txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], txd = device->device_prep_dma_memcpy(chan,
unmap->addr[1],
unmap->addr[0], len, unmap->addr[0], len,
DMA_PREP_INTERRUPT); DMA_PREP_INTERRUPT);
if (txd) if (txd)
@ -1331,7 +1360,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
goto err_get_unmap; goto err_get_unmap;
} }
txd->callback = ntb_rx_copy_callback; txd->callback_result = ntb_rx_copy_callback;
txd->callback_param = entry; txd->callback_param = entry;
dma_set_unmap(txd, unmap); dma_set_unmap(txd, unmap);
@ -1345,12 +1374,37 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
qp->rx_async++; qp->rx_async++;
return; return 0;
err_set_unmap: err_set_unmap:
dmaengine_unmap_put(unmap); dmaengine_unmap_put(unmap);
err_get_unmap: err_get_unmap:
dmaengine_unmap_put(unmap); dmaengine_unmap_put(unmap);
err:
return -ENXIO;
}
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
struct ntb_transport_qp *qp = entry->qp;
struct dma_chan *chan = qp->rx_dma_chan;
int res;
if (!chan)
goto err;
if (entry->len < copy_bytes)
goto err;
res = ntb_async_rx_submit(entry, offset);
if (res < 0)
goto err;
if (!entry->retries)
qp->rx_async++;
return;
err: err:
ntb_memcpy_rx(entry, offset); ntb_memcpy_rx(entry, offset);
qp->rx_memcpy++; qp->rx_memcpy++;
@ -1397,7 +1451,7 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
} }
entry->rx_hdr = hdr; entry->rx_hdr = hdr;
entry->index = qp->rx_index; entry->rx_index = qp->rx_index;
if (hdr->len > entry->len) { if (hdr->len > entry->len) {
dev_dbg(&qp->ndev->pdev->dev, dev_dbg(&qp->ndev->pdev->dev,
@ -1467,12 +1521,39 @@ static void ntb_transport_rxc_db(unsigned long data)
} }
} }
static void ntb_tx_copy_callback(void *data) static void ntb_tx_copy_callback(void *data,
const struct dmaengine_result *res)
{ {
struct ntb_queue_entry *entry = data; struct ntb_queue_entry *entry = data;
struct ntb_transport_qp *qp = entry->qp; struct ntb_transport_qp *qp = entry->qp;
struct ntb_payload_header __iomem *hdr = entry->tx_hdr; struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
/* a DMA result is only passed in when the DMA engine was used; res is NULL for CPU copies */
if (res) {
enum dmaengine_tx_result dma_err = res->result;
switch (dma_err) {
case DMA_TRANS_READ_FAILED:
case DMA_TRANS_WRITE_FAILED:
entry->errors++;
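/* fall through: recover below by resubmitting via CPU */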
case DMA_TRANS_ABORTED:
{
void __iomem *offset =
qp->tx_mw + qp->tx_max_frame *
entry->tx_index;
/* resubmit via CPU */
ntb_memcpy_tx(entry, offset);
qp->tx_memcpy++;
return;
}
case DMA_TRANS_NOERROR:
default:
break;
}
}
iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags); iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num)); ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
@ -1507,40 +1588,25 @@ static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
/* Ensure that the data is fully copied out before setting the flags */ /* Ensure that the data is fully copied out before setting the flags */
wmb(); wmb();
ntb_tx_copy_callback(entry); ntb_tx_copy_callback(entry, NULL);
} }
static void ntb_async_tx(struct ntb_transport_qp *qp, static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
struct ntb_queue_entry *entry) struct ntb_queue_entry *entry)
{ {
struct ntb_payload_header __iomem *hdr;
struct dma_async_tx_descriptor *txd; struct dma_async_tx_descriptor *txd;
struct dma_chan *chan = qp->tx_dma_chan; struct dma_chan *chan = qp->tx_dma_chan;
struct dma_device *device; struct dma_device *device;
size_t len = entry->len;
void *buf = entry->buf;
size_t dest_off, buff_off; size_t dest_off, buff_off;
struct dmaengine_unmap_data *unmap; struct dmaengine_unmap_data *unmap;
dma_addr_t dest; dma_addr_t dest;
dma_cookie_t cookie; dma_cookie_t cookie;
void __iomem *offset;
size_t len = entry->len;
void *buf = entry->buf;
int retries = 0; int retries = 0;
offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
entry->tx_hdr = hdr;
iowrite32(entry->len, &hdr->len);
iowrite32((u32)qp->tx_pkts, &hdr->ver);
if (!chan)
goto err;
if (len < copy_bytes)
goto err;
device = chan->device; device = chan->device;
dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index; dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
buff_off = (size_t)buf & ~PAGE_MASK; buff_off = (size_t)buf & ~PAGE_MASK;
dest_off = (size_t)dest & ~PAGE_MASK; dest_off = (size_t)dest & ~PAGE_MASK;
@ -1560,8 +1626,9 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
unmap->to_cnt = 1; unmap->to_cnt = 1;
for (retries = 0; retries < DMA_RETRIES; retries++) { for (retries = 0; retries < DMA_RETRIES; retries++) {
txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], txd = device->device_prep_dma_memcpy(chan, dest,
len, DMA_PREP_INTERRUPT); unmap->addr[0], len,
DMA_PREP_INTERRUPT);
if (txd) if (txd)
break; break;
@ -1574,7 +1641,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
goto err_get_unmap; goto err_get_unmap;
} }
txd->callback = ntb_tx_copy_callback; txd->callback_result = ntb_tx_copy_callback;
txd->callback_param = entry; txd->callback_param = entry;
dma_set_unmap(txd, unmap); dma_set_unmap(txd, unmap);
@ -1585,13 +1652,47 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
dmaengine_unmap_put(unmap); dmaengine_unmap_put(unmap);
dma_async_issue_pending(chan); dma_async_issue_pending(chan);
qp->tx_async++;
return; return 0;
err_set_unmap: err_set_unmap:
dmaengine_unmap_put(unmap); dmaengine_unmap_put(unmap);
err_get_unmap: err_get_unmap:
dmaengine_unmap_put(unmap); dmaengine_unmap_put(unmap);
err:
return -ENXIO;
}
static void ntb_async_tx(struct ntb_transport_qp *qp,
struct ntb_queue_entry *entry)
{
struct ntb_payload_header __iomem *hdr;
struct dma_chan *chan = qp->tx_dma_chan;
void __iomem *offset;
int res;
entry->tx_index = qp->tx_index;
offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
entry->tx_hdr = hdr;
iowrite32(entry->len, &hdr->len);
iowrite32((u32)qp->tx_pkts, &hdr->ver);
if (!chan)
goto err;
if (entry->len < copy_bytes)
goto err;
res = ntb_async_tx_submit(qp, entry);
if (res < 0)
goto err;
if (!entry->retries)
qp->tx_async++;
return;
err: err:
ntb_memcpy_tx(entry, offset); ntb_memcpy_tx(entry, offset);
qp->tx_memcpy++; qp->tx_memcpy++;
@ -1928,6 +2029,9 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
entry->buf = data; entry->buf = data;
entry->len = len; entry->len = len;
entry->flags = 0; entry->flags = 0;
entry->retries = 0;
entry->errors = 0;
entry->rx_index = 0;
ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
@ -1970,6 +2074,9 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
entry->buf = data; entry->buf = data;
entry->len = len; entry->len = len;
entry->flags = 0; entry->flags = 0;
entry->errors = 0;
entry->retries = 0;
entry->tx_index = 0;
rc = ntb_process_tx(qp, entry); rc = ntb_process_tx(qp, entry);
if (rc) if (rc)

View File

@ -441,6 +441,21 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
typedef void (*dma_async_tx_callback)(void *dma_async_param); typedef void (*dma_async_tx_callback)(void *dma_async_param);
enum dmaengine_tx_result {
DMA_TRANS_NOERROR = 0, /* SUCCESS */
DMA_TRANS_READ_FAILED, /* Source DMA read failed */
DMA_TRANS_WRITE_FAILED, /* Destination DMA write failed */
DMA_TRANS_ABORTED, /* Op never submitted / aborted */
};
struct dmaengine_result {
enum dmaengine_tx_result result;
u32 residue;
};
typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
const struct dmaengine_result *result);
struct dmaengine_unmap_data { struct dmaengine_unmap_data {
u8 map_cnt; u8 map_cnt;
u8 to_cnt; u8 to_cnt;
@ -478,6 +493,7 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
int (*desc_free)(struct dma_async_tx_descriptor *tx); int (*desc_free)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback; dma_async_tx_callback callback;
dma_async_tx_callback_result callback_result;
void *callback_param; void *callback_param;
struct dmaengine_unmap_data *unmap; struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
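As a usage note, a client opting into the new hook assigns callback_result instead of the legacy callback. A rough sketch under stated assumptions (my_done(), struct my_ctx and its fields are hypothetical; chan, txd and ctx come from the usual descriptor-prep path):
/* Hypothetical client context; only the fields used below are shown. */
struct my_ctx {
	struct device *dev;
	struct completion done;
	bool failed;
};

static void my_done(void *param, const struct dmaengine_result *res)
{
	struct my_ctx *ctx = param;

	/* be defensive: a missing result is treated as success */
	if (res && res->result != DMA_TRANS_NOERROR) {
		/* res->residue bytes were left untransferred */
		dev_warn(ctx->dev, "dma error %d, residue %u\n",
			 res->result, res->residue);
		ctx->failed = true;
	}
	complete(&ctx->done);
}

	/* ... after preparing a memcpy descriptor txd on channel chan ... */
	txd->callback_result = my_done;	/* preferred over the legacy callback */
	txd->callback_param = ctx;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);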