Merge branch 'fixes' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine fixes from Vinod Koul.

* 'fixes' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine/amba-pl08x: reset phychan_hold on terminate all
  dma: pl330: fix a couple of compilation warnings
  dma/ste_dma40: fix erroneous comparison
  dma/ste_dma40: explicitly include regulator consumer header
  dma40: Improve the logic of stopping logical chan
  dmaengine: at_hdmac: remove clear-on-read in atc_dostart()
  dma: mxs-dma: enable channel in device_issue_pending call
  dmaengine: imx-dma: dont complete descriptor for cyclic dma
commit acdf2fc860
drivers/dma/amba-pl08x.c
@@ -1429,6 +1429,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			 * signal
 			 */
 			release_phy_channel(plchan);
+			plchan->phychan_hold = 0;
 		}
 		/* Dequeue jobs and free LLIs */
 		if (plchan->at) {

drivers/dma/at_hdmac.c
@@ -221,10 +221,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 
 	vdbg_dump_regs(atchan);
 
-	/* clear any pending interrupt */
-	while (dma_readl(atdma, EBCISR))
-		cpu_relax();
-
 	channel_writel(atchan, SADDR, 0);
 	channel_writel(atchan, DADDR, 0);
 	channel_writel(atchan, CTRLA, 0);

drivers/dma/imx-dma.c
@@ -571,11 +571,14 @@ static void imxdma_tasklet(unsigned long data)
 	if (desc->desc.callback)
 		desc->desc.callback(desc->desc.callback_param);
 
-	dma_cookie_complete(&desc->desc);
-
-	/* If we are dealing with a cyclic descriptor keep it on ld_active */
+	/* If we are dealing with a cyclic descriptor keep it on ld_active
+	 * and dont mark the descriptor as complete.
+	 * Only in non-cyclic cases it would be marked as complete
+	 */
 	if (imxdma_chan_is_doing_cyclic(imxdmac))
 		goto out;
+	else
+		dma_cookie_complete(&desc->desc);
 
 	/* Free 2D slot if it was an interleaved transfer */
 	if (imxdmac->enabled_2d) {

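Reviewer note: the rule enforced here is that dma_cookie_complete() is a one-shot operation. A cyclic descriptor stays on ld_active and fires its callback once per period, so its cookie must stay pending or dma_tx_status() would report completion after the first period. Below is a minimal sketch of that control flow; struct my_desc and my_tasklet_complete() are hypothetical names, not imx-dma internals, and dma_cookie_complete() comes from the driver-internal drivers/dma/dmaengine.h helpers.

/* Sketch only: hypothetical descriptor type; the cyclic branch is the point. */
#include <linux/dmaengine.h>
#include "dmaengine.h"		/* driver-internal dma_cookie_complete() */

struct my_desc {
	struct dma_async_tx_descriptor txd;
	bool cyclic;
};

static void my_tasklet_complete(struct my_desc *desc)
{
	if (desc->txd.callback)
		desc->txd.callback(desc->txd.callback_param);

	if (desc->cyclic)
		return;				/* keep cookie pending, desc stays active */

	dma_cookie_complete(&desc->txd);	/* one-shot transfer really is done */
	/* ...move desc off the active list / free it here... */
}
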
drivers/dma/mxs-dma.c
@@ -201,10 +201,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
 
 static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
-
-	mxs_dma_enable_chan(mxs_chan);
-
 	return dma_cookie_assign(tx);
 }
 
@@ -558,9 +554,9 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
 
 static void mxs_dma_issue_pending(struct dma_chan *chan)
 {
-	/*
-	 * Nothing to do. We only have a single descriptor.
-	 */
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+
+	mxs_dma_enable_chan(mxs_chan);
 }
 
 static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)

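Reviewer note: with the channel no longer kicked at tx_submit() time, mxs-dma now follows the documented dmaengine contract: work queued with dmaengine_submit() only starts once the client calls dma_async_issue_pending(), which is exactly what the mxs-mmc and gpmi hunks further down add. A hedged client-side sketch of that sequence follows; chan, buf, len and the done() callback are assumed to come from elsewhere, and it assumes a kernel where the generic dmaengine_prep_slave_single() helper from linux/dmaengine.h is available.

/* Generic dmaengine slave-client sketch, not code from any of these drivers. */
#include <linux/dmaengine.h>

static int start_rx(struct dma_chan *chan, dma_addr_t buf, size_t len,
		    dma_async_tx_callback done, void *arg)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = done;
	desc->callback_param = arg;

	cookie = dmaengine_submit(desc);	/* queues the descriptor ... */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* ... and this actually starts it */
	return 0;
}
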
drivers/dma/pl330.c
@@ -2225,12 +2225,9 @@ static inline void free_desc_list(struct list_head *list)
 {
 	struct dma_pl330_dmac *pdmac;
 	struct dma_pl330_desc *desc;
-	struct dma_pl330_chan *pch;
+	struct dma_pl330_chan *pch = NULL;
 	unsigned long flags;
 
-	if (list_empty(list))
-		return;
-
 	/* Finish off the work list */
 	list_for_each_entry(desc, list, node) {
 		dma_async_tx_callback callback;

@@ -2247,6 +2244,10 @@ static inline void free_desc_list(struct list_head *list)
 		desc->pchan = NULL;
 	}
 
+	/* pch will be unset if list was empty */
+	if (!pch)
+		return;
+
 	pdmac = pch->dmac;
 
 	spin_lock_irqsave(&pdmac->pool_lock, flags);

@@ -2257,12 +2258,9 @@ static inline void free_desc_list(struct list_head *list)
 static inline void handle_cyclic_desc_list(struct list_head *list)
 {
 	struct dma_pl330_desc *desc;
-	struct dma_pl330_chan *pch;
+	struct dma_pl330_chan *pch = NULL;
 	unsigned long flags;
 
-	if (list_empty(list))
-		return;
-
 	list_for_each_entry(desc, list, node) {
 		dma_async_tx_callback callback;
 

@@ -2274,6 +2272,10 @@ static inline void handle_cyclic_desc_list(struct list_head *list)
 		callback(desc->txd.callback_param);
 	}
 
+	/* pch will be unset if list was empty */
+	if (!pch)
+		return;
+
 	spin_lock_irqsave(&pch->lock, flags);
 	list_splice_tail_init(list, &pch->work_list);
 	spin_unlock_irqrestore(&pch->lock, flags);

@@ -2926,8 +2928,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	INIT_LIST_HEAD(&pd->channels);
 
 	/* Initialize channel parameters */
-	num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
-			(u8)pi->pcfg.num_chan);
+	if (pdat)
+		num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
+	else
+		num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
+
 	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
 
 	for (i = 0; i < num_chan; i++) {

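Reviewer note: the warning being silenced was "pch may be used uninitialized"; the fix is the usual idiom of initializing the pointer the loop is expected to set to NULL and testing it after the loop, which also covers the empty-list case the removed list_empty() check used to handle. A standalone userspace model of the pattern (nothing pl330-specific):

/* Userspace model: NULL-init the pointer the loop should set, then bail
 * out after the loop if it was never set (i.e. the list was empty).
 */
#include <stdio.h>
#include <stddef.h>

struct node {
	struct node *next;
	int value;
};

static void drain(struct node *list)
{
	struct node *last = NULL;	/* stays NULL if the list was empty */
	struct node *n;

	for (n = list; n; n = n->next)
		last = n;		/* loop body "finds" the owner */

	/* last will be unset if the list was empty */
	if (!last)
		return;

	printf("last value: %d\n", last->value);
}

int main(void)
{
	struct node b = { NULL, 2 }, a = { &b, 1 };

	drain(&a);	/* prints "last value: 2" */
	drain(NULL);	/* empty list: returns quietly */
	return 0;
}
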
drivers/dma/ste_dma40.c
@@ -18,6 +18,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/err.h>
 #include <linux/amba/bus.h>
+#include <linux/regulator/consumer.h>
 
 #include <plat/ste_dma40.h>
 

@@ -68,6 +69,22 @@ enum d40_command {
 	D40_DMA_SUSPENDED = 3
 };
 
+/*
+ * enum d40_events - The different Event Enables for the event lines.
+ *
+ * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
+ * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
+ * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line.
+ * @D40_ROUND_EVENTLINE: Status check for event line.
+ */
+
+enum d40_events {
+	D40_DEACTIVATE_EVENTLINE = 0,
+	D40_ACTIVATE_EVENTLINE = 1,
+	D40_SUSPEND_REQ_EVENTLINE = 2,
+	D40_ROUND_EVENTLINE = 3
+};
+
 /*
  * These are the registers that has to be saved and later restored
  * when the DMA hw is powered off.

@@ -870,8 +887,8 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
 }
 #endif
 
-static int d40_channel_execute_command(struct d40_chan *d40c,
-				       enum d40_command command)
+static int __d40_execute_command_phy(struct d40_chan *d40c,
+				     enum d40_command command)
 {
 	u32 status;
 	int i;

@@ -880,6 +897,12 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
 	unsigned long flags;
 	u32 wmask;
 
+	if (command == D40_DMA_STOP) {
+		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
+		if (ret)
+			return ret;
+	}
+
 	spin_lock_irqsave(&d40c->base->execmd_lock, flags);
 
 	if (d40c->phy_chan->num % 2 == 0)

@@ -973,67 +996,109 @@ static void d40_term_all(struct d40_chan *d40c)
 	}
 
 	d40c->pending_tx = 0;
 	d40c->busy = false;
 }
 
-static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
-				   u32 event, int reg)
+static void __d40_config_set_event(struct d40_chan *d40c,
+				   enum d40_events event_type, u32 event,
+				   int reg)
 {
 	void __iomem *addr = chan_base(d40c) + reg;
 	int tries;
+	u32 status;
+
+	switch (event_type) {
+
+	case D40_DEACTIVATE_EVENTLINE:
 
-	if (!enable) {
 		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
 		       | ~D40_EVENTLINE_MASK(event), addr);
-		return;
-	}
+		break;
+
+	case D40_SUSPEND_REQ_EVENTLINE:
+		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
+			  D40_EVENTLINE_POS(event);
+
+		if (status == D40_DEACTIVATE_EVENTLINE ||
+		    status == D40_SUSPEND_REQ_EVENTLINE)
+			break;
 
+		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
+		       | ~D40_EVENTLINE_MASK(event), addr);
+
+		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
+
+			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
+				  D40_EVENTLINE_POS(event);
+
+			cpu_relax();
+			/*
+			 * Reduce the number of bus accesses while
+			 * waiting for the DMA to suspend.
+			 */
+			udelay(3);
+
+			if (status == D40_DEACTIVATE_EVENTLINE)
+				break;
+		}
+
+		if (tries == D40_SUSPEND_MAX_IT) {
+			chan_err(d40c,
+				"unable to stop the event_line chl %d (log: %d)"
+				"status %x\n", d40c->phy_chan->num,
+				d40c->log_num, status);
+		}
+		break;
+
+	case D40_ACTIVATE_EVENTLINE:
 	/*
 	 * The hardware sometimes doesn't register the enable when src and dst
 	 * event lines are active on the same logical channel. Retry to ensure
 	 * it does. Usually only one retry is sufficient.
 	 */
-	tries = 100;
-	while (--tries) {
-		writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
-		       | ~D40_EVENTLINE_MASK(event), addr);
+		tries = 100;
+		while (--tries) {
+			writel((D40_ACTIVATE_EVENTLINE <<
+				D40_EVENTLINE_POS(event)) |
+				~D40_EVENTLINE_MASK(event), addr);
 
-		if (readl(addr) & D40_EVENTLINE_MASK(event))
-			break;
-	}
+			if (readl(addr) & D40_EVENTLINE_MASK(event))
+				break;
+		}
 
-	if (tries != 99)
-		dev_dbg(chan2dev(d40c),
-			"[%s] workaround enable S%cLNK (%d tries)\n",
-			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
-			100 - tries);
+		if (tries != 99)
+			dev_dbg(chan2dev(d40c),
+				"[%s] workaround enable S%cLNK (%d tries)\n",
+				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
+				100 - tries);
 
-	WARN_ON(!tries);
-}
+		WARN_ON(!tries);
+		break;
 
-static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
+	case D40_ROUND_EVENTLINE:
+		BUG();
+		break;
+
+	}
+}
+
+static void d40_config_set_event(struct d40_chan *d40c,
+				 enum d40_events event_type)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
 
 	/* Enable event line connected to device (or memcpy) */
 	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
 
-		__d40_config_set_event(d40c, do_enable, event,
+		__d40_config_set_event(d40c, event_type, event,
 				       D40_CHAN_REG_SSLNK);
 	}
 
 	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
 
-		__d40_config_set_event(d40c, do_enable, event,
+		__d40_config_set_event(d40c, event_type, event,
 				       D40_CHAN_REG_SDLNK);
 	}
 
 	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
 }
 
 static u32 d40_chan_has_events(struct d40_chan *d40c)

@@ -1047,6 +1112,64 @@ static u32 d40_chan_has_events(struct d40_chan *d40c)
 	return val;
 }
 
+static int
+__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
+{
+	unsigned long flags;
+	int ret = 0;
+	u32 active_status;
+	void __iomem *active_reg;
+
+	if (d40c->phy_chan->num % 2 == 0)
+		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+	else
+		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+
+	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+
+	switch (command) {
+	case D40_DMA_STOP:
+	case D40_DMA_SUSPEND_REQ:
+
+		active_status = (readl(active_reg) &
+				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+				 D40_CHAN_POS(d40c->phy_chan->num);
+
+		if (active_status == D40_DMA_RUN)
+			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
+		else
+			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
+
+		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
+			ret = __d40_execute_command_phy(d40c, command);
+
+		break;
+
+	case D40_DMA_RUN:
+
+		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
+		ret = __d40_execute_command_phy(d40c, command);
+		break;
+
+	case D40_DMA_SUSPENDED:
+		BUG();
+		break;
+	}
+
+	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
+	return ret;
+}
+
+static int d40_channel_execute_command(struct d40_chan *d40c,
+				       enum d40_command command)
+{
+	if (chan_is_logical(d40c))
+		return __d40_execute_command_log(d40c, command);
+	else
+		return __d40_execute_command_phy(d40c, command);
+}
+
 static u32 d40_get_prmo(struct d40_chan *d40c)
 {
 	static const unsigned int phy_map[] = {

|
|||
spin_lock_irqsave(&d40c->lock, flags);
|
||||
|
||||
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
|
||||
if (res == 0) {
|
||||
if (chan_is_logical(d40c)) {
|
||||
d40_config_set_event(d40c, false);
|
||||
/* Resume the other logical channels if any */
|
||||
if (d40_chan_has_events(d40c))
|
||||
res = d40_channel_execute_command(d40c,
|
||||
D40_DMA_RUN);
|
||||
}
|
||||
}
|
||||
|
||||
pm_runtime_mark_last_busy(d40c->base->dev);
|
||||
pm_runtime_put_autosuspend(d40c->base->dev);
|
||||
spin_unlock_irqrestore(&d40c->lock, flags);
|
||||
|
@ -1174,45 +1289,17 @@ static int d40_resume(struct d40_chan *d40c)
|
|||
|
||||
spin_lock_irqsave(&d40c->lock, flags);
|
||||
pm_runtime_get_sync(d40c->base->dev);
|
||||
if (d40c->base->rev == 0)
|
||||
if (chan_is_logical(d40c)) {
|
||||
res = d40_channel_execute_command(d40c,
|
||||
D40_DMA_SUSPEND_REQ);
|
||||
goto no_suspend;
|
||||
}
|
||||
|
||||
/* If bytes left to transfer or linked tx resume job */
|
||||
if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
|
||||
|
||||
if (chan_is_logical(d40c))
|
||||
d40_config_set_event(d40c, true);
|
||||
|
||||
if (d40_residue(d40c) || d40_tx_is_linked(d40c))
|
||||
res = d40_channel_execute_command(d40c, D40_DMA_RUN);
|
||||
}
|
||||
|
||||
no_suspend:
|
||||
pm_runtime_mark_last_busy(d40c->base->dev);
|
||||
pm_runtime_put_autosuspend(d40c->base->dev);
|
||||
spin_unlock_irqrestore(&d40c->lock, flags);
|
||||
return res;
|
||||
}
|
||||
|
||||
static int d40_terminate_all(struct d40_chan *chan)
|
||||
{
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
ret = d40_pause(chan);
|
||||
if (!ret && chan_is_physical(chan))
|
||||
ret = d40_channel_execute_command(chan, D40_DMA_STOP);
|
||||
|
||||
spin_lock_irqsave(&chan->lock, flags);
|
||||
d40_term_all(chan);
|
||||
spin_unlock_irqrestore(&chan->lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
|
||||
{
|
||||
struct d40_chan *d40c = container_of(tx->chan,
|
||||
|
@@ -1232,20 +1319,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 
 static int d40_start(struct d40_chan *d40c)
 {
-	if (d40c->base->rev == 0) {
-		int err;
-
-		if (chan_is_logical(d40c)) {
-			err = d40_channel_execute_command(d40c,
-							  D40_DMA_SUSPEND_REQ);
-			if (err)
-				return err;
-		}
-	}
-
-	if (chan_is_logical(d40c))
-		d40_config_set_event(d40c, true);
-
 	return d40_channel_execute_command(d40c, D40_DMA_RUN);
 }
 

@@ -1258,10 +1331,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
 	d40d = d40_first_queued(d40c);
 
 	if (d40d != NULL) {
-		if (!d40c->busy)
+		if (!d40c->busy) {
 			d40c->busy = true;
-
-		pm_runtime_get_sync(d40c->base->dev);
+			pm_runtime_get_sync(d40c->base->dev);
+		}
 
 		/* Remove from queue */
 		d40_desc_remove(d40d);

@@ -1388,8 +1461,8 @@ static void dma_tasklet(unsigned long data)
 
 	return;
 
-err:
-	/* Rescue manoeuvre if receiving double interrupts */
+ err:
+	/* Rescue manouver if receiving double interrupts */
 	if (d40c->pending_tx > 0)
 		d40c->pending_tx--;
 	spin_unlock_irqrestore(&d40c->lock, flags);

@@ -1770,7 +1843,6 @@ static int d40_config_memcpy(struct d40_chan *d40c)
 	return 0;
 }
 
-
 static int d40_free_dma(struct d40_chan *d40c)
 {
 

|
|||
}
|
||||
|
||||
pm_runtime_get_sync(d40c->base->dev);
|
||||
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
|
||||
if (res) {
|
||||
chan_err(d40c, "suspend failed\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (chan_is_logical(d40c)) {
|
||||
/* Release logical channel, deactivate the event line */
|
||||
|
||||
d40_config_set_event(d40c, false);
|
||||
d40c->base->lookup_log_chans[d40c->log_num] = NULL;
|
||||
|
||||
/*
|
||||
* Check if there are more logical allocation
|
||||
* on this phy channel.
|
||||
*/
|
||||
if (!d40_alloc_mask_free(phy, is_src, event)) {
|
||||
/* Resume the other logical channels if any */
|
||||
if (d40_chan_has_events(d40c)) {
|
||||
res = d40_channel_execute_command(d40c,
|
||||
D40_DMA_RUN);
|
||||
if (res)
|
||||
chan_err(d40c,
|
||||
"Executing RUN command\n");
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
(void) d40_alloc_mask_free(phy, is_src, 0);
|
||||
}
|
||||
|
||||
/* Release physical channel */
|
||||
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
|
||||
if (res) {
|
||||
chan_err(d40c, "Failed to stop channel\n");
|
||||
chan_err(d40c, "stop failed\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
|
||||
|
||||
if (chan_is_logical(d40c))
|
||||
d40c->base->lookup_log_chans[d40c->log_num] = NULL;
|
||||
else
|
||||
d40c->base->lookup_phy_chans[phy->num] = NULL;
|
||||
|
||||
if (d40c->busy) {
|
||||
pm_runtime_mark_last_busy(d40c->base->dev);
|
||||
pm_runtime_put_autosuspend(d40c->base->dev);
|
||||
|
@@ -1852,7 +1899,6 @@ static int d40_free_dma(struct d40_chan *d40c)
 	d40c->busy = false;
 	d40c->phy_chan = NULL;
 	d40c->configured = false;
-	d40c->base->lookup_phy_chans[phy->num] = NULL;
 out:
 
 	pm_runtime_mark_last_busy(d40c->base->dev);

@@ -2070,7 +2116,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
 		desc->cyclic = true;
 
-	if (direction != DMA_NONE) {
+	if (direction != DMA_TRANS_NONE) {
 		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
 
 		if (direction == DMA_DEV_TO_MEM)

|
|||
spin_unlock_irqrestore(&d40c->lock, flags);
|
||||
}
|
||||
|
||||
static void d40_terminate_all(struct dma_chan *chan)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&d40c->lock, flags);
|
||||
|
||||
pm_runtime_get_sync(d40c->base->dev);
|
||||
ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
|
||||
if (ret)
|
||||
chan_err(d40c, "Failed to stop channel\n");
|
||||
|
||||
d40_term_all(d40c);
|
||||
pm_runtime_mark_last_busy(d40c->base->dev);
|
||||
pm_runtime_put_autosuspend(d40c->base->dev);
|
||||
if (d40c->busy) {
|
||||
pm_runtime_mark_last_busy(d40c->base->dev);
|
||||
pm_runtime_put_autosuspend(d40c->base->dev);
|
||||
}
|
||||
d40c->busy = false;
|
||||
|
||||
spin_unlock_irqrestore(&d40c->lock, flags);
|
||||
}
|
||||
|
||||
static int
|
||||
dma40_config_to_halfchannel(struct d40_chan *d40c,
|
||||
struct stedma40_half_channel_info *info,
|
||||
|
@ -2551,7 +2622,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||
|
||||
switch (cmd) {
|
||||
case DMA_TERMINATE_ALL:
|
||||
return d40_terminate_all(d40c);
|
||||
d40_terminate_all(chan);
|
||||
return 0;
|
||||
case DMA_PAUSE:
|
||||
return d40_pause(d40c);
|
||||
case DMA_RESUME:
|
||||
|
@ -2908,6 +2980,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
|
|||
dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
|
||||
rev, res->start);
|
||||
|
||||
if (rev < 2) {
|
||||
d40_err(&pdev->dev, "hardware revision: %d is not supported",
|
||||
rev);
|
||||
goto failure;
|
||||
}
|
||||
|
||||
plat_data = pdev->dev.platform_data;
|
||||
|
||||
/* Count the number of logical channels in use */
|
||||
|
@ -2998,6 +3076,7 @@ failure:
|
|||
|
||||
if (base) {
|
||||
kfree(base->lcla_pool.alloc_map);
|
||||
kfree(base->reg_val_backup_chan);
|
||||
kfree(base->lookup_log_chans);
|
||||
kfree(base->lookup_phy_chans);
|
||||
kfree(base->phy_res);
|
||||
|
|
|
@ -62,8 +62,6 @@
|
|||
#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
|
||||
|
||||
/* Link register */
|
||||
#define D40_DEACTIVATE_EVENTLINE 0x0
|
||||
#define D40_ACTIVATE_EVENTLINE 0x1
|
||||
#define D40_EVENTLINE_POS(i) (2 * i)
|
||||
#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i))
|
||||
|
||||
|
|
|
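Reviewer note: each event line occupies two bits in the SxLNK registers, which is why the activation states fit in the values 0-3 and why D40_EVENTLINE_POS()/MASK() shift by 2*i. A quick standalone check of that encoding, with the constants copied from the driver; the generic read-modify-write below is only a demonstration of the bit layout, not the driver's register protocol.

/* Standalone check of the 2-bits-per-event-line encoding. */
#include <stdio.h>

#define D40_DEACTIVATE_EVENTLINE	0x0
#define D40_ACTIVATE_EVENTLINE		0x1
#define D40_EVENTLINE_POS(i)		(2 * i)
#define D40_EVENTLINE_MASK(i)		(0x3 << D40_EVENTLINE_POS(i))

int main(void)
{
	unsigned int reg = 0;
	int event = 5;		/* arbitrary event line number */

	/* Activate event line 5: set its 2-bit field to 01. */
	reg &= ~D40_EVENTLINE_MASK(event);
	reg |= D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event);

	printf("pos=%d mask=0x%08x reg=0x%08x\n",
	       D40_EVENTLINE_POS(event), D40_EVENTLINE_MASK(event), reg);
	/* prints: pos=10 mask=0x00000c00 reg=0x00000400 */
	return 0;
}
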
drivers/mmc/host/mxs-mmc.c
@@ -363,6 +363,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
 		goto out;
 
 	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dmach);
 	return;
 
 out:

@@ -403,6 +404,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
 		goto out;
 
 	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dmach);
 	return;
 
 out:

@@ -531,6 +533,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
 		goto out;
 
 	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dmach);
 	return;
 out:
 	dev_warn(mmc_dev(host->mmc),

drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -266,6 +266,7 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
 	desc->callback		= dma_irq_callback;
 	desc->callback_param	= this;
 	dmaengine_submit(desc);
+	dma_async_issue_pending(get_dma_chan(this));
 
 	/* Wait for the interrupt from the DMA block. */
 	err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));