From 90b44f8ffdf6c66d190ee71b330009bf7f11a208 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 25 Jul 2011 19:57:52 +0530 Subject: [PATCH 01/51] dmaengine: add helper function for slave_single For clients which require a single slave transfer and dont want to be bothered about the scatterlist api, this helper gives simple API for this transfer and creates single scatterlist for DMA API Idea from Russell King Signed-off-by: Vinod Koul --- include/linux/dmaengine.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 8fbf40e0713c..0d738c95fe4e 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -24,6 +24,7 @@ #include #include #include +#include struct scatterlist; @@ -519,6 +520,16 @@ static inline int dmaengine_slave_config(struct dma_chan *chan, (unsigned long)config); } +static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( + struct dma_chan *chan, void *buf, size_t len, + enum dma_data_direction dir, unsigned long flags) +{ + struct scatterlist sg; + sg_init_one(&sg, buf, len); + + return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags); +} + static inline int dmaengine_terminate_all(struct dma_chan *chan) { return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); From 9704efaa52ab18eb3504c4e0bc421c1d01b7981a Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 29 Jul 2011 16:21:57 +0530 Subject: [PATCH 02/51] dmaengine/dmatest: Terminate transfers on all channels in case of error or exit In case, some error occurs while doing memcpy transfers, we must terminate all transfers physically too. This is achieved by calling device_control() routine with TERMINATE_ALL as parameter. This is also required to be done in case module is removed while we are in middle of some transfers. 
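For orientation, a minimal sketch of a hypothetical slave client tying these first two patches together: it uses the dmaengine_prep_slave_single() helper added in patch 01 and then the terminate path patch 02 exercises. The function name and the chan/buf/len parameters are placeholders, not driver API, and error handling is trimmed.

	/* hypothetical client helper; names are illustrative only */
	static int start_single_xfer(struct dma_chan *chan, void *buf, size_t len)
	{
		struct dma_async_tx_descriptor *desc;

		/* single-buffer transfer without building a scatterlist by hand */
		desc = dmaengine_prep_slave_single(chan, buf, len, DMA_TO_DEVICE,
						   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			return -ENOMEM;

		desc->tx_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}

On error or module removal the client would then call dmaengine_terminate_all(chan), which is exactly what dmatest now does explicitly in the hunks below.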
Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 765f5ff22304..accc18441b16 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -477,6 +477,8 @@ err_srcs: pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", thread_name, total_tests, failed_tests, ret); + /* terminate all transfers on specified channels */ + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); if (iterations > 0) while (!kthread_should_stop()) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); @@ -499,6 +501,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) list_del(&thread->node); kfree(thread); } + + /* terminate all transfers on specified channels */ + dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0); + kfree(dtc); } From a16e470caa173d323ef68dcac98c899b95fa4f84 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 9 Aug 2011 10:08:10 +0530 Subject: [PATCH 03/51] dmaengine: remove struct scatterlist for header Commit 90b44f8 introduces dmaengine_prep_slave_single API which adds scatterlist.h in dmaengine.h, so defining struct scatterlist is not required Signed-off-by: Vinod Koul Acked-by: Dan Williams --- include/linux/dmaengine.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 0d738c95fe4e..ace51af4369f 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -26,8 +26,6 @@ #include #include -struct scatterlist; - /** * typedef dma_cookie_t - an opaque DMA cookie * From ef298c21c0d9c06ed89ea2fa724c3a018acfff39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lothar=20Wa=C3=9Fmann?= Date: Mon, 8 Aug 2011 14:47:47 +0200 Subject: [PATCH 04/51] mxs-dma: enable CLKGATE before accessing registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After calling mxs_dma_disable_chan() for a channel, that channel becomes unusable because some controller registers can only be written when the clock is enabled via CLKGATE. Signed-off-by: Lothar Waßmann Signed-off-by: Vinod Koul --- drivers/dma/mxs-dma.c | 45 +++++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index be641cbd36fc..b4588bdd98bb 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -130,6 +130,23 @@ struct mxs_dma_engine { struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; }; +static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable) +{ + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; + int chan_id = mxs_chan->chan.chan_id; + int set_clr = enable ? 
MXS_CLR_ADDR : MXS_SET_ADDR; + + /* enable apbh channel clock */ + if (dma_is_apbh()) { + if (apbh_is_old()) + writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), + mxs_dma->base + HW_APBHX_CTRL0 + set_clr); + else + writel(1 << chan_id, + mxs_dma->base + HW_APBHX_CTRL0 + set_clr); + } +} + static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) { struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; @@ -148,38 +165,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_id = mxs_chan->chan.chan_id; + /* clkgate needs to be enabled before writing other registers */ + mxs_dma_clkgate(mxs_chan, 1); + /* set cmd_addr up */ writel(mxs_chan->ccw_phys, mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); - /* enable apbh channel clock */ - if (dma_is_apbh()) { - if (apbh_is_old()) - writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), - mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); - else - writel(1 << chan_id, - mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); - } - /* write 1 to SEMA to kick off the channel */ writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); } static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) { - struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; - int chan_id = mxs_chan->chan.chan_id; - /* disable apbh channel clock */ - if (dma_is_apbh()) { - if (apbh_is_old()) - writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), - mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); - else - writel(1 << chan_id, - mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); - } + mxs_dma_clkgate(mxs_chan, 0); mxs_chan->status = DMA_SUCCESS; } @@ -338,7 +338,10 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) if (ret) goto err_clk; + /* clkgate needs to be enabled for reset to finish */ + mxs_dma_clkgate(mxs_chan, 1); mxs_dma_reset_chan(mxs_chan); + mxs_dma_clkgate(mxs_chan, 0); dma_async_tx_descriptor_init(&mxs_chan->desc, chan); mxs_chan->desc.tx_submit = mxs_dma_tx_submit; From d8cb04b070c2a55f7201714d231cff4f8f9fbd16 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Wed, 27 Jul 2011 12:21:28 +0000 Subject: [PATCH 05/51] dmaengine: at_hdmac: replace spin_lock* with irqsave variants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit dmaengine routines can be called from interrupt context and with interrupts disabled. Whereas spin_unlock_bh can't be called from such contexts. So this patch converts all spin_lock* routines to irqsave variants. Also, spin_lock() used in tasklet is converted to irqsave variants, as tasklet can be interrupted, and dma requests from such interruptions may also call spin_lock. Idea from dw_dmac patch by Viresh Kumar. 
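The conversion is mechanical; a minimal sketch of the before/after pattern applied throughout at_hdmac.c (atchan->lock as in the hunks below):

	unsigned long flags;

	/* was: spin_lock_bh(&atchan->lock) - not allowed with IRQs disabled */
	spin_lock_irqsave(&atchan->lock, flags);
	/* ... touch descriptor lists / channel state ... */
	spin_unlock_irqrestore(&atchan->lock, flags);
	/* was: spin_unlock_bh(&atchan->lock) */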
Signed-off-by: Nicolas Ferre Signed-off-by: Uwe Kleine-König Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 52 ++++++++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 6a483eac7b3f..fd87b9690e1b 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) { struct at_desc *desc, *_desc; struct at_desc *ret = NULL; + unsigned long flags; unsigned int i = 0; LIST_HEAD(tmp_list); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { i++; if (async_tx_test_ack(&desc->txd)) { @@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) dev_dbg(chan2dev(&atchan->chan_common), "desc %p not ACKed\n", desc); } - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); dev_vdbg(chan2dev(&atchan->chan_common), "scanned %u descriptors on freelist\n", i); @@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) if (!ret) { ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); if (ret) { - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); atchan->descs_allocated++; - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); } else { dev_err(chan2dev(&atchan->chan_common), "not enough descriptors available\n"); @@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) { if (desc) { struct at_desc *child; + unsigned long flags; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); list_for_each_entry(child, &desc->tx_list, desc_node) dev_vdbg(chan2dev(&atchan->chan_common), "moving child desc %p to freelist\n", @@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) dev_vdbg(chan2dev(&atchan->chan_common), "moving desc %p to freelist\n", desc); list_add(&desc->desc_node, &atchan->free_list); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); } } @@ -471,8 +473,9 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan) static void atc_tasklet(unsigned long data) { struct at_dma_chan *atchan = (struct at_dma_chan *)data; + unsigned long flags; - spin_lock(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) atc_handle_error(atchan); else if (test_bit(ATC_IS_CYCLIC, &atchan->status)) @@ -480,7 +483,7 @@ static void atc_tasklet(unsigned long data) else atc_advance_work(atchan); - spin_unlock(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); } static irqreturn_t at_dma_interrupt(int irq, void *dev_id) @@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) struct at_desc *desc = txd_to_at_desc(tx); struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); dma_cookie_t cookie; + unsigned long flags; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); cookie = atc_assign_cookie(atchan, desc); if (list_empty(&atchan->active_list)) { @@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) list_add_tail(&desc->desc_node, &atchan->queue); } - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return cookie; } @@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd 
cmd, struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); int chan_id = atchan->chan_common.chan_id; + unsigned long flags; LIST_HEAD(list); dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); if (cmd == DMA_PAUSE) { - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); set_bit(ATC_IS_PAUSED, &atchan->status); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); } else if (cmd == DMA_RESUME) { if (!test_bit(ATC_IS_PAUSED, &atchan->status)) return 0; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); clear_bit(ATC_IS_PAUSED, &atchan->status); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); } else if (cmd == DMA_TERMINATE_ALL) { struct at_desc *desc, *_desc; /* @@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, * channel. We still have to poll the channel enable bit due * to AHB/HSB limitations. */ - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); /* disabling channel: must also remove suspend state */ dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); @@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, /* if channel dedicated to cyclic operations, free it */ clear_bit(ATC_IS_CYCLIC, &atchan->status); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); } else { return -ENXIO; } @@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan, struct at_dma_chan *atchan = to_at_dma_chan(chan); dma_cookie_t last_used; dma_cookie_t last_complete; + unsigned long flags; enum dma_status ret; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); last_complete = atchan->completed_cookie; last_used = chan->cookie; @@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan, ret = dma_async_is_complete(cookie, last_complete, last_used); } - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); if (ret != DMA_SUCCESS) dma_set_tx_state(txstate, last_complete, last_used, @@ -1046,6 +1052,7 @@ atc_tx_status(struct dma_chan *chan, static void atc_issue_pending(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); + unsigned long flags; dev_vdbg(chan2dev(chan), "issue_pending\n"); @@ -1053,11 +1060,11 @@ static void atc_issue_pending(struct dma_chan *chan) if (test_bit(ATC_IS_CYCLIC, &atchan->status)) return; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); if (!atc_chan_is_enabled(atchan)) { atc_advance_work(atchan); } - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); } /** @@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) struct at_dma *atdma = to_at_dma(chan->device); struct at_desc *desc; struct at_dma_slave *atslave; + unsigned long flags; int i; u32 cfg; LIST_HEAD(tmp_list); @@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) list_add_tail(&desc->desc_node, &tmp_list); } - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); atchan->descs_allocated = i; list_splice(&tmp_list, &atchan->free_list); atchan->completed_cookie = chan->cookie = 1; - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); /* channel parameters */ channel_writel(atchan, CFG, cfg); From c0ba5947370a0900b1823922fc4faf41515bc901 Mon Sep 17 
00:00:00 2001 From: Nicolas Ferre Date: Wed, 27 Jul 2011 12:21:29 +0000 Subject: [PATCH 06/51] dmaengine: at_hdmac: improve power management routines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Save/restore dma controller state across a suspend-resume sequence. The prepare() function will wait for the non-cyclic channels to become idle. It also deals with cyclic operations with the start at next period while resuming. Signed-off-by: Nicolas Ferre Signed-off-by: Uwe Kleine-König Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 88 ++++++++++++++++++++++++++++++++++++- drivers/dma/at_hdmac_regs.h | 7 +++ 2 files changed, 94 insertions(+), 1 deletion(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index fd87b9690e1b..0ead008e3bdf 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1385,27 +1385,113 @@ static void at_dma_shutdown(struct platform_device *pdev) clk_disable(atdma->clk); } +static int at_dma_prepare(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct at_dma *atdma = platform_get_drvdata(pdev); + struct dma_chan *chan, *_chan; + + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + device_node) { + struct at_dma_chan *atchan = to_at_dma_chan(chan); + /* wait for transaction completion (except in cyclic case) */ + if (atc_chan_is_enabled(atchan) && + !test_bit(ATC_IS_CYCLIC, &atchan->status)) + return -EAGAIN; + } + return 0; +} + +static void atc_suspend_cyclic(struct at_dma_chan *atchan) +{ + struct dma_chan *chan = &atchan->chan_common; + + /* Channel should be paused by user + * do it anyway even if it is not done already */ + if (!test_bit(ATC_IS_PAUSED, &atchan->status)) { + dev_warn(chan2dev(chan), + "cyclic channel not paused, should be done by channel user\n"); + atc_control(chan, DMA_PAUSE, 0); + } + + /* now preserve additional data for cyclic operations */ + /* next descriptor address in the cyclic list */ + atchan->save_dscr = channel_readl(atchan, DSCR); + + vdbg_dump_regs(atchan); +} + static int at_dma_suspend_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct at_dma *atdma = platform_get_drvdata(pdev); + struct dma_chan *chan, *_chan; - at_dma_off(platform_get_drvdata(pdev)); + /* preserve data */ + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + device_node) { + struct at_dma_chan *atchan = to_at_dma_chan(chan); + + if (test_bit(ATC_IS_CYCLIC, &atchan->status)) + atc_suspend_cyclic(atchan); + atchan->save_cfg = channel_readl(atchan, CFG); + } + atdma->save_imr = dma_readl(atdma, EBCIMR); + + /* disable DMA controller */ + at_dma_off(atdma); clk_disable(atdma->clk); return 0; } +static void atc_resume_cyclic(struct at_dma_chan *atchan) +{ + struct at_dma *atdma = to_at_dma(atchan->chan_common.device); + + /* restore channel status for cyclic descriptors list: + * next descriptor in the cyclic list at the time of suspend */ + channel_writel(atchan, SADDR, 0); + channel_writel(atchan, DADDR, 0); + channel_writel(atchan, CTRLA, 0); + channel_writel(atchan, CTRLB, 0); + channel_writel(atchan, DSCR, atchan->save_dscr); + dma_writel(atdma, CHER, atchan->mask); + + /* channel pause status should be removed by channel user + * We cannot take the initiative to do it here */ + + vdbg_dump_regs(atchan); +} + static int at_dma_resume_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct at_dma *atdma = platform_get_drvdata(pdev); + struct 
dma_chan *chan, *_chan; + /* bring back DMA controller */ clk_enable(atdma->clk); dma_writel(atdma, EN, AT_DMA_ENABLE); + + /* clear any pending interrupt */ + while (dma_readl(atdma, EBCISR)) + cpu_relax(); + + /* restore saved data */ + dma_writel(atdma, EBCIER, atdma->save_imr); + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, + device_node) { + struct at_dma_chan *atchan = to_at_dma_chan(chan); + + channel_writel(atchan, CFG, atchan->save_cfg); + if (test_bit(ATC_IS_CYCLIC, &atchan->status)) + atc_resume_cyclic(atchan); + } return 0; } static const struct dev_pm_ops at_dma_dev_pm_ops = { + .prepare = at_dma_prepare, .suspend_noirq = at_dma_suspend_noirq, .resume_noirq = at_dma_resume_noirq, }; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 087dbf1dd39c..6f0c4a3eb091 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -204,6 +204,9 @@ enum atc_status { * @status: transmit status information from irq/prep* functions * to tasklet (use atomic operations) * @tasklet: bottom half to finish transaction work + * @save_cfg: configuration register that is saved on suspend/resume cycle + * @save_dscr: for cyclic operations, preserve next descriptor address in + * the cyclic list on suspend/resume cycle * @lock: serializes enqueue/dequeue operations to descriptors lists * @completed_cookie: identifier for the most recently completed operation * @active_list: list of descriptors dmaengine is being running on @@ -218,6 +221,8 @@ struct at_dma_chan { u8 mask; unsigned long status; struct tasklet_struct tasklet; + u32 save_cfg; + u32 save_dscr; spinlock_t lock; @@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan) * @chan_common: common dmaengine dma_device object members * @ch_regs: memory mapped register base * @clk: dma controller clock + * @save_imr: interrupt mask register that is saved on suspend/resume cycle * @all_chan_mask: all channels availlable in a mask * @dma_desc_pool: base of DMA descriptor region (DMA address) * @chan: channels table to store at_dma_chan structures @@ -256,6 +262,7 @@ struct at_dma { struct dma_device dma_common; void __iomem *regs; struct clk *clk; + u32 save_imr; u8 all_chan_mask; From 3c477482bb9f976e5451c50be7d3d60ea6f88646 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Mon, 25 Jul 2011 21:09:23 +0000 Subject: [PATCH 07/51] dmaengine: at_hdmac: add wrappers for testing channel state Cyclic property and paused state are encoded as bits in the channel status bitfield. Tests of those bits are wrapped in convenient helper functions. 
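At the call sites this is a straight substitution, for example (taken from the tasklet hunk below):

	/* before */
	if (test_bit(ATC_IS_CYCLIC, &atchan->status))
		atc_handle_cyclic(atchan);

	/* after */
	if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);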
Signed-off-by: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 19 +++++++++---------- drivers/dma/at_hdmac_regs.h | 17 +++++++++++++++++ 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 0ead008e3bdf..d774800b9fa4 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -301,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) /* for cyclic transfers, * no need to replay callback function while stopping */ - if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) { + if (!atc_chan_is_cyclic(atchan)) { dma_async_tx_callback callback = txd->callback; void *param = txd->callback_param; @@ -478,7 +478,7 @@ static void atc_tasklet(unsigned long data) spin_lock_irqsave(&atchan->lock, flags); if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) atc_handle_error(atchan); - else if (test_bit(ATC_IS_CYCLIC, &atchan->status)) + else if (atc_chan_is_cyclic(atchan)) atc_handle_cyclic(atchan); else atc_advance_work(atchan); @@ -945,7 +945,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock_irqrestore(&atchan->lock, flags); } else if (cmd == DMA_RESUME) { - if (!test_bit(ATC_IS_PAUSED, &atchan->status)) + if (!atc_chan_is_paused(atchan)) return 0; spin_lock_irqsave(&atchan->lock, flags); @@ -1035,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan, else dma_set_tx_state(txstate, last_complete, last_used, 0); - if (test_bit(ATC_IS_PAUSED, &atchan->status)) + if (atc_chan_is_paused(atchan)) ret = DMA_PAUSED; dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", @@ -1057,7 +1057,7 @@ static void atc_issue_pending(struct dma_chan *chan) dev_vdbg(chan2dev(chan), "issue_pending\n"); /* Not needed for cyclic transfers */ - if (test_bit(ATC_IS_CYCLIC, &atchan->status)) + if (atc_chan_is_cyclic(atchan)) return; spin_lock_irqsave(&atchan->lock, flags); @@ -1395,8 +1395,7 @@ static int at_dma_prepare(struct device *dev) device_node) { struct at_dma_chan *atchan = to_at_dma_chan(chan); /* wait for transaction completion (except in cyclic case) */ - if (atc_chan_is_enabled(atchan) && - !test_bit(ATC_IS_CYCLIC, &atchan->status)) + if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan)) return -EAGAIN; } return 0; @@ -1408,7 +1407,7 @@ static void atc_suspend_cyclic(struct at_dma_chan *atchan) /* Channel should be paused by user * do it anyway even if it is not done already */ - if (!test_bit(ATC_IS_PAUSED, &atchan->status)) { + if (!atc_chan_is_paused(atchan)) { dev_warn(chan2dev(chan), "cyclic channel not paused, should be done by channel user\n"); atc_control(chan, DMA_PAUSE, 0); @@ -1432,7 +1431,7 @@ static int at_dma_suspend_noirq(struct device *dev) device_node) { struct at_dma_chan *atchan = to_at_dma_chan(chan); - if (test_bit(ATC_IS_CYCLIC, &atchan->status)) + if (atc_chan_is_cyclic(atchan)) atc_suspend_cyclic(atchan); atchan->save_cfg = channel_readl(atchan, CFG); } @@ -1484,7 +1483,7 @@ static int at_dma_resume_noirq(struct device *dev) struct at_dma_chan *atchan = to_at_dma_chan(chan); channel_writel(atchan, CFG, atchan->save_cfg); - if (test_bit(ATC_IS_CYCLIC, &atchan->status)) + if (atc_chan_is_cyclic(atchan)) atc_resume_cyclic(atchan); } return 0; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 6f0c4a3eb091..aa4c9aebab7c 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -362,6 +362,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan) return !!(dma_readl(atdma, 
CHSR) & atchan->mask); } +/** + * atc_chan_is_paused - test channel pause/resume status + * @atchan: channel we want to test status + */ +static inline int atc_chan_is_paused(struct at_dma_chan *atchan) +{ + return test_bit(ATC_IS_PAUSED, &atchan->status); +} + +/** + * atc_chan_is_cyclic - test if given channel has cyclic property set + * @atchan: channel we want to test status + */ +static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan) +{ + return test_bit(ATC_IS_CYCLIC, &atchan->status); +} /** * set_desc_eol - set end-of-link to descriptor so it will end transfer From d7db80801f8117cf210b9e2cd2c800e326d59fa2 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 5 Aug 2011 11:43:44 +0000 Subject: [PATCH 08/51] dmaengine: at_hdmac: fix way to specify cyclic capability In this driver, we can trigger cyclic transfer on peripherals-DMA interfaces. It is dependent on driver implementation but cannot depend on a platform property: we remove the dma_has_cap(DMA_CYCLIC, ) test which has no meaning. Signed-off-by: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index d774800b9fa4..3b99dc62874b 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1301,15 +1301,13 @@ static int __init at_dma_probe(struct platform_device *pdev) if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; - if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) + if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; - - if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) + /* controller can do slave DMA: can trigger cyclic transfers */ + dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; - - if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) || - dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) atdma->dma_common.device_control = atc_control; + } dma_writel(atdma, EN, AT_DMA_ENABLE); From 47850a27306997be914dedcbca1dfce09fa4ef0a Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:26 +0530 Subject: [PATCH 09/51] ARM: asm/pl080.h: Protect against multiple inclusion of header file doesn't have protection to deal with multiple inclusion. And so we get compilation errors in cases where this file is included more than once. This patch adds #ifdefs at the top of file to protect it against multiple inclusions. Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- arch/arm/include/asm/hardware/pl080.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h index e4a04e4e5627..33c78d7af2e1 100644 --- a/arch/arm/include/asm/hardware/pl080.h +++ b/arch/arm/include/asm/hardware/pl080.h @@ -21,6 +21,9 @@ * OneNAND features. */ +#ifndef ASM_PL080_H +#define ASM_PL080_H + #define PL080_INT_STATUS (0x00) #define PL080_TC_STATUS (0x04) #define PL080_TC_CLEAR (0x08) @@ -138,3 +141,4 @@ struct pl080s_lli { u32 control1; }; +#endif /* ASM_PL080_H */ From 3e27ee8448bcbc8b4f060b107aa622c116f287ab Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:27 +0530 Subject: [PATCH 10/51] dmaengine/amba-pl08x: Resolve formatting issues There were few formatting related issues in code. This patch fixes them. 
Fixes include: - Remove extra blank lines - align code to 80 cols - combine several lines to one line Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 41 ++++++++++++++++------------------------ 1 file changed, 16 insertions(+), 25 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 196a7378d332..4c4a3092e09c 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -125,7 +125,8 @@ struct pl08x_lli { * @phy_chans: array of data for the physical channels * @pool: a pool for the LLI descriptors * @pool_ctr: counter of LLIs in the pool - * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches + * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI + * fetches * @mem_buses: set to indicate memory transfers on AHB2. * @lock: a spinlock for this struct */ @@ -271,7 +272,6 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) writel(val, ch->base + PL080_CH_CONFIG); } - /* * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and * clears any pending interrupt status. This should not be used for @@ -546,7 +546,8 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, llis_va[num_llis].cctl = cctl; llis_va[num_llis].src = bd->srcbus.addr; llis_va[num_llis].dst = bd->dstbus.addr; - llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); + llis_va[num_llis].lli = llis_bus + (num_llis + 1) * + sizeof(struct pl08x_lli); llis_va[num_llis].lli |= bd->lli_bus; if (cctl & PL080_CONTROL_SRC_INCR) @@ -583,12 +584,10 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, struct pl08x_lli_build_data bd; int num_llis = 0; u32 cctl; - size_t max_bytes_per_lli; - size_t total_bytes = 0; + size_t max_bytes_per_lli, total_bytes = 0; struct pl08x_lli *llis_va; - txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, - &txd->llis_bus); + txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); if (!txd->llis_va) { dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); return 0; @@ -779,7 +778,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, total_bytes += lli_len; } - if (odd_bytes) { /* * Creep past the boundary, maintaining @@ -916,9 +914,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan, * need, but for slaves the physical signals may be muxed! * Can the platform allow us to use this channel? */ - if (plchan->slave && - ch->signal < 0 && - pl08x->pd->get_signal) { + if (plchan->slave && ch->signal < 0 && pl08x->pd->get_signal) { ret = pl08x->pd->get_signal(plchan); if (ret < 0) { dev_dbg(&pl08x->adev->dev, @@ -1007,10 +1003,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( * If slaves are relying on interrupts to signal completion this function * must not be called with interrupts disabled. 
*/ -static enum dma_status -pl08x_dma_tx_status(struct dma_chan *chan, - dma_cookie_t cookie, - struct dma_tx_state *txstate) +static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); dma_cookie_t last_used; @@ -1588,8 +1582,8 @@ static void pl08x_tasklet(unsigned long data) */ list_for_each_entry(waiting, &pl08x->memcpy.channels, chan.device_node) { - if (waiting->state == PL08X_CHAN_WAITING && - waiting->waiting != NULL) { + if (waiting->state == PL08X_CHAN_WAITING && + waiting->waiting != NULL) { int ret; /* This should REALLY not fail now */ @@ -1684,9 +1678,7 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) * Make a local wrapper to hold required data */ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, - struct dma_device *dmadev, - unsigned int channels, - bool slave) + struct dma_device *dmadev, unsigned int channels, bool slave) { struct pl08x_dma_chan *chan; int i; @@ -1836,9 +1828,9 @@ static const struct file_operations pl08x_debugfs_operations = { static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) { /* Expose a simple debugfs interface to view all clocks */ - (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, - NULL, pl08x, - &pl08x_debugfs_operations); + (void) debugfs_create_file(dev_name(&pl08x->adev->dev), + S_IFREG | S_IRUGO, NULL, pl08x, + &pl08x_debugfs_operations); } #else @@ -1973,8 +1965,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) /* Register slave channels */ ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, - pl08x->pd->num_slave_channels, - true); + pl08x->pd->num_slave_channels, true); if (ret <= 0) { dev_warn(&pl08x->adev->dev, "%s failed to enumerate slave channels - %d\n", From 0c38d70139138713e66c6f98e19a0320014476ff Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:28 +0530 Subject: [PATCH 11/51] dmaengine/amba-pl08x: Rearrange inclusion of header files in ascending order Header files included in driver are not present in alphabetical order. Rearrange them in alphabetical order. Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 4c4a3092e09c..8e2056bf1623 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -74,19 +74,18 @@ * Global TODO: * - Break out common code from arch/arm/mach-s3c64xx and share */ -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include +#include +#include #include - +#include #include #define DRIVER_NAME "pl08xdmac" From b201c111c87a4cf36d009abe57c62bd14d17d762 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:29 +0530 Subject: [PATCH 12/51] dmaengine/amba-pl08x: pass (*ptr) to sizeof() instead of (struct xyz) As mentioned in Documentation/CodingStyle, The preferred form for passing a size of a struct is the following: p = kmalloc(sizeof(*p), ...); The alternative form where struct name is spelled out hurts readability and introduces an opportunity for a bug when the pointer variable type is changed but the corresponding sizeof that is passed to a memory allocator is not. 
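Concretely, one of the conversions in the hunks below (pl08x_get_txd()):

	/* before: spelled-out struct, silently wrong if txd's type ever changes */
	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);

	/* after: the size always follows the pointer */
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);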
This patch replaces (struct xyz) with *ptr at several occurrences in driver. Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 8e2056bf1623..01c2f507e950 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1293,7 +1293,7 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, unsigned long flags) { - struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); + struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); if (txd) { dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); @@ -1690,7 +1690,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, * to cope with that situation. */ for (i = 0; i < channels; i++) { - chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); + chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) { dev_err(&pl08x->adev->dev, "%s no memory for channel\n", __func__); @@ -1850,7 +1850,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) return ret; /* Create the driver state holder */ - pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); + pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); if (!pl08x) { ret = -ENOMEM; goto out_no_pl08x; @@ -1929,7 +1929,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) } /* Initialize physical channels */ - pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), + pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)), GFP_KERNEL); if (!pl08x->phy_chans) { dev_err(&adev->dev, "%s failed to allocate " From 5a61233073a35a7ae152af77ed80dfc465c38fc7 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:30 +0530 Subject: [PATCH 13/51] dmaengine/amba-pl08x: Complete doc comment for struct pl08x_txd Doc comment for struct pl08x_txd was incomplete. Complete that. Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- include/linux/amba/pl08x.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index e6e28f37d8ec..cd8f629da58f 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h @@ -105,8 +105,16 @@ struct pl08x_phy_chan { /** * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor + * @tx: async tx descriptor + * @node: node for txd list for channels + * @src_addr: src address of txd + * @dst_addr: dst address of txd + * @len: transfer len in bytes + * @direction: direction of transfer * @llis_bus: DMA memory address (physical) start for the LLIs * @llis_va: virtual memory address start for the LLIs + * @cctl: control reg values for current txd + * @ccfg: config reg values for current txd */ struct pl08x_txd { struct dma_async_tx_descriptor tx; From 0532e6fced3c4f6a4eda7f078d8aa36405647c07 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:31 +0530 Subject: [PATCH 14/51] dmaengine/amba-pl08x: Remove redundant comment and rewrite original Similar comment is present over routine also pl08x_choose_master_bus(). Keeping one of them. Also rewrite that comment to convey message clearly. 
Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 01c2f507e950..6c52959b00af 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -497,9 +497,13 @@ struct pl08x_lli_build_data { }; /* - * Autoselect a master bus to use for the transfer this prefers the - * destination bus if both available if fixed address on one bus the - * other will be chosen + * Autoselect a master bus to use for the transfer. Slave will be the chosen as + * victim in case src & dest are not similarly aligned. i.e. If after aligning + * masters address with width requirements of transfer (by sending few byte by + * byte data), slave is still not aligned, then its width will be reduced to + * BYTE. + * - prefers the destination bus if both available + * - if fixed address on one bus the other will be chosen */ static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) @@ -625,11 +629,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, /* We need to count this down to zero */ bd.remainder = txd->len; - /* - * Choose bus to align to - * - prefers destination bus if both available - * - if fixed address on one bus chooses other - */ pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", From 175a5e617cd820d9e22d9e4f6d3ef736b2f874b1 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:32 +0530 Subject: [PATCH 15/51] dmaengine/amba-pl08x: Changing few prints to dev_dbg from dev_info For 8 memory and 16 slave channels 35 boot print lines are printed. And that is too much. Most of this would be more useful for debugging. So moving few of them to dev_dbg instead of dev_info. Now only 3 prints will be printed. This also rearrange one of the debug message to fit into two lines. Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 6c52959b00af..ead88c9a5e93 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1717,7 +1717,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, kfree(chan); continue; } - dev_info(&pl08x->adev->dev, + dev_dbg(&pl08x->adev->dev, "initialize virtual channel \"%s\"\n", chan->name); @@ -1945,9 +1945,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) spin_lock_init(&ch->lock); ch->serving = NULL; ch->signal = -1; - dev_info(&adev->dev, - "physical channel %d is %s\n", i, - pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); + dev_dbg(&adev->dev, "physical channel %d is %s\n", + i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); } /* Register as many memcpy channels as there are physical channels */ From b7b6018bad6fd7ebe5a78bda5f2a71a6ecf5406a Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:33 +0530 Subject: [PATCH 16/51] dmaengine/amba-pl08x: support runtime PM Insert notifiers for the runtime PM API. With this the runtime PM layer kicks in to action where used. 
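In outline, the calls added in the hunks below follow the usual runtime PM bracketing; adev is the amba_device as in the driver, and the grouping here is only a sketch of where the notifiers land:

	pm_runtime_set_active(&adev->dev);	/* probe: hardware is already up */
	pm_runtime_enable(&adev->dev);		/* probe: hand control to the PM core */

	pm_runtime_get_sync(&pl08x->adev->dev);	/* when a physical channel is taken */
	/* ... channel in use ... */
	pm_runtime_put(&pl08x->adev->dev);	/* when the physical channel is freed */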
Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index ead88c9a5e93..5dd97f450925 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -84,6 +84,7 @@ #include #include #include +#include #include #include #include @@ -405,6 +406,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, return NULL; } + pm_runtime_get_sync(&pl08x->adev->dev); return ch; } @@ -418,6 +420,8 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, /* Stop the channel and clear its interrupts */ pl08x_terminate_phy_chan(pl08x, ch); + pm_runtime_put(&pl08x->adev->dev); + /* Mark it as free */ ch->serving = NULL; spin_unlock_irqrestore(&ch->lock, flags); @@ -1855,6 +1859,9 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_pl08x; } + pm_runtime_set_active(&adev->dev); + pm_runtime_enable(&adev->dev); + /* Initialize memcpy engine */ dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); pl08x->memcpy.dev = &adev->dev; @@ -1992,6 +1999,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", amba_part(adev), amba_rev(adev), (unsigned long long)adev->res.start, adev->irq[0]); + + pm_runtime_put(&adev->dev); return 0; out_no_slave_reg: @@ -2010,6 +2019,9 @@ out_no_ioremap: dma_pool_destroy(pl08x->pool); out_no_lli_pool: out_no_platdata: + pm_runtime_put(&adev->dev); + pm_runtime_disable(&adev->dev); + kfree(pl08x); out_no_pl08x: amba_release_regions(adev); From 48a59ef3579492855d41405f8bf0a2983e061976 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:34 +0530 Subject: [PATCH 17/51] dmaengine/amba-pl08x: Simplify pl08x_ensure_on() Simply writing 1 on bit 0 is sufficient instead of reading and clearing bits. Also as per manual, for bit 3-31 of DMACConfiguration register: "read undefined, write as 0" So, we must not rely on values read from this registers bit 3-31. Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 5dd97f450925..d79688d64886 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1502,13 +1502,7 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) */ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) { - u32 val; - - val = readl(pl08x->base + PL080_CONFIG); - val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); - /* We implicitly clear bit 1 and that means little-endian mode */ - val |= PL080_CONFIG_ENABLE; - writel(val, pl08x->base + PL080_CONFIG); + writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); } static void pl08x_unmap_buffers(struct pl08x_txd *txd) From 16ca8105040217acf5b4b506d04bb933fb3a76af Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:35 +0530 Subject: [PATCH 18/51] dmaengine/amba-pl08x: No need to check "ch->signal < 0" We have just executed following in pl08x_get_phy_channel(): ch->signal = -1; We don't have to compare "ch->signal < 0", as this will always be true. 
Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index d79688d64886..3653961b6088 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -916,7 +916,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan, * need, but for slaves the physical signals may be muxed! * Can the platform allow us to use this channel? */ - if (plchan->slave && ch->signal < 0 && pl08x->pd->get_signal) { + if (plchan->slave && pl08x->pd->get_signal) { ret = pl08x->pd->get_signal(plchan); if (ret < 0) { dev_dbg(&pl08x->adev->dev, From 28da28365da3f3bea1d4b7212a8a40e4b9ac3229 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:36 +0530 Subject: [PATCH 19/51] dmaengine/amba-pl08x: Schedule tasklet in case of error interrupt Currently, if error interrupt occurs, nothing is done in interrupt handler (just clearing the interrupts). We must somehow indicate this to the user that DMA is over, due to ERR interrupt or TC interrupt. So, this patch just schedules existing tasklet, with a print showing error interrupt has occurred on which channels. Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 42 +++++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 3653961b6088..6bba32e5ddb8 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1619,38 +1619,40 @@ static void pl08x_tasklet(unsigned long data) static irqreturn_t pl08x_irq(int irq, void *dev) { struct pl08x_driver_data *pl08x = dev; - u32 mask = 0; - u32 val; - int i; + u32 mask = 0, err, tc, i; - val = readl(pl08x->base + PL080_ERR_STATUS); - if (val) { - /* An error interrupt (on one or more channels) */ - dev_err(&pl08x->adev->dev, - "%s error interrupt, register value 0x%08x\n", - __func__, val); - /* - * Simply clear ALL PL08X error interrupts, - * regardless of channel and cause - * FIXME: should be 0x00000003 on PL081 really. - */ - writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); + /* check & clear - ERR & TC interrupts */ + err = readl(pl08x->base + PL080_ERR_STATUS); + if (err) { + dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", + __func__, err); + writel(err, pl08x->base + PL080_ERR_CLEAR); } - val = readl(pl08x->base + PL080_INT_STATUS); + tc = readl(pl08x->base + PL080_INT_STATUS); + if (tc) + writel(tc, pl08x->base + PL080_TC_CLEAR); + + if (!err && !tc) + return IRQ_NONE; + for (i = 0; i < pl08x->vd->channels; i++) { - if ((1 << i) & val) { + if (((1 << i) & err) || ((1 << i) & tc)) { /* Locate physical channel */ struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; struct pl08x_dma_chan *plchan = phychan->serving; + if (!plchan) { + dev_err(&pl08x->adev->dev, + "%s Error TC interrupt on unused channel: 0x%08x\n", + __func__, i); + continue; + } + /* Schedule tasklet on this channel */ tasklet_schedule(&plchan->tasklet); - mask |= (1 << i); } } - /* Clear only the terminal interrupts on channels we processed */ - writel(mask, pl08x->base + PL080_TC_CLEAR); return mask ? 
IRQ_HANDLED : IRQ_NONE; } From 16a2e7d359b9fc64fb8a6717c0642691b1e60bb7 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:37 +0530 Subject: [PATCH 20/51] dmaengine/amba-pl08x: Get rid of pl08x_pre_boundary() Pl080 Manual says: "Bursts do not cross the 1KB address boundary" We can program the controller to cross 1 KB boundary on a burst and controller can take care of this boundary condition by itself. Following is the discussion with ARM Technical Support Guys (David): [Viresh] Manual says: "Bursts do not cross the 1KB address boundary" What does that actually mean? As, Maximum size transferable with a single LLI is 4095 * 4 =16380 ~ 16KB. So, if we don't have src/dest address aligned to burst size, we can't use this big of an LLI. [David] There is a difference between bursts describing the total data transferred by the DMA controller and AHB bursts. Bursts described by the programmable parameters in the PL080 have no direct connection with the bursts that are seen on the AHB bus. The statement that "Bursts do not cross the 1KB address boundary" in the TRM is referring to AHB bursts, where this limitation is a requirement of the AHB spec. You can still issue bursts within the PL080 that are in excess of 1KB. The PL080 will make sure that its bursts are broken down into legal AHB bursts which will be formatted to ensure that no AHB burst crosses a 1KB boundary. Based on above discussion, this patch removes all code related to 1 KB boundary as we are not required to handle this in driver. Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 139 +++++-------------------------------- include/linux/amba/pl08x.h | 2 - 2 files changed, 16 insertions(+), 125 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 6bba32e5ddb8..be9a1c718f9a 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -149,14 +149,6 @@ struct pl08x_driver_data { * PL08X specific defines */ -/* - * Memory boundaries: the manual for PL08x says that the controller - * cannot read past a 1KiB boundary, so these defines are used to - * create transfer LLIs that do not cross such boundaries. - */ -#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ -#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) - /* Size (bytes) of each LLI buffer allocated for one transfer */ # define PL08X_LLI_TSFR_SIZE 0x2000 @@ -567,18 +559,6 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, bd->remainder -= len; } -/* - * Return number of bytes to fill to boundary, or len. - * This calculation works for any value of addr. - */ -static inline size_t pl08x_pre_boundary(u32 addr, size_t len) -{ - size_t boundary_len = PL08X_BOUNDARY_SIZE - - (addr & (PL08X_BOUNDARY_SIZE - 1)); - - return min(boundary_len, len); -} - /* * This fills in the table of LLIs for the transfer descriptor * Note that we assume we never have to change the burst sizes @@ -685,118 +665,30 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, * width left */ while (bd.remainder > (mbus->buswidth - 1)) { - size_t lli_len, target_len, tsize, odd_bytes; + size_t lli_len, tsize; /* * If enough left try to send max possible, * otherwise try to send the remainder */ - target_len = min(bd.remainder, max_bytes_per_lli); - + lli_len = min(bd.remainder, max_bytes_per_lli); /* - * Set bus lengths for incrementing buses to the - * number of bytes which fill to next memory boundary, - * limiting on the target length calculated above. 
+ * Check against minimum bus alignment: Calculate actual + * transfer size in relation to bus width and get a + * maximum remainder of the smallest bus width - 1 */ - if (cctl & PL080_CONTROL_SRC_INCR) - bd.srcbus.fill_bytes = - pl08x_pre_boundary(bd.srcbus.addr, - target_len); - else - bd.srcbus.fill_bytes = target_len; + tsize = lli_len / min(mbus->buswidth, sbus->buswidth); + lli_len = tsize * min(mbus->buswidth, sbus->buswidth); - if (cctl & PL080_CONTROL_DST_INCR) - bd.dstbus.fill_bytes = - pl08x_pre_boundary(bd.dstbus.addr, - target_len); - else - bd.dstbus.fill_bytes = target_len; + dev_vdbg(&pl08x->adev->dev, + "%s fill lli with single lli chunk of " + "size 0x%08zx (remainder 0x%08zx)\n", + __func__, lli_len, bd.remainder); - /* Find the nearest */ - lli_len = min(bd.srcbus.fill_bytes, - bd.dstbus.fill_bytes); - - BUG_ON(lli_len > bd.remainder); - - if (lli_len <= 0) { - dev_err(&pl08x->adev->dev, - "%s lli_len is %zu, <= 0\n", - __func__, lli_len); - return 0; - } - - if (lli_len == target_len) { - /* - * Can send what we wanted. - * Maintain alignment - */ - lli_len = (lli_len/mbus->buswidth) * - mbus->buswidth; - odd_bytes = 0; - } else { - /* - * So now we know how many bytes to transfer - * to get to the nearest boundary. The next - * LLI will past the boundary. However, we - * may be working to a boundary on the slave - * bus. We need to ensure the master stays - * aligned, and that we are working in - * multiples of the bus widths. - */ - odd_bytes = lli_len % mbus->buswidth; - lli_len -= odd_bytes; - - } - - if (lli_len) { - /* - * Check against minimum bus alignment: - * Calculate actual transfer size in relation - * to bus width an get a maximum remainder of - * the smallest bus width - 1 - */ - /* FIXME: use round_down()? */ - tsize = lli_len / min(mbus->buswidth, - sbus->buswidth); - lli_len = tsize * min(mbus->buswidth, - sbus->buswidth); - - if (target_len != lli_len) { - dev_vdbg(&pl08x->adev->dev, - "%s can't send what we want. 
Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n", - __func__, target_len, lli_len, txd->len); - } - - cctl = pl08x_cctl_bits(cctl, - bd.srcbus.buswidth, - bd.dstbus.buswidth, - tsize); - - dev_vdbg(&pl08x->adev->dev, - "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n", - __func__, lli_len, bd.remainder); - pl08x_fill_lli_for_desc(&bd, num_llis++, - lli_len, cctl); - total_bytes += lli_len; - } - - if (odd_bytes) { - /* - * Creep past the boundary, maintaining - * master alignment - */ - int j; - for (j = 0; (j < mbus->buswidth) - && (bd.remainder); j++) { - cctl = pl08x_cctl_bits(cctl, 1, 1, 1); - dev_vdbg(&pl08x->adev->dev, - "%s align with boundary, single byte (remain 0x%08zx)\n", - __func__, bd.remainder); - pl08x_fill_lli_for_desc(&bd, - num_llis++, 1, cctl); - total_bytes++; - } - } + cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, + bd.dstbus.buswidth, tsize); + pl08x_fill_lli_for_desc(&bd, num_llis++, lli_len, cctl); + total_bytes += lli_len; } /* @@ -811,6 +703,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, total_bytes++; } } + if (total_bytes != txd->len) { dev_err(&pl08x->adev->dev, "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index cd8f629da58f..ecd17f581e71 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h @@ -77,13 +77,11 @@ struct pl08x_channel_data { * @addr: current address * @maxwidth: the maximum width of a transfer on this bus * @buswidth: the width of this bus in bytes: 1, 2 or 4 - * @fill_bytes: bytes required to fill to the next bus memory boundary */ struct pl08x_bus_data { dma_addr_t addr; u8 maxwidth; u8 buswidth; - size_t fill_bytes; }; /** From fa6a940bf129c5417b602a4cdfe88b3dbd8e5898 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:38 +0530 Subject: [PATCH 21/51] dmaengine/amba-pl08x: max_bytes_per_lli is TRANSFER_SIZE * src_width (not MIN(width)) max_bytes_per_lli = bd.srcbus.buswidth * PL080_CONTROL_TRANSFER_SIZE_MASK; This is confirmed by ARM support guys. Below is summary of mail exchange with them: [Viresh] What is the total data to be transferred in case source and destination bus widths are different. Suppose, source bus width is 2 bytes and destination is 4 bytes. Now in order to transfer 80 bytes, what should be value of TransferSize field in control reg: 40? or 20?. [David from ARM] The value that is programmed into the TransferSize field should be the number of transfers needed to achieve the required data transfer. So, to transfer 80 bytes, with a Source Width of 2, the TransferSize field = should be programmed with: Total transfer size ------------------- = 40 [Viresh] Will this change if source is 4 bytes and dest is 2? [David] Yes - the calculation then becomes: Total transfer size ------------------- =20 Also, max_bytes_per_lli must be calculated after fixing src and dest widths not before that. So move this code to the correct place. This patch also removes max_bytes_per_lli from earlier print message, as till that point max_bytes_per_lli is unknown. 
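Restated as code against the driver's own names; the first line is only the worked 80-byte example from the mail exchange ("len" is an illustrative name for the total transfer), the second is the corrected limit added below:

	/* transfers programmed = total bytes / source width: 80/2 = 40, 80/4 = 20 */
	u32 transfer_size = len / bd.srcbus.buswidth;

	/* per-LLI byte limit, computed only after the bus widths are fixed up */
	max_bytes_per_lli = bd.srcbus.buswidth * PL080_CONTROL_TRANSFER_SIZE_MASK;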
Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index be9a1c718f9a..e5930d512b00 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -604,23 +604,17 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, bd.srcbus.buswidth = bd.srcbus.maxwidth; bd.dstbus.buswidth = bd.dstbus.maxwidth; - /* - * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) - */ - max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * - PL080_CONTROL_TRANSFER_SIZE_MASK; - /* We need to count this down to zero */ bd.remainder = txd->len; pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); - dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", + dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n", bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", bd.srcbus.buswidth, bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", bd.dstbus.buswidth, - bd.remainder, max_bytes_per_lli); + bd.remainder); dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", mbus == &bd.srcbus ? "src" : "dst", sbus == &bd.srcbus ? "src" : "dst"); @@ -660,6 +654,10 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, sbus->buswidth = 1; } + /* Bytes transferred = tsize * src width, not MIN(buswidths) */ + max_bytes_per_lli = bd.srcbus.buswidth * + PL080_CONTROL_TRANSFER_SIZE_MASK; + /* * Make largest possible LLIs until less than one bus * width left From 03af500f743f486648fc8afc38593e9844411945 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:39 +0530 Subject: [PATCH 22/51] dmaengine/amba-pl08x: Add prep_single_byte_llis() routine Code for creating single byte llis is present at several places. Create a routine to avoid code redundancy. Also, we don't need one lli per single byte transfer, we can have single lli to do all single byte transfer. Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 61 ++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 28 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index e5930d512b00..45d8a5d5bccd 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -559,6 +559,14 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, bd->remainder -= len; } +static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd, + u32 *cctl, u32 len, int num_llis, size_t *total_bytes) +{ + *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); + pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl); + (*total_bytes) += len; +} + /* * This fills in the table of LLIs for the transfer descriptor * Note that we assume we never have to change the burst sizes @@ -570,7 +578,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, struct pl08x_bus_data *mbus, *sbus; struct pl08x_lli_build_data bd; int num_llis = 0; - u32 cctl; + u32 cctl, early_bytes = 0; size_t max_bytes_per_lli, total_bytes = 0; struct pl08x_lli *llis_va; @@ -619,29 +627,27 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, mbus == &bd.srcbus ? "src" : "dst", sbus == &bd.srcbus ? 
"src" : "dst"); - if (txd->len < mbus->buswidth) { - /* Less than a bus width available - send as single bytes */ - while (bd.remainder) { - dev_vdbg(&pl08x->adev->dev, - "%s single byte LLIs for a transfer of " - "less than a bus width (remain 0x%08x)\n", - __func__, bd.remainder); - cctl = pl08x_cctl_bits(cctl, 1, 1, 1); - pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); - total_bytes++; - } - } else { - /* Make one byte LLIs until master bus is aligned */ - while ((mbus->addr) % (mbus->buswidth)) { - dev_vdbg(&pl08x->adev->dev, - "%s adjustment lli for less than bus width " - "(remain 0x%08x)\n", - __func__, bd.remainder); - cctl = pl08x_cctl_bits(cctl, 1, 1, 1); - pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); - total_bytes++; - } + /* + * Send byte by byte for following cases + * - Less than a bus width available + * - until master bus is aligned + */ + if (bd.remainder < mbus->buswidth) + early_bytes = bd.remainder; + else if ((mbus->addr) % (mbus->buswidth)) { + early_bytes = mbus->buswidth - (mbus->addr) % (mbus->buswidth); + if ((bd.remainder - early_bytes) < mbus->buswidth) + early_bytes = bd.remainder; + } + if (early_bytes) { + dev_vdbg(&pl08x->adev->dev, "%s byte width LLIs " + "(remain 0x%08x)\n", __func__, bd.remainder); + prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++, + &total_bytes); + } + + if (bd.remainder) { /* * Master now aligned * - if slave is not then we must set its width down @@ -692,13 +698,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, /* * Send any odd bytes */ - while (bd.remainder) { - cctl = pl08x_cctl_bits(cctl, 1, 1, 1); + if (bd.remainder) { dev_vdbg(&pl08x->adev->dev, - "%s align with boundary, single odd byte (remain %zu)\n", + "%s align with boundary, send odd bytes (remain %zu)\n", __func__, bd.remainder); - pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); - total_bytes++; + prep_byte_width_lli(&bd, &cctl, bd.remainder, + num_llis++, &total_bytes); } } From e0719165801fad04073e7dcd90e4afd02aba3fb7 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:40 +0530 Subject: [PATCH 23/51] dmaengine/amba-pl08x: Align lli_len to max(src.width, dst.width) Currently lli_len is aligned to min of two widths, which looks to be incorrect. Instead it should be aligned to max of both widths. Lets say, total_size = 441 bytes MIN: lets check if min() suits or not: CASE 1: srcwidth = 1, dstwidth = 4 min(src, dst) = 1 i.e. We program transfer size in control reg to 441. Now, till 440 bytes everything is fine, but on the last byte DMAC can't transfer 1 byte to dst, as its width is 4. CASE 2: srcwidth = 4, dstwidth = 1 min(src, dst) = 1 i.e. we program transfer size in control reg to 110 (data transferred = 110 * srcwidth). So, here too 1 byte is left, but on the source side. MAX: Lets check if max() suits or not: CASE 3: srcwidth = 1, dstwidth = 4 max(src, dst) = 4 Aligned size is 440 i.e. We program transfer size in control reg to 440. Now, all 440 bytes will be transferred without any issues. CASE 4: srcwidth = 4, dstwidth = 1 max(src, dst) = 4 Aligned size is 440 i.e. We program transfer size in control reg to 110 (data transferred = 110 * srcwidth). Now, also all 440 bytes will be transferred without any issues. 
Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 45d8a5d5bccd..4bcf6036f35d 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -669,20 +669,22 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, * width left */ while (bd.remainder > (mbus->buswidth - 1)) { - size_t lli_len, tsize; + size_t lli_len, tsize, width; /* * If enough left try to send max possible, * otherwise try to send the remainder */ lli_len = min(bd.remainder, max_bytes_per_lli); + /* - * Check against minimum bus alignment: Calculate actual + * Check against maximum bus alignment: Calculate actual * transfer size in relation to bus width and get a - * maximum remainder of the smallest bus width - 1 + * maximum remainder of the highest bus width - 1 */ - tsize = lli_len / min(mbus->buswidth, sbus->buswidth); - lli_len = tsize * min(mbus->buswidth, sbus->buswidth); + width = max(mbus->buswidth, sbus->buswidth); + lli_len = (lli_len / width) * width; + tsize = lli_len / bd.srcbus.buswidth; dev_vdbg(&pl08x->adev->dev, "%s fill lli with single lli chunk of " From 036f05fd6dcdb6a6b9e55703cb663112fa4c4e42 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:41 +0530 Subject: [PATCH 24/51] dmaengine/amba-pl08x: Choose peripheral bus as master bus When we have DMA transfers between peripheral and memory, then we shouldn't reduce width of peripheral at all, as that may be a strict requirement. But we can always reduce width of memory access, with some compromise in performance. Thus, we must select peripheral as master and not memory. Also this rearranges code to make it shorter. Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 4bcf6036f35d..f70aa574c58f 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -499,34 +499,24 @@ struct pl08x_lli_build_data { * byte data), slave is still not aligned, then its width will be reduced to * BYTE. * - prefers the destination bus if both available - * - if fixed address on one bus the other will be chosen + * - prefers bus with fixed address (i.e. 
peripheral) */ static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) { if (!(cctl & PL080_CONTROL_DST_INCR)) { - *mbus = &bd->srcbus; - *sbus = &bd->dstbus; - } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { *mbus = &bd->dstbus; *sbus = &bd->srcbus; + } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { + *mbus = &bd->srcbus; + *sbus = &bd->dstbus; } else { - if (bd->dstbus.buswidth == 4) { + if (bd->dstbus.buswidth >= bd->srcbus.buswidth) { *mbus = &bd->dstbus; *sbus = &bd->srcbus; - } else if (bd->srcbus.buswidth == 4) { - *mbus = &bd->srcbus; - *sbus = &bd->dstbus; - } else if (bd->dstbus.buswidth == 2) { - *mbus = &bd->dstbus; - *sbus = &bd->srcbus; - } else if (bd->srcbus.buswidth == 2) { - *mbus = &bd->srcbus; - *sbus = &bd->dstbus; } else { - /* bd->srcbus.buswidth == 1 */ - *mbus = &bd->dstbus; - *sbus = &bd->srcbus; + *mbus = &bd->srcbus; + *sbus = &bd->dstbus; } } } From 0a2356572b1910cc977f4ccf3c9ee1ecab08327a Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:42 +0530 Subject: [PATCH 25/51] dmaengine/amba-pl08x: Pass flow controller information with slave channel data At least, on SPEAr platforms there is one peripheral, JPEG, which can be flow controller for DMA transfer. Currently DMA controller driver didn't support peripheral flow controller configurations. This patch adds device_fc field in struct pl08x_channel_data, which will be used only for slave transfers and is not used in case of mem2mem transfers. Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 61 +++++++++++++++++++++++++++++++++----- include/linux/amba/pl08x.h | 4 +++ 2 files changed, 57 insertions(+), 8 deletions(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index f70aa574c58f..a59c3c47286c 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -66,11 +66,6 @@ * after the final transfer signalled by LBREQ or LSREQ. The DMAC * will then move to the next LLI entry. * - * Only the former works sanely with scatter lists, so we only implement - * the DMAC flow control method. However, peripherals which use the LBREQ - * and LSREQ signals (eg, MMCI) are unable to use this mode, which through - * these hardware restrictions prevents them from using scatter DMA. - * * Global TODO: * - Break out common code from arch/arm/mach-s3c64xx and share */ @@ -617,6 +612,49 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, mbus == &bd.srcbus ? "src" : "dst", sbus == &bd.srcbus ? "src" : "dst"); + /* + * Zero length is only allowed if all these requirements are met: + * - flow controller is peripheral. + * - src.addr is aligned to src.width + * - dst.addr is aligned to dst.width + * + * sg_len == 1 should be true, as there can be two cases here: + * - Memory addresses are contiguous and are not scattered. Here, Only + * one sg will be passed by user driver, with memory address and zero + * length. We pass this to controller and after the transfer it will + * receive the last burst request from peripheral and so transfer + * finishes. + * + * - Memory addresses are scattered and are not contiguous. Here, + * Obviously as DMA controller doesn't know when a lli's transfer gets + * over, it can't load next lli. So in this case, there has to be an + * assumption that only one lli is supported. Thus, we can't have + * scattered addresses. 
+ */ + if (!bd.remainder) { + u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> + PL080_CONFIG_FLOW_CONTROL_SHIFT; + if (!((fc >= PL080_FLOW_SRC2DST_DST) && + (fc <= PL080_FLOW_SRC2DST_SRC))) { + dev_err(&pl08x->adev->dev, "%s sg len can't be zero", + __func__); + return 0; + } + + if ((bd.srcbus.addr % bd.srcbus.buswidth) || + (bd.srcbus.addr % bd.srcbus.buswidth)) { + dev_err(&pl08x->adev->dev, + "%s src & dst address must be aligned to src" + " & dst width if peripheral is flow controller", + __func__); + return 0; + } + + cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, + bd.dstbus.buswidth, 0); + pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl); + } + /* * Send byte by byte for following cases * - Less than a bus width available @@ -1250,7 +1288,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_txd *txd; - int ret; + int ret, tmp; /* * Current implementation ASSUMES only one sg @@ -1284,12 +1322,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( txd->len = sgl->length; if (direction == DMA_TO_DEVICE) { - txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; txd->cctl = plchan->dst_cctl; txd->src_addr = sgl->dma_address; txd->dst_addr = plchan->dst_addr; } else if (direction == DMA_FROM_DEVICE) { - txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; txd->cctl = plchan->src_cctl; txd->src_addr = plchan->src_addr; txd->dst_addr = sgl->dma_address; @@ -1299,6 +1335,15 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( return NULL; } + if (plchan->cd->device_fc) + tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER : + PL080_FLOW_PER2MEM_PER; + else + tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER : + PL080_FLOW_PER2MEM; + + txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; + ret = pl08x_prep_channel_resources(plchan, txd); if (ret) return NULL; diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index ecd17f581e71..a22662c93981 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h @@ -47,6 +47,9 @@ enum { * @muxval: a number usually used to poke into some mux regiser to * mux in the signal to this channel * @cctl_opt: default options for the channel control register + * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave + * channels. Fill with 'true' if peripheral should be flow controller. Direction + * will be selected at Runtime. * @addr: source/target address in physical memory for this DMA channel, * can be the address of a FIFO register for burst requests for example. * This can be left undefined if the PrimeCell API is used for configuring @@ -65,6 +68,7 @@ struct pl08x_channel_data { int max_signal; u32 muxval; u32 cctl; + bool device_fc; dma_addr_t addr; bool circular_buffer; bool single; From 57001a606f845ce2eda21a0f23e6aab20ee0cb04 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 5 Aug 2011 15:32:45 +0530 Subject: [PATCH 26/51] dmaengine/amba-pl08x: Call pl08x_free_txd() instead of calling kfree() directly pl08x_prep_channel_resources() is calling kfree() directly for txd(). To maintain consistency in code call pl08x_free_txd() instead. 
Signed-off-by: Viresh Kumar Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index a59c3c47286c..849eab85514b 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1174,7 +1174,9 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, num_llis = pl08x_fill_llis_for_desc(pl08x, txd); if (!num_llis) { - kfree(txd); + spin_lock_irqsave(&plchan->lock, flags); + pl08x_free_txd(pl08x, txd); + spin_unlock_irqrestore(&plchan->lock, flags); return -EINVAL; } From 981ed70d8e4faf3689dbf3c48868a31d5b004d7a Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Thu, 18 Aug 2011 16:50:51 +0200 Subject: [PATCH 27/51] dmatest: make dmatest threads freezable Making dmatest threads freezable allows its use for system PM testing. Signed-off-by: Guennadi Liakhovetski Acked-by: Dan Williams Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index accc18441b16..eb1d8641cf5c 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -251,6 +252,7 @@ static int dmatest_func(void *data) int i; thread_name = current->comm; + set_freezable_with_signal(); ret = -ENOMEM; @@ -305,7 +307,8 @@ static int dmatest_func(void *data) dma_addr_t dma_srcs[src_cnt]; dma_addr_t dma_dsts[dst_cnt]; struct completion cmp; - unsigned long tmo = msecs_to_jiffies(timeout); + unsigned long start, tmo, end = 0 /* compiler... */; + bool reload = true; u8 align = 0; total_tests++; @@ -404,7 +407,17 @@ static int dmatest_func(void *data) } dma_async_issue_pending(chan); - tmo = wait_for_completion_timeout(&cmp, tmo); + do { + start = jiffies; + if (reload) + end = start + msecs_to_jiffies(timeout); + else if (end <= start) + end = start + 1; + tmo = wait_for_completion_interruptible_timeout(&cmp, + end - start); + reload = try_to_freeze(); + } while (tmo == -ERESTARTSYS); + status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); if (tmo == 0) { From 73eab978ad6934499b83ecc920d470fe99c5e54d Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Thu, 25 Aug 2011 11:03:35 +0200 Subject: [PATCH 28/51] dmaengine i.MX SDMA: lock channel 0 channel0 of the sdma engine is the configuration channel. It is a shared resource and thus must be protected by a mutex. 
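The locking pattern the patch introduces, reduced to a sketch (illustrative; the hunks below guard both the script load and the context load in this way):

	mutex_lock(&sdma->channel_0_lock);
	/* ... fill bd0 for channel 0 and kick it ... */
	ret = sdma_run_channel(&sdma->channel[0]);
	mutex_unlock(&sdma->channel_0_lock);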
Signed-off-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 7bd7e98548cd..f50c87c303dd 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -318,6 +318,7 @@ struct sdma_engine { dma_addr_t context_phys; struct dma_device dma_device; struct clk *clk; + struct mutex channel_0_lock; struct sdma_script_start_addrs *script_addrs; }; @@ -415,11 +416,15 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, dma_addr_t buf_phys; int ret; + mutex_lock(&sdma->channel_0_lock); + buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL); - if (!buf_virt) - return -ENOMEM; + if (!buf_virt) { + ret = -ENOMEM; + goto err_out; + } bd0->mode.command = C0_SETPM; bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; @@ -433,6 +438,9 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, dma_free_coherent(NULL, size, buf_virt, buf_phys); +err_out: + mutex_unlock(&sdma->channel_0_lock); + return ret; } @@ -656,6 +664,8 @@ static int sdma_load_context(struct sdma_channel *sdmac) dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); + mutex_lock(&sdma->channel_0_lock); + memset(context, 0, sizeof(*context)); context->channel_state.pc = load_address; @@ -676,6 +686,8 @@ static int sdma_load_context(struct sdma_channel *sdmac) ret = sdma_run_channel(&sdma->channel[0]); + mutex_unlock(&sdma->channel_0_lock); + return ret; } @@ -1274,6 +1286,8 @@ static int __init sdma_probe(struct platform_device *pdev) if (!sdma) return -ENOMEM; + mutex_init(&sdma->channel_0_lock); + sdma->dev = &pdev->dev; iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); From 36e2f21ab481b3d6bd31b99e1de669fbbac4bd0e Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Thu, 25 Aug 2011 11:03:36 +0200 Subject: [PATCH 29/51] dmaengine i.MX SDMA: set firmware scripts addresses to negative value initially If we do not have a firmare script for a given transfer, the setup of this channel must fail. For this the script addresses have to be < 0 initially, not 0. Signed-off-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index f50c87c303dd..8abf8c190aad 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1281,6 +1281,7 @@ static int __init sdma_probe(struct platform_device *pdev) struct sdma_platform_data *pdata = pdev->dev.platform_data; int i; struct sdma_engine *sdma; + s32 *saddr_arr; sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); if (!sdma) @@ -1324,6 +1325,11 @@ static int __init sdma_probe(struct platform_device *pdev) goto err_alloc; } + /* initially no scripts available */ + saddr_arr = (s32 *)sdma->script_addrs; + for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) + saddr_arr[i] = -EINVAL; + if (of_id) pdev->id_entry = of_id->data; sdma->devtype = pdev->id_entry->driver_data; From 7b4b88e067d37cbbafd856121767f7e154294eb2 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Thu, 25 Aug 2011 11:03:37 +0200 Subject: [PATCH 30/51] dmaengine i.MX SDMA: use request_firmware_nowait The firmware blob may not be available when the driver probes. Instead of blocking the whole kernel use request_firmware_nowait() and continue without firmware. The ROM scripts can already be used then if available. 
For the devicetree case the ROM scripts are not available, still the probe function should not block. The driver will be unusable in this case, but we have no way of detecting this properly. The configuration of the dma channels will fail, so nothing bad should happen. Signed-off-by: Sascha Hauer Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 8abf8c190aad..b5cc27dc9a51 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1143,18 +1143,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma, saddr_arr[i] = addr_arr[i]; } -static int __init sdma_get_firmware(struct sdma_engine *sdma, - const char *fw_name) +static void sdma_load_firmware(const struct firmware *fw, void *context) { - const struct firmware *fw; + struct sdma_engine *sdma = context; const struct sdma_firmware_header *header; - int ret; const struct sdma_script_start_addrs *addr; unsigned short *ram_code; - ret = request_firmware(&fw, fw_name, sdma->dev); - if (ret) - return ret; + if (!fw) { + dev_err(sdma->dev, "firmware not found\n"); + return; + } if (fw->size < sizeof(*header)) goto err_firmware; @@ -1184,6 +1183,16 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma, err_firmware: release_firmware(fw); +} + +static int __init sdma_get_firmware(struct sdma_engine *sdma, + const char *fw_name) +{ + int ret; + + ret = request_firmware_nowait(THIS_MODULE, + FW_ACTION_HOTPLUG, fw_name, sdma->dev, + GFP_KERNEL, sdma, sdma_load_firmware); return ret; } From 89de9f65429a97ab627310e2e85426dcbe423f39 Mon Sep 17 00:00:00 2001 From: Per Forlin Date: Mon, 29 Aug 2011 13:33:32 +0200 Subject: [PATCH 31/51] dmaengine/ste_dma40: add missing kernel doc for pending_queue Signed-off-by: Per Forlin Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index cd3a7c726bf8..486b6c0b44e3 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -174,6 +174,7 @@ struct d40_base; * @tasklet: Tasklet that gets scheduled from interrupt context to complete a * transfer and call client callback. * @client: Cliented owned descriptor list. + * @pending_queue: Submitted jobs, to be issued by issue_pending() * @active: Active descriptor. * @queue: Queued jobs. * @dma_cfg: The client configuration of this dma channel. From 270e779036ff144d6c6904ce9480f0d70ff93e86 Mon Sep 17 00:00:00 2001 From: Per Forlin Date: Mon, 29 Aug 2011 13:33:33 +0200 Subject: [PATCH 32/51] dmaengine/ste_dma40: remove duplicate call to d40_pool_lli_free(). d40_desc_free() already calls d40_pool_lli_free(). 
Signed-off-by: Per Forlin Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 486b6c0b44e3..37388d10497a 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -478,7 +478,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) list_for_each_entry_safe(d, _d, &d40c->client, node) if (async_tx_test_ack(&d->txd)) { - d40_pool_lli_free(d40c, d); d40_desc_remove(d); desc = d; memset(desc, 0, sizeof(*desc)); @@ -1209,7 +1208,6 @@ static void dma_tasklet(unsigned long data) if (!d40d->cyclic) { if (async_tx_test_ack(&d40d->txd)) { - d40_pool_lli_free(d40c, d40d); d40_desc_remove(d40d); d40_desc_free(d40c, d40d); } else { @@ -1606,7 +1604,6 @@ static int d40_free_dma(struct d40_chan *d40c) /* Release client owned descriptors */ if (!list_empty(&d40c->client)) list_for_each_entry_safe(d, _d, &d40c->client, node) { - d40_pool_lli_free(d40c, d); d40_desc_remove(d); d40_desc_free(d40c, d); } From 70a207ad4db2f0c60308b3f32086263c438c67a3 Mon Sep 17 00:00:00 2001 From: Per Forlin Date: Mon, 29 Aug 2011 13:33:34 +0200 Subject: [PATCH 33/51] dmaengine/ste_dma40: fix Oops due to double free of client descriptor The client list may exist in two lists at the same time. This makes free fail since the same desc is freed multiple times. Remove desc from client list when adding it to the pending queue. Move free of client owned descriptors from free_dma() to terminate_all(). Unable to handle kernel paging request at virtual address 00100104 pgd = dea8c000 [00100104] *pgd=1ea62831, *pte=00000000, *ppte=00000000 Internal error: Oops: 817 [#1] PREEMPT SMP Modules linked in: CPU: 0 Not tainted (3.1.0-rc3+ #58) PC is at d40_free_chan_resources+0x64/0x330 Signed-off-by: Per Forlin Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 37388d10497a..92ec0a26401a 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -644,8 +644,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) return d; } +/* remove desc from current queue and add it to the pending_queue */ static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) { + d40_desc_remove(desc); + desc->is_in_client_list = false; list_add_tail(&desc->node, &d40c->pending_queue); } @@ -803,6 +806,7 @@ done: static void d40_term_all(struct d40_chan *d40c) { struct d40_desc *d40d; + struct d40_desc *_d; /* Release active descriptors */ while ((d40d = d40_first_active_get(d40c))) { @@ -822,6 +826,14 @@ static void d40_term_all(struct d40_chan *d40c) d40_desc_free(d40c, d40d); } + /* Release client owned descriptors */ + if (!list_empty(&d40c->client)) + list_for_each_entry_safe(d40d, _d, &d40c->client, node) { + d40_desc_remove(d40d); + d40_desc_free(d40c, d40d); + } + + d40c->pending_tx = 0; d40c->busy = false; } @@ -1594,20 +1606,10 @@ static int d40_free_dma(struct d40_chan *d40c) u32 event; struct d40_phy_res *phy = d40c->phy_chan; bool is_src; - struct d40_desc *d; - struct d40_desc *_d; - /* Terminate all queued and active transfers */ d40_term_all(d40c); - /* Release client owned descriptors */ - if (!list_empty(&d40c->client)) - list_for_each_entry_safe(d, _d, &d40c->client, node) { - d40_desc_remove(d); - d40_desc_free(d40c, d); - } - if (phy == NULL) { chan_err(d40c, "phy == null\n"); return 
-EINVAL; From 503473ac2a3952e6af254b0769fe788a67d797e5 Mon Sep 17 00:00:00 2001 From: Per Forlin Date: Mon, 29 Aug 2011 13:33:35 +0200 Subject: [PATCH 34/51] dmaengine/ste_dma40: fix memory leak due to prepared descriptors Prepared descriptors that are not submitted will not be freed. Add prepared descriptor to a list to be able to release them upon dmaengine_terminate_all(). Signed-off-by: Per Forlin Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 92ec0a26401a..467e4dcb20a0 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -177,6 +177,7 @@ struct d40_base; * @pending_queue: Submitted jobs, to be issued by issue_pending() * @active: Active descriptor. * @queue: Queued jobs. + * @prepare_queue: Prepared jobs. * @dma_cfg: The client configuration of this dma channel. * @configured: whether the dma_cfg configuration is valid * @base: Pointer to the device instance struct. @@ -204,6 +205,7 @@ struct d40_chan { struct list_head pending_queue; struct list_head active; struct list_head queue; + struct list_head prepare_queue; struct stedma40_chan_cfg dma_cfg; bool configured; struct d40_base *base; @@ -833,6 +835,13 @@ static void d40_term_all(struct d40_chan *d40c) d40_desc_free(d40c, d40d); } + /* Release descriptors in prepare queue */ + if (!list_empty(&d40c->prepare_queue)) + list_for_each_entry_safe(d40d, _d, + &d40c->prepare_queue, node) { + d40_desc_remove(d40d); + d40_desc_free(d40c, d40d); + } d40c->pending_tx = 0; d40c->busy = false; @@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, goto err; } + /* + * add descriptor to the prepare queue in order to be able + * to free them later in terminate_all + */ + list_add_tail(&desc->node, &chan->prepare_queue); + spin_unlock_irqrestore(&chan->lock, flags); return &desc->txd; @@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, INIT_LIST_HEAD(&d40c->queue); INIT_LIST_HEAD(&d40c->pending_queue); INIT_LIST_HEAD(&d40c->client); + INIT_LIST_HEAD(&d40c->prepare_queue); tasklet_init(&d40c->tasklet, dma_tasklet, (unsigned long) d40c); From 7703eac96abd119dcfbb04f287a5127462d18269 Mon Sep 17 00:00:00 2001 From: Russell King - ARM Linux Date: Wed, 31 Aug 2011 09:34:35 +0100 Subject: [PATCH 35/51] dmaengine: amba-pl08x: make filter check that the channel is owned by pl08x Before converting the dma channel to our private data structure, first check that the channel is indeed one which our driver registered. We do this by ensuring that the underlying device is bound to our driver. This avoids potential oopses if we try to reference 'plchan->name' against a foreign drivers dma channel. 
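For context, a hedged sketch of how a client ends up hitting this path: dma_request_channel() offers channels from every registered DMA engine to the filter, so the callback can be handed a channel that is not a pl08x one at all ("uart0_tx" is an invented signal name):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter now bails out early for channels owned by other drivers. */
	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");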
Signed-off-by: Russell King Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 3c2cad5b1165..cd8df7f5b5c8 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -87,6 +87,8 @@ #define DRIVER_NAME "pl08xdmac" +static struct amba_driver pl08x_amba_driver; + /** * struct vendor_data - vendor-specific config parameters for PL08x derivatives * @channels: the number of channels available in this variant @@ -1420,9 +1422,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) { - struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + struct pl08x_dma_chan *plchan; char *name = chan_id; + /* Reject channels for devices not bound to this driver */ + if (chan->device->dev->driver != &pl08x_amba_driver.drv) + return false; + + plchan = to_pl08x_chan(chan); + /* Check that the channel is not taken! */ if (!strcmp(plchan->name, name)) return true; From a2f5203fec3c06d68a6bb45ad41f2adebf9ac5e0 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:29 +0900 Subject: [PATCH 36/51] DMA: PL330: Add support runtime PM for PL330 DMAC Signed-off-by: Boojin Kim Acked-by: Jassi Brar Acked-by: Linus Walleij Acked-by: Vinod Koul Cc: Dan Williams Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 75 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 73 insertions(+), 2 deletions(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 00eee59e8b33..0b99af18f9a1 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -17,6 +17,7 @@ #include #include #include +#include #define NR_DEFAULT_DESC 16 @@ -83,6 +84,8 @@ struct dma_pl330_dmac { /* Peripheral channels connected to this DMAC */ struct dma_pl330_chan *peripherals; /* keep at end */ + + struct clk *clk; }; struct dma_pl330_desc { @@ -696,6 +699,30 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) goto probe_err1; } + pdmac->clk = clk_get(&adev->dev, "dma"); + if (IS_ERR(pdmac->clk)) { + dev_err(&adev->dev, "Cannot get operation clock.\n"); + ret = -EINVAL; + goto probe_err1; + } + + amba_set_drvdata(adev, pdmac); + +#ifdef CONFIG_PM_RUNTIME + /* to use the runtime PM helper functions */ + pm_runtime_enable(&adev->dev); + + /* enable the power domain */ + if (pm_runtime_get_sync(&adev->dev)) { + dev_err(&adev->dev, "failed to get runtime pm\n"); + ret = -ENODEV; + goto probe_err1; + } +#else + /* enable dma clk */ + clk_enable(pdmac->clk); +#endif + irq = adev->irq[0]; ret = request_irq(irq, pl330_irq_handler, 0, dev_name(&adev->dev), pi); @@ -771,8 +798,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) goto probe_err4; } - amba_set_drvdata(adev, pdmac); - dev_info(&adev->dev, "Loaded driver for PL330 DMAC-%d\n", adev->periphid); dev_info(&adev->dev, @@ -833,6 +858,13 @@ static int __devexit pl330_remove(struct amba_device *adev) res = &adev->res; release_mem_region(res->start, resource_size(res)); +#ifdef CONFIG_PM_RUNTIME + pm_runtime_put(&adev->dev); + pm_runtime_disable(&adev->dev); +#else + clk_disable(pdmac->clk); +#endif + kfree(pdmac); return 0; @@ -846,10 +878,49 @@ static struct amba_id pl330_ids[] = { { 0, 0 }, }; +#ifdef CONFIG_PM_RUNTIME +static int pl330_runtime_suspend(struct device *dev) +{ + struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev); + + if (!pdmac) { + dev_err(dev, "failed to get dmac\n"); + return 
-ENODEV; + } + + clk_disable(pdmac->clk); + + return 0; +} + +static int pl330_runtime_resume(struct device *dev) +{ + struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev); + + if (!pdmac) { + dev_err(dev, "failed to get dmac\n"); + return -ENODEV; + } + + clk_enable(pdmac->clk); + + return 0; +} +#else +#define pl330_runtime_suspend NULL +#define pl330_runtime_resume NULL +#endif /* CONFIG_PM_RUNTIME */ + +static const struct dev_pm_ops pl330_pm_ops = { + .runtime_suspend = pl330_runtime_suspend, + .runtime_resume = pl330_runtime_resume, +}; + static struct amba_driver pl330_driver = { .drv = { .owner = THIS_MODULE, .name = "dma-pl330", + .pm = &pl330_pm_ops, }, .id_table = pl330_ids, .probe = pl330_probe, From 1b9bb715e7c4c189c4215a11a09e2ccb16598d86 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:30 +0900 Subject: [PATCH 37/51] DMA: PL330: Update PL330 DMA API driver This patch updates following 3 items. 1. Removes unneccessary code. 2. Add AMBA, PL330 configuration 3. Change the meaning of 'peri_id' variable from PL330 event number to specific dma id by user. Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Cc: Dan Williams Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 3 ++- drivers/dma/pl330.c | 14 +++++++++----- include/linux/amba/pl330.h | 6 +----- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 2e3b3d38c465..ab8f469f5cf8 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -193,7 +193,8 @@ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL config PL330_DMA tristate "DMA API Driver for PL330" select DMA_ENGINE - depends on PL330 + depends on ARM_AMBA + select PL330 help Select if your platform has one or more PL330 DMACs. You need to provide platform specific settings via diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 0b99af18f9a1..d5829c734fad 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -18,6 +18,7 @@ #include #include #include +#include #define NR_DEFAULT_DESC 16 @@ -69,6 +70,10 @@ struct dma_pl330_chan { * NULL if the channel is available to be acquired. 
*/ void *pl330_chid; + + /* For D-to-M and M-to-D channels */ + int burst_sz; /* the peripheral fifo width */ + dma_addr_t fifo_addr; }; struct dma_pl330_dmac { @@ -456,7 +461,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) if (peri) { desc->req.rqtype = peri->rqtype; - desc->req.peri = peri->peri_id; + desc->req.peri = pch->chan.chan_id; } else { desc->req.rqtype = MEMTOMEM; desc->req.peri = 0; @@ -582,7 +587,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct dma_pl330_peri *peri = chan->private; struct scatterlist *sg; unsigned long flags; - int i, burst_size; + int i; dma_addr_t addr; if (unlikely(!pch || !sgl || !sg_len || !peri)) @@ -598,8 +603,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, return NULL; } - addr = peri->fifo_addr; - burst_size = peri->burst_sz; + addr = pch->fifo_addr; first = NULL; @@ -647,7 +651,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, sg_dma_address(sg), addr, sg_dma_len(sg)); } - desc->rqcfg.brst_size = burst_size; + desc->rqcfg.brst_size = pch->burst_sz; desc->rqcfg.brst_len = 1; } diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h index cbee7de7dd36..d12f077a6daf 100644 --- a/include/linux/amba/pl330.h +++ b/include/linux/amba/pl330.h @@ -19,12 +19,8 @@ struct dma_pl330_peri { * Peri_Req i/f of the DMAC that is * peripheral could be reached from. */ - u8 peri_id; /* {0, 31} */ + u8 peri_id; /* specific dma id */ enum pl330_reqtype rqtype; - - /* For M->D and D->M Channels */ - int burst_sz; /* in power of 2 */ - dma_addr_t fifo_addr; }; struct dma_pl330_platdata { From 1d0c1d606d787e833ee3bd9e1cda640e75c4681a Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:31 +0900 Subject: [PATCH 38/51] DMA: PL330: Support DMA_SLAVE_CONFIG command Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Cc: Dan Williams Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 55 ++++++++++++++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 15 deletions(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index d5829c734fad..e7f9d1d3d81a 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -73,6 +73,7 @@ struct dma_pl330_chan { /* For D-to-M and M-to-D channels */ int burst_sz; /* the peripheral fifo width */ + int burst_len; /* the number of burst */ dma_addr_t fifo_addr; }; @@ -263,23 +264,47 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned struct dma_pl330_chan *pch = to_pchan(chan); struct dma_pl330_desc *desc; unsigned long flags; + struct dma_pl330_dmac *pdmac = pch->dmac; + struct dma_slave_config *slave_config; - /* Only supports DMA_TERMINATE_ALL */ - if (cmd != DMA_TERMINATE_ALL) + switch (cmd) { + case DMA_TERMINATE_ALL: + spin_lock_irqsave(&pch->lock, flags); + + /* FLUSH the PL330 Channel thread */ + pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); + + /* Mark all desc done */ + list_for_each_entry(desc, &pch->work_list, node) + desc->status = DONE; + + spin_unlock_irqrestore(&pch->lock, flags); + + pl330_tasklet((unsigned long) pch); + break; + case DMA_SLAVE_CONFIG: + slave_config = (struct dma_slave_config *)arg; + + if (slave_config->direction == DMA_TO_DEVICE) { + if (slave_config->dst_addr) + pch->fifo_addr = slave_config->dst_addr; + if (slave_config->dst_addr_width) + pch->burst_sz = __ffs(slave_config->dst_addr_width); + if (slave_config->dst_maxburst) + pch->burst_len = slave_config->dst_maxburst; + } 
else if (slave_config->direction == DMA_FROM_DEVICE) { + if (slave_config->src_addr) + pch->fifo_addr = slave_config->src_addr; + if (slave_config->src_addr_width) + pch->burst_sz = __ffs(slave_config->src_addr_width); + if (slave_config->src_maxburst) + pch->burst_len = slave_config->src_maxburst; + } + break; + default: + dev_err(pch->dmac->pif.dev, "Not supported command.\n"); return -ENXIO; - - spin_lock_irqsave(&pch->lock, flags); - - /* FLUSH the PL330 Channel thread */ - pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); - - /* Mark all desc done */ - list_for_each_entry(desc, &pch->work_list, node) - desc->status = DONE; - - spin_unlock_irqrestore(&pch->lock, flags); - - pl330_tasklet((unsigned long) pch); + } return 0; } From ae43b886f174297366d4e09a008ad8e6592d95df Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:32 +0900 Subject: [PATCH 39/51] DMA: PL330: Remove the start operation for handling DMA_TERMINATE_ALL command Original code carries out the start operation after flush operation. But start operation is not required for DMA_TERMINATE_ALL command. So, this patch removes the unnecessary start operation and only carries out the flush operation for handling DMA_TERMINATE_ALL command. Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Cc: Dan Williams Signed-off-by: Kukjin Kim [Fixed typos in changelog] Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index e7f9d1d3d81a..59943ec1e74a 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -262,10 +262,11 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { struct dma_pl330_chan *pch = to_pchan(chan); - struct dma_pl330_desc *desc; + struct dma_pl330_desc *desc, *_dt; unsigned long flags; struct dma_pl330_dmac *pdmac = pch->dmac; struct dma_slave_config *slave_config; + LIST_HEAD(list); switch (cmd) { case DMA_TERMINATE_ALL: @@ -275,12 +276,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); /* Mark all desc done */ - list_for_each_entry(desc, &pch->work_list, node) + list_for_each_entry_safe(desc, _dt, &pch->work_list , node) { desc->status = DONE; + pch->completed = desc->txd.cookie; + list_move_tail(&desc->node, &list); + } + list_splice_tail_init(&list, &pdmac->desc_pool); spin_unlock_irqrestore(&pch->lock, flags); - - pl330_tasklet((unsigned long) pch); break; case DMA_SLAVE_CONFIG: slave_config = (struct dma_slave_config *)arg; From 42bc9cf45939c26a5c5eb946d4fd35f1a7b0f9f8 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:33 +0900 Subject: [PATCH 40/51] DMA: PL330: Add DMA_CYCLIC capability This patch adds DMA_CYCLIC capability that is used for audio driver. DMA driver activated with it reuses the dma requests that were submitted through tx_submit(). 
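A hypothetical client-side use of the new capability, with invented buffer, period and callback names, only to show the intended flow:

	struct dma_async_tx_descriptor *desc;

	desc = chan->device->device_prep_dma_cyclic(chan, buf_dma, buf_len,
						    period_len, DMA_TO_DEVICE);
	if (desc) {
		desc->callback = period_elapsed;	/* invoked once per period */
		desc->callback_param = runtime_data;
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}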
Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Cc: Dan Williams Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 84 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 83 insertions(+), 1 deletion(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 59943ec1e74a..621134fdba4c 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -75,6 +75,9 @@ struct dma_pl330_chan { int burst_sz; /* the peripheral fifo width */ int burst_len; /* the number of burst */ dma_addr_t fifo_addr; + + /* for cyclic capability */ + bool cyclic; }; struct dma_pl330_dmac { @@ -161,6 +164,31 @@ static inline void free_desc_list(struct list_head *list) spin_unlock_irqrestore(&pdmac->pool_lock, flags); } +static inline void handle_cyclic_desc_list(struct list_head *list) +{ + struct dma_pl330_desc *desc; + struct dma_pl330_chan *pch; + unsigned long flags; + + if (list_empty(list)) + return; + + list_for_each_entry(desc, list, node) { + dma_async_tx_callback callback; + + /* Change status to reload it */ + desc->status = PREP; + pch = desc->pchan; + callback = desc->txd.callback; + if (callback) + callback(desc->txd.callback_param); + } + + spin_lock_irqsave(&pch->lock, flags); + list_splice_tail_init(list, &pch->work_list); + spin_unlock_irqrestore(&pch->lock, flags); +} + static inline void fill_queue(struct dma_pl330_chan *pch) { struct dma_pl330_desc *desc; @@ -214,7 +242,10 @@ static void pl330_tasklet(unsigned long data) spin_unlock_irqrestore(&pch->lock, flags); - free_desc_list(&list); + if (pch->cyclic) + handle_cyclic_desc_list(&list); + else + free_desc_list(&list); } static void dma_pl330_rqcb(void *token, enum pl330_op_err err) @@ -245,6 +276,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) spin_lock_irqsave(&pch->lock, flags); pch->completed = chan->cookie = 1; + pch->cyclic = false; pch->pl330_chid = pl330_request_channel(&pdmac->pif); if (!pch->pl330_chid) { @@ -324,6 +356,9 @@ static void pl330_free_chan_resources(struct dma_chan *chan) pl330_release_channel(pch->pl330_chid); pch->pl330_chid = NULL; + if (pch->cyclic) + list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); + spin_unlock_irqrestore(&pch->lock, flags); } @@ -560,6 +595,51 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) return burst_len; } +static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t dma_addr, size_t len, + size_t period_len, enum dma_data_direction direction) +{ + struct dma_pl330_desc *desc; + struct dma_pl330_chan *pch = to_pchan(chan); + dma_addr_t dst; + dma_addr_t src; + + desc = pl330_get_desc(pch); + if (!desc) { + dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", + __func__, __LINE__); + return NULL; + } + + switch (direction) { + case DMA_TO_DEVICE: + desc->rqcfg.src_inc = 1; + desc->rqcfg.dst_inc = 0; + src = dma_addr; + dst = pch->fifo_addr; + break; + case DMA_FROM_DEVICE: + desc->rqcfg.src_inc = 0; + desc->rqcfg.dst_inc = 1; + src = pch->fifo_addr; + dst = dma_addr; + break; + default: + dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", + __func__, __LINE__); + return NULL; + } + + desc->rqcfg.brst_size = pch->burst_sz; + desc->rqcfg.brst_len = 1; + + pch->cyclic = true; + + fill_px(&desc->px, dst, src, period_len); + + return &desc->txd; +} + static struct dma_async_tx_descriptor * pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags) @@ -791,6 +871,7 @@ 
pl330_probe(struct amba_device *adev, const struct amba_id *id) case MEMTODEV: case DEVTOMEM: dma_cap_set(DMA_SLAVE, pd->cap_mask); + dma_cap_set(DMA_CYCLIC, pd->cap_mask); break; default: dev_err(&adev->dev, "DEVTODEV Not Supported\n"); @@ -819,6 +900,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pd->device_alloc_chan_resources = pl330_alloc_chan_resources; pd->device_free_chan_resources = pl330_free_chan_resources; pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy; + pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic; pd->device_tx_status = pl330_tx_status; pd->device_prep_slave_sg = pl330_prep_slave_sg; pd->device_control = pl330_control; From aa0de00e4b9b3adba5db5ef5ee01e61b1b2e4329 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:34 +0900 Subject: [PATCH 41/51] ARM: SAMSUNG: Update to use PL330-DMA driver This patch adds to support PL330-DMA driver on DMADEVICE for S5P SoCs. Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- arch/arm/mach-exynos4/include/mach/dma.h | 4 ++-- arch/arm/mach-s5p64x0/include/mach/dma.h | 2 +- arch/arm/mach-s5pc100/include/mach/dma.h | 2 +- arch/arm/mach-s5pv210/include/mach/dma.h | 2 +- arch/arm/plat-samsung/Kconfig | 9 +++++++++ .../include/plat/{s3c-dma-pl330.h => dma-pl330.h} | 8 ++++---- arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h | 2 +- 7 files changed, 19 insertions(+), 10 deletions(-) rename arch/arm/plat-samsung/include/plat/{s3c-dma-pl330.h => dma-pl330.h} (93%) diff --git a/arch/arm/mach-exynos4/include/mach/dma.h b/arch/arm/mach-exynos4/include/mach/dma.h index 81209eb1409b..201842a3769e 100644 --- a/arch/arm/mach-exynos4/include/mach/dma.h +++ b/arch/arm/mach-exynos4/include/mach/dma.h @@ -20,7 +20,7 @@ #ifndef __MACH_DMA_H #define __MACH_DMA_H -/* This platform uses the common S3C DMA API driver for PL330 */ -#include +/* This platform uses the common DMA API driver for PL330 */ +#include #endif /* __MACH_DMA_H */ diff --git a/arch/arm/mach-s5p64x0/include/mach/dma.h b/arch/arm/mach-s5p64x0/include/mach/dma.h index 81209eb1409b..1beae2cca6a3 100644 --- a/arch/arm/mach-s5p64x0/include/mach/dma.h +++ b/arch/arm/mach-s5p64x0/include/mach/dma.h @@ -21,6 +21,6 @@ #define __MACH_DMA_H /* This platform uses the common S3C DMA API driver for PL330 */ -#include +#include #endif /* __MACH_DMA_H */ diff --git a/arch/arm/mach-s5pc100/include/mach/dma.h b/arch/arm/mach-s5pc100/include/mach/dma.h index 81209eb1409b..1beae2cca6a3 100644 --- a/arch/arm/mach-s5pc100/include/mach/dma.h +++ b/arch/arm/mach-s5pc100/include/mach/dma.h @@ -21,6 +21,6 @@ #define __MACH_DMA_H /* This platform uses the common S3C DMA API driver for PL330 */ -#include +#include #endif /* __MACH_DMA_H */ diff --git a/arch/arm/mach-s5pv210/include/mach/dma.h b/arch/arm/mach-s5pv210/include/mach/dma.h index 81209eb1409b..1beae2cca6a3 100644 --- a/arch/arm/mach-s5pv210/include/mach/dma.h +++ b/arch/arm/mach-s5pv210/include/mach/dma.h @@ -21,6 +21,6 @@ #define __MACH_DMA_H /* This platform uses the common S3C DMA API driver for PL330 */ -#include +#include #endif /* __MACH_DMA_H */ diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig index b3e10659e4b8..e6f01ab86208 100644 --- a/arch/arm/plat-samsung/Kconfig +++ b/arch/arm/plat-samsung/Kconfig @@ -306,6 +306,15 @@ config S3C_PL330_DMA help S3C DMA API Driver for PL330 DMAC. 
+config SAMSUNG_DMADEV + bool + select DMADEVICES + select PL330_DMA if (CPU_EXYNOS4210 || CPU_S5PV210 || CPU_S5PC100 || \ + CPU_S5P6450 || CPU_S5P6440) + select ARM_AMBA + help + Use DMA device engine for PL330 DMAC. + comment "Power management" config SAMSUNG_PM_DEBUG diff --git a/arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h b/arch/arm/plat-samsung/include/plat/dma-pl330.h similarity index 93% rename from arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h rename to arch/arm/plat-samsung/include/plat/dma-pl330.h index 810744213120..0a5dade0e85c 100644 --- a/arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h +++ b/arch/arm/plat-samsung/include/plat/dma-pl330.h @@ -8,8 +8,8 @@ * (at your option) any later version. */ -#ifndef __S3C_DMA_PL330_H_ -#define __S3C_DMA_PL330_H_ +#ifndef __DMA_PL330_H_ +#define __DMA_PL330_H_ __FILE__ #define S3C2410_DMAF_AUTOSTART (1 << 0) #define S3C2410_DMAF_CIRCULAR (1 << 1) @@ -20,7 +20,7 @@ * For the sake of consistency across client drivers, * We keep the channel names unchanged and only add * missing peripherals are added. - * Order is not important since S3C PL330 API driver + * Order is not important since DMA PL330 API driver * use these just as IDs. */ enum dma_ch { @@ -95,4 +95,4 @@ static inline bool s3c_dma_has_circular(void) #include -#endif /* __S3C_DMA_PL330_H_ */ +#endif /* __DMA_PL330_H_ */ diff --git a/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h b/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h index bf5e2a9d408d..64fdf66ff9cf 100644 --- a/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h +++ b/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h @@ -12,7 +12,7 @@ #ifndef __S3C_PL330_PDATA_H #define __S3C_PL330_PDATA_H -#include +#include /* * Every PL330 DMAC has max 32 peripheral interfaces, From c4e1662550a3bd23df7cff4611eff67ba2afe078 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:35 +0900 Subject: [PATCH 42/51] ARM: SAMSUNG: Add common DMA operations This patch adds common DMA operations which are used for Samsung DMA drivers. Currently there are two types of DMA driver for Samsung SoCs. The one is S3C-DMA for S3C SoCs and the other is PL330-DMA for S5P SoCs. This patch provides funcion pointers for common DMA operations to DMA client driver like SPI and Audio. It makes DMA client drivers support multi-platform. In addition, this common DMA operations implement the shared actions that are needed for DMA client driver. For example shared actions are filter() function for dma_request_channel() and parameter passing for device_prep_slave_sg(). 
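A hedged sketch of how a client driver is expected to use these ops (the channel id, FIFO address and client struct are invented for illustration; the structures are the ones added by this patch):

	struct samsung_dma_ops *ops = samsung_dma_get_ops();
	struct samsung_dma_info info = {
		.cap       = DMA_SLAVE,
		.direction = DMA_TO_DEVICE,
		.width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.fifo      = tx_fifo_phys,	/* assumed FIFO address      */
		.client    = &my_dma_client,	/* assumed client struct     */
	};
	unsigned int ch;

	ch = ops->request(DMACH_SPI0_TX, &info);	/* DMACH_* id is assumed */
	ops->prepare(ch, &prep_info);	/* filled-in samsung_dma_prep_info */
	ops->trigger(ch);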
Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- arch/arm/mach-s3c2410/include/mach/dma.h | 8 +- arch/arm/mach-s3c64xx/include/mach/dma.h | 4 + arch/arm/plat-samsung/Makefile | 6 +- arch/arm/plat-samsung/dma-ops.c | 131 +++++++++++++++++ arch/arm/plat-samsung/include/plat/dma-ops.h | 63 +++++++++ .../arm/plat-samsung/include/plat/dma-pl330.h | 4 + arch/arm/plat-samsung/include/plat/dma.h | 1 + arch/arm/plat-samsung/s3c-dma-ops.c | 133 ++++++++++++++++++ 8 files changed, 347 insertions(+), 3 deletions(-) create mode 100644 arch/arm/plat-samsung/dma-ops.c create mode 100644 arch/arm/plat-samsung/include/plat/dma-ops.h create mode 100644 arch/arm/plat-samsung/s3c-dma-ops.c diff --git a/arch/arm/mach-s3c2410/include/mach/dma.h b/arch/arm/mach-s3c2410/include/mach/dma.h index b2b2a5bb275e..e61a91f88c52 100644 --- a/arch/arm/mach-s3c2410/include/mach/dma.h +++ b/arch/arm/mach-s3c2410/include/mach/dma.h @@ -13,7 +13,6 @@ #ifndef __ASM_ARCH_DMA_H #define __ASM_ARCH_DMA_H __FILE__ -#include #include #define MAX_DMA_TRANSFER_SIZE 0x100000 /* Data Unit is half word */ @@ -51,6 +50,13 @@ enum dma_ch { DMACH_MAX, /* the end entry */ }; +static inline bool samsung_dma_is_dmadev(void) +{ + return false; +} + +#include + #define DMACH_LOW_LEVEL (1<<28) /* use this to specifiy hardware ch no */ /* we have 4 dma channels */ diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h index 0a5d9268a23e..49c3a53135d4 100644 --- a/arch/arm/mach-s3c64xx/include/mach/dma.h +++ b/arch/arm/mach-s3c64xx/include/mach/dma.h @@ -63,6 +63,10 @@ static __inline__ bool s3c_dma_has_circular(void) return true; } +static inline bool samsung_dma_is_dmadev(void) +{ + return false; +} #define S3C2410_DMAF_CIRCULAR (1 << 0) #include diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile index 853764ba8cc5..e2ee0b8a3763 100644 --- a/arch/arm/plat-samsung/Makefile +++ b/arch/arm/plat-samsung/Makefile @@ -63,9 +63,11 @@ obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT) += dev-backlight.o # DMA support -obj-$(CONFIG_S3C_DMA) += dma.o +obj-$(CONFIG_S3C_DMA) += dma.o s3c-dma-ops.o -obj-$(CONFIG_S3C_PL330_DMA) += s3c-pl330.o +obj-$(CONFIG_SAMSUNG_DMADEV) += dma-ops.o + +obj-$(CONFIG_S3C_PL330_DMA) += s3c-pl330.o s3c-dma-ops.o # PM support diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c new file mode 100644 index 000000000000..6e3d9abc9e2e --- /dev/null +++ b/arch/arm/plat-samsung/dma-ops.c @@ -0,0 +1,131 @@ +/* linux/arch/arm/plat-samsung/dma-ops.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Samsung DMA Operations + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +#include + +static inline bool pl330_filter(struct dma_chan *chan, void *param) +{ + struct dma_pl330_peri *peri = chan->private; + return peri->peri_id == (unsigned)param; +} + +static unsigned samsung_dmadev_request(enum dma_ch dma_ch, + struct samsung_dma_info *info) +{ + struct dma_chan *chan; + dma_cap_mask_t mask; + struct dma_slave_config slave_config; + + dma_cap_zero(mask); + dma_cap_set(info->cap, mask); + + chan = dma_request_channel(mask, pl330_filter, (void *)dma_ch); + + if (info->direction == DMA_FROM_DEVICE) { + memset(&slave_config, 0, sizeof(struct dma_slave_config)); + slave_config.direction = info->direction; + slave_config.src_addr = info->fifo; + slave_config.src_addr_width = info->width; + slave_config.src_maxburst = 1; + dmaengine_slave_config(chan, &slave_config); + } else if (info->direction == DMA_TO_DEVICE) { + memset(&slave_config, 0, sizeof(struct dma_slave_config)); + slave_config.direction = info->direction; + slave_config.dst_addr = info->fifo; + slave_config.dst_addr_width = info->width; + slave_config.dst_maxburst = 1; + dmaengine_slave_config(chan, &slave_config); + } + + return (unsigned)chan; +} + +static int samsung_dmadev_release(unsigned ch, + struct s3c2410_dma_client *client) +{ + dma_release_channel((struct dma_chan *)ch); + + return 0; +} + +static int samsung_dmadev_prepare(unsigned ch, + struct samsung_dma_prep_info *info) +{ + struct scatterlist sg; + struct dma_chan *chan = (struct dma_chan *)ch; + struct dma_async_tx_descriptor *desc; + + switch (info->cap) { + case DMA_SLAVE: + sg_init_table(&sg, 1); + sg_dma_len(&sg) = info->len; + sg_set_page(&sg, pfn_to_page(PFN_DOWN(info->buf)), + info->len, offset_in_page(info->buf)); + sg_dma_address(&sg) = info->buf; + + desc = chan->device->device_prep_slave_sg(chan, + &sg, 1, info->direction, DMA_PREP_INTERRUPT); + break; + case DMA_CYCLIC: + desc = chan->device->device_prep_dma_cyclic(chan, + info->buf, info->len, info->period, info->direction); + break; + default: + dev_err(&chan->dev->device, "unsupported format\n"); + return -EFAULT; + } + + if (!desc) { + dev_err(&chan->dev->device, "cannot prepare cyclic dma\n"); + return -EFAULT; + } + + desc->callback = info->fp; + desc->callback_param = info->fp_param; + + dmaengine_submit((struct dma_async_tx_descriptor *)desc); + + return 0; +} + +static inline int samsung_dmadev_trigger(unsigned ch) +{ + dma_async_issue_pending((struct dma_chan *)ch); + + return 0; +} + +static inline int samsung_dmadev_flush(unsigned ch) +{ + return dmaengine_terminate_all((struct dma_chan *)ch); +} + +struct samsung_dma_ops dmadev_ops = { + .request = samsung_dmadev_request, + .release = samsung_dmadev_release, + .prepare = samsung_dmadev_prepare, + .trigger = samsung_dmadev_trigger, + .started = NULL, + .flush = samsung_dmadev_flush, + .stop = samsung_dmadev_flush, +}; + +void *samsung_dmadev_get_ops(void) +{ + return &dmadev_ops; +} +EXPORT_SYMBOL(samsung_dmadev_get_ops); diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h new file mode 100644 index 000000000000..4c1a363526cf --- /dev/null +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h @@ -0,0 +1,63 @@ +/* arch/arm/plat-samsung/include/plat/dma-ops.h + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * + * Samsung DMA support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __SAMSUNG_DMA_OPS_H_ +#define __SAMSUNG_DMA_OPS_H_ __FILE__ + +#include + +struct samsung_dma_prep_info { + enum dma_transaction_type cap; + enum dma_data_direction direction; + dma_addr_t buf; + unsigned long period; + unsigned long len; + void (*fp)(void *data); + void *fp_param; +}; + +struct samsung_dma_info { + enum dma_transaction_type cap; + enum dma_data_direction direction; + enum dma_slave_buswidth width; + dma_addr_t fifo; + struct s3c2410_dma_client *client; +}; + +struct samsung_dma_ops { + unsigned (*request)(enum dma_ch ch, struct samsung_dma_info *info); + int (*release)(unsigned ch, struct s3c2410_dma_client *client); + int (*prepare)(unsigned ch, struct samsung_dma_prep_info *info); + int (*trigger)(unsigned ch); + int (*started)(unsigned ch); + int (*flush)(unsigned ch); + int (*stop)(unsigned ch); +}; + +extern void *samsung_dmadev_get_ops(void); +extern void *s3c_dma_get_ops(void); + +static inline void *__samsung_dma_get_ops(void) +{ + if (samsung_dma_is_dmadev()) + return samsung_dmadev_get_ops(); + else + return s3c_dma_get_ops(); +} + +/* + * samsung_dma_get_ops + * get the set of samsung dma operations + */ +#define samsung_dma_get_ops() __samsung_dma_get_ops() + +#endif /* __SAMSUNG_DMA_OPS_H_ */ diff --git a/arch/arm/plat-samsung/include/plat/dma-pl330.h b/arch/arm/plat-samsung/include/plat/dma-pl330.h index 0a5dade0e85c..2916920034f0 100644 --- a/arch/arm/plat-samsung/include/plat/dma-pl330.h +++ b/arch/arm/plat-samsung/include/plat/dma-pl330.h @@ -93,6 +93,10 @@ static inline bool s3c_dma_has_circular(void) return true; } +static inline bool samsung_dma_is_dmadev(void) +{ + return true; +} #include #endif /* __DMA_PL330_H_ */ diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h index 8c273b7a6f56..361076060744 100644 --- a/arch/arm/plat-samsung/include/plat/dma.h +++ b/arch/arm/plat-samsung/include/plat/dma.h @@ -126,3 +126,4 @@ extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn); extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn); +#include diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c new file mode 100644 index 000000000000..33ab3241268d --- /dev/null +++ b/arch/arm/plat-samsung/s3c-dma-ops.c @@ -0,0 +1,133 @@ +/* linux/arch/arm/plat-samsung/s3c-dma-ops.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Samsung S3C-DMA Operations + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +#include + +struct cb_data { + void (*fp) (void *); + void *fp_param; + unsigned ch; + struct list_head node; +}; + +static LIST_HEAD(dma_list); + +static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param, + int size, enum s3c2410_dma_buffresult res) +{ + struct cb_data *data = param; + + data->fp(data->fp_param); +} + +static unsigned s3c_dma_request(enum dma_ch dma_ch, + struct samsung_dma_info *info) +{ + struct cb_data *data; + + if (s3c2410_dma_request(dma_ch, info->client, NULL) < 0) { + s3c2410_dma_free(dma_ch, info->client); + return 0; + } + + data = kzalloc(sizeof(struct cb_data), GFP_KERNEL); + data->ch = dma_ch; + list_add_tail(&data->node, &dma_list); + + if (info->direction == DMA_FROM_DEVICE) + s3c2410_dma_devconfig(dma_ch, S3C2410_DMASRC_HW, info->fifo); + else + s3c2410_dma_devconfig(dma_ch, S3C2410_DMASRC_MEM, info->fifo); + + if (info->cap == DMA_CYCLIC) + s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR); + + s3c2410_dma_config(dma_ch, info->width); + + return (unsigned)dma_ch; +} + +static int s3c_dma_release(unsigned ch, struct s3c2410_dma_client *client) +{ + struct cb_data *data; + + list_for_each_entry(data, &dma_list, node) + if (data->ch == ch) + break; + list_del(&data->node); + + s3c2410_dma_free(ch, client); + kfree(data); + + return 0; +} + +static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep_info *info) +{ + struct cb_data *data; + int len = (info->cap == DMA_CYCLIC) ? info->period : info->len; + + list_for_each_entry(data, &dma_list, node) + if (data->ch == ch) + break; + + if (!data->fp) { + s3c2410_dma_set_buffdone_fn(ch, s3c_dma_cb); + data->fp = info->fp; + data->fp_param = info->fp_param; + } + + s3c2410_dma_enqueue(ch, (void *)data, info->buf, len); + + return 0; +} + +static inline int s3c_dma_trigger(unsigned ch) +{ + return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START); +} + +static inline int s3c_dma_started(unsigned ch) +{ + return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STARTED); +} + +static inline int s3c_dma_flush(unsigned ch) +{ + return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH); +} + +static inline int s3c_dma_stop(unsigned ch) +{ + return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP); +} + +static struct samsung_dma_ops s3c_dma_ops = { + .request = s3c_dma_request, + .release = s3c_dma_release, + .prepare = s3c_dma_prepare, + .trigger = s3c_dma_trigger, + .started = s3c_dma_started, + .flush = s3c_dma_flush, + .stop = s3c_dma_stop, +}; + +void *s3c_dma_get_ops(void) +{ + return &s3c_dma_ops; +} +EXPORT_SYMBOL(s3c_dma_get_ops); From bf856fbb5e104ab9afd366baa0d744dd8cfa074d Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:36 +0900 Subject: [PATCH 43/51] ARM: EXYNOS4: Use generic DMA PL330 driver This patch makes Samsung EXYNOS4 to use DMA PL330 driver on DMADEVICE. The EXYNOS4 uses DMA generic APIs instead of SAMSUNG specific S3C-PL330 APIs. 
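[Note: the sketch below is illustrative only and is not part of this patch. It shows how a hypothetical client driver ("foo") could receive data from a peripheral FIFO through the samsung_dma_ops interface added earlier in this series; on EXYNOS4, samsung_dma_get_ops() resolves to the dmaengine-backed operations in dma-ops.c. The DMACH_I2S0_RX channel, the FIFO address, the buffer and the callback names are assumptions made up for the example.]

/*
 * Illustrative sketch only -- not part of this patch series.
 * A hypothetical "foo" client receiving data from its RX FIFO via
 * the samsung_dma_ops abstraction (plat/dma-ops.h).
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <mach/dma.h>
#include <plat/dma-ops.h>

struct foo_dev {
	struct samsung_dma_ops	*dma_ops;
	unsigned		rx_ch;
	dma_addr_t		rx_buf;		/* DMA address of receive buffer */
	unsigned long		rx_len;
};

/* completion callback, invoked by the DMA driver when the buffer is done */
static void foo_dma_complete(void *param)
{
	struct foo_dev *foo = param;

	/* process the data now sitting in foo->rx_buf ... */
}

static int foo_start_rx(struct foo_dev *foo, dma_addr_t fifo)
{
	/* peripheral (FIFO) side of the transfer */
	struct samsung_dma_info info = {
		.cap		= DMA_SLAVE,
		.direction	= DMA_FROM_DEVICE,
		.width		= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.fifo		= fifo,
	};
	/* one slave buffer plus completion callback */
	struct samsung_dma_prep_info prep = {
		.cap		= DMA_SLAVE,
		.direction	= DMA_FROM_DEVICE,
		.buf		= foo->rx_buf,
		.len		= foo->rx_len,
		.fp		= foo_dma_complete,
		.fp_param	= foo,
	};

	foo->dma_ops = samsung_dma_get_ops();

	/* acquire the channel for this peripheral */
	foo->rx_ch = foo->dma_ops->request(DMACH_I2S0_RX, &info);
	if (!foo->rx_ch)
		return -EBUSY;

	/* queue the buffer, then kick the transfer */
	foo->dma_ops->prepare(foo->rx_ch, &prep);

	return foo->dma_ops->trigger(foo->rx_ch);
}

Since s3c_dma_get_ops() exposes the same samsung_dma_ops structure on platforms that keep the legacy S3C DMA core, a client written this way needs no source changes when a SoC such as EXYNOS4 is switched over to the generic PL330 driver.
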
Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- arch/arm/mach-exynos4/Kconfig | 2 +- arch/arm/mach-exynos4/clock.c | 11 +- arch/arm/mach-exynos4/dma.c | 299 +++++++++++++++++++++------------- 3 files changed, 198 insertions(+), 114 deletions(-) diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig index 0c77ab99fa16..9f00cf3450df 100644 --- a/arch/arm/mach-exynos4/Kconfig +++ b/arch/arm/mach-exynos4/Kconfig @@ -11,7 +11,7 @@ if ARCH_EXYNOS4 config CPU_EXYNOS4210 bool - select S3C_PL330_DMA + select SAMSUNG_DMADEV help Enable EXYNOS4210 CPU support diff --git a/arch/arm/mach-exynos4/clock.c b/arch/arm/mach-exynos4/clock.c index 851dea018578..fee2dd8791d0 100644 --- a/arch/arm/mach-exynos4/clock.c +++ b/arch/arm/mach-exynos4/clock.c @@ -43,6 +43,11 @@ static struct clk clk_sclk_usbphy1 = { .name = "sclk_usbphy1", }; +static struct clk dummy_apb_pclk = { + .name = "apb_pclk", + .id = -1, +}; + static int exynos4_clksrc_mask_top_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKSRC_MASK_TOP, clk, enable); @@ -454,12 +459,12 @@ static struct clk init_clocks_off[] = { .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 10), }, { - .name = "pdma", + .name = "dma", .devname = "s3c-pl330.0", .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 0), }, { - .name = "pdma", + .name = "dma", .devname = "s3c-pl330.1", .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 1), @@ -1210,5 +1215,7 @@ void __init exynos4_register_clocks(void) s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); + s3c24xx_register_clock(&dummy_apb_pclk); + s3c_pwmclk_init(); } diff --git a/arch/arm/mach-exynos4/dma.c b/arch/arm/mach-exynos4/dma.c index 564bb530f332..d57d66255021 100644 --- a/arch/arm/mach-exynos4/dma.c +++ b/arch/arm/mach-exynos4/dma.c @@ -21,151 +21,228 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ -#include #include +#include +#include +#include #include #include #include #include - -#include +#include static u64 dma_dmamask = DMA_BIT_MASK(32); -static struct resource exynos4_pdma0_resource[] = { - [0] = { - .start = EXYNOS4_PA_PDMA0, - .end = EXYNOS4_PA_PDMA0 + SZ_4K, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = IRQ_PDMA0, - .end = IRQ_PDMA0, - .flags = IORESOURCE_IRQ, +struct dma_pl330_peri pdma0_peri[28] = { + { + .peri_id = (u8)DMACH_PCM0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_PCM0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_PCM2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_PCM2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_MSM_REQ0, + }, { + .peri_id = (u8)DMACH_MSM_REQ2, + }, { + .peri_id = (u8)DMACH_SPI0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S0S_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART4_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART4_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SLIMBUS0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SLIMBUS0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SLIMBUS2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SLIMBUS2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SLIMBUS4_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SLIMBUS4_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_AC97_MICIN, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_AC97_PCMIN, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_AC97_PCMOUT, + .rqtype = MEMTODEV, }, }; -static struct s3c_pl330_platdata exynos4_pdma0_pdata = { - .peri = { - [0] = DMACH_PCM0_RX, - [1] = DMACH_PCM0_TX, - [2] = DMACH_PCM2_RX, - [3] = DMACH_PCM2_TX, - [4] = DMACH_MSM_REQ0, - [5] = DMACH_MSM_REQ2, - [6] = DMACH_SPI0_RX, - [7] = DMACH_SPI0_TX, - [8] = DMACH_SPI2_RX, - [9] = DMACH_SPI2_TX, - [10] = DMACH_I2S0S_TX, - [11] = DMACH_I2S0_RX, - [12] = DMACH_I2S0_TX, - [13] = DMACH_I2S2_RX, - [14] = DMACH_I2S2_TX, - [15] = DMACH_UART0_RX, - [16] = DMACH_UART0_TX, - [17] = DMACH_UART2_RX, - [18] = DMACH_UART2_TX, - [19] = DMACH_UART4_RX, - [20] = DMACH_UART4_TX, - [21] = DMACH_SLIMBUS0_RX, - [22] = DMACH_SLIMBUS0_TX, - [23] = DMACH_SLIMBUS2_RX, - [24] = DMACH_SLIMBUS2_TX, - [25] = DMACH_SLIMBUS4_RX, - [26] = DMACH_SLIMBUS4_TX, - [27] = DMACH_AC97_MICIN, - [28] = DMACH_AC97_PCMIN, - [29] = DMACH_AC97_PCMOUT, - [30] = DMACH_MAX, - [31] = DMACH_MAX, - }, +struct dma_pl330_platdata exynos4_pdma0_pdata = { + .nr_valid_peri = ARRAY_SIZE(pdma0_peri), + .peri = pdma0_peri, }; -static struct platform_device exynos4_device_pdma0 = { - .name = "s3c-pl330", - .id = 0, - .num_resources = ARRAY_SIZE(exynos4_pdma0_resource), - .resource = exynos4_pdma0_resource, - .dev = { +struct amba_device exynos4_device_pdma0 = { + .dev = { + .init_name = "dma-pl330.0", .dma_mask = &dma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &exynos4_pdma0_pdata, }, + .res = { + .start = 
EXYNOS4_PA_PDMA0, + .end = EXYNOS4_PA_PDMA0 + SZ_4K, + .flags = IORESOURCE_MEM, + }, + .irq = {IRQ_PDMA0, NO_IRQ}, + .periphid = 0x00041330, }; -static struct resource exynos4_pdma1_resource[] = { - [0] = { - .start = EXYNOS4_PA_PDMA1, - .end = EXYNOS4_PA_PDMA1 + SZ_4K, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = IRQ_PDMA1, - .end = IRQ_PDMA1, - .flags = IORESOURCE_IRQ, +struct dma_pl330_peri pdma1_peri[25] = { + { + .peri_id = (u8)DMACH_PCM0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_PCM0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_PCM1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_PCM1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_MSM_REQ1, + }, { + .peri_id = (u8)DMACH_MSM_REQ3, + }, { + .peri_id = (u8)DMACH_SPI1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S0S_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART3_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART3_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SLIMBUS1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SLIMBUS1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SLIMBUS3_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SLIMBUS3_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SLIMBUS5_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SLIMBUS5_TX, + .rqtype = MEMTODEV, }, }; -static struct s3c_pl330_platdata exynos4_pdma1_pdata = { - .peri = { - [0] = DMACH_PCM0_RX, - [1] = DMACH_PCM0_TX, - [2] = DMACH_PCM1_RX, - [3] = DMACH_PCM1_TX, - [4] = DMACH_MSM_REQ1, - [5] = DMACH_MSM_REQ3, - [6] = DMACH_SPI1_RX, - [7] = DMACH_SPI1_TX, - [8] = DMACH_I2S0S_TX, - [9] = DMACH_I2S0_RX, - [10] = DMACH_I2S0_TX, - [11] = DMACH_I2S1_RX, - [12] = DMACH_I2S1_TX, - [13] = DMACH_UART0_RX, - [14] = DMACH_UART0_TX, - [15] = DMACH_UART1_RX, - [16] = DMACH_UART1_TX, - [17] = DMACH_UART3_RX, - [18] = DMACH_UART3_TX, - [19] = DMACH_SLIMBUS1_RX, - [20] = DMACH_SLIMBUS1_TX, - [21] = DMACH_SLIMBUS3_RX, - [22] = DMACH_SLIMBUS3_TX, - [23] = DMACH_SLIMBUS5_RX, - [24] = DMACH_SLIMBUS5_TX, - [25] = DMACH_SLIMBUS0AUX_RX, - [26] = DMACH_SLIMBUS0AUX_TX, - [27] = DMACH_SPDIF, - [28] = DMACH_MAX, - [29] = DMACH_MAX, - [30] = DMACH_MAX, - [31] = DMACH_MAX, - }, +struct dma_pl330_platdata exynos4_pdma1_pdata = { + .nr_valid_peri = ARRAY_SIZE(pdma1_peri), + .peri = pdma1_peri, }; -static struct platform_device exynos4_device_pdma1 = { - .name = "s3c-pl330", - .id = 1, - .num_resources = ARRAY_SIZE(exynos4_pdma1_resource), - .resource = exynos4_pdma1_resource, - .dev = { +struct amba_device exynos4_device_pdma1 = { + .dev = { + .init_name = "dma-pl330.1", .dma_mask = &dma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &exynos4_pdma1_pdata, }, -}; - -static struct platform_device *exynos4_dmacs[] __initdata = { - &exynos4_device_pdma0, - &exynos4_device_pdma1, + .res = { + .start = EXYNOS4_PA_PDMA1, + .end = EXYNOS4_PA_PDMA1 + SZ_4K, + .flags = IORESOURCE_MEM, + }, + .irq = 
{IRQ_PDMA1, NO_IRQ}, + .periphid = 0x00041330, }; static int __init exynos4_dma_init(void) { - platform_add_devices(exynos4_dmacs, ARRAY_SIZE(exynos4_dmacs)); + amba_device_register(&exynos4_device_pdma0, &iomem_resource); return 0; } From dafc954304ad3a471c82e7aad15b26f6be264560 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:37 +0900 Subject: [PATCH 44/51] ARM: S5PV210: Use generic DMA PL330 driver This patch makes Samsung S5PV210 to use DMA PL330 driver on DMADEVICE. The S5PV210 uses DMA generic APIs instead of SAMSUNG specific S3C-PL330 APIs. Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- arch/arm/mach-s5pv210/Kconfig | 2 +- arch/arm/mach-s5pv210/clock.c | 10 +- arch/arm/mach-s5pv210/dma.c | 316 +++++++++++++++-------- arch/arm/mach-s5pv210/include/mach/dma.h | 2 +- 4 files changed, 214 insertions(+), 116 deletions(-) diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig index 69dd87cd8e22..d960090e4656 100644 --- a/arch/arm/mach-s5pv210/Kconfig +++ b/arch/arm/mach-s5pv210/Kconfig @@ -11,7 +11,7 @@ if ARCH_S5PV210 config CPU_S5PV210 bool - select S3C_PL330_DMA + select SAMSUNG_DMADEV select S5P_EXT_INT select S5P_HRT select S5PV210_PM if PM diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c index 52a8e607bcc2..d35726a6faea 100644 --- a/arch/arm/mach-s5pv210/clock.c +++ b/arch/arm/mach-s5pv210/clock.c @@ -203,6 +203,11 @@ static struct clk clk_pcmcdclk2 = { .name = "pcmcdclk", }; +static struct clk dummy_apb_pclk = { + .name = "apb_pclk", + .id = -1, +}; + static struct clk *clkset_vpllsrc_list[] = { [0] = &clk_fin_vpll, [1] = &clk_sclk_hdmi27m, @@ -289,13 +294,13 @@ static struct clk_ops clk_fout_apll_ops = { static struct clk init_clocks_off[] = { { - .name = "pdma", + .name = "dma", .devname = "s3c-pl330.0", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip0_ctrl, .ctrlbit = (1 << 3), }, { - .name = "pdma", + .name = "dma", .devname = "s3c-pl330.1", .parent = &clk_hclk_psys.clk, .enable = s5pv210_clk_ip0_ctrl, @@ -1161,5 +1166,6 @@ void __init s5pv210_register_clocks(void) s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); + s3c24xx_register_clock(&dummy_apb_pclk); s3c_pwmclk_init(); } diff --git a/arch/arm/mach-s5pv210/dma.c b/arch/arm/mach-s5pv210/dma.c index 497d3439a142..f79d0b06cbf9 100644 --- a/arch/arm/mach-s5pv210/dma.c +++ b/arch/arm/mach-s5pv210/dma.c @@ -1,4 +1,8 @@ -/* +/* linux/arch/arm/mach-s5pv210/dma.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * * Copyright (C) 2010 Samsung Electronics Co. Ltd. * Jaswinder Singh * @@ -17,151 +21,239 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ -#include #include +#include +#include +#include #include #include #include #include - -#include +#include static u64 dma_dmamask = DMA_BIT_MASK(32); -static struct resource s5pv210_pdma0_resource[] = { - [0] = { - .start = S5PV210_PA_PDMA0, - .end = S5PV210_PA_PDMA0 + SZ_4K, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = IRQ_PDMA0, - .end = IRQ_PDMA0, - .flags = IORESOURCE_IRQ, +struct dma_pl330_peri pdma0_peri[28] = { + { + .peri_id = (u8)DMACH_UART0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART3_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART3_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = DMACH_MAX, + }, { + .peri_id = (u8)DMACH_I2S0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S0S_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_MAX, + }, { + .peri_id = (u8)DMACH_MAX, + }, { + .peri_id = (u8)DMACH_SPI0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_MAX, + }, { + .peri_id = (u8)DMACH_MAX, + }, { + .peri_id = (u8)DMACH_AC97_MICIN, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_AC97_PCMIN, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_AC97_PCMOUT, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_MAX, + }, { + .peri_id = (u8)DMACH_PWM, + }, { + .peri_id = (u8)DMACH_SPDIF, + .rqtype = MEMTODEV, }, }; -static struct s3c_pl330_platdata s5pv210_pdma0_pdata = { - .peri = { - [0] = DMACH_UART0_RX, - [1] = DMACH_UART0_TX, - [2] = DMACH_UART1_RX, - [3] = DMACH_UART1_TX, - [4] = DMACH_UART2_RX, - [5] = DMACH_UART2_TX, - [6] = DMACH_UART3_RX, - [7] = DMACH_UART3_TX, - [8] = DMACH_MAX, - [9] = DMACH_I2S0_RX, - [10] = DMACH_I2S0_TX, - [11] = DMACH_I2S0S_TX, - [12] = DMACH_I2S1_RX, - [13] = DMACH_I2S1_TX, - [14] = DMACH_MAX, - [15] = DMACH_MAX, - [16] = DMACH_SPI0_RX, - [17] = DMACH_SPI0_TX, - [18] = DMACH_SPI1_RX, - [19] = DMACH_SPI1_TX, - [20] = DMACH_MAX, - [21] = DMACH_MAX, - [22] = DMACH_AC97_MICIN, - [23] = DMACH_AC97_PCMIN, - [24] = DMACH_AC97_PCMOUT, - [25] = DMACH_MAX, - [26] = DMACH_PWM, - [27] = DMACH_SPDIF, - [28] = DMACH_MAX, - [29] = DMACH_MAX, - [30] = DMACH_MAX, - [31] = DMACH_MAX, - }, +struct dma_pl330_platdata s5pv210_pdma0_pdata = { + .nr_valid_peri = ARRAY_SIZE(pdma0_peri), + .peri = pdma0_peri, }; -static struct platform_device s5pv210_device_pdma0 = { - .name = "s3c-pl330", - .id = 0, - .num_resources = ARRAY_SIZE(s5pv210_pdma0_resource), - .resource = s5pv210_pdma0_resource, - .dev = { +struct amba_device s5pv210_device_pdma0 = { + .dev = { + .init_name = "dma-pl330.0", .dma_mask = &dma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &s5pv210_pdma0_pdata, }, -}; - -static struct resource s5pv210_pdma1_resource[] = { - [0] = { - .start = S5PV210_PA_PDMA1, - .end = S5PV210_PA_PDMA1 + SZ_4K, + .res = { + .start = S5PV210_PA_PDMA0, + .end = S5PV210_PA_PDMA0 + SZ_4K, .flags = IORESOURCE_MEM, }, - [1] = { - .start = IRQ_PDMA1, 
- .end = IRQ_PDMA1, - .flags = IORESOURCE_IRQ, + .irq = {IRQ_PDMA0, NO_IRQ}, + .periphid = 0x00041330, +}; + +struct dma_pl330_peri pdma1_peri[32] = { + { + .peri_id = (u8)DMACH_UART0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART3_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART3_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = DMACH_MAX, + }, { + .peri_id = (u8)DMACH_I2S0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S0S_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_MAX, + }, { + .peri_id = (u8)DMACH_MAX, + }, { + .peri_id = (u8)DMACH_PCM0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_PCM0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_PCM1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_PCM1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_MSM_REQ0, + }, { + .peri_id = (u8)DMACH_MSM_REQ1, + }, { + .peri_id = (u8)DMACH_MSM_REQ2, + }, { + .peri_id = (u8)DMACH_MSM_REQ3, + }, { + .peri_id = (u8)DMACH_PCM2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_PCM2_TX, + .rqtype = MEMTODEV, }, }; -static struct s3c_pl330_platdata s5pv210_pdma1_pdata = { - .peri = { - [0] = DMACH_UART0_RX, - [1] = DMACH_UART0_TX, - [2] = DMACH_UART1_RX, - [3] = DMACH_UART1_TX, - [4] = DMACH_UART2_RX, - [5] = DMACH_UART2_TX, - [6] = DMACH_UART3_RX, - [7] = DMACH_UART3_TX, - [8] = DMACH_MAX, - [9] = DMACH_I2S0_RX, - [10] = DMACH_I2S0_TX, - [11] = DMACH_I2S0S_TX, - [12] = DMACH_I2S1_RX, - [13] = DMACH_I2S1_TX, - [14] = DMACH_I2S2_RX, - [15] = DMACH_I2S2_TX, - [16] = DMACH_SPI0_RX, - [17] = DMACH_SPI0_TX, - [18] = DMACH_SPI1_RX, - [19] = DMACH_SPI1_TX, - [20] = DMACH_MAX, - [21] = DMACH_MAX, - [22] = DMACH_PCM0_RX, - [23] = DMACH_PCM0_TX, - [24] = DMACH_PCM1_RX, - [25] = DMACH_PCM1_TX, - [26] = DMACH_MSM_REQ0, - [27] = DMACH_MSM_REQ1, - [28] = DMACH_MSM_REQ2, - [29] = DMACH_MSM_REQ3, - [30] = DMACH_PCM2_RX, - [31] = DMACH_PCM2_TX, - }, +struct dma_pl330_platdata s5pv210_pdma1_pdata = { + .nr_valid_peri = ARRAY_SIZE(pdma1_peri), + .peri = pdma1_peri, }; -static struct platform_device s5pv210_device_pdma1 = { - .name = "s3c-pl330", - .id = 1, - .num_resources = ARRAY_SIZE(s5pv210_pdma1_resource), - .resource = s5pv210_pdma1_resource, - .dev = { +struct amba_device s5pv210_device_pdma1 = { + .dev = { + .init_name = "dma-pl330.1", .dma_mask = &dma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &s5pv210_pdma1_pdata, }, -}; - -static struct platform_device *s5pv210_dmacs[] __initdata = { - &s5pv210_device_pdma0, - &s5pv210_device_pdma1, + .res = { + .start = S5PV210_PA_PDMA1, + .end = S5PV210_PA_PDMA1 + SZ_4K, + .flags = IORESOURCE_MEM, + }, + .irq = {IRQ_PDMA1, NO_IRQ}, + 
.periphid = 0x00041330, }; static int __init s5pv210_dma_init(void) { - platform_add_devices(s5pv210_dmacs, ARRAY_SIZE(s5pv210_dmacs)); + amba_device_register(&s5pv210_device_pdma0, &iomem_resource); return 0; } diff --git a/arch/arm/mach-s5pv210/include/mach/dma.h b/arch/arm/mach-s5pv210/include/mach/dma.h index 1beae2cca6a3..201842a3769e 100644 --- a/arch/arm/mach-s5pv210/include/mach/dma.h +++ b/arch/arm/mach-s5pv210/include/mach/dma.h @@ -20,7 +20,7 @@ #ifndef __MACH_DMA_H #define __MACH_DMA_H -/* This platform uses the common S3C DMA API driver for PL330 */ +/* This platform uses the common DMA API driver for PL330 */ #include #endif /* __MACH_DMA_H */ From a422bd0f6d50bcb9c529d890bde70295915bb3e9 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:38 +0900 Subject: [PATCH 45/51] ARM: S5PC100: Use generic DMA PL330 driver This patch makes Samsung S5PC100 to use DMA PL330 driver on DMADEVICE. The S5PC100 uses DMA generic APIs instead of SAMSUNG specific S3C-PL330 APIs. Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- arch/arm/mach-s5pc100/Kconfig | 2 +- arch/arm/mach-s5pc100/clock.c | 11 +- arch/arm/mach-s5pc100/dma.c | 323 +++++++++++++++-------- arch/arm/mach-s5pc100/include/mach/dma.h | 2 +- 4 files changed, 222 insertions(+), 116 deletions(-) diff --git a/arch/arm/mach-s5pc100/Kconfig b/arch/arm/mach-s5pc100/Kconfig index e8a33c4b054c..e538a4c67e9c 100644 --- a/arch/arm/mach-s5pc100/Kconfig +++ b/arch/arm/mach-s5pc100/Kconfig @@ -10,7 +10,7 @@ if ARCH_S5PC100 config CPU_S5PC100 bool select S5P_EXT_INT - select S3C_PL330_DMA + select SAMSUNG_DMADEV help Enable S5PC100 CPU support diff --git a/arch/arm/mach-s5pc100/clock.c b/arch/arm/mach-s5pc100/clock.c index ff5cbb30de5b..6527c05c5fa1 100644 --- a/arch/arm/mach-s5pc100/clock.c +++ b/arch/arm/mach-s5pc100/clock.c @@ -33,6 +33,11 @@ static struct clk s5p_clk_otgphy = { .name = "otg_phy", }; +static struct clk dummy_apb_pclk = { + .name = "apb_pclk", + .id = -1, +}; + static struct clk *clk_src_mout_href_list[] = { [0] = &s5p_clk_27m, [1] = &clk_fin_hpll, @@ -454,13 +459,13 @@ static struct clk init_clocks_off[] = { .enable = s5pc100_d1_0_ctrl, .ctrlbit = (1 << 2), }, { - .name = "pdma", + .name = "dma", .devname = "s3c-pl330.1", .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_0_ctrl, .ctrlbit = (1 << 1), }, { - .name = "pdma", + .name = "dma", .devname = "s3c-pl330.0", .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_0_ctrl, @@ -1276,5 +1281,7 @@ void __init s5pc100_register_clocks(void) s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); + s3c24xx_register_clock(&dummy_apb_pclk); + s3c_pwmclk_init(); } diff --git a/arch/arm/mach-s5pc100/dma.c b/arch/arm/mach-s5pc100/dma.c index bf4cd0fb97c6..ef803e92d35d 100644 --- a/arch/arm/mach-s5pc100/dma.c +++ b/arch/arm/mach-s5pc100/dma.c @@ -1,4 +1,8 @@ -/* +/* linux/arch/arm/mach-s5pc100/dma.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * * Copyright (C) 2010 Samsung Electronics Co. Ltd. * Jaswinder Singh * @@ -17,150 +21,245 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ -#include #include +#include +#include +#include #include +#include #include #include - -#include +#include static u64 dma_dmamask = DMA_BIT_MASK(32); -static struct resource s5pc100_pdma0_resource[] = { - [0] = { - .start = S5PC100_PA_PDMA0, - .end = S5PC100_PA_PDMA0 + SZ_4K, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = IRQ_PDMA0, - .end = IRQ_PDMA0, - .flags = IORESOURCE_IRQ, +struct dma_pl330_peri pdma0_peri[30] = { + { + .peri_id = (u8)DMACH_UART0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART3_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART3_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = DMACH_IRDA, + }, { + .peri_id = (u8)DMACH_I2S0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S0S_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_AC97_MICIN, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_AC97_PCMIN, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_AC97_PCMOUT, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_EXTERNAL, + }, { + .peri_id = (u8)DMACH_PWM, + }, { + .peri_id = (u8)DMACH_SPDIF, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_HSI_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_HSI_TX, + .rqtype = MEMTODEV, }, }; -static struct s3c_pl330_platdata s5pc100_pdma0_pdata = { - .peri = { - [0] = DMACH_UART0_RX, - [1] = DMACH_UART0_TX, - [2] = DMACH_UART1_RX, - [3] = DMACH_UART1_TX, - [4] = DMACH_UART2_RX, - [5] = DMACH_UART2_TX, - [6] = DMACH_UART3_RX, - [7] = DMACH_UART3_TX, - [8] = DMACH_IRDA, - [9] = DMACH_I2S0_RX, - [10] = DMACH_I2S0_TX, - [11] = DMACH_I2S0S_TX, - [12] = DMACH_I2S1_RX, - [13] = DMACH_I2S1_TX, - [14] = DMACH_I2S2_RX, - [15] = DMACH_I2S2_TX, - [16] = DMACH_SPI0_RX, - [17] = DMACH_SPI0_TX, - [18] = DMACH_SPI1_RX, - [19] = DMACH_SPI1_TX, - [20] = DMACH_SPI2_RX, - [21] = DMACH_SPI2_TX, - [22] = DMACH_AC97_MICIN, - [23] = DMACH_AC97_PCMIN, - [24] = DMACH_AC97_PCMOUT, - [25] = DMACH_EXTERNAL, - [26] = DMACH_PWM, - [27] = DMACH_SPDIF, - [28] = DMACH_HSI_RX, - [29] = DMACH_HSI_TX, - [30] = DMACH_MAX, - [31] = DMACH_MAX, - }, +struct dma_pl330_platdata s5pc100_pdma0_pdata = { + .nr_valid_peri = ARRAY_SIZE(pdma0_peri), + .peri = pdma0_peri, }; -static struct platform_device s5pc100_device_pdma0 = { - .name = "s3c-pl330", - .id = 0, - .num_resources = ARRAY_SIZE(s5pc100_pdma0_resource), - .resource = s5pc100_pdma0_resource, - .dev = { +struct amba_device s5pc100_device_pdma0 = { + .dev = { + .init_name = "dma-pl330.0", .dma_mask = &dma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &s5pc100_pdma0_pdata, }, -}; - 
-static struct resource s5pc100_pdma1_resource[] = { - [0] = { - .start = S5PC100_PA_PDMA1, - .end = S5PC100_PA_PDMA1 + SZ_4K, + .res = { + .start = S5PC100_PA_PDMA0, + .end = S5PC100_PA_PDMA0 + SZ_4K, .flags = IORESOURCE_MEM, }, - [1] = { - .start = IRQ_PDMA1, - .end = IRQ_PDMA1, - .flags = IORESOURCE_IRQ, + .irq = {IRQ_PDMA0, NO_IRQ}, + .periphid = 0x00041330, +}; + +struct dma_pl330_peri pdma1_peri[30] = { + { + .peri_id = (u8)DMACH_UART0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART3_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART3_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = DMACH_IRDA, + }, { + .peri_id = (u8)DMACH_I2S0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S0S_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_PCM0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_PCM1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_PCM1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_PCM1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_MSM_REQ0, + }, { + .peri_id = (u8)DMACH_MSM_REQ1, + }, { + .peri_id = (u8)DMACH_MSM_REQ2, + }, { + .peri_id = (u8)DMACH_MSM_REQ3, }, }; -static struct s3c_pl330_platdata s5pc100_pdma1_pdata = { - .peri = { - [0] = DMACH_UART0_RX, - [1] = DMACH_UART0_TX, - [2] = DMACH_UART1_RX, - [3] = DMACH_UART1_TX, - [4] = DMACH_UART2_RX, - [5] = DMACH_UART2_TX, - [6] = DMACH_UART3_RX, - [7] = DMACH_UART3_TX, - [8] = DMACH_IRDA, - [9] = DMACH_I2S0_RX, - [10] = DMACH_I2S0_TX, - [11] = DMACH_I2S0S_TX, - [12] = DMACH_I2S1_RX, - [13] = DMACH_I2S1_TX, - [14] = DMACH_I2S2_RX, - [15] = DMACH_I2S2_TX, - [16] = DMACH_SPI0_RX, - [17] = DMACH_SPI0_TX, - [18] = DMACH_SPI1_RX, - [19] = DMACH_SPI1_TX, - [20] = DMACH_SPI2_RX, - [21] = DMACH_SPI2_TX, - [22] = DMACH_PCM0_RX, - [23] = DMACH_PCM0_TX, - [24] = DMACH_PCM1_RX, - [25] = DMACH_PCM1_TX, - [26] = DMACH_MSM_REQ0, - [27] = DMACH_MSM_REQ1, - [28] = DMACH_MSM_REQ2, - [29] = DMACH_MSM_REQ3, - [30] = DMACH_MAX, - [31] = DMACH_MAX, - }, +struct dma_pl330_platdata s5pc100_pdma1_pdata = { + .nr_valid_peri = ARRAY_SIZE(pdma1_peri), + .peri = pdma1_peri, }; -static struct platform_device s5pc100_device_pdma1 = { - .name = "s3c-pl330", - .id = 1, - .num_resources = ARRAY_SIZE(s5pc100_pdma1_resource), - .resource = s5pc100_pdma1_resource, - .dev = { +struct amba_device s5pc100_device_pdma1 = { + .dev = { + .init_name = "dma-pl330.1", .dma_mask = &dma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = &s5pc100_pdma1_pdata, }, -}; - -static struct platform_device *s5pc100_dmacs[] __initdata = { - 
&s5pc100_device_pdma0, - &s5pc100_device_pdma1, + .res = { + .start = S5PC100_PA_PDMA1, + .end = S5PC100_PA_PDMA1 + SZ_4K, + .flags = IORESOURCE_MEM, + }, + .irq = {IRQ_PDMA1, NO_IRQ}, + .periphid = 0x00041330, }; static int __init s5pc100_dma_init(void) { - platform_add_devices(s5pc100_dmacs, ARRAY_SIZE(s5pc100_dmacs)); + amba_device_register(&s5pc100_device_pdma0, &iomem_resource); return 0; } diff --git a/arch/arm/mach-s5pc100/include/mach/dma.h b/arch/arm/mach-s5pc100/include/mach/dma.h index 1beae2cca6a3..201842a3769e 100644 --- a/arch/arm/mach-s5pc100/include/mach/dma.h +++ b/arch/arm/mach-s5pc100/include/mach/dma.h @@ -20,7 +20,7 @@ #ifndef __MACH_DMA_H #define __MACH_DMA_H -/* This platform uses the common S3C DMA API driver for PL330 */ +/* This platform uses the common DMA API driver for PL330 */ #include #endif /* __MACH_DMA_H */ From 3091e61173211de3fbd9bcb99ddc33333377fcb7 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:39 +0900 Subject: [PATCH 46/51] ARM: S5P64X0: Use generic DMA PL330 driver This patch makes Samsung S5P64X0 to use DMA PL330 driver on DMADEVICE. The S5P64X0 uses DMA generic APIs instead of SAMSUNG specific S3C-PL330 APIs. Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- arch/arm/mach-s5p64x0/Kconfig | 4 +- arch/arm/mach-s5p64x0/clock-s5p6440.c | 9 +- arch/arm/mach-s5p64x0/clock-s5p6450.c | 9 +- arch/arm/mach-s5p64x0/dma.c | 273 +++++++++++++++-------- arch/arm/mach-s5p64x0/include/mach/dma.h | 2 +- 5 files changed, 201 insertions(+), 96 deletions(-) diff --git a/arch/arm/mach-s5p64x0/Kconfig b/arch/arm/mach-s5p64x0/Kconfig index 65c7518dad7f..9527ed24dbff 100644 --- a/arch/arm/mach-s5p64x0/Kconfig +++ b/arch/arm/mach-s5p64x0/Kconfig @@ -9,14 +9,14 @@ if ARCH_S5P64X0 config CPU_S5P6440 bool - select S3C_PL330_DMA + select SAMSUNG_DMADEV select S5P_HRT help Enable S5P6440 CPU support config CPU_S5P6450 bool - select S3C_PL330_DMA + select SAMSUNG_DMADEV select S5P_HRT help Enable S5P6450 CPU support diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c index 0e9cd3092dd2..c1f548f69a0d 100644 --- a/arch/arm/mach-s5p64x0/clock-s5p6440.c +++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c @@ -146,7 +146,7 @@ static struct clk init_clocks_off[] = { .enable = s5p64x0_hclk0_ctrl, .ctrlbit = (1 << 8), }, { - .name = "pdma", + .name = "dma", .parent = &clk_hclk_low.clk, .enable = s5p64x0_hclk0_ctrl, .ctrlbit = (1 << 12), @@ -499,6 +499,11 @@ static struct clksrc_clk *sysclks[] = { &clk_pclk_low, }; +static struct clk dummy_apb_pclk = { + .name = "apb_pclk", + .id = -1, +}; + void __init_or_cpufreq s5p6440_setup_clocks(void) { struct clk *xtal_clk; @@ -581,5 +586,7 @@ void __init s5p6440_register_clocks(void) s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); + s3c24xx_register_clock(&dummy_apb_pclk); + s3c_pwmclk_init(); } diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c index d9dc16cde109..3d9b60975570 100644 --- a/arch/arm/mach-s5p64x0/clock-s5p6450.c +++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c @@ -179,7 +179,7 @@ static struct clk init_clocks_off[] = { .enable = s5p64x0_hclk0_ctrl, .ctrlbit = (1 << 3), }, { - .name = "pdma", + .name = "dma", .parent = &clk_hclk_low.clk, .enable = s5p64x0_hclk0_ctrl, .ctrlbit = (1 << 12), @@ -553,6 +553,11 @@ static struct clksrc_clk *sysclks[] = { &clk_sclk_audio0, }; +static struct 
clk dummy_apb_pclk = { + .name = "apb_pclk", + .id = -1, +}; + void __init_or_cpufreq s5p6450_setup_clocks(void) { struct clk *xtal_clk; @@ -632,5 +637,7 @@ void __init s5p6450_register_clocks(void) s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); + s3c24xx_register_clock(&dummy_apb_pclk); + s3c_pwmclk_init(); } diff --git a/arch/arm/mach-s5p64x0/dma.c b/arch/arm/mach-s5p64x0/dma.c index d7ad944b3475..aebf3fcb1ebe 100644 --- a/arch/arm/mach-s5p64x0/dma.c +++ b/arch/arm/mach-s5p64x0/dma.c @@ -21,128 +21,219 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include #include +#include +#include + +#include #include #include #include +#include #include -#include +#include static u64 dma_dmamask = DMA_BIT_MASK(32); -static struct resource s5p64x0_pdma_resource[] = { - [0] = { - .start = S5P64X0_PA_PDMA, - .end = S5P64X0_PA_PDMA + SZ_4K, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = IRQ_DMA0, - .end = IRQ_DMA0, - .flags = IORESOURCE_IRQ, +struct dma_pl330_peri s5p6440_pdma_peri[22] = { + { + .peri_id = (u8)DMACH_UART0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART3_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART3_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = DMACH_MAX, + }, { + .peri_id = DMACH_MAX, + }, { + .peri_id = (u8)DMACH_PCM0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_PCM0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_MAX, + }, { + .peri_id = (u8)DMACH_MAX, + }, { + .peri_id = (u8)DMACH_MAX, + }, { + .peri_id = (u8)DMACH_MAX, + }, { + .peri_id = (u8)DMACH_SPI1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI1_RX, + .rqtype = DEVTOMEM, }, }; -static struct s3c_pl330_platdata s5p6440_pdma_pdata = { - .peri = { - [0] = DMACH_UART0_RX, - [1] = DMACH_UART0_TX, - [2] = DMACH_UART1_RX, - [3] = DMACH_UART1_TX, - [4] = DMACH_UART2_RX, - [5] = DMACH_UART2_TX, - [6] = DMACH_UART3_RX, - [7] = DMACH_UART3_TX, - [8] = DMACH_MAX, - [9] = DMACH_MAX, - [10] = DMACH_PCM0_TX, - [11] = DMACH_PCM0_RX, - [12] = DMACH_I2S0_TX, - [13] = DMACH_I2S0_RX, - [14] = DMACH_SPI0_TX, - [15] = DMACH_SPI0_RX, - [16] = DMACH_MAX, - [17] = DMACH_MAX, - [18] = DMACH_MAX, - [19] = DMACH_MAX, - [20] = DMACH_SPI1_TX, - [21] = DMACH_SPI1_RX, - [22] = DMACH_MAX, - [23] = DMACH_MAX, - [24] = DMACH_MAX, - [25] = DMACH_MAX, - [26] = DMACH_MAX, - [27] = DMACH_MAX, - [28] = DMACH_MAX, - [29] = DMACH_PWM, - [30] = DMACH_MAX, - [31] = DMACH_MAX, +struct dma_pl330_platdata s5p6440_pdma_pdata = { + .nr_valid_peri = ARRAY_SIZE(s5p6440_pdma_peri), + .peri = s5p6440_pdma_peri, +}; + +struct dma_pl330_peri s5p6450_pdma_peri[32] = { + { + .peri_id = (u8)DMACH_UART0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART2_RX, + .rqtype = DEVTOMEM, 
+ }, { + .peri_id = (u8)DMACH_UART2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART3_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART3_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_UART4_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART4_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_PCM0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_PCM0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI0_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI0_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_PCM1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_PCM1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_PCM2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_PCM2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_SPI1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_SPI1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_USI_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_USI_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_MAX, + }, { + .peri_id = (u8)DMACH_I2S1_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S1_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_I2S2_TX, + .rqtype = MEMTODEV, + }, { + .peri_id = (u8)DMACH_I2S2_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_PWM, + }, { + .peri_id = (u8)DMACH_UART5_RX, + .rqtype = DEVTOMEM, + }, { + .peri_id = (u8)DMACH_UART5_TX, + .rqtype = MEMTODEV, }, }; -static struct s3c_pl330_platdata s5p6450_pdma_pdata = { - .peri = { - [0] = DMACH_UART0_RX, - [1] = DMACH_UART0_TX, - [2] = DMACH_UART1_RX, - [3] = DMACH_UART1_TX, - [4] = DMACH_UART2_RX, - [5] = DMACH_UART2_TX, - [6] = DMACH_UART3_RX, - [7] = DMACH_UART3_TX, - [8] = DMACH_UART4_RX, - [9] = DMACH_UART4_TX, - [10] = DMACH_PCM0_TX, - [11] = DMACH_PCM0_RX, - [12] = DMACH_I2S0_TX, - [13] = DMACH_I2S0_RX, - [14] = DMACH_SPI0_TX, - [15] = DMACH_SPI0_RX, - [16] = DMACH_PCM1_TX, - [17] = DMACH_PCM1_RX, - [18] = DMACH_PCM2_TX, - [19] = DMACH_PCM2_RX, - [20] = DMACH_SPI1_TX, - [21] = DMACH_SPI1_RX, - [22] = DMACH_USI_TX, - [23] = DMACH_USI_RX, - [24] = DMACH_MAX, - [25] = DMACH_I2S1_TX, - [26] = DMACH_I2S1_RX, - [27] = DMACH_I2S2_TX, - [28] = DMACH_I2S2_RX, - [29] = DMACH_PWM, - [30] = DMACH_UART5_RX, - [31] = DMACH_UART5_TX, - }, +struct dma_pl330_platdata s5p6450_pdma_pdata = { + .nr_valid_peri = ARRAY_SIZE(s5p6450_pdma_peri), + .peri = s5p6450_pdma_peri, }; -static struct platform_device s5p64x0_device_pdma = { - .name = "s3c-pl330", - .id = -1, - .num_resources = ARRAY_SIZE(s5p64x0_pdma_resource), - .resource = s5p64x0_pdma_resource, - .dev = { +struct amba_device s5p64x0_device_pdma = { + .dev = { + .init_name = "dma-pl330", .dma_mask = &dma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, + .res = { + .start = S5P64X0_PA_PDMA, + .end = S5P64X0_PA_PDMA + SZ_4K, + .flags = IORESOURCE_MEM, + }, + .irq = {IRQ_DMA0, NO_IRQ}, + .periphid = 0x00041330, }; static int __init s5p64x0_dma_init(void) { - unsigned int id; - - id = __raw_readl(S5P64X0_SYS_ID) & 0xFF000; + unsigned int id = __raw_readl(S5P64X0_SYS_ID) & 0xFF000; if (id == 0x50000) s5p64x0_device_pdma.dev.platform_data = &s5p6450_pdma_pdata; else s5p64x0_device_pdma.dev.platform_data = &s5p6440_pdma_pdata; - platform_device_register(&s5p64x0_device_pdma); + amba_device_register(&s5p64x0_device_pdma, &iomem_resource); return 0; } diff --git 
a/arch/arm/mach-s5p64x0/include/mach/dma.h b/arch/arm/mach-s5p64x0/include/mach/dma.h index 1beae2cca6a3..5a622af461d7 100644 --- a/arch/arm/mach-s5p64x0/include/mach/dma.h +++ b/arch/arm/mach-s5p64x0/include/mach/dma.h @@ -20,7 +20,7 @@ #ifndef __MACH_DMA_H #define __MACH_DMA_H -/* This platform uses the common S3C DMA API driver for PL330 */ +/* This platform uses the common common DMA API driver for PL330 */ #include #endif /* __MACH_DMA_H */ From 978ce50dd5cfd93380ded89d61de9d8109ebd814 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:40 +0900 Subject: [PATCH 47/51] ARM: SAMSUNG: Remove S3C-PL330-DMA driver Since DMA generic APIs can be used for Samsung DMA now so that the s3c-pl330 which includes Samsung specific DMA APIs can be removed. Signed-off-by: Boojin Kim Cc: Jassi Brar Acked-by: Linus Walleij Acked-by: Vinod Koul Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- arch/arm/plat-samsung/Kconfig | 6 - arch/arm/plat-samsung/Makefile | 2 - .../arm/plat-samsung/include/plat/dma-pl330.h | 10 +- .../include/plat/s3c-pl330-pdata.h | 32 - arch/arm/plat-samsung/s3c-pl330.c | 1244 ----------------- 5 files changed, 6 insertions(+), 1288 deletions(-) delete mode 100644 arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h delete mode 100644 arch/arm/plat-samsung/s3c-pl330.c diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig index e6f01ab86208..0a3eec616d61 100644 --- a/arch/arm/plat-samsung/Kconfig +++ b/arch/arm/plat-samsung/Kconfig @@ -300,12 +300,6 @@ config S3C_DMA help Internal configuration for S3C DMA core -config S3C_PL330_DMA - bool - select PL330 - help - S3C DMA API Driver for PL330 DMAC. - config SAMSUNG_DMADEV bool select DMADEVICES diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile index e2ee0b8a3763..90758494cad8 100644 --- a/arch/arm/plat-samsung/Makefile +++ b/arch/arm/plat-samsung/Makefile @@ -67,8 +67,6 @@ obj-$(CONFIG_S3C_DMA) += dma.o s3c-dma-ops.o obj-$(CONFIG_SAMSUNG_DMADEV) += dma-ops.o -obj-$(CONFIG_S3C_PL330_DMA) += s3c-pl330.o s3c-dma-ops.o - # PM support obj-$(CONFIG_PM) += pm.o diff --git a/arch/arm/plat-samsung/include/plat/dma-pl330.h b/arch/arm/plat-samsung/include/plat/dma-pl330.h index 2916920034f0..9a1dadb0218e 100644 --- a/arch/arm/plat-samsung/include/plat/dma-pl330.h +++ b/arch/arm/plat-samsung/include/plat/dma-pl330.h @@ -11,9 +11,6 @@ #ifndef __DMA_PL330_H_ #define __DMA_PL330_H_ __FILE__ -#define S3C2410_DMAF_AUTOSTART (1 << 0) -#define S3C2410_DMAF_CIRCULAR (1 << 1) - /* * PL330 can assign any channel to communicate with * any of the peripherals attched to the DMAC. @@ -88,6 +85,10 @@ enum dma_ch { DMACH_MAX, }; +struct s3c2410_dma_client { + char *name; +}; + static inline bool s3c_dma_has_circular(void) { return true; @@ -97,6 +98,7 @@ static inline bool samsung_dma_is_dmadev(void) { return true; } -#include + +#include #endif /* __DMA_PL330_H_ */ diff --git a/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h b/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h deleted file mode 100644 index 64fdf66ff9cf..000000000000 --- a/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h +++ /dev/null @@ -1,32 +0,0 @@ -/* linux/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h - * - * Copyright (C) 2010 Samsung Electronics Co. Ltd. 
- * Jaswinder Singh - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef __S3C_PL330_PDATA_H -#define __S3C_PL330_PDATA_H - -#include - -/* - * Every PL330 DMAC has max 32 peripheral interfaces, - * of which some may be not be really used in your - * DMAC's configuration. - * Populate this array of 32 peri i/fs with relevant - * channel IDs for used peri i/f and DMACH_MAX for - * those unused. - * - * The platforms just need to provide this info - * to the S3C DMA API driver for PL330. - */ -struct s3c_pl330_platdata { - enum dma_ch peri[32]; -}; - -#endif /* __S3C_PL330_PDATA_H */ diff --git a/arch/arm/plat-samsung/s3c-pl330.c b/arch/arm/plat-samsung/s3c-pl330.c deleted file mode 100644 index f85638c6f5ae..000000000000 --- a/arch/arm/plat-samsung/s3c-pl330.c +++ /dev/null @@ -1,1244 +0,0 @@ -/* linux/arch/arm/plat-samsung/s3c-pl330.c - * - * Copyright (C) 2010 Samsung Electronics Co. Ltd. - * Jaswinder Singh - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -/** - * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC. - * @busy_chan: Number of channels currently busy. - * @peri: List of IDs of peripherals this DMAC can work with. - * @node: To attach to the global list of DMACs. - * @pi: PL330 configuration info for the DMAC. - * @kmcache: Pool to quickly allocate xfers for all channels in the dmac. - * @clk: Pointer of DMAC operation clock. - */ -struct s3c_pl330_dmac { - unsigned busy_chan; - enum dma_ch *peri; - struct list_head node; - struct pl330_info *pi; - struct kmem_cache *kmcache; - struct clk *clk; -}; - -/** - * struct s3c_pl330_xfer - A request submitted by S3C DMA clients. - * @token: Xfer ID provided by the client. - * @node: To attach to the list of xfers on a channel. - * @px: Xfer for PL330 core. - * @chan: Owner channel of this xfer. - */ -struct s3c_pl330_xfer { - void *token; - struct list_head node; - struct pl330_xfer px; - struct s3c_pl330_chan *chan; -}; - -/** - * struct s3c_pl330_chan - Logical channel to communicate with - * a Physical peripheral. - * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC. - * NULL if the channel is available to be acquired. - * @id: ID of the peripheral that this channel can communicate with. - * @options: Options specified by the client. - * @sdaddr: Address provided via s3c2410_dma_devconfig. - * @node: To attach to the global list of channels. - * @lrq: Pointer to the last submitted pl330_req to PL330 core. - * @xfer_list: To manage list of xfers enqueued. - * @req: Two requests to communicate with the PL330 engine. - * @callback_fn: Callback function to the client. - * @rqcfg: Channel configuration for the xfers. - * @xfer_head: Pointer to the xfer to be next executed. - * @dmac: Pointer to the DMAC that manages this channel, NULL if the - * channel is available to be acquired. - * @client: Client of this channel. NULL if the - * channel is available to be acquired. 
- */ -struct s3c_pl330_chan { - void *pl330_chan_id; - enum dma_ch id; - unsigned int options; - unsigned long sdaddr; - struct list_head node; - struct pl330_req *lrq; - struct list_head xfer_list; - struct pl330_req req[2]; - s3c2410_dma_cbfn_t callback_fn; - struct pl330_reqcfg rqcfg; - struct s3c_pl330_xfer *xfer_head; - struct s3c_pl330_dmac *dmac; - struct s3c2410_dma_client *client; -}; - -/* All DMACs in the platform */ -static LIST_HEAD(dmac_list); - -/* All channels to peripherals in the platform */ -static LIST_HEAD(chan_list); - -/* - * Since we add resources(DMACs and Channels) to the global pool, - * we need to guard access to the resources using a global lock - */ -static DEFINE_SPINLOCK(res_lock); - -/* Returns the channel with ID 'id' in the chan_list */ -static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id) -{ - struct s3c_pl330_chan *ch; - - list_for_each_entry(ch, &chan_list, node) - if (ch->id == id) - return ch; - - return NULL; -} - -/* Allocate a new channel with ID 'id' and add to chan_list */ -static void chan_add(const enum dma_ch id) -{ - struct s3c_pl330_chan *ch = id_to_chan(id); - - /* Return if the channel already exists */ - if (ch) - return; - - ch = kmalloc(sizeof(*ch), GFP_KERNEL); - /* Return silently to work with other channels */ - if (!ch) - return; - - ch->id = id; - ch->dmac = NULL; - - list_add_tail(&ch->node, &chan_list); -} - -/* If the channel is not yet acquired by any client */ -static bool chan_free(struct s3c_pl330_chan *ch) -{ - if (!ch) - return false; - - /* Channel points to some DMAC only when it's acquired */ - return ch->dmac ? false : true; -} - -/* - * Returns 0 is peripheral i/f is invalid or not present on the dmac. - * Index + 1, otherwise. - */ -static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id) -{ - enum dma_ch *id = dmac->peri; - int i; - - /* Discount invalid markers */ - if (ch_id == DMACH_MAX) - return 0; - - for (i = 0; i < PL330_MAX_PERI; i++) - if (id[i] == ch_id) - return i + 1; - - return 0; -} - -/* If all channel threads of the DMAC are busy */ -static inline bool dmac_busy(struct s3c_pl330_dmac *dmac) -{ - struct pl330_info *pi = dmac->pi; - - return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true; -} - -/* - * Returns the number of free channels that - * can be handled by this dmac only. - */ -static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac) -{ - enum dma_ch *id = dmac->peri; - struct s3c_pl330_dmac *d; - struct s3c_pl330_chan *ch; - unsigned found, count = 0; - enum dma_ch p; - int i; - - for (i = 0; i < PL330_MAX_PERI; i++) { - p = id[i]; - ch = id_to_chan(p); - - if (p == DMACH_MAX || !chan_free(ch)) - continue; - - found = 0; - list_for_each_entry(d, &dmac_list, node) { - if (d != dmac && iface_of_dmac(d, ch->id)) { - found = 1; - break; - } - } - if (!found) - count++; - } - - return count; -} - -/* - * Measure of suitability of 'dmac' handling 'ch' - * - * 0 indicates 'dmac' can not handle 'ch' either - * because it is not supported by the hardware or - * because all dmac channels are currently busy. - * - * >0 vlaue indicates 'dmac' has the capability. - * The bigger the value the more suitable the dmac. 
- */ -#define MAX_SUIT UINT_MAX -#define MIN_SUIT 0 - -static unsigned suitablility(struct s3c_pl330_dmac *dmac, - struct s3c_pl330_chan *ch) -{ - struct pl330_info *pi = dmac->pi; - enum dma_ch *id = dmac->peri; - struct s3c_pl330_dmac *d; - unsigned s; - int i; - - s = MIN_SUIT; - /* If all the DMAC channel threads are busy */ - if (dmac_busy(dmac)) - return s; - - for (i = 0; i < PL330_MAX_PERI; i++) - if (id[i] == ch->id) - break; - - /* If the 'dmac' can't talk to 'ch' */ - if (i == PL330_MAX_PERI) - return s; - - s = MAX_SUIT; - list_for_each_entry(d, &dmac_list, node) { - /* - * If some other dmac can talk to this - * peri and has some channel free. - */ - if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) { - s = 0; - break; - } - } - if (s) - return s; - - s = 100; - - /* Good if free chans are more, bad otherwise */ - s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac); - - return s; -} - -/* More than one DMAC may have capability to transfer data with the - * peripheral. This function assigns most suitable DMAC to manage the - * channel and hence communicate with the peripheral. - */ -static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch) -{ - struct s3c_pl330_dmac *d, *dmac = NULL; - unsigned sn, sl = MIN_SUIT; - - list_for_each_entry(d, &dmac_list, node) { - sn = suitablility(d, ch); - - if (sn == MAX_SUIT) - return d; - - if (sn > sl) - dmac = d; - } - - return dmac; -} - -/* Acquire the channel for peripheral 'id' */ -static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id) -{ - struct s3c_pl330_chan *ch = id_to_chan(id); - struct s3c_pl330_dmac *dmac; - - /* If the channel doesn't exist or is already acquired */ - if (!ch || !chan_free(ch)) { - ch = NULL; - goto acq_exit; - } - - dmac = map_chan_to_dmac(ch); - /* If couldn't map */ - if (!dmac) { - ch = NULL; - goto acq_exit; - } - - dmac->busy_chan++; - ch->dmac = dmac; - -acq_exit: - return ch; -} - -/* Delete xfer from the queue */ -static inline void del_from_queue(struct s3c_pl330_xfer *xfer) -{ - struct s3c_pl330_xfer *t; - struct s3c_pl330_chan *ch; - int found; - - if (!xfer) - return; - - ch = xfer->chan; - - /* Make sure xfer is in the queue */ - found = 0; - list_for_each_entry(t, &ch->xfer_list, node) - if (t == xfer) { - found = 1; - break; - } - - if (!found) - return; - - /* If xfer is last entry in the queue */ - if (xfer->node.next == &ch->xfer_list) - t = list_entry(ch->xfer_list.next, - struct s3c_pl330_xfer, node); - else - t = list_entry(xfer->node.next, - struct s3c_pl330_xfer, node); - - /* If there was only one node left */ - if (t == xfer) - ch->xfer_head = NULL; - else if (ch->xfer_head == xfer) - ch->xfer_head = t; - - list_del(&xfer->node); -} - -/* Provides pointer to the next xfer in the queue. - * If CIRCULAR option is set, the list is left intact, - * otherwise the xfer is removed from the list. - * Forced delete 'pluck' can be set to override the CIRCULAR option. 
- */ -static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch, - int pluck) -{ - struct s3c_pl330_xfer *xfer = ch->xfer_head; - - if (!xfer) - return NULL; - - /* If xfer is last entry in the queue */ - if (xfer->node.next == &ch->xfer_list) - ch->xfer_head = list_entry(ch->xfer_list.next, - struct s3c_pl330_xfer, node); - else - ch->xfer_head = list_entry(xfer->node.next, - struct s3c_pl330_xfer, node); - - if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR)) - del_from_queue(xfer); - - return xfer; -} - -static inline void add_to_queue(struct s3c_pl330_chan *ch, - struct s3c_pl330_xfer *xfer, int front) -{ - struct pl330_xfer *xt; - - /* If queue empty */ - if (ch->xfer_head == NULL) - ch->xfer_head = xfer; - - xt = &ch->xfer_head->px; - /* If the head already submitted (CIRCULAR head) */ - if (ch->options & S3C2410_DMAF_CIRCULAR && - (xt == ch->req[0].x || xt == ch->req[1].x)) - ch->xfer_head = xfer; - - /* If this is a resubmission, it should go at the head */ - if (front) { - ch->xfer_head = xfer; - list_add(&xfer->node, &ch->xfer_list); - } else { - list_add_tail(&xfer->node, &ch->xfer_list); - } -} - -static inline void _finish_off(struct s3c_pl330_xfer *xfer, - enum s3c2410_dma_buffresult res, int ffree) -{ - struct s3c_pl330_chan *ch; - - if (!xfer) - return; - - ch = xfer->chan; - - /* Do callback */ - if (ch->callback_fn) - ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res); - - /* Force Free or if buffer is not needed anymore */ - if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR)) - kmem_cache_free(ch->dmac->kmcache, xfer); -} - -static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch, - struct pl330_req *r) -{ - struct s3c_pl330_xfer *xfer; - int ret = 0; - - /* If already submitted */ - if (r->x) - return 0; - - xfer = get_from_queue(ch, 0); - if (xfer) { - r->x = &xfer->px; - - /* Use max bandwidth for M<->M xfers */ - if (r->rqtype == MEMTOMEM) { - struct pl330_info *pi = xfer->chan->dmac->pi; - int burst = 1 << ch->rqcfg.brst_size; - u32 bytes = r->x->bytes; - int bl; - - bl = pi->pcfg.data_bus_width / 8; - bl *= pi->pcfg.data_buf_dep; - bl /= burst; - - /* src/dst_burst_len can't be more than 16 */ - if (bl > 16) - bl = 16; - - while (bl > 1) { - if (!(bytes % (bl * burst))) - break; - bl--; - } - - ch->rqcfg.brst_len = bl; - } else { - ch->rqcfg.brst_len = 1; - } - - ret = pl330_submit_req(ch->pl330_chan_id, r); - - /* If submission was successful */ - if (!ret) { - ch->lrq = r; /* latest submitted req */ - return 0; - } - - r->x = NULL; - - /* If both of the PL330 ping-pong buffers filled */ - if (ret == -EAGAIN) { - dev_err(ch->dmac->pi->dev, "%s:%d!\n", - __func__, __LINE__); - /* Queue back again */ - add_to_queue(ch, xfer, 1); - ret = 0; - } else { - dev_err(ch->dmac->pi->dev, "%s:%d!\n", - __func__, __LINE__); - _finish_off(xfer, S3C2410_RES_ERR, 0); - } - } - - return ret; -} - -static void s3c_pl330_rq(struct s3c_pl330_chan *ch, - struct pl330_req *r, enum pl330_op_err err) -{ - unsigned long flags; - struct s3c_pl330_xfer *xfer; - struct pl330_xfer *xl = r->x; - enum s3c2410_dma_buffresult res; - - spin_lock_irqsave(&res_lock, flags); - - r->x = NULL; - - s3c_pl330_submit(ch, r); - - spin_unlock_irqrestore(&res_lock, flags); - - /* Map result to S3C DMA API */ - if (err == PL330_ERR_NONE) - res = S3C2410_RES_OK; - else if (err == PL330_ERR_ABORT) - res = S3C2410_RES_ABORT; - else - res = S3C2410_RES_ERR; - - /* If last request had some xfer */ - if (xl) { - xfer = container_of(xl, struct s3c_pl330_xfer, px); - _finish_off(xfer, res, 
0); - } else { - dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n", - __func__, __LINE__); - } -} - -static void s3c_pl330_rq0(void *token, enum pl330_op_err err) -{ - struct pl330_req *r = token; - struct s3c_pl330_chan *ch = container_of(r, - struct s3c_pl330_chan, req[0]); - s3c_pl330_rq(ch, r, err); -} - -static void s3c_pl330_rq1(void *token, enum pl330_op_err err) -{ - struct pl330_req *r = token; - struct s3c_pl330_chan *ch = container_of(r, - struct s3c_pl330_chan, req[1]); - s3c_pl330_rq(ch, r, err); -} - -/* Release an acquired channel */ -static void chan_release(struct s3c_pl330_chan *ch) -{ - struct s3c_pl330_dmac *dmac; - - if (chan_free(ch)) - return; - - dmac = ch->dmac; - ch->dmac = NULL; - dmac->busy_chan--; -} - -int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op) -{ - struct s3c_pl330_xfer *xfer; - enum pl330_chan_op pl330op; - struct s3c_pl330_chan *ch; - unsigned long flags; - int idx, ret; - - spin_lock_irqsave(&res_lock, flags); - - ch = id_to_chan(id); - - if (!ch || chan_free(ch)) { - ret = -EINVAL; - goto ctrl_exit; - } - - switch (op) { - case S3C2410_DMAOP_START: - /* Make sure both reqs are enqueued */ - idx = (ch->lrq == &ch->req[0]) ? 1 : 0; - s3c_pl330_submit(ch, &ch->req[idx]); - s3c_pl330_submit(ch, &ch->req[1 - idx]); - pl330op = PL330_OP_START; - break; - - case S3C2410_DMAOP_STOP: - pl330op = PL330_OP_ABORT; - break; - - case S3C2410_DMAOP_FLUSH: - pl330op = PL330_OP_FLUSH; - break; - - case S3C2410_DMAOP_PAUSE: - case S3C2410_DMAOP_RESUME: - case S3C2410_DMAOP_TIMEOUT: - case S3C2410_DMAOP_STARTED: - spin_unlock_irqrestore(&res_lock, flags); - return 0; - - default: - spin_unlock_irqrestore(&res_lock, flags); - return -EINVAL; - } - - ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op); - - if (pl330op == PL330_OP_START) { - spin_unlock_irqrestore(&res_lock, flags); - return ret; - } - - idx = (ch->lrq == &ch->req[0]) ? 1 : 0; - - /* Abort the current xfer */ - if (ch->req[idx].x) { - xfer = container_of(ch->req[idx].x, - struct s3c_pl330_xfer, px); - - /* Drop xfer during FLUSH */ - if (pl330op == PL330_OP_FLUSH) - del_from_queue(xfer); - - ch->req[idx].x = NULL; - - spin_unlock_irqrestore(&res_lock, flags); - _finish_off(xfer, S3C2410_RES_ABORT, - pl330op == PL330_OP_FLUSH ? 
1 : 0); - spin_lock_irqsave(&res_lock, flags); - } - - /* Flush the whole queue */ - if (pl330op == PL330_OP_FLUSH) { - - if (ch->req[1 - idx].x) { - xfer = container_of(ch->req[1 - idx].x, - struct s3c_pl330_xfer, px); - - del_from_queue(xfer); - - ch->req[1 - idx].x = NULL; - - spin_unlock_irqrestore(&res_lock, flags); - _finish_off(xfer, S3C2410_RES_ABORT, 1); - spin_lock_irqsave(&res_lock, flags); - } - - /* Finish off the remaining in the queue */ - xfer = ch->xfer_head; - while (xfer) { - - del_from_queue(xfer); - - spin_unlock_irqrestore(&res_lock, flags); - _finish_off(xfer, S3C2410_RES_ABORT, 1); - spin_lock_irqsave(&res_lock, flags); - - xfer = ch->xfer_head; - } - } - -ctrl_exit: - spin_unlock_irqrestore(&res_lock, flags); - - return ret; -} -EXPORT_SYMBOL(s3c2410_dma_ctrl); - -int s3c2410_dma_enqueue(enum dma_ch id, void *token, - dma_addr_t addr, int size) -{ - struct s3c_pl330_chan *ch; - struct s3c_pl330_xfer *xfer; - unsigned long flags; - int idx, ret = 0; - - spin_lock_irqsave(&res_lock, flags); - - ch = id_to_chan(id); - - /* Error if invalid or free channel */ - if (!ch || chan_free(ch)) { - ret = -EINVAL; - goto enq_exit; - } - - /* Error if size is unaligned */ - if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) { - ret = -EINVAL; - goto enq_exit; - } - - xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC); - if (!xfer) { - ret = -ENOMEM; - goto enq_exit; - } - - xfer->token = token; - xfer->chan = ch; - xfer->px.bytes = size; - xfer->px.next = NULL; /* Single request */ - - /* For S3C DMA API, direction is always fixed for all xfers */ - if (ch->req[0].rqtype == MEMTODEV) { - xfer->px.src_addr = addr; - xfer->px.dst_addr = ch->sdaddr; - } else { - xfer->px.src_addr = ch->sdaddr; - xfer->px.dst_addr = addr; - } - - add_to_queue(ch, xfer, 0); - - /* Try submitting on either request */ - idx = (ch->lrq == &ch->req[0]) ? 
1 : 0; - - if (!ch->req[idx].x) - s3c_pl330_submit(ch, &ch->req[idx]); - else - s3c_pl330_submit(ch, &ch->req[1 - idx]); - - spin_unlock_irqrestore(&res_lock, flags); - - if (ch->options & S3C2410_DMAF_AUTOSTART) - s3c2410_dma_ctrl(id, S3C2410_DMAOP_START); - - return 0; - -enq_exit: - spin_unlock_irqrestore(&res_lock, flags); - - return ret; -} -EXPORT_SYMBOL(s3c2410_dma_enqueue); - -int s3c2410_dma_request(enum dma_ch id, - struct s3c2410_dma_client *client, - void *dev) -{ - struct s3c_pl330_dmac *dmac; - struct s3c_pl330_chan *ch; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&res_lock, flags); - - ch = chan_acquire(id); - if (!ch) { - ret = -EBUSY; - goto req_exit; - } - - dmac = ch->dmac; - - ch->pl330_chan_id = pl330_request_channel(dmac->pi); - if (!ch->pl330_chan_id) { - chan_release(ch); - ret = -EBUSY; - goto req_exit; - } - - ch->client = client; - ch->options = 0; /* Clear any option */ - ch->callback_fn = NULL; /* Clear any callback */ - ch->lrq = NULL; - - ch->rqcfg.brst_size = 2; /* Default word size */ - ch->rqcfg.swap = SWAP_NO; - ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */ - ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */ - ch->rqcfg.privileged = 0; - ch->rqcfg.insnaccess = 0; - - /* Set invalid direction */ - ch->req[0].rqtype = DEVTODEV; - ch->req[1].rqtype = ch->req[0].rqtype; - - ch->req[0].cfg = &ch->rqcfg; - ch->req[1].cfg = ch->req[0].cfg; - - ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */ - ch->req[1].peri = ch->req[0].peri; - - ch->req[0].token = &ch->req[0]; - ch->req[0].xfer_cb = s3c_pl330_rq0; - ch->req[1].token = &ch->req[1]; - ch->req[1].xfer_cb = s3c_pl330_rq1; - - ch->req[0].x = NULL; - ch->req[1].x = NULL; - - /* Reset xfer list */ - INIT_LIST_HEAD(&ch->xfer_list); - ch->xfer_head = NULL; - -req_exit: - spin_unlock_irqrestore(&res_lock, flags); - - return ret; -} -EXPORT_SYMBOL(s3c2410_dma_request); - -int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client) -{ - struct s3c_pl330_chan *ch; - struct s3c_pl330_xfer *xfer; - unsigned long flags; - int ret = 0; - unsigned idx; - - spin_lock_irqsave(&res_lock, flags); - - ch = id_to_chan(id); - - if (!ch || chan_free(ch)) - goto free_exit; - - /* Refuse if someone else wanted to free the channel */ - if (ch->client != client) { - ret = -EBUSY; - goto free_exit; - } - - /* Stop any active xfer, Flushe the queue and do callbacks */ - pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH); - - /* Abort the submitted requests */ - idx = (ch->lrq == &ch->req[0]) ? 
1 : 0; - - if (ch->req[idx].x) { - xfer = container_of(ch->req[idx].x, - struct s3c_pl330_xfer, px); - - ch->req[idx].x = NULL; - del_from_queue(xfer); - - spin_unlock_irqrestore(&res_lock, flags); - _finish_off(xfer, S3C2410_RES_ABORT, 1); - spin_lock_irqsave(&res_lock, flags); - } - - if (ch->req[1 - idx].x) { - xfer = container_of(ch->req[1 - idx].x, - struct s3c_pl330_xfer, px); - - ch->req[1 - idx].x = NULL; - del_from_queue(xfer); - - spin_unlock_irqrestore(&res_lock, flags); - _finish_off(xfer, S3C2410_RES_ABORT, 1); - spin_lock_irqsave(&res_lock, flags); - } - - /* Pluck and Abort the queued requests in order */ - do { - xfer = get_from_queue(ch, 1); - - spin_unlock_irqrestore(&res_lock, flags); - _finish_off(xfer, S3C2410_RES_ABORT, 1); - spin_lock_irqsave(&res_lock, flags); - } while (xfer); - - ch->client = NULL; - - pl330_release_channel(ch->pl330_chan_id); - - ch->pl330_chan_id = NULL; - - chan_release(ch); - -free_exit: - spin_unlock_irqrestore(&res_lock, flags); - - return ret; -} -EXPORT_SYMBOL(s3c2410_dma_free); - -int s3c2410_dma_config(enum dma_ch id, int xferunit) -{ - struct s3c_pl330_chan *ch; - struct pl330_info *pi; - unsigned long flags; - int i, dbwidth, ret = 0; - - spin_lock_irqsave(&res_lock, flags); - - ch = id_to_chan(id); - - if (!ch || chan_free(ch)) { - ret = -EINVAL; - goto cfg_exit; - } - - pi = ch->dmac->pi; - dbwidth = pi->pcfg.data_bus_width / 8; - - /* Max size of xfer can be pcfg.data_bus_width */ - if (xferunit > dbwidth) { - ret = -EINVAL; - goto cfg_exit; - } - - i = 0; - while (xferunit != (1 << i)) - i++; - - /* If valid value */ - if (xferunit == (1 << i)) - ch->rqcfg.brst_size = i; - else - ret = -EINVAL; - -cfg_exit: - spin_unlock_irqrestore(&res_lock, flags); - - return ret; -} -EXPORT_SYMBOL(s3c2410_dma_config); - -/* Options that are supported by this driver */ -#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART) - -int s3c2410_dma_setflags(enum dma_ch id, unsigned int options) -{ - struct s3c_pl330_chan *ch; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&res_lock, flags); - - ch = id_to_chan(id); - - if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS)) - ret = -EINVAL; - else - ch->options = options; - - spin_unlock_irqrestore(&res_lock, flags); - - return 0; -} -EXPORT_SYMBOL(s3c2410_dma_setflags); - -int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn) -{ - struct s3c_pl330_chan *ch; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&res_lock, flags); - - ch = id_to_chan(id); - - if (!ch || chan_free(ch)) - ret = -EINVAL; - else - ch->callback_fn = rtn; - - spin_unlock_irqrestore(&res_lock, flags); - - return ret; -} -EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn); - -int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source, - unsigned long address) -{ - struct s3c_pl330_chan *ch; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&res_lock, flags); - - ch = id_to_chan(id); - - if (!ch || chan_free(ch)) { - ret = -EINVAL; - goto devcfg_exit; - } - - switch (source) { - case S3C2410_DMASRC_HW: /* P->M */ - ch->req[0].rqtype = DEVTOMEM; - ch->req[1].rqtype = DEVTOMEM; - ch->rqcfg.src_inc = 0; - ch->rqcfg.dst_inc = 1; - break; - case S3C2410_DMASRC_MEM: /* M->P */ - ch->req[0].rqtype = MEMTODEV; - ch->req[1].rqtype = MEMTODEV; - ch->rqcfg.src_inc = 1; - ch->rqcfg.dst_inc = 0; - break; - default: - ret = -EINVAL; - goto devcfg_exit; - } - - ch->sdaddr = address; - -devcfg_exit: - spin_unlock_irqrestore(&res_lock, flags); - - return ret; -} 
-EXPORT_SYMBOL(s3c2410_dma_devconfig); - -int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst) -{ - struct s3c_pl330_chan *ch = id_to_chan(id); - struct pl330_chanstatus status; - int ret; - - if (!ch || chan_free(ch)) - return -EINVAL; - - ret = pl330_chan_status(ch->pl330_chan_id, &status); - if (ret < 0) - return ret; - - *src = status.src_addr; - *dst = status.dst_addr; - - return 0; -} -EXPORT_SYMBOL(s3c2410_dma_getposition); - -static irqreturn_t pl330_irq_handler(int irq, void *data) -{ - if (pl330_update(data)) - return IRQ_HANDLED; - else - return IRQ_NONE; -} - -static int pl330_probe(struct platform_device *pdev) -{ - struct s3c_pl330_dmac *s3c_pl330_dmac; - struct s3c_pl330_platdata *pl330pd; - struct pl330_info *pl330_info; - struct resource *res; - int i, ret, irq; - - pl330pd = pdev->dev.platform_data; - - /* Can't do without the list of _32_ peripherals */ - if (!pl330pd || !pl330pd->peri) { - dev_err(&pdev->dev, "platform data missing!\n"); - return -ENODEV; - } - - pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL); - if (!pl330_info) - return -ENOMEM; - - pl330_info->pl330_data = NULL; - pl330_info->dev = &pdev->dev; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENODEV; - goto probe_err1; - } - - request_mem_region(res->start, resource_size(res), pdev->name); - - pl330_info->base = ioremap(res->start, resource_size(res)); - if (!pl330_info->base) { - ret = -ENXIO; - goto probe_err2; - } - - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - ret = irq; - goto probe_err3; - } - - ret = request_irq(irq, pl330_irq_handler, 0, - dev_name(&pdev->dev), pl330_info); - if (ret) - goto probe_err4; - - /* Allocate a new DMAC */ - s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL); - if (!s3c_pl330_dmac) { - ret = -ENOMEM; - goto probe_err5; - } - - /* Get operation clock and enable it */ - s3c_pl330_dmac->clk = clk_get(&pdev->dev, "pdma"); - if (IS_ERR(s3c_pl330_dmac->clk)) { - dev_err(&pdev->dev, "Cannot get operation clock.\n"); - ret = -EINVAL; - goto probe_err6; - } - clk_enable(s3c_pl330_dmac->clk); - - ret = pl330_add(pl330_info); - if (ret) - goto probe_err7; - - /* Hook the info */ - s3c_pl330_dmac->pi = pl330_info; - - /* No busy channels */ - s3c_pl330_dmac->busy_chan = 0; - - s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev), - sizeof(struct s3c_pl330_xfer), 0, 0, NULL); - - if (!s3c_pl330_dmac->kmcache) { - ret = -ENOMEM; - goto probe_err8; - } - - /* Get the list of peripherals */ - s3c_pl330_dmac->peri = pl330pd->peri; - - /* Attach to the list of DMACs */ - list_add_tail(&s3c_pl330_dmac->node, &dmac_list); - - /* Create a channel for each peripheral in the DMAC - * that is, if it doesn't already exist - */ - for (i = 0; i < PL330_MAX_PERI; i++) - if (s3c_pl330_dmac->peri[i] != DMACH_MAX) - chan_add(s3c_pl330_dmac->peri[i]); - - printk(KERN_INFO - "Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name); - printk(KERN_INFO - "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", - pl330_info->pcfg.data_buf_dep, - pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan, - pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events); - - return 0; - -probe_err8: - pl330_del(pl330_info); -probe_err7: - clk_disable(s3c_pl330_dmac->clk); - clk_put(s3c_pl330_dmac->clk); -probe_err6: - kfree(s3c_pl330_dmac); -probe_err5: - free_irq(irq, pl330_info); -probe_err4: -probe_err3: - iounmap(pl330_info->base); -probe_err2: - release_mem_region(res->start, resource_size(res)); 
-probe_err1: - kfree(pl330_info); - - return ret; -} - -static int pl330_remove(struct platform_device *pdev) -{ - struct s3c_pl330_dmac *dmac, *d; - struct s3c_pl330_chan *ch; - unsigned long flags; - int del, found; - - if (!pdev->dev.platform_data) - return -EINVAL; - - spin_lock_irqsave(&res_lock, flags); - - found = 0; - list_for_each_entry(d, &dmac_list, node) - if (d->pi->dev == &pdev->dev) { - found = 1; - break; - } - - if (!found) { - spin_unlock_irqrestore(&res_lock, flags); - return 0; - } - - dmac = d; - - /* Remove all Channels that are managed only by this DMAC */ - list_for_each_entry(ch, &chan_list, node) { - - /* Only channels that are handled by this DMAC */ - if (iface_of_dmac(dmac, ch->id)) - del = 1; - else - continue; - - /* Don't remove if some other DMAC has it too */ - list_for_each_entry(d, &dmac_list, node) - if (d != dmac && iface_of_dmac(d, ch->id)) { - del = 0; - break; - } - - if (del) { - spin_unlock_irqrestore(&res_lock, flags); - s3c2410_dma_free(ch->id, ch->client); - spin_lock_irqsave(&res_lock, flags); - list_del(&ch->node); - kfree(ch); - } - } - - /* Disable operation clock */ - clk_disable(dmac->clk); - clk_put(dmac->clk); - - /* Remove the DMAC */ - list_del(&dmac->node); - kfree(dmac); - - spin_unlock_irqrestore(&res_lock, flags); - - return 0; -} - -static struct platform_driver pl330_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "s3c-pl330", - }, - .probe = pl330_probe, - .remove = pl330_remove, -}; - -static int __init pl330_init(void) -{ - return platform_driver_register(&pl330_driver); -} -module_init(pl330_init); - -static void __exit pl330_exit(void) -{ - platform_driver_unregister(&pl330_driver); - return; -} -module_exit(pl330_exit); - -MODULE_AUTHOR("Jaswinder Singh "); -MODULE_DESCRIPTION("Driver for PL330 DMA Controller"); -MODULE_LICENSE("GPL"); From 39d3e8074e44c1953928a0b91bc328f552c5fc79 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:41 +0900 Subject: [PATCH 48/51] spi/s3c64xx: Add support DMA engine API This patch adds support for the generic DMA engine API for transferring raw SPI data. Basically, the SPI driver uses the generic DMA engine API if the architecture supports it; otherwise it falls back to the Samsung-specific S3C-PL330 API.
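For orientation, the per-transfer call sequence the driver switches to looks roughly like the sketch below. This is a minimal illustration distilled from the hunks in this patch, not a drop-in function: the helper name is made up, and the samsung_dma_prep_info fields are assumed to be exactly the ones the diff uses (cap, direction, buf, len, fp, fp_param).

	/* Illustrative only: queue and start one TX transfer through the
	 * samsung_dma_ops wrapper, mirroring the enable_datapath() hunk.
	 */
	static void spi_dma_tx_sketch(struct s3c64xx_spi_driver_data *sdd,
				      dma_addr_t buf, unsigned len)
	{
		struct samsung_dma_prep_info info;

		info.cap = DMA_SLAVE;			/* plain slave transfer */
		info.direction = DMA_TO_DEVICE;		/* memory -> SPI TX FIFO */
		info.buf = buf;				/* DMA address of the payload */
		info.len = len;
		info.fp = s3c64xx_spi_dma_txcb;		/* completion callback */
		info.fp_param = sdd;

		sdd->ops->prepare(sdd->tx_ch, &info);	/* queue the descriptor */
		sdd->ops->trigger(sdd->tx_ch);		/* kick the channel */
	}

The RX path is symmetric, using DMA_FROM_DEVICE and s3c64xx_spi_dma_rxcb.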
Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Acked-by: Grant Likely Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- drivers/spi/spi-s3c64xx.c | 139 +++++++++++++++++++------------------- 1 file changed, 68 insertions(+), 71 deletions(-) diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 595dacc7645f..24f49032ec35 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -171,6 +171,9 @@ struct s3c64xx_spi_driver_data { unsigned state; unsigned cur_mode, cur_bpw; unsigned cur_speed; + unsigned rx_ch; + unsigned tx_ch; + struct samsung_dma_ops *ops; }; static struct s3c2410_dma_client s3c64xx_spi_dma_client = { @@ -226,6 +229,38 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) writel(val, regs + S3C64XX_SPI_CH_CFG); } +static void s3c64xx_spi_dma_rxcb(void *data) +{ + struct s3c64xx_spi_driver_data *sdd + = (struct s3c64xx_spi_driver_data *)data; + unsigned long flags; + + spin_lock_irqsave(&sdd->lock, flags); + + sdd->state &= ~RXBUSY; + /* If the other done */ + if (!(sdd->state & TXBUSY)) + complete(&sdd->xfer_completion); + + spin_unlock_irqrestore(&sdd->lock, flags); +} + +static void s3c64xx_spi_dma_txcb(void *data) +{ + struct s3c64xx_spi_driver_data *sdd + = (struct s3c64xx_spi_driver_data *)data; + unsigned long flags; + + spin_lock_irqsave(&sdd->lock, flags); + + sdd->state &= ~TXBUSY; + /* If the other done */ + if (!(sdd->state & RXBUSY)) + complete(&sdd->xfer_completion); + + spin_unlock_irqrestore(&sdd->lock, flags); +} + static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, struct spi_device *spi, struct spi_transfer *xfer, int dma_mode) @@ -233,6 +268,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, struct s3c64xx_spi_info *sci = sdd->cntrlr_info; void __iomem *regs = sdd->regs; u32 modecfg, chcfg; + struct samsung_dma_prep_info info; modecfg = readl(regs + S3C64XX_SPI_MODE_CFG); modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); @@ -258,10 +294,14 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, chcfg |= S3C64XX_SPI_CH_TXCH_ON; if (dma_mode) { modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; - s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8); - s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd, - xfer->tx_dma, xfer->len); - s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START); + info.cap = DMA_SLAVE; + info.direction = DMA_TO_DEVICE; + info.buf = xfer->tx_dma; + info.len = xfer->len; + info.fp = s3c64xx_spi_dma_txcb; + info.fp_param = sdd; + sdd->ops->prepare(sdd->tx_ch, &info); + sdd->ops->trigger(sdd->tx_ch); } else { switch (sdd->cur_bpw) { case 32: @@ -293,10 +333,14 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) | S3C64XX_SPI_PACKET_CNT_EN, regs + S3C64XX_SPI_PACKET_CNT); - s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8); - s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd, - xfer->rx_dma, xfer->len); - s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START); + info.cap = DMA_SLAVE; + info.direction = DMA_FROM_DEVICE; + info.buf = xfer->rx_dma; + info.len = xfer->len; + info.fp = s3c64xx_spi_dma_rxcb; + info.fp_param = sdd; + sdd->ops->prepare(sdd->rx_ch, &info); + sdd->ops->trigger(sdd->rx_ch); } } @@ -482,46 +526,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) } } -static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, - int size, enum s3c2410_dma_buffresult res) -{ - struct s3c64xx_spi_driver_data *sdd = buf_id; 
- unsigned long flags; - - spin_lock_irqsave(&sdd->lock, flags); - - if (res == S3C2410_RES_OK) - sdd->state &= ~RXBUSY; - else - dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size); - - /* If the other done */ - if (!(sdd->state & TXBUSY)) - complete(&sdd->xfer_completion); - - spin_unlock_irqrestore(&sdd->lock, flags); -} - -static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, - int size, enum s3c2410_dma_buffresult res) -{ - struct s3c64xx_spi_driver_data *sdd = buf_id; - unsigned long flags; - - spin_lock_irqsave(&sdd->lock, flags); - - if (res == S3C2410_RES_OK) - sdd->state &= ~TXBUSY; - else - dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size); - - /* If the other done */ - if (!(sdd->state & RXBUSY)) - complete(&sdd->xfer_completion); - - spin_unlock_irqrestore(&sdd->lock, flags); -} - #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32) static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, @@ -696,12 +700,10 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd, if (use_dma) { if (xfer->tx_buf != NULL && (sdd->state & TXBUSY)) - s3c2410_dma_ctrl(sdd->tx_dmach, - S3C2410_DMAOP_FLUSH); + sdd->ops->stop(sdd->tx_ch); if (xfer->rx_buf != NULL && (sdd->state & RXBUSY)) - s3c2410_dma_ctrl(sdd->rx_dmach, - S3C2410_DMAOP_FLUSH); + sdd->ops->stop(sdd->rx_ch); } goto out; @@ -741,24 +743,19 @@ out: static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) { - if (s3c2410_dma_request(sdd->rx_dmach, - &s3c64xx_spi_dma_client, NULL) < 0) { - dev_err(&sdd->pdev->dev, "cannot get RxDMA\n"); - return 0; - } - s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb); - s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW, - sdd->sfr_start + S3C64XX_SPI_RX_DATA); - if (s3c2410_dma_request(sdd->tx_dmach, - &s3c64xx_spi_dma_client, NULL) < 0) { - dev_err(&sdd->pdev->dev, "cannot get TxDMA\n"); - s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client); - return 0; - } - s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb); - s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM, - sdd->sfr_start + S3C64XX_SPI_TX_DATA); + struct samsung_dma_info info; + sdd->ops = samsung_dma_get_ops(); + + info.cap = DMA_SLAVE; + info.client = &s3c64xx_spi_dma_client; + info.direction = DMA_FROM_DEVICE; + info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA; + info.width = sdd->cur_bpw / 8; + sdd->rx_ch = sdd->ops->request(sdd->rx_dmach, &info); + info.direction = DMA_TO_DEVICE; + info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA; + sdd->tx_ch = sdd->ops->request(sdd->tx_dmach, &info); return 1; } @@ -799,8 +796,8 @@ static void s3c64xx_spi_work(struct work_struct *work) spin_unlock_irqrestore(&sdd->lock, flags); /* Free DMA channels */ - s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client); - s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client); + sdd->ops->release(sdd->rx_ch, &s3c64xx_spi_dma_client); + sdd->ops->release(sdd->tx_ch, &s3c64xx_spi_dma_client); } static int s3c64xx_spi_transfer(struct spi_device *spi, From 82ab8cd7ec32194757ac73a66633be73ba88ea69 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:42 +0900 Subject: [PATCH 49/51] spi/s3c64xx: Merge dma control code The original s3c64xx SPI driver carried separate DMA control code for the RX and TX channels. This patch merges that code so that a single DMA setup function and a single DMA callback handle both the RX and TX channels.
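The key to the merge is that both per-direction state blocks are embedded in the driver data, so one callback can recover the owning structure from whichever block it was handed. A condensed sketch of that container_of() step, using the names introduced in the hunks below (the function name here is hypothetical and the callback body is trimmed to a comment):

	static void spi_dmacb_sketch(void *data)
	{
		struct s3c64xx_spi_dma_data *dma = data;	/* &sdd->rx_dma or &sdd->tx_dma */
		struct s3c64xx_spi_driver_data *sdd;

		/* Both blocks live inside the driver data, so either one
		 * leads back to it. */
		if (dma->direction == DMA_FROM_DEVICE)
			sdd = container_of(dma, struct s3c64xx_spi_driver_data, rx_dma);
		else
			sdd = container_of(dma, struct s3c64xx_spi_driver_data, tx_dma);

		/* ...clear RXBUSY or TXBUSY under sdd->lock and complete
		 * sdd->xfer_completion once the other direction is idle... */
	}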
Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Cc: Grant Likely Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- drivers/spi/spi-s3c64xx.c | 140 +++++++++++++++++++++----------------- 1 file changed, 76 insertions(+), 64 deletions(-) diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 24f49032ec35..019a7163572f 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -131,6 +131,12 @@ #define RXBUSY (1<<2) #define TXBUSY (1<<3) +struct s3c64xx_spi_dma_data { + unsigned ch; + enum dma_data_direction direction; + enum dma_ch dmach; +}; + /** * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. * @clk: Pointer to the spi clock. @@ -164,15 +170,13 @@ struct s3c64xx_spi_driver_data { struct work_struct work; struct list_head queue; spinlock_t lock; - enum dma_ch rx_dmach; - enum dma_ch tx_dmach; unsigned long sfr_start; struct completion xfer_completion; unsigned state; unsigned cur_mode, cur_bpw; unsigned cur_speed; - unsigned rx_ch; - unsigned tx_ch; + struct s3c64xx_spi_dma_data rx_dma; + struct s3c64xx_spi_dma_data tx_dma; struct samsung_dma_ops *ops; }; @@ -229,36 +233,76 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) writel(val, regs + S3C64XX_SPI_CH_CFG); } -static void s3c64xx_spi_dma_rxcb(void *data) +static void s3c64xx_spi_dmacb(void *data) { - struct s3c64xx_spi_driver_data *sdd - = (struct s3c64xx_spi_driver_data *)data; + struct s3c64xx_spi_driver_data *sdd; + struct s3c64xx_spi_dma_data *dma = data; unsigned long flags; + if (dma->direction == DMA_FROM_DEVICE) + sdd = container_of(data, + struct s3c64xx_spi_driver_data, rx_dma); + else + sdd = container_of(data, + struct s3c64xx_spi_driver_data, tx_dma); + spin_lock_irqsave(&sdd->lock, flags); - sdd->state &= ~RXBUSY; - /* If the other done */ - if (!(sdd->state & TXBUSY)) - complete(&sdd->xfer_completion); + if (dma->direction == DMA_FROM_DEVICE) { + sdd->state &= ~RXBUSY; + if (!(sdd->state & TXBUSY)) + complete(&sdd->xfer_completion); + } else { + sdd->state &= ~TXBUSY; + if (!(sdd->state & RXBUSY)) + complete(&sdd->xfer_completion); + } spin_unlock_irqrestore(&sdd->lock, flags); } -static void s3c64xx_spi_dma_txcb(void *data) +static void prepare_dma(struct s3c64xx_spi_dma_data *dma, + unsigned len, dma_addr_t buf) { - struct s3c64xx_spi_driver_data *sdd - = (struct s3c64xx_spi_driver_data *)data; - unsigned long flags; + struct s3c64xx_spi_driver_data *sdd; + struct samsung_dma_prep_info info; - spin_lock_irqsave(&sdd->lock, flags); + if (dma->direction == DMA_FROM_DEVICE) + sdd = container_of((void *)dma, + struct s3c64xx_spi_driver_data, rx_dma); + else + sdd = container_of((void *)dma, + struct s3c64xx_spi_driver_data, tx_dma); - sdd->state &= ~TXBUSY; - /* If the other done */ - if (!(sdd->state & RXBUSY)) - complete(&sdd->xfer_completion); + info.cap = DMA_SLAVE; + info.len = len; + info.fp = s3c64xx_spi_dmacb; + info.fp_param = dma; + info.direction = dma->direction; + info.buf = buf; - spin_unlock_irqrestore(&sdd->lock, flags); + sdd->ops->prepare(dma->ch, &info); + sdd->ops->trigger(dma->ch); +} + +static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) +{ + struct samsung_dma_info info; + + sdd->ops = samsung_dma_get_ops(); + + info.cap = DMA_SLAVE; + info.client = &s3c64xx_spi_dma_client; + info.width = sdd->cur_bpw / 8; + + info.direction = sdd->rx_dma.direction; + info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA; + sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &info); + info.direction = 
sdd->tx_dma.direction; + info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA; + sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &info); + + return 1; } static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, @@ -268,7 +312,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, struct s3c64xx_spi_info *sci = sdd->cntrlr_info; void __iomem *regs = sdd->regs; u32 modecfg, chcfg; - struct samsung_dma_prep_info info; modecfg = readl(regs + S3C64XX_SPI_MODE_CFG); modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); @@ -294,14 +337,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, chcfg |= S3C64XX_SPI_CH_TXCH_ON; if (dma_mode) { modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; - info.cap = DMA_SLAVE; - info.direction = DMA_TO_DEVICE; - info.buf = xfer->tx_dma; - info.len = xfer->len; - info.fp = s3c64xx_spi_dma_txcb; - info.fp_param = sdd; - sdd->ops->prepare(sdd->tx_ch, &info); - sdd->ops->trigger(sdd->tx_ch); + prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma); } else { switch (sdd->cur_bpw) { case 32: @@ -333,14 +369,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) | S3C64XX_SPI_PACKET_CNT_EN, regs + S3C64XX_SPI_PACKET_CNT); - info.cap = DMA_SLAVE; - info.direction = DMA_FROM_DEVICE; - info.buf = xfer->rx_dma; - info.len = xfer->len; - info.fp = s3c64xx_spi_dma_rxcb; - info.fp_param = sdd; - sdd->ops->prepare(sdd->rx_ch, &info); - sdd->ops->trigger(sdd->rx_ch); + prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma); } } @@ -700,10 +729,10 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd, if (use_dma) { if (xfer->tx_buf != NULL && (sdd->state & TXBUSY)) - sdd->ops->stop(sdd->tx_ch); + sdd->ops->stop(sdd->tx_dma.ch); if (xfer->rx_buf != NULL && (sdd->state & RXBUSY)) - sdd->ops->stop(sdd->rx_ch); + sdd->ops->stop(sdd->rx_dma.ch); } goto out; @@ -741,25 +770,6 @@ out: msg->complete(msg->context); } -static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) -{ - - struct samsung_dma_info info; - sdd->ops = samsung_dma_get_ops(); - - info.cap = DMA_SLAVE; - info.client = &s3c64xx_spi_dma_client; - info.direction = DMA_FROM_DEVICE; - info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA; - info.width = sdd->cur_bpw / 8; - sdd->rx_ch = sdd->ops->request(sdd->rx_dmach, &info); - info.direction = DMA_TO_DEVICE; - info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA; - sdd->tx_ch = sdd->ops->request(sdd->tx_dmach, &info); - - return 1; -} - static void s3c64xx_spi_work(struct work_struct *work) { struct s3c64xx_spi_driver_data *sdd = container_of(work, @@ -796,8 +806,8 @@ static void s3c64xx_spi_work(struct work_struct *work) spin_unlock_irqrestore(&sdd->lock, flags); /* Free DMA channels */ - sdd->ops->release(sdd->rx_ch, &s3c64xx_spi_dma_client); - sdd->ops->release(sdd->tx_ch, &s3c64xx_spi_dma_client); + sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client); + sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client); } static int s3c64xx_spi_transfer(struct spi_device *spi, @@ -1014,8 +1024,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) sdd->cntrlr_info = sci; sdd->pdev = pdev; sdd->sfr_start = mem_res->start; - sdd->tx_dmach = dmatx_res->start; - sdd->rx_dmach = dmarx_res->start; + sdd->tx_dma.dmach = dmatx_res->start; + sdd->tx_dma.direction = DMA_TO_DEVICE; + sdd->rx_dma.dmach = dmarx_res->start; + sdd->rx_dma.direction = DMA_FROM_DEVICE; sdd->cur_bpw = 8; @@ -1103,7 +1115,7 @@ static int __init s3c64xx_spi_probe(struct platform_device 
*pdev) pdev->id, master->num_chipselect); dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", mem_res->end, mem_res->start, - sdd->rx_dmach, sdd->tx_dmach); + sdd->rx_dma.dmach, sdd->tx_dma.dmach); return 0; From 344b4c48887a443f7478fc7047d1397b20821ed3 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:43 +0900 Subject: [PATCH 50/51] ASoC: Samsung: Update DMA interface This patch adds support for the PL330 DMA driver that uses the generic DMA engine API. The Samsung sound driver uses the generic DMA engine API if the architecture supports it; otherwise it uses the Samsung-specific S3C-PL330 API driver to transfer PCM data. Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Cc: Jassi Brar Cc: Liam Girdwood Acked-by: Mark Brown [kgene.kim@samsung.com: removed useless variable] Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- arch/arm/mach-s3c2410/include/mach/dma.h | 10 +- arch/arm/mach-s3c64xx/include/mach/dma.h | 2 +- .../arm/plat-samsung/include/plat/dma-pl330.h | 2 +- sound/soc/samsung/ac97.c | 10 +- sound/soc/samsung/dma.c | 144 +++++++----------- sound/soc/samsung/dma.h | 4 +- 6 files changed, 77 insertions(+), 95 deletions(-) diff --git a/arch/arm/mach-s3c2410/include/mach/dma.h b/arch/arm/mach-s3c2410/include/mach/dma.h index e61a91f88c52..4e485bab66de 100644 --- a/arch/arm/mach-s3c2410/include/mach/dma.h +++ b/arch/arm/mach-s3c2410/include/mach/dma.h @@ -50,6 +50,11 @@ enum dma_ch { DMACH_MAX, /* the end entry */ }; +static inline bool samsung_dma_has_circular(void) +{ + return false; +} + static inline bool samsung_dma_is_dmadev(void) { return false; @@ -202,9 +207,4 @@ struct s3c2410_dma_chan { typedef unsigned long dma_device_t; -static inline bool s3c_dma_has_circular(void) -{ - return false; -} - #endif /* __ASM_ARCH_DMA_H */ diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h index 49c3a53135d4..74fdf25c9042 100644 --- a/arch/arm/mach-s3c64xx/include/mach/dma.h +++ b/arch/arm/mach-s3c64xx/include/mach/dma.h @@ -58,7 +58,7 @@ enum dma_ch { DMACH_MAX /* the end */ }; -static __inline__ bool s3c_dma_has_circular(void) +static inline bool samsung_dma_has_circular(void) { return true; } diff --git a/arch/arm/plat-samsung/include/plat/dma-pl330.h b/arch/arm/plat-samsung/include/plat/dma-pl330.h index 9a1dadb0218e..2e55e5958674 100644 --- a/arch/arm/plat-samsung/include/plat/dma-pl330.h +++ b/arch/arm/plat-samsung/include/plat/dma-pl330.h @@ -89,7 +89,7 @@ struct s3c2410_dma_client { char *name; }; -static inline bool s3c_dma_has_circular(void) +static inline bool samsung_dma_has_circular(void) { return true; } diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c index f97110e72e85..b4f9b0003685 100644 --- a/sound/soc/samsung/ac97.c +++ b/sound/soc/samsung/ac97.c @@ -271,7 +271,10 @@ static int s3c_ac97_trigger(struct snd_pcm_substream *substream, int cmd, writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL); - s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); + if (!dma_data->ops) + dma_data->ops = samsung_dma_get_ops(); + + dma_data->ops->started(dma_data->channel); return 0; } @@ -317,7 +320,10 @@ static int s3c_ac97_mic_trigger(struct snd_pcm_substream *substream, writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL); - s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); + if (!dma_data->ops) + dma_data->ops = samsung_dma_get_ops(); + + dma_data->ops->started(dma_data->channel); return 0; } diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c index 9465588b02f2..851346f7d68d
100644 --- a/sound/soc/samsung/dma.c +++ b/sound/soc/samsung/dma.c @@ -54,7 +54,6 @@ struct runtime_data { spinlock_t lock; int state; unsigned int dma_loaded; - unsigned int dma_limit; unsigned int dma_period; dma_addr_t dma_start; dma_addr_t dma_pos; @@ -62,77 +61,79 @@ struct runtime_data { struct s3c_dma_params *params; }; +static void audio_buffdone(void *data); + /* dma_enqueue * * place a dma buffer onto the queue for the dma system * to handle. -*/ + */ static void dma_enqueue(struct snd_pcm_substream *substream) { struct runtime_data *prtd = substream->runtime->private_data; dma_addr_t pos = prtd->dma_pos; unsigned int limit; - int ret; + struct samsung_dma_prep_info dma_info; pr_debug("Entered %s\n", __func__); - if (s3c_dma_has_circular()) - limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period; - else - limit = prtd->dma_limit; + limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period; pr_debug("%s: loaded %d, limit %d\n", __func__, prtd->dma_loaded, limit); - while (prtd->dma_loaded < limit) { - unsigned long len = prtd->dma_period; + dma_info.cap = (samsung_dma_has_circular() ? DMA_CYCLIC : DMA_SLAVE); + dma_info.direction = + (substream->stream == SNDRV_PCM_STREAM_PLAYBACK + ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + dma_info.fp = audio_buffdone; + dma_info.fp_param = substream; + dma_info.period = prtd->dma_period; + dma_info.len = prtd->dma_period*limit; + while (prtd->dma_loaded < limit) { pr_debug("dma_loaded: %d\n", prtd->dma_loaded); - if ((pos + len) > prtd->dma_end) { - len = prtd->dma_end - pos; - pr_debug("%s: corrected dma len %ld\n", __func__, len); + if ((pos + dma_info.period) > prtd->dma_end) { + dma_info.period = prtd->dma_end - pos; + pr_debug("%s: corrected dma len %ld\n", + __func__, dma_info.period); } - ret = s3c2410_dma_enqueue(prtd->params->channel, - substream, pos, len); + dma_info.buf = pos; + prtd->params->ops->prepare(prtd->params->ch, &dma_info); - if (ret == 0) { - prtd->dma_loaded++; - pos += prtd->dma_period; - if (pos >= prtd->dma_end) - pos = prtd->dma_start; - } else - break; + prtd->dma_loaded++; + pos += prtd->dma_period; + if (pos >= prtd->dma_end) + pos = prtd->dma_start; } prtd->dma_pos = pos; } -static void audio_buffdone(struct s3c2410_dma_chan *channel, - void *dev_id, int size, - enum s3c2410_dma_buffresult result) +static void audio_buffdone(void *data) { - struct snd_pcm_substream *substream = dev_id; - struct runtime_data *prtd; + struct snd_pcm_substream *substream = data; + struct runtime_data *prtd = substream->runtime->private_data; pr_debug("Entered %s\n", __func__); - if (result == S3C2410_RES_ABORT || result == S3C2410_RES_ERR) - return; + if (prtd->state & ST_RUNNING) { + prtd->dma_pos += prtd->dma_period; + if (prtd->dma_pos >= prtd->dma_end) + prtd->dma_pos = prtd->dma_start; - prtd = substream->runtime->private_data; + if (substream) + snd_pcm_period_elapsed(substream); - if (substream) - snd_pcm_period_elapsed(substream); - - spin_lock(&prtd->lock); - if (prtd->state & ST_RUNNING && !s3c_dma_has_circular()) { - prtd->dma_loaded--; - dma_enqueue(substream); + spin_lock(&prtd->lock); + if (!samsung_dma_has_circular()) { + prtd->dma_loaded--; + dma_enqueue(substream); + } + spin_unlock(&prtd->lock); } - - spin_unlock(&prtd->lock); } static int dma_hw_params(struct snd_pcm_substream *substream, @@ -144,8 +145,7 @@ static int dma_hw_params(struct snd_pcm_substream *substream, unsigned long totbytes = params_buffer_bytes(params); struct s3c_dma_params *dma = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); - int 
ret = 0; - + struct samsung_dma_info dma_info; pr_debug("Entered %s\n", __func__); @@ -163,30 +163,26 @@ static int dma_hw_params(struct snd_pcm_substream *substream, pr_debug("params %p, client %p, channel %d\n", prtd->params, prtd->params->client, prtd->params->channel); - ret = s3c2410_dma_request(prtd->params->channel, - prtd->params->client, NULL); + prtd->params->ops = samsung_dma_get_ops(); - if (ret < 0) { - printk(KERN_ERR "failed to get dma channel\n"); - return ret; - } - - /* use the circular buffering if we have it available. */ - if (s3c_dma_has_circular()) - s3c2410_dma_setflags(prtd->params->channel, - S3C2410_DMAF_CIRCULAR); + dma_info.cap = (samsung_dma_has_circular() ? + DMA_CYCLIC : DMA_SLAVE); + dma_info.client = prtd->params->client; + dma_info.direction = + (substream->stream == SNDRV_PCM_STREAM_PLAYBACK + ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + dma_info.width = prtd->params->dma_size; + dma_info.fifo = prtd->params->dma_addr; + prtd->params->ch = prtd->params->ops->request( + prtd->params->channel, &dma_info); } - s3c2410_dma_set_buffdone_fn(prtd->params->channel, - audio_buffdone); - snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); runtime->dma_bytes = totbytes; spin_lock_irq(&prtd->lock); prtd->dma_loaded = 0; - prtd->dma_limit = runtime->hw.periods_min; prtd->dma_period = params_period_bytes(params); prtd->dma_start = runtime->dma_addr; prtd->dma_pos = prtd->dma_start; @@ -206,7 +202,8 @@ static int dma_hw_free(struct snd_pcm_substream *substream) snd_pcm_set_runtime_buffer(substream, NULL); if (prtd->params) { - s3c2410_dma_free(prtd->params->channel, prtd->params->client); + prtd->params->ops->release(prtd->params->ch, + prtd->params->client); prtd->params = NULL; } @@ -225,23 +222,9 @@ static int dma_prepare(struct snd_pcm_substream *substream) if (!prtd->params) return 0; - /* channel needs configuring for mem=>device, increment memory addr, - * sync to pclk, half-word transfers to the IIS-FIFO. 
*/ - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { - s3c2410_dma_devconfig(prtd->params->channel, - S3C2410_DMASRC_MEM, - prtd->params->dma_addr); - } else { - s3c2410_dma_devconfig(prtd->params->channel, - S3C2410_DMASRC_HW, - prtd->params->dma_addr); - } - - s3c2410_dma_config(prtd->params->channel, - prtd->params->dma_size); - /* flush the DMA channel */ - s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_FLUSH); + prtd->params->ops->flush(prtd->params->ch); + prtd->dma_loaded = 0; prtd->dma_pos = prtd->dma_start; @@ -265,14 +248,14 @@ static int dma_trigger(struct snd_pcm_substream *substream, int cmd) case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: prtd->state |= ST_RUNNING; - s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START); + prtd->params->ops->trigger(prtd->params->ch); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: prtd->state &= ~ST_RUNNING; - s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STOP); + prtd->params->ops->stop(prtd->params->ch); break; default: @@ -291,21 +274,12 @@ dma_pointer(struct snd_pcm_substream *substream) struct snd_pcm_runtime *runtime = substream->runtime; struct runtime_data *prtd = runtime->private_data; unsigned long res; - dma_addr_t src, dst; pr_debug("Entered %s\n", __func__); - spin_lock(&prtd->lock); - s3c2410_dma_getposition(prtd->params->channel, &src, &dst); + res = prtd->dma_pos - prtd->dma_start; - if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) - res = dst - prtd->dma_start; - else - res = src - prtd->dma_start; - - spin_unlock(&prtd->lock); - - pr_debug("Pointer %x %x\n", src, dst); + pr_debug("Pointer offset: %lu\n", res); /* we seem to be getting the odd error from the pcm library due * to out-of-bounds pointers. this is maybe due to the dma engine diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h index c50659269a40..7d1ead77ef21 100644 --- a/sound/soc/samsung/dma.h +++ b/sound/soc/samsung/dma.h @@ -6,7 +6,7 @@ * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * - * ALSA PCM interface for the Samsung S3C24xx CPU + * ALSA PCM interface for the Samsung SoC */ #ifndef _S3C_AUDIO_H @@ -17,6 +17,8 @@ struct s3c_dma_params { int channel; /* Channel ID */ dma_addr_t dma_addr; int dma_size; /* Size of the DMA transfer */ + unsigned ch; + struct samsung_dma_ops *ops; }; #endif From 51ddf31da16b1ab9da861eafedad6d263faf4388 Mon Sep 17 00:00:00 2001 From: Boojin Kim Date: Fri, 2 Sep 2011 09:44:44 +0900 Subject: [PATCH 51/51] ARM: SAMSUNG: Remove Samsung specific enum type for dma direction This patch removes the samsung specific enum type 's3c2410_dmasrc' and uses 'dma_data_direction' instead. 
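The replacement is a one-to-one mapping: S3C2410_DMASRC_HW (the peripheral is the source) becomes DMA_FROM_DEVICE, and S3C2410_DMASRC_MEM (memory is the source) becomes DMA_TO_DEVICE, as the devconfig hunks below show. A hypothetical call site just to illustrate the before/after shape (ch and fifo_addr are placeholders, not names from these patches):

	/* Before: Samsung-specific direction values */
	s3c2410_dma_devconfig(ch, S3C2410_DMASRC_HW, fifo_addr);	/* peripheral -> memory */
	s3c2410_dma_devconfig(ch, S3C2410_DMASRC_MEM, fifo_addr);	/* memory -> peripheral */

	/* After: generic enum dma_data_direction */
	s3c2410_dma_devconfig(ch, DMA_FROM_DEVICE, fifo_addr);		/* peripheral -> memory */
	s3c2410_dma_devconfig(ch, DMA_TO_DEVICE, fifo_addr);		/* memory -> peripheral */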
Signed-off-by: Boojin Kim Acked-by: Linus Walleij Acked-by: Vinod Koul Signed-off-by: Kukjin Kim Signed-off-by: Vinod Koul --- arch/arm/mach-s3c2410/include/mach/dma.h | 2 +- arch/arm/mach-s3c2412/dma.c | 4 ++-- arch/arm/mach-s3c64xx/dma.c | 10 +++++----- arch/arm/mach-s3c64xx/include/mach/dma.h | 2 +- arch/arm/plat-s3c24xx/dma.c | 10 +++++----- arch/arm/plat-samsung/include/plat/dma-s3c24xx.h | 2 +- arch/arm/plat-samsung/include/plat/dma.h | 9 +++------ arch/arm/plat-samsung/s3c-dma-ops.c | 5 +---- drivers/mmc/host/s3cmci.c | 6 +++--- 9 files changed, 22 insertions(+), 28 deletions(-) diff --git a/arch/arm/mach-s3c2410/include/mach/dma.h b/arch/arm/mach-s3c2410/include/mach/dma.h index 4e485bab66de..ae8e482b6427 100644 --- a/arch/arm/mach-s3c2410/include/mach/dma.h +++ b/arch/arm/mach-s3c2410/include/mach/dma.h @@ -174,7 +174,7 @@ struct s3c2410_dma_chan { struct s3c2410_dma_client *client; /* channel configuration */ - enum s3c2410_dmasrc source; + enum dma_data_direction source; enum dma_ch req_ch; unsigned long dev_addr; unsigned long load_timeout; diff --git a/arch/arm/mach-s3c2412/dma.c b/arch/arm/mach-s3c2412/dma.c index 7abecfca0b7e..b4fc3ba367d6 100644 --- a/arch/arm/mach-s3c2412/dma.c +++ b/arch/arm/mach-s3c2412/dma.c @@ -148,11 +148,11 @@ static struct s3c24xx_dma_map __initdata s3c2412_dma_mappings[] = { static void s3c2412_dma_direction(struct s3c2410_dma_chan *chan, struct s3c24xx_dma_map *map, - enum s3c2410_dmasrc dir) + enum dma_data_direction dir) { unsigned long chsel; - if (dir == S3C2410_DMASRC_HW) + if (dir == DMA_FROM_DEVICE) chsel = map->channels_rx[0]; else chsel = map->channels[0]; diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c index 204bfafe4bfc..67c97fab62fd 100644 --- a/arch/arm/mach-s3c64xx/dma.c +++ b/arch/arm/mach-s3c64xx/dma.c @@ -147,14 +147,14 @@ static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan, u32 control0, control1; switch (chan->source) { - case S3C2410_DMASRC_HW: + case DMA_FROM_DEVICE: src = chan->dev_addr; dst = data; control0 = PL080_CONTROL_SRC_AHB2; control0 |= PL080_CONTROL_DST_INCR; break; - case S3C2410_DMASRC_MEM: + case DMA_TO_DEVICE: src = data; dst = chan->dev_addr; control0 = PL080_CONTROL_DST_AHB2; @@ -416,7 +416,7 @@ EXPORT_SYMBOL(s3c2410_dma_enqueue); int s3c2410_dma_devconfig(enum dma_ch channel, - enum s3c2410_dmasrc source, + enum dma_data_direction source, unsigned long devaddr) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -437,11 +437,11 @@ int s3c2410_dma_devconfig(enum dma_ch channel, pr_debug("%s: peripheral %d\n", __func__, peripheral); switch (source) { - case S3C2410_DMASRC_HW: + case DMA_FROM_DEVICE: config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT; config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT; break; - case S3C2410_DMASRC_MEM: + case DMA_TO_DEVICE: config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT; config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT; break; diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h index 74fdf25c9042..fe1a98cf0e4c 100644 --- a/arch/arm/mach-s3c64xx/include/mach/dma.h +++ b/arch/arm/mach-s3c64xx/include/mach/dma.h @@ -99,7 +99,7 @@ struct s3c2410_dma_chan { unsigned char peripheral; unsigned int flags; - enum s3c2410_dmasrc source; + enum dma_data_direction source; dma_addr_t dev_addr; diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index 539bd0e3defd..53754bcf15a7 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c @@ -1094,14 +1094,14 @@ 
EXPORT_SYMBOL(s3c2410_dma_config); * * configure the dma source/destination hardware type and address * - * source: S3C2410_DMASRC_HW: source is hardware - * S3C2410_DMASRC_MEM: source is memory + * source: DMA_FROM_DEVICE: source is hardware + * DMA_TO_DEVICE: source is memory * * devaddr: physical address of the source */ int s3c2410_dma_devconfig(enum dma_ch channel, - enum s3c2410_dmasrc source, + enum dma_data_direction source, unsigned long devaddr) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -1131,7 +1131,7 @@ int s3c2410_dma_devconfig(enum dma_ch channel, hwcfg |= S3C2410_DISRCC_INC; switch (source) { - case S3C2410_DMASRC_HW: + case DMA_FROM_DEVICE: /* source is hardware */ pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n", __func__, devaddr, hwcfg); @@ -1142,7 +1142,7 @@ int s3c2410_dma_devconfig(enum dma_ch channel, chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST); break; - case S3C2410_DMASRC_MEM: + case DMA_TO_DEVICE: /* source is memory */ pr_debug("%s: mem source, devaddr=%08lx, hwcfg=%d\n", __func__, devaddr, hwcfg); diff --git a/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h b/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h index 336d5ac02035..19828296a562 100644 --- a/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h +++ b/arch/arm/plat-samsung/include/plat/dma-s3c24xx.h @@ -47,7 +47,7 @@ struct s3c24xx_dma_selection { void (*direction)(struct s3c2410_dma_chan *chan, struct s3c24xx_dma_map *map, - enum s3c2410_dmasrc dir); + enum dma_data_direction dir); }; extern int s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel); diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h index 361076060744..b9061128abde 100644 --- a/arch/arm/plat-samsung/include/plat/dma.h +++ b/arch/arm/plat-samsung/include/plat/dma.h @@ -10,17 +10,14 @@ * published by the Free Software Foundation. 
*/ +#include + enum s3c2410_dma_buffresult { S3C2410_RES_OK, S3C2410_RES_ERR, S3C2410_RES_ABORT }; -enum s3c2410_dmasrc { - S3C2410_DMASRC_HW, /* source is memory */ - S3C2410_DMASRC_MEM /* source is hardware */ -}; - /* enum s3c2410_chan_op * * operation codes passed to the DMA code by the user, and also used @@ -112,7 +109,7 @@ extern int s3c2410_dma_config(enum dma_ch channel, int xferunit); */ extern int s3c2410_dma_devconfig(enum dma_ch channel, - enum s3c2410_dmasrc source, unsigned long devaddr); + enum dma_data_direction source, unsigned long devaddr); /* s3c2410_dma_getposition * diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c index 33ab3241268d..582333c70585 100644 --- a/arch/arm/plat-samsung/s3c-dma-ops.c +++ b/arch/arm/plat-samsung/s3c-dma-ops.c @@ -48,10 +48,7 @@ static unsigned s3c_dma_request(enum dma_ch dma_ch, data->ch = dma_ch; list_add_tail(&data->node, &dma_list); - if (info->direction == DMA_FROM_DEVICE) - s3c2410_dma_devconfig(dma_ch, S3C2410_DMASRC_HW, info->fifo); - else - s3c2410_dma_devconfig(dma_ch, S3C2410_DMASRC_MEM, info->fifo); + s3c2410_dma_devconfig(dma_ch, info->direction, info->fifo); if (info->cap == DMA_CYCLIC) s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR); diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index a04f87d7ee3d..03cfdab99c8f 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -913,9 +913,9 @@ request_done: } static void s3cmci_dma_setup(struct s3cmci_host *host, - enum s3c2410_dmasrc source) + enum dma_data_direction source) { - static enum s3c2410_dmasrc last_source = -1; + static enum dma_data_direction last_source = -1; static int setup_ok; if (last_source == source) @@ -1087,7 +1087,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); - s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW); + s3cmci_dma_setup(host, rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE); s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,