Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine update from Vinod Koul: "This time we have a new dmaengine driver from the tegra folks. Also we have Guennadi's cleanup of sh drivers which incudes a library for sh drivers. And the usual odd fixes in bunch of drivers and some nice cleanup of dw_dmac from Andy." Fix up conflicts in drivers/mmc/host/sh_mmcif.c * 'next' of git://git.infradead.org/users/vkoul/slave-dma: (46 commits) dmaengine: Cleanup logging messages mmc: sh_mmcif: switch to the new DMA channel allocation and configuration dma: sh: provide a migration path for slave drivers to stop using .private dma: sh: use an integer slave ID to improve API compatibility dmaengine: shdma: prepare to stop using struct dma_chan::private sh: remove unused DMA device pointer from SIU platform data ASoC: siu: don't use DMA device for channel filtering dmaengine: shdma: (cosmetic) simplify a static function dmaengine: at_hdmac: add a few const qualifiers dw_dmac: use 'u32' for LLI structure members, not dma_addr_t dw_dmac: mark dwc_dump_lli inline dma: mxs-dma: Export missing symbols from mxs-dma.c dma: shdma: convert to the shdma base library ASoC: fsi: prepare for conversion to the shdma base library usb: renesas_usbhs: prepare for conversion to the shdma base library ASoC: siu: prepare for conversion to the shdma base library serial: sh-sci: prepare for conversion to the shdma base library mmc: sh_mobile_sdhi: prepare for conversion to the shdma base library mmc: sh_mmcif: remove unneeded struct sh_mmcif_dma, prepare to shdma conversion dma: shdma: prepare for conversion to the shdma base library ...
This commit is contained in:
commit
c511dc1fb6
|
@ -14,7 +14,6 @@
|
|||
struct device;
|
||||
|
||||
struct siu_platform {
|
||||
struct device *dma_dev;
|
||||
unsigned int dma_slave_tx_a;
|
||||
unsigned int dma_slave_rx_a;
|
||||
unsigned int dma_slave_tx_b;
|
||||
|
|
|
@ -512,7 +512,6 @@ static struct platform_device tmu2_device = {
|
|||
};
|
||||
|
||||
static struct siu_platform siu_platform_data = {
|
||||
.dma_dev = &dma_device.dev,
|
||||
.dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX,
|
||||
.dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX,
|
||||
.dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX,
|
||||
|
|
|
@ -148,6 +148,20 @@ config TXX9_DMAC
|
|||
Support the TXx9 SoC internal DMA controller. This can be
|
||||
integrated in chips such as the Toshiba TX4927/38/39.
|
||||
|
||||
config TEGRA20_APB_DMA
|
||||
bool "NVIDIA Tegra20 APB DMA support"
|
||||
depends on ARCH_TEGRA
|
||||
select DMA_ENGINE
|
||||
help
|
||||
Support for the NVIDIA Tegra20 APB DMA controller driver. The
|
||||
DMA controller is having multiple DMA channel which can be
|
||||
configured for different peripherals like audio, UART, SPI,
|
||||
I2C etc which is in APB bus.
|
||||
This DMA controller transfers data from memory to peripheral fifo
|
||||
or vice versa. It does not support memory to memory data transfer.
|
||||
|
||||
|
||||
|
||||
config SH_DMAE
|
||||
tristate "Renesas SuperH DMAC support"
|
||||
depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
|
||||
|
@ -237,7 +251,7 @@ config IMX_DMA
|
|||
|
||||
config MXS_DMA
|
||||
bool "MXS DMA support"
|
||||
depends on SOC_IMX23 || SOC_IMX28
|
||||
depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
|
||||
select STMP_DEVICE
|
||||
select DMA_ENGINE
|
||||
help
|
||||
|
@ -260,6 +274,16 @@ config DMA_SA11X0
|
|||
SA-1110 SoCs. This DMA engine can only be used with on-chip
|
||||
devices.
|
||||
|
||||
config MMP_TDMA
|
||||
bool "MMP Two-Channel DMA support"
|
||||
depends on ARCH_MMP
|
||||
select DMA_ENGINE
|
||||
help
|
||||
Support the MMP Two-Channel DMA engine.
|
||||
This engine used for MMP Audio DMA and pxa910 SQU.
|
||||
|
||||
Say Y here if you enabled MMP ADMA, otherwise say N.
|
||||
|
||||
config DMA_ENGINE
|
||||
bool
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o
|
|||
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
|
||||
obj-$(CONFIG_MX3_IPU) += ipu/
|
||||
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
|
||||
obj-$(CONFIG_SH_DMAE) += shdma.o
|
||||
obj-$(CONFIG_SH_DMAE) += sh/
|
||||
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
|
||||
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
|
||||
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
|
||||
|
@ -23,8 +23,10 @@ obj-$(CONFIG_MXS_DMA) += mxs-dma.o
|
|||
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
|
||||
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
|
||||
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
|
||||
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
|
||||
obj-$(CONFIG_PL330_DMA) += pl330.o
|
||||
obj-$(CONFIG_PCH_DMA) += pch_dma.o
|
||||
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
|
||||
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
|
||||
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
|
||||
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
|
||||
|
|
|
@ -9,10 +9,9 @@
|
|||
* (at your option) any later version.
|
||||
*
|
||||
*
|
||||
* This supports the Atmel AHB DMA Controller,
|
||||
*
|
||||
* The driver has currently been tested with the Atmel AT91SAM9RL
|
||||
* and AT91SAM9G45 series.
|
||||
* This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
|
||||
* The only Atmel DMA Controller that is not covered by this driver is the one
|
||||
* found on AT91SAM9263.
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
|
@ -1217,7 +1216,7 @@ static const struct platform_device_id atdma_devtypes[] = {
|
|||
}
|
||||
};
|
||||
|
||||
static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
|
||||
static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
|
||||
struct platform_device *pdev)
|
||||
{
|
||||
if (pdev->dev.of_node) {
|
||||
|
@ -1255,7 +1254,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
|
|||
int irq;
|
||||
int err;
|
||||
int i;
|
||||
struct at_dma_platform_data *plat_dat;
|
||||
const struct at_dma_platform_data *plat_dat;
|
||||
|
||||
/* setup platform data for each SoC */
|
||||
dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
|
||||
|
|
|
@ -1438,34 +1438,32 @@ static int __init coh901318_probe(struct platform_device *pdev)
|
|||
|
||||
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!io)
|
||||
goto err_get_resource;
|
||||
return -ENODEV;
|
||||
|
||||
/* Map DMA controller registers to virtual memory */
|
||||
if (request_mem_region(io->start,
|
||||
if (devm_request_mem_region(&pdev->dev,
|
||||
io->start,
|
||||
resource_size(io),
|
||||
pdev->dev.driver->name) == NULL) {
|
||||
err = -EBUSY;
|
||||
goto err_request_mem;
|
||||
}
|
||||
pdev->dev.driver->name) == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
pdata = pdev->dev.platform_data;
|
||||
if (!pdata)
|
||||
goto err_no_platformdata;
|
||||
return -ENODEV;
|
||||
|
||||
base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) +
|
||||
base = devm_kzalloc(&pdev->dev,
|
||||
ALIGN(sizeof(struct coh901318_base), 4) +
|
||||
pdata->max_channels *
|
||||
sizeof(struct coh901318_chan),
|
||||
GFP_KERNEL);
|
||||
if (!base)
|
||||
goto err_alloc_coh_dma_channels;
|
||||
return -ENOMEM;
|
||||
|
||||
base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);
|
||||
|
||||
base->virtbase = ioremap(io->start, resource_size(io));
|
||||
if (!base->virtbase) {
|
||||
err = -ENOMEM;
|
||||
goto err_no_ioremap;
|
||||
}
|
||||
base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io));
|
||||
if (!base->virtbase)
|
||||
return -ENOMEM;
|
||||
|
||||
base->dev = &pdev->dev;
|
||||
base->platform = pdata;
|
||||
|
@ -1474,25 +1472,20 @@ static int __init coh901318_probe(struct platform_device *pdev)
|
|||
|
||||
COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);
|
||||
|
||||
platform_set_drvdata(pdev, base);
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0)
|
||||
goto err_no_irq;
|
||||
return irq;
|
||||
|
||||
err = request_irq(irq, dma_irq_handler, IRQF_DISABLED,
|
||||
err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
|
||||
"coh901318", base);
|
||||
if (err) {
|
||||
dev_crit(&pdev->dev,
|
||||
"Cannot allocate IRQ for DMA controller!\n");
|
||||
goto err_request_irq;
|
||||
}
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = coh901318_pool_create(&base->pool, &pdev->dev,
|
||||
sizeof(struct coh901318_lli),
|
||||
32);
|
||||
if (err)
|
||||
goto err_pool_create;
|
||||
return err;
|
||||
|
||||
/* init channels for device transfers */
|
||||
coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
|
||||
|
@ -1538,6 +1531,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
|
|||
if (err)
|
||||
goto err_register_memcpy;
|
||||
|
||||
platform_set_drvdata(pdev, base);
|
||||
dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
|
||||
(u32) base->virtbase);
|
||||
|
||||
|
@ -1547,19 +1541,6 @@ static int __init coh901318_probe(struct platform_device *pdev)
|
|||
dma_async_device_unregister(&base->dma_slave);
|
||||
err_register_slave:
|
||||
coh901318_pool_destroy(&base->pool);
|
||||
err_pool_create:
|
||||
free_irq(platform_get_irq(pdev, 0), base);
|
||||
err_request_irq:
|
||||
err_no_irq:
|
||||
iounmap(base->virtbase);
|
||||
err_no_ioremap:
|
||||
kfree(base);
|
||||
err_alloc_coh_dma_channels:
|
||||
err_no_platformdata:
|
||||
release_mem_region(pdev->resource->start,
|
||||
resource_size(pdev->resource));
|
||||
err_request_mem:
|
||||
err_get_resource:
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -1570,11 +1551,6 @@ static int __exit coh901318_remove(struct platform_device *pdev)
|
|||
dma_async_device_unregister(&base->dma_memcpy);
|
||||
dma_async_device_unregister(&base->dma_slave);
|
||||
coh901318_pool_destroy(&base->pool);
|
||||
free_irq(platform_get_irq(pdev, 0), base);
|
||||
iounmap(base->virtbase);
|
||||
kfree(base);
|
||||
release_mem_region(pdev->resource->start,
|
||||
resource_size(pdev->resource));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -45,6 +45,8 @@
|
|||
* See Documentation/dmaengine.txt for more details
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -261,7 +263,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
|
|||
do {
|
||||
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
|
||||
if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
|
||||
printk(KERN_ERR "dma_sync_wait_timeout!\n");
|
||||
pr_err("%s: timeout!\n", __func__);
|
||||
return DMA_ERROR;
|
||||
}
|
||||
} while (status == DMA_IN_PROGRESS);
|
||||
|
@ -312,7 +314,7 @@ static int __init dma_channel_table_init(void)
|
|||
}
|
||||
|
||||
if (err) {
|
||||
pr_err("dmaengine: initialization failure\n");
|
||||
pr_err("initialization failure\n");
|
||||
for_each_dma_cap_mask(cap, dma_cap_mask_all)
|
||||
if (channel_table[cap])
|
||||
free_percpu(channel_table[cap]);
|
||||
|
@ -520,8 +522,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
|
|||
err = dma_chan_get(chan);
|
||||
|
||||
if (err == -ENODEV) {
|
||||
pr_debug("%s: %s module removed\n", __func__,
|
||||
dma_chan_name(chan));
|
||||
pr_debug("%s: %s module removed\n",
|
||||
__func__, dma_chan_name(chan));
|
||||
list_del_rcu(&device->global_node);
|
||||
} else if (err)
|
||||
pr_debug("%s: failed to get %s: (%d)\n",
|
||||
|
@ -535,7 +537,9 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
|
|||
}
|
||||
mutex_unlock(&dma_list_mutex);
|
||||
|
||||
pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
|
||||
pr_debug("%s: %s (%s)\n",
|
||||
__func__,
|
||||
chan ? "success" : "fail",
|
||||
chan ? dma_chan_name(chan) : NULL);
|
||||
|
||||
return chan;
|
||||
|
|
|
@ -105,13 +105,13 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
|
|||
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
|
||||
i++;
|
||||
if (async_tx_test_ack(&desc->txd)) {
|
||||
list_del(&desc->desc_node);
|
||||
ret = desc;
|
||||
break;
|
||||
}
|
||||
dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
|
||||
i++;
|
||||
}
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
|
@ -191,6 +191,42 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
|
|||
|
||||
/*----------------------------------------------------------------------*/
|
||||
|
||||
static inline unsigned int dwc_fast_fls(unsigned long long v)
|
||||
{
|
||||
/*
|
||||
* We can be a lot more clever here, but this should take care
|
||||
* of the most common optimization.
|
||||
*/
|
||||
if (!(v & 7))
|
||||
return 3;
|
||||
else if (!(v & 3))
|
||||
return 2;
|
||||
else if (!(v & 1))
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
|
||||
{
|
||||
dev_err(chan2dev(&dwc->chan),
|
||||
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
|
||||
channel_readl(dwc, SAR),
|
||||
channel_readl(dwc, DAR),
|
||||
channel_readl(dwc, LLP),
|
||||
channel_readl(dwc, CTL_HI),
|
||||
channel_readl(dwc, CTL_LO));
|
||||
}
|
||||
|
||||
|
||||
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
||||
{
|
||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||
cpu_relax();
|
||||
}
|
||||
|
||||
/*----------------------------------------------------------------------*/
|
||||
|
||||
/* Called with dwc->lock held and bh disabled */
|
||||
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
|
||||
{
|
||||
|
@ -200,13 +236,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
|
|||
if (dma_readl(dw, CH_EN) & dwc->mask) {
|
||||
dev_err(chan2dev(&dwc->chan),
|
||||
"BUG: Attempted to start non-idle channel\n");
|
||||
dev_err(chan2dev(&dwc->chan),
|
||||
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
|
||||
channel_readl(dwc, SAR),
|
||||
channel_readl(dwc, DAR),
|
||||
channel_readl(dwc, LLP),
|
||||
channel_readl(dwc, CTL_HI),
|
||||
channel_readl(dwc, CTL_LO));
|
||||
dwc_dump_chan_regs(dwc);
|
||||
|
||||
/* The tasklet will hopefully advance the queue... */
|
||||
return;
|
||||
|
@ -290,9 +320,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|||
"BUG: XFER bit set, but channel not idle!\n");
|
||||
|
||||
/* Try to continue after resetting the channel... */
|
||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||
cpu_relax();
|
||||
dwc_chan_disable(dw, dwc);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -337,7 +365,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|||
return;
|
||||
}
|
||||
|
||||
dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
|
||||
dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
|
||||
(unsigned long long)llp);
|
||||
|
||||
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
|
||||
/* check first descriptors addr */
|
||||
|
@ -373,9 +402,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|||
"BUG: All descriptors done, but channel not idle!\n");
|
||||
|
||||
/* Try to continue after resetting the channel... */
|
||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||
cpu_relax();
|
||||
dwc_chan_disable(dw, dwc);
|
||||
|
||||
if (!list_empty(&dwc->queue)) {
|
||||
list_move(dwc->queue.next, &dwc->active_list);
|
||||
|
@ -384,12 +411,11 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
|||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
}
|
||||
|
||||
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
|
||||
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
|
||||
{
|
||||
dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
|
||||
" desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
|
||||
lli->sar, lli->dar, lli->llp,
|
||||
lli->ctlhi, lli->ctllo);
|
||||
lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
|
||||
}
|
||||
|
||||
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
||||
|
@ -487,17 +513,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
|
|||
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
|
||||
dev_err(chan2dev(&dwc->chan),
|
||||
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
|
||||
channel_readl(dwc, SAR),
|
||||
channel_readl(dwc, DAR),
|
||||
channel_readl(dwc, LLP),
|
||||
channel_readl(dwc, CTL_HI),
|
||||
channel_readl(dwc, CTL_LO));
|
||||
dwc_dump_chan_regs(dwc);
|
||||
|
||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||
cpu_relax();
|
||||
dwc_chan_disable(dw, dwc);
|
||||
|
||||
/* make sure DMA does not restart by loading a new list */
|
||||
channel_writel(dwc, LLP, 0);
|
||||
|
@ -527,7 +545,7 @@ static void dw_dma_tasklet(unsigned long data)
|
|||
status_xfer = dma_readl(dw, RAW.XFER);
|
||||
status_err = dma_readl(dw, RAW.ERROR);
|
||||
|
||||
dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);
|
||||
dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
|
||||
|
||||
for (i = 0; i < dw->dma.chancnt; i++) {
|
||||
dwc = &dw->chan[i];
|
||||
|
@ -551,7 +569,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
|
|||
struct dw_dma *dw = dev_id;
|
||||
u32 status;
|
||||
|
||||
dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
|
||||
dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
|
||||
dma_readl(dw, STATUS_INT));
|
||||
|
||||
/*
|
||||
|
@ -597,12 +615,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
|
|||
* for DMA. But this is hard to do in a race-free manner.
|
||||
*/
|
||||
if (list_empty(&dwc->active_list)) {
|
||||
dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
|
||||
dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
|
||||
desc->txd.cookie);
|
||||
list_add_tail(&desc->desc_node, &dwc->active_list);
|
||||
dwc_dostart(dwc, dwc_first_active(dwc));
|
||||
} else {
|
||||
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
|
||||
dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
|
||||
desc->txd.cookie);
|
||||
|
||||
list_add_tail(&desc->desc_node, &dwc->queue);
|
||||
|
@ -627,26 +645,17 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
|
|||
unsigned int dst_width;
|
||||
u32 ctllo;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
|
||||
dest, src, len, flags);
|
||||
dev_vdbg(chan2dev(chan),
|
||||
"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
|
||||
(unsigned long long)dest, (unsigned long long)src,
|
||||
len, flags);
|
||||
|
||||
if (unlikely(!len)) {
|
||||
dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
|
||||
dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* We can be a lot more clever here, but this should take care
|
||||
* of the most common optimization.
|
||||
*/
|
||||
if (!((src | dest | len) & 7))
|
||||
src_width = dst_width = 3;
|
||||
else if (!((src | dest | len) & 3))
|
||||
src_width = dst_width = 2;
|
||||
else if (!((src | dest | len) & 1))
|
||||
src_width = dst_width = 1;
|
||||
else
|
||||
src_width = dst_width = 0;
|
||||
src_width = dst_width = dwc_fast_fls(src | dest | len);
|
||||
|
||||
ctllo = DWC_DEFAULT_CTLLO(chan)
|
||||
| DWC_CTLL_DST_WIDTH(dst_width)
|
||||
|
@ -720,7 +729,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
struct scatterlist *sg;
|
||||
size_t total_len = 0;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
|
||||
dev_vdbg(chan2dev(chan), "%s\n", __func__);
|
||||
|
||||
if (unlikely(!dws || !sg_len))
|
||||
return NULL;
|
||||
|
@ -746,14 +755,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
mem = sg_dma_address(sg);
|
||||
len = sg_dma_len(sg);
|
||||
|
||||
if (!((mem | len) & 7))
|
||||
mem_width = 3;
|
||||
else if (!((mem | len) & 3))
|
||||
mem_width = 2;
|
||||
else if (!((mem | len) & 1))
|
||||
mem_width = 1;
|
||||
else
|
||||
mem_width = 0;
|
||||
mem_width = dwc_fast_fls(mem | len);
|
||||
|
||||
slave_sg_todev_fill_desc:
|
||||
desc = dwc_desc_get(dwc);
|
||||
|
@ -813,14 +815,7 @@ slave_sg_todev_fill_desc:
|
|||
mem = sg_dma_address(sg);
|
||||
len = sg_dma_len(sg);
|
||||
|
||||
if (!((mem | len) & 7))
|
||||
mem_width = 3;
|
||||
else if (!((mem | len) & 3))
|
||||
mem_width = 2;
|
||||
else if (!((mem | len) & 1))
|
||||
mem_width = 1;
|
||||
else
|
||||
mem_width = 0;
|
||||
mem_width = dwc_fast_fls(mem | len);
|
||||
|
||||
slave_sg_fromdev_fill_desc:
|
||||
desc = dwc_desc_get(dwc);
|
||||
|
@ -950,9 +945,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||
} else if (cmd == DMA_TERMINATE_ALL) {
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
|
||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||
cpu_relax();
|
||||
dwc_chan_disable(dw, dwc);
|
||||
|
||||
dwc->paused = false;
|
||||
|
||||
|
@ -1014,7 +1007,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|||
int i;
|
||||
unsigned long flags;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
|
||||
dev_vdbg(chan2dev(chan), "%s\n", __func__);
|
||||
|
||||
/* ASSERT: channel is idle */
|
||||
if (dma_readl(dw, CH_EN) & dwc->mask) {
|
||||
|
@ -1057,8 +1050,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|||
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
dev_dbg(chan2dev(chan),
|
||||
"alloc_chan_resources allocated %d descriptors\n", i);
|
||||
dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
|
||||
|
||||
return i;
|
||||
}
|
||||
|
@ -1071,7 +1063,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
|
|||
unsigned long flags;
|
||||
LIST_HEAD(list);
|
||||
|
||||
dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
|
||||
dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
|
||||
dwc->descs_allocated);
|
||||
|
||||
/* ASSERT: channel is idle */
|
||||
|
@ -1097,7 +1089,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
|
|||
kfree(desc);
|
||||
}
|
||||
|
||||
dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
|
||||
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
|
||||
}
|
||||
|
||||
/* --------------------- Cyclic DMA API extensions -------------------- */
|
||||
|
@ -1126,13 +1118,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
|
|||
if (dma_readl(dw, CH_EN) & dwc->mask) {
|
||||
dev_err(chan2dev(&dwc->chan),
|
||||
"BUG: Attempted to start non-idle channel\n");
|
||||
dev_err(chan2dev(&dwc->chan),
|
||||
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
|
||||
channel_readl(dwc, SAR),
|
||||
channel_readl(dwc, DAR),
|
||||
channel_readl(dwc, LLP),
|
||||
channel_readl(dwc, CTL_HI),
|
||||
channel_readl(dwc, CTL_LO));
|
||||
dwc_dump_chan_regs(dwc);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
@ -1167,9 +1153,7 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
|
|||
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
|
||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||
cpu_relax();
|
||||
dwc_chan_disable(dw, dwc);
|
||||
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
}
|
||||
|
@ -1308,9 +1292,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
|
|||
dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
|
||||
sizeof(last->lli), DMA_TO_DEVICE);
|
||||
|
||||
dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
|
||||
"period %zu periods %d\n", buf_addr, buf_len,
|
||||
period_len, periods);
|
||||
dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
|
||||
"period %zu periods %d\n", (unsigned long long)buf_addr,
|
||||
buf_len, period_len, periods);
|
||||
|
||||
cdesc->periods = periods;
|
||||
dwc->cdesc = cdesc;
|
||||
|
@ -1340,16 +1324,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
|
|||
int i;
|
||||
unsigned long flags;
|
||||
|
||||
dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
|
||||
dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
|
||||
|
||||
if (!cdesc)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
|
||||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||
while (dma_readl(dw, CH_EN) & dwc->mask)
|
||||
cpu_relax();
|
||||
dwc_chan_disable(dw, dwc);
|
||||
|
||||
dma_writel(dw, CLEAR.ERROR, dwc->mask);
|
||||
dma_writel(dw, CLEAR.XFER, dwc->mask);
|
||||
|
@ -1386,7 +1368,7 @@ static void dw_dma_off(struct dw_dma *dw)
|
|||
dw->chan[i].initialized = false;
|
||||
}
|
||||
|
||||
static int __init dw_probe(struct platform_device *pdev)
|
||||
static int __devinit dw_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct dw_dma_platform_data *pdata;
|
||||
struct resource *io;
|
||||
|
@ -1432,9 +1414,15 @@ static int __init dw_probe(struct platform_device *pdev)
|
|||
}
|
||||
clk_prepare_enable(dw->clk);
|
||||
|
||||
/* Calculate all channel mask before DMA setup */
|
||||
dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
|
||||
|
||||
/* force dma off, just in case */
|
||||
dw_dma_off(dw);
|
||||
|
||||
/* disable BLOCK interrupts as well */
|
||||
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
|
||||
|
||||
err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
|
||||
if (err)
|
||||
goto err_irq;
|
||||
|
@ -1443,8 +1431,6 @@ static int __init dw_probe(struct platform_device *pdev)
|
|||
|
||||
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
|
||||
|
||||
dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
|
||||
|
||||
INIT_LIST_HEAD(&dw->dma.channels);
|
||||
for (i = 0; i < pdata->nr_channels; i++) {
|
||||
struct dw_dma_chan *dwc = &dw->chan[i];
|
||||
|
@ -1474,17 +1460,13 @@ static int __init dw_probe(struct platform_device *pdev)
|
|||
channel_clear_bit(dw, CH_EN, dwc->mask);
|
||||
}
|
||||
|
||||
/* Clear/disable all interrupts on all channels. */
|
||||
/* Clear all interrupts on all channels. */
|
||||
dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
|
||||
dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
|
||||
dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
|
||||
dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
|
||||
dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
|
||||
|
||||
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
|
||||
channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
|
||||
channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
|
||||
channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
|
||||
|
||||
dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
|
||||
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
|
||||
if (pdata->is_private)
|
||||
|
@ -1523,7 +1505,7 @@ err_kfree:
|
|||
return err;
|
||||
}
|
||||
|
||||
static int __exit dw_remove(struct platform_device *pdev)
|
||||
static int __devexit dw_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct dw_dma *dw = platform_get_drvdata(pdev);
|
||||
struct dw_dma_chan *dwc, *_dwc;
|
||||
|
@ -1602,7 +1584,7 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table);
|
|||
#endif
|
||||
|
||||
static struct platform_driver dw_driver = {
|
||||
.remove = __exit_p(dw_remove),
|
||||
.remove = __devexit_p(dw_remove),
|
||||
.shutdown = dw_shutdown,
|
||||
.driver = {
|
||||
.name = "dw_dmac",
|
||||
|
|
|
@ -82,7 +82,7 @@ struct dw_dma_regs {
|
|||
DW_REG(ID);
|
||||
DW_REG(TEST);
|
||||
|
||||
/* optional encoded params, 0x3c8..0x3 */
|
||||
/* optional encoded params, 0x3c8..0x3f7 */
|
||||
};
|
||||
|
||||
/* Bitfields in CTL_LO */
|
||||
|
@ -219,9 +219,9 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
|
|||
/* LLI == Linked List Item; a.k.a. DMA block descriptor */
|
||||
struct dw_lli {
|
||||
/* values that are not changed by hardware */
|
||||
dma_addr_t sar;
|
||||
dma_addr_t dar;
|
||||
dma_addr_t llp; /* chain to next lli */
|
||||
u32 sar;
|
||||
u32 dar;
|
||||
u32 llp; /* chain to next lli */
|
||||
u32 ctllo;
|
||||
/* values that may get written back: */
|
||||
u32 ctlhi;
|
||||
|
|
|
@ -0,0 +1,610 @@
|
|||
/*
|
||||
* Driver For Marvell Two-channel DMA Engine
|
||||
*
|
||||
* Copyright: Marvell International Ltd.
|
||||
*
|
||||
* The code contained herein is licensed under the GNU General Public
|
||||
* License. You may obtain a copy of the GNU General Public License
|
||||
* Version 2 or later at the following locations:
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/device.h>
|
||||
#include <mach/regs-icu.h>
|
||||
#include <mach/sram.h>
|
||||
|
||||
#include "dmaengine.h"
|
||||
|
||||
/*
|
||||
* Two-Channel DMA registers
|
||||
*/
|
||||
#define TDBCR 0x00 /* Byte Count */
|
||||
#define TDSAR 0x10 /* Src Addr */
|
||||
#define TDDAR 0x20 /* Dst Addr */
|
||||
#define TDNDPR 0x30 /* Next Desc */
|
||||
#define TDCR 0x40 /* Control */
|
||||
#define TDCP 0x60 /* Priority*/
|
||||
#define TDCDPR 0x70 /* Current Desc */
|
||||
#define TDIMR 0x80 /* Int Mask */
|
||||
#define TDISR 0xa0 /* Int Status */
|
||||
|
||||
/* Two-Channel DMA Control Register */
|
||||
#define TDCR_SSZ_8_BITS (0x0 << 22) /* Sample Size */
|
||||
#define TDCR_SSZ_12_BITS (0x1 << 22)
|
||||
#define TDCR_SSZ_16_BITS (0x2 << 22)
|
||||
#define TDCR_SSZ_20_BITS (0x3 << 22)
|
||||
#define TDCR_SSZ_24_BITS (0x4 << 22)
|
||||
#define TDCR_SSZ_32_BITS (0x5 << 22)
|
||||
#define TDCR_SSZ_SHIFT (0x1 << 22)
|
||||
#define TDCR_SSZ_MASK (0x7 << 22)
|
||||
/* Two-Channel DMA Control Register (TDCR) bit fields */
#define TDCR_SSPMOD	(0x1 << 21)	/* SSP MOD */
#define TDCR_ABR	(0x1 << 20)	/* Channel Abort */
#define TDCR_CDE	(0x1 << 17)	/* Close Desc Enable */
#define TDCR_PACKMOD	(0x1 << 16)	/* Pack Mode (ADMA Only) */
#define TDCR_CHANACT	(0x1 << 14)	/* Channel Active */
#define TDCR_FETCHND	(0x1 << 13)	/* Fetch Next Desc */
#define TDCR_CHANEN	(0x1 << 12)	/* Channel Enable */
#define TDCR_INTMODE	(0x1 << 10)	/* Interrupt Mode */
#define TDCR_CHAINMOD	(0x1 << 9)	/* Chain Mode */
#define TDCR_BURSTSZ_MSK	(0x7 << 6)	/* Burst Size */
#define TDCR_BURSTSZ_4B	(0x0 << 6)
#define TDCR_BURSTSZ_8B	(0x1 << 6)
#define TDCR_BURSTSZ_16B	(0x3 << 6)
#define TDCR_BURSTSZ_32B	(0x6 << 6)
#define TDCR_BURSTSZ_64B	(0x7 << 6)
#define TDCR_BURSTSZ_SQU_32B	(0x7 << 6)
#define TDCR_BURSTSZ_128B	(0x5 << 6)
#define TDCR_DSTDIR_MSK	(0x3 << 4)	/* Dst Direction */
#define TDCR_DSTDIR_ADDR_HOLD	(0x2 << 4)	/* Dst Addr Hold */
#define TDCR_DSTDIR_ADDR_INC	(0x0 << 4)	/* Dst Addr Increment */
#define TDCR_SRCDIR_MSK	(0x3 << 2)	/* Src Direction */
#define TDCR_SRCDIR_ADDR_HOLD	(0x2 << 2)	/* Src Addr Hold */
#define TDCR_SRCDIR_ADDR_INC	(0x0 << 2)	/* Src Addr Increment */
#define TDCR_DSTDESCCONT	(0x1 << 1)
#define TDCR_SRCDESTCONT	(0x1 << 0)

/* Two-Channel DMA Int Mask Register */
#define TDIMR_COMP		(0x1 << 0)

/* Two-Channel DMA Int Status Register */
#define TDISR_COMP		(0x1 << 0)

/*
 * Two-Channel DMA Descriptor Struct
 * NOTE: desc's buf must be aligned to 16 bytes.
 */
struct mmp_tdma_desc {
	u32 byte_cnt;	/* transfer length in bytes */
	u32 src_addr;	/* source bus address */
	u32 dst_addr;	/* destination bus address */
	u32 nxt_desc;	/* bus address of the next descriptor in the chain */
};

/* Controller flavour, selected via platform_device_id driver_data */
enum mmp_tdma_type {
	MMP_AUD_TDMA = 0,	/* MMP audio DMA */
	PXA910_SQU,		/* PXA910 SQU */
};

#define TDMA_ALIGNMENT		3
#define TDMA_MAX_XFER_BYTES    SZ_64K

/* Per-channel driver state */
struct mmp_tdma_chan {
	struct device			*dev;
	struct dma_chan			chan;		/* dmaengine channel */
	struct dma_async_tx_descriptor	desc;		/* single reusable tx descriptor */
	struct tasklet_struct		tasklet;	/* runs the client callback */

	struct mmp_tdma_desc		*desc_arr;	/* HW descriptor ring (in SRAM) */
	phys_addr_t			desc_arr_phys;	/* bus address of desc_arr */
	int				desc_num;	/* number of descriptors in the ring */
	enum dma_transfer_direction	dir;
	dma_addr_t			dev_addr;	/* peripheral FIFO address */
	u32				burst_sz;
	enum dma_slave_buswidth		buswidth;
	enum dma_status			status;		/* software channel state */

	int				idx;		/* channel index within the device */
	enum mmp_tdma_type		type;
	int				irq;		/* per-channel irq, 0 if shared */
	unsigned long			reg_base;	/* channel register window */

	size_t				buf_len;	/* total cyclic buffer length */
	size_t				period_len;
	size_t				pos;		/* current position, advanced per period irq */
};

#define TDMA_CHANNEL_NUM 2
/* Per-controller driver state */
struct mmp_tdma_device {
	struct device			*dev;
	void __iomem			*base;		/* mapped MMIO region */
	struct dma_device		device;		/* dmaengine device */
	struct mmp_tdma_chan		*tdmac[TDMA_CHANNEL_NUM];
	int				irq;		/* shared irq, 0 if per-channel irqs are used */
};

#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
|
||||
|
||||
/* Point the channel at a descriptor chain and ask the HW to fetch it. */
static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
{
	writel(phys, tdmac->reg_base + TDNDPR);
	writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND,
					tdmac->reg_base + TDCR);
}

/* Unmask the completion interrupt and start the channel. */
static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
{
	/* enable irq */
	writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
	/* enable dma chan */
	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_IN_PROGRESS;
}

/* Stop the channel; software state goes back to idle (DMA_SUCCESS). */
static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
{
	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_SUCCESS;
}

/* Re-enable a paused channel without reprogramming descriptors. */
static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
{
	writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_IN_PROGRESS;
}

/* Pause the channel by clearing the enable bit; descriptors stay programmed. */
static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac)
{
	writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
					tdmac->reg_base + TDCR);
	tdmac->status = DMA_PAUSED;
}
|
||||
|
||||
static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
|
||||
{
|
||||
unsigned int tdcr;
|
||||
|
||||
mmp_tdma_disable_chan(tdmac);
|
||||
|
||||
if (tdmac->dir == DMA_MEM_TO_DEV)
|
||||
tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
|
||||
else if (tdmac->dir == DMA_DEV_TO_MEM)
|
||||
tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC;
|
||||
|
||||
if (tdmac->type == MMP_AUD_TDMA) {
|
||||
tdcr |= TDCR_PACKMOD;
|
||||
|
||||
switch (tdmac->burst_sz) {
|
||||
case 4:
|
||||
tdcr |= TDCR_BURSTSZ_4B;
|
||||
break;
|
||||
case 8:
|
||||
tdcr |= TDCR_BURSTSZ_8B;
|
||||
break;
|
||||
case 16:
|
||||
tdcr |= TDCR_BURSTSZ_16B;
|
||||
break;
|
||||
case 32:
|
||||
tdcr |= TDCR_BURSTSZ_32B;
|
||||
break;
|
||||
case 64:
|
||||
tdcr |= TDCR_BURSTSZ_64B;
|
||||
break;
|
||||
case 128:
|
||||
tdcr |= TDCR_BURSTSZ_128B;
|
||||
break;
|
||||
default:
|
||||
dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (tdmac->buswidth) {
|
||||
case DMA_SLAVE_BUSWIDTH_1_BYTE:
|
||||
tdcr |= TDCR_SSZ_8_BITS;
|
||||
break;
|
||||
case DMA_SLAVE_BUSWIDTH_2_BYTES:
|
||||
tdcr |= TDCR_SSZ_16_BITS;
|
||||
break;
|
||||
case DMA_SLAVE_BUSWIDTH_4_BYTES:
|
||||
tdcr |= TDCR_SSZ_32_BITS;
|
||||
break;
|
||||
default:
|
||||
dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
} else if (tdmac->type == PXA910_SQU) {
|
||||
tdcr |= TDCR_BURSTSZ_SQU_32B;
|
||||
tdcr |= TDCR_SSPMOD;
|
||||
}
|
||||
|
||||
writel(tdcr, tdmac->reg_base + TDCR);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
|
||||
{
|
||||
u32 reg = readl(tdmac->reg_base + TDISR);
|
||||
|
||||
if (reg & TDISR_COMP) {
|
||||
/* clear irq */
|
||||
reg &= ~TDISR_COMP;
|
||||
writel(reg, tdmac->reg_base + TDISR);
|
||||
|
||||
return 0;
|
||||
}
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
|
||||
{
|
||||
struct mmp_tdma_chan *tdmac = dev_id;
|
||||
|
||||
if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
|
||||
tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len;
|
||||
tasklet_schedule(&tdmac->tasklet);
|
||||
return IRQ_HANDLED;
|
||||
} else
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id)
|
||||
{
|
||||
struct mmp_tdma_device *tdev = dev_id;
|
||||
int i, ret;
|
||||
int irq_num = 0;
|
||||
|
||||
for (i = 0; i < TDMA_CHANNEL_NUM; i++) {
|
||||
struct mmp_tdma_chan *tdmac = tdev->tdmac[i];
|
||||
|
||||
ret = mmp_tdma_chan_handler(irq, tdmac);
|
||||
if (ret == IRQ_HANDLED)
|
||||
irq_num++;
|
||||
}
|
||||
|
||||
if (irq_num)
|
||||
return IRQ_HANDLED;
|
||||
else
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
/* Tasklet body: invoke the client's completion callback outside irq context. */
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;

	if (tdmac->desc.callback)
		tdmac->desc.callback(tdmac->desc.callback_param);

}

/* Return the channel's descriptor ring to the "asram" SRAM pool. */
static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = sram_get_gpool("asram");
	if (tdmac->desc_arr)
		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
				size);
	/* clear the pointer so a second free is a no-op */
	tdmac->desc_arr = NULL;

	return;
}
|
||||
|
||||
/*
 * dmaengine .tx_submit: hand the prepared descriptor ring to the hardware.
 * NOTE(review): returns 0 instead of an assigned cookie — confirm whether
 * dma_cookie_assign() should be used here as in other dmaengine drivers.
 */
static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan);

	mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys);

	return 0;
}

/*
 * dmaengine .device_alloc_chan_resources: set up the reusable tx
 * descriptor and, when the channel owns a dedicated irq, request it.
 * Returns 1 (one "descriptor") on success or a negative errno.
 */
static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	int ret;

	dma_async_tx_descriptor_init(&tdmac->desc, chan);
	tdmac->desc.tx_submit = mmp_tdma_tx_submit;

	/* irq == 0 means the controller-level shared irq is in use instead */
	if (tdmac->irq) {
		ret = devm_request_irq(tdmac->dev, tdmac->irq,
			mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac);
		if (ret)
			return ret;
	}
	return 1;
}
|
||||
|
||||
/* dmaengine .device_free_chan_resources: release the irq and SRAM ring. */
static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	if (tdmac->irq)
		devm_free_irq(tdmac->dev, tdmac->irq, tdmac);
	mmp_tdma_free_descriptor(tdmac);
	return;
}

/*
 * Allocate the HW descriptor ring (desc_num entries) from the "asram"
 * SRAM pool and record its bus address. Returns the ring or NULL.
 */
struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
{
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

	gpool = sram_get_gpool("asram");
	if (!gpool)
		return NULL;

	tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size);
	if (!tdmac->desc_arr)
		return NULL;

	tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool,
			(unsigned long)tdmac->desc_arr);

	return tdmac->desc_arr;
}
|
||||
|
||||
/*
 * dmaengine .device_prep_dma_cyclic: build a circular descriptor chain of
 * buf_len/period_len entries, the last one linking back to the first.
 * Each period raises a completion interrupt (see mmp_tdma_chan_handler).
 *
 * Returns the channel's reusable tx descriptor, or NULL on error (the
 * channel status is then set to DMA_ERROR).
 */
static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	struct mmp_tdma_desc *desc;
	int num_periods;
	int i = 0, buf = 0;

	if (tdmac->status != DMA_SUCCESS)
		return NULL;

	/* guard the division below and reject a zero-length period */
	if (period_len == 0) {
		dev_err(tdmac->dev, "period should not be zero\n");
		goto err_out;
	}

	if (period_len > TDMA_MAX_XFER_BYTES) {
		dev_err(tdmac->dev,
				/* %zu: period_len is a size_t; %d was UB */
				"maximum period size exceeded: %zu > %d\n",
				period_len, TDMA_MAX_XFER_BYTES);
		goto err_out;
	}

	num_periods = buf_len / period_len;

	tdmac->status = DMA_IN_PROGRESS;
	tdmac->desc_num = num_periods;
	desc = mmp_tdma_alloc_descriptor(tdmac);
	if (!desc)
		goto err_out;

	while (buf < buf_len) {
		desc = &tdmac->desc_arr[i];

		/* last descriptor wraps back to the head of the ring */
		if (i + 1 == num_periods)
			desc->nxt_desc = tdmac->desc_arr_phys;
		else
			desc->nxt_desc = tdmac->desc_arr_phys +
				sizeof(*desc) * (i + 1);

		if (direction == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr;
			desc->dst_addr = tdmac->dev_addr;
		} else {
			desc->src_addr = tdmac->dev_addr;
			desc->dst_addr = dma_addr;
		}
		desc->byte_cnt = period_len;
		dma_addr += period_len;
		buf += period_len;
		i++;
	}

	tdmac->buf_len = buf_len;
	tdmac->period_len = period_len;
	tdmac->pos = 0;

	return &tdmac->desc;

err_out:
	tdmac->status = DMA_ERROR;
	return NULL;
}
|
||||
|
||||
/*
 * dmaengine .device_control: terminate/pause/resume the channel, or take a
 * slave configuration. For DMA_SLAVE_CONFIG the peripheral address, burst
 * and bus width are copied from the side matching the configured direction,
 * then TDCR is reprogrammed via mmp_tdma_config_chan().
 *
 * Returns 0, a config error from mmp_tdma_config_chan(), or -ENOSYS for
 * unsupported commands.
 */
static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		mmp_tdma_disable_chan(tdmac);
		break;
	case DMA_PAUSE:
		mmp_tdma_pause_chan(tdmac);
		break;
	case DMA_RESUME:
		mmp_tdma_resume_chan(tdmac);
		break;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			tdmac->dev_addr = dmaengine_cfg->src_addr;
			tdmac->burst_sz = dmaengine_cfg->src_maxburst;
			tdmac->buswidth = dmaengine_cfg->src_addr_width;
		} else {
			tdmac->dev_addr = dmaengine_cfg->dst_addr;
			tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
			tdmac->buswidth = dmaengine_cfg->dst_addr_width;
		}
		tdmac->dir = dmaengine_cfg->direction;
		return mmp_tdma_config_chan(tdmac);
	default:
		ret = -ENOSYS;
	}

	return ret;
}
|
||||
|
||||
/*
 * dmaengine .device_tx_status: report the software channel state and the
 * residue (bytes not yet transferred in the current cyclic buffer).
 */
static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	dma_set_residue(txstate, tdmac->buf_len - tdmac->pos);

	return tdmac->status;
}

/* dmaengine .device_issue_pending: actually start the channel. */
static void mmp_tdma_issue_pending(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	mmp_tdma_enable_chan(tdmac);
}

/* Platform driver remove: unregister from the dmaengine core. */
static int __devexit mmp_tdma_remove(struct platform_device *pdev)
{
	struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&tdev->device);
	return 0;
}
|
||||
|
||||
/*
 * Allocate and initialize one channel, wire it into the dma_device's
 * channel list. @irq is the base of per-channel irqs (0 when the shared
 * controller irq is used instead). Returns 0 or a negative errno.
 */
static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
						int idx, int irq, int type)
{
	struct mmp_tdma_chan *tdmac;

	if (idx >= TDMA_CHANNEL_NUM) {
		dev_err(tdev->dev, "too many channels for device!\n");
		return -EINVAL;
	}

	/* alloc channel */
	tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
	if (!tdmac) {
		dev_err(tdev->dev, "no free memory for DMA channels!\n");
		return -ENOMEM;
	}
	if (irq)
		tdmac->irq = irq + idx;	/* consecutive irqs, one per channel */
	tdmac->dev	   = tdev->dev;
	tdmac->chan.device = &tdev->device;
	tdmac->idx	   = idx;
	tdmac->type	   = type;
	/* each channel's register window lives at a 4-byte offset per index */
	tdmac->reg_base	   = (unsigned long)tdev->base + idx * 4;
	tdmac->status = DMA_SUCCESS;
	tdev->tdmac[tdmac->idx] = tdmac;
	tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);

	/* add the channel to tdma_chan list */
	list_add_tail(&tdmac->chan.device_node,
			&tdev->device.channels);

	return 0;
}
|
||||
|
||||
/*
 * Platform driver probe: map registers, pick shared vs per-channel irq
 * handling based on the size of the IRQ resource, create the channels and
 * register the dma_device with the dmaengine core.
 */
static int __devinit mmp_tdma_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	enum mmp_tdma_type type = id->driver_data;
	struct mmp_tdma_device *tdev;
	struct resource *iores;
	int i, ret;
	int irq = 0;
	int chan_num = TDMA_CHANNEL_NUM;

	/* always have couple channels */
	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	tdev->dev = &pdev->dev;
	iores = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!iores)
		return -EINVAL;

	/*
	 * One irq per channel -> base irq passed down to chan_init;
	 * otherwise a single shared irq serviced by mmp_tdma_int_handler.
	 */
	if (resource_size(iores) != chan_num)
		tdev->irq = iores->start;
	else
		irq = iores->start;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	tdev->base = devm_request_and_ioremap(&pdev->dev, iores);
	if (!tdev->base)
		return -EADDRNOTAVAIL;

	if (tdev->irq) {
		ret = devm_request_irq(&pdev->dev, tdev->irq,
			mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);

	INIT_LIST_HEAD(&tdev->device.channels);

	/* initialize channel parameters */
	for (i = 0; i < chan_num; i++) {
		ret = mmp_tdma_chan_init(tdev, i, irq, type);
		if (ret)
			return ret;
	}

	tdev->device.dev = &pdev->dev;
	tdev->device.device_alloc_chan_resources =
					mmp_tdma_alloc_chan_resources;
	tdev->device.device_free_chan_resources =
					mmp_tdma_free_chan_resources;
	tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
	tdev->device.device_tx_status = mmp_tdma_tx_status;
	tdev->device.device_issue_pending = mmp_tdma_issue_pending;
	tdev->device.device_control = mmp_tdma_control;
	tdev->device.copy_align = TDMA_ALIGNMENT;

	/* NOTE(review): dma_set_mask() return value is ignored — confirm */
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	platform_set_drvdata(pdev, tdev);

	ret = dma_async_device_register(&tdev->device);
	if (ret) {
		dev_err(tdev->device.dev, "unable to register\n");
		return ret;
	}

	dev_info(tdev->device.dev, "initialized\n");
	return 0;
}
|
||||
|
||||
/* Device-id table: driver_data selects the controller flavour. */
static const struct platform_device_id mmp_tdma_id_table[] = {
	{ "mmp-adma",	MMP_AUD_TDMA },
	{ "pxa910-squ",	PXA910_SQU },
	{ },
};

static struct platform_driver mmp_tdma_driver = {
	.driver		= {
		.name	= "mmp-tdma",
		.owner  = THIS_MODULE,
	},
	.id_table	= mmp_tdma_id_table,
	.probe		= mmp_tdma_probe,
	.remove		= __devexit_p(mmp_tdma_remove),
};

module_platform_driver(mmp_tdma_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMP Two-Channel DMA Driver");
MODULE_ALIAS("platform:mmp-tdma");
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
|
|
@ -29,7 +29,6 @@
|
|||
#include <linux/of_device.h>
|
||||
|
||||
#include <asm/irq.h>
|
||||
#include <mach/mxs.h>
|
||||
|
||||
#include "dmaengine.h"
|
||||
|
||||
|
@ -201,6 +200,7 @@ int mxs_dma_is_apbh(struct dma_chan *chan)
|
|||
|
||||
return dma_is_apbh(mxs_dma);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mxs_dma_is_apbh);
|
||||
|
||||
int mxs_dma_is_apbx(struct dma_chan *chan)
|
||||
{
|
||||
|
@ -209,6 +209,7 @@ int mxs_dma_is_apbx(struct dma_chan *chan)
|
|||
|
||||
return !dma_is_apbh(mxs_dma);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mxs_dma_is_apbx);
|
||||
|
||||
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
|
||||
{
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
obj-$(CONFIG_SH_DMAE) += shdma-base.o
|
||||
obj-$(CONFIG_SH_DMAE) += shdma.o
|
|
@ -0,0 +1,934 @@
|
|||
/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,	/* on the free list, available */
	DESC_PREPARED,	/* prepared but not yet submitted */
	DESC_SUBMITTED,	/* queued for / undergoing transfer */
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume, that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure, that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;
|
||||
/* Called under spin_lock_irq(&schan->chan_lock") */
/* Kick the first still-pending descriptor, unless the channel is busy. */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* DMA work check */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}
|
||||
|
||||
/*
 * dmaengine .tx_submit: assign a cookie, mark every chunk of this
 * descriptor chain DESC_SUBMITTED and move it onto ld_queue. The first
 * submission onto an empty queue also powers the channel up via runtime
 * PM and programs the transfer. Returns the assigned cookie.
 */
static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx),
		*last = desc;
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	/* an empty queue means the channel may be runtime-suspended */
	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);
		last = chunk;

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &last->async_tx, schan->id);
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		/* drop the lock while waiting for the device to resume */
		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset, while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .xfer_setup() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}
|
||||
|
||||
/* Called with desc_lock held */
|
||||
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
|
||||
{
|
||||
struct shdma_desc *sdesc;
|
||||
|
||||
list_for_each_entry(sdesc, &schan->ld_free, node)
|
||||
if (sdesc->mark != DESC_PREPARED) {
|
||||
BUG_ON(sdesc->mark != DESC_IDLE);
|
||||
list_del(&sdesc->node);
|
||||
return sdesc;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Reserve a global slave ID for this channel and let the hardware driver
 * configure it. Returns 0, -EINVAL for an out-of-range ID, -EBUSY when
 * the ID is already taken, or the ops->set_slave() error.
 */
static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret;

	if (slave_id < 0 || slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, slave_id, false);
	if (ret < 0) {
		/* roll back the reservation on failure */
		clear_bit(slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = slave_id;

	return 0;
}
|
||||
|
||||
/*
 * This is the standard shdma filter function to be used as a replacement to the
 * "old" method, using the .private pointer. If for some reason you allocate a
 * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
 * parameter. If this filter is used, the slave driver, after calling
 * dma_request_channel(), will also have to call dmaengine_slave_config() with
 * .slave_id, .direction, and either .src_addr or .dst_addr set.
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers, using this
 * services would have to provide their own filters, which first would check
 * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
 * this, and only then, in case of a match, call this common filter.
 */
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	/* the slave ID is passed directly as the filter argument */
	int slave_id = (int)arg;
	int ret;

	if (slave_id < 0)
		/* No slave requested - arbitrary channel */
		return true;

	if (slave_id >= slave_num)
		return false;

	/* dry-run (true): only check whether the channel can serve this ID */
	ret = ops->set_slave(schan, slave_id, true);
	if (ret < 0)
		return false;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);
|
||||
|
||||
/*
 * dmaengine .device_alloc_chan_resources: optionally bind a legacy
 * .private slave, then allocate NR_DESCS_PER_CHANNEL embedded descriptors
 * and put them on the channel's free list. Returns the number of
 * descriptors or a negative errno.
 */
static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		ret = shdma_setup_slave(schan, slave->slave_id);
		if (ret < 0)
			goto esetslave;
	} else {
		schan->slave_id = -EINVAL;
	}

	/* descriptors are hardware-driver-sized; we only know desc_size */
	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

edescalloc:
	if (slave)
esetslave:
		/* release the slave ID reserved above */
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}
|
||||
|
||||
/*
 * Walk ld_queue once: update completed_cookie, run at most one client
 * callback per invocation, and recycle acked descriptors back onto
 * ld_free (powering the channel down when the queue empties). With
 * @all set, uncompleted descriptors are recycled too (termination path).
 * Returns the callback that was invoked (non-NULL => caller loops again).
 */
static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	unsigned long flags;

	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			/* stop here: the callback runs after the lock drops */
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;

			list_move(&desc->node, &schan->ld_free);

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	if (callback)
		callback(param);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	/* iterate: each pass runs at most one client callback */
	while (__ld_cleanup(schan, all))
		;
}
|
||||
|
||||
/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	spin_lock_irq(&schan->chan_lock);

	/* detach all descriptors before freeing the backing array */
	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}
|
||||
|
||||
/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns 0 or an error
 * Locks: called with desc_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	/* the HW driver may reduce copy_size to its maximum chunk size */
	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
		copy_size, *len, *src, *dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	/* advance the memory-side address(es) by the chunk actually set up */
	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}
|
||||
|
||||
/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains slave address,
 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
 * and the SG list contains only one element and points at the source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	/* Each SG entry may need several chunks, limited by max_xfer_len */
	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 * cookie is at first set to -EBUSY, at tx-submit to a positive
	 * number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			/* For DEV_TO_MEM the SG side is the destination */
			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	/* The last chunk of a multi-chunk transfer is marked -ENOSPC */
	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	/* Mark all chunks, allocated so far, idle again and return them */
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}
|
||||
|
||||
/*
 * Prepare a single memcpy transfer: wrap the source buffer in a one-entry
 * scatterlist and hand it to the common shdma_prep_sg() path, so that the
 * MEMCPY and slave cases share locking and list handling.
 */
static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
}
|
||||
|
||||
/*
 * Prepare a slave SG transfer. The channel must already be bound to a
 * slave (slave_id >= 0, set up via DMA_SLAVE_CONFIG); the fixed slave
 * address is obtained from the driver's .slave_addr() callback.
 */
static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags);
}
|
||||
|
||||
/*
 * dmaengine .device_control: supports DMA_TERMINATE_ALL (halt the channel
 * and recycle all queued descriptors) and DMA_SLAVE_CONFIG (bind the
 * channel to the slave given by config->slave_id).
 */
static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			  unsigned long arg)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	struct dma_slave_config *config;
	unsigned long flags;
	int ret;

	if (!chan)
		return -EINVAL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		/* Halt under the lock, then clean up with "all" = true */
		spin_lock_irqsave(&schan->chan_lock, flags);
		ops->halt_channel(schan);
		spin_unlock_irqrestore(&schan->chan_lock, flags);

		shdma_chan_ld_cleanup(schan, true);
		break;
	case DMA_SLAVE_CONFIG:
		/*
		 * So far only .slave_id is used, but the slave drivers are
		 * encouraged to also set a transfer direction and an address.
		 */
		if (!arg)
			return -EINVAL;
		/*
		 * We could lock this, but you shouldn't be configuring the
		 * channel, while using it...
		 */
		config = (struct dma_slave_config *)arg;
		ret = shdma_setup_slave(schan, config->slave_id);
		if (ret < 0)
			return ret;
		break;
	default:
		return -ENXIO;
	}

	return 0;
}
|
||||
|
||||
static void shdma_issue_pending(struct dma_chan *chan)
|
||||
{
|
||||
struct shdma_chan *schan = to_shdma_chan(chan);
|
||||
|
||||
spin_lock_irq(&schan->chan_lock);
|
||||
if (schan->pm_state == SHDMA_PM_ESTABLISHED)
|
||||
shdma_chan_xfer_ld_queue(schan);
|
||||
else
|
||||
schan->pm_state = SHDMA_PM_PENDING;
|
||||
spin_unlock_irq(&schan->chan_lock);
|
||||
}
|
||||
|
||||
/*
 * dmaengine .device_tx_status: report the state of a cookie. A cookie that
 * is neither complete nor found on the queue has been aborted, so the
 * generic DMA_SUCCESS result is downgraded to DMA_ERROR / DMA_IN_PROGRESS.
 */
static enum dma_status shdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find cookie on the queue, it has been aborted and we have
	 * to report error
	 */
	if (status != DMA_SUCCESS) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}
|
||||
|
||||
/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		/* Steal the whole queue onto a private list */
		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/*
		 * Complete all: callbacks are invoked without the channel
		 * lock held, on the private list.
		 */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
			sdesc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		/* Return the descriptors to the free list */
		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);
|
||||
|
||||
static irqreturn_t chan_irq(int irq, void *dev)
|
||||
{
|
||||
struct shdma_chan *schan = dev;
|
||||
const struct shdma_ops *ops =
|
||||
to_shdma_dev(schan->dma_chan.device)->ops;
|
||||
irqreturn_t ret;
|
||||
|
||||
spin_lock(&schan->chan_lock);
|
||||
|
||||
ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
|
||||
|
||||
spin_unlock(&schan->chan_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Threaded IRQ half: mark the first completed submitted descriptor as
 * DESC_COMPLETED, restart the queue, then run descriptor cleanup outside
 * the lock.
 */
static irqreturn_t chan_irqt(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}
|
||||
|
||||
int shdma_request_irq(struct shdma_chan *schan, int irq,
|
||||
unsigned long flags, const char *name)
|
||||
{
|
||||
int ret = request_threaded_irq(irq, chan_irq, chan_irqt,
|
||||
flags, name, schan);
|
||||
|
||||
schan->irq = ret < 0 ? ret : irq;
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(shdma_request_irq);
|
||||
|
||||
/* Free the channel IRQ, if shdma_request_irq() succeeded earlier. */
void shdma_free_irq(struct shdma_chan *schan)
{
	if (schan->irq >= 0)
		free_irq(schan->irq, schan);
}
EXPORT_SYMBOL(shdma_free_irq);
|
||||
|
||||
/*
 * Initialise one channel and register it with the shdma device: set up
 * locking, descriptor lists, cookies and add it to the dmaengine channel
 * list. Called by hardware drivers once per channel during probing.
 */
void shdma_chan_probe(struct shdma_dev *sdev,
			   struct shdma_chan *schan, int id)
{
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;
	schan->id = id;

	/* Drivers may set a larger limit before calling in */
	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Init descriptor manage list */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
	sdev->schan[sdev->dma_dev.chancnt++] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);
|
||||
|
||||
/* Unlink the channel from the dmaengine device channel list. */
void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);
|
||||
|
||||
/*
 * Initialise a shdma device for a hardware driver: validate the mandatory
 * callbacks, allocate the channel array and populate the dmaengine
 * operations. Returns 0 on success, -EINVAL on a missing callback,
 * -ENOMEM on allocation failure.
 */
int shdma_init(struct device *dev, struct shdma_dev *sdev,
		    int chan_num)
{
	struct dma_device *dma_dev = &sdev->dma_dev;

	/*
	 * Require all call-backs for now, they can trivially be made optional
	 * later as required
	 */
	if (!sdev->ops ||
	    !sdev->desc_size ||
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)
		return -EINVAL;

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
	if (!sdev->schan)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_control = shdma_control;

	dma_dev->dev = dev;

	return 0;
}
EXPORT_SYMBOL(shdma_init);
|
||||
|
||||
/* Release the channel array allocated by shdma_init(). */
void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);
|
||||
|
||||
/*
 * Module init: allocate the zeroed bitmap of used slave IDs, one bit per
 * possible slave. kcalloc() is used instead of kzalloc() with an
 * open-coded multiplication, so the size computation is overflow-checked.
 */
static int __init shdma_enter(void)
{
	shdma_slave_used = kcalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG),
				   sizeof(long), GFP_KERNEL);
	if (!shdma_slave_used)
		return -ENOMEM;
	return 0;
}
module_init(shdma_enter);
|
||||
|
||||
static void __exit shdma_exit(void)
|
||||
{
|
||||
kfree(shdma_slave_used);
|
||||
}
|
||||
module_exit(shdma_exit);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_DESCRIPTION("SH-DMA driver base library");
|
||||
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
|
|
@ -0,0 +1,943 @@
|
|||
/*
|
||||
* Renesas SuperH DMA Engine support
|
||||
*
|
||||
 * base is drivers/dma/fsldma.c
|
||||
*
|
||||
* Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
|
||||
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
|
||||
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
|
||||
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
|
||||
*
|
||||
* This is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* - DMA of SuperH does not have Hardware DMA chain mode.
|
||||
* - MAX DMA size is 16MB.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/sh_dma.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/rculist.h>
|
||||
|
||||
#include "../dmaengine.h"
|
||||
#include "shdma.h"
|
||||
|
||||
#define SH_DMAE_DRV_NAME "sh-dma-engine"
|
||||
|
||||
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
|
||||
#define LOG2_DEFAULT_XFER_SIZE 2
|
||||
#define SH_DMA_SLAVE_NUMBER 256
|
||||
#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
|
||||
|
||||
/*
|
||||
* Used for write-side mutual exclusion for the global device list,
|
||||
* read-side synchronization by way of RCU, and per-controller data.
|
||||
*/
|
||||
static DEFINE_SPINLOCK(sh_dmae_lock);
|
||||
static LIST_HEAD(sh_dmae_devices);
|
||||
|
||||
/* Write the per-channel CHCLR (channel clear) register from platform data. */
static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, shdev->chan_reg +
		     shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
}
|
||||
|
||||
/* Write a 32-bit channel register; reg is a byte offset into u32 space. */
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}
|
||||
|
||||
/* Read a 32-bit channel register; reg is a byte offset into u32 space. */
static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}
|
||||
|
||||
static u16 dmaor_read(struct sh_dmae_device *shdev)
|
||||
{
|
||||
u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
|
||||
|
||||
if (shdev->pdata->dmaor_is_32bit)
|
||||
return __raw_readl(addr);
|
||||
else
|
||||
return __raw_readw(addr);
|
||||
}
|
||||
|
||||
/* Write DMAOR: a 32-bit access on parts with dmaor_is_32bit, else 16-bit. */
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}
|
||||
|
||||
/* Write CHCR; its offset is per-SoC, taken from shdev->chcr_offset. */
static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
}
|
||||
|
||||
/* Read CHCR; its offset is per-SoC, taken from shdev->chcr_offset. */
static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}
|
||||
|
||||
/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR register
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	/* sh_dmae_lock serialises DMAOR access across controllers */
	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	/* Clear master enable and latched error flags */
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}
|
||||
|
||||
/*
 * Re-initialise DMAOR: clear error flags, optionally pulse the per-channel
 * CHCLR registers, then write the platform's initial DMAOR value and verify
 * it latched. Returns 0 on success, -EIO if errors remain asserted.
 */
static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			/* Channels might not all have probed successfully */
			if (sh_chan)
				chclr_write(sh_chan, 0);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	/* Read back to check what actually latched */
	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}
|
||||
|
||||
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
|
||||
{
|
||||
u32 chcr = chcr_read(sh_chan);
|
||||
|
||||
if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
|
||||
return true; /* working */
|
||||
|
||||
return false; /* waiting */
|
||||
}
|
||||
|
||||
/*
 * Decode the transfer-size (TS) bit-fields of CHCR into a log2 transfer
 * shift, using the platform's ts_shift table.
 */
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_pdata *pdata = to_sh_dev(sh_chan)->pdata;
	int ts_index = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	/* An out-of-range encoding falls back to the first table entry */
	if (ts_index >= pdata->ts_shift_num)
		ts_index = 0;

	return pdata->ts_shift[ts_index];
}
|
||||
|
||||
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
|
||||
{
|
||||
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
|
||||
struct sh_dmae_pdata *pdata = shdev->pdata;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pdata->ts_shift_num; i++)
|
||||
if (pdata->ts_shift[i] == l2size)
|
||||
break;
|
||||
|
||||
if (i == pdata->ts_shift_num)
|
||||
i = 0;
|
||||
|
||||
return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
|
||||
((i << pdata->ts_high_shift) & pdata->ts_high_mask);
|
||||
}
|
||||
|
||||
/*
 * Program source, destination and transfer count; TCR is written in
 * transfer units, hence the shift by xmit_shift.
 */
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}
|
||||
|
||||
/* Enable the channel: set DE and the interrupt-enable bit, clear TE. */
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	/* Some SoCs require TEND to be written before starting */
	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}
|
||||
|
||||
static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}
|
||||
|
||||
/* Set CHCR and cache the resulting transfer shift; -EBUSY while active. */
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}
|
||||
|
||||
/*
 * Program the channel's request-select (DMARS) field: a read-modify-write
 * of the 8-bit field at chan_pdata->dmars_bit within a shared 16-bit
 * register. No-op on parts without DMARS; -EBUSY while the channel runs.
 */
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	u16 __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
|
||||
|
||||
/* shdma_ops::start_xfer: program the hardware registers and start DMA. */
static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}
|
||||
|
||||
static bool sh_dmae_channel_busy(struct shdma_chan *schan)
|
||||
{
|
||||
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
|
||||
shdma_chan);
|
||||
return dmae_is_busy(sh_chan);
|
||||
}
|
||||
|
||||
static void sh_dmae_setup_xfer(struct shdma_chan *schan,
|
||||
int slave_id)
|
||||
{
|
||||
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
|
||||
shdma_chan);
|
||||
|
||||
if (slave_id >= 0) {
|
||||
const struct sh_dmae_slave_config *cfg =
|
||||
sh_chan->config;
|
||||
|
||||
dmae_set_dmars(sh_chan, cfg->mid_rid);
|
||||
dmae_set_chcr(sh_chan, cfg->chcr);
|
||||
} else {
|
||||
dmae_init(sh_chan);
|
||||
}
|
||||
}
|
||||
|
||||
static const struct sh_dmae_slave_config *dmae_find_slave(
|
||||
struct sh_dmae_chan *sh_chan, int slave_id)
|
||||
{
|
||||
struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
|
||||
struct sh_dmae_pdata *pdata = shdev->pdata;
|
||||
const struct sh_dmae_slave_config *cfg;
|
||||
int i;
|
||||
|
||||
if (slave_id >= SH_DMA_SLAVE_NUMBER)
|
||||
return NULL;
|
||||
|
||||
for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
|
||||
if (cfg->slave_id == slave_id)
|
||||
return cfg;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * shdma_ops::set_slave: validate slave_id against the platform data and,
 * unless this is only a trial match, bind the configuration to the channel.
 */
static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg;

	cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENODEV;

	/* A "try" match only checks availability, it must not bind */
	if (!try)
		sh_chan->config = cfg;

	return 0;
}
|
||||
|
||||
/* Stop the channel: clear DE, TE and the interrupt-enable bit in CHCR. */
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}
|
||||
|
||||
/*
 * shdma_ops::desc_setup: fill the hardware descriptor fields; *len is
 * clamped to the channel's maximum transfer length and reported back.
 */
static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	size_t count = *len;

	if (count > schan->max_xfer_len)
		count = schan->max_xfer_len;

	*len = count;
	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = count;

	return 0;
}
|
||||
|
||||
/* shdma_ops::halt_channel: stop the hardware channel. */
static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}
|
||||
|
||||
/*
 * shdma_ops::chan_irq: return true (and halt the channel) if this channel
 * has its transfer-end flag set, false if the interrupt wasn't ours.
 */
static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}
|
||||
|
||||
/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect, which channel caused the error, have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	/* Re-enable the controller after the base-library reset */
	sh_dmae_rst(shdev);

	return ret;
}
|
||||
|
||||
static irqreturn_t sh_dmae_err(int irq, void *data)
|
||||
{
|
||||
struct sh_dmae_device *shdev = data;
|
||||
|
||||
if (!(dmaor_read(shdev) & DMAOR_AE))
|
||||
return IRQ_NONE;
|
||||
|
||||
sh_dmae_reset(shdev);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/*
 * shdma_ops::desc_completed: a descriptor is done when the hardware
 * address counter on the RAM side (DAR for DEV_TO_MEM, SAR otherwise)
 * has advanced past the descriptor's end address.
 */
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}
|
||||
|
||||
static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
|
||||
{
|
||||
/* Fast path out if NMIF is not asserted for this controller */
|
||||
if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
|
||||
return false;
|
||||
|
||||
return sh_dmae_reset(shdev);
|
||||
}
|
||||
|
||||
static int sh_dmae_nmi_handler(struct notifier_block *self,
|
||||
unsigned long cmd, void *data)
|
||||
{
|
||||
struct sh_dmae_device *shdev;
|
||||
int ret = NOTIFY_DONE;
|
||||
bool triggered;
|
||||
|
||||
/*
|
||||
* Only concern ourselves with NMI events.
|
||||
*
|
||||
* Normally we would check the die chain value, but as this needs
|
||||
* to be architecture independent, check for NMI context instead.
|
||||
*/
|
||||
if (!in_nmi())
|
||||
return NOTIFY_DONE;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
|
||||
/*
|
||||
* Only stop if one of the controllers has NMIF asserted,
|
||||
* we do not want to interfere with regular address error
|
||||
* handling or NMI events that don't concern the DMACs.
|
||||
*/
|
||||
triggered = sh_dmae_nmi_notify(shdev);
|
||||
if (triggered == true)
|
||||
ret = NOTIFY_OK;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Die-chain notifier registration for sh_dmae_nmi_handler(). */
static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};
|
||||
|
||||
/*
 * Probe one DMAC channel: allocate the channel structure, register it with
 * the shdma base library, map its register window and request its IRQ.
 * On IRQ failure the channel is unregistered and freed again.
 */
static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	/* TCR counts up to SH_DMA_TCR_MAX, hence max length is TCR_MAX + 1 */
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	kfree(sh_chan);
	return err;
}
|
||||
|
||||
/* Tear down all channels: free IRQs, unregister and free each channel. */
static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
		BUG_ON(!schan);

		shdma_free_irq(&sh_chan->shdma_chan);

		shdma_chan_remove(schan);
		kfree(sh_chan);
	}
	dma_dev->chancnt = 0;
}
|
||||
|
||||
/* Platform shutdown hook: quiesce the controller. */
static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}
|
||||
|
||||
/* Runtime suspend: nothing to save, hardware state is restored on resume. */
static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}
|
||||
|
||||
/* Runtime resume: re-initialise DMAOR. */
static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
/* System suspend: nothing to save, channels are reprogrammed on resume. */
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}
|
||||
|
||||
static int sh_dmae_resume(struct device *dev)
|
||||
{
|
||||
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
|
||||
int i, ret;
|
||||
|
||||
ret = sh_dmae_rst(shdev);
|
||||
if (ret < 0)
|
||||
dev_err(dev, "Failed to reset!\n");
|
||||
|
||||
for (i = 0; i < shdev->pdata->channel_num; i++) {
|
||||
struct sh_dmae_chan *sh_chan = shdev->chan[i];
|
||||
|
||||
if (!sh_chan->shdma_chan.desc_num)
|
||||
continue;
|
||||
|
||||
if (sh_chan->shdma_chan.slave_id >= 0) {
|
||||
const struct sh_dmae_slave_config *cfg = sh_chan->config;
|
||||
dmae_set_dmars(sh_chan, cfg->mid_rid);
|
||||
dmae_set_chcr(sh_chan, cfg->chcr);
|
||||
} else {
|
||||
dmae_init(sh_chan);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
#define sh_dmae_suspend NULL
|
||||
#define sh_dmae_resume NULL
|
||||
#endif
|
||||
|
||||
/* PM callbacks; suspend/resume compile to NULL when CONFIG_PM is off. */
const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};
|
||||
|
||||
/* shdma_ops::slave_addr: return the bound slave's fixed I/O address. */
static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->config->addr;
}
|
||||
|
||||
static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
|
||||
{
|
||||
return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
|
||||
}
|
||||
|
||||
static const struct shdma_ops sh_dmae_shdma_ops = {
|
||||
.desc_completed = sh_dmae_desc_completed,
|
||||
.halt_channel = sh_dmae_halt,
|
||||
.channel_busy = sh_dmae_channel_busy,
|
||||
.slave_addr = sh_dmae_slave_addr,
|
||||
.desc_setup = sh_dmae_desc_setup,
|
||||
.set_slave = sh_dmae_set_slave,
|
||||
.setup_xfer = sh_dmae_setup_xfer,
|
||||
.start_xfer = sh_dmae_start_xfer,
|
||||
.embedded_desc = sh_dmae_embedded_desc,
|
||||
.chan_irq = sh_dmae_chan_irq,
|
||||
};
|
||||
|
||||
static int __devinit sh_dmae_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
|
||||
unsigned long irqflags = IRQF_DISABLED,
|
||||
chan_flag[SH_DMAE_MAX_CHANNELS] = {};
|
||||
int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
|
||||
int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
|
||||
struct sh_dmae_device *shdev;
|
||||
struct dma_device *dma_dev;
|
||||
struct resource *chan, *dmars, *errirq_res, *chanirq_res;
|
||||
|
||||
/* get platform data */
|
||||
if (!pdata || !pdata->channel_num)
|
||||
return -ENODEV;
|
||||
|
||||
chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
/* DMARS area is optional */
|
||||
dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
||||
/*
|
||||
* IRQ resources:
|
||||
* 1. there always must be at least one IRQ IO-resource. On SH4 it is
|
||||
* the error IRQ, in which case it is the only IRQ in this resource:
|
||||
* start == end. If it is the only IRQ resource, all channels also
|
||||
* use the same IRQ.
|
||||
* 2. DMA channel IRQ resources can be specified one per resource or in
|
||||
* ranges (start != end)
|
||||
* 3. iff all events (channels and, optionally, error) on this
|
||||
* controller use the same IRQ, only one IRQ resource can be
|
||||
* specified, otherwise there must be one IRQ per channel, even if
|
||||
* some of them are equal
|
||||
* 4. if all IRQs on this controller are equal or if some specific IRQs
|
||||
* specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
|
||||
* requested with the IRQF_SHARED flag
|
||||
*/
|
||||
errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
|
||||
if (!chan || !errirq_res)
|
||||
return -ENODEV;
|
||||
|
||||
if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
|
||||
dev_err(&pdev->dev, "DMAC register region already claimed\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
|
||||
dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
|
||||
err = -EBUSY;
|
||||
goto ermrdmars;
|
||||
}
|
||||
|
||||
err = -ENOMEM;
|
||||
shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
|
||||
if (!shdev) {
|
||||
dev_err(&pdev->dev, "Not enough memory\n");
|
||||
goto ealloc;
|
||||
}
|
||||
|
||||
dma_dev = &shdev->shdma_dev.dma_dev;
|
||||
|
||||
shdev->chan_reg = ioremap(chan->start, resource_size(chan));
|
||||
if (!shdev->chan_reg)
|
||||
goto emapchan;
|
||||
if (dmars) {
|
||||
shdev->dmars = ioremap(dmars->start, resource_size(dmars));
|
||||
if (!shdev->dmars)
|
||||
goto emapdmars;
|
||||
}
|
||||
|
||||
if (!pdata->slave_only)
|
||||
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
|
||||
if (pdata->slave && pdata->slave_num)
|
||||
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
|
||||
|
||||
/* Default transfer size of 32 bytes requires 32-byte alignment */
|
||||
dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
|
||||
|
||||
shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
|
||||
shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
|
||||
err = shdma_init(&pdev->dev, &shdev->shdma_dev,
|
||||
pdata->channel_num);
|
||||
if (err < 0)
|
||||
goto eshdma;
|
||||
|
||||
/* platform data */
|
||||
shdev->pdata = pdev->dev.platform_data;
|
||||
|
||||
if (pdata->chcr_offset)
|
||||
shdev->chcr_offset = pdata->chcr_offset;
|
||||
else
|
||||
shdev->chcr_offset = CHCR;
|
||||
|
||||
if (pdata->chcr_ie_bit)
|
||||
shdev->chcr_ie_bit = pdata->chcr_ie_bit;
|
||||
else
|
||||
shdev->chcr_ie_bit = CHCR_IE;
|
||||
|
||||
platform_set_drvdata(pdev, shdev);
|
||||
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
err = pm_runtime_get_sync(&pdev->dev);
|
||||
if (err < 0)
|
||||
dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
|
||||
|
||||
spin_lock_irq(&sh_dmae_lock);
|
||||
list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
|
||||
spin_unlock_irq(&sh_dmae_lock);
|
||||
|
||||
/* reset dma controller - only needed as a test */
|
||||
err = sh_dmae_rst(shdev);
|
||||
if (err)
|
||||
goto rst_err;
|
||||
|
||||
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
|
||||
chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
|
||||
|
||||
if (!chanirq_res)
|
||||
chanirq_res = errirq_res;
|
||||
else
|
||||
irqres++;
|
||||
|
||||
if (chanirq_res == errirq_res ||
|
||||
(errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
|
||||
irqflags = IRQF_SHARED;
|
||||
|
||||
errirq = errirq_res->start;
|
||||
|
||||
err = request_irq(errirq, sh_dmae_err, irqflags,
|
||||
"DMAC Address Error", shdev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev,
|
||||
"DMA failed requesting irq #%d, error %d\n",
|
||||
errirq, err);
|
||||
goto eirq_err;
|
||||
}
|
||||
|
||||
#else
|
||||
chanirq_res = errirq_res;
|
||||
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
|
||||
|
||||
if (chanirq_res->start == chanirq_res->end &&
|
||||
!platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
|
||||
/* Special case - all multiplexed */
|
||||
for (; irq_cnt < pdata->channel_num; irq_cnt++) {
|
||||
if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
|
||||
chan_irq[irq_cnt] = chanirq_res->start;
|
||||
chan_flag[irq_cnt] = IRQF_SHARED;
|
||||
} else {
|
||||
irq_cap = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
do {
|
||||
for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
|
||||
if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
|
||||
irq_cap = 1;
|
||||
break;
|
||||
}
|
||||
|
||||
if ((errirq_res->flags & IORESOURCE_BITS) ==
|
||||
IORESOURCE_IRQ_SHAREABLE)
|
||||
chan_flag[irq_cnt] = IRQF_SHARED;
|
||||
else
|
||||
chan_flag[irq_cnt] = IRQF_DISABLED;
|
||||
dev_dbg(&pdev->dev,
|
||||
"Found IRQ %d for channel %d\n",
|
||||
i, irq_cnt);
|
||||
chan_irq[irq_cnt++] = i;
|
||||
}
|
||||
|
||||
if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
|
||||
break;
|
||||
|
||||
chanirq_res = platform_get_resource(pdev,
|
||||
IORESOURCE_IRQ, ++irqres);
|
||||
} while (irq_cnt < pdata->channel_num && chanirq_res);
|
||||
}
|
||||
|
||||
/* Create DMA Channel */
|
||||
for (i = 0; i < irq_cnt; i++) {
|
||||
err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
|
||||
if (err)
|
||||
goto chan_probe_err;
|
||||
}
|
||||
|
||||
if (irq_cap)
|
||||
dev_notice(&pdev->dev, "Attempting to register %d DMA "
|
||||
"channels when a maximum of %d are supported.\n",
|
||||
pdata->channel_num, SH_DMAE_MAX_CHANNELS);
|
||||
|
||||
pm_runtime_put(&pdev->dev);
|
||||
|
||||
err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
|
||||
if (err < 0)
|
||||
goto edmadevreg;
|
||||
|
||||
return err;
|
||||
|
||||
edmadevreg:
|
||||
pm_runtime_get(&pdev->dev);
|
||||
|
||||
chan_probe_err:
|
||||
sh_dmae_chan_remove(shdev);
|
||||
|
||||
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
|
||||
free_irq(errirq, shdev);
|
||||
eirq_err:
|
||||
#endif
|
||||
rst_err:
|
||||
spin_lock_irq(&sh_dmae_lock);
|
||||
list_del_rcu(&shdev->node);
|
||||
spin_unlock_irq(&sh_dmae_lock);
|
||||
|
||||
pm_runtime_put(&pdev->dev);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
shdma_cleanup(&shdev->shdma_dev);
|
||||
eshdma:
|
||||
if (dmars)
|
||||
iounmap(shdev->dmars);
|
||||
emapdmars:
|
||||
iounmap(shdev->chan_reg);
|
||||
synchronize_rcu();
|
||||
emapchan:
|
||||
kfree(shdev);
|
||||
ealloc:
|
||||
if (dmars)
|
||||
release_mem_region(dmars->start, resource_size(dmars));
|
||||
ermrdmars:
|
||||
release_mem_region(chan->start, resource_size(chan));
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int __devexit sh_dmae_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
|
||||
struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
|
||||
struct resource *res;
|
||||
int errirq = platform_get_irq(pdev, 0);
|
||||
|
||||
dma_async_device_unregister(dma_dev);
|
||||
|
||||
if (errirq > 0)
|
||||
free_irq(errirq, shdev);
|
||||
|
||||
spin_lock_irq(&sh_dmae_lock);
|
||||
list_del_rcu(&shdev->node);
|
||||
spin_unlock_irq(&sh_dmae_lock);
|
||||
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
sh_dmae_chan_remove(shdev);
|
||||
shdma_cleanup(&shdev->shdma_dev);
|
||||
|
||||
if (shdev->dmars)
|
||||
iounmap(shdev->dmars);
|
||||
iounmap(shdev->chan_reg);
|
||||
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
|
||||
synchronize_rcu();
|
||||
kfree(shdev);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (res)
|
||||
release_mem_region(res->start, resource_size(res));
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
||||
if (res)
|
||||
release_mem_region(res->start, resource_size(res));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver sh_dmae_driver = {
|
||||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.pm = &sh_dmae_pm,
|
||||
.name = SH_DMAE_DRV_NAME,
|
||||
},
|
||||
.remove = __devexit_p(sh_dmae_remove),
|
||||
.shutdown = sh_dmae_shutdown,
|
||||
};
|
||||
|
||||
static int __init sh_dmae_init(void)
|
||||
{
|
||||
/* Wire up NMI handling */
|
||||
int err = register_die_notifier(&sh_dmae_nmi_notifier);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
|
||||
}
|
||||
module_init(sh_dmae_init);
|
||||
|
||||
static void __exit sh_dmae_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&sh_dmae_driver);
|
||||
|
||||
unregister_die_notifier(&sh_dmae_nmi_notifier);
|
||||
}
|
||||
module_exit(sh_dmae_exit);
|
||||
|
||||
MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
|
||||
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
|
|
@ -13,42 +13,29 @@
|
|||
#ifndef __DMA_SHDMA_H
|
||||
#define __DMA_SHDMA_H
|
||||
|
||||
#include <linux/sh_dma.h>
|
||||
#include <linux/shdma-base.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/list.h>
|
||||
|
||||
#define SH_DMAC_MAX_CHANNELS 20
|
||||
#define SH_DMA_SLAVE_NUMBER 256
|
||||
#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
|
||||
#define SH_DMAE_MAX_CHANNELS 20
|
||||
#define SH_DMAE_TCR_MAX 0x00FFFFFF /* 16MB */
|
||||
|
||||
struct device;
|
||||
|
||||
enum dmae_pm_state {
|
||||
DMAE_PM_ESTABLISHED,
|
||||
DMAE_PM_BUSY,
|
||||
DMAE_PM_PENDING,
|
||||
};
|
||||
|
||||
struct sh_dmae_chan {
|
||||
spinlock_t desc_lock; /* Descriptor operation lock */
|
||||
struct list_head ld_queue; /* Link descriptors queue */
|
||||
struct list_head ld_free; /* Link descriptors free */
|
||||
struct dma_chan common; /* DMA common channel */
|
||||
struct device *dev; /* Channel device */
|
||||
struct tasklet_struct tasklet; /* Tasklet */
|
||||
int descs_allocated; /* desc count */
|
||||
struct shdma_chan shdma_chan;
|
||||
const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
|
||||
int xmit_shift; /* log_2(bytes_per_xfer) */
|
||||
int irq;
|
||||
int id; /* Raw id of this channel */
|
||||
u32 __iomem *base;
|
||||
char dev_id[16]; /* unique name per DMAC of channel */
|
||||
int pm_error;
|
||||
enum dmae_pm_state pm_state;
|
||||
};
|
||||
|
||||
struct sh_dmae_device {
|
||||
struct dma_device common;
|
||||
struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS];
|
||||
struct shdma_dev shdma_dev;
|
||||
struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
|
||||
struct sh_dmae_pdata *pdata;
|
||||
struct list_head node;
|
||||
u32 __iomem *chan_reg;
|
||||
|
@ -57,10 +44,21 @@ struct sh_dmae_device {
|
|||
u32 chcr_ie_bit;
|
||||
};
|
||||
|
||||
#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
|
||||
struct sh_dmae_regs {
|
||||
u32 sar; /* SAR / source address */
|
||||
u32 dar; /* DAR / destination address */
|
||||
u32 tcr; /* TCR / transfer count */
|
||||
};
|
||||
|
||||
struct sh_dmae_desc {
|
||||
struct sh_dmae_regs hw;
|
||||
struct shdma_desc shdma_desc;
|
||||
};
|
||||
|
||||
#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan)
|
||||
#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
|
||||
#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
|
||||
#define to_sh_dev(chan) container_of(chan->common.device,\
|
||||
struct sh_dmae_device, common)
|
||||
#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
|
||||
struct sh_dmae_device, shdma_dev.dma_dev)
|
||||
|
||||
#endif /* __DMA_SHDMA_H */
|
1524
drivers/dma/shdma.c
1524
drivers/dma/shdma.c
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -213,8 +213,6 @@ struct sh_mmcif_host {
|
|||
struct mmc_host *mmc;
|
||||
struct mmc_request *mrq;
|
||||
struct platform_device *pd;
|
||||
struct sh_dmae_slave dma_slave_tx;
|
||||
struct sh_dmae_slave dma_slave_rx;
|
||||
struct clk *hclk;
|
||||
unsigned int clk;
|
||||
int bus_width;
|
||||
|
@ -373,59 +371,69 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
|
|||
desc, cookie);
|
||||
}
|
||||
|
||||
static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
|
||||
{
|
||||
dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
|
||||
chan->private = arg;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
|
||||
struct sh_mmcif_plat_data *pdata)
|
||||
{
|
||||
struct sh_dmae_slave *tx, *rx;
|
||||
struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
|
||||
struct dma_slave_config cfg;
|
||||
dma_cap_mask_t mask;
|
||||
int ret;
|
||||
|
||||
host->dma_active = false;
|
||||
|
||||
if (!pdata)
|
||||
return;
|
||||
|
||||
/* We can only either use DMA for both Tx and Rx or not use it at all */
|
||||
if (pdata->dma) {
|
||||
dev_warn(&host->pd->dev,
|
||||
"Update your platform to use embedded DMA slave IDs\n");
|
||||
tx = &pdata->dma->chan_priv_tx;
|
||||
rx = &pdata->dma->chan_priv_rx;
|
||||
} else {
|
||||
tx = &host->dma_slave_tx;
|
||||
tx->slave_id = pdata->slave_id_tx;
|
||||
rx = &host->dma_slave_rx;
|
||||
rx->slave_id = pdata->slave_id_rx;
|
||||
}
|
||||
if (tx->slave_id > 0 && rx->slave_id > 0) {
|
||||
dma_cap_mask_t mask;
|
||||
if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
|
||||
return;
|
||||
|
||||
/* We can only either use DMA for both Tx and Rx or not use it at all */
|
||||
dma_cap_zero(mask);
|
||||
dma_cap_set(DMA_SLAVE, mask);
|
||||
|
||||
host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, tx);
|
||||
host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
|
||||
(void *)pdata->slave_id_tx);
|
||||
dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
|
||||
host->chan_tx);
|
||||
|
||||
if (!host->chan_tx)
|
||||
return;
|
||||
|
||||
host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, rx);
|
||||
cfg.slave_id = pdata->slave_id_tx;
|
||||
cfg.direction = DMA_MEM_TO_DEV;
|
||||
cfg.dst_addr = res->start + MMCIF_CE_DATA;
|
||||
cfg.src_addr = 0;
|
||||
ret = dmaengine_slave_config(host->chan_tx, &cfg);
|
||||
if (ret < 0)
|
||||
goto ecfgtx;
|
||||
|
||||
host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
|
||||
(void *)pdata->slave_id_rx);
|
||||
dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
|
||||
host->chan_rx);
|
||||
|
||||
if (!host->chan_rx) {
|
||||
dma_release_channel(host->chan_tx);
|
||||
host->chan_tx = NULL;
|
||||
return;
|
||||
}
|
||||
if (!host->chan_rx)
|
||||
goto erqrx;
|
||||
|
||||
cfg.slave_id = pdata->slave_id_rx;
|
||||
cfg.direction = DMA_DEV_TO_MEM;
|
||||
cfg.dst_addr = 0;
|
||||
cfg.src_addr = res->start + MMCIF_CE_DATA;
|
||||
ret = dmaengine_slave_config(host->chan_rx, &cfg);
|
||||
if (ret < 0)
|
||||
goto ecfgrx;
|
||||
|
||||
init_completion(&host->dma_complete);
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
ecfgrx:
|
||||
dma_release_channel(host->chan_rx);
|
||||
host->chan_rx = NULL;
|
||||
erqrx:
|
||||
ecfgtx:
|
||||
dma_release_channel(host->chan_tx);
|
||||
host->chan_tx = NULL;
|
||||
}
|
||||
|
||||
static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
|
||||
|
|
|
@ -169,10 +169,10 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
|
|||
mmc_data->get_cd = sh_mobile_sdhi_get_cd;
|
||||
|
||||
if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
|
||||
priv->param_tx.slave_id = p->dma_slave_tx;
|
||||
priv->param_rx.slave_id = p->dma_slave_rx;
|
||||
priv->dma_priv.chan_priv_tx = &priv->param_tx;
|
||||
priv->dma_priv.chan_priv_rx = &priv->param_rx;
|
||||
priv->param_tx.shdma_slave.slave_id = p->dma_slave_tx;
|
||||
priv->param_rx.shdma_slave.slave_id = p->dma_slave_rx;
|
||||
priv->dma_priv.chan_priv_tx = &priv->param_tx.shdma_slave;
|
||||
priv->dma_priv.chan_priv_rx = &priv->param_rx.shdma_slave;
|
||||
priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
|
||||
mmc_data->dma = &priv->dma_priv;
|
||||
}
|
||||
|
|
|
@ -1615,9 +1615,9 @@ static bool filter(struct dma_chan *chan, void *slave)
|
|||
struct sh_dmae_slave *param = slave;
|
||||
|
||||
dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
|
||||
param->slave_id);
|
||||
param->shdma_slave.slave_id);
|
||||
|
||||
chan->private = param;
|
||||
chan->private = ¶m->shdma_slave;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -1656,7 +1656,7 @@ static void sci_request_dma(struct uart_port *port)
|
|||
param = &s->param_tx;
|
||||
|
||||
/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
|
||||
param->slave_id = s->cfg->dma_slave_tx;
|
||||
param->shdma_slave.slave_id = s->cfg->dma_slave_tx;
|
||||
|
||||
s->cookie_tx = -EINVAL;
|
||||
chan = dma_request_channel(mask, filter, param);
|
||||
|
@ -1684,7 +1684,7 @@ static void sci_request_dma(struct uart_port *port)
|
|||
param = &s->param_rx;
|
||||
|
||||
/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
|
||||
param->slave_id = s->cfg->dma_slave_rx;
|
||||
param->shdma_slave.slave_id = s->cfg->dma_slave_rx;
|
||||
|
||||
chan = dma_request_channel(mask, filter, param);
|
||||
dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
|
||||
|
|
|
@ -994,7 +994,7 @@ static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
|
|||
*
|
||||
* usbhs doesn't recognize id = 0 as valid DMA
|
||||
*/
|
||||
if (0 == slave->slave_id)
|
||||
if (0 == slave->shdma_slave.slave_id)
|
||||
return false;
|
||||
|
||||
chan->private = slave;
|
||||
|
@ -1173,8 +1173,8 @@ int usbhs_fifo_probe(struct usbhs_priv *priv)
|
|||
fifo->port = D0FIFO;
|
||||
fifo->sel = D0FIFOSEL;
|
||||
fifo->ctr = D0FIFOCTR;
|
||||
fifo->tx_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
|
||||
fifo->rx_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
|
||||
fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
|
||||
fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
|
||||
|
||||
/* D1FIFO */
|
||||
fifo = usbhsf_get_d1fifo(priv);
|
||||
|
@ -1182,8 +1182,8 @@ int usbhs_fifo_probe(struct usbhs_priv *priv)
|
|||
fifo->port = D1FIFO;
|
||||
fifo->sel = D1FIFOSEL;
|
||||
fifo->ctr = D1FIFOCTR;
|
||||
fifo->tx_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
|
||||
fifo->rx_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
|
||||
fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
|
||||
fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -338,6 +338,9 @@ enum dma_slave_buswidth {
|
|||
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
|
||||
* with 'true' if peripheral should be flow controller. Direction will be
|
||||
* selected at Runtime.
|
||||
* @slave_id: Slave requester id. Only valid for slave channels. The dma
|
||||
* slave peripheral will have unique id as dma requester which need to be
|
||||
* pass as slave config.
|
||||
*
|
||||
* This struct is passed in as configuration data to a DMA engine
|
||||
* in order to set up a certain channel for DMA transport at runtime.
|
||||
|
@ -365,6 +368,7 @@ struct dma_slave_config {
|
|||
u32 src_maxburst;
|
||||
u32 dst_maxburst;
|
||||
bool device_fc;
|
||||
unsigned int slave_id;
|
||||
};
|
||||
|
||||
static inline const char *dma_chan_name(struct dma_chan *chan)
|
||||
|
|
|
@ -32,17 +32,11 @@
|
|||
* 1111 : Peripheral clock (sup_pclk set '1')
|
||||
*/
|
||||
|
||||
struct sh_mmcif_dma {
|
||||
struct sh_dmae_slave chan_priv_tx;
|
||||
struct sh_dmae_slave chan_priv_rx;
|
||||
};
|
||||
|
||||
struct sh_mmcif_plat_data {
|
||||
void (*set_pwr)(struct platform_device *pdev, int state);
|
||||
void (*down_pwr)(struct platform_device *pdev);
|
||||
int (*get_cd)(struct platform_device *pdef);
|
||||
struct sh_mmcif_dma *dma; /* Deprecated. Instead */
|
||||
unsigned int slave_id_tx; /* use embedded slave_id_[tr]x */
|
||||
unsigned int slave_id_tx; /* embedded slave_id_[tr]x */
|
||||
unsigned int slave_id_rx;
|
||||
bool use_cd_gpio : 1;
|
||||
unsigned int cd_gpio;
|
||||
|
|
|
@ -10,35 +10,24 @@
|
|||
#ifndef SH_DMA_H
|
||||
#define SH_DMA_H
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/shdma-base.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct device;
|
||||
|
||||
/* Used by slave DMA clients to request DMA to/from a specific peripheral */
|
||||
struct sh_dmae_slave {
|
||||
unsigned int slave_id; /* Set by the platform */
|
||||
struct device *dma_dev; /* Set by the platform */
|
||||
const struct sh_dmae_slave_config *config; /* Set by the driver */
|
||||
};
|
||||
|
||||
struct sh_dmae_regs {
|
||||
u32 sar; /* SAR / source address */
|
||||
u32 dar; /* DAR / destination address */
|
||||
u32 tcr; /* TCR / transfer count */
|
||||
};
|
||||
|
||||
struct sh_desc {
|
||||
struct sh_dmae_regs hw;
|
||||
struct list_head node;
|
||||
struct dma_async_tx_descriptor async_tx;
|
||||
enum dma_transfer_direction direction;
|
||||
dma_cookie_t cookie;
|
||||
size_t partial;
|
||||
int chunks;
|
||||
int mark;
|
||||
struct shdma_slave shdma_slave; /* Set by the platform */
|
||||
};
|
||||
|
||||
/*
|
||||
* Supplied by platforms to specify, how a DMA channel has to be configured for
|
||||
* a certain peripheral
|
||||
*/
|
||||
struct sh_dmae_slave_config {
|
||||
unsigned int slave_id;
|
||||
int slave_id;
|
||||
dma_addr_t addr;
|
||||
u32 chcr;
|
||||
char mid_rid;
|
||||
|
@ -110,4 +99,6 @@ struct sh_dmae_pdata {
|
|||
#define CHCR_TE 0x00000002
|
||||
#define CHCR_IE 0x00000004
|
||||
|
||||
bool shdma_chan_filter(struct dma_chan *chan, void *arg);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -0,0 +1,124 @@
|
|||
/*
|
||||
* Dmaengine driver base library for DMA controllers, found on SH-based SoCs
|
||||
*
|
||||
* extracted from shdma.c and headers
|
||||
*
|
||||
* Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
|
||||
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
|
||||
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
|
||||
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
|
||||
*
|
||||
* This is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef SHDMA_BASE_H
|
||||
#define SHDMA_BASE_H
|
||||
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/**
|
||||
* shdma_pm_state - DMA channel PM state
|
||||
* SHDMA_PM_ESTABLISHED: either idle or during data transfer
|
||||
* SHDMA_PM_BUSY: during the transfer preparation, when we have to
|
||||
* drop the lock temporarily
|
||||
* SHDMA_PM_PENDING: transfers pending
|
||||
*/
|
||||
enum shdma_pm_state {
|
||||
SHDMA_PM_ESTABLISHED,
|
||||
SHDMA_PM_BUSY,
|
||||
SHDMA_PM_PENDING,
|
||||
};
|
||||
|
||||
struct device;
|
||||
|
||||
/*
|
||||
* Drivers, using this library are expected to embed struct shdma_dev,
|
||||
* struct shdma_chan, struct shdma_desc, and struct shdma_slave
|
||||
* in their respective device, channel, descriptor and slave objects.
|
||||
*/
|
||||
|
||||
struct shdma_slave {
|
||||
int slave_id;
|
||||
};
|
||||
|
||||
struct shdma_desc {
|
||||
struct list_head node;
|
||||
struct dma_async_tx_descriptor async_tx;
|
||||
enum dma_transfer_direction direction;
|
||||
dma_cookie_t cookie;
|
||||
int chunks;
|
||||
int mark;
|
||||
};
|
||||
|
||||
struct shdma_chan {
|
||||
spinlock_t chan_lock; /* Channel operation lock */
|
||||
struct list_head ld_queue; /* Link descriptors queue */
|
||||
struct list_head ld_free; /* Free link descriptors */
|
||||
struct dma_chan dma_chan; /* DMA channel */
|
||||
struct device *dev; /* Channel device */
|
||||
void *desc; /* buffer for descriptor array */
|
||||
int desc_num; /* desc count */
|
||||
size_t max_xfer_len; /* max transfer length */
|
||||
int id; /* Raw id of this channel */
|
||||
int irq; /* Channel IRQ */
|
||||
int slave_id; /* Client ID for slave DMA */
|
||||
enum shdma_pm_state pm_state;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct shdma_ops - simple DMA driver operations
|
||||
* desc_completed: return true, if this is the descriptor, that just has
|
||||
* completed (atomic)
|
||||
* halt_channel: stop DMA channel operation (atomic)
|
||||
* channel_busy: return true, if the channel is busy (atomic)
|
||||
* slave_addr: return slave DMA address
|
||||
* desc_setup: set up the hardware specific descriptor portion (atomic)
|
||||
* set_slave: bind channel to a slave
|
||||
* setup_xfer: configure channel hardware for operation (atomic)
|
||||
* start_xfer: start the DMA transfer (atomic)
|
||||
* embedded_desc: return Nth struct shdma_desc pointer from the
|
||||
* descriptor array
|
||||
* chan_irq: process channel IRQ, return true if a transfer has
|
||||
* completed (atomic)
|
||||
*/
|
||||
struct shdma_ops {
|
||||
bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *);
|
||||
void (*halt_channel)(struct shdma_chan *);
|
||||
bool (*channel_busy)(struct shdma_chan *);
|
||||
dma_addr_t (*slave_addr)(struct shdma_chan *);
|
||||
int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
|
||||
dma_addr_t, dma_addr_t, size_t *);
|
||||
int (*set_slave)(struct shdma_chan *, int, bool);
|
||||
void (*setup_xfer)(struct shdma_chan *, int);
|
||||
void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
|
||||
struct shdma_desc *(*embedded_desc)(void *, int);
|
||||
bool (*chan_irq)(struct shdma_chan *, int);
|
||||
};
|
||||
|
||||
struct shdma_dev {
|
||||
struct dma_device dma_dev;
|
||||
struct shdma_chan **schan;
|
||||
const struct shdma_ops *ops;
|
||||
size_t desc_size;
|
||||
};
|
||||
|
||||
#define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \
|
||||
i < (d)->dma_dev.chancnt; c = (d)->schan[++i])
|
||||
|
||||
int shdma_request_irq(struct shdma_chan *, int,
|
||||
unsigned long, const char *);
|
||||
void shdma_free_irq(struct shdma_chan *);
|
||||
bool shdma_reset(struct shdma_dev *sdev);
|
||||
void shdma_chan_probe(struct shdma_dev *sdev,
|
||||
struct shdma_chan *schan, int id);
|
||||
void shdma_chan_remove(struct shdma_chan *schan);
|
||||
int shdma_init(struct device *dev, struct shdma_dev *sdev,
|
||||
int chan_num);
|
||||
void shdma_cleanup(struct shdma_dev *sdev);
|
||||
|
||||
#endif
|
|
@ -1631,7 +1631,7 @@ static void fsi_handler_init(struct fsi_priv *fsi)
|
|||
fsi->capture.priv = fsi;
|
||||
|
||||
if (fsi->info->tx_id) {
|
||||
fsi->playback.slave.slave_id = fsi->info->tx_id;
|
||||
fsi->playback.slave.shdma_slave.slave_id = fsi->info->tx_id;
|
||||
fsi->playback.handler = &fsi_dma_push_handler;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -330,12 +330,9 @@ static bool filter(struct dma_chan *chan, void *slave)
|
|||
{
|
||||
struct sh_dmae_slave *param = slave;
|
||||
|
||||
pr_debug("%s: slave ID %d\n", __func__, param->slave_id);
|
||||
pr_debug("%s: slave ID %d\n", __func__, param->shdma_slave.slave_id);
|
||||
|
||||
if (unlikely(param->dma_dev != chan->device->dev))
|
||||
return false;
|
||||
|
||||
chan->private = param;
|
||||
chan->private = ¶m->shdma_slave;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -360,16 +357,15 @@ static int siu_pcm_open(struct snd_pcm_substream *ss)
|
|||
if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) {
|
||||
siu_stream = &port_info->playback;
|
||||
param = &siu_stream->param;
|
||||
param->slave_id = port ? pdata->dma_slave_tx_b :
|
||||
param->shdma_slave.slave_id = port ? pdata->dma_slave_tx_b :
|
||||
pdata->dma_slave_tx_a;
|
||||
} else {
|
||||
siu_stream = &port_info->capture;
|
||||
param = &siu_stream->param;
|
||||
param->slave_id = port ? pdata->dma_slave_rx_b :
|
||||
param->shdma_slave.slave_id = port ? pdata->dma_slave_rx_b :
|
||||
pdata->dma_slave_rx_a;
|
||||
}
|
||||
|
||||
param->dma_dev = pdata->dma_dev;
|
||||
/* Get DMA channel */
|
||||
siu_stream->chan = dma_request_channel(mask, filter, param);
|
||||
if (!siu_stream->chan) {
|
||||
|
|
Loading…
Reference in New Issue