Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (53 commits)
  ARM: mach-shmobile: specify CHCLR registers on SH7372
  dma: shdma: fix runtime PM: clear channel buffers on reset
  dma/imx-sdma: save irq flags when use spin_lock in sdma_tx_submit
  dmaengine/ste_dma40: clear LNK on channel startup
  dmaengine: intel_mid_dma: remove legacy pm interface
  ASoC: mxs: correct 'direction' of device_prep_dma_cyclic
  dmaengine: intel_mid_dma: error path fix
  dmaengine: intel_mid_dma: locking and freeing fixes
  mtd: gpmi-nand: move to dma_transfer_direction
  mtd: fix compile error for gpmi-nand
  mmc: mxs-mmc: fix the dma_transfer_direction migration
  dmaengine: add DMA_TRANS_NONE to dma_transfer_direction
  dma: mxs-dma: Don't use CLKGATE bits in CTRL0 to disable DMA channels
  dma: mxs-dma: make mxs_dma_prep_slave_sg() multi user safe
  dma: mxs-dma: Always leave mxs_dma_init() with the clock disabled.
  dma: mxs-dma: fix a typo in comment
  DMA: PL330: Remove pm_runtime_xxx calls from pl330 probe/remove
  video i.MX IPU: Fix display connections
  i.MX IPU DMA: Fix wrong burstsize settings
  dmaengine/ste_dma40: allow fixed physical channel
  ...

Fix up conflicts in drivers/dma/{Kconfig,mxs-dma.c,pl330.c}.  The
conflicts looked pretty trivial, but I'll ask people to verify them.
commit 57f2685c16

@@ -0,0 +1,14 @@
* Atmel Direct Memory Access Controller (DMA)

Required properties:
- compatible: Should be "atmel,<chip>-dma"
- reg: Should contain DMA registers location and length
- interrupts: Should contain DMA interrupt

Examples:

dma@ffffec00 {
	compatible = "atmel,at91sam9g45-dma";
	reg = <0xffffec00 0x200>;
	interrupts = <21>;
};

@@ -75,6 +75,10 @@ The slave DMA usage consists of following steps:
   slave_sg	- DMA a list of scatter gather buffers from/to a peripheral
   dma_cyclic	- Perform a cyclic DMA operation from/to a peripheral till the
		  operation is explicitly stopped.
   interleaved_dma - This is common to Slave as well as M2M clients. For slave
		  address of devices' fifo could be already known to the driver.
		  Various types of operations could be expressed by setting
		  appropriate values to the 'dma_interleaved_template' members.

   A non-NULL return of this transfer API represents a "descriptor" for
   the given transaction.

@@ -89,6 +93,10 @@ The slave DMA usage consists of following steps:
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_data_direction direction);

	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
		struct dma_chan *chan, struct dma_interleaved_template *xt,
		unsigned long flags);

   The peripheral driver is expected to have mapped the scatterlist for
   the DMA operation prior to calling device_prep_slave_sg, and must
   keep the scatterlist mapped until the DMA operation has completed.
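To make the documented steps concrete, here is a minimal client-side sketch for the slave path (memory to device). It is illustrative only: my_filter is a hypothetical filter callback, the scatterlist is assumed to be already DMA-mapped as required above, it relies on the dmaengine_* helper wrappers of this kernel generation, and error handling is trimmed.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Hypothetical filter: accept whatever channel the core offers. */
static bool my_filter(struct dma_chan *chan, void *param)
{
	return true;
}

static int start_slave_tx(struct scatterlist *sgl, unsigned int nents,
			  dma_addr_t fifo_addr)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *desc;

	/* 1. Allocate a DMA slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, my_filter, NULL);
	if (!chan)
		return -ENODEV;

	/* 2. Set slave and controller specific parameters */
	dmaengine_slave_config(chan, &cfg);

	/* 3. Get a descriptor for the (already mapped) scatterlist */
	desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
						  DMA_MEM_TO_DEV,
						  DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* 4. Submit the descriptor and kick off the transfer */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}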
@@ -745,6 +745,7 @@ M:	Barry Song <baohua.song@csr.com>
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-prima2/
F:	drivers/dma/sirf-dma*

ARM/EBSA110 MACHINE SUPPORT
M:	Russell King <linux@arm.linux.org.uk>

@@ -37,7 +37,7 @@
 */
struct ep93xx_dma_data {
	int port;
	enum dma_data_direction direction;
	enum dma_transfer_direction direction;
	const char *name;
};

@@ -80,14 +80,14 @@ static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
 * channel supports given DMA direction. Only M2P channels have such
 * limitation, for M2M channels the direction is configurable.
 */
static inline enum dma_data_direction
static inline enum dma_transfer_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{
	if (!ep93xx_dma_chan_is_m2p(chan))
		return DMA_NONE;

	/* even channels are for TX, odd for RX */
	return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
}

#endif /* __ASM_ARCH_DMA_H */
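A large part of this merge is a tree-wide switch from the dma-mapping enum dma_data_direction (DMA_TO_DEVICE / DMA_FROM_DEVICE) to the dmaengine-specific enum dma_transfer_direction, first visible in the ep93xx platform header above and repeated in the drivers below. For reference, the new enum introduced by this series looks roughly like this (sketch of include/linux/dmaengine.h, including the DMA_TRANS_NONE value added by one of the merged commits):

enum dma_transfer_direction {
	DMA_MEM_TO_MEM,		/* memory to memory, e.g. memcpy offload */
	DMA_MEM_TO_DEV,		/* replaces DMA_TO_DEVICE in slave prep calls */
	DMA_DEV_TO_MEM,		/* replaces DMA_FROM_DEVICE in slave prep calls */
	DMA_DEV_TO_DEV,		/* device to device transfers */
	DMA_TRANS_NONE,		/* no/invalid transfer direction */
};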
@ -445,31 +445,39 @@ static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
|
|||
},
|
||||
};
|
||||
|
||||
#define SH7372_CHCLR 0x220
|
||||
|
||||
static const struct sh_dmae_channel sh7372_dmae_channels[] = {
|
||||
{
|
||||
.offset = 0,
|
||||
.dmars = 0,
|
||||
.dmars_bit = 0,
|
||||
.chclr_offset = SH7372_CHCLR + 0,
|
||||
}, {
|
||||
.offset = 0x10,
|
||||
.dmars = 0,
|
||||
.dmars_bit = 8,
|
||||
.chclr_offset = SH7372_CHCLR + 0x10,
|
||||
}, {
|
||||
.offset = 0x20,
|
||||
.dmars = 4,
|
||||
.dmars_bit = 0,
|
||||
.chclr_offset = SH7372_CHCLR + 0x20,
|
||||
}, {
|
||||
.offset = 0x30,
|
||||
.dmars = 4,
|
||||
.dmars_bit = 8,
|
||||
.chclr_offset = SH7372_CHCLR + 0x30,
|
||||
}, {
|
||||
.offset = 0x50,
|
||||
.dmars = 8,
|
||||
.dmars_bit = 0,
|
||||
.chclr_offset = SH7372_CHCLR + 0x50,
|
||||
}, {
|
||||
.offset = 0x60,
|
||||
.dmars = 8,
|
||||
.dmars_bit = 8,
|
||||
.chclr_offset = SH7372_CHCLR + 0x60,
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -487,6 +495,7 @@ static struct sh_dmae_pdata dma_platform_data = {
|
|||
.ts_shift = ts_shift,
|
||||
.ts_shift_num = ARRAY_SIZE(ts_shift),
|
||||
.dmaor_init = DMAOR_DME,
|
||||
.chclr_present = 1,
|
||||
};
|
||||
|
||||
/* Resource order important! */
|
||||
|
@ -494,7 +503,7 @@ static struct resource sh7372_dmae0_resources[] = {
|
|||
{
|
||||
/* Channel registers and DMAOR */
|
||||
.start = 0xfe008020,
|
||||
.end = 0xfe00808f,
|
||||
.end = 0xfe00828f,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
{
|
||||
|
@ -522,7 +531,7 @@ static struct resource sh7372_dmae1_resources[] = {
|
|||
{
|
||||
/* Channel registers and DMAOR */
|
||||
.start = 0xfe018020,
|
||||
.end = 0xfe01808f,
|
||||
.end = 0xfe01828f,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
{
|
||||
|
@ -550,7 +559,7 @@ static struct resource sh7372_dmae2_resources[] = {
|
|||
{
|
||||
/* Channel registers and DMAOR */
|
||||
.start = 0xfe028020,
|
||||
.end = 0xfe02808f,
|
||||
.end = 0xfe02828f,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
{
|
||||
|
|
|
@ -22,6 +22,20 @@
|
|||
#define FB_SYNC_SWAP_RGB 0x04000000
|
||||
#define FB_SYNC_CLK_SEL_EN 0x02000000
|
||||
|
||||
/*
|
||||
* Specify the way your display is connected. The IPU can arbitrarily
|
||||
* map the internal colors to the external data lines. We only support
|
||||
* the following mappings at the moment.
|
||||
*/
|
||||
enum disp_data_mapping {
|
||||
/* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */
|
||||
IPU_DISP_DATA_MAPPING_RGB666,
|
||||
/* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */
|
||||
IPU_DISP_DATA_MAPPING_RGB565,
|
||||
/* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */
|
||||
IPU_DISP_DATA_MAPPING_RGB888,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct mx3fb_platform_data - mx3fb platform data
|
||||
*
|
||||
|
@ -33,6 +47,7 @@ struct mx3fb_platform_data {
|
|||
const char *name;
|
||||
const struct fb_videomode *mode;
|
||||
int num_modes;
|
||||
enum disp_data_mapping disp_data_fmt;
|
||||
};
|
||||
|
||||
#endif
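The new disp_data_mapping enum lets board code state explicitly how the IPU display data lines are wired. A hypothetical board-file fragment using it might look like the sketch below; my_modes, the mode timings and the "panel" name are made up for illustration, only the fields shown in this hunk are filled in.

/* Hypothetical display mode table for a 640x480 panel. */
static const struct fb_videomode my_modes[] = {
	{
		.name	 = "VGA",
		.refresh = 60,
		.xres	 = 640,
		.yres	 = 480,
	},
};

/* Board wires an 18-bit panel as blue d[0..5], green d[6..11], red d[12..17]. */
static struct mx3fb_platform_data my_fb_pdata = {
	.name		= "panel",
	.mode		= my_modes,
	.num_modes	= ARRAY_SIZE(my_modes),
	.disp_data_fmt	= IPU_DISP_DATA_MAPPING_RGB666,
};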
@ -113,7 +113,8 @@ struct stedma40_half_channel_info {
|
|||
* @dst_dev_type: Dst device type
|
||||
* @src_info: Parameters for dst half channel
|
||||
* @dst_info: Parameters for dst half channel
|
||||
*
|
||||
* @use_fixed_channel: if true, use physical channel specified by phy_channel
|
||||
* @phy_channel: physical channel to use, only if use_fixed_channel is true
|
||||
*
|
||||
* This structure has to be filled by the client drivers.
|
||||
* It is recommended to do all dma configurations for clients in the machine.
|
||||
|
@ -129,6 +130,9 @@ struct stedma40_chan_cfg {
|
|||
int dst_dev_type;
|
||||
struct stedma40_half_channel_info src_info;
|
||||
struct stedma40_half_channel_info dst_info;
|
||||
|
||||
bool use_fixed_channel;
|
||||
int phy_channel;
|
||||
};
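With the two new fields a client (typically machine/board code) can pin its transfers to one specific physical channel instead of letting the driver pick one. A minimal, hypothetical fragment; all other stedma40_chan_cfg fields are omitted and the channel number is made up:

/* Hypothetical client configuration: always use physical channel 2. */
static struct stedma40_chan_cfg my_chan_cfg = {
	.use_fixed_channel	= true,
	.phy_channel		= 2,
};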
/**
|
||||
|
@ -153,6 +157,7 @@ struct stedma40_platform_data {
|
|||
struct stedma40_chan_cfg *memcpy_conf_phy;
|
||||
struct stedma40_chan_cfg *memcpy_conf_log;
|
||||
int disabled_channels[STEDMA40_MAX_PHYS];
|
||||
bool use_esram_lcla;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_STE_DMA40
|
||||
|
@ -187,7 +192,7 @@ static inline struct
|
|||
dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
|
||||
dma_addr_t addr,
|
||||
unsigned int size,
|
||||
enum dma_data_direction direction,
|
||||
enum dma_transfer_direction direction,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct scatterlist sg;
|
||||
|
@ -209,7 +214,7 @@ static inline struct
|
|||
dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
|
||||
dma_addr_t addr,
|
||||
unsigned int size,
|
||||
enum dma_data_direction direction,
|
||||
enum dma_transfer_direction direction,
|
||||
unsigned long flags)
|
||||
{
|
||||
return NULL;
|
||||
|
|
|
@ -37,14 +37,14 @@ static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
|
|||
(void *)dma_ch;
|
||||
chan = dma_request_channel(mask, pl330_filter, filter_param);
|
||||
|
||||
if (info->direction == DMA_FROM_DEVICE) {
|
||||
if (info->direction == DMA_DEV_TO_MEM) {
|
||||
memset(&slave_config, 0, sizeof(struct dma_slave_config));
|
||||
slave_config.direction = info->direction;
|
||||
slave_config.src_addr = info->fifo;
|
||||
slave_config.src_addr_width = info->width;
|
||||
slave_config.src_maxburst = 1;
|
||||
dmaengine_slave_config(chan, &slave_config);
|
||||
} else if (info->direction == DMA_TO_DEVICE) {
|
||||
} else if (info->direction == DMA_MEM_TO_DEV) {
|
||||
memset(&slave_config, 0, sizeof(struct dma_slave_config));
|
||||
slave_config.direction = info->direction;
|
||||
slave_config.dst_addr = info->fifo;
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
|
||||
struct samsung_dma_prep_info {
|
||||
enum dma_transaction_type cap;
|
||||
enum dma_data_direction direction;
|
||||
enum dma_transfer_direction direction;
|
||||
dma_addr_t buf;
|
||||
unsigned long period;
|
||||
unsigned long len;
|
||||
|
@ -27,7 +27,7 @@ struct samsung_dma_prep_info {
|
|||
|
||||
struct samsung_dma_info {
|
||||
enum dma_transaction_type cap;
|
||||
enum dma_data_direction direction;
|
||||
enum dma_transfer_direction direction;
|
||||
enum dma_slave_buswidth width;
|
||||
dma_addr_t fifo;
|
||||
struct s3c2410_dma_client *client;
|
||||
|
|
|
@ -124,7 +124,7 @@ config MV_XOR
|
|||
|
||||
config MX3_IPU
|
||||
bool "MX3x Image Processing Unit support"
|
||||
depends on SOC_IMX31 || SOC_IMX35
|
||||
depends on ARCH_MXC
|
||||
select DMA_ENGINE
|
||||
default y
|
||||
help
|
||||
|
@ -187,6 +187,13 @@ config TIMB_DMA
|
|||
help
|
||||
Enable support for the Timberdale FPGA DMA engine.
|
||||
|
||||
config SIRF_DMA
|
||||
tristate "CSR SiRFprimaII DMA support"
|
||||
depends on ARCH_PRIMA2
|
||||
select DMA_ENGINE
|
||||
help
|
||||
Enable support for the CSR SiRFprimaII DMA engine.
|
||||
|
||||
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
|
||||
bool
|
||||
|
||||
|
@ -201,26 +208,26 @@ config PL330_DMA
|
|||
platform_data for a dma-pl330 device.
|
||||
|
||||
config PCH_DMA
|
||||
tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
|
||||
tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
|
||||
depends on PCI && X86
|
||||
select DMA_ENGINE
|
||||
help
|
||||
Enable support for Intel EG20T PCH DMA engine.
|
||||
|
||||
This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
|
||||
Output Hub), ML7213 and ML7223.
|
||||
ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
|
||||
for MP(Media Phone) use.
|
||||
ML7213/ML7223 is companion chip for Intel Atom E6xx series.
|
||||
ML7213/ML7223 is completely compatible for Intel EG20T PCH.
|
||||
This driver also can be used for LAPIS Semiconductor IOH(Input/
|
||||
Output Hub), ML7213, ML7223 and ML7831.
|
||||
ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
|
||||
for MP(Media Phone) use and ML7831 IOH is for general purpose use.
|
||||
ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
|
||||
ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
|
||||
|
||||
config IMX_SDMA
|
||||
tristate "i.MX SDMA support"
|
||||
depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
|
||||
depends on ARCH_MXC
|
||||
select DMA_ENGINE
|
||||
help
|
||||
Support the i.MX SDMA engine. This engine is integrated into
|
||||
Freescale i.MX25/31/35/51 chips.
|
||||
Freescale i.MX25/31/35/51/53 chips.
|
||||
|
||||
config IMX_DMA
|
||||
tristate "i.MX DMA support"
|
||||
|
|
|
@ -21,6 +21,7 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
|
|||
obj-$(CONFIG_IMX_DMA) += imx-dma.o
|
||||
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
|
||||
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
|
||||
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
|
||||
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
|
||||
obj-$(CONFIG_PL330_DMA) += pl330.o
|
||||
obj-$(CONFIG_PCH_DMA) += pch_dma.o
|
||||
|
|
|
@ -854,8 +854,10 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
|
|||
int ret;
|
||||
|
||||
/* Check if we already have a channel */
|
||||
if (plchan->phychan)
|
||||
return 0;
|
||||
if (plchan->phychan) {
|
||||
ch = plchan->phychan;
|
||||
goto got_channel;
|
||||
}
|
||||
|
||||
ch = pl08x_get_phy_channel(pl08x, plchan);
|
||||
if (!ch) {
|
||||
|
@ -880,21 +882,22 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
|
|||
return -EBUSY;
|
||||
}
|
||||
ch->signal = ret;
|
||||
|
||||
/* Assign the flow control signal to this channel */
|
||||
if (txd->direction == DMA_TO_DEVICE)
|
||||
txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
|
||||
else if (txd->direction == DMA_FROM_DEVICE)
|
||||
txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
|
||||
}
|
||||
|
||||
plchan->phychan = ch;
|
||||
dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
|
||||
ch->id,
|
||||
ch->signal,
|
||||
plchan->name);
|
||||
|
||||
got_channel:
|
||||
/* Assign the flow control signal to this channel */
|
||||
if (txd->direction == DMA_MEM_TO_DEV)
|
||||
txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
|
||||
else if (txd->direction == DMA_DEV_TO_MEM)
|
||||
txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
|
||||
|
||||
plchan->phychan_hold++;
|
||||
plchan->phychan = ch;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1102,10 +1105,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
|
|||
|
||||
/* Transfer direction */
|
||||
plchan->runtime_direction = config->direction;
|
||||
if (config->direction == DMA_TO_DEVICE) {
|
||||
if (config->direction == DMA_MEM_TO_DEV) {
|
||||
addr_width = config->dst_addr_width;
|
||||
maxburst = config->dst_maxburst;
|
||||
} else if (config->direction == DMA_FROM_DEVICE) {
|
||||
} else if (config->direction == DMA_DEV_TO_MEM) {
|
||||
addr_width = config->src_addr_width;
|
||||
maxburst = config->src_maxburst;
|
||||
} else {
|
||||
|
@ -1136,7 +1139,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
|
|||
cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
|
||||
cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
|
||||
|
||||
if (plchan->runtime_direction == DMA_FROM_DEVICE) {
|
||||
if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
|
||||
plchan->src_addr = config->src_addr;
|
||||
plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
|
||||
pl08x_select_bus(plchan->cd->periph_buses,
|
||||
|
@ -1152,7 +1155,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
|
|||
"configured channel %s (%s) for %s, data width %d, "
|
||||
"maxburst %d words, LE, CCTL=0x%08x\n",
|
||||
dma_chan_name(chan), plchan->name,
|
||||
(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
|
||||
(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
|
||||
addr_width,
|
||||
maxburst,
|
||||
cctl);
|
||||
|
@ -1322,7 +1325,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
|
|||
|
||||
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
|
||||
struct dma_chan *chan, struct scatterlist *sgl,
|
||||
unsigned int sg_len, enum dma_data_direction direction,
|
||||
unsigned int sg_len, enum dma_transfer_direction direction,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
|
||||
|
@ -1354,10 +1357,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
|
|||
*/
|
||||
txd->direction = direction;
|
||||
|
||||
if (direction == DMA_TO_DEVICE) {
|
||||
if (direction == DMA_MEM_TO_DEV) {
|
||||
txd->cctl = plchan->dst_cctl;
|
||||
slave_addr = plchan->dst_addr;
|
||||
} else if (direction == DMA_FROM_DEVICE) {
|
||||
} else if (direction == DMA_DEV_TO_MEM) {
|
||||
txd->cctl = plchan->src_cctl;
|
||||
slave_addr = plchan->src_addr;
|
||||
} else {
|
||||
|
@ -1368,10 +1371,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
|
|||
}
|
||||
|
||||
if (plchan->cd->device_fc)
|
||||
tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
|
||||
tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
|
||||
PL080_FLOW_PER2MEM_PER;
|
||||
else
|
||||
tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
|
||||
tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
|
||||
PL080_FLOW_PER2MEM;
|
||||
|
||||
txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
|
||||
|
@ -1387,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
|
|||
list_add_tail(&dsg->node, &txd->dsg_list);
|
||||
|
||||
dsg->len = sg_dma_len(sg);
|
||||
if (direction == DMA_TO_DEVICE) {
|
||||
if (direction == DMA_MEM_TO_DEV) {
|
||||
dsg->src_addr = sg_phys(sg);
|
||||
dsg->dst_addr = slave_addr;
|
||||
} else {
|
||||
|
|
|
@ -23,6 +23,8 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
|
||||
#include "at_hdmac_regs.h"
|
||||
|
||||
|
@ -660,7 +662,7 @@ err_desc_get:
|
|||
*/
|
||||
static struct dma_async_tx_descriptor *
|
||||
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
||||
unsigned int sg_len, enum dma_data_direction direction,
|
||||
unsigned int sg_len, enum dma_transfer_direction direction,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct at_dma_chan *atchan = to_at_dma_chan(chan);
|
||||
|
@ -678,7 +680,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
|
||||
dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
|
||||
sg_len,
|
||||
direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
|
||||
direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
|
||||
flags);
|
||||
|
||||
if (unlikely(!atslave || !sg_len)) {
|
||||
|
@ -692,7 +694,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
ctrlb = ATC_IEN;
|
||||
|
||||
switch (direction) {
|
||||
case DMA_TO_DEVICE:
|
||||
case DMA_MEM_TO_DEV:
|
||||
ctrla |= ATC_DST_WIDTH(reg_width);
|
||||
ctrlb |= ATC_DST_ADDR_MODE_FIXED
|
||||
| ATC_SRC_ADDR_MODE_INCR
|
||||
|
@ -725,7 +727,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
total_len += len;
|
||||
}
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
case DMA_DEV_TO_MEM:
|
||||
ctrla |= ATC_SRC_WIDTH(reg_width);
|
||||
ctrlb |= ATC_DST_ADDR_MODE_INCR
|
||||
| ATC_SRC_ADDR_MODE_FIXED
|
||||
|
@ -787,7 +789,7 @@ err_desc_get:
|
|||
*/
|
||||
static int
|
||||
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
|
||||
size_t period_len, enum dma_data_direction direction)
|
||||
size_t period_len, enum dma_transfer_direction direction)
|
||||
{
|
||||
if (period_len > (ATC_BTSIZE_MAX << reg_width))
|
||||
goto err_out;
|
||||
|
@ -795,7 +797,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
|
|||
goto err_out;
|
||||
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
|
||||
goto err_out;
|
||||
if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
|
||||
if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
|
||||
goto err_out;
|
||||
|
||||
return 0;
|
||||
|
@ -810,7 +812,7 @@ err_out:
|
|||
static int
|
||||
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
|
||||
unsigned int period_index, dma_addr_t buf_addr,
|
||||
size_t period_len, enum dma_data_direction direction)
|
||||
size_t period_len, enum dma_transfer_direction direction)
|
||||
{
|
||||
u32 ctrla;
|
||||
unsigned int reg_width = atslave->reg_width;
|
||||
|
@ -822,7 +824,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
|
|||
| period_len >> reg_width;
|
||||
|
||||
switch (direction) {
|
||||
case DMA_TO_DEVICE:
|
||||
case DMA_MEM_TO_DEV:
|
||||
desc->lli.saddr = buf_addr + (period_len * period_index);
|
||||
desc->lli.daddr = atslave->tx_reg;
|
||||
desc->lli.ctrla = ctrla;
|
||||
|
@ -833,7 +835,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
|
|||
| ATC_DIF(AT_DMA_PER_IF);
|
||||
break;
|
||||
|
||||
case DMA_FROM_DEVICE:
|
||||
case DMA_DEV_TO_MEM:
|
||||
desc->lli.saddr = atslave->rx_reg;
|
||||
desc->lli.daddr = buf_addr + (period_len * period_index);
|
||||
desc->lli.ctrla = ctrla;
|
||||
|
@ -861,7 +863,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
|
|||
*/
|
||||
static struct dma_async_tx_descriptor *
|
||||
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
|
||||
size_t period_len, enum dma_data_direction direction)
|
||||
size_t period_len, enum dma_transfer_direction direction)
|
||||
{
|
||||
struct at_dma_chan *atchan = to_at_dma_chan(chan);
|
||||
struct at_dma_slave *atslave = chan->private;
|
||||
|
@ -872,7 +874,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
|
|||
unsigned int i;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
|
||||
direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
|
||||
direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
|
||||
buf_addr,
|
||||
periods, buf_len, period_len);
|
||||
|
||||
|
@ -1175,6 +1177,56 @@ static void atc_free_chan_resources(struct dma_chan *chan)
|
|||
|
||||
/*-- Module Management -----------------------------------------------*/
|
||||
|
||||
/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
|
||||
static struct at_dma_platform_data at91sam9rl_config = {
|
||||
.nr_channels = 2,
|
||||
};
|
||||
static struct at_dma_platform_data at91sam9g45_config = {
|
||||
.nr_channels = 8,
|
||||
};
|
||||
|
||||
#if defined(CONFIG_OF)
|
||||
static const struct of_device_id atmel_dma_dt_ids[] = {
|
||||
{
|
||||
.compatible = "atmel,at91sam9rl-dma",
|
||||
.data = &at91sam9rl_config,
|
||||
}, {
|
||||
.compatible = "atmel,at91sam9g45-dma",
|
||||
.data = &at91sam9g45_config,
|
||||
}, {
|
||||
/* sentinel */
|
||||
}
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
|
||||
#endif
|
||||
|
||||
static const struct platform_device_id atdma_devtypes[] = {
|
||||
{
|
||||
.name = "at91sam9rl_dma",
|
||||
.driver_data = (unsigned long) &at91sam9rl_config,
|
||||
}, {
|
||||
.name = "at91sam9g45_dma",
|
||||
.driver_data = (unsigned long) &at91sam9g45_config,
|
||||
}, {
|
||||
/* sentinel */
|
||||
}
|
||||
};
|
||||
|
||||
static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
|
||||
struct platform_device *pdev)
|
||||
{
|
||||
if (pdev->dev.of_node) {
|
||||
const struct of_device_id *match;
|
||||
match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
|
||||
if (match == NULL)
|
||||
return NULL;
|
||||
return match->data;
|
||||
}
|
||||
return (struct at_dma_platform_data *)
|
||||
platform_get_device_id(pdev)->driver_data;
|
||||
}
|
||||
|
||||
/**
|
||||
* at_dma_off - disable DMA controller
|
||||
* @atdma: the Atmel HDAMC device
|
||||
|
@ -1193,18 +1245,23 @@ static void at_dma_off(struct at_dma *atdma)
|
|||
|
||||
static int __init at_dma_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct at_dma_platform_data *pdata;
|
||||
struct resource *io;
|
||||
struct at_dma *atdma;
|
||||
size_t size;
|
||||
int irq;
|
||||
int err;
|
||||
int i;
|
||||
struct at_dma_platform_data *plat_dat;
|
||||
|
||||
/* get DMA Controller parameters from platform */
|
||||
pdata = pdev->dev.platform_data;
|
||||
if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
|
||||
return -EINVAL;
|
||||
/* setup platform data for each SoC */
|
||||
dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
|
||||
dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
|
||||
dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
|
||||
|
||||
/* get DMA parameters from controller type */
|
||||
plat_dat = at_dma_get_driver_data(pdev);
|
||||
if (!plat_dat)
|
||||
return -ENODEV;
|
||||
|
||||
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!io)
|
||||
|
@ -1215,14 +1272,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
|
|||
return irq;
|
||||
|
||||
size = sizeof(struct at_dma);
|
||||
size += pdata->nr_channels * sizeof(struct at_dma_chan);
|
||||
size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
|
||||
atdma = kzalloc(size, GFP_KERNEL);
|
||||
if (!atdma)
|
||||
return -ENOMEM;
|
||||
|
||||
/* discover transaction capabilites from the platform data */
|
||||
atdma->dma_common.cap_mask = pdata->cap_mask;
|
||||
atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
|
||||
/* discover transaction capabilities */
|
||||
atdma->dma_common.cap_mask = plat_dat->cap_mask;
|
||||
atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
|
||||
|
||||
size = resource_size(io);
|
||||
if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
|
||||
|
@ -1268,7 +1325,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
|
|||
|
||||
/* initialize channels related values */
|
||||
INIT_LIST_HEAD(&atdma->dma_common.channels);
|
||||
for (i = 0; i < pdata->nr_channels; i++) {
|
||||
for (i = 0; i < plat_dat->nr_channels; i++) {
|
||||
struct at_dma_chan *atchan = &atdma->chan[i];
|
||||
|
||||
atchan->chan_common.device = &atdma->dma_common;
|
||||
|
@ -1313,7 +1370,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
|
|||
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
|
||||
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
|
||||
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
|
||||
pdata->nr_channels);
|
||||
plat_dat->nr_channels);
|
||||
|
||||
dma_async_device_register(&atdma->dma_common);
|
||||
|
||||
|
@ -1495,9 +1552,11 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = {
|
|||
static struct platform_driver at_dma_driver = {
|
||||
.remove = __exit_p(at_dma_remove),
|
||||
.shutdown = at_dma_shutdown,
|
||||
.id_table = atdma_devtypes,
|
||||
.driver = {
|
||||
.name = "at_hdmac",
|
||||
.pm = &at_dma_dev_pm_ops,
|
||||
.of_match_table = of_match_ptr(atmel_dma_dt_ids),
|
||||
},
|
||||
};
|
||||
|
||||
|
|
|
@ -251,6 +251,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
|
|||
/**
|
||||
* struct at_dma - internal representation of an Atmel HDMA Controller
|
||||
* @chan_common: common dmaengine dma_device object members
|
||||
* @atdma_devtype: identifier of DMA controller compatibility
|
||||
* @ch_regs: memory mapped register base
|
||||
* @clk: dma controller clock
|
||||
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
|
||||
|
|
|
@ -39,7 +39,7 @@ struct coh901318_desc {
|
|||
struct scatterlist *sg;
|
||||
unsigned int sg_len;
|
||||
struct coh901318_lli *lli;
|
||||
enum dma_data_direction dir;
|
||||
enum dma_transfer_direction dir;
|
||||
unsigned long flags;
|
||||
u32 head_config;
|
||||
u32 head_ctrl;
|
||||
|
@ -1034,7 +1034,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
|
|||
|
||||
static struct dma_async_tx_descriptor *
|
||||
coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
||||
unsigned int sg_len, enum dma_data_direction direction,
|
||||
unsigned int sg_len, enum dma_transfer_direction direction,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct coh901318_chan *cohc = to_coh901318_chan(chan);
|
||||
|
@ -1077,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
ctrl_last |= cohc->runtime_ctrl;
|
||||
ctrl |= cohc->runtime_ctrl;
|
||||
|
||||
if (direction == DMA_TO_DEVICE) {
|
||||
if (direction == DMA_MEM_TO_DEV) {
|
||||
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
|
||||
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
|
||||
|
||||
|
@ -1085,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
ctrl_chained |= tx_flags;
|
||||
ctrl_last |= tx_flags;
|
||||
ctrl |= tx_flags;
|
||||
} else if (direction == DMA_FROM_DEVICE) {
|
||||
} else if (direction == DMA_DEV_TO_MEM) {
|
||||
u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
|
||||
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
|
||||
|
||||
|
@ -1274,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
|
|||
int i = 0;
|
||||
|
||||
/* We only support mem to per or per to mem transfers */
|
||||
if (config->direction == DMA_FROM_DEVICE) {
|
||||
if (config->direction == DMA_DEV_TO_MEM) {
|
||||
addr = config->src_addr;
|
||||
addr_width = config->src_addr_width;
|
||||
maxburst = config->src_maxburst;
|
||||
} else if (config->direction == DMA_TO_DEVICE) {
|
||||
} else if (config->direction == DMA_MEM_TO_DEV) {
|
||||
addr = config->dst_addr;
|
||||
addr_width = config->dst_addr_width;
|
||||
maxburst = config->dst_maxburst;
|
||||
|
|
|
@ -7,11 +7,10 @@
|
|||
* Author: Per Friden <per.friden@stericsson.com>
|
||||
*/
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/dmapool.h>
|
||||
#include <linux/memory.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/dmapool.h>
|
||||
#include <mach/coh901318.h>
|
||||
|
||||
#include "coh901318_lli.h"
|
||||
|
@ -177,18 +176,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
|
|||
struct coh901318_lli *lli,
|
||||
dma_addr_t buf, unsigned int size,
|
||||
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
|
||||
enum dma_data_direction dir)
|
||||
enum dma_transfer_direction dir)
|
||||
{
|
||||
int s = size;
|
||||
dma_addr_t src;
|
||||
dma_addr_t dst;
|
||||
|
||||
|
||||
if (dir == DMA_TO_DEVICE) {
|
||||
if (dir == DMA_MEM_TO_DEV) {
|
||||
src = buf;
|
||||
dst = dev_addr;
|
||||
|
||||
} else if (dir == DMA_FROM_DEVICE) {
|
||||
} else if (dir == DMA_DEV_TO_MEM) {
|
||||
|
||||
src = dev_addr;
|
||||
dst = buf;
|
||||
|
@ -215,9 +214,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
|
|||
|
||||
lli = coh901318_lli_next(lli);
|
||||
|
||||
if (dir == DMA_TO_DEVICE)
|
||||
if (dir == DMA_MEM_TO_DEV)
|
||||
src += block_size;
|
||||
else if (dir == DMA_FROM_DEVICE)
|
||||
else if (dir == DMA_DEV_TO_MEM)
|
||||
dst += block_size;
|
||||
}
|
||||
|
||||
|
@ -234,7 +233,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
|
|||
struct scatterlist *sgl, unsigned int nents,
|
||||
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
|
||||
u32 ctrl_last,
|
||||
enum dma_data_direction dir, u32 ctrl_irq_mask)
|
||||
enum dma_transfer_direction dir, u32 ctrl_irq_mask)
|
||||
{
|
||||
int i;
|
||||
struct scatterlist *sg;
|
||||
|
@ -249,9 +248,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
|
|||
|
||||
spin_lock(&pool->lock);
|
||||
|
||||
if (dir == DMA_TO_DEVICE)
|
||||
if (dir == DMA_MEM_TO_DEV)
|
||||
dst = dev_addr;
|
||||
else if (dir == DMA_FROM_DEVICE)
|
||||
else if (dir == DMA_DEV_TO_MEM)
|
||||
src = dev_addr;
|
||||
else
|
||||
goto err;
|
||||
|
@ -269,7 +268,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
|
|||
ctrl_sg = ctrl ? ctrl : ctrl_last;
|
||||
|
||||
|
||||
if (dir == DMA_TO_DEVICE)
|
||||
if (dir == DMA_MEM_TO_DEV)
|
||||
/* increment source address */
|
||||
src = sg_phys(sg);
|
||||
else
|
||||
|
@ -293,7 +292,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
|
|||
lli->src_addr = src;
|
||||
lli->dst_addr = dst;
|
||||
|
||||
if (dir == DMA_FROM_DEVICE)
|
||||
if (dir == DMA_DEV_TO_MEM)
|
||||
dst += elem_size;
|
||||
else
|
||||
src += elem_size;
|
||||
|
|
|
@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
|
|||
struct coh901318_lli *lli,
|
||||
dma_addr_t buf, unsigned int size,
|
||||
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
|
||||
enum dma_data_direction dir);
|
||||
enum dma_transfer_direction dir);
|
||||
|
||||
/**
|
||||
* coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer
|
||||
|
@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
|
|||
struct scatterlist *sg, unsigned int nents,
|
||||
dma_addr_t dev_addr, u32 ctrl_chained,
|
||||
u32 ctrl, u32 ctrl_last,
|
||||
enum dma_data_direction dir, u32 ctrl_irq_mask);
|
||||
enum dma_transfer_direction dir, u32 ctrl_irq_mask);
|
||||
|
||||
#endif /* COH901318_LLI_H */
|
||||
|
|
|
@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
|
|||
!device->device_prep_dma_interrupt);
|
||||
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
|
||||
!device->device_prep_dma_sg);
|
||||
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
|
||||
!device->device_prep_slave_sg);
|
||||
BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
|
||||
!device->device_prep_dma_cyclic);
|
||||
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
|
||||
!device->device_control);
|
||||
BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
|
||||
!device->device_prep_interleaved_dma);
|
||||
|
||||
BUG_ON(!device->device_alloc_chan_resources);
|
||||
BUG_ON(!device->device_free_chan_resources);
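The reordered checks above enforce that a driver only advertises capabilities it actually implements, and add the same rule for the new DMA_INTERLEAVE capability. A hedged registration sketch follows; the foo_* callbacks are hypothetical driver functions, not part of this merge:

static int foo_dma_register(struct dma_device *dd)
{
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dd->cap_mask);

	/* Each advertised capability needs its prep callback... */
	dd->device_prep_slave_sg        = foo_prep_slave_sg;
	dd->device_prep_interleaved_dma = foo_prep_interleaved_dma;
	/* ...and DMA_SLAVE additionally requires device_control. */
	dd->device_control              = foo_control;

	/* Mandatory for every dmaengine driver. */
	dd->device_alloc_chan_resources = foo_alloc_chan_resources;
	dd->device_free_chan_resources  = foo_free_chan_resources;
	dd->device_tx_status            = foo_tx_status;
	dd->device_issue_pending        = foo_issue_pending;

	return dma_async_device_register(dd);
}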
@ -166,6 +166,38 @@ dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
|
|||
return cookie;
|
||||
}
|
||||
|
||||
static void dwc_initialize(struct dw_dma_chan *dwc)
|
||||
{
|
||||
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
|
||||
struct dw_dma_slave *dws = dwc->chan.private;
|
||||
u32 cfghi = DWC_CFGH_FIFO_MODE;
|
||||
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
|
||||
|
||||
if (dwc->initialized == true)
|
||||
return;
|
||||
|
||||
if (dws) {
|
||||
/*
|
||||
* We need controller-specific data to set up slave
|
||||
* transfers.
|
||||
*/
|
||||
BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
|
||||
|
||||
cfghi = dws->cfg_hi;
|
||||
cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
|
||||
}
|
||||
|
||||
channel_writel(dwc, CFG_LO, cfglo);
|
||||
channel_writel(dwc, CFG_HI, cfghi);
|
||||
|
||||
/* Enable interrupts */
|
||||
channel_set_bit(dw, MASK.XFER, dwc->mask);
|
||||
channel_set_bit(dw, MASK.BLOCK, dwc->mask);
|
||||
channel_set_bit(dw, MASK.ERROR, dwc->mask);
|
||||
|
||||
dwc->initialized = true;
|
||||
}
|
||||
|
||||
/*----------------------------------------------------------------------*/
|
||||
|
||||
/* Called with dwc->lock held and bh disabled */
|
||||
|
@ -189,6 +221,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
|
|||
return;
|
||||
}
|
||||
|
||||
dwc_initialize(dwc);
|
||||
|
||||
channel_writel(dwc, LLP, first->txd.phys);
|
||||
channel_writel(dwc, CTL_LO,
|
||||
DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
|
||||
|
@ -696,7 +730,7 @@ err_desc_get:
|
|||
|
||||
static struct dma_async_tx_descriptor *
|
||||
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
||||
unsigned int sg_len, enum dma_data_direction direction,
|
||||
unsigned int sg_len, enum dma_transfer_direction direction,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||
|
@ -720,7 +754,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
prev = first = NULL;
|
||||
|
||||
switch (direction) {
|
||||
case DMA_TO_DEVICE:
|
||||
case DMA_MEM_TO_DEV:
|
||||
ctllo = (DWC_DEFAULT_CTLLO(chan->private)
|
||||
| DWC_CTLL_DST_WIDTH(reg_width)
|
||||
| DWC_CTLL_DST_FIX
|
||||
|
@ -777,7 +811,7 @@ slave_sg_todev_fill_desc:
|
|||
goto slave_sg_todev_fill_desc;
|
||||
}
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
case DMA_DEV_TO_MEM:
|
||||
ctllo = (DWC_DEFAULT_CTLLO(chan->private)
|
||||
| DWC_CTLL_SRC_WIDTH(reg_width)
|
||||
| DWC_CTLL_DST_INC
|
||||
|
@ -959,10 +993,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|||
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||
struct dw_dma *dw = to_dw_dma(chan->device);
|
||||
struct dw_desc *desc;
|
||||
struct dw_dma_slave *dws;
|
||||
int i;
|
||||
u32 cfghi;
|
||||
u32 cfglo;
|
||||
unsigned long flags;
|
||||
|
||||
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
|
||||
|
@ -975,26 +1006,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|||
|
||||
dwc->completed = chan->cookie = 1;
|
||||
|
||||
cfghi = DWC_CFGH_FIFO_MODE;
|
||||
cfglo = 0;
|
||||
|
||||
dws = chan->private;
|
||||
if (dws) {
|
||||
/*
|
||||
* We need controller-specific data to set up slave
|
||||
* transfers.
|
||||
*/
|
||||
BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
|
||||
|
||||
cfghi = dws->cfg_hi;
|
||||
cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
|
||||
}
|
||||
|
||||
cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
|
||||
|
||||
channel_writel(dwc, CFG_LO, cfglo);
|
||||
channel_writel(dwc, CFG_HI, cfghi);
|
||||
|
||||
/*
|
||||
* NOTE: some controllers may have additional features that we
|
||||
* need to initialize here, like "scatter-gather" (which
|
||||
|
@ -1026,11 +1037,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
|
|||
i = ++dwc->descs_allocated;
|
||||
}
|
||||
|
||||
/* Enable interrupts */
|
||||
channel_set_bit(dw, MASK.XFER, dwc->mask);
|
||||
channel_set_bit(dw, MASK.BLOCK, dwc->mask);
|
||||
channel_set_bit(dw, MASK.ERROR, dwc->mask);
|
||||
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
dev_dbg(chan2dev(chan),
|
||||
|
@ -1058,6 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
|
|||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
list_splice_init(&dwc->free_list, &list);
|
||||
dwc->descs_allocated = 0;
|
||||
dwc->initialized = false;
|
||||
|
||||
/* Disable interrupts */
|
||||
channel_clear_bit(dw, MASK.XFER, dwc->mask);
|
||||
|
@ -1165,7 +1172,7 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop);
|
|||
*/
|
||||
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
|
||||
dma_addr_t buf_addr, size_t buf_len, size_t period_len,
|
||||
enum dma_data_direction direction)
|
||||
enum dma_transfer_direction direction)
|
||||
{
|
||||
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
|
||||
struct dw_cyclic_desc *cdesc;
|
||||
|
@ -1206,7 +1213,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
|
|||
goto out_err;
|
||||
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
|
||||
goto out_err;
|
||||
if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
|
||||
if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
|
||||
goto out_err;
|
||||
|
||||
retval = ERR_PTR(-ENOMEM);
|
||||
|
@ -1228,7 +1235,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
|
|||
goto out_err_desc_get;
|
||||
|
||||
switch (direction) {
|
||||
case DMA_TO_DEVICE:
|
||||
case DMA_MEM_TO_DEV:
|
||||
desc->lli.dar = dws->tx_reg;
|
||||
desc->lli.sar = buf_addr + (period_len * i);
|
||||
desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
|
||||
|
@ -1239,7 +1246,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
|
|||
| DWC_CTLL_FC(dws->fc)
|
||||
| DWC_CTLL_INT_EN);
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
case DMA_DEV_TO_MEM:
|
||||
desc->lli.dar = buf_addr + (period_len * i);
|
||||
desc->lli.sar = dws->rx_reg;
|
||||
desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
|
||||
|
@ -1335,6 +1342,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
|
|||
|
||||
static void dw_dma_off(struct dw_dma *dw)
|
||||
{
|
||||
int i;
|
||||
|
||||
dma_writel(dw, CFG, 0);
|
||||
|
||||
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
|
||||
|
@ -1345,6 +1354,9 @@ static void dw_dma_off(struct dw_dma *dw)
|
|||
|
||||
while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
|
||||
cpu_relax();
|
||||
|
||||
for (i = 0; i < dw->dma.chancnt; i++)
|
||||
dw->chan[i].initialized = false;
|
||||
}
|
||||
|
||||
static int __init dw_probe(struct platform_device *pdev)
|
||||
|
@ -1533,6 +1545,7 @@ static int dw_suspend_noirq(struct device *dev)
|
|||
|
||||
dw_dma_off(platform_get_drvdata(pdev));
|
||||
clk_disable(dw->clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -140,6 +140,7 @@ struct dw_dma_chan {
|
|||
u8 mask;
|
||||
u8 priority;
|
||||
bool paused;
|
||||
bool initialized;
|
||||
|
||||
spinlock_t lock;
|
||||
|
||||
|
|
|
@ -246,6 +246,9 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
|
|||
static struct ep93xx_dma_desc *
|
||||
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
|
||||
{
|
||||
if (list_empty(&edmac->active))
|
||||
return NULL;
|
||||
|
||||
return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
|
||||
}
|
||||
|
||||
|
@ -263,16 +266,22 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
|
|||
*/
|
||||
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
|
||||
{
|
||||
struct ep93xx_dma_desc *desc;
|
||||
|
||||
list_rotate_left(&edmac->active);
|
||||
|
||||
if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
|
||||
return true;
|
||||
|
||||
desc = ep93xx_dma_get_active(edmac);
|
||||
if (!desc)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* If txd.cookie is set it means that we are back in the first
|
||||
* descriptor in the chain and hence done with it.
|
||||
*/
|
||||
return !ep93xx_dma_get_active(edmac)->txd.cookie;
|
||||
return !desc->txd.cookie;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -327,10 +336,16 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
|
|||
|
||||
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
|
||||
{
|
||||
struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
|
||||
struct ep93xx_dma_desc *desc;
|
||||
u32 bus_addr;
|
||||
|
||||
if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
|
||||
desc = ep93xx_dma_get_active(edmac);
|
||||
if (!desc) {
|
||||
dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
|
||||
bus_addr = desc->src_addr;
|
||||
else
|
||||
bus_addr = desc->dst_addr;
|
||||
|
@ -443,7 +458,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
|
|||
control = (5 << M2M_CONTROL_PWSC_SHIFT);
|
||||
control |= M2M_CONTROL_NO_HDSK;
|
||||
|
||||
if (data->direction == DMA_TO_DEVICE) {
|
||||
if (data->direction == DMA_MEM_TO_DEV) {
|
||||
control |= M2M_CONTROL_DAH;
|
||||
control |= M2M_CONTROL_TM_TX;
|
||||
control |= M2M_CONTROL_RSS_SSPTX;
|
||||
|
@ -459,11 +474,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
|
|||
* This IDE part is totally untested. Values below are taken
|
||||
* from the EP93xx Users's Guide and might not be correct.
|
||||
*/
|
||||
control |= M2M_CONTROL_NO_HDSK;
|
||||
control |= M2M_CONTROL_RSS_IDE;
|
||||
control |= M2M_CONTROL_PW_16;
|
||||
|
||||
if (data->direction == DMA_TO_DEVICE) {
|
||||
if (data->direction == DMA_MEM_TO_DEV) {
|
||||
/* Worst case from the UG */
|
||||
control = (3 << M2M_CONTROL_PWSC_SHIFT);
|
||||
control |= M2M_CONTROL_DAH;
|
||||
|
@ -473,6 +484,10 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
|
|||
control |= M2M_CONTROL_SAH;
|
||||
control |= M2M_CONTROL_TM_RX;
|
||||
}
|
||||
|
||||
control |= M2M_CONTROL_NO_HDSK;
|
||||
control |= M2M_CONTROL_RSS_IDE;
|
||||
control |= M2M_CONTROL_PW_16;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -491,7 +506,13 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
|
|||
|
||||
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
|
||||
{
|
||||
struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
|
||||
struct ep93xx_dma_desc *desc;
|
||||
|
||||
desc = ep93xx_dma_get_active(edmac);
|
||||
if (!desc) {
|
||||
dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (edmac->buffer == 0) {
|
||||
writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
|
||||
|
@ -669,24 +690,30 @@ static void ep93xx_dma_tasklet(unsigned long data)
|
|||
{
|
||||
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
|
||||
struct ep93xx_dma_desc *desc, *d;
|
||||
dma_async_tx_callback callback;
|
||||
void *callback_param;
|
||||
dma_async_tx_callback callback = NULL;
|
||||
void *callback_param = NULL;
|
||||
LIST_HEAD(list);
|
||||
|
||||
spin_lock_irq(&edmac->lock);
|
||||
/*
|
||||
* If dma_terminate_all() was called before we get to run, the active
|
||||
* list has become empty. If that happens we aren't supposed to do
|
||||
* anything more than call ep93xx_dma_advance_work().
|
||||
*/
|
||||
desc = ep93xx_dma_get_active(edmac);
|
||||
if (desc->complete) {
|
||||
edmac->last_completed = desc->txd.cookie;
|
||||
list_splice_init(&edmac->active, &list);
|
||||
if (desc) {
|
||||
if (desc->complete) {
|
||||
edmac->last_completed = desc->txd.cookie;
|
||||
list_splice_init(&edmac->active, &list);
|
||||
}
|
||||
callback = desc->txd.callback;
|
||||
callback_param = desc->txd.callback_param;
|
||||
}
|
||||
spin_unlock_irq(&edmac->lock);
|
||||
|
||||
/* Pick up the next descriptor from the queue */
|
||||
ep93xx_dma_advance_work(edmac);
|
||||
|
||||
callback = desc->txd.callback;
|
||||
callback_param = desc->txd.callback_param;
|
||||
|
||||
/* Now we can release all the chained descriptors */
|
||||
list_for_each_entry_safe(desc, d, &list, node) {
|
||||
/*
|
||||
|
@ -706,13 +733,22 @@ static void ep93xx_dma_tasklet(unsigned long data)
|
|||
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
struct ep93xx_dma_chan *edmac = dev_id;
|
||||
struct ep93xx_dma_desc *desc;
|
||||
irqreturn_t ret = IRQ_HANDLED;
|
||||
|
||||
spin_lock(&edmac->lock);
|
||||
|
||||
desc = ep93xx_dma_get_active(edmac);
|
||||
if (!desc) {
|
||||
dev_warn(chan2dev(edmac),
|
||||
"got interrupt while active list is empty\n");
|
||||
spin_unlock(&edmac->lock);
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
switch (edmac->edma->hw_interrupt(edmac)) {
|
||||
case INTERRUPT_DONE:
|
||||
ep93xx_dma_get_active(edmac)->complete = true;
|
||||
desc->complete = true;
|
||||
tasklet_schedule(&edmac->tasklet);
|
||||
break;
|
||||
|
||||
|
@ -803,8 +839,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
|
|||
switch (data->port) {
|
||||
case EP93XX_DMA_SSP:
|
||||
case EP93XX_DMA_IDE:
|
||||
if (data->direction != DMA_TO_DEVICE &&
|
||||
data->direction != DMA_FROM_DEVICE)
|
||||
if (data->direction != DMA_MEM_TO_DEV &&
|
||||
data->direction != DMA_DEV_TO_MEM)
|
||||
return -EINVAL;
|
||||
break;
|
||||
default:
|
||||
|
@ -952,7 +988,7 @@ fail:
|
|||
*/
|
||||
static struct dma_async_tx_descriptor *
|
||||
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
||||
unsigned int sg_len, enum dma_data_direction dir,
|
||||
unsigned int sg_len, enum dma_transfer_direction dir,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
|
||||
|
@ -988,7 +1024,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
goto fail;
|
||||
}
|
||||
|
||||
if (dir == DMA_TO_DEVICE) {
|
||||
if (dir == DMA_MEM_TO_DEV) {
|
||||
desc->src_addr = sg_dma_address(sg);
|
||||
desc->dst_addr = edmac->runtime_addr;
|
||||
} else {
|
||||
|
@ -1032,7 +1068,7 @@ fail:
|
|||
static struct dma_async_tx_descriptor *
|
||||
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
|
||||
size_t buf_len, size_t period_len,
|
||||
enum dma_data_direction dir)
|
||||
enum dma_transfer_direction dir)
|
||||
{
|
||||
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
|
||||
struct ep93xx_dma_desc *desc, *first;
|
||||
|
@ -1065,7 +1101,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
|
|||
goto fail;
|
||||
}
|
||||
|
||||
if (dir == DMA_TO_DEVICE) {
|
||||
if (dir == DMA_MEM_TO_DEV) {
|
||||
desc->src_addr = dma_addr + offset;
|
||||
desc->dst_addr = edmac->runtime_addr;
|
||||
} else {
|
||||
|
@ -1133,12 +1169,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
|
|||
return -EINVAL;
|
||||
|
||||
switch (config->direction) {
|
||||
case DMA_FROM_DEVICE:
|
||||
case DMA_DEV_TO_MEM:
|
||||
width = config->src_addr_width;
|
||||
addr = config->src_addr;
|
||||
break;
|
||||
|
||||
case DMA_TO_DEVICE:
|
||||
case DMA_MEM_TO_DEV:
|
||||
width = config->dst_addr_width;
|
||||
addr = config->dst_addr;
|
||||
break;
|
||||
|
|
|
@ -772,7 +772,7 @@ fail:
|
|||
*/
|
||||
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
|
||||
struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
|
||||
enum dma_data_direction direction, unsigned long flags)
|
||||
enum dma_transfer_direction direction, unsigned long flags)
|
||||
{
|
||||
/*
|
||||
* This operation is not supported on the Freescale DMA controller
|
||||
|
@ -819,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
|
|||
return -ENXIO;
|
||||
|
||||
/* we set the controller burst size depending on direction */
|
||||
if (config->direction == DMA_TO_DEVICE)
|
||||
if (config->direction == DMA_MEM_TO_DEV)
|
||||
size = config->dst_addr_width * config->dst_maxburst;
|
||||
else
|
||||
size = config->src_addr_width * config->src_maxburst;
|
||||
|
|
|
@ -107,7 +107,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||
imx_dma_disable(imxdmac->imxdma_channel);
|
||||
return 0;
|
||||
case DMA_SLAVE_CONFIG:
|
||||
if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
|
||||
if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
|
||||
imxdmac->per_address = dmaengine_cfg->src_addr;
|
||||
imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
|
||||
imxdmac->word_size = dmaengine_cfg->src_addr_width;
|
||||
|
@ -224,7 +224,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
|
|||
|
||||
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
|
||||
struct dma_chan *chan, struct scatterlist *sgl,
|
||||
unsigned int sg_len, enum dma_data_direction direction,
|
||||
unsigned int sg_len, enum dma_transfer_direction direction,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
|
||||
|
@ -241,7 +241,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
|
|||
dma_length += sg->length;
|
||||
}
|
||||
|
||||
if (direction == DMA_FROM_DEVICE)
|
||||
if (direction == DMA_DEV_TO_MEM)
|
||||
dmamode = DMA_MODE_READ;
|
||||
else
|
||||
dmamode = DMA_MODE_WRITE;
|
||||
|
@ -271,7 +271,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
|
|||
|
||||
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
|
||||
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
|
||||
size_t period_len, enum dma_data_direction direction)
|
||||
size_t period_len, enum dma_transfer_direction direction)
|
||||
{
|
||||
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
|
||||
struct imxdma_engine *imxdma = imxdmac->imxdma;
|
||||
|
@ -317,7 +317,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
|
|||
imxdmac->sg_list[periods].page_link =
|
||||
((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
|
||||
|
||||
if (direction == DMA_FROM_DEVICE)
|
||||
if (direction == DMA_DEV_TO_MEM)
|
||||
dmamode = DMA_MODE_READ;
|
||||
else
|
||||
dmamode = DMA_MODE_WRITE;
|
||||
|
|
|
@ -247,7 +247,7 @@ struct sdma_engine;
|
|||
struct sdma_channel {
|
||||
struct sdma_engine *sdma;
|
||||
unsigned int channel;
|
||||
enum dma_data_direction direction;
|
||||
enum dma_transfer_direction direction;
|
||||
enum sdma_peripheral_type peripheral_type;
|
||||
unsigned int event_id0;
|
||||
unsigned int event_id1;
|
||||
|
@ -268,6 +268,8 @@ struct sdma_channel {
|
|||
struct dma_async_tx_descriptor desc;
|
||||
dma_cookie_t last_completed;
|
||||
enum dma_status status;
|
||||
unsigned int chn_count;
|
||||
unsigned int chn_real_count;
|
||||
};
|
||||
|
||||
#define IMX_DMA_SG_LOOP (1 << 0)
|
||||
|
@ -503,6 +505,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
|
|||
struct sdma_buffer_descriptor *bd;
|
||||
int i, error = 0;
|
||||
|
||||
sdmac->chn_real_count = 0;
|
||||
/*
|
||||
* non loop mode. Iterate over all descriptors, collect
|
||||
* errors and call callback function
|
||||
|
@ -512,6 +515,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
|
|||
|
||||
if (bd->mode.status & (BD_DONE | BD_RROR))
|
||||
error = -EIO;
|
||||
sdmac->chn_real_count += bd->mode.count;
|
||||
}
|
||||
|
||||
if (error)
|
||||
|
@ -519,9 +523,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
|
|||
else
|
||||
sdmac->status = DMA_SUCCESS;
|
||||
|
||||
sdmac->last_completed = sdmac->desc.cookie;
|
||||
if (sdmac->desc.callback)
|
||||
sdmac->desc.callback(sdmac->desc.callback_param);
|
||||
sdmac->last_completed = sdmac->desc.cookie;
|
||||
}
|
||||
|
||||
static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
|
||||
|
@ -650,7 +654,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
|
|||
struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
|
||||
int ret;
|
||||
|
||||
if (sdmac->direction == DMA_FROM_DEVICE) {
|
||||
if (sdmac->direction == DMA_DEV_TO_MEM) {
|
||||
load_address = sdmac->pc_from_device;
|
||||
} else {
|
||||
load_address = sdmac->pc_to_device;
|
||||
|
@ -832,17 +836,18 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
|
|||
|
||||
static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
|
||||
struct sdma_engine *sdma = sdmac->sdma;
|
||||
dma_cookie_t cookie;
|
||||
|
||||
spin_lock_irq(&sdmac->lock);
|
||||
spin_lock_irqsave(&sdmac->lock, flags);
|
||||
|
||||
cookie = sdma_assign_cookie(sdmac);
|
||||
|
||||
sdma_enable_channel(sdma, sdmac->channel);
|
||||
|
||||
spin_unlock_irq(&sdmac->lock);
|
||||
spin_unlock_irqrestore(&sdmac->lock, flags);
|
||||
|
||||
return cookie;
|
||||
}
|
||||
|
@ -911,7 +916,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
|
|||
|
||||
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
|
||||
struct dma_chan *chan, struct scatterlist *sgl,
|
||||
unsigned int sg_len, enum dma_data_direction direction,
|
||||
unsigned int sg_len, enum dma_transfer_direction direction,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct sdma_channel *sdmac = to_sdma_chan(chan);
|
||||
|
@ -941,6 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
|
|||
goto err_out;
|
||||
}
|
||||
|
||||
sdmac->chn_count = 0;
|
||||
for_each_sg(sgl, sg, sg_len, i) {
|
||||
struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
|
||||
int param;
|
||||
|
@ -957,6 +963,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
|
|||
}
|
||||
|
||||
bd->mode.count = count;
|
||||
sdmac->chn_count += count;
|
||||
|
||||
if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
|
||||
ret = -EINVAL;
|
||||
|
@ -1008,7 +1015,7 @@ err_out:
|
|||
|
||||
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
|
||||
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
|
||||
size_t period_len, enum dma_data_direction direction)
|
||||
size_t period_len, enum dma_transfer_direction direction)
|
||||
{
|
||||
struct sdma_channel *sdmac = to_sdma_chan(chan);
|
||||
struct sdma_engine *sdma = sdmac->sdma;
|
||||
|
@ -1093,7 +1100,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||
sdma_disable_channel(sdmac);
|
||||
return 0;
|
||||
case DMA_SLAVE_CONFIG:
|
||||
if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
|
||||
if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
|
||||
sdmac->per_address = dmaengine_cfg->src_addr;
|
||||
sdmac->watermark_level = dmaengine_cfg->src_maxburst;
|
||||
sdmac->word_size = dmaengine_cfg->src_addr_width;
|
||||
|
@ -1102,6 +1109,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||
sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
|
||||
sdmac->word_size = dmaengine_cfg->dst_addr_width;
|
||||
}
|
||||
sdmac->direction = dmaengine_cfg->direction;
|
||||
return sdma_config_channel(sdmac);
|
||||
default:
|
||||
return -ENOSYS;
|
||||
|
@@ -1119,7 +1127,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,

	last_used = chan->cookie;

-	dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+	dma_set_tx_state(txstate, sdmac->last_completed, last_used,
+			 sdmac->chn_count - sdmac->chn_real_count);

	return sdmac->status;
}
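
With chn_count and chn_real_count tracked, sdma_tx_status() can report a real residue instead of the previous hard-coded 0. On the client side the residue comes back through the dma_tx_state argument; a sketch, with the channel and cookie assumed to be obtained elsewhere:

#include <linux/dmaengine.h>

static size_t demo_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	if (chan->device->device_tx_status(chan, cookie, &state) == DMA_SUCCESS)
		return 0;

	return state.residue;	/* bytes still to be transferred */
}
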
@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
|
|||
* callbacks but must be called with the lock held.
|
||||
*/
|
||||
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
|
||||
struct intel_mid_dma_desc *desc)
|
||||
struct intel_mid_dma_desc *desc)
|
||||
__releases(&midc->lock) __acquires(&midc->lock)
|
||||
{
|
||||
struct dma_async_tx_descriptor *txd = &desc->txd;
|
||||
dma_async_tx_callback callback_txd = NULL;
|
||||
|
@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
|
|||
pci_pool_free(desc->lli_pool, desc->lli,
|
||||
desc->lli_phys);
|
||||
pci_pool_destroy(desc->lli_pool);
|
||||
desc->lli = NULL;
|
||||
}
|
||||
list_move(&desc->desc_node, &midc->free_list);
|
||||
midc->busy = false;
|
||||
|
@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
|
|||
midc->dma->block_size);
|
||||
/*Populate SAR and DAR values*/
|
||||
sg_phy_addr = sg_phys(sg);
|
||||
if (desc->dirn == DMA_TO_DEVICE) {
|
||||
if (desc->dirn == DMA_MEM_TO_DEV) {
|
||||
lli_bloc_desc->sar = sg_phy_addr;
|
||||
lli_bloc_desc->dar = mids->dma_slave.dst_addr;
|
||||
} else if (desc->dirn == DMA_FROM_DEVICE) {
|
||||
} else if (desc->dirn == DMA_DEV_TO_MEM) {
|
||||
lli_bloc_desc->sar = mids->dma_slave.src_addr;
|
||||
lli_bloc_desc->dar = sg_phy_addr;
|
||||
}
|
||||
|
@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
|
|||
|
||||
ret = dma_async_is_complete(cookie, last_complete, last_used);
|
||||
if (ret != DMA_SUCCESS) {
|
||||
spin_lock_bh(&midc->lock);
|
||||
midc_scan_descriptors(to_middma_device(chan->device), midc);
|
||||
spin_unlock_bh(&midc->lock);
|
||||
|
||||
last_complete = midc->completed;
|
||||
last_used = chan->cookie;
|
||||
|
@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
|
|||
pci_pool_free(desc->lli_pool, desc->lli,
|
||||
desc->lli_phys);
|
||||
pci_pool_destroy(desc->lli_pool);
|
||||
desc->lli = NULL;
|
||||
}
|
||||
list_move(&desc->desc_node, &midc->free_list);
|
||||
}
|
||||
|
@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
|
|||
if (midc->dma->pimr_mask) {
|
||||
cfg_hi.cfgx.protctl = 0x0; /*default value*/
|
||||
cfg_hi.cfgx.fifo_mode = 1;
|
||||
if (mids->dma_slave.direction == DMA_TO_DEVICE) {
|
||||
if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
|
||||
cfg_hi.cfgx.src_per = 0;
|
||||
if (mids->device_instance == 0)
|
||||
cfg_hi.cfgx.dst_per = 3;
|
||||
if (mids->device_instance == 1)
|
||||
cfg_hi.cfgx.dst_per = 1;
|
||||
} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
|
||||
} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
|
||||
if (mids->device_instance == 0)
|
||||
cfg_hi.cfgx.src_per = 2;
|
||||
if (mids->device_instance == 1)
|
||||
|
@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
|
|||
ctl_lo.ctlx.sinc = 0;
|
||||
ctl_lo.ctlx.dinc = 0;
|
||||
} else {
|
||||
if (mids->dma_slave.direction == DMA_TO_DEVICE) {
|
||||
if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
|
||||
ctl_lo.ctlx.sinc = 0;
|
||||
ctl_lo.ctlx.dinc = 2;
|
||||
ctl_lo.ctlx.tt_fc = 1;
|
||||
} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
|
||||
} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
|
||||
ctl_lo.ctlx.sinc = 2;
|
||||
ctl_lo.ctlx.dinc = 0;
|
||||
ctl_lo.ctlx.tt_fc = 2;
|
||||
|
@ -732,7 +737,7 @@ err_desc_get:
|
|||
*/
|
||||
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
|
||||
struct dma_chan *chan, struct scatterlist *sgl,
|
||||
unsigned int sg_len, enum dma_data_direction direction,
|
||||
unsigned int sg_len, enum dma_transfer_direction direction,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct intel_mid_dma_chan *midc = NULL;
|
||||
|
@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
|
|||
pm_runtime_get_sync(&mid->pdev->dev);
|
||||
|
||||
if (mid->state == SUSPENDED) {
|
||||
if (dma_resume(mid->pdev)) {
|
||||
if (dma_resume(&mid->pdev->dev)) {
|
||||
pr_err("ERR_MDMA: resume failed");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
|
|||
LNW_PERIPHRAL_MASK_SIZE);
|
||||
if (dma->mask_reg == NULL) {
|
||||
pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
|
||||
return -ENOMEM;
|
||||
err = -ENOMEM;
|
||||
goto err_ioremap;
|
||||
}
|
||||
} else
|
||||
dma->mask_reg = NULL;
|
||||
|
@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
|
|||
err_engine:
|
||||
free_irq(pdev->irq, dma);
|
||||
err_irq:
|
||||
if (dma->mask_reg)
|
||||
iounmap(dma->mask_reg);
|
||||
err_ioremap:
|
||||
pci_pool_destroy(dma->dma_pool);
|
||||
err_dma_pool:
|
||||
pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
|
||||
|
@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
|
|||
*
|
||||
* This function is called by OS when a power event occurs
|
||||
*/
|
||||
int dma_suspend(struct pci_dev *pci, pm_message_t state)
|
||||
static int dma_suspend(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pci = to_pci_dev(dev);
|
||||
int i;
|
||||
struct middma_device *device = pci_get_drvdata(pci);
|
||||
pr_debug("MDMA: dma_suspend called\n");
|
||||
|
@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
|
|||
*
|
||||
* This function is called by OS when a power event occurs
|
||||
*/
|
||||
int dma_resume(struct pci_dev *pci)
|
||||
int dma_resume(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pci = to_pci_dev(dev);
|
||||
int ret;
|
||||
struct middma_device *device = pci_get_drvdata(pci);
|
||||
|
||||
|
@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = {
|
|||
.runtime_suspend = dma_runtime_suspend,
|
||||
.runtime_resume = dma_runtime_resume,
|
||||
.runtime_idle = dma_runtime_idle,
|
||||
.suspend = dma_suspend,
|
||||
.resume = dma_resume,
|
||||
};
|
||||
|
||||
static struct pci_driver intel_mid_dma_pci_driver = {
|
||||
|
@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = {
|
|||
.probe = intel_mid_dma_probe,
|
||||
.remove = __devexit_p(intel_mid_dma_remove),
|
||||
#ifdef CONFIG_PM
|
||||
.suspend = dma_suspend,
|
||||
.resume = dma_resume,
|
||||
.driver = {
|
||||
.pm = &intel_mid_dma_pm,
|
||||
},
|
||||
|
|
|
@ -262,7 +262,7 @@ struct intel_mid_dma_desc {
|
|||
unsigned int lli_length;
|
||||
unsigned int current_lli;
|
||||
dma_addr_t next;
|
||||
enum dma_data_direction dirn;
|
||||
enum dma_transfer_direction dirn;
|
||||
enum dma_status status;
|
||||
enum dma_slave_buswidth width; /*width of DMA txn*/
|
||||
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
|
||||
|
@ -296,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
|
|||
}
|
||||
|
||||
|
||||
int dma_resume(struct pci_dev *pci);
|
||||
int dma_resume(struct device *dev);
|
||||
|
||||
#endif /*__INTEL_MID_DMAC_REGS_H__*/
|
||||
|
|
|
@ -1735,8 +1735,6 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
|
|||
spin_unlock_bh(&iop_chan->lock);
|
||||
}
|
||||
|
||||
MODULE_ALIAS("platform:iop-adma");
|
||||
|
||||
static struct platform_driver iop_adma_driver = {
|
||||
.probe = iop_adma_probe,
|
||||
.remove = __devexit_p(iop_adma_remove),
|
||||
|
@ -1746,19 +1744,9 @@ static struct platform_driver iop_adma_driver = {
|
|||
},
|
||||
};
|
||||
|
||||
static int __init iop_adma_init (void)
|
||||
{
|
||||
return platform_driver_register(&iop_adma_driver);
|
||||
}
|
||||
|
||||
static void __exit iop_adma_exit (void)
|
||||
{
|
||||
platform_driver_unregister(&iop_adma_driver);
|
||||
return;
|
||||
}
|
||||
module_exit(iop_adma_exit);
|
||||
module_init(iop_adma_init);
|
||||
module_platform_driver(iop_adma_driver);
|
||||
|
||||
MODULE_AUTHOR("Intel Corporation");
|
||||
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS("platform:iop-adma");
|
||||
|
|
|
@ -312,7 +312,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
|
|||
case IPU_PIX_FMT_RGB565:
|
||||
params->ip.bpp = 2;
|
||||
params->ip.pfs = 4;
|
||||
params->ip.npb = 7;
|
||||
params->ip.npb = 15;
|
||||
params->ip.sat = 2; /* SAT = 32-bit access */
|
||||
params->ip.ofs0 = 0; /* Red bit offset */
|
||||
params->ip.ofs1 = 5; /* Green bit offset */
|
||||
|
@ -422,12 +422,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
|
|||
params->pp.nsb = 1;
|
||||
}
|
||||
|
||||
static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
|
||||
uint16_t burst_pixels)
|
||||
{
|
||||
params->pp.npb = burst_pixels - 1;
|
||||
}
|
||||
|
||||
static void ipu_ch_param_set_buffer(union chan_param_mem *params,
|
||||
dma_addr_t buf0, dma_addr_t buf1)
|
||||
{
|
||||
|
@ -690,23 +684,6 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
|
|||
ipu_ch_param_set_size(¶ms, pixel_fmt, width, height, stride_bytes);
|
||||
ipu_ch_param_set_buffer(¶ms, phyaddr_0, phyaddr_1);
|
||||
ipu_ch_param_set_rotation(¶ms, rot_mode);
|
||||
/* Some channels (rotation) have restriction on burst length */
|
||||
switch (channel) {
|
||||
case IDMAC_IC_7: /* Hangs with burst 8, 16, other values
|
||||
invalid - Table 44-30 */
|
||||
/*
|
||||
ipu_ch_param_set_burst_size(¶ms, 8);
|
||||
*/
|
||||
break;
|
||||
case IDMAC_SDC_0:
|
||||
case IDMAC_SDC_1:
|
||||
/* In original code only IPU_PIX_FMT_RGB565 was setting burst */
|
||||
ipu_ch_param_set_burst_size(¶ms, 16);
|
||||
break;
|
||||
case IDMAC_IC_0:
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&ipu->lock, flags);
|
||||
|
||||
|
@ -1364,7 +1341,7 @@ static void ipu_gc_tasklet(unsigned long arg)
|
|||
/* Allocate and initialise a transfer descriptor. */
|
||||
static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
|
||||
struct scatterlist *sgl, unsigned int sg_len,
|
||||
enum dma_data_direction direction, unsigned long tx_flags)
|
||||
enum dma_transfer_direction direction, unsigned long tx_flags)
|
||||
{
|
||||
struct idmac_channel *ichan = to_idmac_chan(chan);
|
||||
struct idmac_tx_desc *desc = NULL;
|
||||
|
@ -1376,7 +1353,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
|
|||
chan->chan_id != IDMAC_IC_7)
|
||||
return NULL;
|
||||
|
||||
if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
|
||||
if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
|
||||
dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -835,17 +835,7 @@ static struct platform_driver mpc_dma_driver = {
|
|||
},
|
||||
};
|
||||
|
||||
static int __init mpc_dma_init(void)
|
||||
{
|
||||
return platform_driver_register(&mpc_dma_driver);
|
||||
}
|
||||
module_init(mpc_dma_init);
|
||||
|
||||
static void __exit mpc_dma_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&mpc_dma_driver);
|
||||
}
|
||||
module_exit(mpc_dma_exit);
|
||||
module_platform_driver(mpc_dma_driver);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
|
||||
|
|
|
@ -44,7 +44,6 @@
|
|||
#define HW_APBHX_CTRL0 0x000
|
||||
#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
|
||||
#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
|
||||
#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
|
||||
#define BP_APBH_CTRL0_RESET_CHANNEL 16
|
||||
#define HW_APBHX_CTRL1 0x010
|
||||
#define HW_APBHX_CTRL2 0x020
|
||||
|
@ -111,6 +110,7 @@ struct mxs_dma_chan {
|
|||
int chan_irq;
|
||||
struct mxs_dma_ccw *ccw;
|
||||
dma_addr_t ccw_phys;
|
||||
int desc_count;
|
||||
dma_cookie_t last_completed;
|
||||
enum dma_status status;
|
||||
unsigned int flags;
|
||||
|
@ -130,23 +130,6 @@ struct mxs_dma_engine {
|
|||
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
|
||||
};
|
||||
|
||||
static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
|
||||
{
|
||||
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
|
||||
int chan_id = mxs_chan->chan.chan_id;
|
||||
int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
|
||||
|
||||
/* enable apbh channel clock */
|
||||
if (dma_is_apbh()) {
|
||||
if (apbh_is_old())
|
||||
writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
|
||||
mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
|
||||
else
|
||||
writel(1 << chan_id,
|
||||
mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
|
||||
}
|
||||
}
|
||||
|
||||
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
|
||||
{
|
||||
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
|
||||
|
@ -165,9 +148,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
|
|||
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
|
||||
int chan_id = mxs_chan->chan.chan_id;
|
||||
|
||||
/* clkgate needs to be enabled before writing other registers */
|
||||
mxs_dma_clkgate(mxs_chan, 1);
|
||||
|
||||
/* set cmd_addr up */
|
||||
writel(mxs_chan->ccw_phys,
|
||||
mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
|
||||
|
@ -178,9 +158,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
|
|||
|
||||
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
|
||||
{
|
||||
/* disable apbh channel clock */
|
||||
mxs_dma_clkgate(mxs_chan, 0);
|
||||
|
||||
mxs_chan->status = DMA_SUCCESS;
|
||||
}
|
||||
|
||||
|
@@ -268,7 +245,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
	/*
	 * When both completion and error of termination bits set at the
	 * same time, we do not take it as an error. IOW, it only becomes
-	 * an error we need to handler here in case of ether it's (1) an bus
+	 * an error we need to handle here in case of either it's (1) a bus
	 * error or (2) a termination error with no completion.
	 */
	stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
@ -338,10 +315,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
|
|||
if (ret)
|
||||
goto err_clk;
|
||||
|
||||
/* clkgate needs to be enabled for reset to finish */
|
||||
mxs_dma_clkgate(mxs_chan, 1);
|
||||
mxs_dma_reset_chan(mxs_chan);
|
||||
mxs_dma_clkgate(mxs_chan, 0);
|
||||
|
||||
dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
|
||||
mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
|
||||
|
@ -377,7 +351,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
|
|||
|
||||
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
|
||||
struct dma_chan *chan, struct scatterlist *sgl,
|
||||
unsigned int sg_len, enum dma_data_direction direction,
|
||||
unsigned int sg_len, enum dma_transfer_direction direction,
|
||||
unsigned long append)
|
||||
{
|
||||
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
|
||||
|
@ -386,7 +360,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
|
|||
struct scatterlist *sg;
|
||||
int i, j;
|
||||
u32 *pio;
|
||||
static int idx;
|
||||
int idx = append ? mxs_chan->desc_count : 0;
|
||||
|
||||
if (mxs_chan->status == DMA_IN_PROGRESS && !append)
|
||||
return NULL;
|
||||
|
@ -417,7 +391,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
|
|||
idx = 0;
|
||||
}
|
||||
|
||||
if (direction == DMA_NONE) {
|
||||
if (direction == DMA_TRANS_NONE) {
|
||||
ccw = &mxs_chan->ccw[idx++];
|
||||
pio = (u32 *) sgl;
|
||||
|
||||
|
@ -450,7 +424,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
|
|||
ccw->bits |= CCW_CHAIN;
|
||||
ccw->bits |= CCW_HALT_ON_TERM;
|
||||
ccw->bits |= CCW_TERM_FLUSH;
|
||||
ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
|
||||
ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
|
||||
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
|
||||
COMMAND);
|
||||
|
||||
|
@ -462,6 +436,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
|
|||
}
|
||||
}
|
||||
}
|
||||
mxs_chan->desc_count = idx;
|
||||
|
||||
return &mxs_chan->desc;
|
||||
|
||||
|
@ -472,7 +447,7 @@ err_out:
|
|||
|
||||
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
|
||||
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
|
||||
size_t period_len, enum dma_data_direction direction)
|
||||
size_t period_len, enum dma_transfer_direction direction)
|
||||
{
|
||||
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
|
||||
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
|
||||
|
@ -515,7 +490,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
|
|||
ccw->bits |= CCW_IRQ;
|
||||
ccw->bits |= CCW_HALT_ON_TERM;
|
||||
ccw->bits |= CCW_TERM_FLUSH;
|
||||
ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
|
||||
ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
|
||||
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
|
||||
|
||||
dma_addr += period_len;
|
||||
|
@ -523,6 +498,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
|
|||
|
||||
i++;
|
||||
}
|
||||
mxs_chan->desc_count = i;
|
||||
|
||||
return &mxs_chan->desc;
|
||||
|
||||
|
@ -539,8 +515,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||
|
||||
switch (cmd) {
|
||||
case DMA_TERMINATE_ALL:
|
||||
mxs_dma_disable_chan(mxs_chan);
|
||||
mxs_dma_reset_chan(mxs_chan);
|
||||
mxs_dma_disable_chan(mxs_chan);
|
||||
break;
|
||||
case DMA_PAUSE:
|
||||
mxs_dma_pause_chan(mxs_chan);
|
||||
|
@ -580,7 +556,7 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
|
|||
|
||||
ret = clk_prepare_enable(mxs_dma->clk);
|
||||
if (ret)
|
||||
goto err_out;
|
||||
return ret;
|
||||
|
||||
ret = mxs_reset_block(mxs_dma->base);
|
||||
if (ret)
|
||||
|
@ -604,11 +580,8 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
|
|||
writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
|
||||
mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
|
||||
|
||||
clk_disable_unprepare(mxs_dma->clk);
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
clk_disable_unprepare(mxs_dma->clk);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*
|
||||
* Topcliff PCH DMA controller driver
|
||||
* Copyright (c) 2010 Intel Corporation
|
||||
* Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
|
||||
* Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
|
@ -99,7 +99,7 @@ struct pch_dma_desc {
|
|||
struct pch_dma_chan {
|
||||
struct dma_chan chan;
|
||||
void __iomem *membase;
|
||||
enum dma_data_direction dir;
|
||||
enum dma_transfer_direction dir;
|
||||
struct tasklet_struct tasklet;
|
||||
unsigned long err_status;
|
||||
|
||||
|
@ -224,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan)
|
|||
mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
|
||||
(DMA_CTL0_BITS_PER_CH * chan->chan_id));
|
||||
val &= mask_mode;
|
||||
if (pd_chan->dir == DMA_TO_DEVICE)
|
||||
if (pd_chan->dir == DMA_MEM_TO_DEV)
|
||||
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
|
||||
DMA_CTL0_DIR_SHIFT_BITS);
|
||||
else
|
||||
|
@ -242,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan)
|
|||
mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
|
||||
(DMA_CTL0_BITS_PER_CH * ch));
|
||||
val &= mask_mode;
|
||||
if (pd_chan->dir == DMA_TO_DEVICE)
|
||||
if (pd_chan->dir == DMA_MEM_TO_DEV)
|
||||
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
|
||||
DMA_CTL0_DIR_SHIFT_BITS);
|
||||
else
|
||||
|
@ -607,7 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan)
|
|||
|
||||
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
|
||||
struct scatterlist *sgl, unsigned int sg_len,
|
||||
enum dma_data_direction direction, unsigned long flags)
|
||||
enum dma_transfer_direction direction, unsigned long flags)
|
||||
{
|
||||
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
|
||||
struct pch_dma_slave *pd_slave = chan->private;
|
||||
|
@ -623,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if (direction == DMA_FROM_DEVICE)
|
||||
if (direction == DMA_DEV_TO_MEM)
|
||||
reg = pd_slave->rx_reg;
|
||||
else if (direction == DMA_TO_DEVICE)
|
||||
else if (direction == DMA_MEM_TO_DEV)
|
||||
reg = pd_slave->tx_reg;
|
||||
else
|
||||
return NULL;
|
||||
|
@ -1018,6 +1018,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
|
|||
#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
|
||||
#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
|
||||
#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
|
||||
#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
|
||||
#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
|
||||
|
||||
DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
|
||||
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
|
||||
|
@ -1030,6 +1032,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
|
|||
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
|
||||
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
|
||||
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
|
||||
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
|
||||
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
|
||||
{ 0, },
|
||||
};
|
||||
|
||||
|
@ -1057,7 +1061,7 @@ static void __exit pch_dma_exit(void)
|
|||
module_init(pch_dma_init);
|
||||
module_exit(pch_dma_exit);
|
||||
|
||||
MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
|
||||
MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
|
||||
"DMA controller driver");
|
||||
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
|
|
@@ -350,14 +350,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

-		if (slave_config->direction == DMA_TO_DEVICE) {
+		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
-		} else if (slave_config->direction == DMA_FROM_DEVICE) {
+		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
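
struct dma_slave_config carries the same migrated direction field, so a client configuring a memory-to-peripheral channel on pl330 (or any engine with a device_control hook) fills it roughly as below; the FIFO address, bus width and burst values are placeholders, not taken from this patch:

#include <linux/dmaengine.h>

static int demo_config_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	/* DMA_SLAVE_CONFIG ends up in pl330_control() shown above */
	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}
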
@ -621,7 +621,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
|
|||
|
||||
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
|
||||
struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
|
||||
size_t period_len, enum dma_data_direction direction)
|
||||
size_t period_len, enum dma_transfer_direction direction)
|
||||
{
|
||||
struct dma_pl330_desc *desc;
|
||||
struct dma_pl330_chan *pch = to_pchan(chan);
|
||||
|
@ -636,14 +636,14 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
|
|||
}
|
||||
|
||||
switch (direction) {
|
||||
case DMA_TO_DEVICE:
|
||||
case DMA_MEM_TO_DEV:
|
||||
desc->rqcfg.src_inc = 1;
|
||||
desc->rqcfg.dst_inc = 0;
|
||||
desc->req.rqtype = MEMTODEV;
|
||||
src = dma_addr;
|
||||
dst = pch->fifo_addr;
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
case DMA_DEV_TO_MEM:
|
||||
desc->rqcfg.src_inc = 0;
|
||||
desc->rqcfg.dst_inc = 1;
|
||||
desc->req.rqtype = DEVTOMEM;
|
||||
|
@ -710,7 +710,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
|
|||
|
||||
static struct dma_async_tx_descriptor *
|
||||
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
||||
unsigned int sg_len, enum dma_data_direction direction,
|
||||
unsigned int sg_len, enum dma_transfer_direction direction,
|
||||
unsigned long flg)
|
||||
{
|
||||
struct dma_pl330_desc *first, *desc = NULL;
|
||||
|
@ -759,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
else
|
||||
list_add_tail(&desc->node, &first->node);
|
||||
|
||||
if (direction == DMA_TO_DEVICE) {
|
||||
if (direction == DMA_MEM_TO_DEV) {
|
||||
desc->rqcfg.src_inc = 1;
|
||||
desc->rqcfg.dst_inc = 0;
|
||||
desc->req.rqtype = MEMTODEV;
|
||||
|
@ -834,17 +834,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
|
|||
|
||||
amba_set_drvdata(adev, pdmac);
|
||||
|
||||
#ifdef CONFIG_PM_RUNTIME
|
||||
/* to use the runtime PM helper functions */
|
||||
pm_runtime_enable(&adev->dev);
|
||||
|
||||
/* enable the power domain */
|
||||
if (pm_runtime_get_sync(&adev->dev)) {
|
||||
dev_err(&adev->dev, "failed to get runtime pm\n");
|
||||
ret = -ENODEV;
|
||||
goto probe_err1;
|
||||
}
|
||||
#else
|
||||
#ifndef CONFIG_PM_RUNTIME
|
||||
/* enable dma clk */
|
||||
clk_enable(pdmac->clk);
|
||||
#endif
|
||||
|
@ -977,10 +967,7 @@ static int __devexit pl330_remove(struct amba_device *adev)
|
|||
res = &adev->res;
|
||||
release_mem_region(res->start, resource_size(res));
|
||||
|
||||
#ifdef CONFIG_PM_RUNTIME
|
||||
pm_runtime_put(&adev->dev);
|
||||
pm_runtime_disable(&adev->dev);
|
||||
#else
|
||||
#ifndef CONFIG_PM_RUNTIME
|
||||
clk_disable(pdmac->clk);
|
||||
#endif
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@
|
|||
#include <linux/interrupt.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/sh_dma.h>
|
||||
|
@ -57,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices);
|
|||
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
|
||||
|
||||
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
|
||||
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
|
||||
|
||||
static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
|
||||
{
|
||||
struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
|
||||
|
||||
__raw_writel(data, shdev->chan_reg +
|
||||
shdev->pdata->channel[sh_dc->id].chclr_offset);
|
||||
}
|
||||
|
||||
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
|
||||
{
|
||||
|
@ -129,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
|
|||
|
||||
dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
|
||||
|
||||
if (shdev->pdata->chclr_present) {
|
||||
int i;
|
||||
for (i = 0; i < shdev->pdata->channel_num; i++) {
|
||||
struct sh_dmae_chan *sh_chan = shdev->chan[i];
|
||||
if (sh_chan)
|
||||
chclr_write(sh_chan, 0);
|
||||
}
|
||||
}
|
||||
|
||||
dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
|
||||
|
||||
dmaor = dmaor_read(shdev);
|
||||
|
@ -139,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
|
|||
dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
|
||||
return -EIO;
|
||||
}
|
||||
if (shdev->pdata->dmaor_init & ~dmaor)
|
||||
dev_warn(shdev->common.dev,
|
||||
"DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
|
||||
dmaor, shdev->pdata->dmaor_init);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -259,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
|
||||
|
||||
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
|
||||
{
|
||||
struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
|
||||
|
@ -340,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
|
|||
sh_chan_xfer_ld_queue(sh_chan);
|
||||
sh_chan->pm_state = DMAE_PM_ESTABLISHED;
|
||||
}
|
||||
} else {
|
||||
sh_chan->pm_state = DMAE_PM_PENDING;
|
||||
}
|
||||
|
||||
spin_unlock_irq(&sh_chan->desc_lock);
|
||||
|
@ -479,19 +500,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
|
|||
* @sh_chan: DMA channel
|
||||
* @flags: DMA transfer flags
|
||||
* @dest: destination DMA address, incremented when direction equals
|
||||
* DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
|
||||
* DMA_DEV_TO_MEM
|
||||
* @src: source DMA address, incremented when direction equals
|
||||
* DMA_TO_DEVICE or DMA_BIDIRECTIONAL
|
||||
* DMA_MEM_TO_DEV
|
||||
* @len: DMA transfer length
|
||||
* @first: if NULL, set to the current descriptor and cookie set to -EBUSY
|
||||
* @direction: needed for slave DMA to decide which address to keep constant,
|
||||
* equals DMA_BIDIRECTIONAL for MEMCPY
|
||||
* equals DMA_MEM_TO_MEM for MEMCPY
|
||||
* Returns 0 or an error
|
||||
* Locks: called with desc_lock held
|
||||
*/
|
||||
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
|
||||
unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
|
||||
struct sh_desc **first, enum dma_data_direction direction)
|
||||
struct sh_desc **first, enum dma_transfer_direction direction)
|
||||
{
|
||||
struct sh_desc *new;
|
||||
size_t copy_size;
|
||||
|
@ -531,9 +552,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
|
|||
new->direction = direction;
|
||||
|
||||
*len -= copy_size;
|
||||
if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
|
||||
if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
|
||||
*src += copy_size;
|
||||
if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
|
||||
if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
|
||||
*dest += copy_size;
|
||||
|
||||
return new;
|
||||
|
@ -546,12 +567,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
|
|||
* converted to scatter-gather to guarantee consistent locking and a correct
|
||||
* list manipulation. For slave DMA direction carries the usual meaning, and,
|
||||
* logically, the SG list is RAM and the addr variable contains slave address,
|
||||
* e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
|
||||
* e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
|
||||
* and the SG list contains only one element and points at the source buffer.
|
||||
*/
|
||||
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
|
||||
struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
|
||||
enum dma_data_direction direction, unsigned long flags)
|
||||
enum dma_transfer_direction direction, unsigned long flags)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
struct sh_desc *first = NULL, *new = NULL /* compiler... */;
|
||||
|
@ -592,7 +613,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
|
|||
dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
|
||||
i, sg, len, (unsigned long long)sg_addr);
|
||||
|
||||
if (direction == DMA_FROM_DEVICE)
|
||||
if (direction == DMA_DEV_TO_MEM)
|
||||
new = sh_dmae_add_desc(sh_chan, flags,
|
||||
&sg_addr, addr, &len, &first,
|
||||
direction);
|
||||
|
@ -646,13 +667,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
|
|||
sg_dma_address(&sg) = dma_src;
|
||||
sg_dma_len(&sg) = len;
|
||||
|
||||
return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
|
||||
return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
|
||||
flags);
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
|
||||
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
|
||||
enum dma_data_direction direction, unsigned long flags)
|
||||
enum dma_transfer_direction direction, unsigned long flags)
|
||||
{
|
||||
struct sh_dmae_slave *param;
|
||||
struct sh_dmae_chan *sh_chan;
|
||||
|
@ -996,7 +1017,7 @@ static void dmae_do_tasklet(unsigned long data)
|
|||
spin_lock_irq(&sh_chan->desc_lock);
|
||||
list_for_each_entry(desc, &sh_chan->ld_queue, node) {
|
||||
if (desc->mark == DESC_SUBMITTED &&
|
||||
((desc->direction == DMA_FROM_DEVICE &&
|
||||
((desc->direction == DMA_DEV_TO_MEM &&
|
||||
(desc->hw.dar + desc->hw.tcr) == dar_buf) ||
|
||||
(desc->hw.sar + desc->hw.tcr) == sar_buf)) {
|
||||
dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
|
||||
|
@ -1225,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
|
|||
|
||||
platform_set_drvdata(pdev, shdev);
|
||||
|
||||
shdev->common.dev = &pdev->dev;
|
||||
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
pm_runtime_get_sync(&pdev->dev);
|
||||
|
||||
|
@ -1254,7 +1277,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
|
|||
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
|
||||
shdev->common.device_control = sh_dmae_control;
|
||||
|
||||
shdev->common.dev = &pdev->dev;
|
||||
/* Default transfer size of 32 bytes requires 32-byte alignment */
|
||||
shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
|
||||
|
||||
|
@ -1435,22 +1457,17 @@ static int sh_dmae_runtime_resume(struct device *dev)
|
|||
#ifdef CONFIG_PM
|
||||
static int sh_dmae_suspend(struct device *dev)
|
||||
{
|
||||
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < shdev->pdata->channel_num; i++) {
|
||||
struct sh_dmae_chan *sh_chan = shdev->chan[i];
|
||||
if (sh_chan->descs_allocated)
|
||||
sh_chan->pm_error = pm_runtime_put_sync(dev);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sh_dmae_resume(struct device *dev)
|
||||
{
|
||||
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
|
||||
int i;
|
||||
int i, ret;
|
||||
|
||||
ret = sh_dmae_rst(shdev);
|
||||
if (ret < 0)
|
||||
dev_err(dev, "Failed to reset!\n");
|
||||
|
||||
for (i = 0; i < shdev->pdata->channel_num; i++) {
|
||||
struct sh_dmae_chan *sh_chan = shdev->chan[i];
|
||||
|
@ -1459,9 +1476,6 @@ static int sh_dmae_resume(struct device *dev)
|
|||
if (!sh_chan->descs_allocated)
|
||||
continue;
|
||||
|
||||
if (!sh_chan->pm_error)
|
||||
pm_runtime_get_sync(dev);
|
||||
|
||||
if (param) {
|
||||
const struct sh_dmae_slave_config *cfg = param->config;
|
||||
dmae_set_dmars(sh_chan, cfg->mid_rid);
|
||||
|
|
|
@ -0,0 +1,707 @@
|
|||
/*
|
||||
* DMA controller driver for CSR SiRFprimaII
|
||||
*
|
||||
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
|
||||
*
|
||||
* Licensed under GPLv2 or later.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/sirfsoc_dma.h>
|
||||
|
||||
#define SIRFSOC_DMA_DESCRIPTORS 16
|
||||
#define SIRFSOC_DMA_CHANNELS 16
|
||||
|
||||
#define SIRFSOC_DMA_CH_ADDR 0x00
|
||||
#define SIRFSOC_DMA_CH_XLEN 0x04
|
||||
#define SIRFSOC_DMA_CH_YLEN 0x08
|
||||
#define SIRFSOC_DMA_CH_CTRL 0x0C
|
||||
|
||||
#define SIRFSOC_DMA_WIDTH_0 0x100
|
||||
#define SIRFSOC_DMA_CH_VALID 0x140
|
||||
#define SIRFSOC_DMA_CH_INT 0x144
|
||||
#define SIRFSOC_DMA_INT_EN 0x148
|
||||
#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
|
||||
|
||||
#define SIRFSOC_DMA_MODE_CTRL_BIT 4
|
||||
#define SIRFSOC_DMA_DIR_CTRL_BIT 5
|
||||
|
||||
/* xlen and dma_width register is in 4 bytes boundary */
|
||||
#define SIRFSOC_DMA_WORD_LEN 4
|
||||
|
||||
struct sirfsoc_dma_desc {
|
||||
struct dma_async_tx_descriptor desc;
|
||||
struct list_head node;
|
||||
|
||||
/* SiRFprimaII 2D-DMA parameters */
|
||||
|
||||
int xlen; /* DMA xlen */
|
||||
int ylen; /* DMA ylen */
|
||||
int width; /* DMA width */
|
||||
int dir;
|
||||
bool cyclic; /* is loop DMA? */
|
||||
u32 addr; /* DMA buffer address */
|
||||
};
|
||||
|
||||
struct sirfsoc_dma_chan {
|
||||
struct dma_chan chan;
|
||||
struct list_head free;
|
||||
struct list_head prepared;
|
||||
struct list_head queued;
|
||||
struct list_head active;
|
||||
struct list_head completed;
|
||||
dma_cookie_t completed_cookie;
|
||||
unsigned long happened_cyclic;
|
||||
unsigned long completed_cyclic;
|
||||
|
||||
/* Lock for this structure */
|
||||
spinlock_t lock;
|
||||
|
||||
int mode;
|
||||
};
|
||||
|
||||
struct sirfsoc_dma {
|
||||
struct dma_device dma;
|
||||
struct tasklet_struct tasklet;
|
||||
struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
|
||||
void __iomem *base;
|
||||
int irq;
|
||||
};
|
||||
|
||||
#define DRV_NAME "sirfsoc_dma"
|
||||
|
||||
/* Convert struct dma_chan to struct sirfsoc_dma_chan */
|
||||
static inline
|
||||
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
|
||||
{
|
||||
return container_of(c, struct sirfsoc_dma_chan, chan);
|
||||
}
|
||||
|
||||
/* Convert struct dma_chan to struct sirfsoc_dma */
|
||||
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
|
||||
{
|
||||
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
|
||||
return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
|
||||
}
|
||||
|
||||
/* Execute all queued DMA descriptors */
|
||||
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
|
||||
{
|
||||
struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
|
||||
int cid = schan->chan.chan_id;
|
||||
struct sirfsoc_dma_desc *sdesc = NULL;
|
||||
|
||||
/*
|
||||
* lock has been held by functions calling this, so we don't hold
|
||||
* lock again
|
||||
*/
|
||||
|
||||
sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
|
||||
node);
|
||||
/* Move the first queued descriptor to active list */
|
||||
list_move_tail(&schan->queued, &schan->active);
|
||||
|
||||
/* Start the DMA transfer */
|
||||
writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
|
||||
cid * 4);
|
||||
writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
|
||||
(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
|
||||
sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
|
||||
writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
|
||||
SIRFSOC_DMA_CH_XLEN);
|
||||
writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
|
||||
SIRFSOC_DMA_CH_YLEN);
|
||||
writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
|
||||
(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
|
||||
|
||||
/*
|
||||
* writel has an implict memory write barrier to make sure data is
|
||||
* flushed into memory before starting DMA
|
||||
*/
|
||||
writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
|
||||
|
||||
if (sdesc->cyclic) {
|
||||
writel((1 << cid) | 1 << (cid + 16) |
|
||||
readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
|
||||
sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
|
||||
schan->happened_cyclic = schan->completed_cyclic = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* Interrupt handler */
|
||||
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
|
||||
{
|
||||
struct sirfsoc_dma *sdma = data;
|
||||
struct sirfsoc_dma_chan *schan;
|
||||
struct sirfsoc_dma_desc *sdesc = NULL;
|
||||
u32 is;
|
||||
int ch;
|
||||
|
||||
is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
|
||||
while ((ch = fls(is) - 1) >= 0) {
|
||||
is &= ~(1 << ch);
|
||||
writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
|
||||
schan = &sdma->channels[ch];
|
||||
|
||||
spin_lock(&schan->lock);
|
||||
|
||||
sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
|
||||
node);
|
||||
if (!sdesc->cyclic) {
|
||||
/* Execute queued descriptors */
|
||||
list_splice_tail_init(&schan->active, &schan->completed);
|
||||
if (!list_empty(&schan->queued))
|
||||
sirfsoc_dma_execute(schan);
|
||||
} else
|
||||
schan->happened_cyclic++;
|
||||
|
||||
spin_unlock(&schan->lock);
|
||||
}
|
||||
|
||||
/* Schedule tasklet */
|
||||
tasklet_schedule(&sdma->tasklet);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/* process completed descriptors */
|
||||
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
|
||||
{
|
||||
dma_cookie_t last_cookie = 0;
|
||||
struct sirfsoc_dma_chan *schan;
|
||||
struct sirfsoc_dma_desc *sdesc;
|
||||
struct dma_async_tx_descriptor *desc;
|
||||
unsigned long flags;
|
||||
unsigned long happened_cyclic;
|
||||
LIST_HEAD(list);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < sdma->dma.chancnt; i++) {
|
||||
schan = &sdma->channels[i];
|
||||
|
||||
/* Get all completed descriptors */
|
||||
spin_lock_irqsave(&schan->lock, flags);
|
||||
if (!list_empty(&schan->completed)) {
|
||||
list_splice_tail_init(&schan->completed, &list);
|
||||
spin_unlock_irqrestore(&schan->lock, flags);
|
||||
|
||||
/* Execute callbacks and run dependencies */
|
||||
list_for_each_entry(sdesc, &list, node) {
|
||||
desc = &sdesc->desc;
|
||||
|
||||
if (desc->callback)
|
||||
desc->callback(desc->callback_param);
|
||||
|
||||
last_cookie = desc->cookie;
|
||||
dma_run_dependencies(desc);
|
||||
}
|
||||
|
||||
/* Free descriptors */
|
||||
spin_lock_irqsave(&schan->lock, flags);
|
||||
list_splice_tail_init(&list, &schan->free);
|
||||
schan->completed_cookie = last_cookie;
|
||||
spin_unlock_irqrestore(&schan->lock, flags);
|
||||
} else {
|
||||
/* for cyclic channel, desc is always in active list */
|
||||
sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
|
||||
node);
|
||||
|
||||
if (!sdesc || (sdesc && !sdesc->cyclic)) {
|
||||
/* without active cyclic DMA */
|
||||
spin_unlock_irqrestore(&schan->lock, flags);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* cyclic DMA */
|
||||
happened_cyclic = schan->happened_cyclic;
|
||||
spin_unlock_irqrestore(&schan->lock, flags);
|
||||
|
||||
desc = &sdesc->desc;
|
||||
while (happened_cyclic != schan->completed_cyclic) {
|
||||
if (desc->callback)
|
||||
desc->callback(desc->callback_param);
|
||||
schan->completed_cyclic++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* DMA Tasklet */
|
||||
static void sirfsoc_dma_tasklet(unsigned long data)
|
||||
{
|
||||
struct sirfsoc_dma *sdma = (void *)data;
|
||||
|
||||
sirfsoc_dma_process_completed(sdma);
|
||||
}
|
||||
|
||||
/* Submit descriptor to hardware */
|
||||
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
|
||||
{
|
||||
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
|
||||
struct sirfsoc_dma_desc *sdesc;
|
||||
unsigned long flags;
|
||||
dma_cookie_t cookie;
|
||||
|
||||
sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
|
||||
|
||||
spin_lock_irqsave(&schan->lock, flags);
|
||||
|
||||
/* Move descriptor to queue */
|
||||
list_move_tail(&sdesc->node, &schan->queued);
|
||||
|
||||
/* Update cookie */
|
||||
cookie = schan->chan.cookie + 1;
|
||||
if (cookie <= 0)
|
||||
cookie = 1;
|
||||
|
||||
schan->chan.cookie = cookie;
|
||||
sdesc->desc.cookie = cookie;
|
||||
|
||||
spin_unlock_irqrestore(&schan->lock, flags);
|
||||
|
||||
return cookie;
|
||||
}
|
||||
|
||||
static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
|
||||
struct dma_slave_config *config)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
|
||||
(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(&schan->lock, flags);
|
||||
schan->mode = (config->src_maxburst == 4 ? 1 : 0);
|
||||
spin_unlock_irqrestore(&schan->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
|
||||
{
|
||||
struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
|
||||
int cid = schan->chan.chan_id;
|
||||
unsigned long flags;
|
||||
|
||||
writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
|
||||
~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
|
||||
writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
|
||||
|
||||
writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
|
||||
& ~((1 << cid) | 1 << (cid + 16)),
|
||||
sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
|
||||
|
||||
spin_lock_irqsave(&schan->lock, flags);
|
||||
list_splice_tail_init(&schan->active, &schan->free);
|
||||
list_splice_tail_init(&schan->queued, &schan->free);
|
||||
spin_unlock_irqrestore(&schan->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
struct dma_slave_config *config;
|
||||
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
|
||||
|
||||
switch (cmd) {
|
||||
case DMA_TERMINATE_ALL:
|
||||
return sirfsoc_dma_terminate_all(schan);
|
||||
case DMA_SLAVE_CONFIG:
|
||||
config = (struct dma_slave_config *)arg;
|
||||
return sirfsoc_dma_slave_config(schan, config);
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
/* Alloc channel resources */
|
||||
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
|
||||
{
|
||||
struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
|
||||
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
|
||||
struct sirfsoc_dma_desc *sdesc;
|
||||
unsigned long flags;
|
||||
LIST_HEAD(descs);
|
||||
int i;
|
||||
|
||||
/* Alloc descriptors for this channel */
|
||||
for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
|
||||
sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
|
||||
if (!sdesc) {
|
||||
dev_notice(sdma->dma.dev, "Memory allocation error. "
|
||||
"Allocated only %u descriptors\n", i);
|
||||
break;
|
||||
}
|
||||
|
||||
dma_async_tx_descriptor_init(&sdesc->desc, chan);
|
||||
sdesc->desc.flags = DMA_CTRL_ACK;
|
||||
sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
|
||||
|
||||
list_add_tail(&sdesc->node, &descs);
|
||||
}
|
||||
|
||||
/* Return error only if no descriptors were allocated */
|
||||
if (i == 0)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_irqsave(&schan->lock, flags);
|
||||
|
||||
list_splice_tail_init(&descs, &schan->free);
|
||||
spin_unlock_irqrestore(&schan->lock, flags);
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
/* Free channel resources */
|
||||
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
|
||||
{
|
||||
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
|
||||
struct sirfsoc_dma_desc *sdesc, *tmp;
|
||||
unsigned long flags;
|
||||
LIST_HEAD(descs);
|
||||
|
||||
spin_lock_irqsave(&schan->lock, flags);
|
||||
|
||||
/* Channel must be idle */
|
||||
BUG_ON(!list_empty(&schan->prepared));
|
||||
BUG_ON(!list_empty(&schan->queued));
|
||||
BUG_ON(!list_empty(&schan->active));
|
||||
BUG_ON(!list_empty(&schan->completed));
|
||||
|
||||
/* Move data */
|
||||
list_splice_tail_init(&schan->free, &descs);
|
||||
|
||||
spin_unlock_irqrestore(&schan->lock, flags);
|
||||
|
||||
/* Free descriptors */
|
||||
list_for_each_entry_safe(sdesc, tmp, &descs, node)
|
||||
kfree(sdesc);
|
||||
}
|
||||
|
||||
/* Send pending descriptor to hardware */
|
||||
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
|
||||
{
|
||||
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&schan->lock, flags);
|
||||
|
||||
if (list_empty(&schan->active) && !list_empty(&schan->queued))
|
||||
sirfsoc_dma_execute(schan);
|
||||
|
||||
spin_unlock_irqrestore(&schan->lock, flags);
|
||||
}
|
||||
|
||||
/* Check request completion status */
|
||||
static enum dma_status
|
||||
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
|
||||
struct dma_tx_state *txstate)
|
||||
{
|
||||
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
|
||||
unsigned long flags;
|
||||
dma_cookie_t last_used;
|
||||
dma_cookie_t last_complete;
|
||||
|
||||
spin_lock_irqsave(&schan->lock, flags);
|
||||
last_used = schan->chan.cookie;
|
||||
last_complete = schan->completed_cookie;
|
||||
spin_unlock_irqrestore(&schan->lock, flags);
|
||||
|
||||
dma_set_tx_state(txstate, last_complete, last_used, 0);
|
||||
return dma_async_is_complete(cookie, last_complete, last_used);
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
|
||||
struct dma_chan *chan, struct dma_interleaved_template *xt,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
|
||||
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
|
||||
struct sirfsoc_dma_desc *sdesc = NULL;
|
||||
unsigned long iflags;
|
||||
int ret;
|
||||
|
||||
if ((xt->dir != DMA_MEM_TO_DEV) || (xt->dir != DMA_DEV_TO_MEM)) {
|
||||
ret = -EINVAL;
|
||||
goto err_dir;
|
||||
}
|
||||
|
||||
/* Get free descriptor */
|
||||
spin_lock_irqsave(&schan->lock, iflags);
|
||||
if (!list_empty(&schan->free)) {
|
||||
sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
|
||||
node);
|
||||
list_del(&sdesc->node);
|
||||
}
|
||||
spin_unlock_irqrestore(&schan->lock, iflags);
|
||||
|
||||
if (!sdesc) {
|
||||
/* try to free completed descriptors */
|
||||
sirfsoc_dma_process_completed(sdma);
|
||||
ret = 0;
|
||||
goto no_desc;
|
||||
}
|
||||
|
||||
/* Place descriptor in prepared list */
|
||||
spin_lock_irqsave(&schan->lock, iflags);
|
||||
|
||||
/*
|
||||
* Number of chunks in a frame can only be 1 for prima2
|
||||
* and ylen (number of frame - 1) must be at least 0
|
||||
*/
|
||||
if ((xt->frame_size == 1) && (xt->numf > 0)) {
|
||||
sdesc->cyclic = 0;
|
||||
sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
|
||||
sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
|
||||
SIRFSOC_DMA_WORD_LEN;
|
||||
sdesc->ylen = xt->numf - 1;
|
||||
if (xt->dir == DMA_MEM_TO_DEV) {
|
||||
sdesc->addr = xt->src_start;
|
||||
sdesc->dir = 1;
|
||||
} else {
|
||||
sdesc->addr = xt->dst_start;
|
||||
sdesc->dir = 0;
|
||||
}
|
||||
|
||||
list_add_tail(&sdesc->node, &schan->prepared);
|
||||
} else {
|
||||
pr_err("sirfsoc DMA Invalid xfer\n");
|
||||
ret = -EINVAL;
|
||||
goto err_xfer;
|
||||
}
|
||||
spin_unlock_irqrestore(&schan->lock, iflags);
|
||||
|
||||
return &sdesc->desc;
|
||||
err_xfer:
|
||||
spin_unlock_irqrestore(&schan->lock, iflags);
|
||||
no_desc:
|
||||
err_dir:
|
||||
	return ERR_PTR(ret);
}
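
The 2D parameters above map directly onto the new dma_interleaved_template: one data_chunk per frame, numf frames, with icg carrying the gap up to the full line width. A hedged sketch of how a client might build such a request; the sizes and demo_ names are placeholders, and note that the direction check in the function above, as committed, uses || where && appears intended, so it rejects both directions:

#include <linux/dmaengine.h>
#include <linux/slab.h>

static struct dma_async_tx_descriptor *
demo_prep_2d(struct dma_chan *chan, dma_addr_t src, size_t line, size_t gap,
	     size_t nframes)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	/* one chunk per frame, so reserve room for a single sgl[] entry */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = src;
	xt->dir = DMA_MEM_TO_DEV;
	xt->numf = nframes;		/* the driver programs ylen = numf - 1 */
	xt->frame_size = 1;		/* prima2 allows one chunk per frame */
	xt->sgl[0].size = line;		/* bytes per frame -> xlen */
	xt->sgl[0].icg = gap;		/* inter-frame gap -> width - xlen */

	desc = chan->device->device_prep_interleaved_dma(chan, xt, 0);
	kfree(xt);			/* the driver has copied what it needs */
	return desc;
}
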
static struct dma_async_tx_descriptor *
|
||||
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
|
||||
size_t buf_len, size_t period_len,
|
||||
enum dma_transfer_direction direction)
|
||||
{
|
||||
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
|
||||
struct sirfsoc_dma_desc *sdesc = NULL;
|
||||
unsigned long iflags;
|
||||
|
||||
	/*
	 * we only support cycle transfer with 2 period
	 * If the X-length is set to 0, it would be the loop mode.
	 * The DMA address keeps increasing until reaching the end of a loop
	 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
	 * the DMA address goes back to the beginning of this area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB. DMA controller generates interrupts twice in each loop:
	 * when the DMA address reaches the end of BUFA or the end of the
	 * BUFB
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);
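
Given that restriction, a client wanting the usual half-buffer callbacks sizes the ring as exactly two periods. A hedged sketch, with the channel and the DMA-mapped buffer assumed to be set up elsewhere and the demo_ names hypothetical:

#include <linux/dmaengine.h>
#include <linux/err.h>

#define DEMO_BUF_LEN	4096			/* must be 2 * period for this engine */
#define DEMO_PERIOD	(DEMO_BUF_LEN / 2)

static int demo_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf)
{
	struct dma_async_tx_descriptor *desc;

	desc = chan->device->device_prep_dma_cyclic(chan, buf, DEMO_BUF_LEN,
						     DEMO_PERIOD,
						     DMA_DEV_TO_MEM);
	if (IS_ERR_OR_NULL(desc))
		return -EINVAL;

	/* the callback fires at the end of BUFA and again at the end of BUFB */
	desc->callback = NULL;			/* install a real handler here */
	desc->tx_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
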
/* Get free descriptor */
|
||||
spin_lock_irqsave(&schan->lock, iflags);
|
||||
if (!list_empty(&schan->free)) {
|
||||
sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
|
||||
node);
|
||||
list_del(&sdesc->node);
|
||||
}
|
||||
spin_unlock_irqrestore(&schan->lock, iflags);
|
||||
|
||||
if (!sdesc)
|
||||
return 0;
|
||||
|
||||
/* Place descriptor in prepared list */
|
||||
spin_lock_irqsave(&schan->lock, iflags);
|
||||
sdesc->addr = addr;
|
||||
sdesc->cyclic = 1;
|
||||
sdesc->xlen = 0;
|
||||
sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
|
||||
sdesc->width = 1;
|
||||
list_add_tail(&sdesc->node, &schan->prepared);
|
||||
spin_unlock_irqrestore(&schan->lock, iflags);
|
||||
|
||||
return &sdesc->desc;
|
||||
}
|
||||
|
||||
/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
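
Since every one of the 16 channels is tied to a fixed function, a client selects its channel explicitly through this filter. A sketch; the requested channel number and the assumption that the filter is declared in linux/sirfsoc_dma.h are the example's, not the patch's:

#include <linux/dmaengine.h>
#include <linux/sirfsoc_dma.h>

static struct dma_chan *demo_request_channel(unsigned int ch_nr)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* ch_nr is dev_id * SIRFSOC_DMA_CHANNELS + the per-controller channel id */
	return dma_request_channel(mask, sirfsoc_dma_filter_id,
				   (void *)(unsigned long)ch_nr);
}
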
static int __devinit sirfsoc_dma_probe(struct platform_device *op)
|
||||
{
|
||||
struct device_node *dn = op->dev.of_node;
|
||||
struct device *dev = &op->dev;
|
||||
struct dma_device *dma;
|
||||
struct sirfsoc_dma *sdma;
|
||||
struct sirfsoc_dma_chan *schan;
|
||||
struct resource res;
|
||||
ulong regs_start, regs_size;
|
||||
u32 id;
|
||||
int ret, i;
|
||||
|
||||
sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
|
||||
if (!sdma) {
|
||||
dev_err(dev, "Memory exhausted!\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (of_property_read_u32(dn, "cell-index", &id)) {
|
||||
dev_err(dev, "Fail to get DMAC index\n");
|
||||
		ret = -ENODEV;
		goto free_mem;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		ret = -EINVAL;
		goto free_mem;
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto free_mem;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
		sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto unmap_mem;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		schan->chan.cookie = 1;
		schan->completed_cookie = schan->chan.cookie;

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);
	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	devm_free_irq(dev, sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
unmap_mem:
	iounmap(sdma->base);
free_mem:
	devm_kfree(dev, sdma);
	return ret;
}

static int __devexit sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&sdma->dma);
	devm_free_irq(dev, sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	iounmap(sdma->base);
	devm_kfree(dev, sdma);
	return 0;
}

static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{},
};

static struct platform_driver sirfsoc_dma_driver = {
	.probe = sirfsoc_dma_probe,
	.remove = __devexit_p(sirfsoc_dma_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= sirfsoc_dma_match,
	},
};

module_platform_driver(sirfsoc_dma_driver);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");
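For context (not part of this commit): the driver above exposes slave, cyclic and interleaved capabilities through the generic dmaengine interface, so a peripheral driver consumes it with the usual channel-request / prepare / submit sequence. Below is only a rough client-side sketch of that flow; the filter callback and buffer names are placeholders invented for illustration, and the buffer is assumed to be DMA-mapped already.

```c
#include <linux/dmaengine.h>

/* Placeholder filter: a real client would check that 'chan' belongs to
 * the SiRF DMA controller and matches the wanted request line. */
static bool example_filter(struct dma_chan *chan, void *param)
{
	return true;
}

/* Start a cyclic transfer from a peripheral FIFO into a pre-mapped
 * buffer 'buf' of 'buf_len' bytes, split into 'period_len' periods. */
static struct dma_chan *example_start_cyclic(dma_addr_t buf, size_t buf_len,
					     size_t period_len)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);

	chan = dma_request_channel(mask, example_filter, NULL);
	if (!chan)
		return NULL;

	/* Note the new enum: DMA_DEV_TO_MEM rather than DMA_FROM_DEVICE */
	desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						    period_len,
						    DMA_DEV_TO_MEM);
	if (!desc) {
		dma_release_channel(chan);
		return NULL;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return chan;
}
```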
@@ -14,6 +14,8 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/err.h>
 #include <linux/amba/bus.h>

@@ -32,6 +34,9 @@
 /* Maximum iterations taken before giving up suspending a channel */
 #define D40_SUSPEND_MAX_IT 500

 /* Milliseconds */
 #define DMA40_AUTOSUSPEND_DELAY	100

 /* Hardware requirement on LCLA alignment */
 #define LCLA_ALIGNMENT 0x40000

@@ -62,6 +67,55 @@ enum d40_command {
 	D40_DMA_SUSPENDED = 3
 };

 /*
  * These are the registers that has to be saved and later restored
  * when the DMA hw is powered off.
  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
  */
 static u32 d40_backup_regs[] = {
 	D40_DREG_LCPA,
 	D40_DREG_LCLA,
 	D40_DREG_PRMSE,
 	D40_DREG_PRMSO,
 	D40_DREG_PRMOE,
 	D40_DREG_PRMOO,
 };

 #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

 /* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
 static u32 d40_backup_regs_v3[] = {
 	D40_DREG_PSEG1,
 	D40_DREG_PSEG2,
 	D40_DREG_PSEG3,
 	D40_DREG_PSEG4,
 	D40_DREG_PCEG1,
 	D40_DREG_PCEG2,
 	D40_DREG_PCEG3,
 	D40_DREG_PCEG4,
 	D40_DREG_RSEG1,
 	D40_DREG_RSEG2,
 	D40_DREG_RSEG3,
 	D40_DREG_RSEG4,
 	D40_DREG_RCEG1,
 	D40_DREG_RCEG2,
 	D40_DREG_RCEG3,
 	D40_DREG_RCEG4,
 };

 #define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)

 static u32 d40_backup_regs_chan[] = {
 	D40_CHAN_REG_SSCFG,
 	D40_CHAN_REG_SSELT,
 	D40_CHAN_REG_SSPTR,
 	D40_CHAN_REG_SSLNK,
 	D40_CHAN_REG_SDCFG,
 	D40_CHAN_REG_SDELT,
 	D40_CHAN_REG_SDPTR,
 	D40_CHAN_REG_SDLNK,
 };

 /**
  * struct d40_lli_pool - Structure for keeping LLIs in memory
  *
@ -96,7 +150,7 @@ struct d40_lli_pool {
|
|||
* during a transfer.
|
||||
* @node: List entry.
|
||||
* @is_in_client_list: true if the client owns this descriptor.
|
||||
* the previous one.
|
||||
* @cyclic: true if this is a cyclic job
|
||||
*
|
||||
* This descriptor is used for both logical and physical transfers.
|
||||
*/
|
||||
|
@ -143,6 +197,7 @@ struct d40_lcla_pool {
|
|||
* channels.
|
||||
*
|
||||
* @lock: A lock protection this entity.
|
||||
* @reserved: True if used by secure world or otherwise.
|
||||
* @num: The physical channel number of this entity.
|
||||
* @allocated_src: Bit mapped to show which src event line's are mapped to
|
||||
* this physical channel. Can also be free or physically allocated.
|
||||
|
@ -152,6 +207,7 @@ struct d40_lcla_pool {
|
|||
*/
|
||||
struct d40_phy_res {
|
||||
spinlock_t lock;
|
||||
bool reserved;
|
||||
int num;
|
||||
u32 allocated_src;
|
||||
u32 allocated_dst;
|
||||
|
@ -185,7 +241,6 @@ struct d40_base;
|
|||
* @src_def_cfg: Default cfg register setting for src.
|
||||
* @dst_def_cfg: Default cfg register setting for dst.
|
||||
* @log_def: Default logical channel settings.
|
||||
* @lcla: Space for one dst src pair for logical channel transfers.
|
||||
* @lcpa: Pointer to dst and src lcpa settings.
|
||||
* @runtime_addr: runtime configured address.
|
||||
* @runtime_direction: runtime configured direction.
|
||||
|
@ -217,7 +272,7 @@ struct d40_chan {
|
|||
struct d40_log_lli_full *lcpa;
|
||||
/* Runtime reconfiguration */
|
||||
dma_addr_t runtime_addr;
|
||||
enum dma_data_direction runtime_direction;
|
||||
enum dma_transfer_direction runtime_direction;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -241,6 +296,7 @@ struct d40_chan {
|
|||
* @dma_both: dma_device channels that can do both memcpy and slave transfers.
|
||||
* @dma_slave: dma_device channels that can do only do slave transfers.
|
||||
* @dma_memcpy: dma_device channels that can do only do memcpy transfers.
|
||||
* @phy_chans: Room for all possible physical channels in system.
|
||||
* @log_chans: Room for all possible logical channels in system.
|
||||
* @lookup_log_chans: Used to map interrupt number to logical channel. Points
|
||||
* to log_chans entries.
|
||||
|
@ -248,12 +304,20 @@ struct d40_chan {
|
|||
* to phy_chans entries.
|
||||
* @plat_data: Pointer to provided platform_data which is the driver
|
||||
* configuration.
|
||||
* @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
|
||||
* @phy_res: Vector containing all physical channels.
|
||||
* @lcla_pool: lcla pool settings and data.
|
||||
* @lcpa_base: The virtual mapped address of LCPA.
|
||||
* @phy_lcpa: The physical address of the LCPA.
|
||||
* @lcpa_size: The size of the LCPA area.
|
||||
* @desc_slab: cache for descriptors.
|
||||
* @reg_val_backup: Here the values of some hardware registers are stored
|
||||
* before the DMA is powered off. They are restored when the power is back on.
|
||||
* @reg_val_backup_v3: Backup of registers that only exits on dma40 v3 and
|
||||
* later.
|
||||
* @reg_val_backup_chan: Backup data for standard channel parameter registers.
|
||||
* @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
|
||||
* @initialized: true if the dma has been initialized
|
||||
*/
|
||||
struct d40_base {
|
||||
spinlock_t interrupt_lock;
|
||||
|
@ -275,6 +339,7 @@ struct d40_base {
|
|||
struct d40_chan **lookup_log_chans;
|
||||
struct d40_chan **lookup_phy_chans;
|
||||
struct stedma40_platform_data *plat_data;
|
||||
struct regulator *lcpa_regulator;
|
||||
/* Physical half channels */
|
||||
struct d40_phy_res *phy_res;
|
||||
struct d40_lcla_pool lcla_pool;
|
||||
|
@ -282,6 +347,11 @@ struct d40_base {
|
|||
dma_addr_t phy_lcpa;
|
||||
resource_size_t lcpa_size;
|
||||
struct kmem_cache *desc_slab;
|
||||
u32 reg_val_backup[BACKUP_REGS_SZ];
|
||||
u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
|
||||
u32 *reg_val_backup_chan;
|
||||
u16 gcc_pwr_off_mask;
|
||||
bool initialized;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
|
|||
struct d40_desc *d;
|
||||
struct d40_desc *_d;
|
||||
|
||||
list_for_each_entry_safe(d, _d, &d40c->client, node)
|
||||
list_for_each_entry_safe(d, _d, &d40c->client, node) {
|
||||
if (async_tx_test_ack(&d->txd)) {
|
||||
d40_desc_remove(d);
|
||||
desc = d;
|
||||
memset(desc, 0, sizeof(*desc));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!desc)
|
||||
|
@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
|
|||
bool cyclic = desc->cyclic;
|
||||
int curr_lcla = -EINVAL;
|
||||
int first_lcla = 0;
|
||||
bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
|
||||
bool linkback;
|
||||
|
||||
/*
|
||||
|
@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
|
|||
&lli->src[lli_current],
|
||||
next_lcla, flags);
|
||||
|
||||
dma_sync_single_range_for_device(chan->base->dev,
|
||||
pool->dma_addr, lcla_offset,
|
||||
2 * sizeof(struct d40_log_lli),
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
/*
|
||||
* Cache maintenance is not needed if lcla is
|
||||
* mapped in esram
|
||||
*/
|
||||
if (!use_esram_lcla) {
|
||||
dma_sync_single_range_for_device(chan->base->dev,
|
||||
pool->dma_addr, lcla_offset,
|
||||
2 * sizeof(struct d40_log_lli),
|
||||
DMA_TO_DEVICE);
|
||||
}
|
||||
curr_lcla = next_lcla;
|
||||
|
||||
if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
|
||||
|
@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
|
|||
return len;
|
||||
}
|
||||
|
||||
/* Support functions for logical channels */
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
|
||||
u32 *regaddr, int num, bool save)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
void __iomem *addr = baseaddr + regaddr[i];
|
||||
|
||||
if (save)
|
||||
backup[i] = readl_relaxed(addr);
|
||||
else
|
||||
writel_relaxed(backup[i], addr);
|
||||
}
|
||||
}
|
||||
|
||||
static void d40_save_restore_registers(struct d40_base *base, bool save)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* Save/Restore channel specific registers */
|
||||
for (i = 0; i < base->num_phy_chans; i++) {
|
||||
void __iomem *addr;
|
||||
int idx;
|
||||
|
||||
if (base->phy_res[i].reserved)
|
||||
continue;
|
||||
|
||||
addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
|
||||
idx = i * ARRAY_SIZE(d40_backup_regs_chan);
|
||||
|
||||
dma40_backup(addr, &base->reg_val_backup_chan[idx],
|
||||
d40_backup_regs_chan,
|
||||
ARRAY_SIZE(d40_backup_regs_chan),
|
||||
save);
|
||||
}
|
||||
|
||||
/* Save/Restore global registers */
|
||||
dma40_backup(base->virtbase, base->reg_val_backup,
|
||||
d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
|
||||
save);
|
||||
|
||||
/* Save/Restore registers only existing on dma40 v3 and later */
|
||||
if (base->rev >= 3)
|
||||
dma40_backup(base->virtbase, base->reg_val_backup_v3,
|
||||
d40_backup_regs_v3,
|
||||
ARRAY_SIZE(d40_backup_regs_v3),
|
||||
save);
|
||||
}
|
||||
#else
|
||||
static void d40_save_restore_registers(struct d40_base *base, bool save)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
static int d40_channel_execute_command(struct d40_chan *d40c,
|
||||
enum d40_command command)
|
||||
|
@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c)
|
|||
/* Set LIDX for lcla */
|
||||
writel(lidx, chanbase + D40_CHAN_REG_SSELT);
|
||||
writel(lidx, chanbase + D40_CHAN_REG_SDELT);
|
||||
|
||||
/* Clear LNK which will be used by d40_chan_has_events() */
|
||||
writel(0, chanbase + D40_CHAN_REG_SSLNK);
|
||||
writel(0, chanbase + D40_CHAN_REG_SDLNK);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c)
|
|||
if (!d40c->busy)
|
||||
return 0;
|
||||
|
||||
pm_runtime_get_sync(d40c->base->dev);
|
||||
spin_lock_irqsave(&d40c->lock, flags);
|
||||
|
||||
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
|
||||
|
@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c)
|
|||
D40_DMA_RUN);
|
||||
}
|
||||
}
|
||||
|
||||
pm_runtime_mark_last_busy(d40c->base->dev);
|
||||
pm_runtime_put_autosuspend(d40c->base->dev);
|
||||
spin_unlock_irqrestore(&d40c->lock, flags);
|
||||
return res;
|
||||
}
|
||||
|
@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c)
|
|||
return 0;
|
||||
|
||||
spin_lock_irqsave(&d40c->lock, flags);
|
||||
|
||||
pm_runtime_get_sync(d40c->base->dev);
|
||||
if (d40c->base->rev == 0)
|
||||
if (chan_is_logical(d40c)) {
|
||||
res = d40_channel_execute_command(d40c,
|
||||
|
@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c)
|
|||
}
|
||||
|
||||
no_suspend:
|
||||
pm_runtime_mark_last_busy(d40c->base->dev);
|
||||
pm_runtime_put_autosuspend(d40c->base->dev);
|
||||
spin_unlock_irqrestore(&d40c->lock, flags);
|
||||
return res;
|
||||
}
|
||||
|
@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
|
|||
d40d = d40_first_queued(d40c);
|
||||
|
||||
if (d40d != NULL) {
|
||||
d40c->busy = true;
|
||||
if (!d40c->busy)
|
||||
d40c->busy = true;
|
||||
|
||||
pm_runtime_get_sync(d40c->base->dev);
|
||||
|
||||
/* Remove from queue */
|
||||
d40_desc_remove(d40d);
|
||||
|
@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c)
|
|||
|
||||
if (d40_queue_start(d40c) == NULL)
|
||||
d40c->busy = false;
|
||||
pm_runtime_mark_last_busy(d40c->base->dev);
|
||||
pm_runtime_put_autosuspend(d40c->base->dev);
|
||||
}
|
||||
|
||||
d40c->pending_tx++;
|
||||
|
@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c,
|
|||
return res;
|
||||
}
|
||||
|
||||
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
|
||||
int log_event_line, bool is_log)
|
||||
static bool d40_alloc_mask_set(struct d40_phy_res *phy,
|
||||
bool is_src, int log_event_line, bool is_log,
|
||||
bool *first_user)
|
||||
{
|
||||
unsigned long flags;
|
||||
spin_lock_irqsave(&phy->lock, flags);
|
||||
|
||||
*first_user = ((phy->allocated_src | phy->allocated_dst)
|
||||
== D40_ALLOC_FREE);
|
||||
|
||||
if (!is_log) {
|
||||
/* Physical interrupts are masked per physical full channel */
|
||||
if (phy->allocated_src == D40_ALLOC_FREE &&
|
||||
|
@ -1490,7 +1639,7 @@ out:
|
|||
return is_free;
|
||||
}
|
||||
|
||||
static int d40_allocate_channel(struct d40_chan *d40c)
|
||||
static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
|
||||
{
|
||||
int dev_type;
|
||||
int event_group;
|
||||
|
@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
|
|||
for (i = 0; i < d40c->base->num_phy_chans; i++) {
|
||||
|
||||
if (d40_alloc_mask_set(&phys[i], is_src,
|
||||
0, is_log))
|
||||
0, is_log,
|
||||
first_phy_user))
|
||||
goto found_phy;
|
||||
}
|
||||
} else
|
||||
|
@ -1536,7 +1686,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
|
|||
if (d40_alloc_mask_set(&phys[i],
|
||||
is_src,
|
||||
0,
|
||||
is_log))
|
||||
is_log,
|
||||
first_phy_user))
|
||||
goto found_phy;
|
||||
}
|
||||
}
|
||||
|
@ -1552,6 +1703,25 @@ found_phy:
|
|||
/* Find logical channel */
|
||||
for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
|
||||
int phy_num = j + event_group * 2;
|
||||
|
||||
if (d40c->dma_cfg.use_fixed_channel) {
|
||||
i = d40c->dma_cfg.phy_channel;
|
||||
|
||||
if ((i != phy_num) && (i != phy_num + 1)) {
|
||||
dev_err(chan2dev(d40c),
|
||||
"invalid fixed phy channel %d\n", i);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (d40_alloc_mask_set(&phys[i], is_src, event_line,
|
||||
is_log, first_phy_user))
|
||||
goto found_log;
|
||||
|
||||
dev_err(chan2dev(d40c),
|
||||
"could not allocate fixed phy channel %d\n", i);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Spread logical channels across all available physical rather
|
||||
* than pack every logical channel at the first available phy
|
||||
|
@ -1560,13 +1730,15 @@ found_phy:
|
|||
if (is_src) {
|
||||
for (i = phy_num; i < phy_num + 2; i++) {
|
||||
if (d40_alloc_mask_set(&phys[i], is_src,
|
||||
event_line, is_log))
|
||||
event_line, is_log,
|
||||
first_phy_user))
|
||||
goto found_log;
|
||||
}
|
||||
} else {
|
||||
for (i = phy_num + 1; i >= phy_num; i--) {
|
||||
if (d40_alloc_mask_set(&phys[i], is_src,
|
||||
event_line, is_log))
|
||||
event_line, is_log,
|
||||
first_phy_user))
|
||||
goto found_log;
|
||||
}
|
||||
}
|
||||
|
@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
pm_runtime_get_sync(d40c->base->dev);
|
||||
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
|
||||
if (res) {
|
||||
chan_err(d40c, "suspend failed\n");
|
||||
return res;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (chan_is_logical(d40c)) {
|
||||
|
@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c)
|
|||
if (d40_chan_has_events(d40c)) {
|
||||
res = d40_channel_execute_command(d40c,
|
||||
D40_DMA_RUN);
|
||||
if (res) {
|
||||
if (res)
|
||||
chan_err(d40c,
|
||||
"Executing RUN command\n");
|
||||
return res;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
(void) d40_alloc_mask_free(phy, is_src, 0);
|
||||
|
@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c)
|
|||
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
|
||||
if (res) {
|
||||
chan_err(d40c, "Failed to stop channel\n");
|
||||
return res;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (d40c->busy) {
|
||||
pm_runtime_mark_last_busy(d40c->base->dev);
|
||||
pm_runtime_put_autosuspend(d40c->base->dev);
|
||||
}
|
||||
|
||||
d40c->busy = false;
|
||||
d40c->phy_chan = NULL;
|
||||
d40c->configured = false;
|
||||
d40c->base->lookup_phy_chans[phy->num] = NULL;
|
||||
out:
|
||||
|
||||
return 0;
|
||||
pm_runtime_mark_last_busy(d40c->base->dev);
|
||||
pm_runtime_put_autosuspend(d40c->base->dev);
|
||||
return res;
|
||||
}
|
||||
|
||||
static bool d40_is_paused(struct d40_chan *d40c)
|
||||
|
@ -1855,7 +2036,7 @@ err:
|
|||
}
|
||||
|
||||
static dma_addr_t
|
||||
d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
|
||||
d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
|
||||
{
|
||||
struct stedma40_platform_data *plat = chan->base->plat_data;
|
||||
struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
|
||||
|
@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
|
|||
if (chan->runtime_addr)
|
||||
return chan->runtime_addr;
|
||||
|
||||
if (direction == DMA_FROM_DEVICE)
|
||||
if (direction == DMA_DEV_TO_MEM)
|
||||
addr = plat->dev_rx[cfg->src_dev_type];
|
||||
else if (direction == DMA_TO_DEVICE)
|
||||
else if (direction == DMA_MEM_TO_DEV)
|
||||
addr = plat->dev_tx[cfg->dst_dev_type];
|
||||
|
||||
return addr;
|
||||
|
@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
|
|||
static struct dma_async_tx_descriptor *
|
||||
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
|
||||
struct scatterlist *sg_dst, unsigned int sg_len,
|
||||
enum dma_data_direction direction, unsigned long dma_flags)
|
||||
enum dma_transfer_direction direction, unsigned long dma_flags)
|
||||
{
|
||||
struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
|
||||
dma_addr_t src_dev_addr = 0;
|
||||
|
@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
|
|||
if (direction != DMA_NONE) {
|
||||
dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
|
||||
|
||||
if (direction == DMA_FROM_DEVICE)
|
||||
if (direction == DMA_DEV_TO_MEM)
|
||||
src_dev_addr = dev_addr;
|
||||
else if (direction == DMA_TO_DEVICE)
|
||||
else if (direction == DMA_MEM_TO_DEV)
|
||||
dst_dev_addr = dev_addr;
|
||||
}
|
||||
|
||||
|
@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
|
|||
goto fail;
|
||||
}
|
||||
}
|
||||
is_free_phy = (d40c->phy_chan == NULL);
|
||||
|
||||
err = d40_allocate_channel(d40c);
|
||||
err = d40_allocate_channel(d40c, &is_free_phy);
|
||||
if (err) {
|
||||
chan_err(d40c, "Failed to allocate channel\n");
|
||||
d40c->configured = false;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
pm_runtime_get_sync(d40c->base->dev);
|
||||
/* Fill in basic CFG register values */
|
||||
d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
|
||||
&d40c->dst_def_cfg, chan_is_logical(d40c));
|
||||
|
@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
|
|||
D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
|
||||
}
|
||||
|
||||
dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
|
||||
chan_is_logical(d40c) ? "logical" : "physical",
|
||||
d40c->phy_chan->num,
|
||||
d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
|
||||
|
||||
|
||||
/*
|
||||
* Only write channel configuration to the DMA if the physical
|
||||
* resource is free. In case of multiple logical channels
|
||||
|
@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
|
|||
if (is_free_phy)
|
||||
d40_config_write(d40c);
|
||||
fail:
|
||||
pm_runtime_mark_last_busy(d40c->base->dev);
|
||||
pm_runtime_put_autosuspend(d40c->base->dev);
|
||||
spin_unlock_irqrestore(&d40c->lock, flags);
|
||||
return err;
|
||||
}
|
||||
|
@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
|
|||
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
|
||||
struct scatterlist *sgl,
|
||||
unsigned int sg_len,
|
||||
enum dma_data_direction direction,
|
||||
enum dma_transfer_direction direction,
|
||||
unsigned long dma_flags)
|
||||
{
|
||||
if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
|
||||
if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
|
||||
return NULL;
|
||||
|
||||
return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
|
||||
|
@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
|
|||
static struct dma_async_tx_descriptor *
|
||||
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
|
||||
size_t buf_len, size_t period_len,
|
||||
enum dma_data_direction direction)
|
||||
enum dma_transfer_direction direction)
|
||||
{
|
||||
unsigned int periods = buf_len / period_len;
|
||||
struct dma_async_tx_descriptor *txd;
|
||||
|
@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
|
|||
dst_addr_width = config->dst_addr_width;
|
||||
dst_maxburst = config->dst_maxburst;
|
||||
|
||||
if (config->direction == DMA_FROM_DEVICE) {
|
||||
if (config->direction == DMA_DEV_TO_MEM) {
|
||||
dma_addr_t dev_addr_rx =
|
||||
d40c->base->plat_data->dev_rx[cfg->src_dev_type];
|
||||
|
||||
|
@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
|
|||
if (dst_maxburst == 0)
|
||||
dst_maxburst = src_maxburst;
|
||||
|
||||
} else if (config->direction == DMA_TO_DEVICE) {
|
||||
} else if (config->direction == DMA_MEM_TO_DEV) {
|
||||
dma_addr_t dev_addr_tx =
|
||||
d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
|
||||
|
||||
|
@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
|
|||
"configured channel %s for %s, data width %d/%d, "
|
||||
"maxburst %d/%d elements, LE, no flow control\n",
|
||||
dma_chan_name(chan),
|
||||
(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
|
||||
(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
|
||||
src_addr_width, dst_addr_width,
|
||||
src_maxburst, dst_maxburst);
|
||||
|
||||
|
@ -2519,6 +2709,72 @@ failure1:
|
|||
return err;
|
||||
}
|
||||
|
||||
/* Suspend resume functionality */
#ifdef CONFIG_PM
static int dma40_pm_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);
	int ret = 0;
	if (!pm_runtime_suspended(dev))
		return -EBUSY;

	if (base->lcpa_regulator)
		ret = regulator_disable(base->lcpa_regulator);
	return ret;
}

static int dma40_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	d40_save_restore_registers(base, true);

	/* Don't disable/enable clocks for v1 due to HW bugs */
	if (base->rev != 1)
		writel_relaxed(base->gcc_pwr_off_mask,
			       base->virtbase + D40_DREG_GCC);

	return 0;
}

static int dma40_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);

	if (base->initialized)
		d40_save_restore_registers(base, false);

	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
		       base->virtbase + D40_DREG_GCC);
	return 0;
}

static int dma40_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct d40_base *base = platform_get_drvdata(pdev);
	int ret = 0;

	if (base->lcpa_regulator)
		ret = regulator_enable(base->lcpa_regulator);

	return ret;
}

static const struct dev_pm_ops dma40_pm_ops = {
	.suspend		= dma40_pm_suspend,
	.runtime_suspend	= dma40_runtime_suspend,
	.runtime_resume		= dma40_runtime_resume,
	.resume			= dma40_resume,
};
#define DMA40_PM_OPS	(&dma40_pm_ops)
#else
#define DMA40_PM_OPS	NULL
#endif

/* Initialization functions. */

static int __init d40_phy_res_init(struct d40_base *base)
@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
|
|||
int num_phy_chans_avail = 0;
|
||||
u32 val[2];
|
||||
int odd_even_bit = -2;
|
||||
int gcc = D40_DREG_GCC_ENA;
|
||||
|
||||
val[0] = readl(base->virtbase + D40_DREG_PRSME);
|
||||
val[1] = readl(base->virtbase + D40_DREG_PRSMO);
|
||||
|
@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base)
|
|||
/* Mark security only channels as occupied */
|
||||
base->phy_res[i].allocated_src = D40_ALLOC_PHY;
|
||||
base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
|
||||
base->phy_res[i].reserved = true;
|
||||
gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
|
||||
D40_DREG_GCC_SRC);
|
||||
gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
|
||||
D40_DREG_GCC_DST);
|
||||
|
||||
|
||||
} else {
|
||||
base->phy_res[i].allocated_src = D40_ALLOC_FREE;
|
||||
base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
|
||||
base->phy_res[i].reserved = false;
|
||||
num_phy_chans_avail++;
|
||||
}
|
||||
spin_lock_init(&base->phy_res[i].lock);
|
||||
|
@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
|
|||
|
||||
base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
|
||||
base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
|
||||
base->phy_res[chan].reserved = true;
|
||||
gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
|
||||
D40_DREG_GCC_SRC);
|
||||
gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
|
||||
D40_DREG_GCC_DST);
|
||||
num_phy_chans_avail--;
|
||||
}
|
||||
|
||||
|
@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base)
|
|||
val[0] = val[0] >> 2;
|
||||
}
|
||||
|
||||
/*
|
||||
* To keep things simple, Enable all clocks initially.
|
||||
* The clocks will get managed later post channel allocation.
|
||||
* The clocks for the event lines on which reserved channels exists
|
||||
* are not managed here.
|
||||
*/
|
||||
writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
|
||||
base->gcc_pwr_off_mask = gcc;
|
||||
|
||||
return num_phy_chans_avail;
|
||||
}
|
||||
|
||||
|
@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
|
|||
goto failure;
|
||||
}
|
||||
|
||||
base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
|
||||
sizeof(struct d40_desc *) *
|
||||
D40_LCLA_LINK_PER_EVENT_GRP,
|
||||
base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
|
||||
sizeof(d40_backup_regs_chan),
|
||||
GFP_KERNEL);
|
||||
if (!base->reg_val_backup_chan)
|
||||
goto failure;
|
||||
|
||||
base->lcla_pool.alloc_map =
|
||||
kzalloc(num_phy_chans * sizeof(struct d40_desc *)
|
||||
* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
|
||||
if (!base->lcla_pool.alloc_map)
|
||||
goto failure;
|
||||
|
||||
|
@ -2741,9 +3025,9 @@ failure:
|
|||
static void __init d40_hw_init(struct d40_base *base)
|
||||
{
|
||||
|
||||
static const struct d40_reg_val dma_init_reg[] = {
|
||||
static struct d40_reg_val dma_init_reg[] = {
|
||||
/* Clock every part of the DMA block from start */
|
||||
{ .reg = D40_DREG_GCC, .val = 0x0000ff01},
|
||||
{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
|
||||
|
||||
/* Interrupts on all logical channels */
|
||||
{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
|
||||
|
@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev)
|
|||
d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
|
||||
goto failure;
|
||||
}
|
||||
/* If lcla has to be located in ESRAM we don't need to allocate */
|
||||
if (base->plat_data->use_esram_lcla) {
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
|
||||
"lcla_esram");
|
||||
if (!res) {
|
||||
ret = -ENOENT;
|
||||
d40_err(&pdev->dev,
|
||||
"No \"lcla_esram\" memory resource\n");
|
||||
goto failure;
|
||||
}
|
||||
base->lcla_pool.base = ioremap(res->start,
|
||||
resource_size(res));
|
||||
if (!base->lcla_pool.base) {
|
||||
ret = -ENOMEM;
|
||||
d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
|
||||
goto failure;
|
||||
}
|
||||
writel(res->start, base->virtbase + D40_DREG_LCLA);
|
||||
|
||||
ret = d40_lcla_allocate(base);
|
||||
if (ret) {
|
||||
d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
|
||||
goto failure;
|
||||
} else {
|
||||
ret = d40_lcla_allocate(base);
|
||||
if (ret) {
|
||||
d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
|
||||
goto failure;
|
||||
}
|
||||
}
|
||||
|
||||
spin_lock_init(&base->lcla_pool.lock);
|
||||
|
@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev)
|
|||
goto failure;
|
||||
}
|
||||
|
||||
pm_runtime_irq_safe(base->dev);
|
||||
pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
|
||||
pm_runtime_use_autosuspend(base->dev);
|
||||
pm_runtime_enable(base->dev);
|
||||
pm_runtime_resume(base->dev);
|
||||
|
||||
if (base->plat_data->use_esram_lcla) {
|
||||
|
||||
base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
|
||||
if (IS_ERR(base->lcpa_regulator)) {
|
||||
d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
|
||||
base->lcpa_regulator = NULL;
|
||||
goto failure;
|
||||
}
|
||||
|
||||
ret = regulator_enable(base->lcpa_regulator);
|
||||
if (ret) {
|
||||
d40_err(&pdev->dev,
|
||||
"Failed to enable lcpa_regulator\n");
|
||||
regulator_put(base->lcpa_regulator);
|
||||
base->lcpa_regulator = NULL;
|
||||
goto failure;
|
||||
}
|
||||
}
|
||||
|
||||
base->initialized = true;
|
||||
err = d40_dmaengine_init(base, num_reserved_chans);
|
||||
if (err)
|
||||
goto failure;
|
||||
|
@ -2976,6 +3306,11 @@ failure:
|
|||
if (base->virtbase)
|
||||
iounmap(base->virtbase);
|
||||
|
||||
if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
|
||||
iounmap(base->lcla_pool.base);
|
||||
base->lcla_pool.base = NULL;
|
||||
}
|
||||
|
||||
if (base->lcla_pool.dma_addr)
|
||||
dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
|
||||
SZ_1K * base->num_phy_chans,
|
||||
|
@ -2998,6 +3333,11 @@ failure:
|
|||
clk_put(base->clk);
|
||||
}
|
||||
|
||||
if (base->lcpa_regulator) {
|
||||
regulator_disable(base->lcpa_regulator);
|
||||
regulator_put(base->lcpa_regulator);
|
||||
}
|
||||
|
||||
kfree(base->lcla_pool.alloc_map);
|
||||
kfree(base->lookup_log_chans);
|
||||
kfree(base->lookup_phy_chans);
|
||||
|
@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = {
|
|||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = D40_NAME,
|
||||
.pm = DMA40_PM_OPS,
|
||||
},
|
||||
};
|
||||
|
||||
|
|
|
@ -16,6 +16,8 @@
|
|||
|
||||
#define D40_TYPE_TO_GROUP(type) (type / 16)
|
||||
#define D40_TYPE_TO_EVENT(type) (type % 16)
|
||||
#define D40_GROUP_SIZE 8
|
||||
#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2)
|
||||
|
||||
/* Most bits of the CFG register are the same in log as in phy mode */
|
||||
#define D40_SREG_CFG_MST_POS 15
|
||||
|
@ -123,6 +125,15 @@
|
|||
|
||||
/* DMA Register Offsets */
|
||||
#define D40_DREG_GCC 0x000
|
||||
#define D40_DREG_GCC_ENA 0x1
|
||||
/* This assumes that there are only 4 event groups */
|
||||
#define D40_DREG_GCC_ENABLE_ALL 0xff01
|
||||
#define D40_DREG_GCC_EVTGRP_POS 8
|
||||
#define D40_DREG_GCC_SRC 0
|
||||
#define D40_DREG_GCC_DST 1
|
||||
#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
|
||||
(1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))
|
||||
|
||||
#define D40_DREG_PRTYP 0x004
|
||||
#define D40_DREG_PRSME 0x008
|
||||
#define D40_DREG_PRSMO 0x00C
|
||||
|
|
|
@ -90,7 +90,7 @@ struct timb_dma_chan {
|
|||
struct list_head queue;
|
||||
struct list_head free_list;
|
||||
unsigned int bytes_per_line;
|
||||
enum dma_data_direction direction;
|
||||
enum dma_transfer_direction direction;
|
||||
unsigned int descs; /* Descriptors to allocate */
|
||||
unsigned int desc_elems; /* number of elems per descriptor */
|
||||
};
|
||||
|
@ -166,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
|
|||
|
||||
if (single)
|
||||
dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
|
||||
td_chan->direction);
|
||||
DMA_TO_DEVICE);
|
||||
else
|
||||
dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
|
||||
td_chan->direction);
|
||||
DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
|
||||
|
@ -235,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
|
|||
"td_chan: %p, chan: %d, membase: %p\n",
|
||||
td_chan, td_chan->chan.chan_id, td_chan->membase);
|
||||
|
||||
if (td_chan->direction == DMA_FROM_DEVICE) {
|
||||
if (td_chan->direction == DMA_DEV_TO_MEM) {
|
||||
|
||||
/* descriptor address */
|
||||
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
|
||||
|
@ -278,7 +278,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
|
|||
txd->cookie);
|
||||
|
||||
/* make sure to stop the transfer */
|
||||
if (td_chan->direction == DMA_FROM_DEVICE)
|
||||
if (td_chan->direction == DMA_DEV_TO_MEM)
|
||||
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
|
||||
/* Currently no support for stopping DMA transfers
|
||||
else
|
||||
|
@ -558,7 +558,7 @@ static void td_issue_pending(struct dma_chan *chan)
|
|||
|
||||
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
|
||||
struct scatterlist *sgl, unsigned int sg_len,
|
||||
enum dma_data_direction direction, unsigned long flags)
|
||||
enum dma_transfer_direction direction, unsigned long flags)
|
||||
{
|
||||
struct timb_dma_chan *td_chan =
|
||||
container_of(chan, struct timb_dma_chan, chan);
|
||||
|
@ -606,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
|
|||
}
|
||||
|
||||
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
|
||||
td_desc->desc_list_len, DMA_TO_DEVICE);
|
||||
td_desc->desc_list_len, DMA_MEM_TO_DEV);
|
||||
|
||||
return &td_desc->txd;
|
||||
}
|
||||
|
@ -775,8 +775,8 @@ static int __devinit td_probe(struct platform_device *pdev)
|
|||
td_chan->descs = pchan->descriptors;
|
||||
td_chan->desc_elems = pchan->descriptor_elements;
|
||||
td_chan->bytes_per_line = pchan->bytes_per_line;
|
||||
td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
|
||||
DMA_TO_DEVICE;
|
||||
td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
|
||||
DMA_MEM_TO_DEV;
|
||||
|
||||
td_chan->membase = td->membase +
|
||||
(i / 2) * TIMBDMA_INSTANCE_OFFSET +
|
||||
|
@ -841,17 +841,7 @@ static struct platform_driver td_driver = {
|
|||
.remove = __exit_p(td_remove),
|
||||
};
|
||||
|
||||
static int __init td_init(void)
|
||||
{
|
||||
return platform_driver_register(&td_driver);
|
||||
}
|
||||
module_init(td_init);
|
||||
|
||||
static void __exit td_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&td_driver);
|
||||
}
|
||||
module_exit(td_exit);
|
||||
module_platform_driver(td_driver);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_DESCRIPTION("Timberdale DMA controller driver");
|
||||
|
|
|
@ -845,7 +845,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
|
|||
|
||||
static struct dma_async_tx_descriptor *
|
||||
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
||||
unsigned int sg_len, enum dma_data_direction direction,
|
||||
unsigned int sg_len, enum dma_transfer_direction direction,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
|
||||
|
@ -860,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
|
||||
BUG_ON(!ds || !ds->reg_width);
|
||||
if (ds->tx_reg)
|
||||
BUG_ON(direction != DMA_TO_DEVICE);
|
||||
BUG_ON(direction != DMA_MEM_TO_DEV);
|
||||
else
|
||||
BUG_ON(direction != DMA_FROM_DEVICE);
|
||||
BUG_ON(direction != DMA_DEV_TO_MEM);
|
||||
if (unlikely(!sg_len))
|
||||
return NULL;
|
||||
|
||||
|
@ -882,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
mem = sg_dma_address(sg);
|
||||
|
||||
if (__is_dmac64(ddev)) {
|
||||
if (direction == DMA_TO_DEVICE) {
|
||||
if (direction == DMA_MEM_TO_DEV) {
|
||||
desc->hwdesc.SAR = mem;
|
||||
desc->hwdesc.DAR = ds->tx_reg;
|
||||
} else {
|
||||
|
@ -891,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
}
|
||||
desc->hwdesc.CNTR = sg_dma_len(sg);
|
||||
} else {
|
||||
if (direction == DMA_TO_DEVICE) {
|
||||
if (direction == DMA_MEM_TO_DEV) {
|
||||
desc->hwdesc32.SAR = mem;
|
||||
desc->hwdesc32.DAR = ds->tx_reg;
|
||||
} else {
|
||||
|
@ -900,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
|
|||
}
|
||||
desc->hwdesc32.CNTR = sg_dma_len(sg);
|
||||
}
|
||||
if (direction == DMA_TO_DEVICE) {
|
||||
if (direction == DMA_MEM_TO_DEV) {
|
||||
sai = ds->reg_width;
|
||||
dai = 0;
|
||||
} else {
|
||||
|
|
|
@ -287,7 +287,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
|
|||
sg_dma_len(sg) = new_size;
|
||||
|
||||
txd = ichan->dma_chan.device->device_prep_slave_sg(
|
||||
&ichan->dma_chan, sg, 1, DMA_FROM_DEVICE,
|
||||
&ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM,
|
||||
DMA_PREP_INTERRUPT);
|
||||
if (!txd)
|
||||
goto error;
|
||||
|
|
|
@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
|
|||
spin_unlock_irq(&fh->queue_lock);
|
||||
|
||||
desc = fh->chan->device->device_prep_slave_sg(fh->chan,
|
||||
buf->sg, sg_elems, DMA_FROM_DEVICE,
|
||||
buf->sg, sg_elems, DMA_DEV_TO_MEM,
|
||||
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
|
||||
if (!desc) {
|
||||
spin_lock_irq(&fh->queue_lock);
|
||||
|
|
|
@ -513,7 +513,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
|
|||
* transaction, and then put it under external control
|
||||
*/
|
||||
memset(&config, 0, sizeof(config));
|
||||
config.direction = DMA_TO_DEVICE;
|
||||
config.direction = DMA_MEM_TO_DEV;
|
||||
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
|
||||
ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
|
||||
|
|
|
@ -823,6 +823,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
|
|||
struct scatterlist *sg;
|
||||
unsigned int i;
|
||||
enum dma_data_direction direction;
|
||||
enum dma_transfer_direction slave_dirn;
|
||||
unsigned int sglen;
|
||||
u32 iflags;
|
||||
|
||||
|
@ -860,16 +861,19 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
|
|||
if (host->caps.has_dma)
|
||||
atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
|
||||
|
||||
if (data->flags & MMC_DATA_READ)
|
||||
if (data->flags & MMC_DATA_READ) {
|
||||
direction = DMA_FROM_DEVICE;
|
||||
else
|
||||
slave_dirn = DMA_DEV_TO_MEM;
|
||||
} else {
|
||||
direction = DMA_TO_DEVICE;
|
||||
slave_dirn = DMA_MEM_TO_DEV;
|
||||
}
|
||||
|
||||
sglen = dma_map_sg(chan->device->dev, data->sg,
|
||||
data->sg_len, direction);
|
||||
|
||||
desc = chan->device->device_prep_slave_sg(chan,
|
||||
data->sg, sglen, direction,
|
||||
data->sg, sglen, slave_dirn,
|
||||
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
if (!desc)
|
||||
goto unmap_exit;
|
||||
|
|
|
@ -374,6 +374,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
|
|||
struct dma_chan *chan;
|
||||
struct dma_device *device;
|
||||
struct dma_async_tx_descriptor *desc;
|
||||
enum dma_data_direction buffer_dirn;
|
||||
int nr_sg;
|
||||
|
||||
/* Check if next job is already prepared */
|
||||
|
@ -387,10 +388,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
|
|||
}
|
||||
|
||||
if (data->flags & MMC_DATA_READ) {
|
||||
conf.direction = DMA_FROM_DEVICE;
|
||||
conf.direction = DMA_DEV_TO_MEM;
|
||||
buffer_dirn = DMA_FROM_DEVICE;
|
||||
chan = host->dma_rx_channel;
|
||||
} else {
|
||||
conf.direction = DMA_TO_DEVICE;
|
||||
conf.direction = DMA_MEM_TO_DEV;
|
||||
buffer_dirn = DMA_TO_DEVICE;
|
||||
chan = host->dma_tx_channel;
|
||||
}
|
||||
|
||||
|
@ -403,7 +406,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
|
|||
return -EINVAL;
|
||||
|
||||
device = chan->device;
|
||||
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
|
||||
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
|
||||
if (nr_sg == 0)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -426,7 +429,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
|
|||
unmap_exit:
|
||||
if (!next)
|
||||
dmaengine_terminate_all(chan);
|
||||
dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
|
||||
dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
|
|
@ -218,6 +218,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
|
|||
unsigned int blksz = data->blksz;
|
||||
unsigned int datasize = nob * blksz;
|
||||
struct scatterlist *sg;
|
||||
enum dma_transfer_direction slave_dirn;
|
||||
int i, nents;
|
||||
|
||||
if (data->flags & MMC_DATA_STREAM)
|
||||
|
@ -240,10 +241,13 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
|
|||
}
|
||||
}
|
||||
|
||||
if (data->flags & MMC_DATA_READ)
|
||||
if (data->flags & MMC_DATA_READ) {
|
||||
host->dma_dir = DMA_FROM_DEVICE;
|
||||
else
|
||||
slave_dirn = DMA_DEV_TO_MEM;
|
||||
} else {
|
||||
host->dma_dir = DMA_TO_DEVICE;
|
||||
slave_dirn = DMA_MEM_TO_DEV;
|
||||
}
|
||||
|
||||
nents = dma_map_sg(host->dma->device->dev, data->sg,
|
||||
data->sg_len, host->dma_dir);
|
||||
|
@ -251,7 +255,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
|
|||
return -EINVAL;
|
||||
|
||||
host->desc = host->dma->device->device_prep_slave_sg(host->dma,
|
||||
data->sg, data->sg_len, host->dma_dir,
|
||||
data->sg, data->sg_len, slave_dirn,
|
||||
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
|
||||
if (!host->desc) {
|
||||
|
|
|
@ -154,6 +154,7 @@ struct mxs_mmc_host {
|
|||
struct dma_chan *dmach;
|
||||
struct mxs_dma_data dma_data;
|
||||
unsigned int dma_dir;
|
||||
enum dma_transfer_direction slave_dirn;
|
||||
u32 ssp_pio_words[SSP_PIO_NUM];
|
||||
|
||||
unsigned int version;
|
||||
|
@ -324,7 +325,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
|
|||
}
|
||||
|
||||
desc = host->dmach->device->device_prep_slave_sg(host->dmach,
|
||||
sgl, sg_len, host->dma_dir, append);
|
||||
sgl, sg_len, host->slave_dirn, append);
|
||||
if (desc) {
|
||||
desc->callback = mxs_mmc_dma_irq_callback;
|
||||
desc->callback_param = host;
|
||||
|
@ -356,6 +357,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
|
|||
host->ssp_pio_words[1] = cmd0;
|
||||
host->ssp_pio_words[2] = cmd1;
|
||||
host->dma_dir = DMA_NONE;
|
||||
host->slave_dirn = DMA_TRANS_NONE;
|
||||
desc = mxs_mmc_prep_dma(host, 0);
|
||||
if (!desc)
|
||||
goto out;
|
||||
|
@ -395,6 +397,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
|
|||
host->ssp_pio_words[1] = cmd0;
|
||||
host->ssp_pio_words[2] = cmd1;
|
||||
host->dma_dir = DMA_NONE;
|
||||
host->slave_dirn = DMA_TRANS_NONE;
|
||||
desc = mxs_mmc_prep_dma(host, 0);
|
||||
if (!desc)
|
||||
goto out;
|
||||
|
@ -433,6 +436,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
|
|||
int i;
|
||||
|
||||
unsigned short dma_data_dir, timeout;
|
||||
enum dma_transfer_direction slave_dirn;
|
||||
unsigned int data_size = 0, log2_blksz;
|
||||
unsigned int blocks = data->blocks;
|
||||
|
||||
|
@ -448,9 +452,11 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
|
|||
|
||||
if (data->flags & MMC_DATA_WRITE) {
|
||||
dma_data_dir = DMA_TO_DEVICE;
|
||||
slave_dirn = DMA_MEM_TO_DEV;
|
||||
read = 0;
|
||||
} else {
|
||||
dma_data_dir = DMA_FROM_DEVICE;
|
||||
slave_dirn = DMA_DEV_TO_MEM;
|
||||
read = BM_SSP_CTRL0_READ;
|
||||
}
|
||||
|
||||
|
@ -510,6 +516,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
|
|||
host->ssp_pio_words[1] = cmd0;
|
||||
host->ssp_pio_words[2] = cmd1;
|
||||
host->dma_dir = DMA_NONE;
|
||||
host->slave_dirn = DMA_TRANS_NONE;
|
||||
desc = mxs_mmc_prep_dma(host, 0);
|
||||
if (!desc)
|
||||
goto out;
|
||||
|
@ -518,6 +525,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
|
|||
WARN_ON(host->data != NULL);
|
||||
host->data = data;
|
||||
host->dma_dir = dma_data_dir;
|
||||
host->slave_dirn = slave_dirn;
|
||||
desc = mxs_mmc_prep_dma(host, 1);
|
||||
if (!desc)
|
||||
goto out;
|
||||
|
|
|
@ -286,7 +286,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
|
|||
if (ret > 0) {
|
||||
host->dma_active = true;
|
||||
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
|
||||
DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
}
|
||||
|
||||
if (desc) {
|
||||
|
@ -335,7 +335,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
|
|||
if (ret > 0) {
|
||||
host->dma_active = true;
|
||||
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
|
||||
DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
}
|
||||
|
||||
if (desc) {
|
||||
|
|
|
@ -77,7 +77,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
|
|||
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
|
||||
if (ret > 0)
|
||||
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
|
||||
DMA_FROM_DEVICE, DMA_CTRL_ACK);
|
||||
DMA_DEV_TO_MEM, DMA_CTRL_ACK);
|
||||
|
||||
if (desc) {
|
||||
cookie = dmaengine_submit(desc);
|
||||
|
@ -158,7 +158,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
|
|||
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
|
||||
if (ret > 0)
|
||||
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
|
||||
DMA_TO_DEVICE, DMA_CTRL_ACK);
|
||||
DMA_MEM_TO_DEV, DMA_CTRL_ACK);
|
||||
|
||||
if (desc) {
|
||||
cookie = dmaengine_submit(desc);
|
||||
|
|
|
@ -827,7 +827,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
|
|||
pio[1] = pio[2] = 0;
|
||||
desc = channel->device->device_prep_slave_sg(channel,
|
||||
(struct scatterlist *)pio,
|
||||
ARRAY_SIZE(pio), DMA_NONE, 0);
|
||||
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
|
||||
if (!desc) {
|
||||
pr_err("step 1 error\n");
|
||||
return -1;
|
||||
|
@ -839,7 +839,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
|
|||
sg_init_one(sgl, this->cmd_buffer, this->command_length);
|
||||
dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
|
||||
desc = channel->device->device_prep_slave_sg(channel,
|
||||
sgl, 1, DMA_TO_DEVICE, 1);
|
||||
sgl, 1, DMA_MEM_TO_DEV, 1);
|
||||
if (!desc) {
|
||||
pr_err("step 2 error\n");
|
||||
return -1;
|
||||
|
@ -872,7 +872,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
|
|||
pio[1] = 0;
|
||||
desc = channel->device->device_prep_slave_sg(channel,
|
||||
(struct scatterlist *)pio,
|
||||
ARRAY_SIZE(pio), DMA_NONE, 0);
|
||||
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
|
||||
if (!desc) {
|
||||
pr_err("step 1 error\n");
|
||||
return -1;
|
||||
|
@ -881,7 +881,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
|
|||
/* [2] send DMA request */
|
||||
prepare_data_dma(this, DMA_TO_DEVICE);
|
||||
desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
|
||||
1, DMA_TO_DEVICE, 1);
|
||||
1, DMA_MEM_TO_DEV, 1);
|
||||
if (!desc) {
|
||||
pr_err("step 2 error\n");
|
||||
return -1;
|
||||
|
@ -908,7 +908,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
|
|||
pio[1] = 0;
|
||||
desc = channel->device->device_prep_slave_sg(channel,
|
||||
(struct scatterlist *)pio,
|
||||
ARRAY_SIZE(pio), DMA_NONE, 0);
|
||||
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
|
||||
if (!desc) {
|
||||
pr_err("step 1 error\n");
|
||||
return -1;
|
||||
|
@ -917,7 +917,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
|
|||
/* [2] : send DMA request */
|
||||
prepare_data_dma(this, DMA_FROM_DEVICE);
|
||||
desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
|
||||
1, DMA_FROM_DEVICE, 1);
|
||||
1, DMA_DEV_TO_MEM, 1);
|
||||
if (!desc) {
|
||||
pr_err("step 2 error\n");
|
||||
return -1;
|
||||
|
@ -964,7 +964,7 @@ int gpmi_send_page(struct gpmi_nand_data *this,
|
|||
|
||||
desc = channel->device->device_prep_slave_sg(channel,
|
||||
(struct scatterlist *)pio,
|
||||
ARRAY_SIZE(pio), DMA_NONE, 0);
|
||||
ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
|
||||
if (!desc) {
|
||||
pr_err("step 2 error\n");
|
||||
return -1;
|
||||
|
@ -998,7 +998,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
|
|||
| BF_GPMI_CTRL0_XFER_COUNT(0);
|
||||
pio[1] = 0;
|
||||
desc = channel->device->device_prep_slave_sg(channel,
|
||||
(struct scatterlist *)pio, 2, DMA_NONE, 0);
|
||||
(struct scatterlist *)pio, 2,
|
||||
DMA_TRANS_NONE, 0);
|
||||
if (!desc) {
|
||||
pr_err("step 1 error\n");
|
||||
return -1;
|
||||
|
@ -1027,7 +1028,7 @@ int gpmi_read_page(struct gpmi_nand_data *this,
|
|||
pio[5] = auxiliary;
|
||||
desc = channel->device->device_prep_slave_sg(channel,
|
||||
(struct scatterlist *)pio,
|
||||
ARRAY_SIZE(pio), DMA_NONE, 1);
|
||||
ARRAY_SIZE(pio), DMA_TRANS_NONE, 1);
|
||||
if (!desc) {
|
||||
pr_err("step 2 error\n");
|
||||
return -1;
|
||||
|
@ -1045,7 +1046,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
|
|||
| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
|
||||
pio[1] = 0;
|
||||
desc = channel->device->device_prep_slave_sg(channel,
|
||||
(struct scatterlist *)pio, 2, DMA_NONE, 1);
|
||||
(struct scatterlist *)pio, 2,
|
||||
DMA_TRANS_NONE, 1);
|
||||
if (!desc) {
|
||||
pr_err("step 3 error\n");
|
||||
return -1;
|
||||
|
|
|
@ -459,7 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
|
|||
sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
|
||||
|
||||
ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
|
||||
&ctl->sg, 1, DMA_TO_DEVICE,
|
||||
&ctl->sg, 1, DMA_MEM_TO_DEV,
|
||||
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
|
||||
if (!ctl->adesc)
|
||||
return NETDEV_TX_BUSY;
|
||||
|
@ -571,7 +571,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
|
|||
sg_dma_len(sg) = DMA_BUFFER_SIZE;
|
||||
|
||||
ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
|
||||
sg, 1, DMA_FROM_DEVICE,
|
||||
sg, 1, DMA_DEV_TO_MEM,
|
||||
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
|
||||
|
||||
if (!ctl->adesc)
|
||||
|
|
|
@@ -131,7 +131,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
	rxchan = dws->rxchan;

	/* 2. Prepare the TX dma transfer */
	txconf.direction = DMA_TO_DEVICE;
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = LNW_DMA_MSIZE_16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

@@ -147,13 +147,13 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
	txdesc = txchan->device->device_prep_slave_sg(txchan,
				&dws->tx_sgl,
				1,
				DMA_TO_DEVICE,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
	txdesc->callback = dw_spi_dma_done;
	txdesc->callback_param = dws;

	/* 3. Prepare the RX dma transfer */
	rxconf.direction = DMA_FROM_DEVICE;
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = LNW_DMA_MSIZE_16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

@@ -169,7 +169,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
	rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
				&dws->rx_sgl,
				1,
				DMA_FROM_DEVICE,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
	rxdesc->callback = dw_spi_dma_done;
	rxdesc->callback_param = dws;
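For context (not part of this commit): the hunks above follow the pattern used throughout this merge, where dma_slave_config.direction and the direction argument of device_prep_slave_sg() move to the new dma_transfer_direction values (DMA_MEM_TO_DEV / DMA_DEV_TO_MEM), while dma_map_sg() and dma_unmap_sg() keep taking the old dma_data_direction values (DMA_TO_DEVICE / DMA_FROM_DEVICE). A minimal client-side sketch of that split is shown below; the function and parameter names are placeholders, not identifiers from this commit.

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Illustrative only: 'chan', 'sgl', 'sg_len' and 'fifo_addr' are assumed
 * to be provided by the caller. */
static int example_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config conf = {
		/* new transfer-direction enum for the slave configuration */
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *desc;
	int nents;

	if (dmaengine_slave_config(chan, &conf))
		return -EINVAL;

	/* dma_map_sg() still takes the old dma_data_direction values */
	nents = dma_map_sg(chan->device->dev, sgl, sg_len, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;

	desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
						  DMA_MEM_TO_DEV,
						  DMA_PREP_INTERRUPT |
						  DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(chan->device->dev, sgl, sg_len, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
```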
|
|
@ -551,6 +551,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
|
|||
struct dma_async_tx_descriptor *txd;
|
||||
enum dma_slave_buswidth buswidth;
|
||||
struct dma_slave_config conf;
|
||||
enum dma_transfer_direction slave_dirn;
|
||||
struct scatterlist *sg;
|
||||
struct sg_table *sgt;
|
||||
struct dma_chan *chan;
|
||||
|
@ -573,6 +574,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
|
|||
|
||||
conf.src_addr = espi->sspdr_phys;
|
||||
conf.src_addr_width = buswidth;
|
||||
slave_dirn = DMA_DEV_TO_MEM;
|
||||
} else {
|
||||
chan = espi->dma_tx;
|
||||
buf = t->tx_buf;
|
||||
|
@ -580,6 +582,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
|
|||
|
||||
conf.dst_addr = espi->sspdr_phys;
|
||||
conf.dst_addr_width = buswidth;
|
||||
slave_dirn = DMA_MEM_TO_DEV;
|
||||
}
|
||||
|
||||
ret = dmaengine_slave_config(chan, &conf);
|
||||
|
@ -631,7 +634,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
|
|||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
|
||||
dir, DMA_CTRL_ACK);
|
||||
slave_dirn, DMA_CTRL_ACK);
|
||||
if (!txd) {
|
||||
dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
@ -979,7 +982,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
|
|||
dma_cap_set(DMA_SLAVE, mask);
|
||||
|
||||
espi->dma_rx_data.port = EP93XX_DMA_SSP;
|
||||
espi->dma_rx_data.direction = DMA_FROM_DEVICE;
|
||||
espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
|
||||
espi->dma_rx_data.name = "ep93xx-spi-rx";
|
||||
|
||||
espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
|
||||
|
@ -990,7 +993,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
|
|||
}
|
||||
|
||||
espi->dma_tx_data.port = EP93XX_DMA_SSP;
|
||||
espi->dma_tx_data.direction = DMA_TO_DEVICE;
|
||||
espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
|
||||
espi->dma_tx_data.name = "ep93xx-spi-tx";
|
||||
|
||||
espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
|
||||
|
|
|
@ -900,11 +900,11 @@ static int configure_dma(struct pl022 *pl022)
|
|||
{
|
||||
struct dma_slave_config rx_conf = {
|
||||
.src_addr = SSP_DR(pl022->phybase),
|
||||
.direction = DMA_FROM_DEVICE,
|
||||
.direction = DMA_DEV_TO_MEM,
|
||||
};
|
||||
struct dma_slave_config tx_conf = {
|
||||
.dst_addr = SSP_DR(pl022->phybase),
|
||||
.direction = DMA_TO_DEVICE,
|
||||
.direction = DMA_MEM_TO_DEV,
|
||||
};
|
||||
unsigned int pages;
|
||||
int ret;
|
||||
|
@ -1041,7 +1041,7 @@ static int configure_dma(struct pl022 *pl022)
|
|||
rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
|
||||
pl022->sgt_rx.sgl,
|
||||
rx_sglen,
|
||||
DMA_FROM_DEVICE,
|
||||
DMA_DEV_TO_MEM,
|
||||
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
if (!rxdesc)
|
||||
goto err_rxdesc;
|
||||
|
@ -1049,7 +1049,7 @@ static int configure_dma(struct pl022 *pl022)
|
|||
txdesc = txchan->device->device_prep_slave_sg(txchan,
|
||||
pl022->sgt_tx.sgl,
|
||||
tx_sglen,
|
||||
DMA_TO_DEVICE,
|
||||
DMA_MEM_TO_DEV,
|
||||
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
if (!txdesc)
|
||||
goto err_txdesc;
|
||||
|
|
|
@ -1079,7 +1079,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
|
|||
}
|
||||
sg = dma->sg_rx_p;
|
||||
desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
|
||||
num, DMA_FROM_DEVICE,
|
||||
num, DMA_DEV_TO_MEM,
|
||||
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
if (!desc_rx) {
|
||||
dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
|
||||
|
@ -1124,7 +1124,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
|
|||
}
|
||||
sg = dma->sg_tx_p;
|
||||
desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
|
||||
sg, num, DMA_TO_DEVICE,
|
||||
sg, num, DMA_MEM_TO_DEV,
|
||||
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
if (!desc_tx) {
|
||||
dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
|
||||
|
|
|
@@ -268,7 +268,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 	struct dma_slave_config tx_conf = {
 		.dst_addr = uap->port.mapbase + UART01x_DR,
 		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
-		.direction = DMA_TO_DEVICE,
+		.direction = DMA_MEM_TO_DEV,
 		.dst_maxburst = uap->fifosize >> 1,
 	};
 	struct dma_chan *chan;
@@ -301,7 +301,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
 		struct dma_slave_config rx_conf = {
 			.src_addr = uap->port.mapbase + UART01x_DR,
 			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
-			.direction = DMA_FROM_DEVICE,
+			.direction = DMA_DEV_TO_MEM,
 			.src_maxburst = uap->fifosize >> 1,
 		};
 
@@ -480,7 +480,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
 		return -EBUSY;
 	}
 
-	desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE,
+	desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
 					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
@@ -676,7 +676,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
 		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
 	dma_dev = rxchan->device;
 	desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
-					DMA_FROM_DEVICE,
+					DMA_DEV_TO_MEM,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	/*
 	 * If the DMA engine is busy and cannot prepare a

@@ -764,7 +764,7 @@ static int dma_handle_rx(struct eg20t_port *priv)
 	sg_dma_address(sg) = priv->rx_buf_dma;
 
 	desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx,
-			sg, 1, DMA_FROM_DEVICE,
+			sg, 1, DMA_DEV_TO_MEM,
 			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
 	if (!desc)
@@ -923,7 +923,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
 	}
 
 	desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx,
-					priv->sg_tx_p, nent, DMA_TO_DEVICE,
+					priv->sg_tx_p, nent, DMA_MEM_TO_DEV,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n",

@@ -1339,7 +1339,7 @@ static void sci_submit_rx(struct sci_port *s)
 		struct dma_async_tx_descriptor *desc;
 
 		desc = chan->device->device_prep_slave_sg(chan,
-			sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
+			sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 
 		if (desc) {
 			s->desc_rx[i] = desc;
@@ -1454,7 +1454,7 @@ static void work_fn_tx(struct work_struct *work)
 	BUG_ON(!sg_dma_len(sg));
 
 	desc = chan->device->device_prep_slave_sg(chan,
-			sg, s->sg_len_tx, DMA_TO_DEVICE,
+			sg, s->sg_len_tx, DMA_MEM_TO_DEV,
 			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		/* switch to PIO */

@@ -84,7 +84,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
 	struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
 	struct dma_chan *dma_chan = ux500_channel->dma_chan;
 	struct dma_async_tx_descriptor *dma_desc;
-	enum dma_data_direction direction;
+	enum dma_transfer_direction direction;
 	struct scatterlist sg;
 	struct dma_slave_config slave_conf;
 	enum dma_slave_buswidth addr_width;
@@ -104,7 +104,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
 	sg_dma_address(&sg) = dma_addr;
 	sg_dma_len(&sg) = len;
 
-	direction = ux500_channel->is_tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+	direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
 	addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
 					DMA_SLAVE_BUSWIDTH_4_BYTES;
 

@@ -772,10 +772,10 @@ static void usbhsf_dma_prepare_tasklet(unsigned long data)
 	struct dma_async_tx_descriptor *desc;
 	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
 	struct device *dev = usbhs_priv_to_dev(priv);
-	enum dma_data_direction dir;
+	enum dma_transfer_direction dir;
 	dma_cookie_t cookie;
 
-	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
 
 	sg_init_table(&sg, 1);
 	sg_set_page(&sg, virt_to_page(pkt->dma),

@@ -245,6 +245,7 @@ struct mx3fb_data {
 
 	uint32_t		h_start_width;
 	uint32_t		v_start_width;
+	enum disp_data_mapping	disp_data_fmt;
 };
 
 struct dma_chan_request {
@@ -287,11 +288,14 @@ static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long r
 	__raw_writel(value, mx3fb->reg_base + reg);
 }
 
-static const uint32_t di_mappings[] = {
-	0x1600AAAA, 0x00E05555, 0x00070000, 3,	/* RGB888 */
-	0x0005000F, 0x000B000F, 0x0011000F, 1,	/* RGB666 */
-	0x0011000F, 0x000B000F, 0x0005000F, 1,	/* BGR666 */
-	0x0004003F, 0x000A000F, 0x000F003F, 1	/* RGB565 */
+struct di_mapping {
+	uint32_t b0, b1, b2;
+};
+
+static const struct di_mapping di_mappings[] = {
+	[IPU_DISP_DATA_MAPPING_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f },
+	[IPU_DISP_DATA_MAPPING_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f },
+	[IPU_DISP_DATA_MAPPING_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 },
 };
 
 static void sdc_fb_init(struct mx3fb_info *fbi)
@@ -334,7 +338,7 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
 	/* This enables the channel */
 	if (mx3_fbi->cookie < 0) {
 		mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
-		      &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+		      &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 		if (!mx3_fbi->txd) {
 			dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
 				dma_chan->chan_id);
@@ -425,7 +429,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
 * @pixel_clk:		desired pixel clock frequency in Hz.
 * @width:		width of panel in pixels.
 * @height:		height of panel in pixels.
-* @pixel_fmt:		pixel format of buffer as FOURCC ASCII code.
 * @h_start_width:	number of pixel clocks between the HSYNC signal pulse
 *			and the start of valid data.
 * @h_sync_width:	width of the HSYNC signal in units of pixel clocks.
@@ -442,7 +445,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
 static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
 			  uint32_t pixel_clk,
 			  uint16_t width, uint16_t height,
-			  enum pixel_fmt pixel_fmt,
 			  uint16_t h_start_width, uint16_t h_sync_width,
 			  uint16_t h_end_width, uint16_t v_start_width,
 			  uint16_t v_sync_width, uint16_t v_end_width,
@@ -453,6 +455,7 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
 	uint32_t old_conf;
 	uint32_t div;
 	struct clk *ipu_clk;
+	const struct di_mapping *map;
 
 	dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height);
 
@@ -540,36 +543,10 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
 			sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
 	mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
 
-	switch (pixel_fmt) {
-	case IPU_PIX_FMT_RGB24:
-		mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP);
-		mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-				((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC);
-		break;
-	case IPU_PIX_FMT_RGB666:
-		mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP);
-		mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-				((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC);
-		break;
-	case IPU_PIX_FMT_BGR666:
-		mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP);
-		mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-				((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC);
-		break;
-	default:
-		mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP);
-		mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP);
-		mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-				((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC);
-		break;
-	}
+	map = &di_mappings[mx3fb->disp_data_fmt];
+	mx3fb_write_reg(mx3fb, map->b0, DI_DISP3_B0_MAP);
+	mx3fb_write_reg(mx3fb, map->b1, DI_DISP3_B1_MAP);
+	mx3fb_write_reg(mx3fb, map->b2, DI_DISP3_B2_MAP);
 
 	spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
 
@@ -780,8 +757,6 @@ static int __set_par(struct fb_info *fbi, bool lock)
 	if (sdc_init_panel(mx3fb, mode,
 			   (PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
 			   fbi->var.xres, fbi->var.yres,
-			   (fbi->var.sync & FB_SYNC_SWAP_RGB) ?
-			   IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666,
 			   fbi->var.left_margin,
 			   fbi->var.hsync_len,
 			   fbi->var.right_margin +
@@ -1117,7 +1092,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
 	async_tx_ack(mx3_fbi->txd);
 
 	txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg +
-		mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+		mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 	if (!txd) {
 		dev_err(fbi->device,
 			"Error preparing a DMA transaction descriptor.\n");
@@ -1349,6 +1324,12 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
 	const struct fb_videomode *mode;
 	int ret, num_modes;
 
+	if (mx3fb_pdata->disp_data_fmt >= ARRAY_SIZE(di_mappings)) {
+		dev_err(dev, "Illegal display data format %d\n",
+				mx3fb_pdata->disp_data_fmt);
+		return -EINVAL;
+	}
+
 	ichan->client = mx3fb;
 	irq = ichan->eof_irq;
 
@@ -1402,6 +1383,8 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
 	mx3fbi->mx3fb = mx3fb;
 	mx3fbi->blank = FB_BLANK_NORMAL;
 
+	mx3fb->disp_data_fmt = mx3fb_pdata->disp_data_fmt;
+
 	init_completion(&mx3fbi->flip_cmpl);
 	disable_irq(ichan->eof_irq);
 	dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);

@@ -134,7 +134,7 @@ struct pl08x_txd {
 	struct dma_async_tx_descriptor tx;
 	struct list_head node;
 	struct list_head dsg_list;
-	enum dma_data_direction direction;
+	enum dma_transfer_direction direction;
 	dma_addr_t llis_bus;
 	struct pl08x_lli *llis_va;
 	/* Default cctl value for LLIs */
@@ -197,7 +197,7 @@ struct pl08x_dma_chan {
 	dma_addr_t dst_addr;
 	u32 src_cctl;
 	u32 dst_cctl;
-	enum dma_data_direction runtime_direction;
+	enum dma_transfer_direction runtime_direction;
 	dma_cookie_t lc;
 	struct list_head pend_list;
 	struct pl08x_txd *at;

@@ -23,7 +23,6 @@
 
 #include <linux/device.h>
 #include <linux/uio.h>
-#include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
 #include <linux/bitmap.h>
 #include <asm/page.h>
@@ -72,11 +71,93 @@ enum dma_transaction_type {
 	DMA_ASYNC_TX,
 	DMA_SLAVE,
 	DMA_CYCLIC,
+	DMA_INTERLEAVE,
+/* last transaction type for creation of the capabilities mask */
+	DMA_TX_TYPE_END,
 };
 
-/* last transaction type for creation of the capabilities mask */
-#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
+/**
+ * enum dma_transfer_direction - dma transfer mode and direction indicator
+ * @DMA_MEM_TO_MEM: Async/Memcpy mode
+ * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
+ * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
+ * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
+ */
+enum dma_transfer_direction {
+	DMA_MEM_TO_MEM,
+	DMA_MEM_TO_DEV,
+	DMA_DEV_TO_MEM,
+	DMA_DEV_TO_DEV,
+	DMA_TRANS_NONE,
+};
+
+/**
+ * Interleaved Transfer Request
+ * ----------------------------
+ * A chunk is collection of contiguous bytes to be transfered.
+ * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
+ * ICGs may or maynot change between chunks.
+ * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
+ *  that when repeated an integral number of times, specifies the transfer.
+ * A transfer template is specification of a Frame, the number of times
+ *  it is to be repeated and other per-transfer attributes.
+ *
+ * Practically, a client driver would have ready a template for each
+ *  type of transfer it is going to need during its lifetime and
+ *  set only 'src_start' and 'dst_start' before submitting the requests.
+ *
+ *
+ *  |      Frame-1        |       Frame-2       | ~ |       Frame-'numf'  |
+ *  |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
+ *
+ *    ==  Chunk size
+ *    ... ICG
+ */
+
+/**
+ * struct data_chunk - Element of scatter-gather list that makes a frame.
+ * @size: Number of bytes to read from source.
+ *	  size_dst := fn(op, size_src), so doesn't mean much for destination.
+ * @icg: Number of bytes to jump after last src/dst address of this
+ *	 chunk and before first src/dst address for next chunk.
+ *	 Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
+ *	 Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
+ */
+struct data_chunk {
+	size_t size;
+	size_t icg;
+};
+
+/**
+ * struct dma_interleaved_template - Template to convey DMAC the transfer pattern
+ *	 and attributes.
+ * @src_start: Bus address of source for the first chunk.
+ * @dst_start: Bus address of destination for the first chunk.
+ * @dir: Specifies the type of Source and Destination.
+ * @src_inc: If the source address increments after reading from it.
+ * @dst_inc: If the destination address increments after writing to it.
+ * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
+ *		Otherwise, source is read contiguously (icg ignored).
+ *		Ignored if src_inc is false.
+ * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
+ *		Otherwise, destination is filled contiguously (icg ignored).
+ *		Ignored if dst_inc is false.
+ * @numf: Number of frames in this template.
+ * @frame_size: Number of chunks in a frame i.e, size of sgl[].
+ * @sgl: Array of {chunk,icg} pairs that make up a frame.
+ */
+struct dma_interleaved_template {
+	dma_addr_t src_start;
+	dma_addr_t dst_start;
+	enum dma_transfer_direction dir;
+	bool src_inc;
+	bool dst_inc;
+	bool src_sgl;
+	bool dst_sgl;
+	size_t numf;
+	size_t frame_size;
+	struct data_chunk sgl[0];
+};
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
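The new interleaved API takes a template describing 'numf' frames, each made of {chunk, icg} pairs. A hedged sketch of how a client might describe a strided (2D) memory-to-memory copy on a channel that advertises DMA_INTERLEAVE; the function name, buffer handles and the decision to free the template right after prep are illustrative only, not taken from the patch:

#include <linux/dmaengine.h>
#include <linux/slab.h>

/*
 * Illustrative only: copy 'lines' rows of 'width' bytes from a packed
 * source into a destination whose rows are 'stride' bytes apart.
 * Template lifetime handling is omitted from this sketch.
 */
static struct dma_async_tx_descriptor *
example_prep_2d(struct dma_chan *chan, dma_addr_t src, dma_addr_t dst,
		size_t width, size_t stride, size_t lines)
{
	struct dma_interleaved_template *xt;

	if (!chan->device->device_prep_interleaved_dma)
		return NULL;

	/* room for exactly one data_chunk in the flexible array */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = false;		/* source read contiguously */
	xt->dst_sgl = true;		/* 'icg' applies to the destination */
	xt->numf = lines;		/* one frame per line */
	xt->frame_size = 1;		/* one chunk per frame */
	xt->sgl[0].size = width;
	xt->sgl[0].icg = stride - width;

	return chan->device->device_prep_interleaved_dma(chan, xt, 0);
}
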
@@ -269,7 +350,7 @@ enum dma_slave_buswidth {
  * struct, if applicable.
  */
 struct dma_slave_config {
-	enum dma_data_direction direction;
+	enum dma_transfer_direction direction;
 	dma_addr_t src_addr;
 	dma_addr_t dst_addr;
 	enum dma_slave_buswidth src_addr_width;
@@ -433,6 +514,7 @@ struct dma_tx_state {
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
  *	The function takes a buffer of size buf_len. The callback function will
  *	be called after period_len bytes have been transferred.
+ * @device_prep_interleaved_dma: Transfer expression in a generic way.
  * @device_control: manipulate all pending operations on a channel, returns
  *	zero or error code
  * @device_tx_status: poll for transaction completion, the optional
@@ -492,11 +574,14 @@ struct dma_device {
 
 	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
 		struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned int sg_len, enum dma_transfer_direction direction,
 		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-		size_t period_len, enum dma_data_direction direction);
+		size_t period_len, enum dma_transfer_direction direction);
+	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+		struct dma_chan *chan, struct dma_interleaved_template *xt,
+		unsigned long flags);
 	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		unsigned long arg);
 
@@ -522,7 +607,7 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
 
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
 	struct dma_chan *chan, void *buf, size_t len,
-	enum dma_data_direction dir, unsigned long flags)
+	enum dma_transfer_direction dir, unsigned long flags)
 {
 	struct scatterlist sg;
 	sg_init_one(&sg, buf, len);

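After this change, both the slave configuration and the prepare calls take enum dma_transfer_direction. A hedged sketch of a slave client using the updated API; 'fifo_addr', 'buf', 'len' and the burst/width values are placeholders, not values from the patch:

#include <linux/dmaengine.h>

static int example_start_tx(struct dma_chan *chan, dma_addr_t fifo_addr,
			    void *buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* direction is now a dma_transfer_direction value */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
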
@@ -127,7 +127,7 @@ struct dw_cyclic_desc {
 
 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-		enum dma_data_direction direction);
+		enum dma_transfer_direction direction);
 void dw_dma_cyclic_free(struct dma_chan *chan);
 int dw_dma_cyclic_start(struct dma_chan *chan);
 void dw_dma_cyclic_stop(struct dma_chan *chan);

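A hedged sketch of a dw_dmac cyclic user after the conversion, mirroring the sound/atmel callers updated further below; the function name and buffer parameters are placeholders:

#include <linux/err.h>
#include <linux/dw_dmac.h>

static int example_cyclic_playback(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	/* direction argument is now DMA_MEM_TO_DEV instead of DMA_TO_DEVICE */
	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_MEM_TO_DEV);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	return dw_dma_cyclic_start(chan);
}
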
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#ifndef __MACH_MXS_GPMI_NAND_H__
+#define __MACH_MXS_GPMI_NAND_H__
+
+/* The size of the resources is fixed. */
+#define GPMI_NAND_RES_SIZE	6
+
+/* Resource names for the GPMI NAND driver. */
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "GPMI NAND GPMI Registers"
+#define GPMI_NAND_GPMI_INTERRUPT_RES_NAME  "GPMI NAND GPMI Interrupt"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "GPMI NAND BCH Registers"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "GPMI NAND BCH Interrupt"
+#define GPMI_NAND_DMA_CHANNELS_RES_NAME    "GPMI NAND DMA Channels"
+#define GPMI_NAND_DMA_INTERRUPT_RES_NAME   "GPMI NAND DMA Interrupt"
+
+/**
+ * struct gpmi_nand_platform_data - GPMI NAND driver platform data.
+ *
+ * This structure communicates platform-specific information to the GPMI NAND
+ * driver that can't be expressed as resources.
+ *
+ * @platform_init:           A pointer to a function the driver will call to
+ *                           initialize the platform (e.g., set up the pin mux).
+ * @min_prop_delay_in_ns:    Minimum propagation delay of GPMI signals to and
+ *                           from the NAND Flash device, in nanoseconds.
+ * @max_prop_delay_in_ns:    Maximum propagation delay of GPMI signals to and
+ *                           from the NAND Flash device, in nanoseconds.
+ * @max_chip_count:          The maximum number of chips for which the driver
+ *                           should configure the hardware. This value most
+ *                           likely reflects the number of pins that are
+ *                           connected to a NAND Flash device. If this is
+ *                           greater than the SoC hardware can support, the
+ *                           driver will print a message and fail to initialize.
+ * @partitions:              An optional pointer to an array of partition
+ *                           descriptions.
+ * @partition_count:         The number of elements in the partitions array.
+ */
+struct gpmi_nand_platform_data {
+	/* SoC hardware information. */
+	int		(*platform_init)(void);
+
+	/* NAND Flash information. */
+	unsigned int	min_prop_delay_in_ns;
+	unsigned int	max_prop_delay_in_ns;
+	unsigned int	max_chip_count;
+
+	/* Medium information. */
+	struct		mtd_partition *partitions;
+	unsigned	partition_count;
+};
+#endif

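A hedged sketch of board code filling in this platform data; the include path, pinmux hook, delay values and partition layout below are made-up examples, not taken from any real board file:

#include <linux/kernel.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/gpmi-nand.h>	/* path assumed for this sketch */

static int example_gpmi_pinmux_init(void)
{
	/* set up GPMI pad muxing here */
	return 0;
}

static struct mtd_partition example_nand_parts[] = {
	{ .name = "boot",   .offset = 0,                  .size = 16 * 1024 * 1024 },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct gpmi_nand_platform_data example_gpmi_pdata = {
	.platform_init		= example_gpmi_pinmux_init,
	.min_prop_delay_in_ns	= 5,
	.max_prop_delay_in_ns	= 9,
	.max_chip_count		= 1,
	.partitions		= example_nand_parts,
	.partition_count	= ARRAY_SIZE(example_nand_parts),
};
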
@@ -30,7 +30,7 @@ struct sh_desc {
 	struct sh_dmae_regs hw;
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
-	enum dma_data_direction direction;
+	enum dma_transfer_direction direction;
 	dma_cookie_t cookie;
 	size_t partial;
 	int chunks;
@@ -48,6 +48,7 @@ struct sh_dmae_channel {
 	unsigned int offset;
 	unsigned int dmars;
 	unsigned int dmars_bit;
+	unsigned int chclr_offset;
 };
 
 struct sh_dmae_pdata {
@@ -68,6 +69,7 @@ struct sh_dmae_pdata {
 	unsigned int dmaor_is_32bit:1;
 	unsigned int needs_tend_set:1;
 	unsigned int no_dmars:1;
+	unsigned int chclr_present:1;
 };
 
 /* DMA register */

@@ -0,0 +1,6 @@
+#ifndef _SIRFSOC_DMA_H_
+#define _SIRFSOC_DMA_H_
+
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
+
+#endif

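A hedged sketch of requesting a SiRF DMA channel by id with the exported filter; the include path and channel number are arbitrary examples, not from the patch:

#include <linux/dmaengine.h>
#include <linux/sirfsoc_dma.h>	/* path assumed for this sketch */

static struct dma_chan *example_request_sirf_chan(void)
{
	dma_cap_mask_t mask;
	int ch_id = 12;		/* illustrative channel number */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* the filter matches the channel whose id equals the passed value */
	return dma_request_channel(mask, sirfsoc_dma_filter_id,
				   (void *)(unsigned long)ch_id);
}
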
@@ -133,7 +133,7 @@ static int atmel_abdac_prepare_dma(struct atmel_abdac *dac,
 	period_len = frames_to_bytes(runtime, runtime->period_size);
 
 	cdesc = dw_dma_cyclic_prep(chan, runtime->dma_addr, buffer_len,
-			period_len, DMA_TO_DEVICE);
+			period_len, DMA_MEM_TO_DEV);
 	if (IS_ERR(cdesc)) {
 		dev_dbg(&dac->pdev->dev, "could not prepare cyclic DMA\n");
 		return PTR_ERR(cdesc);

@@ -102,7 +102,7 @@ static void atmel_ac97c_dma_capture_period_done(void *arg)
 
 static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip,
 		struct snd_pcm_substream *substream,
-		enum dma_data_direction direction)
+		enum dma_transfer_direction direction)
 {
 	struct dma_chan *chan;
 	struct dw_cyclic_desc *cdesc;
@@ -118,7 +118,7 @@ static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip,
 		return -EINVAL;
 	}
 
-	if (direction == DMA_TO_DEVICE)
+	if (direction == DMA_MEM_TO_DEV)
 		chan = chip->dma.tx_chan;
 	else
 		chan = chip->dma.rx_chan;
@@ -133,7 +133,7 @@ static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip,
 		return PTR_ERR(cdesc);
 	}
 
-	if (direction == DMA_TO_DEVICE) {
+	if (direction == DMA_MEM_TO_DEV) {
 		cdesc->period_callback = atmel_ac97c_dma_playback_period_done;
 		set_bit(DMA_TX_READY, &chip->flags);
 	} else {
@@ -393,7 +393,7 @@ static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream)
 	if (cpu_is_at32ap7000()) {
 		if (!test_bit(DMA_TX_READY, &chip->flags))
 			retval = atmel_ac97c_prepare_dma(chip, substream,
-					DMA_TO_DEVICE);
+					DMA_MEM_TO_DEV);
 	} else {
 		/* Initialize and start the PDC */
 		writel(runtime->dma_addr, chip->regs + ATMEL_PDC_TPR);
@@ -484,7 +484,7 @@ static int atmel_ac97c_capture_prepare(struct snd_pcm_substream *substream)
 	if (cpu_is_at32ap7000()) {
 		if (!test_bit(DMA_RX_READY, &chip->flags))
 			retval = atmel_ac97c_prepare_dma(chip, substream,
-					DMA_FROM_DEVICE);
+					DMA_DEV_TO_MEM);
 	} else {
 		/* Initialize and start the PDC */
 		writel(runtime->dma_addr, chip->regs + ATMEL_PDC_RPR);

@@ -113,9 +113,9 @@ static int ep93xx_pcm_open(struct snd_pcm_substream *substream)
 	rtd->dma_data.name = dma_params->name;
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		rtd->dma_data.direction = DMA_TO_DEVICE;
+		rtd->dma_data.direction = DMA_MEM_TO_DEV;
 	else
-		rtd->dma_data.direction = DMA_FROM_DEVICE;
+		rtd->dma_data.direction = DMA_DEV_TO_MEM;
 
 	rtd->dma_chan = dma_request_channel(mask, ep93xx_pcm_dma_filter,
 					    &rtd->dma_data);

@@ -107,12 +107,12 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream,
 	}
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		slave_config.direction = DMA_TO_DEVICE;
+		slave_config.direction = DMA_MEM_TO_DEV;
 		slave_config.dst_addr = dma_params->dma_addr;
 		slave_config.dst_addr_width = buswidth;
 		slave_config.dst_maxburst = dma_params->burstsize;
 	} else {
-		slave_config.direction = DMA_FROM_DEVICE;
+		slave_config.direction = DMA_DEV_TO_MEM;
 		slave_config.src_addr = dma_params->dma_addr;
 		slave_config.src_addr_width = buswidth;
 		slave_config.src_maxburst = dma_params->burstsize;
@@ -159,7 +159,7 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
 		iprtd->period_bytes * iprtd->periods,
 		iprtd->period_bytes,
 		substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-		DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
 	if (!iprtd->desc) {
 		dev_err(&chan->dev->device, "cannot prepare slave dma\n");
 		return -EINVAL;

@@ -136,7 +136,7 @@ static int snd_mxs_pcm_hw_params(struct snd_pcm_substream *substream,
 		iprtd->period_bytes * iprtd->periods,
 		iprtd->period_bytes,
 		substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-		DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
 	if (!iprtd->desc) {
 		dev_err(&chan->dev->device, "cannot prepare slave dma\n");
 		return -EINVAL;

@@ -86,7 +86,7 @@ static void dma_enqueue(struct snd_pcm_substream *substream)
 	dma_info.cap = (samsung_dma_has_circular() ? DMA_CYCLIC : DMA_SLAVE);
 	dma_info.direction =
 		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK
-		? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
 	dma_info.fp = audio_buffdone;
 	dma_info.fp_param = substream;
 	dma_info.period = prtd->dma_period;
@@ -171,7 +171,7 @@ static int dma_hw_params(struct snd_pcm_substream *substream,
 		dma_info.client = prtd->params->client;
 		dma_info.direction =
 			(substream->stream == SNDRV_PCM_STREAM_PLAYBACK
-			? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+			? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
 		dma_info.width = prtd->params->dma_size;
 		dma_info.fifo = prtd->params->dma_addr;
 		prtd->params->ch = prtd->params->ops->request(

@@ -131,7 +131,7 @@ static int siu_pcm_wr_set(struct siu_port *port_info,
 	sg_dma_address(&sg) = buff;
 
 	desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan,
-		&sg, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		&sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		dev_err(dev, "Failed to allocate a dma descriptor\n");
 		return -ENOMEM;
@@ -181,7 +181,7 @@ static int siu_pcm_rd_set(struct siu_port *port_info,
 	sg_dma_address(&sg) = buff;
 
 	desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan,
-		&sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		&sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		dev_err(dev, "Failed to allocate dma descriptor\n");
 		return -ENOMEM;

@@ -134,7 +134,7 @@ txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
 	sg_dma_address(&sg) = buf_dma_addr;
 	desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
 		dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-		DMA_TO_DEVICE : DMA_FROM_DEVICE,
+		DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
 		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
 		dev_err(&chan->dev->device, "cannot prepare slave dma\n");