Merge branch 'next/topic-dma-samsung' into next-samsung-devel

Kukjin Kim 2011-10-04 18:57:38 +09:00
commit 59ca37f74a
49 changed files with 2067 additions and 2303 deletions

View File

@@ -21,6 +21,9 @@
 * OneNAND features.
 */

+#ifndef ASM_PL080_H
+#define ASM_PL080_H
+
#define PL080_INT_STATUS		(0x00)
#define PL080_TC_STATUS			(0x04)
#define PL080_TC_CLEAR			(0x08)
@@ -138,3 +141,4 @@ struct pl080s_lli {
	u32	control1;
};
+#endif /* ASM_PL080_H */

View File

@@ -11,7 +11,7 @@ if ARCH_EXYNOS4

config CPU_EXYNOS4210
	bool
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
	help
	  Enable EXYNOS4210 CPU support

View File

@@ -43,6 +43,11 @@ static struct clk clk_sclk_usbphy1 = {
	.name		= "sclk_usbphy1",
};

+static struct clk dummy_apb_pclk = {
+	.name		= "apb_pclk",
+	.id		= -1,
+};
+
static int exynos4_clksrc_mask_top_ctrl(struct clk *clk, int enable)
{
	return s5p_gatectrl(S5P_CLKSRC_MASK_TOP, clk, enable);
@@ -454,13 +459,13 @@ static struct clk init_clocks_off[] = {
		.enable		= exynos4_clk_ip_fsys_ctrl,
		.ctrlbit	= (1 << 10),
	}, {
-		.name		= "pdma",
-		.devname	= "s3c-pl330.0",
+		.name		= "dma",
+		.devname	= "dma-pl330.0",
		.enable		= exynos4_clk_ip_fsys_ctrl,
		.ctrlbit	= (1 << 0),
	}, {
-		.name		= "pdma",
-		.devname	= "s3c-pl330.1",
+		.name		= "dma",
+		.devname	= "dma-pl330.1",
		.enable		= exynos4_clk_ip_fsys_ctrl,
		.ctrlbit	= (1 << 1),
	}, {
@@ -1208,5 +1213,7 @@ void __init exynos4_register_clocks(void)
	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

+	s3c24xx_register_clock(&dummy_apb_pclk);
+
	s3c_pwmclk_init();
}
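
The dummy "apb_pclk" registered above (and, in the same way, by the other platforms later in this merge) exists because the AMBA bus core looks up a bus clock by exactly that name when it probes any amba_device, including the PL330 DMACs converted here. A simplified sketch of that lookup, assuming the drivers/amba/bus.c behaviour of this era (the real function also stores the clock on the device):

static int amba_get_enable_pclk(struct amba_device *pcdev)
{
	/* "apb_pclk" is fixed by the AMBA bus core; platforms without a
	 * controllable bus clock satisfy the lookup with a dummy clk. */
	struct clk *pclk = clk_get(&pcdev->dev, "apb_pclk");

	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	return clk_enable(pclk);
}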

View File

@@ -21,151 +21,229 @@
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

-#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>

+#include <asm/irq.h>
 #include <plat/devs.h>
 #include <plat/irqs.h>

 #include <mach/map.h>
 #include <mach/irqs.h>
+#include <mach/dma.h>

-#include <plat/s3c-pl330-pdata.h>
-
 static u64 dma_dmamask = DMA_BIT_MASK(32);

-static struct resource exynos4_pdma0_resource[] = {
-	[0] = {
-		.start	= EXYNOS4_PA_PDMA0,
-		.end	= EXYNOS4_PA_PDMA0 + SZ_4K,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_PDMA0,
-		.end	= IRQ_PDMA0,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct s3c_pl330_platdata exynos4_pdma0_pdata = {
-	.peri = {
-		[0] = DMACH_PCM0_RX,
-		[1] = DMACH_PCM0_TX,
-		[2] = DMACH_PCM2_RX,
-		[3] = DMACH_PCM2_TX,
-		[4] = DMACH_MSM_REQ0,
-		[5] = DMACH_MSM_REQ2,
-		[6] = DMACH_SPI0_RX,
-		[7] = DMACH_SPI0_TX,
-		[8] = DMACH_SPI2_RX,
-		[9] = DMACH_SPI2_TX,
-		[10] = DMACH_I2S0S_TX,
-		[11] = DMACH_I2S0_RX,
-		[12] = DMACH_I2S0_TX,
-		[13] = DMACH_I2S2_RX,
-		[14] = DMACH_I2S2_TX,
-		[15] = DMACH_UART0_RX,
-		[16] = DMACH_UART0_TX,
-		[17] = DMACH_UART2_RX,
-		[18] = DMACH_UART2_TX,
-		[19] = DMACH_UART4_RX,
-		[20] = DMACH_UART4_TX,
-		[21] = DMACH_SLIMBUS0_RX,
-		[22] = DMACH_SLIMBUS0_TX,
-		[23] = DMACH_SLIMBUS2_RX,
-		[24] = DMACH_SLIMBUS2_TX,
-		[25] = DMACH_SLIMBUS4_RX,
-		[26] = DMACH_SLIMBUS4_TX,
-		[27] = DMACH_AC97_MICIN,
-		[28] = DMACH_AC97_PCMIN,
-		[29] = DMACH_AC97_PCMOUT,
-		[30] = DMACH_MAX,
-		[31] = DMACH_MAX,
-	},
-};
-
-static struct platform_device exynos4_device_pdma0 = {
-	.name		= "s3c-pl330",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(exynos4_pdma0_resource),
-	.resource	= exynos4_pdma0_resource,
-	.dev		= {
-		.dma_mask = &dma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.platform_data = &exynos4_pdma0_pdata,
-	},
-};
-
-static struct resource exynos4_pdma1_resource[] = {
-	[0] = {
-		.start	= EXYNOS4_PA_PDMA1,
-		.end	= EXYNOS4_PA_PDMA1 + SZ_4K,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_PDMA1,
-		.end	= IRQ_PDMA1,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct s3c_pl330_platdata exynos4_pdma1_pdata = {
-	.peri = {
-		[0] = DMACH_PCM0_RX,
-		[1] = DMACH_PCM0_TX,
-		[2] = DMACH_PCM1_RX,
-		[3] = DMACH_PCM1_TX,
-		[4] = DMACH_MSM_REQ1,
-		[5] = DMACH_MSM_REQ3,
-		[6] = DMACH_SPI1_RX,
-		[7] = DMACH_SPI1_TX,
-		[8] = DMACH_I2S0S_TX,
-		[9] = DMACH_I2S0_RX,
-		[10] = DMACH_I2S0_TX,
-		[11] = DMACH_I2S1_RX,
-		[12] = DMACH_I2S1_TX,
-		[13] = DMACH_UART0_RX,
-		[14] = DMACH_UART0_TX,
-		[15] = DMACH_UART1_RX,
-		[16] = DMACH_UART1_TX,
-		[17] = DMACH_UART3_RX,
-		[18] = DMACH_UART3_TX,
-		[19] = DMACH_SLIMBUS1_RX,
-		[20] = DMACH_SLIMBUS1_TX,
-		[21] = DMACH_SLIMBUS3_RX,
-		[22] = DMACH_SLIMBUS3_TX,
-		[23] = DMACH_SLIMBUS5_RX,
-		[24] = DMACH_SLIMBUS5_TX,
-		[25] = DMACH_SLIMBUS0AUX_RX,
-		[26] = DMACH_SLIMBUS0AUX_TX,
-		[27] = DMACH_SPDIF,
-		[28] = DMACH_MAX,
-		[29] = DMACH_MAX,
-		[30] = DMACH_MAX,
-		[31] = DMACH_MAX,
-	},
-};
-
-static struct platform_device exynos4_device_pdma1 = {
-	.name		= "s3c-pl330",
-	.id		= 1,
-	.num_resources	= ARRAY_SIZE(exynos4_pdma1_resource),
-	.resource	= exynos4_pdma1_resource,
-	.dev		= {
-		.dma_mask = &dma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.platform_data = &exynos4_pdma1_pdata,
-	},
-};
-
-static struct platform_device *exynos4_dmacs[] __initdata = {
-	&exynos4_device_pdma0,
-	&exynos4_device_pdma1,
-};
+struct dma_pl330_peri pdma0_peri[28] = {
+	{
+		.peri_id = (u8)DMACH_PCM0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ0,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ2,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0S_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART4_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART4_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS4_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS4_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_AC97_MICIN,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_AC97_PCMIN,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_AC97_PCMOUT,
+		.rqtype = MEMTODEV,
+	},
+};
+
+struct dma_pl330_platdata exynos4_pdma0_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(pdma0_peri),
+	.peri = pdma0_peri,
+};
+
+struct amba_device exynos4_device_pdma0 = {
+	.dev = {
+		.init_name = "dma-pl330.0",
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &exynos4_pdma0_pdata,
+	},
+	.res = {
+		.start = EXYNOS4_PA_PDMA0,
+		.end = EXYNOS4_PA_PDMA0 + SZ_4K,
+		.flags = IORESOURCE_MEM,
+	},
+	.irq = {IRQ_PDMA0, NO_IRQ},
+	.periphid = 0x00041330,
+};
+
+struct dma_pl330_peri pdma1_peri[25] = {
+	{
+		.peri_id = (u8)DMACH_PCM0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ1,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ3,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0S_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS5_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SLIMBUS5_TX,
+		.rqtype = MEMTODEV,
+	},
+};
+
+struct dma_pl330_platdata exynos4_pdma1_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(pdma1_peri),
+	.peri = pdma1_peri,
+};
+
+struct amba_device exynos4_device_pdma1 = {
+	.dev = {
+		.init_name = "dma-pl330.1",
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &exynos4_pdma1_pdata,
+	},
+	.res = {
+		.start = EXYNOS4_PA_PDMA1,
+		.end = EXYNOS4_PA_PDMA1 + SZ_4K,
+		.flags = IORESOURCE_MEM,
+	},
+	.irq = {IRQ_PDMA1, NO_IRQ},
+	.periphid = 0x00041330,
+};

 static int __init exynos4_dma_init(void)
 {
-	platform_add_devices(exynos4_dmacs, ARRAY_SIZE(exynos4_dmacs));
+	amba_device_register(&exynos4_device_pdma0, &iomem_resource);
+	amba_device_register(&exynos4_device_pdma1, &iomem_resource);

 	return 0;
 }
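
With the controllers registered as AMBA devices and driven by the generic PL330 dmaengine driver, a client now claims a channel through the standard dmaengine API instead of the old S3C-private one. A minimal sketch of such a client; the filter mirrors the one in plat-samsung/dma-ops.c later in this merge (which relies on the PL330 driver exposing its dma_pl330_peri via chan->private), and the channel ID, FIFO address and bus width are placeholders:

static bool example_filter(struct dma_chan *chan, void *param)
{
	/* chan->private points at the dma_pl330_peri for this channel */
	struct dma_pl330_peri *peri = chan->private;

	return peri->peri_id == (unsigned)param;
}

static struct dma_chan *example_get_tx_chan(dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction = DMA_TO_DEVICE,
		.dst_addr = fifo,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 1,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* match the peripheral ID published in the pdma*_peri tables above */
	chan = dma_request_channel(mask, example_filter, (void *)DMACH_UART0_TX);
	if (chan)
		dmaengine_slave_config(chan, &cfg);

	return chan;
}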

View File

@@ -20,7 +20,7 @@
#ifndef __MACH_DMA_H
#define __MACH_DMA_H

-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+/* This platform uses the common DMA API driver for PL330 */
+#include <plat/dma-pl330.h>

#endif /* __MACH_DMA_H */

View File

@@ -13,7 +13,6 @@
#ifndef __ASM_ARCH_DMA_H
#define __ASM_ARCH_DMA_H __FILE__

-#include <plat/dma.h>
#include <linux/sysdev.h>

#define MAX_DMA_TRANSFER_SIZE	0x100000 /* Data Unit is half word */
@@ -51,6 +50,18 @@ enum dma_ch {
	DMACH_MAX,		/* the end entry */
};

+static inline bool samsung_dma_has_circular(void)
+{
+	return false;
+}
+
+static inline bool samsung_dma_is_dmadev(void)
+{
+	return false;
+}
+
+#include <plat/dma.h>
+
#define DMACH_LOW_LEVEL	(1<<28)	/* use this to specify hardware ch no */

/* we have 4 dma channels */
@@ -163,7 +174,7 @@ struct s3c2410_dma_chan {
	struct s3c2410_dma_client *client;

	/* channel configuration */
-	enum s3c2410_dmasrc	source;
+	enum dma_data_direction	source;
	enum dma_ch		req_ch;
	unsigned long		dev_addr;
	unsigned long		load_timeout;
@@ -196,9 +207,4 @@ struct s3c2410_dma_chan {

typedef unsigned long dma_device_t;

-static inline bool s3c_dma_has_circular(void)
-{
-	return false;
-}
-
#endif /* __ASM_ARCH_DMA_H */

View File

@@ -148,11 +148,11 @@ static struct s3c24xx_dma_map __initdata s3c2412_dma_mappings[] = {

static void s3c2412_dma_direction(struct s3c2410_dma_chan *chan,
				  struct s3c24xx_dma_map *map,
-				  enum s3c2410_dmasrc dir)
+				  enum dma_data_direction dir)
{
	unsigned long chsel;

-	if (dir == S3C2410_DMASRC_HW)
+	if (dir == DMA_FROM_DEVICE)
		chsel = map->channels_rx[0];
	else
		chsel = map->channels[0];

View File

@@ -147,14 +147,14 @@ static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
	u32 control0, control1;

	switch (chan->source) {
-	case S3C2410_DMASRC_HW:
+	case DMA_FROM_DEVICE:
		src = chan->dev_addr;
		dst = data;
		control0 = PL080_CONTROL_SRC_AHB2;
		control0 |= PL080_CONTROL_DST_INCR;
		break;
-	case S3C2410_DMASRC_MEM:
+	case DMA_TO_DEVICE:
		src = data;
		dst = chan->dev_addr;
		control0 = PL080_CONTROL_DST_AHB2;
@@ -416,7 +416,7 @@ EXPORT_SYMBOL(s3c2410_dma_enqueue);

int s3c2410_dma_devconfig(enum dma_ch channel,
-			  enum s3c2410_dmasrc source,
+			  enum dma_data_direction source,
			  unsigned long devaddr)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -437,11 +437,11 @@ int s3c2410_dma_devconfig(enum dma_ch channel,
	pr_debug("%s: peripheral %d\n", __func__, peripheral);

	switch (source) {
-	case S3C2410_DMASRC_HW:
+	case DMA_FROM_DEVICE:
		config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
		break;
-	case S3C2410_DMASRC_MEM:
+	case DMA_TO_DEVICE:
		config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
		break;
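
For reference, the magic 1 and 2 written into the flow-control field above correspond to the PL080's FlowCntrl encodings with the DMAC as flow controller; the names below are as I recall them from asm/hardware/pl080.h and are worth verifying against that header and the TRM:

/* DMAC acts as flow controller */
#define PL080_FLOW_MEM2MEM	(0x0)	/* memory-to-memory */
#define PL080_FLOW_MEM2PER	(0x1)	/* written for DMA_TO_DEVICE above */
#define PL080_FLOW_PER2MEM	(0x2)	/* written for DMA_FROM_DEVICE above */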

View File

@@ -58,11 +58,15 @@ enum dma_ch {
	DMACH_MAX		/* the end */
};

-static __inline__ bool s3c_dma_has_circular(void)
+static inline bool samsung_dma_has_circular(void)
{
	return true;
}

+static inline bool samsung_dma_is_dmadev(void)
+{
+	return false;
+}
+
#define S3C2410_DMAF_CIRCULAR	(1 << 0)

#include <plat/dma.h>
@@ -95,7 +99,7 @@ struct s3c2410_dma_chan {
	unsigned char		peripheral;

	unsigned int		flags;
-	enum s3c2410_dmasrc	source;
+	enum dma_data_direction	source;

	dma_addr_t		dev_addr;
View File

@@ -9,14 +9,14 @@ if ARCH_S5P64X0

config CPU_S5P6440
	bool
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
	select S5P_HRT
	help
	  Enable S5P6440 CPU support

config CPU_S5P6450
	bool
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
	select S5P_HRT
	help
	  Enable S5P6450 CPU support

View File

@@ -146,7 +146,8 @@ static struct clk init_clocks_off[] = {
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 8),
	}, {
-		.name		= "pdma",
+		.name		= "dma",
+		.devname	= "dma-pl330",
		.parent		= &clk_hclk_low.clk,
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 12),
@@ -499,6 +500,11 @@ static struct clksrc_clk *sysclks[] = {
	&clk_pclk_low,
};

+static struct clk dummy_apb_pclk = {
+	.name		= "apb_pclk",
+	.id		= -1,
+};
+
void __init_or_cpufreq s5p6440_setup_clocks(void)
{
	struct clk *xtal_clk;
@@ -581,5 +587,7 @@ void __init s5p6440_register_clocks(void)
	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

+	s3c24xx_register_clock(&dummy_apb_pclk);
+
	s3c_pwmclk_init();
}

View File

@@ -179,7 +179,8 @@ static struct clk init_clocks_off[] = {
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 3),
	}, {
-		.name		= "pdma",
+		.name		= "dma",
+		.devname	= "dma-pl330",
		.parent		= &clk_hclk_low.clk,
		.enable		= s5p64x0_hclk0_ctrl,
		.ctrlbit	= (1 << 12),
@@ -553,6 +554,11 @@ static struct clksrc_clk *sysclks[] = {
	&clk_sclk_audio0,
};

+static struct clk dummy_apb_pclk = {
+	.name		= "apb_pclk",
+	.id		= -1,
+};
+
void __init_or_cpufreq s5p6450_setup_clocks(void)
{
	struct clk *xtal_clk;
@@ -632,5 +638,7 @@ void __init s5p6450_register_clocks(void)
	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

+	s3c24xx_register_clock(&dummy_apb_pclk);
+
	s3c_pwmclk_init();
}

View File

@@ -21,128 +21,219 @@
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

-#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>

+#include <asm/irq.h>
 #include <mach/map.h>
 #include <mach/irqs.h>
 #include <mach/regs-clock.h>
+#include <mach/dma.h>

 #include <plat/devs.h>
-#include <plat/s3c-pl330-pdata.h>
+#include <plat/irqs.h>

 static u64 dma_dmamask = DMA_BIT_MASK(32);

-static struct resource s5p64x0_pdma_resource[] = {
-	[0] = {
-		.start	= S5P64X0_PA_PDMA,
-		.end	= S5P64X0_PA_PDMA + SZ_4K,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_DMA0,
-		.end	= IRQ_DMA0,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct s3c_pl330_platdata s5p6440_pdma_pdata = {
-	.peri = {
-		[0] = DMACH_UART0_RX,
-		[1] = DMACH_UART0_TX,
-		[2] = DMACH_UART1_RX,
-		[3] = DMACH_UART1_TX,
-		[4] = DMACH_UART2_RX,
-		[5] = DMACH_UART2_TX,
-		[6] = DMACH_UART3_RX,
-		[7] = DMACH_UART3_TX,
-		[8] = DMACH_MAX,
-		[9] = DMACH_MAX,
-		[10] = DMACH_PCM0_TX,
-		[11] = DMACH_PCM0_RX,
-		[12] = DMACH_I2S0_TX,
-		[13] = DMACH_I2S0_RX,
-		[14] = DMACH_SPI0_TX,
-		[15] = DMACH_SPI0_RX,
-		[16] = DMACH_MAX,
-		[17] = DMACH_MAX,
-		[18] = DMACH_MAX,
-		[19] = DMACH_MAX,
-		[20] = DMACH_SPI1_TX,
-		[21] = DMACH_SPI1_RX,
-		[22] = DMACH_MAX,
-		[23] = DMACH_MAX,
-		[24] = DMACH_MAX,
-		[25] = DMACH_MAX,
-		[26] = DMACH_MAX,
-		[27] = DMACH_MAX,
-		[28] = DMACH_MAX,
-		[29] = DMACH_PWM,
-		[30] = DMACH_MAX,
-		[31] = DMACH_MAX,
-	},
-};
-
-static struct s3c_pl330_platdata s5p6450_pdma_pdata = {
-	.peri = {
-		[0] = DMACH_UART0_RX,
-		[1] = DMACH_UART0_TX,
-		[2] = DMACH_UART1_RX,
-		[3] = DMACH_UART1_TX,
-		[4] = DMACH_UART2_RX,
-		[5] = DMACH_UART2_TX,
-		[6] = DMACH_UART3_RX,
-		[7] = DMACH_UART3_TX,
-		[8] = DMACH_UART4_RX,
-		[9] = DMACH_UART4_TX,
-		[10] = DMACH_PCM0_TX,
-		[11] = DMACH_PCM0_RX,
-		[12] = DMACH_I2S0_TX,
-		[13] = DMACH_I2S0_RX,
-		[14] = DMACH_SPI0_TX,
-		[15] = DMACH_SPI0_RX,
-		[16] = DMACH_PCM1_TX,
-		[17] = DMACH_PCM1_RX,
-		[18] = DMACH_PCM2_TX,
-		[19] = DMACH_PCM2_RX,
-		[20] = DMACH_SPI1_TX,
-		[21] = DMACH_SPI1_RX,
-		[22] = DMACH_USI_TX,
-		[23] = DMACH_USI_RX,
-		[24] = DMACH_MAX,
-		[25] = DMACH_I2S1_TX,
-		[26] = DMACH_I2S1_RX,
-		[27] = DMACH_I2S2_TX,
-		[28] = DMACH_I2S2_RX,
-		[29] = DMACH_PWM,
-		[30] = DMACH_UART5_RX,
-		[31] = DMACH_UART5_TX,
-	},
-};
-
-static struct platform_device s5p64x0_device_pdma = {
-	.name		= "s3c-pl330",
-	.id		= -1,
-	.num_resources	= ARRAY_SIZE(s5p64x0_pdma_resource),
-	.resource	= s5p64x0_pdma_resource,
-	.dev		= {
-		.dma_mask = &dma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-	},
-};
+struct dma_pl330_peri s5p6440_pdma_peri[22] = {
+	{
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = DMACH_MAX,
+	}, {
+		.peri_id = DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	},
+};
+
+struct dma_pl330_platdata s5p6440_pdma_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(s5p6440_pdma_peri),
+	.peri = s5p6440_pdma_peri,
+};
+
+struct dma_pl330_peri s5p6450_pdma_peri[32] = {
+	{
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART4_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART4_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_USI_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_USI_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PWM,
+	}, {
+		.peri_id = (u8)DMACH_UART5_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART5_TX,
+		.rqtype = MEMTODEV,
+	},
+};
+
+struct dma_pl330_platdata s5p6450_pdma_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(s5p6450_pdma_peri),
+	.peri = s5p6450_pdma_peri,
+};
+
+struct amba_device s5p64x0_device_pdma = {
+	.dev = {
+		.init_name = "dma-pl330",
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+	.res = {
+		.start = S5P64X0_PA_PDMA,
+		.end = S5P64X0_PA_PDMA + SZ_4K,
+		.flags = IORESOURCE_MEM,
+	},
+	.irq = {IRQ_DMA0, NO_IRQ},
+	.periphid = 0x00041330,
+};

 static int __init s5p64x0_dma_init(void)
 {
-	unsigned int id;
-
-	id = __raw_readl(S5P64X0_SYS_ID) & 0xFF000;
+	unsigned int id = __raw_readl(S5P64X0_SYS_ID) & 0xFF000;

 	if (id == 0x50000)
 		s5p64x0_device_pdma.dev.platform_data = &s5p6450_pdma_pdata;
 	else
 		s5p64x0_device_pdma.dev.platform_data = &s5p6440_pdma_pdata;

-	platform_device_register(&s5p64x0_device_pdma);
+	amba_device_register(&s5p64x0_device_pdma, &iomem_resource);

 	return 0;
 }

View File

@@ -20,7 +20,7 @@
#ifndef __MACH_DMA_H
#define __MACH_DMA_H

-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+/* This platform uses the common DMA API driver for PL330 */
+#include <plat/dma-pl330.h>

#endif /* __MACH_DMA_H */

View File

@@ -10,7 +10,7 @@ if ARCH_S5PC100

config CPU_S5PC100
	bool
	select S5P_EXT_INT
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
	help
	  Enable S5PC100 CPU support

View File

@@ -33,6 +33,11 @@ static struct clk s5p_clk_otgphy = {
	.name		= "otg_phy",
};

+static struct clk dummy_apb_pclk = {
+	.name		= "apb_pclk",
+	.id		= -1,
+};
+
static struct clk *clk_src_mout_href_list[] = {
	[0] = &s5p_clk_27m,
	[1] = &clk_fin_hpll,
@@ -454,14 +459,14 @@ static struct clk init_clocks_off[] = {
		.enable		= s5pc100_d1_0_ctrl,
		.ctrlbit	= (1 << 2),
	}, {
-		.name		= "pdma",
-		.devname	= "s3c-pl330.1",
+		.name		= "dma",
+		.devname	= "dma-pl330.1",
		.parent		= &clk_div_d1_bus.clk,
		.enable		= s5pc100_d1_0_ctrl,
		.ctrlbit	= (1 << 1),
	}, {
-		.name		= "pdma",
-		.devname	= "s3c-pl330.0",
+		.name		= "dma",
+		.devname	= "dma-pl330.0",
		.parent		= &clk_div_d1_bus.clk,
		.enable		= s5pc100_d1_0_ctrl,
		.ctrlbit	= (1 << 0),
@@ -1276,5 +1281,7 @@ void __init s5pc100_register_clocks(void)
	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

+	s3c24xx_register_clock(&dummy_apb_pclk);
+
	s3c_pwmclk_init();
}

View File

@@ -1,4 +1,8 @@
-/*
+/* linux/arch/arm/mach-s5pc100/dma.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
@@ -17,150 +21,246 @@
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

-#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>

+#include <asm/irq.h>
 #include <plat/devs.h>
+#include <plat/irqs.h>

 #include <mach/map.h>
 #include <mach/irqs.h>
+#include <mach/dma.h>

-#include <plat/s3c-pl330-pdata.h>
-
 static u64 dma_dmamask = DMA_BIT_MASK(32);

-static struct resource s5pc100_pdma0_resource[] = {
-	[0] = {
-		.start	= S5PC100_PA_PDMA0,
-		.end	= S5PC100_PA_PDMA0 + SZ_4K,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_PDMA0,
-		.end	= IRQ_PDMA0,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct s3c_pl330_platdata s5pc100_pdma0_pdata = {
-	.peri = {
-		[0] = DMACH_UART0_RX,
-		[1] = DMACH_UART0_TX,
-		[2] = DMACH_UART1_RX,
-		[3] = DMACH_UART1_TX,
-		[4] = DMACH_UART2_RX,
-		[5] = DMACH_UART2_TX,
-		[6] = DMACH_UART3_RX,
-		[7] = DMACH_UART3_TX,
-		[8] = DMACH_IRDA,
-		[9] = DMACH_I2S0_RX,
-		[10] = DMACH_I2S0_TX,
-		[11] = DMACH_I2S0S_TX,
-		[12] = DMACH_I2S1_RX,
-		[13] = DMACH_I2S1_TX,
-		[14] = DMACH_I2S2_RX,
-		[15] = DMACH_I2S2_TX,
-		[16] = DMACH_SPI0_RX,
-		[17] = DMACH_SPI0_TX,
-		[18] = DMACH_SPI1_RX,
-		[19] = DMACH_SPI1_TX,
-		[20] = DMACH_SPI2_RX,
-		[21] = DMACH_SPI2_TX,
-		[22] = DMACH_AC97_MICIN,
-		[23] = DMACH_AC97_PCMIN,
-		[24] = DMACH_AC97_PCMOUT,
-		[25] = DMACH_EXTERNAL,
-		[26] = DMACH_PWM,
-		[27] = DMACH_SPDIF,
-		[28] = DMACH_HSI_RX,
-		[29] = DMACH_HSI_TX,
-		[30] = DMACH_MAX,
-		[31] = DMACH_MAX,
-	},
-};
-
-static struct platform_device s5pc100_device_pdma0 = {
-	.name		= "s3c-pl330",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(s5pc100_pdma0_resource),
-	.resource	= s5pc100_pdma0_resource,
-	.dev		= {
-		.dma_mask = &dma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.platform_data = &s5pc100_pdma0_pdata,
-	},
-};
-
-static struct resource s5pc100_pdma1_resource[] = {
-	[0] = {
-		.start	= S5PC100_PA_PDMA1,
-		.end	= S5PC100_PA_PDMA1 + SZ_4K,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_PDMA1,
-		.end	= IRQ_PDMA1,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct s3c_pl330_platdata s5pc100_pdma1_pdata = {
-	.peri = {
-		[0] = DMACH_UART0_RX,
-		[1] = DMACH_UART0_TX,
-		[2] = DMACH_UART1_RX,
-		[3] = DMACH_UART1_TX,
-		[4] = DMACH_UART2_RX,
-		[5] = DMACH_UART2_TX,
-		[6] = DMACH_UART3_RX,
-		[7] = DMACH_UART3_TX,
-		[8] = DMACH_IRDA,
-		[9] = DMACH_I2S0_RX,
-		[10] = DMACH_I2S0_TX,
-		[11] = DMACH_I2S0S_TX,
-		[12] = DMACH_I2S1_RX,
-		[13] = DMACH_I2S1_TX,
-		[14] = DMACH_I2S2_RX,
-		[15] = DMACH_I2S2_TX,
-		[16] = DMACH_SPI0_RX,
-		[17] = DMACH_SPI0_TX,
-		[18] = DMACH_SPI1_RX,
-		[19] = DMACH_SPI1_TX,
-		[20] = DMACH_SPI2_RX,
-		[21] = DMACH_SPI2_TX,
-		[22] = DMACH_PCM0_RX,
-		[23] = DMACH_PCM0_TX,
-		[24] = DMACH_PCM1_RX,
-		[25] = DMACH_PCM1_TX,
-		[26] = DMACH_MSM_REQ0,
-		[27] = DMACH_MSM_REQ1,
-		[28] = DMACH_MSM_REQ2,
-		[29] = DMACH_MSM_REQ3,
-		[30] = DMACH_MAX,
-		[31] = DMACH_MAX,
-	},
-};
-
-static struct platform_device s5pc100_device_pdma1 = {
-	.name		= "s3c-pl330",
-	.id		= 1,
-	.num_resources	= ARRAY_SIZE(s5pc100_pdma1_resource),
-	.resource	= s5pc100_pdma1_resource,
-	.dev		= {
-		.dma_mask = &dma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.platform_data = &s5pc100_pdma1_pdata,
-	},
-};
-
-static struct platform_device *s5pc100_dmacs[] __initdata = {
-	&s5pc100_device_pdma0,
-	&s5pc100_device_pdma1,
-};
+struct dma_pl330_peri pdma0_peri[30] = {
+	{
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = DMACH_IRDA,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0S_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_AC97_MICIN,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_AC97_PCMIN,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_AC97_PCMOUT,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_EXTERNAL,
+	}, {
+		.peri_id = (u8)DMACH_PWM,
+	}, {
+		.peri_id = (u8)DMACH_SPDIF,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_HSI_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_HSI_TX,
+		.rqtype = MEMTODEV,
+	},
+};
+
+struct dma_pl330_platdata s5pc100_pdma0_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(pdma0_peri),
+	.peri = pdma0_peri,
+};
+
+struct amba_device s5pc100_device_pdma0 = {
+	.dev = {
+		.init_name = "dma-pl330.0",
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &s5pc100_pdma0_pdata,
+	},
+	.res = {
+		.start = S5PC100_PA_PDMA0,
+		.end = S5PC100_PA_PDMA0 + SZ_4K,
+		.flags = IORESOURCE_MEM,
+	},
+	.irq = {IRQ_PDMA0, NO_IRQ},
+	.periphid = 0x00041330,
+};
+
+struct dma_pl330_peri pdma1_peri[30] = {
+	{
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = DMACH_IRDA,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0S_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ0,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ1,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ2,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ3,
+	},
+};
+
+struct dma_pl330_platdata s5pc100_pdma1_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(pdma1_peri),
+	.peri = pdma1_peri,
+};
+
+struct amba_device s5pc100_device_pdma1 = {
+	.dev = {
+		.init_name = "dma-pl330.1",
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &s5pc100_pdma1_pdata,
+	},
+	.res = {
+		.start = S5PC100_PA_PDMA1,
+		.end = S5PC100_PA_PDMA1 + SZ_4K,
+		.flags = IORESOURCE_MEM,
+	},
+	.irq = {IRQ_PDMA1, NO_IRQ},
+	.periphid = 0x00041330,
+};

 static int __init s5pc100_dma_init(void)
 {
-	platform_add_devices(s5pc100_dmacs, ARRAY_SIZE(s5pc100_dmacs));
+	amba_device_register(&s5pc100_device_pdma0, &iomem_resource);
+	amba_device_register(&s5pc100_device_pdma1, &iomem_resource);

 	return 0;
 }

View File

@@ -20,7 +20,7 @@
#ifndef __MACH_DMA_H
#define __MACH_DMA_H

-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+/* This platform uses the common DMA API driver for PL330 */
+#include <plat/dma-pl330.h>

#endif /* __MACH_DMA_H */

View File

@@ -11,7 +11,7 @@ if ARCH_S5PV210

config CPU_S5PV210
	bool
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
	select S5P_EXT_INT
	select S5P_HRT
	select S5PV210_PM if PM

View File

@@ -203,6 +203,11 @@ static struct clk clk_pcmcdclk2 = {
	.name		= "pcmcdclk",
};

+static struct clk dummy_apb_pclk = {
+	.name		= "apb_pclk",
+	.id		= -1,
+};
+
static struct clk *clkset_vpllsrc_list[] = {
	[0] = &clk_fin_vpll,
	[1] = &clk_sclk_hdmi27m,
@@ -289,14 +294,14 @@ static struct clk_ops clk_fout_apll_ops = {

static struct clk init_clocks_off[] = {
	{
-		.name		= "pdma",
-		.devname	= "s3c-pl330.0",
+		.name		= "dma",
+		.devname	= "dma-pl330.0",
		.parent		= &clk_hclk_psys.clk,
		.enable		= s5pv210_clk_ip0_ctrl,
		.ctrlbit	= (1 << 3),
	}, {
-		.name		= "pdma",
-		.devname	= "s3c-pl330.1",
+		.name		= "dma",
+		.devname	= "dma-pl330.1",
		.parent		= &clk_hclk_psys.clk,
		.enable		= s5pv210_clk_ip0_ctrl,
		.ctrlbit	= (1 << 4),
@@ -1159,5 +1164,6 @@ void __init s5pv210_register_clocks(void)
	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

+	s3c24xx_register_clock(&dummy_apb_pclk);
	s3c_pwmclk_init();
}

View File

@@ -1,4 +1,8 @@
-/*
+/* linux/arch/arm/mach-s5pv210/dma.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
@@ -17,151 +21,240 @@
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

-#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>

+#include <asm/irq.h>
 #include <plat/devs.h>
 #include <plat/irqs.h>

 #include <mach/map.h>
 #include <mach/irqs.h>
+#include <mach/dma.h>

-#include <plat/s3c-pl330-pdata.h>
-
 static u64 dma_dmamask = DMA_BIT_MASK(32);

-static struct resource s5pv210_pdma0_resource[] = {
-	[0] = {
-		.start	= S5PV210_PA_PDMA0,
-		.end	= S5PV210_PA_PDMA0 + SZ_4K,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_PDMA0,
-		.end	= IRQ_PDMA0,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct s3c_pl330_platdata s5pv210_pdma0_pdata = {
-	.peri = {
-		[0] = DMACH_UART0_RX,
-		[1] = DMACH_UART0_TX,
-		[2] = DMACH_UART1_RX,
-		[3] = DMACH_UART1_TX,
-		[4] = DMACH_UART2_RX,
-		[5] = DMACH_UART2_TX,
-		[6] = DMACH_UART3_RX,
-		[7] = DMACH_UART3_TX,
-		[8] = DMACH_MAX,
-		[9] = DMACH_I2S0_RX,
-		[10] = DMACH_I2S0_TX,
-		[11] = DMACH_I2S0S_TX,
-		[12] = DMACH_I2S1_RX,
-		[13] = DMACH_I2S1_TX,
-		[14] = DMACH_MAX,
-		[15] = DMACH_MAX,
-		[16] = DMACH_SPI0_RX,
-		[17] = DMACH_SPI0_TX,
-		[18] = DMACH_SPI1_RX,
-		[19] = DMACH_SPI1_TX,
-		[20] = DMACH_MAX,
-		[21] = DMACH_MAX,
-		[22] = DMACH_AC97_MICIN,
-		[23] = DMACH_AC97_PCMIN,
-		[24] = DMACH_AC97_PCMOUT,
-		[25] = DMACH_MAX,
-		[26] = DMACH_PWM,
-		[27] = DMACH_SPDIF,
-		[28] = DMACH_MAX,
-		[29] = DMACH_MAX,
-		[30] = DMACH_MAX,
-		[31] = DMACH_MAX,
-	},
-};
-
-static struct platform_device s5pv210_device_pdma0 = {
-	.name		= "s3c-pl330",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(s5pv210_pdma0_resource),
-	.resource	= s5pv210_pdma0_resource,
-	.dev		= {
-		.dma_mask = &dma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.platform_data = &s5pv210_pdma0_pdata,
-	},
-};
-
-static struct resource s5pv210_pdma1_resource[] = {
-	[0] = {
-		.start	= S5PV210_PA_PDMA1,
-		.end	= S5PV210_PA_PDMA1 + SZ_4K,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_PDMA1,
-		.end	= IRQ_PDMA1,
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct s3c_pl330_platdata s5pv210_pdma1_pdata = {
-	.peri = {
-		[0] = DMACH_UART0_RX,
-		[1] = DMACH_UART0_TX,
-		[2] = DMACH_UART1_RX,
-		[3] = DMACH_UART1_TX,
-		[4] = DMACH_UART2_RX,
-		[5] = DMACH_UART2_TX,
-		[6] = DMACH_UART3_RX,
-		[7] = DMACH_UART3_TX,
-		[8] = DMACH_MAX,
-		[9] = DMACH_I2S0_RX,
-		[10] = DMACH_I2S0_TX,
-		[11] = DMACH_I2S0S_TX,
-		[12] = DMACH_I2S1_RX,
-		[13] = DMACH_I2S1_TX,
-		[14] = DMACH_I2S2_RX,
-		[15] = DMACH_I2S2_TX,
-		[16] = DMACH_SPI0_RX,
-		[17] = DMACH_SPI0_TX,
-		[18] = DMACH_SPI1_RX,
-		[19] = DMACH_SPI1_TX,
-		[20] = DMACH_MAX,
-		[21] = DMACH_MAX,
-		[22] = DMACH_PCM0_RX,
-		[23] = DMACH_PCM0_TX,
-		[24] = DMACH_PCM1_RX,
-		[25] = DMACH_PCM1_TX,
-		[26] = DMACH_MSM_REQ0,
-		[27] = DMACH_MSM_REQ1,
-		[28] = DMACH_MSM_REQ2,
-		[29] = DMACH_MSM_REQ3,
-		[30] = DMACH_PCM2_RX,
-		[31] = DMACH_PCM2_TX,
-	},
-};
-
-static struct platform_device s5pv210_device_pdma1 = {
-	.name		= "s3c-pl330",
-	.id		= 1,
-	.num_resources	= ARRAY_SIZE(s5pv210_pdma1_resource),
-	.resource	= s5pv210_pdma1_resource,
-	.dev		= {
-		.dma_mask = &dma_dmamask,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.platform_data = &s5pv210_pdma1_pdata,
-	},
-};
-
-static struct platform_device *s5pv210_dmacs[] __initdata = {
-	&s5pv210_device_pdma0,
-	&s5pv210_device_pdma1,
-};
+struct dma_pl330_peri pdma0_peri[28] = {
+	{
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0S_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_AC97_MICIN,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_AC97_PCMIN,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_AC97_PCMOUT,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_PWM,
+	}, {
+		.peri_id = (u8)DMACH_SPDIF,
+		.rqtype = MEMTODEV,
+	},
+};
+
+struct dma_pl330_platdata s5pv210_pdma0_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(pdma0_peri),
+	.peri = pdma0_peri,
+};
+
+struct amba_device s5pv210_device_pdma0 = {
+	.dev = {
+		.init_name = "dma-pl330.0",
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &s5pv210_pdma0_pdata,
+	},
+	.res = {
+		.start = S5PV210_PA_PDMA0,
+		.end = S5PV210_PA_PDMA0 + SZ_4K,
+		.flags = IORESOURCE_MEM,
+	},
+	.irq = {IRQ_PDMA0, NO_IRQ},
+	.periphid = 0x00041330,
+};
+
+struct dma_pl330_peri pdma1_peri[32] = {
+	{
+		.peri_id = (u8)DMACH_UART0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_UART3_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_UART3_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S0S_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_I2S2_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_SPI1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_MAX,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM0_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM1_TX,
+		.rqtype = MEMTODEV,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ0,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ1,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ2,
+	}, {
+		.peri_id = (u8)DMACH_MSM_REQ3,
+	}, {
+		.peri_id = (u8)DMACH_PCM2_RX,
+		.rqtype = DEVTOMEM,
+	}, {
+		.peri_id = (u8)DMACH_PCM2_TX,
+		.rqtype = MEMTODEV,
+	},
+};
+
+struct dma_pl330_platdata s5pv210_pdma1_pdata = {
+	.nr_valid_peri = ARRAY_SIZE(pdma1_peri),
+	.peri = pdma1_peri,
+};
+
+struct amba_device s5pv210_device_pdma1 = {
+	.dev = {
+		.init_name = "dma-pl330.1",
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &s5pv210_pdma1_pdata,
+	},
+	.res = {
+		.start = S5PV210_PA_PDMA1,
+		.end = S5PV210_PA_PDMA1 + SZ_4K,
+		.flags = IORESOURCE_MEM,
+	},
+	.irq = {IRQ_PDMA1, NO_IRQ},
+	.periphid = 0x00041330,
+};

 static int __init s5pv210_dma_init(void)
 {
-	platform_add_devices(s5pv210_dmacs, ARRAY_SIZE(s5pv210_dmacs));
+	amba_device_register(&s5pv210_device_pdma0, &iomem_resource);
+	amba_device_register(&s5pv210_device_pdma1, &iomem_resource);

 	return 0;
 }

View File

@@ -20,7 +20,7 @@
#ifndef __MACH_DMA_H
#define __MACH_DMA_H

-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+/* This platform uses the common DMA API driver for PL330 */
+#include <plat/dma-pl330.h>

#endif /* __MACH_DMA_H */

View File

@@ -1094,14 +1094,14 @@ EXPORT_SYMBOL(s3c2410_dma_config);
 *
 * configure the dma source/destination hardware type and address
 *
- * source:    S3C2410_DMASRC_HW: source is hardware
- *            S3C2410_DMASRC_MEM: source is memory
+ * source:    DMA_FROM_DEVICE: source is hardware
+ *            DMA_TO_DEVICE: source is memory
 *
 * devaddr:   physical address of the source
 */

int s3c2410_dma_devconfig(enum dma_ch channel,
-			  enum s3c2410_dmasrc source,
+			  enum dma_data_direction source,
			  unsigned long devaddr)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -1131,7 +1131,7 @@ int s3c2410_dma_devconfig(enum dma_ch channel,
		hwcfg |= S3C2410_DISRCC_INC;

	switch (source) {
-	case S3C2410_DMASRC_HW:
+	case DMA_FROM_DEVICE:
		/* source is hardware */
		pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n",
			 __func__, devaddr, hwcfg);
@@ -1142,7 +1142,7 @@ int s3c2410_dma_devconfig(enum dma_ch channel,
		chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST);
		break;

-	case S3C2410_DMASRC_MEM:
+	case DMA_TO_DEVICE:
		/* source is memory */
		pr_debug("%s: mem source, devaddr=%08lx, hwcfg=%d\n",
			 __func__, devaddr, hwcfg);
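
For an existing s3c24xx client the conversion is purely a change of enum; a typical call now reads as below. The channel and register base are illustrative, modeled on the s3cmci-style usage, not taken from this patch:

/* peripheral (SDI FIFO) is the source: device-to-memory */
s3c2410_dma_devconfig(DMACH_SDI, DMA_FROM_DEVICE,
		      host->mem->start + S3C2410_SDIDATA);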

View File

@@ -300,11 +300,14 @@ config S3C_DMA
	help
	  Internal configuration for S3C DMA core

-config S3C_PL330_DMA
+config SAMSUNG_DMADEV
	bool
-	select PL330
+	select DMADEVICES
+	select PL330_DMA if (CPU_EXYNOS4210 || CPU_S5PV210 || CPU_S5PC100 || \
+					CPU_S5P6450 || CPU_S5P6440)
+	select ARM_AMBA
	help
-	  S3C DMA API Driver for PL330 DMAC.
+	  Use DMA device engine for PL330 DMAC.

comment "Power management"

View File

@@ -63,9 +63,9 @@ obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT)	+= dev-backlight.o

# DMA support

-obj-$(CONFIG_S3C_DMA)			+= dma.o
-obj-$(CONFIG_S3C_PL330_DMA)		+= s3c-pl330.o
+obj-$(CONFIG_S3C_DMA)			+= dma.o s3c-dma-ops.o
+obj-$(CONFIG_SAMSUNG_DMADEV)		+= dma-ops.o

# PM support

View File

@@ -0,0 +1,131 @@
/* linux/arch/arm/plat-samsung/dma-ops.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung DMA Operations
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <mach/dma.h>
static inline bool pl330_filter(struct dma_chan *chan, void *param)
{
struct dma_pl330_peri *peri = chan->private;
return peri->peri_id == (unsigned)param;
}
static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
struct samsung_dma_info *info)
{
struct dma_chan *chan;
dma_cap_mask_t mask;
struct dma_slave_config slave_config;
dma_cap_zero(mask);
dma_cap_set(info->cap, mask);
chan = dma_request_channel(mask, pl330_filter, (void *)dma_ch);
if (info->direction == DMA_FROM_DEVICE) {
memset(&slave_config, 0, sizeof(struct dma_slave_config));
slave_config.direction = info->direction;
slave_config.src_addr = info->fifo;
slave_config.src_addr_width = info->width;
slave_config.src_maxburst = 1;
dmaengine_slave_config(chan, &slave_config);
} else if (info->direction == DMA_TO_DEVICE) {
memset(&slave_config, 0, sizeof(struct dma_slave_config));
slave_config.direction = info->direction;
slave_config.dst_addr = info->fifo;
slave_config.dst_addr_width = info->width;
slave_config.dst_maxburst = 1;
dmaengine_slave_config(chan, &slave_config);
}
return (unsigned)chan;
}
static int samsung_dmadev_release(unsigned ch,
struct s3c2410_dma_client *client)
{
dma_release_channel((struct dma_chan *)ch);
return 0;
}
static int samsung_dmadev_prepare(unsigned ch,
struct samsung_dma_prep_info *info)
{
struct scatterlist sg;
struct dma_chan *chan = (struct dma_chan *)ch;
struct dma_async_tx_descriptor *desc;
switch (info->cap) {
case DMA_SLAVE:
sg_init_table(&sg, 1);
sg_dma_len(&sg) = info->len;
sg_set_page(&sg, pfn_to_page(PFN_DOWN(info->buf)),
info->len, offset_in_page(info->buf));
sg_dma_address(&sg) = info->buf;
desc = chan->device->device_prep_slave_sg(chan,
&sg, 1, info->direction, DMA_PREP_INTERRUPT);
break;
case DMA_CYCLIC:
desc = chan->device->device_prep_dma_cyclic(chan,
info->buf, info->len, info->period, info->direction);
break;
default:
dev_err(&chan->dev->device, "unsupported format\n");
return -EFAULT;
}
if (!desc) {
dev_err(&chan->dev->device, "cannot prepare cyclic dma\n");
return -EFAULT;
}
desc->callback = info->fp;
desc->callback_param = info->fp_param;
dmaengine_submit((struct dma_async_tx_descriptor *)desc);
return 0;
}
static inline int samsung_dmadev_trigger(unsigned ch)
{
dma_async_issue_pending((struct dma_chan *)ch);
return 0;
}
static inline int samsung_dmadev_flush(unsigned ch)
{
return dmaengine_terminate_all((struct dma_chan *)ch);
}
struct samsung_dma_ops dmadev_ops = {
.request = samsung_dmadev_request,
.release = samsung_dmadev_release,
.prepare = samsung_dmadev_prepare,
.trigger = samsung_dmadev_trigger,
.started = NULL,
.flush = samsung_dmadev_flush,
.stop = samsung_dmadev_flush,
};
void *samsung_dmadev_get_ops(void)
{
return &dmadev_ops;
}
EXPORT_SYMBOL(samsung_dmadev_get_ops);
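
Taken together, a client written against this ops vector is backend-agnostic: the same calls work whether the platform routes them here (to dmaengine) or to the legacy wrapper in s3c-dma-ops.c later in this merge. A minimal sketch; the channel, callback, addresses and widths are hypothetical, and the s3c backend additionally expects .client to be set:

static void example_done(void *arg)
{
	/* one buffer completed */
}

static int example_start_rx(dma_addr_t fifo, dma_addr_t buf, int len)
{
	struct samsung_dma_ops *ops = samsung_dma_get_ops();
	struct samsung_dma_info info = {
		.cap = DMA_SLAVE,
		.direction = DMA_FROM_DEVICE,
		.width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.fifo = fifo,
	};
	struct samsung_dma_prep_info prep = {
		.cap = DMA_SLAVE,
		.direction = DMA_FROM_DEVICE,
		.buf = buf,
		.len = len,
		.fp = example_done,
	};
	unsigned ch;

	ch = ops->request(DMACH_UART0_RX, &info);
	if (!ch)
		return -EBUSY;

	ops->prepare(ch, &prep);
	ops->trigger(ch);

	return 0;
}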

View File

@@ -0,0 +1,63 @@
/* arch/arm/plat-samsung/include/plat/dma-ops.h
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung DMA support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __SAMSUNG_DMA_OPS_H_
#define __SAMSUNG_DMA_OPS_H_ __FILE__
#include <linux/dmaengine.h>
struct samsung_dma_prep_info {
enum dma_transaction_type cap;
enum dma_data_direction direction;
dma_addr_t buf;
unsigned long period;
unsigned long len;
void (*fp)(void *data);
void *fp_param;
};
struct samsung_dma_info {
enum dma_transaction_type cap;
enum dma_data_direction direction;
enum dma_slave_buswidth width;
dma_addr_t fifo;
struct s3c2410_dma_client *client;
};
struct samsung_dma_ops {
unsigned (*request)(enum dma_ch ch, struct samsung_dma_info *info);
int (*release)(unsigned ch, struct s3c2410_dma_client *client);
int (*prepare)(unsigned ch, struct samsung_dma_prep_info *info);
int (*trigger)(unsigned ch);
int (*started)(unsigned ch);
int (*flush)(unsigned ch);
int (*stop)(unsigned ch);
};
extern void *samsung_dmadev_get_ops(void);
extern void *s3c_dma_get_ops(void);
static inline void *__samsung_dma_get_ops(void)
{
if (samsung_dma_is_dmadev())
return samsung_dmadev_get_ops();
else
return s3c_dma_get_ops();
}
/*
* samsung_dma_get_ops
* get the set of samsung dma operations
*/
#define samsung_dma_get_ops() __samsung_dma_get_ops()
#endif /* __SAMSUNG_DMA_OPS_H_ */
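
Which backend __samsung_dma_get_ops() returns is decided per platform at compile time: each <mach/dma.h> supplies samsung_dma_is_dmadev() as a constant inline (true via plat/dma-pl330.h on the converted SoCs, false in the s3c24xx/s3c64xx headers shown earlier), so the branch folds away entirely. A sketch of the pattern a mach header follows; this fragment is illustrative, not one of the headers in this merge:

/* hypothetical <mach/dma.h> fragment for a non-dmadev platform */
static inline bool samsung_dma_is_dmadev(void)
{
	return false;	/* constant-folds: s3c_dma_get_ops() is selected */
}

#include <plat/dma-ops.h>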

View File

@@ -8,11 +8,8 @@
 * (at your option) any later version.
 */

-#ifndef	__S3C_DMA_PL330_H_
-#define	__S3C_DMA_PL330_H_
+#ifndef	__DMA_PL330_H_
+#define	__DMA_PL330_H_ __FILE__

-#define S3C2410_DMAF_AUTOSTART		(1 << 0)
-#define S3C2410_DMAF_CIRCULAR		(1 << 1)
-
/*
 * PL330 can assign any channel to communicate with
@@ -20,7 +17,7 @@
 * For the sake of consistency across client drivers,
 * we keep the channel names unchanged; only the
 * missing peripherals are added.
- * Order is not important since the S3C PL330 API driver
+ * Order is not important since the DMA PL330 API driver
 * uses these just as IDs.
 */
enum dma_ch {
@@ -88,11 +85,20 @@ enum dma_ch {
	DMACH_MAX,
};

+struct s3c2410_dma_client {
+	char	*name;
+};
+
-static inline bool s3c_dma_has_circular(void)
+static inline bool samsung_dma_has_circular(void)
{
	return true;
}

+static inline bool samsung_dma_is_dmadev(void)
+{
+	return true;
+}
+
-#include <plat/dma.h>
+#include <plat/dma-ops.h>

-#endif	/* __S3C_DMA_PL330_H_ */
+#endif	/* __DMA_PL330_H_ */

View File

@@ -47,7 +47,7 @@ struct s3c24xx_dma_selection {

	void	(*direction)(struct s3c2410_dma_chan *chan,
			     struct s3c24xx_dma_map *map,
-			     enum s3c2410_dmasrc dir);
+			     enum dma_data_direction dir);
};

extern int s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel);

View File

@@ -10,17 +10,14 @@
 * published by the Free Software Foundation.
 */

+#include <linux/dma-mapping.h>
+
enum s3c2410_dma_buffresult {
	S3C2410_RES_OK,
	S3C2410_RES_ERR,
	S3C2410_RES_ABORT
};

-enum s3c2410_dmasrc {
-	S3C2410_DMASRC_HW,	/* source is hardware */
-	S3C2410_DMASRC_MEM	/* source is memory */
-};
-
/* enum s3c2410_chan_op
 *
 * operation codes passed to the DMA code by the user, and also used
@@ -112,7 +109,7 @@ extern int s3c2410_dma_config(enum dma_ch channel, int xferunit);
 */

extern int s3c2410_dma_devconfig(enum dma_ch channel,
-				 enum s3c2410_dmasrc source, unsigned long devaddr);
+				 enum dma_data_direction source, unsigned long devaddr);

/* s3c2410_dma_getposition
 *
@@ -126,3 +123,4 @@ extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);

+#include <plat/dma-ops.h>

View File

@@ -1,32 +0,0 @@
/* linux/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h
*
* Copyright (C) 2010 Samsung Electronics Co. Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __S3C_PL330_PDATA_H
#define __S3C_PL330_PDATA_H
#include <plat/s3c-dma-pl330.h>
/*
* Every PL330 DMAC has max 32 peripheral interfaces,
* of which some may not be really used in your
* DMAC's configuration.
* Populate this array of 32 peri i/fs with relevant
* channel IDs for used peri i/f and DMACH_MAX for
* those unused.
*
* The platforms just need to provide this info
* to the S3C DMA API driver for PL330.
*/
struct s3c_pl330_platdata {
enum dma_ch peri[32];
};
#endif /* __S3C_PL330_PDATA_H */

View File

@@ -0,0 +1,130 @@
/* linux/arch/arm/plat-samsung/s3c-dma-ops.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung S3C-DMA Operations
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <mach/dma.h>
struct cb_data {
void (*fp) (void *);
void *fp_param;
unsigned ch;
struct list_head node;
};
static LIST_HEAD(dma_list);
static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param,
int size, enum s3c2410_dma_buffresult res)
{
struct cb_data *data = param;
data->fp(data->fp_param);
}
static unsigned s3c_dma_request(enum dma_ch dma_ch,
struct samsung_dma_info *info)
{
struct cb_data *data;
if (s3c2410_dma_request(dma_ch, info->client, NULL) < 0) {
s3c2410_dma_free(dma_ch, info->client);
return 0;
}
data = kzalloc(sizeof(struct cb_data), GFP_KERNEL);
data->ch = dma_ch;
list_add_tail(&data->node, &dma_list);
s3c2410_dma_devconfig(dma_ch, info->direction, info->fifo);
if (info->cap == DMA_CYCLIC)
s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR);
s3c2410_dma_config(dma_ch, info->width);
return (unsigned)dma_ch;
}
static int s3c_dma_release(unsigned ch, struct s3c2410_dma_client *client)
{
struct cb_data *data;
list_for_each_entry(data, &dma_list, node)
if (data->ch == ch)
break;
list_del(&data->node);
s3c2410_dma_free(ch, client);
kfree(data);
return 0;
}
static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep_info *info)
{
struct cb_data *data;
int len = (info->cap == DMA_CYCLIC) ? info->period : info->len;
list_for_each_entry(data, &dma_list, node)
if (data->ch == ch)
break;
if (!data->fp) {
s3c2410_dma_set_buffdone_fn(ch, s3c_dma_cb);
data->fp = info->fp;
data->fp_param = info->fp_param;
}
s3c2410_dma_enqueue(ch, (void *)data, info->buf, len);
return 0;
}
static inline int s3c_dma_trigger(unsigned ch)
{
return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START);
}
static inline int s3c_dma_started(unsigned ch)
{
return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STARTED);
}
static inline int s3c_dma_flush(unsigned ch)
{
return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH);
}
static inline int s3c_dma_stop(unsigned ch)
{
return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP);
}
static struct samsung_dma_ops s3c_dma_ops = {
.request = s3c_dma_request,
.release = s3c_dma_release,
.prepare = s3c_dma_prepare,
.trigger = s3c_dma_trigger,
.started = s3c_dma_started,
.flush = s3c_dma_flush,
.stop = s3c_dma_stop,
};
void *s3c_dma_get_ops(void)
{
return &s3c_dma_ops;
}
EXPORT_SYMBOL(s3c_dma_get_ops);
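A hedged usage sketch for these ops from a hypothetical client driver follows. The samsung_dma_info and samsung_dma_prep_info field names are inferred from the implementation above; the real definitions live in plat/dma-ops.h, which this diff does not show:
/* Hypothetical client, illustrative only */
static struct s3c2410_dma_client example_client = {
	.name = "example-dev",
};

static void example_buffdone(void *param)
{
	/* one buffer (or one cyclic period) has completed */
}

static unsigned example_dma_start(enum dma_ch ch_id, dma_addr_t buf,
				  int len, unsigned long fifo)
{
	struct samsung_dma_ops *ops = s3c_dma_get_ops();
	struct samsung_dma_info info = {
		.cap		= DMA_SLAVE,		/* or DMA_CYCLIC */
		.direction	= DMA_TO_DEVICE,
		.fifo		= fifo,			/* peripheral FIFO address */
		.width		= 4,
		.client		= &example_client,
	};
	struct samsung_dma_prep_info prep = {
		.cap		= DMA_SLAVE,
		.buf		= buf,
		.len		= len,
		.fp		= example_buffdone,
		.fp_param	= NULL,
	};
	unsigned ch = ops->request(ch_id, &info);

	if (!ch)
		return 0;
	ops->prepare(ch, &prep);
	ops->trigger(ch);
	return ch;	/* later: ops->stop(ch); ops->release(ch, &example_client); */
}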

File diff suppressed because it is too large

View File

@ -193,7 +193,8 @@ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
config PL330_DMA config PL330_DMA
tristate "DMA API Driver for PL330" tristate "DMA API Driver for PL330"
select DMA_ENGINE select DMA_ENGINE
depends on PL330 depends on ARM_AMBA
select PL330
help help
Select if your platform has one or more PL330 DMACs. Select if your platform has one or more PL330 DMACs.
You need to provide platform specific settings via You need to provide platform specific settings via

View File

@ -66,32 +66,29 @@
* after the final transfer signalled by LBREQ or LSREQ. The DMAC * after the final transfer signalled by LBREQ or LSREQ. The DMAC
* will then move to the next LLI entry. * will then move to the next LLI entry.
* *
* Only the former works sanely with scatter lists, so we only implement
* the DMAC flow control method. However, peripherals which use the LBREQ
* and LSREQ signals (eg, MMCI) are unable to use this mode, which through
* these hardware restrictions prevents them from using scatter DMA.
*
* Global TODO: * Global TODO:
* - Break out common code from arch/arm/mach-s3c64xx and share * - Break out common code from arch/arm/mach-s3c64xx and share
*/ */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h> #include <linux/amba/bus.h>
#include <linux/amba/pl08x.h> #include <linux/amba/pl08x.h>
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/hardware/pl080.h> #include <asm/hardware/pl080.h>
#define DRIVER_NAME "pl08xdmac" #define DRIVER_NAME "pl08xdmac"
static struct amba_driver pl08x_amba_driver;
/** /**
* struct vendor_data - vendor-specific config parameters for PL08x derivatives * struct vendor_data - vendor-specific config parameters for PL08x derivatives
* @channels: the number of channels available in this variant * @channels: the number of channels available in this variant
@ -126,7 +123,8 @@ struct pl08x_lli {
* @phy_chans: array of data for the physical channels * @phy_chans: array of data for the physical channels
* @pool: a pool for the LLI descriptors * @pool: a pool for the LLI descriptors
* @pool_ctr: counter of LLIs in the pool * @pool_ctr: counter of LLIs in the pool
* @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
* fetches
* @mem_buses: set to indicate memory transfers on AHB2. * @mem_buses: set to indicate memory transfers on AHB2.
* @lock: a spinlock for this struct * @lock: a spinlock for this struct
*/ */
@ -149,14 +147,6 @@ struct pl08x_driver_data {
* PL08X specific defines * PL08X specific defines
*/ */
/*
* Memory boundaries: the manual for PL08x says that the controller
* cannot read past a 1KiB boundary, so these defines are used to
* create transfer LLIs that do not cross such boundaries.
*/
#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
/* Size (bytes) of each LLI buffer allocated for one transfer */ /* Size (bytes) of each LLI buffer allocated for one transfer */
# define PL08X_LLI_TSFR_SIZE 0x2000 # define PL08X_LLI_TSFR_SIZE 0x2000
@ -272,7 +262,6 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
writel(val, ch->base + PL080_CH_CONFIG); writel(val, ch->base + PL080_CH_CONFIG);
} }
/* /*
* pl08x_terminate_phy_chan() stops the channel, clears the FIFO and * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
* clears any pending interrupt status. This should not be used for * clears any pending interrupt status. This should not be used for
@ -407,6 +396,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
return NULL; return NULL;
} }
pm_runtime_get_sync(&pl08x->adev->dev);
return ch; return ch;
} }
@ -420,6 +410,8 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
/* Stop the channel and clear its interrupts */ /* Stop the channel and clear its interrupts */
pl08x_terminate_phy_chan(pl08x, ch); pl08x_terminate_phy_chan(pl08x, ch);
pm_runtime_put(&pl08x->adev->dev);
/* Mark it as free */ /* Mark it as free */
ch->serving = NULL; ch->serving = NULL;
spin_unlock_irqrestore(&ch->lock, flags); spin_unlock_irqrestore(&ch->lock, flags);
@ -499,36 +491,30 @@ struct pl08x_lli_build_data {
}; };
/* /*
* Autoselect a master bus to use for the transfer this prefers the * Autoselect a master bus to use for the transfer. The slave is chosen as the
* destination bus if both available if fixed address on one bus the * victim when src & dst are not similarly aligned, i.e. if, after aligning the
* other will be chosen * master's address to the transfer's width requirements (by sending a few
* bytes of byte-by-byte data), the slave is still not aligned, its width is
* reduced to BYTE.
* - prefers the destination bus if both are available
* - prefers the bus with a fixed address (i.e. peripheral)
*/ */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{ {
if (!(cctl & PL080_CONTROL_DST_INCR)) { if (!(cctl & PL080_CONTROL_DST_INCR)) {
*mbus = &bd->srcbus; *mbus = &bd->dstbus;
*sbus = &bd->dstbus; *sbus = &bd->srcbus;
} else if (!(cctl & PL080_CONTROL_SRC_INCR)) { } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
*mbus = &bd->dstbus;
*sbus = &bd->srcbus;
} else {
if (bd->dstbus.buswidth == 4) {
*mbus = &bd->dstbus;
*sbus = &bd->srcbus;
} else if (bd->srcbus.buswidth == 4) {
*mbus = &bd->srcbus;
*sbus = &bd->dstbus;
} else if (bd->dstbus.buswidth == 2) {
*mbus = &bd->dstbus;
*sbus = &bd->srcbus;
} else if (bd->srcbus.buswidth == 2) {
*mbus = &bd->srcbus; *mbus = &bd->srcbus;
*sbus = &bd->dstbus; *sbus = &bd->dstbus;
} else { } else {
/* bd->srcbus.buswidth == 1 */ if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
*mbus = &bd->dstbus; *mbus = &bd->dstbus;
*sbus = &bd->srcbus; *sbus = &bd->srcbus;
} else {
*mbus = &bd->srcbus;
*sbus = &bd->dstbus;
} }
} }
} }
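/*
 * Illustrative examples of the selection above (not part of the patch):
 * - mem-to-peripheral with a fixed (non-incrementing) destination FIFO:
 *   mbus = dstbus (the FIFO), sbus = srcbus;
 * - plain memcpy with src 2 bytes wide and dst 4 bytes wide: both
 *   increment, dst is at least as wide, so mbus = dstbus and the
 *   narrower source bus becomes the slave.
 */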
@ -547,7 +533,8 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
llis_va[num_llis].cctl = cctl; llis_va[num_llis].cctl = cctl;
llis_va[num_llis].src = bd->srcbus.addr; llis_va[num_llis].src = bd->srcbus.addr;
llis_va[num_llis].dst = bd->dstbus.addr; llis_va[num_llis].dst = bd->dstbus.addr;
llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
sizeof(struct pl08x_lli);
llis_va[num_llis].lli |= bd->lli_bus; llis_va[num_llis].lli |= bd->lli_bus;
if (cctl & PL080_CONTROL_SRC_INCR) if (cctl & PL080_CONTROL_SRC_INCR)
@ -560,16 +547,12 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
bd->remainder -= len; bd->remainder -= len;
} }
/* static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
* Return number of bytes to fill to boundary, or len. u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
* This calculation works for any value of addr.
*/
static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
{ {
size_t boundary_len = PL08X_BOUNDARY_SIZE - *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
(addr & (PL08X_BOUNDARY_SIZE - 1)); pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
(*total_bytes) += len;
return min(boundary_len, len);
} }
/* /*
@ -583,13 +566,11 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
struct pl08x_bus_data *mbus, *sbus; struct pl08x_bus_data *mbus, *sbus;
struct pl08x_lli_build_data bd; struct pl08x_lli_build_data bd;
int num_llis = 0; int num_llis = 0;
u32 cctl; u32 cctl, early_bytes = 0;
size_t max_bytes_per_lli; size_t max_bytes_per_lli, total_bytes = 0;
size_t total_bytes = 0;
struct pl08x_lli *llis_va; struct pl08x_lli *llis_va;
txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
&txd->llis_bus);
if (!txd->llis_va) { if (!txd->llis_va) {
dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
return 0; return 0;
@ -619,55 +600,85 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
bd.srcbus.buswidth = bd.srcbus.maxwidth; bd.srcbus.buswidth = bd.srcbus.maxwidth;
bd.dstbus.buswidth = bd.dstbus.maxwidth; bd.dstbus.buswidth = bd.dstbus.maxwidth;
/*
* Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
*/
max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
PL080_CONTROL_TRANSFER_SIZE_MASK;
/* We need to count this down to zero */ /* We need to count this down to zero */
bd.remainder = txd->len; bd.remainder = txd->len;
/*
* Choose bus to align to
* - prefers destination bus if both available
* - if fixed address on one bus chooses other
*/
pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
bd.srcbus.buswidth, bd.srcbus.buswidth,
bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
bd.dstbus.buswidth, bd.dstbus.buswidth,
bd.remainder, max_bytes_per_lli); bd.remainder);
dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
mbus == &bd.srcbus ? "src" : "dst", mbus == &bd.srcbus ? "src" : "dst",
sbus == &bd.srcbus ? "src" : "dst"); sbus == &bd.srcbus ? "src" : "dst");
if (txd->len < mbus->buswidth) { /*
/* Less than a bus width available - send as single bytes */ * Zero length is only allowed if all these requirements are met:
while (bd.remainder) { * - flow controller is peripheral.
dev_vdbg(&pl08x->adev->dev, * - src.addr is aligned to src.width
"%s single byte LLIs for a transfer of " * - dst.addr is aligned to dst.width
"less than a bus width (remain 0x%08x)\n", *
__func__, bd.remainder); * sg_len == 1 should be true, as there can be two cases here:
cctl = pl08x_cctl_bits(cctl, 1, 1, 1); * - Memory addresses are contiguous and not scattered. Here, only
pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); * one sg will be passed by the user driver, with a memory address and
total_bytes++; * zero length. We pass this to the controller; after the transfer it
} * receives the last burst request from the peripheral and the
} else { * transfer finishes.
/* Make one byte LLIs until master bus is aligned */ *
while ((mbus->addr) % (mbus->buswidth)) { * - Memory addresses are scattered and not contiguous. Since the
dev_vdbg(&pl08x->adev->dev, * DMA controller doesn't know when an LLI's transfer is over, it
"%s adjustment lli for less than bus width " * can't load the next LLI. So in this case only one LLI can be
"(remain 0x%08x)\n", * assumed, i.e. we can't have scattered addresses.
__func__, bd.remainder);
cctl = pl08x_cctl_bits(cctl, 1, 1, 1); */
pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); if (!bd.remainder) {
total_bytes++; u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
PL080_CONFIG_FLOW_CONTROL_SHIFT;
if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
(fc <= PL080_FLOW_SRC2DST_SRC))) {
dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
__func__);
return 0;
} }
if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
(bd.dstbus.addr % bd.dstbus.buswidth)) {
dev_err(&pl08x->adev->dev,
"%s src & dst address must be aligned to src"
" & dst width if peripheral is flow controller",
__func__);
return 0;
}
cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
bd.dstbus.buswidth, 0);
pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
}
/*
* Send byte by byte for following cases
* - Less than a bus width available
* - until master bus is aligned
*/
if (bd.remainder < mbus->buswidth)
early_bytes = bd.remainder;
else if ((mbus->addr) % (mbus->buswidth)) {
early_bytes = mbus->buswidth - (mbus->addr) % (mbus->buswidth);
if ((bd.remainder - early_bytes) < mbus->buswidth)
early_bytes = bd.remainder;
}
if (early_bytes) {
dev_vdbg(&pl08x->adev->dev, "%s byte width LLIs "
"(remain 0x%08x)\n", __func__, bd.remainder);
prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
&total_bytes);
}
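/*
 * Illustrative arithmetic for the early-bytes path above (values are
 * examples only): mbus->addr = 0x40001003, mbus->buswidth = 4,
 * bd.remainder = 0x100.  early_bytes = 4 - (0x40001003 % 4) = 1, so a
 * single byte-width LLI goes out first and the remaining 0xff bytes
 * start word-aligned.
 */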
if (bd.remainder) {
/* /*
* Master now aligned * Master now aligned
* - if slave is not then we must set its width down * - if slave is not then we must set its width down
@ -680,138 +691,55 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
sbus->buswidth = 1; sbus->buswidth = 1;
} }
/* Bytes transferred = tsize * src width, not MIN(buswidths) */
max_bytes_per_lli = bd.srcbus.buswidth *
PL080_CONTROL_TRANSFER_SIZE_MASK;
/* /*
* Make largest possible LLIs until less than one bus * Make largest possible LLIs until less than one bus
* width left * width left
*/ */
while (bd.remainder > (mbus->buswidth - 1)) { while (bd.remainder > (mbus->buswidth - 1)) {
size_t lli_len, target_len, tsize, odd_bytes; size_t lli_len, tsize, width;
/* /*
* If enough left try to send max possible, * If enough left try to send max possible,
* otherwise try to send the remainder * otherwise try to send the remainder
*/ */
target_len = min(bd.remainder, max_bytes_per_lli); lli_len = min(bd.remainder, max_bytes_per_lli);
/* /*
* Set bus lengths for incrementing buses to the * Check against maximum bus alignment: Calculate actual
* number of bytes which fill to next memory boundary, * transfer size in relation to bus width and get a
* limiting on the target length calculated above. * maximum remainder of the highest bus width - 1
*/ */
if (cctl & PL080_CONTROL_SRC_INCR) width = max(mbus->buswidth, sbus->buswidth);
bd.srcbus.fill_bytes = lli_len = (lli_len / width) * width;
pl08x_pre_boundary(bd.srcbus.addr, tsize = lli_len / bd.srcbus.buswidth;
target_len);
else
bd.srcbus.fill_bytes = target_len;
if (cctl & PL080_CONTROL_DST_INCR)
bd.dstbus.fill_bytes =
pl08x_pre_boundary(bd.dstbus.addr,
target_len);
else
bd.dstbus.fill_bytes = target_len;
/* Find the nearest */
lli_len = min(bd.srcbus.fill_bytes,
bd.dstbus.fill_bytes);
BUG_ON(lli_len > bd.remainder);
if (lli_len <= 0) {
dev_err(&pl08x->adev->dev,
"%s lli_len is %zu, <= 0\n",
__func__, lli_len);
return 0;
}
if (lli_len == target_len) {
/*
* Can send what we wanted.
* Maintain alignment
*/
lli_len = (lli_len/mbus->buswidth) *
mbus->buswidth;
odd_bytes = 0;
} else {
/*
* So now we know how many bytes to transfer
* to get to the nearest boundary. The next
* LLI will go past the boundary. However, we
* may be working to a boundary on the slave
* bus. We need to ensure the master stays
* aligned, and that we are working in
* multiples of the bus widths.
*/
odd_bytes = lli_len % mbus->buswidth;
lli_len -= odd_bytes;
}
if (lli_len) {
/*
* Check against minimum bus alignment:
* Calculate actual transfer size in relation
* to bus width and get a maximum remainder of
* the smallest bus width - 1
*/
/* FIXME: use round_down()? */
tsize = lli_len / min(mbus->buswidth,
sbus->buswidth);
lli_len = tsize * min(mbus->buswidth,
sbus->buswidth);
if (target_len != lli_len) {
dev_vdbg(&pl08x->adev->dev,
"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
__func__, target_len, lli_len, txd->len);
}
cctl = pl08x_cctl_bits(cctl,
bd.srcbus.buswidth,
bd.dstbus.buswidth,
tsize);
dev_vdbg(&pl08x->adev->dev, dev_vdbg(&pl08x->adev->dev,
"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n", "%s fill lli with single lli chunk of "
"size 0x%08zx (remainder 0x%08zx)\n",
__func__, lli_len, bd.remainder); __func__, lli_len, bd.remainder);
pl08x_fill_lli_for_desc(&bd, num_llis++,
lli_len, cctl); cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
bd.dstbus.buswidth, tsize);
pl08x_fill_lli_for_desc(&bd, num_llis++, lli_len, cctl);
total_bytes += lli_len; total_bytes += lli_len;
} }
if (odd_bytes) {
/*
* Creep past the boundary, maintaining
* master alignment
*/
int j;
for (j = 0; (j < mbus->buswidth)
&& (bd.remainder); j++) {
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
dev_vdbg(&pl08x->adev->dev,
"%s align with boundary, single byte (remain 0x%08zx)\n",
__func__, bd.remainder);
pl08x_fill_lli_for_desc(&bd,
num_llis++, 1, cctl);
total_bytes++;
}
}
}
/* /*
* Send any odd bytes * Send any odd bytes
*/ */
while (bd.remainder) { if (bd.remainder) {
cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
dev_vdbg(&pl08x->adev->dev, dev_vdbg(&pl08x->adev->dev,
"%s align with boundary, single odd byte (remain %zu)\n", "%s align with boundary, send odd bytes (remain %zu)\n",
__func__, bd.remainder); __func__, bd.remainder);
pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); prep_byte_width_lli(&bd, &cctl, bd.remainder,
total_bytes++; num_llis++, &total_bytes);
} }
} }
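/*
 * Illustrative walk-through of the sizing above (values are examples
 * only): src and dst both 4 bytes wide, bd.remainder = 0x1002 once the
 * master and slave are aligned.  max_bytes_per_lli = 4 *
 * PL080_CONTROL_TRANSFER_SIZE_MASK (0xfff) = 0x3ffc, so the loop emits
 * one LLI with lli_len = (0x1002 / 4) * 4 = 0x1000 and tsize = 0x400;
 * the trailing 2 bytes then go out as one byte-width LLI via
 * prep_byte_width_lli().
 */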
if (total_bytes != txd->len) { if (total_bytes != txd->len) {
dev_err(&pl08x->adev->dev, dev_err(&pl08x->adev->dev,
"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
@ -917,9 +845,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
* need, but for slaves the physical signals may be muxed! * need, but for slaves the physical signals may be muxed!
* Can the platform allow us to use this channel? * Can the platform allow us to use this channel?
*/ */
if (plchan->slave && if (plchan->slave && pl08x->pd->get_signal) {
ch->signal < 0 &&
pl08x->pd->get_signal) {
ret = pl08x->pd->get_signal(plchan); ret = pl08x->pd->get_signal(plchan);
if (ret < 0) { if (ret < 0) {
dev_dbg(&pl08x->adev->dev, dev_dbg(&pl08x->adev->dev,
@ -1008,10 +934,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
* If slaves are relying on interrupts to signal completion this function * If slaves are relying on interrupts to signal completion this function
* must not be called with interrupts disabled. * must not be called with interrupts disabled.
*/ */
static enum dma_status static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
pl08x_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate)
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{ {
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
dma_cookie_t last_used; dma_cookie_t last_used;
@ -1253,7 +1177,9 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
num_llis = pl08x_fill_llis_for_desc(pl08x, txd); num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
if (!num_llis) { if (!num_llis) {
kfree(txd); spin_lock_irqsave(&plchan->lock, flags);
pl08x_free_txd(pl08x, txd);
spin_unlock_irqrestore(&plchan->lock, flags);
return -EINVAL; return -EINVAL;
} }
@ -1301,7 +1227,7 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
unsigned long flags) unsigned long flags)
{ {
struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
if (txd) { if (txd) {
dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
@ -1367,7 +1293,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd; struct pl08x_txd *txd;
int ret; int ret, tmp;
/* /*
* Current implementation ASSUMES only one sg * Current implementation ASSUMES only one sg
@ -1401,12 +1327,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
txd->len = sgl->length; txd->len = sgl->length;
if (direction == DMA_TO_DEVICE) { if (direction == DMA_TO_DEVICE) {
txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
txd->cctl = plchan->dst_cctl; txd->cctl = plchan->dst_cctl;
txd->src_addr = sgl->dma_address; txd->src_addr = sgl->dma_address;
txd->dst_addr = plchan->dst_addr; txd->dst_addr = plchan->dst_addr;
} else if (direction == DMA_FROM_DEVICE) { } else if (direction == DMA_FROM_DEVICE) {
txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
txd->cctl = plchan->src_cctl; txd->cctl = plchan->src_cctl;
txd->src_addr = plchan->src_addr; txd->src_addr = plchan->src_addr;
txd->dst_addr = sgl->dma_address; txd->dst_addr = sgl->dma_address;
@ -1416,6 +1340,15 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
return NULL; return NULL;
} }
if (plchan->cd->device_fc)
tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
PL080_FLOW_PER2MEM_PER;
else
tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
PL080_FLOW_PER2MEM;
txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
ret = pl08x_prep_channel_resources(plchan, txd); ret = pl08x_prep_channel_resources(plchan, txd);
if (ret) if (ret)
return NULL; return NULL;
@ -1489,9 +1422,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{ {
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); struct pl08x_dma_chan *plchan;
char *name = chan_id; char *name = chan_id;
/* Reject channels for devices not bound to this driver */
if (chan->device->dev->driver != &pl08x_amba_driver.drv)
return false;
plchan = to_pl08x_chan(chan);
/* Check that the channel is not taken! */ /* Check that the channel is not taken! */
if (!strcmp(plchan->name, name)) if (!strcmp(plchan->name, name))
return true; return true;
@ -1507,13 +1446,7 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
*/ */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{ {
u32 val; writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
val = readl(pl08x->base + PL080_CONFIG);
val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
/* We implicitly clear bit 1 and that means little-endian mode */
val |= PL080_CONFIG_ENABLE;
writel(val, pl08x->base + PL080_CONFIG);
} }
static void pl08x_unmap_buffers(struct pl08x_txd *txd) static void pl08x_unmap_buffers(struct pl08x_txd *txd)
@ -1630,38 +1563,40 @@ static void pl08x_tasklet(unsigned long data)
static irqreturn_t pl08x_irq(int irq, void *dev) static irqreturn_t pl08x_irq(int irq, void *dev)
{ {
struct pl08x_driver_data *pl08x = dev; struct pl08x_driver_data *pl08x = dev;
u32 mask = 0; u32 mask = 0, err, tc, i;
u32 val;
int i;
val = readl(pl08x->base + PL080_ERR_STATUS); /* check & clear - ERR & TC interrupts */
if (val) { err = readl(pl08x->base + PL080_ERR_STATUS);
/* An error interrupt (on one or more channels) */ if (err) {
dev_err(&pl08x->adev->dev, dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
"%s error interrupt, register value 0x%08x\n", __func__, err);
__func__, val); writel(err, pl08x->base + PL080_ERR_CLEAR);
/*
* Simply clear ALL PL08X error interrupts,
* regardless of channel and cause
* FIXME: should be 0x00000003 on PL081 really.
*/
writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
} }
val = readl(pl08x->base + PL080_INT_STATUS); tc = readl(pl08x->base + PL080_INT_STATUS);
if (tc)
writel(tc, pl08x->base + PL080_TC_CLEAR);
if (!err && !tc)
return IRQ_NONE;
for (i = 0; i < pl08x->vd->channels; i++) { for (i = 0; i < pl08x->vd->channels; i++) {
if ((1 << i) & val) { if (((1 << i) & err) || ((1 << i) & tc)) {
/* Locate physical channel */ /* Locate physical channel */
struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
struct pl08x_dma_chan *plchan = phychan->serving; struct pl08x_dma_chan *plchan = phychan->serving;
if (!plchan) {
dev_err(&pl08x->adev->dev,
"%s Error TC interrupt on unused channel: 0x%08x\n",
__func__, i);
continue;
}
/* Schedule tasklet on this channel */ /* Schedule tasklet on this channel */
tasklet_schedule(&plchan->tasklet); tasklet_schedule(&plchan->tasklet);
mask |= (1 << i); mask |= (1 << i);
} }
} }
/* Clear only the terminal interrupts on channels we processed */
writel(mask, pl08x->base + PL080_TC_CLEAR);
return mask ? IRQ_HANDLED : IRQ_NONE; return mask ? IRQ_HANDLED : IRQ_NONE;
} }
@ -1685,9 +1620,7 @@ static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
* Make a local wrapper to hold required data * Make a local wrapper to hold required data
*/ */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
struct dma_device *dmadev, struct dma_device *dmadev, unsigned int channels, bool slave)
unsigned int channels,
bool slave)
{ {
struct pl08x_dma_chan *chan; struct pl08x_dma_chan *chan;
int i; int i;
@ -1700,7 +1633,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
* to cope with that situation. * to cope with that situation.
*/ */
for (i = 0; i < channels; i++) { for (i = 0; i < channels; i++) {
chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan) { if (!chan) {
dev_err(&pl08x->adev->dev, dev_err(&pl08x->adev->dev,
"%s no memory for channel\n", __func__); "%s no memory for channel\n", __func__);
@ -1728,7 +1661,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
kfree(chan); kfree(chan);
continue; continue;
} }
dev_info(&pl08x->adev->dev, dev_dbg(&pl08x->adev->dev,
"initialize virtual channel \"%s\"\n", "initialize virtual channel \"%s\"\n",
chan->name); chan->name);
@ -1837,8 +1770,8 @@ static const struct file_operations pl08x_debugfs_operations = {
static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{ {
/* Expose a simple debugfs interface to view all clocks */ /* Expose a simple debugfs interface to view all clocks */
(void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, (void) debugfs_create_file(dev_name(&pl08x->adev->dev),
NULL, pl08x, S_IFREG | S_IRUGO, NULL, pl08x,
&pl08x_debugfs_operations); &pl08x_debugfs_operations);
} }
@ -1860,12 +1793,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
return ret; return ret;
/* Create the driver state holder */ /* Create the driver state holder */
pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
if (!pl08x) { if (!pl08x) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_no_pl08x; goto out_no_pl08x;
} }
pm_runtime_set_active(&adev->dev);
pm_runtime_enable(&adev->dev);
/* Initialize memcpy engine */ /* Initialize memcpy engine */
dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
pl08x->memcpy.dev = &adev->dev; pl08x->memcpy.dev = &adev->dev;
@ -1939,7 +1875,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
} }
/* Initialize physical channels */ /* Initialize physical channels */
pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
GFP_KERNEL); GFP_KERNEL);
if (!pl08x->phy_chans) { if (!pl08x->phy_chans) {
dev_err(&adev->dev, "%s failed to allocate " dev_err(&adev->dev, "%s failed to allocate "
@ -1956,9 +1892,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&ch->lock); spin_lock_init(&ch->lock);
ch->serving = NULL; ch->serving = NULL;
ch->signal = -1; ch->signal = -1;
dev_info(&adev->dev, dev_dbg(&adev->dev, "physical channel %d is %s\n",
"physical channel %d is %s\n", i, i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
} }
/* Register as many memcpy channels as there are physical channels */ /* Register as many memcpy channels as there are physical channels */
@ -1974,8 +1909,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Register slave channels */ /* Register slave channels */
ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
pl08x->pd->num_slave_channels, pl08x->pd->num_slave_channels, true);
true);
if (ret <= 0) { if (ret <= 0) {
dev_warn(&pl08x->adev->dev, dev_warn(&pl08x->adev->dev,
"%s failed to enumerate slave channels - %d\n", "%s failed to enumerate slave channels - %d\n",
@ -2005,6 +1939,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
amba_part(adev), amba_rev(adev), amba_part(adev), amba_rev(adev),
(unsigned long long)adev->res.start, adev->irq[0]); (unsigned long long)adev->res.start, adev->irq[0]);
pm_runtime_put(&adev->dev);
return 0; return 0;
out_no_slave_reg: out_no_slave_reg:
@ -2023,6 +1959,9 @@ out_no_ioremap:
dma_pool_destroy(pl08x->pool); dma_pool_destroy(pl08x->pool);
out_no_lli_pool: out_no_lli_pool:
out_no_platdata: out_no_platdata:
pm_runtime_put(&adev->dev);
pm_runtime_disable(&adev->dev);
kfree(pl08x); kfree(pl08x);
out_no_pl08x: out_no_pl08x:
amba_release_regions(adev); amba_release_regions(adev);

View File

@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{ {
struct at_desc *desc, *_desc; struct at_desc *desc, *_desc;
struct at_desc *ret = NULL; struct at_desc *ret = NULL;
unsigned long flags;
unsigned int i = 0; unsigned int i = 0;
LIST_HEAD(tmp_list); LIST_HEAD(tmp_list);
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
i++; i++;
if (async_tx_test_ack(&desc->txd)) { if (async_tx_test_ack(&desc->txd)) {
@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
dev_dbg(chan2dev(&atchan->chan_common), dev_dbg(chan2dev(&atchan->chan_common),
"desc %p not ACKed\n", desc); "desc %p not ACKed\n", desc);
} }
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
dev_vdbg(chan2dev(&atchan->chan_common), dev_vdbg(chan2dev(&atchan->chan_common),
"scanned %u descriptors on freelist\n", i); "scanned %u descriptors on freelist\n", i);
@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
if (!ret) { if (!ret) {
ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
if (ret) { if (ret) {
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated++; atchan->descs_allocated++;
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} else { } else {
dev_err(chan2dev(&atchan->chan_common), dev_err(chan2dev(&atchan->chan_common),
"not enough descriptors available\n"); "not enough descriptors available\n");
@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{ {
if (desc) { if (desc) {
struct at_desc *child; struct at_desc *child;
unsigned long flags;
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node) list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&atchan->chan_common), dev_vdbg(chan2dev(&atchan->chan_common),
"moving child desc %p to freelist\n", "moving child desc %p to freelist\n",
@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
dev_vdbg(chan2dev(&atchan->chan_common), dev_vdbg(chan2dev(&atchan->chan_common),
"moving desc %p to freelist\n", desc); "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &atchan->free_list); list_add(&desc->desc_node, &atchan->free_list);
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} }
} }
@ -299,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* for cyclic transfers, /* for cyclic transfers,
* no need to replay callback function while stopping */ * no need to replay callback function while stopping */
if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) { if (!atc_chan_is_cyclic(atchan)) {
dma_async_tx_callback callback = txd->callback; dma_async_tx_callback callback = txd->callback;
void *param = txd->callback_param; void *param = txd->callback_param;
@ -471,16 +473,17 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
static void atc_tasklet(unsigned long data) static void atc_tasklet(unsigned long data)
{ {
struct at_dma_chan *atchan = (struct at_dma_chan *)data; struct at_dma_chan *atchan = (struct at_dma_chan *)data;
unsigned long flags;
spin_lock(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
atc_handle_error(atchan); atc_handle_error(atchan);
else if (test_bit(ATC_IS_CYCLIC, &atchan->status)) else if (atc_chan_is_cyclic(atchan))
atc_handle_cyclic(atchan); atc_handle_cyclic(atchan);
else else
atc_advance_work(atchan); atc_advance_work(atchan);
spin_unlock(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} }
static irqreturn_t at_dma_interrupt(int irq, void *dev_id) static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
struct at_desc *desc = txd_to_at_desc(tx); struct at_desc *desc = txd_to_at_desc(tx);
struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
dma_cookie_t cookie; dma_cookie_t cookie;
unsigned long flags;
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
cookie = atc_assign_cookie(atchan, desc); cookie = atc_assign_cookie(atchan, desc);
if (list_empty(&atchan->active_list)) { if (list_empty(&atchan->active_list)) {
@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&desc->desc_node, &atchan->queue); list_add_tail(&desc->desc_node, &atchan->queue);
} }
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
return cookie; return cookie;
} }
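/*
 * Illustrative note (not part of the patch): the _bh -> _irqsave
 * conversions throughout this file allow these paths to be entered
 * with interrupts already disabled (e.g. a peripheral driver
 * submitting its next descriptor from an IRQ handler), where the _bh
 * variants would be unsafe.
 */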
@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device); struct at_dma *atdma = to_at_dma(chan->device);
int chan_id = atchan->chan_common.chan_id; int chan_id = atchan->chan_common.chan_id;
unsigned long flags;
LIST_HEAD(list); LIST_HEAD(list);
dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
if (cmd == DMA_PAUSE) { if (cmd == DMA_PAUSE) {
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
set_bit(ATC_IS_PAUSED, &atchan->status); set_bit(ATC_IS_PAUSED, &atchan->status);
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_RESUME) { } else if (cmd == DMA_RESUME) {
if (!test_bit(ATC_IS_PAUSED, &atchan->status)) if (!atc_chan_is_paused(atchan))
return 0; return 0;
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
clear_bit(ATC_IS_PAUSED, &atchan->status); clear_bit(ATC_IS_PAUSED, &atchan->status);
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_TERMINATE_ALL) { } else if (cmd == DMA_TERMINATE_ALL) {
struct at_desc *desc, *_desc; struct at_desc *desc, *_desc;
/* /*
@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* channel. We still have to poll the channel enable bit due * channel. We still have to poll the channel enable bit due
* to AHB/HSB limitations. * to AHB/HSB limitations.
*/ */
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
/* disabling channel: must also remove suspend state */ /* disabling channel: must also remove suspend state */
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
/* if channel dedicated to cyclic operations, free it */ /* if channel dedicated to cyclic operations, free it */
clear_bit(ATC_IS_CYCLIC, &atchan->status); clear_bit(ATC_IS_CYCLIC, &atchan->status);
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} else { } else {
return -ENXIO; return -ENXIO;
} }
@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_chan *atchan = to_at_dma_chan(chan);
dma_cookie_t last_used; dma_cookie_t last_used;
dma_cookie_t last_complete; dma_cookie_t last_complete;
unsigned long flags;
enum dma_status ret; enum dma_status ret;
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
last_complete = atchan->completed_cookie; last_complete = atchan->completed_cookie;
last_used = chan->cookie; last_used = chan->cookie;
@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used); ret = dma_async_is_complete(cookie, last_complete, last_used);
} }
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
if (ret != DMA_SUCCESS) if (ret != DMA_SUCCESS)
dma_set_tx_state(txstate, last_complete, last_used, dma_set_tx_state(txstate, last_complete, last_used,
@ -1029,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan,
else else
dma_set_tx_state(txstate, last_complete, last_used, 0); dma_set_tx_state(txstate, last_complete, last_used, 0);
if (test_bit(ATC_IS_PAUSED, &atchan->status)) if (atc_chan_is_paused(atchan))
ret = DMA_PAUSED; ret = DMA_PAUSED;
dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
@ -1046,18 +1052,19 @@ atc_tx_status(struct dma_chan *chan,
static void atc_issue_pending(struct dma_chan *chan) static void atc_issue_pending(struct dma_chan *chan)
{ {
struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_chan *atchan = to_at_dma_chan(chan);
unsigned long flags;
dev_vdbg(chan2dev(chan), "issue_pending\n"); dev_vdbg(chan2dev(chan), "issue_pending\n");
/* Not needed for cyclic transfers */ /* Not needed for cyclic transfers */
if (test_bit(ATC_IS_CYCLIC, &atchan->status)) if (atc_chan_is_cyclic(atchan))
return; return;
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
if (!atc_chan_is_enabled(atchan)) { if (!atc_chan_is_enabled(atchan)) {
atc_advance_work(atchan); atc_advance_work(atchan);
} }
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} }
/** /**
@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
struct at_dma *atdma = to_at_dma(chan->device); struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc; struct at_desc *desc;
struct at_dma_slave *atslave; struct at_dma_slave *atslave;
unsigned long flags;
int i; int i;
u32 cfg; u32 cfg;
LIST_HEAD(tmp_list); LIST_HEAD(tmp_list);
@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &tmp_list); list_add_tail(&desc->desc_node, &tmp_list);
} }
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated = i; atchan->descs_allocated = i;
list_splice(&tmp_list, &atchan->free_list); list_splice(&tmp_list, &atchan->free_list);
atchan->completed_cookie = chan->cookie = 1; atchan->completed_cookie = chan->cookie = 1;
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
/* channel parameters */ /* channel parameters */
channel_writel(atchan, CFG, cfg); channel_writel(atchan, CFG, cfg);
@ -1293,15 +1301,13 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
/* controller can do slave DMA: can trigger cyclic transfers */
if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
atdma->dma_common.device_control = atc_control; atdma->dma_common.device_control = atc_control;
}
dma_writel(atdma, EN, AT_DMA_ENABLE); dma_writel(atdma, EN, AT_DMA_ENABLE);
@ -1377,27 +1383,112 @@ static void at_dma_shutdown(struct platform_device *pdev)
clk_disable(atdma->clk); clk_disable(atdma->clk);
} }
static int at_dma_prepare(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev);
struct dma_chan *chan, *_chan;
list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
device_node) {
struct at_dma_chan *atchan = to_at_dma_chan(chan);
/* wait for transaction completion (except in cyclic case) */
if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
return -EAGAIN;
}
return 0;
}
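/*
 * Illustrative note (not part of the patch): returning -EAGAIN from
 * the .prepare PM callback aborts this suspend attempt, giving an
 * in-flight non-cyclic transfer a chance to complete before suspend
 * is retried.
 */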
static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
struct dma_chan *chan = &atchan->chan_common;
/* The channel should already have been paused by its user;
* pause it here anyway if that was not done */
if (!atc_chan_is_paused(atchan)) {
dev_warn(chan2dev(chan),
"cyclic channel not paused, should be done by channel user\n");
atc_control(chan, DMA_PAUSE, 0);
}
/* now preserve additional data for cyclic operations */
/* next descriptor address in the cyclic list */
atchan->save_dscr = channel_readl(atchan, DSCR);
vdbg_dump_regs(atchan);
}
static int at_dma_suspend_noirq(struct device *dev) static int at_dma_suspend_noirq(struct device *dev)
{ {
struct platform_device *pdev = to_platform_device(dev); struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev); struct at_dma *atdma = platform_get_drvdata(pdev);
struct dma_chan *chan, *_chan;
at_dma_off(platform_get_drvdata(pdev)); /* preserve data */
list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
device_node) {
struct at_dma_chan *atchan = to_at_dma_chan(chan);
if (atc_chan_is_cyclic(atchan))
atc_suspend_cyclic(atchan);
atchan->save_cfg = channel_readl(atchan, CFG);
}
atdma->save_imr = dma_readl(atdma, EBCIMR);
/* disable DMA controller */
at_dma_off(atdma);
clk_disable(atdma->clk); clk_disable(atdma->clk);
return 0; return 0;
} }
static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
/* restore channel status for cyclic descriptors list:
* next descriptor in the cyclic list at the time of suspend */
channel_writel(atchan, SADDR, 0);
channel_writel(atchan, DADDR, 0);
channel_writel(atchan, CTRLA, 0);
channel_writel(atchan, CTRLB, 0);
channel_writel(atchan, DSCR, atchan->save_dscr);
dma_writel(atdma, CHER, atchan->mask);
/* The channel pause status should be removed by the channel user;
* we cannot take the initiative to do it here */
vdbg_dump_regs(atchan);
}
static int at_dma_resume_noirq(struct device *dev) static int at_dma_resume_noirq(struct device *dev)
{ {
struct platform_device *pdev = to_platform_device(dev); struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev); struct at_dma *atdma = platform_get_drvdata(pdev);
struct dma_chan *chan, *_chan;
/* bring back DMA controller */
clk_enable(atdma->clk); clk_enable(atdma->clk);
dma_writel(atdma, EN, AT_DMA_ENABLE); dma_writel(atdma, EN, AT_DMA_ENABLE);
/* clear any pending interrupt */
while (dma_readl(atdma, EBCISR))
cpu_relax();
/* restore saved data */
dma_writel(atdma, EBCIER, atdma->save_imr);
list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
device_node) {
struct at_dma_chan *atchan = to_at_dma_chan(chan);
channel_writel(atchan, CFG, atchan->save_cfg);
if (atc_chan_is_cyclic(atchan))
atc_resume_cyclic(atchan);
}
return 0; return 0;
} }
static const struct dev_pm_ops at_dma_dev_pm_ops = { static const struct dev_pm_ops at_dma_dev_pm_ops = {
.prepare = at_dma_prepare,
.suspend_noirq = at_dma_suspend_noirq, .suspend_noirq = at_dma_suspend_noirq,
.resume_noirq = at_dma_resume_noirq, .resume_noirq = at_dma_resume_noirq,
}; };

View File

@ -204,6 +204,9 @@ enum atc_status {
* @status: transmit status information from irq/prep* functions * @status: transmit status information from irq/prep* functions
* to tasklet (use atomic operations) * to tasklet (use atomic operations)
* @tasklet: bottom half to finish transaction work * @tasklet: bottom half to finish transaction work
* @save_cfg: configuration register that is saved on suspend/resume cycle
* @save_dscr: for cyclic operations, preserve next descriptor address in
* the cyclic list on suspend/resume cycle
* @lock: serializes enqueue/dequeue operations to descriptors lists * @lock: serializes enqueue/dequeue operations to descriptors lists
* @completed_cookie: identifier for the most recently completed operation * @completed_cookie: identifier for the most recently completed operation
* @active_list: list of descriptors the dmaengine is currently running on * @active_list: list of descriptors the dmaengine is currently running on
@ -218,6 +221,8 @@ struct at_dma_chan {
u8 mask; u8 mask;
unsigned long status; unsigned long status;
struct tasklet_struct tasklet; struct tasklet_struct tasklet;
u32 save_cfg;
u32 save_dscr;
spinlock_t lock; spinlock_t lock;
@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
* @chan_common: common dmaengine dma_device object members * @chan_common: common dmaengine dma_device object members
* @ch_regs: memory mapped register base * @ch_regs: memory mapped register base
* @clk: dma controller clock * @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
* @all_chan_mask: all channels available in a mask * @all_chan_mask: all channels available in a mask
* @dma_desc_pool: base of DMA descriptor region (DMA address) * @dma_desc_pool: base of DMA descriptor region (DMA address)
* @chan: channels table to store at_dma_chan structures * @chan: channels table to store at_dma_chan structures
@ -256,6 +262,7 @@ struct at_dma {
struct dma_device dma_common; struct dma_device dma_common;
void __iomem *regs; void __iomem *regs;
struct clk *clk; struct clk *clk;
u32 save_imr;
u8 all_chan_mask; u8 all_chan_mask;
@ -355,6 +362,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
return !!(dma_readl(atdma, CHSR) & atchan->mask); return !!(dma_readl(atdma, CHSR) & atchan->mask);
} }
/**
* atc_chan_is_paused - test channel pause/resume status
* @atchan: channel we want to test status
*/
static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
{
return test_bit(ATC_IS_PAUSED, &atchan->status);
}
/**
* atc_chan_is_cyclic - test if given channel has cyclic property set
* @atchan: channel we want to test status
*/
static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
{
return test_bit(ATC_IS_CYCLIC, &atchan->status);
}
/** /**
* set_desc_eol - set end-of-link to descriptor so it will end transfer * set_desc_eol - set end-of-link to descriptor so it will end transfer

View File

@ -10,6 +10,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/module.h> #include <linux/module.h>
@ -251,6 +252,7 @@ static int dmatest_func(void *data)
int i; int i;
thread_name = current->comm; thread_name = current->comm;
set_freezable_with_signal();
ret = -ENOMEM; ret = -ENOMEM;
@ -305,7 +307,8 @@ static int dmatest_func(void *data)
dma_addr_t dma_srcs[src_cnt]; dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt]; dma_addr_t dma_dsts[dst_cnt];
struct completion cmp; struct completion cmp;
unsigned long tmo = msecs_to_jiffies(timeout); unsigned long start, tmo, end = 0 /* compiler... */;
bool reload = true;
u8 align = 0; u8 align = 0;
total_tests++; total_tests++;
@ -404,7 +407,17 @@ static int dmatest_func(void *data)
} }
dma_async_issue_pending(chan); dma_async_issue_pending(chan);
tmo = wait_for_completion_timeout(&cmp, tmo); do {
start = jiffies;
if (reload)
end = start + msecs_to_jiffies(timeout);
else if (end <= start)
end = start + 1;
tmo = wait_for_completion_interruptible_timeout(&cmp,
end - start);
reload = try_to_freeze();
} while (tmo == -ERESTARTSYS);
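/*
 * Illustrative note (not part of the patch): if the thread is frozen
 * for a suspend/resume cycle, the wait returns -ERESTARTSYS and
 * try_to_freeze() reports the freeze, so "end" is recomputed from the
 * current jiffies rather than charging suspended time against the
 * test's timeout; a plain signal keeps the remaining end - start window.
 */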
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
if (tmo == 0) { if (tmo == 0) {
@ -477,6 +490,8 @@ err_srcs:
pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
thread_name, total_tests, failed_tests, ret); thread_name, total_tests, failed_tests, ret);
/* terminate all transfers on specified channels */
chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
if (iterations > 0) if (iterations > 0)
while (!kthread_should_stop()) { while (!kthread_should_stop()) {
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@ -499,6 +514,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
list_del(&thread->node); list_del(&thread->node);
kfree(thread); kfree(thread);
} }
/* terminate all transfers on specified channels */
dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
kfree(dtc); kfree(dtc);
} }

View File

@ -318,6 +318,7 @@ struct sdma_engine {
dma_addr_t context_phys; dma_addr_t context_phys;
struct dma_device dma_device; struct dma_device dma_device;
struct clk *clk; struct clk *clk;
struct mutex channel_0_lock;
struct sdma_script_start_addrs *script_addrs; struct sdma_script_start_addrs *script_addrs;
}; };
@ -415,11 +416,15 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
dma_addr_t buf_phys; dma_addr_t buf_phys;
int ret; int ret;
mutex_lock(&sdma->channel_0_lock);
buf_virt = dma_alloc_coherent(NULL, buf_virt = dma_alloc_coherent(NULL,
size, size,
&buf_phys, GFP_KERNEL); &buf_phys, GFP_KERNEL);
if (!buf_virt) if (!buf_virt) {
return -ENOMEM; ret = -ENOMEM;
goto err_out;
}
bd0->mode.command = C0_SETPM; bd0->mode.command = C0_SETPM;
bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
@ -433,6 +438,9 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
dma_free_coherent(NULL, size, buf_virt, buf_phys); dma_free_coherent(NULL, size, buf_virt, buf_phys);
err_out:
mutex_unlock(&sdma->channel_0_lock);
return ret; return ret;
} }
@ -656,6 +664,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
mutex_lock(&sdma->channel_0_lock);
memset(context, 0, sizeof(*context)); memset(context, 0, sizeof(*context));
context->channel_state.pc = load_address; context->channel_state.pc = load_address;
@ -676,6 +686,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
ret = sdma_run_channel(&sdma->channel[0]); ret = sdma_run_channel(&sdma->channel[0]);
mutex_unlock(&sdma->channel_0_lock);
return ret; return ret;
} }
@ -1131,18 +1143,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
saddr_arr[i] = addr_arr[i]; saddr_arr[i] = addr_arr[i];
} }
static int __init sdma_get_firmware(struct sdma_engine *sdma, static void sdma_load_firmware(const struct firmware *fw, void *context)
const char *fw_name)
{ {
const struct firmware *fw; struct sdma_engine *sdma = context;
const struct sdma_firmware_header *header; const struct sdma_firmware_header *header;
int ret;
const struct sdma_script_start_addrs *addr; const struct sdma_script_start_addrs *addr;
unsigned short *ram_code; unsigned short *ram_code;
ret = request_firmware(&fw, fw_name, sdma->dev); if (!fw) {
if (ret) dev_err(sdma->dev, "firmware not found\n");
return ret; return;
}
if (fw->size < sizeof(*header)) if (fw->size < sizeof(*header))
goto err_firmware; goto err_firmware;
@ -1172,6 +1183,16 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
err_firmware: err_firmware:
release_firmware(fw); release_firmware(fw);
}
static int __init sdma_get_firmware(struct sdma_engine *sdma,
const char *fw_name)
{
int ret;
ret = request_firmware_nowait(THIS_MODULE,
FW_ACTION_HOTPLUG, fw_name, sdma->dev,
GFP_KERNEL, sdma, sdma_load_firmware);
return ret; return ret;
} }
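/*
 * Illustrative note (not part of the patch): with
 * request_firmware_nowait() the probe path no longer blocks waiting
 * for userspace; sdma_load_firmware() runs asynchronously once the
 * firmware shows up (or with fw == NULL if it never does), which is
 * why the script addresses are pre-marked -EINVAL in probe below.
 */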
@ -1269,11 +1290,14 @@ static int __init sdma_probe(struct platform_device *pdev)
struct sdma_platform_data *pdata = pdev->dev.platform_data; struct sdma_platform_data *pdata = pdev->dev.platform_data;
int i; int i;
struct sdma_engine *sdma; struct sdma_engine *sdma;
s32 *saddr_arr;
sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
if (!sdma) if (!sdma)
return -ENOMEM; return -ENOMEM;
mutex_init(&sdma->channel_0_lock);
sdma->dev = &pdev->dev; sdma->dev = &pdev->dev;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@ -1310,6 +1334,11 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_alloc; goto err_alloc;
} }
/* initially no scripts available */
saddr_arr = (s32 *)sdma->script_addrs;
for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
saddr_arr[i] = -EINVAL;
if (of_id) if (of_id)
pdev->id_entry = of_id->data; pdev->id_entry = of_id->data;
sdma->devtype = pdev->id_entry->driver_data; sdma->devtype = pdev->id_entry->driver_data;

View File

@@ -130,6 +130,23 @@ struct mxs_dma_engine {
 	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
 };
+static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
+{
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	int chan_id = mxs_chan->chan.chan_id;
+	int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
+
+	/* enable apbh channel clock */
+	if (dma_is_apbh()) {
+		if (apbh_is_old())
+			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+				mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+		else
+			writel(1 << chan_id,
+				mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+	}
+}
+
 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 {
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -148,38 +165,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
 	int chan_id = mxs_chan->chan.chan_id;
+	/* clkgate needs to be enabled before writing other registers */
+	mxs_dma_clkgate(mxs_chan, 1);
+
 	/* set cmd_addr up */
 	writel(mxs_chan->ccw_phys,
 		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
-	/* enable apbh channel clock */
-	if (dma_is_apbh()) {
-		if (apbh_is_old())
-			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
-		else
-			writel(1 << chan_id,
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
-	}
 	/* write 1 to SEMA to kick off the channel */
 	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
 }
 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
 {
-	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-	int chan_id = mxs_chan->chan.chan_id;
-
 	/* disable apbh channel clock */
-	if (dma_is_apbh()) {
-		if (apbh_is_old())
-			writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
-		else
-			writel(1 << chan_id,
-				mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
-	}
+	mxs_dma_clkgate(mxs_chan, 0);
 	mxs_chan->status = DMA_SUCCESS;
 }
@@ -338,7 +338,10 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
 	if (ret)
 		goto err_clk;
+	/* clkgate needs to be enabled for reset to finish */
+	mxs_dma_clkgate(mxs_chan, 1);
 	mxs_dma_reset_chan(mxs_chan);
+	mxs_dma_clkgate(mxs_chan, 0);
 	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
 	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
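
The new mxs_dma_clkgate() helper collapses the duplicated enable/disable blocks into one place, and it leans on the MXS convention of SET/CLR shadow registers: writing a mask at offset 0x4 sets bits and at 0x8 clears them, with no read-modify-write. Since the APBH CLKGATE bit stops the clock when set, enabling maps to the CLR offset. A hedged sketch of the idiom (register names hypothetical):

    #include <linux/io.h>

    #define REG_CTRL0  0x00  /* control register            */
    #define REG_SET    0x04  /* write 1s here to set bits   */
    #define REG_CLR    0x08  /* write 1s here to clear bits */

    /* Gate or ungate one channel's clock without touching the other
     * CTRL0 bits and without a read-modify-write cycle. */
    static void chan_clkgate(void __iomem *base, int chan, bool enable)
    {
        u32 off = enable ? REG_CLR : REG_SET;  /* CLKGATE=1 stops the clock */

        writel(1 << chan, base + REG_CTRL0 + off);
    }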

View File

@@ -17,6 +17,8 @@
 #include <linux/interrupt.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/pl330.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
 #define NR_DEFAULT_DESC	16
@@ -68,6 +70,14 @@ struct dma_pl330_chan {
 	 * NULL if the channel is available to be acquired.
 	 */
 	void *pl330_chid;
+
+	/* For D-to-M and M-to-D channels */
+	int burst_sz; /* the peripheral fifo width */
+	int burst_len; /* the number of burst */
+	dma_addr_t fifo_addr;
+
+	/* for cyclic capability */
+	bool cyclic;
 };
 struct dma_pl330_dmac {
@@ -83,6 +93,8 @@ struct dma_pl330_dmac {
 	/* Peripheral channels connected to this DMAC */
 	struct dma_pl330_chan *peripherals; /* keep at end */
+
+	struct clk *clk;
 };
 struct dma_pl330_desc {
@@ -152,6 +164,31 @@ static inline void free_desc_list(struct list_head *list)
 	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
 }
+static inline void handle_cyclic_desc_list(struct list_head *list)
+{
+	struct dma_pl330_desc *desc;
+	struct dma_pl330_chan *pch;
+	unsigned long flags;
+
+	if (list_empty(list))
+		return;
+
+	list_for_each_entry(desc, list, node) {
+		dma_async_tx_callback callback;
+
+		/* Change status to reload it */
+		desc->status = PREP;
+		pch = desc->pchan;
+		callback = desc->txd.callback;
+		if (callback)
+			callback(desc->txd.callback_param);
+	}
+
+	spin_lock_irqsave(&pch->lock, flags);
+	list_splice_tail_init(list, &pch->work_list);
+	spin_unlock_irqrestore(&pch->lock, flags);
+}
+
 static inline void fill_queue(struct dma_pl330_chan *pch)
 {
 	struct dma_pl330_desc *desc;
@@ -205,6 +242,9 @@ static void pl330_tasklet(unsigned long data)
 	spin_unlock_irqrestore(&pch->lock, flags);
-	free_desc_list(&list);
+	if (pch->cyclic)
+		handle_cyclic_desc_list(&list);
+	else
+		free_desc_list(&list);
 }
@@ -236,6 +276,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&pch->lock, flags);
 	pch->completed = chan->cookie = 1;
+	pch->cyclic = false;
 	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
 	if (!pch->pl330_chid) {
@@ -253,25 +294,52 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
 static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
-	struct dma_pl330_desc *desc;
+	struct dma_pl330_desc *desc, *_dt;
 	unsigned long flags;
+	struct dma_pl330_dmac *pdmac = pch->dmac;
+	struct dma_slave_config *slave_config;
+	LIST_HEAD(list);
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
-	spin_lock_irqsave(&pch->lock, flags);
-
-	/* FLUSH the PL330 Channel thread */
-	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
-
-	/* Mark all desc done */
-	list_for_each_entry(desc, &pch->work_list, node)
-		desc->status = DONE;
-
-	spin_unlock_irqrestore(&pch->lock, flags);
-
-	pl330_tasklet((unsigned long) pch);
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		spin_lock_irqsave(&pch->lock, flags);
+
+		/* FLUSH the PL330 Channel thread */
+		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+
+		/* Mark all desc done */
+		list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
+			desc->status = DONE;
+			pch->completed = desc->txd.cookie;
+			list_move_tail(&desc->node, &list);
+		}
+
+		list_splice_tail_init(&list, &pdmac->desc_pool);
+		spin_unlock_irqrestore(&pch->lock, flags);
+		break;
+	case DMA_SLAVE_CONFIG:
+		slave_config = (struct dma_slave_config *)arg;
+
+		if (slave_config->direction == DMA_TO_DEVICE) {
+			if (slave_config->dst_addr)
+				pch->fifo_addr = slave_config->dst_addr;
+			if (slave_config->dst_addr_width)
+				pch->burst_sz = __ffs(slave_config->dst_addr_width);
+			if (slave_config->dst_maxburst)
+				pch->burst_len = slave_config->dst_maxburst;
+		} else if (slave_config->direction == DMA_FROM_DEVICE) {
+			if (slave_config->src_addr)
+				pch->fifo_addr = slave_config->src_addr;
+			if (slave_config->src_addr_width)
+				pch->burst_sz = __ffs(slave_config->src_addr_width);
+			if (slave_config->src_maxburst)
+				pch->burst_len = slave_config->src_maxburst;
+		}
+		break;
+	default:
+		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
+		return -ENXIO;
+	}
 	return 0;
 }
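
With DMA_SLAVE_CONFIG handled in pl330_control(), clients can describe the peripheral side of a transfer through the generic dmaengine interface instead of pl330-specific platform data. A hedged sketch of the client side (the channel and FIFO address are illustrative):

    #include <linux/dmaengine.h>

    /* Configure a channel for memory-to-peripheral transfers into a
     * 32-bit wide FIFO, bursting up to 4 words at a time. */
    static int cfg_tx_channel(struct dma_chan *chan, dma_addr_t fifo_phys)
    {
        struct dma_slave_config cfg = {
            .direction      = DMA_TO_DEVICE,
            .dst_addr       = fifo_phys,
            .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .dst_maxburst   = 4,
        };

        return dmaengine_slave_config(chan, &cfg);
    }

Note that the driver stores __ffs() of the address width: rqcfg.brst_size is a power-of-two exponent, so a 4-byte bus width is recorded as 2.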
@@ -288,6 +356,9 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	pl330_release_channel(pch->pl330_chid);
 	pch->pl330_chid = NULL;
+	if (pch->cyclic)
+		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
+
 	spin_unlock_irqrestore(&pch->lock, flags);
 }
@@ -453,7 +524,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
 	if (peri) {
 		desc->req.rqtype = peri->rqtype;
-		desc->req.peri = peri->peri_id;
+		desc->req.peri = pch->chan.chan_id;
 	} else {
 		desc->req.rqtype = MEMTOMEM;
 		desc->req.peri = 0;
@@ -524,6 +595,51 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
 	return burst_len;
 }
+static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
+		size_t period_len, enum dma_data_direction direction)
+{
+	struct dma_pl330_desc *desc;
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	dma_addr_t dst;
+	dma_addr_t src;
+
+	desc = pl330_get_desc(pch);
+	if (!desc) {
+		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+			__func__, __LINE__);
+		return NULL;
+	}
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		desc->rqcfg.src_inc = 1;
+		desc->rqcfg.dst_inc = 0;
+		src = dma_addr;
+		dst = pch->fifo_addr;
+		break;
+	case DMA_FROM_DEVICE:
+		desc->rqcfg.src_inc = 0;
+		desc->rqcfg.dst_inc = 1;
+		src = pch->fifo_addr;
+		dst = dma_addr;
+		break;
+	default:
+		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
+			__func__, __LINE__);
+		return NULL;
+	}
+
+	desc->rqcfg.brst_size = pch->burst_sz;
+	desc->rqcfg.brst_len = 1;
+
+	pch->cyclic = true;
+
+	fill_px(&desc->px, dst, src, period_len);
+
+	return &desc->txd;
+}
+
 static struct dma_async_tx_descriptor *
 pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 		dma_addr_t src, size_t len, unsigned long flags)
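
pl330_prep_dma_cyclic() programs a single period; the tasklet's handle_cyclic_desc_list() then resets the descriptor to PREP and re-queues it, firing the callback once per period, which is the shape audio streaming needs. A hedged sketch of how a client could start such a transfer (helper name and flags illustrative; there is no generic prep_dma_cyclic wrapper in this kernel, so the device hook is called directly):

    /* Ring of n_periods chunks; the callback fires once per period. */
    static int start_cyclic(struct dma_chan *chan, dma_addr_t buf,
                            size_t period, unsigned int n_periods,
                            dma_async_tx_callback cb, void *cb_arg)
    {
        struct dma_async_tx_descriptor *desc;

        desc = chan->device->device_prep_dma_cyclic(chan, buf,
                        period * n_periods, period, DMA_TO_DEVICE);
        if (!desc)
            return -EBUSY;

        desc->callback = cb;
        desc->callback_param = cb_arg;
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
    }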
@@ -579,7 +695,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct dma_pl330_peri *peri = chan->private;
 	struct scatterlist *sg;
 	unsigned long flags;
-	int i, burst_size;
+	int i;
 	dma_addr_t addr;
 	if (unlikely(!pch || !sgl || !sg_len || !peri))
@@ -595,8 +711,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		return NULL;
 	}
-	addr = peri->fifo_addr;
-	burst_size = peri->burst_sz;
+	addr = pch->fifo_addr;
 	first = NULL;
@@ -644,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				sg_dma_address(sg), addr, sg_dma_len(sg));
 		}
-		desc->rqcfg.brst_size = burst_size;
+		desc->rqcfg.brst_size = pch->burst_sz;
 		desc->rqcfg.brst_len = 1;
 	}
@@ -696,6 +811,30 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		goto probe_err1;
 	}
+	pdmac->clk = clk_get(&adev->dev, "dma");
+	if (IS_ERR(pdmac->clk)) {
+		dev_err(&adev->dev, "Cannot get operation clock.\n");
+		ret = -EINVAL;
+		goto probe_err1;
+	}
+
+	amba_set_drvdata(adev, pdmac);
+
+#ifdef CONFIG_PM_RUNTIME
+	/* to use the runtime PM helper functions */
+	pm_runtime_enable(&adev->dev);
+
+	/* enable the power domain */
+	if (pm_runtime_get_sync(&adev->dev)) {
+		dev_err(&adev->dev, "failed to get runtime pm\n");
+		ret = -ENODEV;
+		goto probe_err1;
+	}
+#else
+	/* enable dma clk */
+	clk_enable(pdmac->clk);
+#endif
+
 	irq = adev->irq[0];
 	ret = request_irq(irq, pl330_irq_handler, 0,
 			dev_name(&adev->dev), pi);
@@ -732,6 +871,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 			case MEMTODEV:
 			case DEVTOMEM:
 				dma_cap_set(DMA_SLAVE, pd->cap_mask);
+				dma_cap_set(DMA_CYCLIC, pd->cap_mask);
 				break;
 			default:
 				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
@@ -760,6 +900,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
 	pd->device_free_chan_resources = pl330_free_chan_resources;
 	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
+	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
 	pd->device_tx_status = pl330_tx_status;
 	pd->device_prep_slave_sg = pl330_prep_slave_sg;
 	pd->device_control = pl330_control;
@@ -771,8 +912,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		goto probe_err4;
 	}
-	amba_set_drvdata(adev, pdmac);
-
 	dev_info(&adev->dev,
 		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
 	dev_info(&adev->dev,
@@ -833,6 +972,13 @@ static int __devexit pl330_remove(struct amba_device *adev)
 	res = &adev->res;
 	release_mem_region(res->start, resource_size(res));
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_put(&adev->dev);
+	pm_runtime_disable(&adev->dev);
+#else
+	clk_disable(pdmac->clk);
+#endif
+
 	kfree(pdmac);
 	return 0;
@@ -846,10 +992,49 @@ static struct amba_id pl330_ids[] = {
 	{ 0, 0 },
 };
+#ifdef CONFIG_PM_RUNTIME
+static int pl330_runtime_suspend(struct device *dev)
+{
+	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
+
+	if (!pdmac) {
+		dev_err(dev, "failed to get dmac\n");
+		return -ENODEV;
+	}
+
+	clk_disable(pdmac->clk);
+
+	return 0;
+}
+
+static int pl330_runtime_resume(struct device *dev)
+{
+	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
+
+	if (!pdmac) {
+		dev_err(dev, "failed to get dmac\n");
+		return -ENODEV;
+	}
+
+	clk_enable(pdmac->clk);
+
+	return 0;
+}
+#else
+#define pl330_runtime_suspend	NULL
+#define pl330_runtime_resume	NULL
+#endif /* CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops pl330_pm_ops = {
+	.runtime_suspend = pl330_runtime_suspend,
+	.runtime_resume = pl330_runtime_resume,
+};
+
 static struct amba_driver pl330_driver = {
 	.drv = {
 		.owner = THIS_MODULE,
 		.name = "dma-pl330",
+		.pm = &pl330_pm_ops,
 	},
 	.id_table = pl330_ids,
 	.probe = pl330_probe,
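
The runtime PM callbacks only gate the DMAC's operation clock, and probe() takes a pm_runtime_get_sync() reference that remove() drops, so with CONFIG_PM_RUNTIME the clock is effectively held for the device's lifetime; finer-grained get/put around channel activity is left for later. For reference, the consumer-side pattern these callbacks enable looks roughly like this (a sketch, not code from this driver):

    #include <linux/pm_runtime.h>

    static int do_transfer(struct device *dev)
    {
        int ret;

        ret = pm_runtime_get_sync(dev);   /* runtime_resume: clk_enable() */
        if (ret < 0) {
            pm_runtime_put_noidle(dev);
            return ret;
        }

        /* ... program and run the DMA channel ... */

        pm_runtime_put(dev);              /* may trigger clk_disable() */
        return 0;
    }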

View File

@@ -913,9 +913,9 @@ request_done:
 }
 static void s3cmci_dma_setup(struct s3cmci_host *host,
-			     enum s3c2410_dmasrc source)
+			     enum dma_data_direction source)
 {
-	static enum s3c2410_dmasrc last_source = -1;
+	static enum dma_data_direction last_source = -1;
 	static int setup_ok;
 	if (last_source == source)
@@ -1087,7 +1087,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
 	BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
-	s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW);
+	s3cmci_dma_setup(host, rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
 	dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,

View File

@@ -131,6 +131,12 @@
 #define RXBUSY    (1<<2)
 #define TXBUSY    (1<<3)
+struct s3c64xx_spi_dma_data {
+	unsigned		ch;
+	enum dma_data_direction direction;
+	enum dma_ch		dmach;
+};
+
 /**
  * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
  * @clk: Pointer to the spi clock.
@@ -164,13 +170,14 @@ struct s3c64xx_spi_driver_data {
 	struct work_struct              work;
 	struct list_head                queue;
 	spinlock_t                      lock;
-	enum dma_ch                     rx_dmach;
-	enum dma_ch                     tx_dmach;
 	unsigned long                   sfr_start;
 	struct completion               xfer_completion;
 	unsigned                        state;
 	unsigned                        cur_mode, cur_bpw;
 	unsigned                        cur_speed;
+	struct s3c64xx_spi_dma_data	rx_dma;
+	struct s3c64xx_spi_dma_data	tx_dma;
+	struct samsung_dma_ops		*ops;
 };
 static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
@@ -226,6 +233,78 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
 	writel(val, regs + S3C64XX_SPI_CH_CFG);
 }
+static void s3c64xx_spi_dmacb(void *data)
+{
+	struct s3c64xx_spi_driver_data *sdd;
+	struct s3c64xx_spi_dma_data *dma = data;
+	unsigned long flags;
+
+	if (dma->direction == DMA_FROM_DEVICE)
+		sdd = container_of(data,
+			struct s3c64xx_spi_driver_data, rx_dma);
+	else
+		sdd = container_of(data,
+			struct s3c64xx_spi_driver_data, tx_dma);
+
+	spin_lock_irqsave(&sdd->lock, flags);
+
+	if (dma->direction == DMA_FROM_DEVICE) {
+		sdd->state &= ~RXBUSY;
+		if (!(sdd->state & TXBUSY))
+			complete(&sdd->xfer_completion);
+	} else {
+		sdd->state &= ~TXBUSY;
+		if (!(sdd->state & RXBUSY))
+			complete(&sdd->xfer_completion);
+	}
+
+	spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
+static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+					unsigned len, dma_addr_t buf)
+{
+	struct s3c64xx_spi_driver_data *sdd;
+	struct samsung_dma_prep_info info;
+
+	if (dma->direction == DMA_FROM_DEVICE)
+		sdd = container_of((void *)dma,
+			struct s3c64xx_spi_driver_data, rx_dma);
+	else
+		sdd = container_of((void *)dma,
+			struct s3c64xx_spi_driver_data, tx_dma);
+
+	info.cap = DMA_SLAVE;
+	info.len = len;
+	info.fp = s3c64xx_spi_dmacb;
+	info.fp_param = dma;
+	info.direction = dma->direction;
+	info.buf = buf;
+
+	sdd->ops->prepare(dma->ch, &info);
+	sdd->ops->trigger(dma->ch);
+}
+
+static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
+{
+	struct samsung_dma_info info;
+
+	sdd->ops = samsung_dma_get_ops();
+
+	info.cap = DMA_SLAVE;
+	info.client = &s3c64xx_spi_dma_client;
+	info.width = sdd->cur_bpw / 8;
+
+	info.direction = sdd->rx_dma.direction;
+	info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
+	sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &info);
+	info.direction = sdd->tx_dma.direction;
+	info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
+	sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &info);
+
+	return 1;
+}
+
 static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 				struct spi_device *spi,
 				struct spi_transfer *xfer, int dma_mode)
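
prepare_dma() and acquire_dma() now route everything through the vtable returned by samsung_dma_get_ops(), so the same SPI code runs whether the platform wires it to the legacy S3C DMA API or to the dmaengine/PL330 stack. The real definition lives in the Samsung platform headers; judging only from the call sites in this series, its shape is roughly (types simplified, name hypothetical):

    /* Inferred dispatch table: one implementation wraps s3c2410_dma_*,
     * the other wraps generic dmaengine calls. */
    struct samsung_dma_ops_sketch {
        unsigned (*request)(enum dma_ch ch, struct samsung_dma_info *info);
        int (*release)(unsigned ch, struct s3c2410_dma_client *client);
        int (*prepare)(unsigned ch, struct samsung_dma_prep_info *info);
        int (*trigger)(unsigned ch);
        int (*started)(unsigned ch);
        int (*flush)(unsigned ch);
        int (*stop)(unsigned ch);
    };

Callers never name a backend: they request a channel with a DMA_SLAVE or DMA_CYCLIC capability, queue buffers with prepare(), and kick them with trigger().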
@@ -258,10 +337,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
-			s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8);
-			s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
-						xfer->tx_dma, xfer->len);
-			s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
+			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -293,10 +369,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 					| S3C64XX_SPI_PACKET_CNT_EN,
 					regs + S3C64XX_SPI_PACKET_CNT);
-			s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8);
-			s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
-						xfer->rx_dma, xfer->len);
-			s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
+			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
 		}
 	}
@@ -482,46 +555,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
 	}
 }
-static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
-				 int size, enum s3c2410_dma_buffresult res)
-{
-	struct s3c64xx_spi_driver_data *sdd = buf_id;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdd->lock, flags);
-
-	if (res == S3C2410_RES_OK)
-		sdd->state &= ~RXBUSY;
-	else
-		dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
-
-	/* If the other done */
-	if (!(sdd->state & TXBUSY))
-		complete(&sdd->xfer_completion);
-
-	spin_unlock_irqrestore(&sdd->lock, flags);
-}
-
-static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
-				 int size, enum s3c2410_dma_buffresult res)
-{
-	struct s3c64xx_spi_driver_data *sdd = buf_id;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdd->lock, flags);
-
-	if (res == S3C2410_RES_OK)
-		sdd->state &= ~TXBUSY;
-	else
-		dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size);
-
-	/* If the other done */
-	if (!(sdd->state & RXBUSY))
-		complete(&sdd->xfer_completion);
-
-	spin_unlock_irqrestore(&sdd->lock, flags);
-}
-
 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
 static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
@@ -696,12 +729,10 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
 			if (use_dma) {
 				if (xfer->tx_buf != NULL
 						&& (sdd->state & TXBUSY))
-					s3c2410_dma_ctrl(sdd->tx_dmach,
-							S3C2410_DMAOP_FLUSH);
+					sdd->ops->stop(sdd->tx_dma.ch);
 				if (xfer->rx_buf != NULL
 						&& (sdd->state & RXBUSY))
-					s3c2410_dma_ctrl(sdd->rx_dmach,
-							S3C2410_DMAOP_FLUSH);
+					sdd->ops->stop(sdd->rx_dma.ch);
 			}
 			goto out;
@@ -739,30 +770,6 @@ out:
 		msg->complete(msg->context);
 }
-static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
-{
-	if (s3c2410_dma_request(sdd->rx_dmach,
-				&s3c64xx_spi_dma_client, NULL) < 0) {
-		dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
-		return 0;
-	}
-	s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
-	s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
-				sdd->sfr_start + S3C64XX_SPI_RX_DATA);
-
-	if (s3c2410_dma_request(sdd->tx_dmach,
-				&s3c64xx_spi_dma_client, NULL) < 0) {
-		dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
-		s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
-		return 0;
-	}
-	s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
-	s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
-				sdd->sfr_start + S3C64XX_SPI_TX_DATA);
-
-	return 1;
-}
-
 static void s3c64xx_spi_work(struct work_struct *work)
 {
 	struct s3c64xx_spi_driver_data *sdd = container_of(work,
@@ -799,8 +806,8 @@ static void s3c64xx_spi_work(struct work_struct *work)
 	spin_unlock_irqrestore(&sdd->lock, flags);
 	/* Free DMA channels */
-	s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
-	s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
+	sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
+	sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
 }
 static int s3c64xx_spi_transfer(struct spi_device *spi,
@@ -1017,8 +1024,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
 	sdd->cntrlr_info = sci;
 	sdd->pdev = pdev;
 	sdd->sfr_start = mem_res->start;
-	sdd->tx_dmach = dmatx_res->start;
-	sdd->rx_dmach = dmarx_res->start;
+	sdd->tx_dma.dmach = dmatx_res->start;
+	sdd->tx_dma.direction = DMA_TO_DEVICE;
+	sdd->rx_dma.dmach = dmarx_res->start;
+	sdd->rx_dma.direction = DMA_FROM_DEVICE;
 	sdd->cur_bpw = 8;
@@ -1106,7 +1115,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
 			pdev->id, master->num_chipselect);
 	dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
 					mem_res->end, mem_res->start,
-					sdd->rx_dmach, sdd->tx_dmach);
+					sdd->rx_dma.dmach, sdd->tx_dma.dmach);
 	return 0;

View File

@@ -47,6 +47,9 @@ enum {
  * @muxval: a number usually used to poke into some mux regiser to
  * mux in the signal to this channel
  * @cctl_opt: default options for the channel control register
+ * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
+ * channels. Fill with 'true' if peripheral should be flow controller. Direction
+ * will be selected at Runtime.
  * @addr: source/target address in physical memory for this DMA channel,
  * can be the address of a FIFO register for burst requests for example.
  * This can be left undefined if the PrimeCell API is used for configuring
@@ -65,6 +68,7 @@ struct pl08x_channel_data {
 	int max_signal;
 	u32 muxval;
 	u32 cctl;
+	bool device_fc;
 	dma_addr_t addr;
 	bool circular_buffer;
 	bool single;
@@ -77,13 +81,11 @@ struct pl08x_channel_data {
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
- * @fill_bytes: bytes required to fill to the next bus memory boundary
 */
 struct pl08x_bus_data {
 	dma_addr_t addr;
 	u8 maxwidth;
 	u8 buswidth;
-	size_t fill_bytes;
 };
 /**
@@ -105,8 +107,16 @@ struct pl08x_phy_chan {
 /**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @tx: async tx descriptor
+ * @node: node for txd list for channels
+ * @src_addr: src address of txd
+ * @dst_addr: dst address of txd
+ * @len: transfer len in bytes
+ * @direction: direction of transfer
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
 */
 struct pl08x_txd {
 	struct dma_async_tx_descriptor tx;

View File

@@ -19,12 +19,8 @@ struct dma_pl330_peri {
 	 * Peri_Req i/f of the DMAC that is
 	 * peripheral could be reached from.
 	 */
-	u8 peri_id; /* {0, 31} */
+	u8 peri_id; /* specific dma id */
 	enum pl330_reqtype rqtype;
-
-	/* For M->D and D->M Channels */
-	int burst_sz; /* in power of 2 */
-	dma_addr_t fifo_addr;
 };
 struct dma_pl330_platdata {

View File

@@ -24,8 +24,7 @@
 #include <linux/device.h>
 #include <linux/uio.h>
 #include <linux/dma-direction.h>
-
-struct scatterlist;
+#include <linux/scatterlist.h>
 /**
  * typedef dma_cookie_t - an opaque DMA cookie
@@ -519,6 +518,16 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
 			(unsigned long)config);
 }
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
+	struct dma_chan *chan, void *buf, size_t len,
+	enum dma_data_direction dir, unsigned long flags)
+{
+	struct scatterlist sg;
+	sg_init_one(&sg, buf, len);
+
+	return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags);
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
 	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
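
dmaengine_prep_slave_single() spares every slave client from open-coding a one-entry scatterlist for the common single-buffer case. A hedged usage sketch (channel setup and error paths elided):

    /* Push one contiguous buffer to a peripheral. chan was configured
     * beforehand via dmaengine_slave_config(). */
    static int xmit_one_buf(struct dma_chan *chan, void *buf, size_t len)
    {
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_slave_single(chan, buf, len,
                        DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
        if (!desc)
            return -ENOMEM;

        dmaengine_submit(desc);          /* queue the descriptor */
        dma_async_issue_pending(chan);   /* start the engine */
        return 0;
    }

The helper builds the scatterlist on the stack and maps nothing itself, so the caller is responsible for handing it a DMA-ready buffer; needing sg_init_one() here is also why the header now pulls in the full <linux/scatterlist.h> instead of a forward declaration.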

View File

@@ -271,7 +271,10 @@ static int s3c_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
 	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
+	if (!dma_data->ops)
+		dma_data->ops = samsung_dma_get_ops();
+
+	dma_data->ops->started(dma_data->channel);
 	return 0;
 }
@@ -317,7 +320,10 @@ static int s3c_ac97_mic_trigger(struct snd_pcm_substream *substream,
 	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
+	if (!dma_data->ops)
+		dma_data->ops = samsung_dma_get_ops();
+
+	dma_data->ops->started(dma_data->channel);
 	return 0;
 }

View File

@@ -54,7 +54,6 @@ struct runtime_data {
 	spinlock_t lock;
 	int state;
 	unsigned int dma_loaded;
-	unsigned int dma_limit;
 	unsigned int dma_period;
 	dma_addr_t dma_start;
 	dma_addr_t dma_pos;
@@ -62,77 +61,79 @@ struct runtime_data {
 	struct s3c_dma_params *params;
 };
+static void audio_buffdone(void *data);
+
 /* dma_enqueue
  *
  * place a dma buffer onto the queue for the dma system
  * to handle.
  */
 static void dma_enqueue(struct snd_pcm_substream *substream)
 {
 	struct runtime_data *prtd = substream->runtime->private_data;
 	dma_addr_t pos = prtd->dma_pos;
 	unsigned int limit;
-	int ret;
+	struct samsung_dma_prep_info dma_info;
 	pr_debug("Entered %s\n", __func__);
-	if (s3c_dma_has_circular())
-		limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period;
-	else
-		limit = prtd->dma_limit;
+	limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period;
 	pr_debug("%s: loaded %d, limit %d\n",
 				__func__, prtd->dma_loaded, limit);
-	while (prtd->dma_loaded < limit) {
-		unsigned long len = prtd->dma_period;
+	dma_info.cap = (samsung_dma_has_circular() ? DMA_CYCLIC : DMA_SLAVE);
+	dma_info.direction =
+		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK
+		? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	dma_info.fp = audio_buffdone;
+	dma_info.fp_param = substream;
+	dma_info.period = prtd->dma_period;
+	dma_info.len = prtd->dma_period*limit;
+
+	while (prtd->dma_loaded < limit) {
 		pr_debug("dma_loaded: %d\n", prtd->dma_loaded);
-		if ((pos + len) > prtd->dma_end) {
-			len = prtd->dma_end - pos;
-			pr_debug("%s: corrected dma len %ld\n", __func__, len);
+		if ((pos + dma_info.period) > prtd->dma_end) {
+			dma_info.period = prtd->dma_end - pos;
+			pr_debug("%s: corrected dma len %ld\n",
+					__func__, dma_info.period);
 		}
-		ret = s3c2410_dma_enqueue(prtd->params->channel,
-			substream, pos, len);
-		if (ret == 0) {
-			prtd->dma_loaded++;
-			pos += prtd->dma_period;
-			if (pos >= prtd->dma_end)
-				pos = prtd->dma_start;
-		} else
-			break;
+		dma_info.buf = pos;
+		prtd->params->ops->prepare(prtd->params->ch, &dma_info);
+
+		prtd->dma_loaded++;
+		pos += prtd->dma_period;
+		if (pos >= prtd->dma_end)
+			pos = prtd->dma_start;
 	}
 	prtd->dma_pos = pos;
 }
-static void audio_buffdone(struct s3c2410_dma_chan *channel,
-				void *dev_id, int size,
-				enum s3c2410_dma_buffresult result)
+static void audio_buffdone(void *data)
 {
-	struct snd_pcm_substream *substream = dev_id;
-	struct runtime_data *prtd;
+	struct snd_pcm_substream *substream = data;
+	struct runtime_data *prtd = substream->runtime->private_data;
 	pr_debug("Entered %s\n", __func__);
-	if (result == S3C2410_RES_ABORT || result == S3C2410_RES_ERR)
-		return;
-
-	prtd = substream->runtime->private_data;
+	if (prtd->state & ST_RUNNING) {
+		prtd->dma_pos += prtd->dma_period;
+		if (prtd->dma_pos >= prtd->dma_end)
+			prtd->dma_pos = prtd->dma_start;
 		if (substream)
 			snd_pcm_period_elapsed(substream);
 		spin_lock(&prtd->lock);
-	if (prtd->state & ST_RUNNING && !s3c_dma_has_circular()) {
+		if (!samsung_dma_has_circular()) {
 			prtd->dma_loaded--;
 			dma_enqueue(substream);
 		}
 		spin_unlock(&prtd->lock);
+	}
 }
 static int dma_hw_params(struct snd_pcm_substream *substream,
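
The reworked dma_enqueue()/audio_buffdone() pair treats the ALSA buffer as a ring of dma_period-sized chunks: 'limit' periods are primed up front, and each completion advances dma_pos with wraparound, re-queueing one chunk only when the backend lacks hardware circular support. dma_pointer() below then reduces to dma_pos - dma_start. The ring arithmetic as a standalone hedged sketch:

    /* Mirrors the dma_pos update in audio_buffdone() above: advance by
     * one period, wrapping at the buffer end. Values are byte addresses. */
    static unsigned long advance_period(unsigned long pos, unsigned long start,
                                        unsigned long end, unsigned long period)
    {
        pos += period;
        if (pos >= end)
            pos = start;
        return pos;
    }

    /* Number of whole periods the buffer holds, i.e. the queueing limit. */
    static unsigned long period_limit(unsigned long start, unsigned long end,
                                      unsigned long period)
    {
        return (end - start) / period;
    }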
@@ -144,8 +145,7 @@ static int dma_hw_params(struct snd_pcm_substream *substream,
 	unsigned long totbytes = params_buffer_bytes(params);
 	struct s3c_dma_params *dma =
 		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-	int ret = 0;
+	struct samsung_dma_info dma_info;
 	pr_debug("Entered %s\n", __func__);
@@ -163,30 +163,26 @@ static int dma_hw_params(struct snd_pcm_substream *substream,
 		pr_debug("params %p, client %p, channel %d\n", prtd->params,
 			prtd->params->client, prtd->params->channel);
-		ret = s3c2410_dma_request(prtd->params->channel,
-					prtd->params->client, NULL);
-		if (ret < 0) {
-			printk(KERN_ERR "failed to get dma channel\n");
-			return ret;
-		}
-
-		/* use the circular buffering if we have it available. */
-		if (s3c_dma_has_circular())
-			s3c2410_dma_setflags(prtd->params->channel,
-					S3C2410_DMAF_CIRCULAR);
-	}
-
-	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
-				audio_buffdone);
+		prtd->params->ops = samsung_dma_get_ops();
+
+		dma_info.cap = (samsung_dma_has_circular() ?
+			DMA_CYCLIC : DMA_SLAVE);
+		dma_info.client = prtd->params->client;
+		dma_info.direction =
+			(substream->stream == SNDRV_PCM_STREAM_PLAYBACK
+			? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		dma_info.width = prtd->params->dma_size;
+		dma_info.fifo = prtd->params->dma_addr;
+		prtd->params->ch = prtd->params->ops->request(
+				prtd->params->channel, &dma_info);
+	}
 	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
 	runtime->dma_bytes = totbytes;
 	spin_lock_irq(&prtd->lock);
 	prtd->dma_loaded = 0;
-	prtd->dma_limit = runtime->hw.periods_min;
 	prtd->dma_period = params_period_bytes(params);
 	prtd->dma_start = runtime->dma_addr;
 	prtd->dma_pos = prtd->dma_start;
@@ -206,7 +202,8 @@ static int dma_hw_free(struct snd_pcm_substream *substream)
 	snd_pcm_set_runtime_buffer(substream, NULL);
 	if (prtd->params) {
-		s3c2410_dma_free(prtd->params->channel, prtd->params->client);
+		prtd->params->ops->release(prtd->params->ch,
+					prtd->params->client);
 		prtd->params = NULL;
 	}
@@ -225,23 +222,9 @@ static int dma_prepare(struct snd_pcm_substream *substream)
 	if (!prtd->params)
 		return 0;
-	/* channel needs configuring for mem=>device, increment memory addr,
-	 * sync to pclk, half-word transfers to the IIS-FIFO. */
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		s3c2410_dma_devconfig(prtd->params->channel,
-				S3C2410_DMASRC_MEM,
-				prtd->params->dma_addr);
-	} else {
-		s3c2410_dma_devconfig(prtd->params->channel,
-				S3C2410_DMASRC_HW,
-				prtd->params->dma_addr);
-	}
-
-	s3c2410_dma_config(prtd->params->channel,
-			prtd->params->dma_size);
-
 	/* flush the DMA channel */
-	s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_FLUSH);
+	prtd->params->ops->flush(prtd->params->ch);
 	prtd->dma_loaded = 0;
 	prtd->dma_pos = prtd->dma_start;
@@ -265,14 +248,14 @@ static int dma_trigger(struct snd_pcm_substream *substream, int cmd)
 	case SNDRV_PCM_TRIGGER_RESUME:
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 		prtd->state |= ST_RUNNING;
-		s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START);
+		prtd->params->ops->trigger(prtd->params->ch);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 		prtd->state &= ~ST_RUNNING;
-		s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STOP);
+		prtd->params->ops->stop(prtd->params->ch);
 		break;
 	default:
@@ -291,21 +274,12 @@ dma_pointer(struct snd_pcm_substream *substream)
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct runtime_data *prtd = runtime->private_data;
 	unsigned long res;
-	dma_addr_t src, dst;
 	pr_debug("Entered %s\n", __func__);
-	spin_lock(&prtd->lock);
-	s3c2410_dma_getposition(prtd->params->channel, &src, &dst);
-
-	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		res = dst - prtd->dma_start;
-	else
-		res = src - prtd->dma_start;
-
-	spin_unlock(&prtd->lock);
-
-	pr_debug("Pointer %x %x\n", src, dst);
+	res = prtd->dma_pos - prtd->dma_start;
+
+	pr_debug("Pointer offset: %lu\n", res);
 	/* we seem to be getting the odd error from the pcm library due
 	 * to out-of-bounds pointers. this is maybe due to the dma engine

View File

@@ -6,7 +6,7 @@
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
- *  ALSA PCM interface for the Samsung S3C24xx CPU
+ *  ALSA PCM interface for the Samsung SoC
 */
 #ifndef _S3C_AUDIO_H
@@ -17,6 +17,8 @@ struct s3c_dma_params {
 	int channel;				/* Channel ID */
 	dma_addr_t dma_addr;
 	int dma_size;			/* Size of the DMA transfer */
+	unsigned ch;
+	struct samsung_dma_ops *ops;
 };
 #endif