Commit fba95699 authored by Linus Torvalds

Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (63 commits)
  dmaengine: mid_dma: mask_peripheral_interrupt only when dmac is idle
  dmaengine/ep93xx_dma: add module.h include
  pch_dma: Reduce wasting memory
  pch_dma: Fix suspend issue
  dma/timberdale: free_irq() on an error path
  dma: shdma: transfer based runtime PM
  dmaengine: shdma: protect against the IRQ handler
  dmaengine i.MX DMA/SDMA: add missing include of linux/module.h
  dmaengine: delete redundant chan_id and chancnt initialization in dma drivers
  dmaengine/amba-pl08x: Check txd->llis_va before freeing dma_pool
  dmaengine/amba-pl08x: Add support for sg len greater than one for slave transfers
  serial: sh-sci: don't filter on DMA device, use only channel ID
  ARM: SAMSUNG: Remove Samsung specific enum type for dma direction
  ASoC: Samsung: Update DMA interface
  spi/s3c64xx: Merge dma control code
  spi/s3c64xx: Add support DMA engine API
  ARM: SAMSUNG: Remove S3C-PL330-DMA driver
  ARM: S5P64X0: Use generic DMA PL330 driver
  ARM: S5PC100: Use generic DMA PL330 driver
  ARM: S5PV210: Use generic DMA PL330 driver
  ...

Fix up fairly trivial conflicts in
 - arch/arm/mach-exynos4/{Kconfig,clock.c}
 - arch/arm/mach-s5p64x0/dma.c
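
The theme of the Samsung side of this merge is visible in the platform diffs below: each PL330 DMAC stops being an "s3c-pl330" platform_device with a private s3c_pl330_platdata and becomes an AMBA device carrying a dma_pl330_platdata for the generic dmaengine PL330 driver. A condensed, illustrative sketch of the new shape (the example_ names, the channel list, and the address/IRQ literals are placeholders, not code from this merge):

#include <linux/dma-mapping.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <asm/irq.h>
#include <mach/dma.h>

static u64 dma_dmamask = DMA_BIT_MASK(32);

/* hypothetical peripheral map: one entry per request interface */
static struct dma_pl330_peri example_peri[] = {
	{ .peri_id = (u8)DMACH_UART0_RX, .rqtype = DEVTOMEM },
	{ .peri_id = (u8)DMACH_UART0_TX, .rqtype = MEMTODEV },
};

static struct dma_pl330_platdata example_pdma_pdata = {
	.nr_valid_peri = ARRAY_SIZE(example_peri),
	.peri = example_peri,
};

static struct amba_device example_device_pdma = {
	.dev = {
		.init_name = "dma-pl330.0",
		.dma_mask = &dma_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
		.platform_data = &example_pdma_pdata,
	},
	.res = {
		.start = 0x12680000,		/* placeholder DMAC register window */
		.end = 0x12680000 + SZ_4K,
		.flags = IORESOURCE_MEM,
	},
	.irq = {35, NO_IRQ},			/* placeholder IRQ number */
	.periphid = 0x00041330,			/* PL330 */
};

static int __init example_dma_init(void)
{
	/* registered against iomem_resource, as the mach-* files below do */
	return amba_device_register(&example_device_pdma, &iomem_resource);
}

Clients then reach these channels either directly through the dmaengine API or through the samsung_dma_ops wrapper added in plat-samsung further down.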
@@ -21,6 +21,9 @@
  * OneNAND features.
  */

+#ifndef ASM_PL080_H
+#define ASM_PL080_H
+
 #define PL080_INT_STATUS (0x00)
 #define PL080_TC_STATUS (0x04)
 #define PL080_TC_CLEAR (0x08)
@@ -138,3 +141,4 @@ struct pl080s_lli {
 	u32 control1;
 };
+
+#endif /* ASM_PL080_H */
@@ -11,7 +11,7 @@ if ARCH_EXYNOS4
 config CPU_EXYNOS4210
 	bool
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
 	select ARM_CPU_SUSPEND if PM
 	help
 	  Enable EXYNOS4210 CPU support
...
@@ -111,6 +111,11 @@ struct clk clk_sclk_usbphy1 = {
 	.name = "sclk_usbphy1",
 };

+static struct clk dummy_apb_pclk = {
+	.name = "apb_pclk",
+	.id = -1,
+};
+
 static int exynos4_clksrc_mask_top_ctrl(struct clk *clk, int enable)
 {
 	return s5p_gatectrl(S5P_CLKSRC_MASK_TOP, clk, enable);
@@ -503,12 +508,12 @@ static struct clk init_clocks_off[] = {
 		.enable = exynos4_clk_ip_fsys_ctrl,
 		.ctrlbit = (1 << 9),
 	}, {
-		.name = "pdma",
+		.name = "dma",
 		.devname = "s3c-pl330.0",
 		.enable = exynos4_clk_ip_fsys_ctrl,
 		.ctrlbit = (1 << 0),
 	}, {
-		.name = "pdma",
+		.name = "dma",
 		.devname = "s3c-pl330.1",
 		.enable = exynos4_clk_ip_fsys_ctrl,
 		.ctrlbit = (1 << 1),
@@ -1281,6 +1286,11 @@ void __init exynos4_register_clocks(void)
 	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

<<<<<<< HEAD
 	register_syscore_ops(&exynos4_clock_syscore_ops);
=======
 	s3c24xx_register_clock(&dummy_apb_pclk);
>>>>>>> 4598fc2c94b68740e0269db03c98a1e7ad5af773

 	s3c_pwmclk_init();
 }
@@ -21,151 +21,228 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */

-#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>

+#include <asm/irq.h>
 #include <plat/devs.h>
 #include <plat/irqs.h>

 #include <mach/map.h>
 #include <mach/irqs.h>
+#include <mach/dma.h>

-#include <plat/s3c-pl330-pdata.h>

 static u64 dma_dmamask = DMA_BIT_MASK(32);
static struct resource exynos4_pdma0_resource[] = { struct dma_pl330_peri pdma0_peri[28] = {
[0] = { {
.start = EXYNOS4_PA_PDMA0, .peri_id = (u8)DMACH_PCM0_RX,
.end = EXYNOS4_PA_PDMA0 + SZ_4K, .rqtype = DEVTOMEM,
.flags = IORESOURCE_MEM, }, {
}, .peri_id = (u8)DMACH_PCM0_TX,
[1] = { .rqtype = MEMTODEV,
.start = IRQ_PDMA0, }, {
.end = IRQ_PDMA0, .peri_id = (u8)DMACH_PCM2_RX,
.flags = IORESOURCE_IRQ, .rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_PCM2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_MSM_REQ0,
}, {
.peri_id = (u8)DMACH_MSM_REQ2,
}, {
.peri_id = (u8)DMACH_SPI0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S0S_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_UART0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_UART2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_UART4_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART4_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SLIMBUS0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SLIMBUS0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SLIMBUS2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SLIMBUS2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SLIMBUS4_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SLIMBUS4_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_AC97_MICIN,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_AC97_PCMIN,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_AC97_PCMOUT,
.rqtype = MEMTODEV,
}, },
}; };
static struct s3c_pl330_platdata exynos4_pdma0_pdata = { struct dma_pl330_platdata exynos4_pdma0_pdata = {
.peri = { .nr_valid_peri = ARRAY_SIZE(pdma0_peri),
[0] = DMACH_PCM0_RX, .peri = pdma0_peri,
[1] = DMACH_PCM0_TX,
[2] = DMACH_PCM2_RX,
[3] = DMACH_PCM2_TX,
[4] = DMACH_MSM_REQ0,
[5] = DMACH_MSM_REQ2,
[6] = DMACH_SPI0_RX,
[7] = DMACH_SPI0_TX,
[8] = DMACH_SPI2_RX,
[9] = DMACH_SPI2_TX,
[10] = DMACH_I2S0S_TX,
[11] = DMACH_I2S0_RX,
[12] = DMACH_I2S0_TX,
[13] = DMACH_I2S2_RX,
[14] = DMACH_I2S2_TX,
[15] = DMACH_UART0_RX,
[16] = DMACH_UART0_TX,
[17] = DMACH_UART2_RX,
[18] = DMACH_UART2_TX,
[19] = DMACH_UART4_RX,
[20] = DMACH_UART4_TX,
[21] = DMACH_SLIMBUS0_RX,
[22] = DMACH_SLIMBUS0_TX,
[23] = DMACH_SLIMBUS2_RX,
[24] = DMACH_SLIMBUS2_TX,
[25] = DMACH_SLIMBUS4_RX,
[26] = DMACH_SLIMBUS4_TX,
[27] = DMACH_AC97_MICIN,
[28] = DMACH_AC97_PCMIN,
[29] = DMACH_AC97_PCMOUT,
[30] = DMACH_MAX,
[31] = DMACH_MAX,
},
}; };
static struct platform_device exynos4_device_pdma0 = { struct amba_device exynos4_device_pdma0 = {
.name = "s3c-pl330", .dev = {
.id = 0, .init_name = "dma-pl330.0",
.num_resources = ARRAY_SIZE(exynos4_pdma0_resource),
.resource = exynos4_pdma0_resource,
.dev = {
.dma_mask = &dma_dmamask, .dma_mask = &dma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32), .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &exynos4_pdma0_pdata, .platform_data = &exynos4_pdma0_pdata,
}, },
.res = {
.start = EXYNOS4_PA_PDMA0,
.end = EXYNOS4_PA_PDMA0 + SZ_4K,
.flags = IORESOURCE_MEM,
},
.irq = {IRQ_PDMA0, NO_IRQ},
.periphid = 0x00041330,
}; };
static struct resource exynos4_pdma1_resource[] = { struct dma_pl330_peri pdma1_peri[25] = {
[0] = { {
.start = EXYNOS4_PA_PDMA1, .peri_id = (u8)DMACH_PCM0_RX,
.end = EXYNOS4_PA_PDMA1 + SZ_4K, .rqtype = DEVTOMEM,
.flags = IORESOURCE_MEM, }, {
}, .peri_id = (u8)DMACH_PCM0_TX,
[1] = { .rqtype = MEMTODEV,
.start = IRQ_PDMA1, }, {
.end = IRQ_PDMA1, .peri_id = (u8)DMACH_PCM1_RX,
.flags = IORESOURCE_IRQ, .rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_PCM1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_MSM_REQ1,
}, {
.peri_id = (u8)DMACH_MSM_REQ3,
}, {
.peri_id = (u8)DMACH_SPI1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S0S_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_UART0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_UART1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_UART3_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART3_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SLIMBUS1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SLIMBUS1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SLIMBUS3_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SLIMBUS3_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SLIMBUS5_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SLIMBUS5_TX,
.rqtype = MEMTODEV,
}, },
}; };
static struct s3c_pl330_platdata exynos4_pdma1_pdata = { struct dma_pl330_platdata exynos4_pdma1_pdata = {
.peri = { .nr_valid_peri = ARRAY_SIZE(pdma1_peri),
[0] = DMACH_PCM0_RX, .peri = pdma1_peri,
[1] = DMACH_PCM0_TX,
[2] = DMACH_PCM1_RX,
[3] = DMACH_PCM1_TX,
[4] = DMACH_MSM_REQ1,
[5] = DMACH_MSM_REQ3,
[6] = DMACH_SPI1_RX,
[7] = DMACH_SPI1_TX,
[8] = DMACH_I2S0S_TX,
[9] = DMACH_I2S0_RX,
[10] = DMACH_I2S0_TX,
[11] = DMACH_I2S1_RX,
[12] = DMACH_I2S1_TX,
[13] = DMACH_UART0_RX,
[14] = DMACH_UART0_TX,
[15] = DMACH_UART1_RX,
[16] = DMACH_UART1_TX,
[17] = DMACH_UART3_RX,
[18] = DMACH_UART3_TX,
[19] = DMACH_SLIMBUS1_RX,
[20] = DMACH_SLIMBUS1_TX,
[21] = DMACH_SLIMBUS3_RX,
[22] = DMACH_SLIMBUS3_TX,
[23] = DMACH_SLIMBUS5_RX,
[24] = DMACH_SLIMBUS5_TX,
[25] = DMACH_SLIMBUS0AUX_RX,
[26] = DMACH_SLIMBUS0AUX_TX,
[27] = DMACH_SPDIF,
[28] = DMACH_MAX,
[29] = DMACH_MAX,
[30] = DMACH_MAX,
[31] = DMACH_MAX,
},
}; };
static struct platform_device exynos4_device_pdma1 = { struct amba_device exynos4_device_pdma1 = {
.name = "s3c-pl330", .dev = {
.id = 1, .init_name = "dma-pl330.1",
.num_resources = ARRAY_SIZE(exynos4_pdma1_resource),
.resource = exynos4_pdma1_resource,
.dev = {
.dma_mask = &dma_dmamask, .dma_mask = &dma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32), .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &exynos4_pdma1_pdata, .platform_data = &exynos4_pdma1_pdata,
}, },
}; .res = {
.start = EXYNOS4_PA_PDMA1,
static struct platform_device *exynos4_dmacs[] __initdata = { .end = EXYNOS4_PA_PDMA1 + SZ_4K,
&exynos4_device_pdma0, .flags = IORESOURCE_MEM,
&exynos4_device_pdma1, },
.irq = {IRQ_PDMA1, NO_IRQ},
.periphid = 0x00041330,
}; };
 static int __init exynos4_dma_init(void)
 {
-	platform_add_devices(exynos4_dmacs, ARRAY_SIZE(exynos4_dmacs));
+	amba_device_register(&exynos4_device_pdma0, &iomem_resource);

 	return 0;
 }
...
@@ -20,7 +20,7 @@
 #ifndef __MACH_DMA_H
 #define __MACH_DMA_H

-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+/* This platform uses the common DMA API driver for PL330 */
+#include <plat/dma-pl330.h>

 #endif /* __MACH_DMA_H */
@@ -13,7 +13,6 @@
 #ifndef __ASM_ARCH_DMA_H
 #define __ASM_ARCH_DMA_H __FILE__

-#include <plat/dma.h>
 #include <linux/sysdev.h>

 #define MAX_DMA_TRANSFER_SIZE 0x100000 /* Data Unit is half word */
@@ -51,6 +50,18 @@ enum dma_ch {
 	DMACH_MAX,		/* the end entry */
 };

+static inline bool samsung_dma_has_circular(void)
+{
+	return false;
+}
+
+static inline bool samsung_dma_is_dmadev(void)
+{
+	return false;
+}
+
+#include <plat/dma.h>
+
 #define DMACH_LOW_LEVEL (1<<28) /* use this to specifiy hardware ch no */

 /* we have 4 dma channels */
@@ -163,7 +174,7 @@ struct s3c2410_dma_chan {
 	struct s3c2410_dma_client *client;

 	/* channel configuration */
-	enum s3c2410_dmasrc source;
+	enum dma_data_direction source;
 	enum dma_ch req_ch;
 	unsigned long dev_addr;
 	unsigned long load_timeout;
@@ -196,9 +207,4 @@ struct s3c2410_dma_chan {

 typedef unsigned long dma_device_t;

-static inline bool s3c_dma_has_circular(void)
-{
-	return false;
-}
-
 #endif /* __ASM_ARCH_DMA_H */
@@ -130,11 +130,11 @@ static struct s3c24xx_dma_map __initdata s3c2412_dma_mappings[] = {

 static void s3c2412_dma_direction(struct s3c2410_dma_chan *chan,
 				  struct s3c24xx_dma_map *map,
-				  enum s3c2410_dmasrc dir)
+				  enum dma_data_direction dir)
 {
 	unsigned long chsel;

-	if (dir == S3C2410_DMASRC_HW)
+	if (dir == DMA_FROM_DEVICE)
 		chsel = map->channels_rx[0];
 	else
 		chsel = map->channels[0];
...
@@ -147,14 +147,14 @@ static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
 	u32 control0, control1;

 	switch (chan->source) {
-	case S3C2410_DMASRC_HW:
+	case DMA_FROM_DEVICE:
 		src = chan->dev_addr;
 		dst = data;
 		control0 = PL080_CONTROL_SRC_AHB2;
 		control0 |= PL080_CONTROL_DST_INCR;
 		break;
-	case S3C2410_DMASRC_MEM:
+	case DMA_TO_DEVICE:
 		src = data;
 		dst = chan->dev_addr;
 		control0 = PL080_CONTROL_DST_AHB2;
@@ -416,7 +416,7 @@ EXPORT_SYMBOL(s3c2410_dma_enqueue);

 int s3c2410_dma_devconfig(enum dma_ch channel,
-			  enum s3c2410_dmasrc source,
+			  enum dma_data_direction source,
 			  unsigned long devaddr)
 {
 	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -437,11 +437,11 @@ int s3c2410_dma_devconfig(enum dma_ch channel,
 	pr_debug("%s: peripheral %d\n", __func__, peripheral);

 	switch (source) {
-	case S3C2410_DMASRC_HW:
+	case DMA_FROM_DEVICE:
 		config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
 		break;
-	case S3C2410_DMASRC_MEM:
+	case DMA_TO_DEVICE:
 		config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
 		config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
 		break;
...
@@ -58,11 +58,15 @@ enum dma_ch {
 	DMACH_MAX	/* the end */
 };

-static __inline__ bool s3c_dma_has_circular(void)
+static inline bool samsung_dma_has_circular(void)
 {
 	return true;
 }

+static inline bool samsung_dma_is_dmadev(void)
+{
+	return false;
+}
+
 #define S3C2410_DMAF_CIRCULAR (1 << 0)

 #include <plat/dma.h>
@@ -95,7 +99,7 @@ struct s3c2410_dma_chan {
 	unsigned char peripheral;

 	unsigned int flags;
-	enum s3c2410_dmasrc source;
+	enum dma_data_direction source;

 	dma_addr_t dev_addr;
...
@@ -9,14 +9,14 @@ if ARCH_S5P64X0
 config CPU_S5P6440
 	bool
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
 	select S5P_HRT
 	help
 	  Enable S5P6440 CPU support

 config CPU_S5P6450
 	bool
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
 	select S5P_HRT
 	help
 	  Enable S5P6450 CPU support
...
@@ -146,7 +146,7 @@ static struct clk init_clocks_off[] = {
 		.enable = s5p64x0_hclk0_ctrl,
 		.ctrlbit = (1 << 8),
 	}, {
-		.name = "pdma",
+		.name = "dma",
 		.parent = &clk_hclk_low.clk,
 		.enable = s5p64x0_hclk0_ctrl,
 		.ctrlbit = (1 << 12),
@@ -499,6 +499,11 @@ static struct clksrc_clk *sysclks[] = {
 	&clk_pclk_low,
 };

+static struct clk dummy_apb_pclk = {
+	.name = "apb_pclk",
+	.id = -1,
+};
+
 void __init_or_cpufreq s5p6440_setup_clocks(void)
 {
 	struct clk *xtal_clk;
@@ -581,5 +586,7 @@ void __init s5p6440_register_clocks(void)
 	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

+	s3c24xx_register_clock(&dummy_apb_pclk);
+
 	s3c_pwmclk_init();
 }
@@ -179,7 +179,7 @@ static struct clk init_clocks_off[] = {
 		.enable = s5p64x0_hclk0_ctrl,
 		.ctrlbit = (1 << 3),
 	}, {
-		.name = "pdma",
+		.name = "dma",
 		.parent = &clk_hclk_low.clk,
 		.enable = s5p64x0_hclk0_ctrl,
 		.ctrlbit = (1 << 12),
@@ -553,6 +553,11 @@ static struct clksrc_clk *sysclks[] = {
 	&clk_sclk_audio0,
 };

+static struct clk dummy_apb_pclk = {
+	.name = "apb_pclk",
+	.id = -1,
+};
+
 void __init_or_cpufreq s5p6450_setup_clocks(void)
 {
 	struct clk *xtal_clk;
@@ -632,5 +637,7 @@ void __init s5p6450_register_clocks(void)
 	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

+	s3c24xx_register_clock(&dummy_apb_pclk);
+
 	s3c_pwmclk_init();
 }
@@ -21,115 +21,208 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */

-#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>

+#include <asm/irq.h>
 #include <mach/map.h>
 #include <mach/irqs.h>
 #include <mach/regs-clock.h>
+#include <mach/dma.h>

 #include <plat/cpu.h>
 #include <plat/devs.h>
-#include <plat/s3c-pl330-pdata.h>
+#include <plat/irqs.h>

 static u64 dma_dmamask = DMA_BIT_MASK(32);
static struct resource s5p64x0_pdma_resource[] = { struct dma_pl330_peri s5p6440_pdma_peri[22] = {
[0] = { {
.start = S5P64X0_PA_PDMA, .peri_id = (u8)DMACH_UART0_RX,
.end = S5P64X0_PA_PDMA + SZ_4K, .rqtype = DEVTOMEM,
.flags = IORESOURCE_MEM, }, {
}, .peri_id = (u8)DMACH_UART0_TX,
[1] = { .rqtype = MEMTODEV,
.start = IRQ_DMA0, }, {
.end = IRQ_DMA0, .peri_id = (u8)DMACH_UART1_RX,
.flags = IORESOURCE_IRQ, .rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_UART2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_UART3_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART3_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = DMACH_MAX,
}, {
.peri_id = DMACH_MAX,
}, {
.peri_id = (u8)DMACH_PCM0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_PCM0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_MAX,
}, {
.peri_id = (u8)DMACH_MAX,
}, {
.peri_id = (u8)DMACH_MAX,
}, {
.peri_id = (u8)DMACH_MAX,
}, {
.peri_id = (u8)DMACH_SPI1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI1_RX,
.rqtype = DEVTOMEM,
}, },
}; };
static struct s3c_pl330_platdata s5p6440_pdma_pdata = { struct dma_pl330_platdata s5p6440_pdma_pdata = {
.peri = { .nr_valid_peri = ARRAY_SIZE(s5p6440_pdma_peri),
[0] = DMACH_UART0_RX, .peri = s5p6440_pdma_peri,
[1] = DMACH_UART0_TX,
[2] = DMACH_UART1_RX,
[3] = DMACH_UART1_TX,
[4] = DMACH_UART2_RX,
[5] = DMACH_UART2_TX,
[6] = DMACH_UART3_RX,
[7] = DMACH_UART3_TX,
[8] = DMACH_MAX,
[9] = DMACH_MAX,
[10] = DMACH_PCM0_TX,
[11] = DMACH_PCM0_RX,
[12] = DMACH_I2S0_TX,
[13] = DMACH_I2S0_RX,
[14] = DMACH_SPI0_TX,
[15] = DMACH_SPI0_RX,
[16] = DMACH_MAX,
[17] = DMACH_MAX,
[18] = DMACH_MAX,
[19] = DMACH_MAX,
[20] = DMACH_SPI1_TX,
[21] = DMACH_SPI1_RX,
[22] = DMACH_MAX,
[23] = DMACH_MAX,
[24] = DMACH_MAX,
[25] = DMACH_MAX,
[26] = DMACH_MAX,
[27] = DMACH_MAX,
[28] = DMACH_MAX,
[29] = DMACH_PWM,
[30] = DMACH_MAX,
[31] = DMACH_MAX,
},
}; };
static struct s3c_pl330_platdata s5p6450_pdma_pdata = { struct dma_pl330_peri s5p6450_pdma_peri[32] = {
.peri = { {
[0] = DMACH_UART0_RX, .peri_id = (u8)DMACH_UART0_RX,
[1] = DMACH_UART0_TX, .rqtype = DEVTOMEM,
[2] = DMACH_UART1_RX, }, {
[3] = DMACH_UART1_TX, .peri_id = (u8)DMACH_UART0_TX,
[4] = DMACH_UART2_RX, .rqtype = MEMTODEV,
[5] = DMACH_UART2_TX, }, {
[6] = DMACH_UART3_RX, .peri_id = (u8)DMACH_UART1_RX,
[7] = DMACH_UART3_TX, .rqtype = DEVTOMEM,
[8] = DMACH_UART4_RX, }, {
[9] = DMACH_UART4_TX, .peri_id = (u8)DMACH_UART1_TX,
[10] = DMACH_PCM0_TX, .rqtype = MEMTODEV,
[11] = DMACH_PCM0_RX, }, {
[12] = DMACH_I2S0_TX, .peri_id = (u8)DMACH_UART2_RX,
[13] = DMACH_I2S0_RX, .rqtype = DEVTOMEM,
[14] = DMACH_SPI0_TX, }, {
[15] = DMACH_SPI0_RX, .peri_id = (u8)DMACH_UART2_TX,
[16] = DMACH_PCM1_TX, .rqtype = MEMTODEV,
[17] = DMACH_PCM1_RX, }, {
[18] = DMACH_PCM2_TX, .peri_id = (u8)DMACH_UART3_RX,
[19] = DMACH_PCM2_RX, .rqtype = DEVTOMEM,
[20] = DMACH_SPI1_TX, }, {
[21] = DMACH_SPI1_RX, .peri_id = (u8)DMACH_UART3_TX,
[22] = DMACH_USI_TX, .rqtype = MEMTODEV,
[23] = DMACH_USI_RX, }, {
[24] = DMACH_MAX, .peri_id = (u8)DMACH_UART4_RX,
[25] = DMACH_I2S1_TX, .rqtype = DEVTOMEM,
[26] = DMACH_I2S1_RX, }, {
[27] = DMACH_I2S2_TX, .peri_id = (u8)DMACH_UART4_TX,
[28] = DMACH_I2S2_RX, .rqtype = MEMTODEV,
[29] = DMACH_PWM, }, {
[30] = DMACH_UART5_RX, .peri_id = (u8)DMACH_PCM0_TX,
[31] = DMACH_UART5_TX, .rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_PCM0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_PCM1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_PCM1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_PCM2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_PCM2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_USI_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_USI_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_MAX,
}, {
.peri_id = (u8)DMACH_I2S1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_PWM,
}, {
.peri_id = (u8)DMACH_UART5_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART5_TX,
.rqtype = MEMTODEV,
}, },
}; };
static struct platform_device s5p64x0_device_pdma = { struct dma_pl330_platdata s5p6450_pdma_pdata = {
.name = "s3c-pl330", .nr_valid_peri = ARRAY_SIZE(s5p6450_pdma_peri),
.id = -1, .peri = s5p6450_pdma_peri,
.num_resources = ARRAY_SIZE(s5p64x0_pdma_resource), };
.resource = s5p64x0_pdma_resource,
.dev = { struct amba_device s5p64x0_device_pdma = {
.dev = {
.init_name = "dma-pl330",
.dma_mask = &dma_dmamask, .dma_mask = &dma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32), .coherent_dma_mask = DMA_BIT_MASK(32),
}, },
.res = {
.start = S5P64X0_PA_PDMA,
.end = S5P64X0_PA_PDMA + SZ_4K,
.flags = IORESOURCE_MEM,
},
.irq = {IRQ_DMA0, NO_IRQ},
.periphid = 0x00041330,
}; };
 static int __init s5p64x0_dma_init(void)
@@ -139,7 +232,7 @@ static int __init s5p64x0_dma_init(void)
 	else
 		s5p64x0_device_pdma.dev.platform_data = &s5p6440_pdma_pdata;

-	platform_device_register(&s5p64x0_device_pdma);
+	amba_device_register(&s5p64x0_device_pdma, &iomem_resource);

 	return 0;
 }
...
@@ -20,7 +20,7 @@
 #ifndef __MACH_DMA_H
 #define __MACH_DMA_H

-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+/* This platform uses the common common DMA API driver for PL330 */
+#include <plat/dma-pl330.h>

 #endif /* __MACH_DMA_H */
@@ -10,7 +10,7 @@ if ARCH_S5PC100
 config CPU_S5PC100
 	bool
 	select S5P_EXT_INT
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
 	help
 	  Enable S5PC100 CPU support
...
@@ -33,6 +33,11 @@ static struct clk s5p_clk_otgphy = {
 	.name = "otg_phy",
 };

+static struct clk dummy_apb_pclk = {
+	.name = "apb_pclk",
+	.id = -1,
+};
+
 static struct clk *clk_src_mout_href_list[] = {
 	[0] = &s5p_clk_27m,
 	[1] = &clk_fin_hpll,
@@ -454,13 +459,13 @@ static struct clk init_clocks_off[] = {
 		.enable = s5pc100_d1_0_ctrl,
 		.ctrlbit = (1 << 2),
 	}, {
-		.name = "pdma",
+		.name = "dma",
 		.devname = "s3c-pl330.1",
 		.parent = &clk_div_d1_bus.clk,
 		.enable = s5pc100_d1_0_ctrl,
 		.ctrlbit = (1 << 1),
 	}, {
-		.name = "pdma",
+		.name = "dma",
 		.devname = "s3c-pl330.0",
 		.parent = &clk_div_d1_bus.clk,
 		.enable = s5pc100_d1_0_ctrl,
@@ -1276,5 +1281,7 @@ void __init s5pc100_register_clocks(void)
 	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

+	s3c24xx_register_clock(&dummy_apb_pclk);
+
 	s3c_pwmclk_init();
 }
-/*
+/* linux/arch/arm/mach-s5pc100/dma.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
  * Copyright (C) 2010 Samsung Electronics Co. Ltd.
  *	Jaswinder Singh <jassi.brar@samsung.com>
  *
@@ -17,150 +21,245 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */

-#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>

+#include <asm/irq.h>
 #include <plat/devs.h>
+#include <plat/irqs.h>

 #include <mach/map.h>
 #include <mach/irqs.h>
+#include <mach/dma.h>

-#include <plat/s3c-pl330-pdata.h>

 static u64 dma_dmamask = DMA_BIT_MASK(32);
static struct resource s5pc100_pdma0_resource[] = { struct dma_pl330_peri pdma0_peri[30] = {
[0] = { {
.start = S5PC100_PA_PDMA0, .peri_id = (u8)DMACH_UART0_RX,
.end = S5PC100_PA_PDMA0 + SZ_4K, .rqtype = DEVTOMEM,
.flags = IORESOURCE_MEM, }, {
}, .peri_id = (u8)DMACH_UART0_TX,
[1] = { .rqtype = MEMTODEV,
.start = IRQ_PDMA0, }, {
.end = IRQ_PDMA0, .peri_id = (u8)DMACH_UART1_RX,
.flags = IORESOURCE_IRQ, .rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_UART2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_UART3_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART3_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = DMACH_IRDA,
}, {
.peri_id = (u8)DMACH_I2S0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S0S_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_AC97_MICIN,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_AC97_PCMIN,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_AC97_PCMOUT,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_EXTERNAL,
}, {
.peri_id = (u8)DMACH_PWM,
}, {
.peri_id = (u8)DMACH_SPDIF,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_HSI_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_HSI_TX,
.rqtype = MEMTODEV,
}, },
}; };
static struct s3c_pl330_platdata s5pc100_pdma0_pdata = { struct dma_pl330_platdata s5pc100_pdma0_pdata = {
.peri = { .nr_valid_peri = ARRAY_SIZE(pdma0_peri),
[0] = DMACH_UART0_RX, .peri = pdma0_peri,
[1] = DMACH_UART0_TX,
[2] = DMACH_UART1_RX,
[3] = DMACH_UART1_TX,
[4] = DMACH_UART2_RX,
[5] = DMACH_UART2_TX,
[6] = DMACH_UART3_RX,
[7] = DMACH_UART3_TX,
[8] = DMACH_IRDA,
[9] = DMACH_I2S0_RX,
[10] = DMACH_I2S0_TX,
[11] = DMACH_I2S0S_TX,
[12] = DMACH_I2S1_RX,
[13] = DMACH_I2S1_TX,
[14] = DMACH_I2S2_RX,
[15] = DMACH_I2S2_TX,
[16] = DMACH_SPI0_RX,
[17] = DMACH_SPI0_TX,
[18] = DMACH_SPI1_RX,
[19] = DMACH_SPI1_TX,
[20] = DMACH_SPI2_RX,
[21] = DMACH_SPI2_TX,
[22] = DMACH_AC97_MICIN,
[23] = DMACH_AC97_PCMIN,
[24] = DMACH_AC97_PCMOUT,
[25] = DMACH_EXTERNAL,
[26] = DMACH_PWM,
[27] = DMACH_SPDIF,
[28] = DMACH_HSI_RX,
[29] = DMACH_HSI_TX,
[30] = DMACH_MAX,
[31] = DMACH_MAX,
},
}; };
static struct platform_device s5pc100_device_pdma0 = { struct amba_device s5pc100_device_pdma0 = {
.name = "s3c-pl330", .dev = {
.id = 0, .init_name = "dma-pl330.0",
.num_resources = ARRAY_SIZE(s5pc100_pdma0_resource),
.resource = s5pc100_pdma0_resource,
.dev = {
.dma_mask = &dma_dmamask, .dma_mask = &dma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32), .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &s5pc100_pdma0_pdata, .platform_data = &s5pc100_pdma0_pdata,
}, },
}; .res = {
.start = S5PC100_PA_PDMA0,
static struct resource s5pc100_pdma1_resource[] = { .end = S5PC100_PA_PDMA0 + SZ_4K,
[0] = {
.start = S5PC100_PA_PDMA1,
.end = S5PC100_PA_PDMA1 + SZ_4K,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { .irq = {IRQ_PDMA0, NO_IRQ},
.start = IRQ_PDMA1, .periphid = 0x00041330,
.end = IRQ_PDMA1,
.flags = IORESOURCE_IRQ,
},
}; };
static struct s3c_pl330_platdata s5pc100_pdma1_pdata = { struct dma_pl330_peri pdma1_peri[30] = {
.peri = { {
[0] = DMACH_UART0_RX, .peri_id = (u8)DMACH_UART0_RX,
[1] = DMACH_UART0_TX, .rqtype = DEVTOMEM,
[2] = DMACH_UART1_RX, }, {
[3] = DMACH_UART1_TX, .peri_id = (u8)DMACH_UART0_TX,
[4] = DMACH_UART2_RX, .rqtype = MEMTODEV,
[5] = DMACH_UART2_TX, }, {
[6] = DMACH_UART3_RX, .peri_id = (u8)DMACH_UART1_RX,
[7] = DMACH_UART3_TX, .rqtype = DEVTOMEM,
[8] = DMACH_IRDA, }, {
[9] = DMACH_I2S0_RX, .peri_id = (u8)DMACH_UART1_TX,
[10] = DMACH_I2S0_TX, .rqtype = MEMTODEV,
[11] = DMACH_I2S0S_TX, }, {
[12] = DMACH_I2S1_RX, .peri_id = (u8)DMACH_UART2_RX,
[13] = DMACH_I2S1_TX, .rqtype = DEVTOMEM,
[14] = DMACH_I2S2_RX, }, {
[15] = DMACH_I2S2_TX, .peri_id = (u8)DMACH_UART2_TX,
[16] = DMACH_SPI0_RX, .rqtype = MEMTODEV,
[17] = DMACH_SPI0_TX, }, {
[18] = DMACH_SPI1_RX, .peri_id = (u8)DMACH_UART3_RX,
[19] = DMACH_SPI1_TX, .rqtype = DEVTOMEM,
[20] = DMACH_SPI2_RX, }, {
[21] = DMACH_SPI2_TX, .peri_id = (u8)DMACH_UART3_TX,
[22] = DMACH_PCM0_RX, .rqtype = MEMTODEV,
[23] = DMACH_PCM0_TX, }, {
[24] = DMACH_PCM1_RX, .peri_id = DMACH_IRDA,
[25] = DMACH_PCM1_TX, }, {
[26] = DMACH_MSM_REQ0, .peri_id = (u8)DMACH_I2S0_RX,
[27] = DMACH_MSM_REQ1, .rqtype = DEVTOMEM,
[28] = DMACH_MSM_REQ2, }, {
[29] = DMACH_MSM_REQ3, .peri_id = (u8)DMACH_I2S0_TX,
[30] = DMACH_MAX, .rqtype = MEMTODEV,
[31] = DMACH_MAX, }, {
.peri_id = (u8)DMACH_I2S0S_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_PCM0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_PCM1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_PCM1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_PCM1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_MSM_REQ0,
}, {
.peri_id = (u8)DMACH_MSM_REQ1,
}, {
.peri_id = (u8)DMACH_MSM_REQ2,
}, {
.peri_id = (u8)DMACH_MSM_REQ3,
}, },
}; };
static struct platform_device s5pc100_device_pdma1 = { struct dma_pl330_platdata s5pc100_pdma1_pdata = {
.name = "s3c-pl330", .nr_valid_peri = ARRAY_SIZE(pdma1_peri),
.id = 1, .peri = pdma1_peri,
.num_resources = ARRAY_SIZE(s5pc100_pdma1_resource), };
.resource = s5pc100_pdma1_resource,
.dev = { struct amba_device s5pc100_device_pdma1 = {
.dev = {
.init_name = "dma-pl330.1",
.dma_mask = &dma_dmamask, .dma_mask = &dma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32), .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &s5pc100_pdma1_pdata, .platform_data = &s5pc100_pdma1_pdata,
}, },
}; .res = {
.start = S5PC100_PA_PDMA1,
static struct platform_device *s5pc100_dmacs[] __initdata = { .end = S5PC100_PA_PDMA1 + SZ_4K,
&s5pc100_device_pdma0, .flags = IORESOURCE_MEM,
&s5pc100_device_pdma1, },
.irq = {IRQ_PDMA1, NO_IRQ},
.periphid = 0x00041330,
}; };
 static int __init s5pc100_dma_init(void)
 {
-	platform_add_devices(s5pc100_dmacs, ARRAY_SIZE(s5pc100_dmacs));
+	amba_device_register(&s5pc100_device_pdma0, &iomem_resource);

 	return 0;
 }
...
@@ -20,7 +20,7 @@
 #ifndef __MACH_DMA_H
 #define __MACH_DMA_H

-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+/* This platform uses the common DMA API driver for PL330 */
+#include <plat/dma-pl330.h>

 #endif /* __MACH_DMA_H */
@@ -11,7 +11,7 @@ if ARCH_S5PV210
 config CPU_S5PV210
 	bool
-	select S3C_PL330_DMA
+	select SAMSUNG_DMADEV
 	select S5P_EXT_INT
 	select S5P_HRT
 	help
...
@@ -203,6 +203,11 @@ static struct clk clk_pcmcdclk2 = {
 	.name = "pcmcdclk",
 };

+static struct clk dummy_apb_pclk = {
+	.name = "apb_pclk",
+	.id = -1,
+};
+
 static struct clk *clkset_vpllsrc_list[] = {
 	[0] = &clk_fin_vpll,
 	[1] = &clk_sclk_hdmi27m,
@@ -289,13 +294,13 @@ static struct clk_ops clk_fout_apll_ops = {
 static struct clk init_clocks_off[] = {
 	{
-		.name = "pdma",
+		.name = "dma",
 		.devname = "s3c-pl330.0",
 		.parent = &clk_hclk_psys.clk,
 		.enable = s5pv210_clk_ip0_ctrl,
 		.ctrlbit = (1 << 3),
 	}, {
-		.name = "pdma",
+		.name = "dma",
 		.devname = "s3c-pl330.1",
 		.parent = &clk_hclk_psys.clk,
 		.enable = s5pv210_clk_ip0_ctrl,
@@ -1159,5 +1164,6 @@ void __init s5pv210_register_clocks(void)
 	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
 	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

+	s3c24xx_register_clock(&dummy_apb_pclk);
 	s3c_pwmclk_init();
 }
-/*
+/* linux/arch/arm/mach-s5pv210/dma.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
  * Copyright (C) 2010 Samsung Electronics Co. Ltd.
  *	Jaswinder Singh <jassi.brar@samsung.com>
  *
@@ -17,151 +21,239 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */

-#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl330.h>

+#include <asm/irq.h>
 #include <plat/devs.h>
 #include <plat/irqs.h>

 #include <mach/map.h>
 #include <mach/irqs.h>
+#include <mach/dma.h>

-#include <plat/s3c-pl330-pdata.h>

 static u64 dma_dmamask = DMA_BIT_MASK(32);
static struct resource s5pv210_pdma0_resource[] = { struct dma_pl330_peri pdma0_peri[28] = {
[0] = { {
.start = S5PV210_PA_PDMA0, .peri_id = (u8)DMACH_UART0_RX,
.end = S5PV210_PA_PDMA0 + SZ_4K, .rqtype = DEVTOMEM,
.flags = IORESOURCE_MEM, }, {
}, .peri_id = (u8)DMACH_UART0_TX,
[1] = { .rqtype = MEMTODEV,
.start = IRQ_PDMA0, }, {
.end = IRQ_PDMA0, .peri_id = (u8)DMACH_UART1_RX,
.flags = IORESOURCE_IRQ, .rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_UART2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_UART3_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_UART3_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = DMACH_MAX,
}, {
.peri_id = (u8)DMACH_I2S0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S0S_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_MAX,
}, {
.peri_id = (u8)DMACH_MAX,
}, {
.peri_id = (u8)DMACH_SPI0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_MAX,
}, {
.peri_id = (u8)DMACH_MAX,
}, {
.peri_id = (u8)DMACH_AC97_MICIN,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_AC97_PCMIN,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_AC97_PCMOUT,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_MAX,
}, {
.peri_id = (u8)DMACH_PWM,
}, {
.peri_id = (u8)DMACH_SPDIF,
.rqtype = MEMTODEV,
}, },
}; };
static struct s3c_pl330_platdata s5pv210_pdma0_pdata = { struct dma_pl330_platdata s5pv210_pdma0_pdata = {
.peri = { .nr_valid_peri = ARRAY_SIZE(pdma0_peri),
[0] = DMACH_UART0_RX, .peri = pdma0_peri,
[1] = DMACH_UART0_TX,
[2] = DMACH_UART1_RX,
[3] = DMACH_UART1_TX,
[4] = DMACH_UART2_RX,
[5] = DMACH_UART2_TX,
[6] = DMACH_UART3_RX,
[7] = DMACH_UART3_TX,
[8] = DMACH_MAX,
[9] = DMACH_I2S0_RX,
[10] = DMACH_I2S0_TX,
[11] = DMACH_I2S0S_TX,
[12] = DMACH_I2S1_RX,
[13] = DMACH_I2S1_TX,
[14] = DMACH_MAX,
[15] = DMACH_MAX,
[16] = DMACH_SPI0_RX,
[17] = DMACH_SPI0_TX,
[18] = DMACH_SPI1_RX,
[19] = DMACH_SPI1_TX,
[20] = DMACH_MAX,
[21] = DMACH_MAX,
[22] = DMACH_AC97_MICIN,
[23] = DMACH_AC97_PCMIN,
[24] = DMACH_AC97_PCMOUT,
[25] = DMACH_MAX,
[26] = DMACH_PWM,
[27] = DMACH_SPDIF,
[28] = DMACH_MAX,
[29] = DMACH_MAX,
[30] = DMACH_MAX,
[31] = DMACH_MAX,
},
}; };
static struct platform_device s5pv210_device_pdma0 = { struct amba_device s5pv210_device_pdma0 = {
.name = "s3c-pl330", .dev = {
.id = 0, .init_name = "dma-pl330.0",
.num_resources = ARRAY_SIZE(s5pv210_pdma0_resource),
.resource = s5pv210_pdma0_resource,
.dev = {
.dma_mask = &dma_dmamask, .dma_mask = &dma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32), .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &s5pv210_pdma0_pdata, .platform_data = &s5pv210_pdma0_pdata,
}, },
}; .res = {
.start = S5PV210_PA_PDMA0,
static struct resource s5pv210_pdma1_resource[] = { .end = S5PV210_PA_PDMA0 + SZ_4K,
[0] = {
.start = S5PV210_PA_PDMA1,
.end = S5PV210_PA_PDMA1 + SZ_4K,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { .irq = {IRQ_PDMA0, NO_IRQ},
.start = IRQ_PDMA1, .periphid = 0x00041330,
.end = IRQ_PDMA1,
.flags = IORESOURCE_IRQ,
},
}; };
static struct s3c_pl330_platdata s5pv210_pdma1_pdata = { struct dma_pl330_peri pdma1_peri[32] = {
.peri = { {
[0] = DMACH_UART0_RX, .peri_id = (u8)DMACH_UART0_RX,
[1] = DMACH_UART0_TX, .rqtype = DEVTOMEM,
[2] = DMACH_UART1_RX, }, {
[3] = DMACH_UART1_TX, .peri_id = (u8)DMACH_UART0_TX,
[4] = DMACH_UART2_RX, .rqtype = MEMTODEV,
[5] = DMACH_UART2_TX, }, {
[6] = DMACH_UART3_RX, .peri_id = (u8)DMACH_UART1_RX,
[7] = DMACH_UART3_TX, .rqtype = DEVTOMEM,
[8] = DMACH_MAX, }, {
[9] = DMACH_I2S0_RX, .peri_id = (u8)DMACH_UART1_TX,
[10] = DMACH_I2S0_TX, .rqtype = MEMTODEV,
[11] = DMACH_I2S0S_TX, }, {
[12] = DMACH_I2S1_RX, .peri_id = (u8)DMACH_UART2_RX,
[13] = DMACH_I2S1_TX, .rqtype = DEVTOMEM,
[14] = DMACH_I2S2_RX, }, {
[15] = DMACH_I2S2_TX, .peri_id = (u8)DMACH_UART2_TX,
[16] = DMACH_SPI0_RX, .rqtype = MEMTODEV,
[17] = DMACH_SPI0_TX, }, {
[18] = DMACH_SPI1_RX, .peri_id = (u8)DMACH_UART3_RX,
[19] = DMACH_SPI1_TX, .rqtype = DEVTOMEM,
[20] = DMACH_MAX, }, {
[21] = DMACH_MAX, .peri_id = (u8)DMACH_UART3_TX,
[22] = DMACH_PCM0_RX, .rqtype = MEMTODEV,
[23] = DMACH_PCM0_TX, }, {
[24] = DMACH_PCM1_RX, .peri_id = DMACH_MAX,
[25] = DMACH_PCM1_TX, }, {
[26] = DMACH_MSM_REQ0, .peri_id = (u8)DMACH_I2S0_RX,
[27] = DMACH_MSM_REQ1, .rqtype = DEVTOMEM,
[28] = DMACH_MSM_REQ2, }, {
[29] = DMACH_MSM_REQ3, .peri_id = (u8)DMACH_I2S0_TX,
[30] = DMACH_PCM2_RX, .rqtype = MEMTODEV,
[31] = DMACH_PCM2_TX, }, {
.peri_id = (u8)DMACH_I2S0S_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_I2S2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_I2S2_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_SPI1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_SPI1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_MAX,
}, {
.peri_id = (u8)DMACH_MAX,
}, {
.peri_id = (u8)DMACH_PCM0_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_PCM0_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_PCM1_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_PCM1_TX,
.rqtype = MEMTODEV,
}, {
.peri_id = (u8)DMACH_MSM_REQ0,
}, {
.peri_id = (u8)DMACH_MSM_REQ1,
}, {
.peri_id = (u8)DMACH_MSM_REQ2,
}, {
.peri_id = (u8)DMACH_MSM_REQ3,
}, {
.peri_id = (u8)DMACH_PCM2_RX,
.rqtype = DEVTOMEM,
}, {
.peri_id = (u8)DMACH_PCM2_TX,
.rqtype = MEMTODEV,
}, },
}; };
static struct platform_device s5pv210_device_pdma1 = { struct dma_pl330_platdata s5pv210_pdma1_pdata = {
.name = "s3c-pl330", .nr_valid_peri = ARRAY_SIZE(pdma1_peri),
.id = 1, .peri = pdma1_peri,
.num_resources = ARRAY_SIZE(s5pv210_pdma1_resource), };
.resource = s5pv210_pdma1_resource,
.dev = { struct amba_device s5pv210_device_pdma1 = {
.dev = {
.init_name = "dma-pl330.1",
.dma_mask = &dma_dmamask, .dma_mask = &dma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32), .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &s5pv210_pdma1_pdata, .platform_data = &s5pv210_pdma1_pdata,
}, },
}; .res = {
.start = S5PV210_PA_PDMA1,
static struct platform_device *s5pv210_dmacs[] __initdata = { .end = S5PV210_PA_PDMA1 + SZ_4K,
&s5pv210_device_pdma0, .flags = IORESOURCE_MEM,
&s5pv210_device_pdma1, },
.irq = {IRQ_PDMA1, NO_IRQ},
.periphid = 0x00041330,
}; };
 static int __init s5pv210_dma_init(void)
 {
-	platform_add_devices(s5pv210_dmacs, ARRAY_SIZE(s5pv210_dmacs));
+	amba_device_register(&s5pv210_device_pdma0, &iomem_resource);

 	return 0;
 }
...
@@ -20,7 +20,7 @@
 #ifndef __MACH_DMA_H
 #define __MACH_DMA_H

-/* This platform uses the common S3C DMA API driver for PL330 */
-#include <plat/s3c-dma-pl330.h>
+/* This platform uses the common DMA API driver for PL330 */
+#include <plat/dma-pl330.h>

 #endif /* __MACH_DMA_H */
@@ -1094,14 +1094,14 @@ EXPORT_SYMBOL(s3c2410_dma_config);
 *
 * configure the dma source/destination hardware type and address
 *
- * source:    S3C2410_DMASRC_HW: source is hardware
- *            S3C2410_DMASRC_MEM: source is memory
+ * source:    DMA_FROM_DEVICE: source is hardware
+ *            DMA_TO_DEVICE: source is memory
 *
 * devaddr:   physical address of the source
 */

 int s3c2410_dma_devconfig(enum dma_ch channel,
-			  enum s3c2410_dmasrc source,
+			  enum dma_data_direction source,
 			  unsigned long devaddr)
 {
 	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -1131,7 +1131,7 @@ int s3c2410_dma_devconfig(enum dma_ch channel,
 		hwcfg |= S3C2410_DISRCC_INC;

 	switch (source) {
-	case S3C2410_DMASRC_HW:
+	case DMA_FROM_DEVICE:
 		/* source is hardware */
 		pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n",
 			__func__, devaddr, hwcfg);
@@ -1142,7 +1142,7 @@ int s3c2410_dma_devconfig(enum dma_ch channel,
 		chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST);
 		break;

-	case S3C2410_DMASRC_MEM:
+	case DMA_TO_DEVICE:
 		/* source is memory */
 		pr_debug("%s: mem source, devaddr=%08lx, hwcfg=%d\n",
 			__func__, devaddr, hwcfg);
...
@@ -295,11 +295,14 @@ config S3C_DMA
 	help
 	  Internal configuration for S3C DMA core

-config S3C_PL330_DMA
+config SAMSUNG_DMADEV
 	bool
-	select PL330
+	select DMADEVICES
+	select PL330_DMA if (CPU_EXYNOS4210 || CPU_S5PV210 || CPU_S5PC100 || \
+					CPU_S5P6450 || CPU_S5P6440)
+	select ARM_AMBA
 	help
-	  S3C DMA API Driver for PL330 DMAC.
+	  Use DMA device engine for PL330 DMAC.

 comment "Power management"
...
@@ -62,9 +62,9 @@ obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT) += dev-backlight.o

 # DMA support

-obj-$(CONFIG_S3C_DMA) += dma.o
-obj-$(CONFIG_S3C_PL330_DMA) += s3c-pl330.o
+obj-$(CONFIG_S3C_DMA) += dma.o s3c-dma-ops.o
+obj-$(CONFIG_SAMSUNG_DMADEV) += dma-ops.o

 # PM support
...
/* linux/arch/arm/plat-samsung/dma-ops.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung DMA Operations
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <mach/dma.h>
static inline bool pl330_filter(struct dma_chan *chan, void *param)
{
struct dma_pl330_peri *peri = chan->private;
return peri->peri_id == (unsigned)param;
}
static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
struct samsung_dma_info *info)
{
struct dma_chan *chan;
dma_cap_mask_t mask;
struct dma_slave_config slave_config;
dma_cap_zero(mask);
dma_cap_set(info->cap, mask);
chan = dma_request_channel(mask, pl330_filter, (void *)dma_ch);
if (info->direction == DMA_FROM_DEVICE) {
memset(&slave_config, 0, sizeof(struct dma_slave_config));
slave_config.direction = info->direction;
slave_config.src_addr = info->fifo;
slave_config.src_addr_width = info->width;
slave_config.src_maxburst = 1;
dmaengine_slave_config(chan, &slave_config);
} else if (info->direction == DMA_TO_DEVICE) {
memset(&slave_config, 0, sizeof(struct dma_slave_config));
slave_config.direction = info->direction;
slave_config.dst_addr = info->fifo;
slave_config.dst_addr_width = info->width;
slave_config.dst_maxburst = 1;
dmaengine_slave_config(chan, &slave_config);
}
return (unsigned)chan;
}
static int samsung_dmadev_release(unsigned ch,
struct s3c2410_dma_client *client)
{
dma_release_channel((struct dma_chan *)ch);
return 0;
}
static int samsung_dmadev_prepare(unsigned ch,
struct samsung_dma_prep_info *info)
{
struct scatterlist sg;
struct dma_chan *chan = (struct dma_chan *)ch;
struct dma_async_tx_descriptor *desc;
switch (info->cap) {
case DMA_SLAVE:
sg_init_table(&sg, 1);
sg_dma_len(&sg) = info->len;
sg_set_page(&sg, pfn_to_page(PFN_DOWN(info->buf)),
info->len, offset_in_page(info->buf));
sg_dma_address(&sg) = info->buf;
desc = chan->device->device_prep_slave_sg(chan,
&sg, 1, info->direction, DMA_PREP_INTERRUPT);
break;
case DMA_CYCLIC:
desc = chan->device->device_prep_dma_cyclic(chan,
info->buf, info->len, info->period, info->direction);
break;
default:
dev_err(&chan->dev->device, "unsupported format\n");
return -EFAULT;
}
if (!desc) {
dev_err(&chan->dev->device, "cannot prepare cyclic dma\n");
return -EFAULT;
}
desc->callback = info->fp;
desc->callback_param = info->fp_param;
dmaengine_submit((struct dma_async_tx_descriptor *)desc);
return 0;
}
static inline int samsung_dmadev_trigger(unsigned ch)
{
dma_async_issue_pending((struct dma_chan *)ch);
return 0;
}
static inline int samsung_dmadev_flush(unsigned ch)
{
return dmaengine_terminate_all((struct dma_chan *)ch);
}
struct samsung_dma_ops dmadev_ops = {
.request = samsung_dmadev_request,
.release = samsung_dmadev_release,
.prepare = samsung_dmadev_prepare,
.trigger = samsung_dmadev_trigger,
.started = NULL,
.flush = samsung_dmadev_flush,
.stop = samsung_dmadev_flush,
};
void *samsung_dmadev_get_ops(void)
{
return &dmadev_ops;
}
EXPORT_SYMBOL(samsung_dmadev_get_ops);
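
For reference, a hypothetical client-side sketch (not part of this series) of how a driver can reach a specific PL330 peripheral channel directly through the dmaengine API, using the same matching rule as pl330_filter() above; DMACH_UART0_TX and the example_ names are placeholders:

#include <linux/dmaengine.h>
#include <linux/amba/pl330.h>
#include <mach/dma.h>

/* compare the channel's peri_id against the requested enum dma_ch value */
static bool example_pl330_filter(struct dma_chan *chan, void *param)
{
	struct dma_pl330_peri *peri = chan->private;

	return peri->peri_id == (unsigned)param;
}

static struct dma_chan *example_request_tx_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, example_pl330_filter,
				   (void *)DMACH_UART0_TX);
}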
/* arch/arm/plat-samsung/include/plat/dma-ops.h
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung DMA support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __SAMSUNG_DMA_OPS_H_
#define __SAMSUNG_DMA_OPS_H_ __FILE__
#include <linux/dmaengine.h>
struct samsung_dma_prep_info {
enum dma_transaction_type cap;
enum dma_data_direction direction;
dma_addr_t buf;
unsigned long period;
unsigned long len;
void (*fp)(void *data);
void *fp_param;
};
struct samsung_dma_info {
enum dma_transaction_type cap;
enum dma_data_direction direction;
enum dma_slave_buswidth width;
dma_addr_t fifo;
struct s3c2410_dma_client *client;
};
struct samsung_dma_ops {
unsigned (*request)(enum dma_ch ch, struct samsung_dma_info *info);
int (*release)(unsigned ch, struct s3c2410_dma_client *client);
int (*prepare)(unsigned ch, struct samsung_dma_prep_info *info);
int (*trigger)(unsigned ch);
int (*started)(unsigned ch);
int (*flush)(unsigned ch);
int (*stop)(unsigned ch);
};
extern void *samsung_dmadev_get_ops(void);
extern void *s3c_dma_get_ops(void);
static inline void *__samsung_dma_get_ops(void)
{
if (samsung_dma_is_dmadev())
return samsung_dmadev_get_ops();
else
return s3c_dma_get_ops();
}
/*
* samsung_dma_get_ops
* get the set of samsung dma operations
*/
#define samsung_dma_get_ops() __samsung_dma_get_ops()
#endif /* __SAMSUNG_DMA_OPS_H_ */
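
A hypothetical usage sketch for the interface above (the example_ identifiers and the UART TX channel choice are illustrative, not taken from this series); it shows the request/prepare/trigger sequence that clients such as the SPI and ASoC drivers converted in this merge follow:

static struct s3c2410_dma_client example_client = { .name = "example" };
static struct samsung_dma_ops *example_ops;
static unsigned example_ch;

static void example_xfer_done(void *param)
{
	/* completion callback, invoked by the DMA driver */
}

static int example_start_tx(dma_addr_t buf, unsigned long len, dma_addr_t fifo)
{
	struct samsung_dma_info info;
	struct samsung_dma_prep_info prep;

	/* resolves to dmadev_ops or the legacy s3c-dma ops at run time */
	example_ops = samsung_dma_get_ops();

	info.cap = DMA_SLAVE;
	info.direction = DMA_TO_DEVICE;
	info.width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	info.fifo = fifo;
	info.client = &example_client;

	example_ch = example_ops->request(DMACH_UART0_TX, &info);
	if (!example_ch)
		return -EBUSY;

	prep.cap = DMA_SLAVE;
	prep.direction = DMA_TO_DEVICE;
	prep.buf = buf;
	prep.len = len;
	prep.period = 0;
	prep.fp = example_xfer_done;
	prep.fp_param = NULL;

	example_ops->prepare(example_ch, &prep);
	return example_ops->trigger(example_ch);
}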
@@ -8,11 +8,8 @@
 * (at your option) any later version.
 */

-#ifndef __S3C_DMA_PL330_H_
-#define __S3C_DMA_PL330_H_
+#ifndef __DMA_PL330_H_
+#define __DMA_PL330_H_ __FILE__

-#define S3C2410_DMAF_AUTOSTART (1 << 0)
-#define S3C2410_DMAF_CIRCULAR (1 << 1)
-
 /*
 * PL330 can assign any channel to communicate with
@@ -20,7 +17,7 @@
 * For the sake of consistency across client drivers,
 * We keep the channel names unchanged and only add
 * missing peripherals are added.
- * Order is not important since S3C PL330 API driver
+ * Order is not important since DMA PL330 API driver
 * use these just as IDs.
 */
 enum dma_ch {
@@ -88,11 +85,20 @@ enum dma_ch {
 	DMACH_MAX,
 };

-static inline bool s3c_dma_has_circular(void)
+struct s3c2410_dma_client {
+	char *name;
+};
+
+static inline bool samsung_dma_has_circular(void)
+{
+	return true;
+}
+
+static inline bool samsung_dma_is_dmadev(void)
 {
 	return true;
 }

-#include <plat/dma.h>
+#include <plat/dma-ops.h>

-#endif	/* __S3C_DMA_PL330_H_ */
+#endif	/* __DMA_PL330_H_ */
@@ -41,7 +41,7 @@ struct s3c24xx_dma_selection {

 	void (*direction)(struct s3c2410_dma_chan *chan,
 			  struct s3c24xx_dma_map *map,
-			  enum s3c2410_dmasrc dir);
+			  enum dma_data_direction dir);
 };

 extern int s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel);
...
...@@ -10,17 +10,14 @@ ...@@ -10,17 +10,14 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#include <linux/dma-mapping.h>
enum s3c2410_dma_buffresult { enum s3c2410_dma_buffresult {
S3C2410_RES_OK, S3C2410_RES_OK,
S3C2410_RES_ERR, S3C2410_RES_ERR,
S3C2410_RES_ABORT S3C2410_RES_ABORT
}; };
enum s3c2410_dmasrc {
S3C2410_DMASRC_HW, /* source is hardware */
S3C2410_DMASRC_MEM /* source is memory */
};
/* enum s3c2410_chan_op /* enum s3c2410_chan_op
* *
* operation codes passed to the DMA code by the user, and also used * operation codes passed to the DMA code by the user, and also used
...@@ -112,7 +109,7 @@ extern int s3c2410_dma_config(enum dma_ch channel, int xferunit); ...@@ -112,7 +109,7 @@ extern int s3c2410_dma_config(enum dma_ch channel, int xferunit);
*/ */
extern int s3c2410_dma_devconfig(enum dma_ch channel, extern int s3c2410_dma_devconfig(enum dma_ch channel,
enum s3c2410_dmasrc source, unsigned long devaddr); enum dma_data_direction source, unsigned long devaddr);
/* s3c2410_dma_getposition /* s3c2410_dma_getposition
* *
...@@ -126,3 +123,4 @@ extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn); ...@@ -126,3 +123,4 @@ extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn); extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
#include <plat/dma-ops.h>
/* linux/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h
*
* Copyright (C) 2010 Samsung Electronics Co. Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __S3C_PL330_PDATA_H
#define __S3C_PL330_PDATA_H
#include <plat/s3c-dma-pl330.h>
/*
* Every PL330 DMAC has max 32 peripheral interfaces,
* of which some may not actually be used in your
* DMAC's configuration.
* Populate this array of 32 peri i/fs with relevant
* channel IDs for used peri i/f and DMACH_MAX for
* those unused.
*
* The platforms just need to provide this info
* to the S3C DMA API driver for PL330.
*/
struct s3c_pl330_platdata {
enum dma_ch peri[32];
};
#endif /* __S3C_PL330_PDATA_H */
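A hedged illustration of the comment above, in the style of the board files that used this (now being removed) interface: populate the peripheral interfaces that are actually wired and mark the rest with DMACH_MAX. The channel names are assumed examples from enum dma_ch; the [4 ... 31] range initializer is the GCC extension commonly used for this in the kernel.

#include <plat/s3c-pl330-pdata.h>

static struct s3c_pl330_platdata example_pdma0_pdata = {
	.peri = {
		[0] = DMACH_UART0_RX,
		[1] = DMACH_UART0_TX,
		[2] = DMACH_I2S0_RX,
		[3] = DMACH_I2S0_TX,
		[4 ... 31] = DMACH_MAX,	/* unused peripheral interfaces */
	},
};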
/* linux/arch/arm/plat-samsung/s3c-dma-ops.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung S3C-DMA Operations
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <mach/dma.h>
struct cb_data {
void (*fp) (void *);
void *fp_param;
unsigned ch;
struct list_head node;
};
static LIST_HEAD(dma_list);
static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param,
int size, enum s3c2410_dma_buffresult res)
{
struct cb_data *data = param;
data->fp(data->fp_param);
}
static unsigned s3c_dma_request(enum dma_ch dma_ch,
struct samsung_dma_info *info)
{
struct cb_data *data;
if (s3c2410_dma_request(dma_ch, info->client, NULL) < 0) {
s3c2410_dma_free(dma_ch, info->client);
return 0;
}
data = kzalloc(sizeof(struct cb_data), GFP_KERNEL);
data->ch = dma_ch;
list_add_tail(&data->node, &dma_list);
s3c2410_dma_devconfig(dma_ch, info->direction, info->fifo);
if (info->cap == DMA_CYCLIC)
s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR);
s3c2410_dma_config(dma_ch, info->width);
return (unsigned)dma_ch;
}
static int s3c_dma_release(unsigned ch, struct s3c2410_dma_client *client)
{
struct cb_data *data;
list_for_each_entry(data, &dma_list, node)
if (data->ch == ch)
break;
list_del(&data->node);
s3c2410_dma_free(ch, client);
kfree(data);
return 0;
}
static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep_info *info)
{
struct cb_data *data;
int len = (info->cap == DMA_CYCLIC) ? info->period : info->len;
list_for_each_entry(data, &dma_list, node)
if (data->ch == ch)
break;
if (!data->fp) {
s3c2410_dma_set_buffdone_fn(ch, s3c_dma_cb);
data->fp = info->fp;
data->fp_param = info->fp_param;
}
s3c2410_dma_enqueue(ch, (void *)data, info->buf, len);
return 0;
}
static inline int s3c_dma_trigger(unsigned ch)
{
return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START);
}
static inline int s3c_dma_started(unsigned ch)
{
return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STARTED);
}
static inline int s3c_dma_flush(unsigned ch)
{
return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH);
}
static inline int s3c_dma_stop(unsigned ch)
{
return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP);
}
static struct samsung_dma_ops s3c_dma_ops = {
.request = s3c_dma_request,
.release = s3c_dma_release,
.prepare = s3c_dma_prepare,
.trigger = s3c_dma_trigger,
.started = s3c_dma_started,
.flush = s3c_dma_flush,
.stop = s3c_dma_stop,
};
void *s3c_dma_get_ops(void)
{
return &s3c_dma_ops;
}
EXPORT_SYMBOL(s3c_dma_get_ops);
This diff has been collapsed.
...@@ -193,7 +193,8 @@ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL ...@@ -193,7 +193,8 @@ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
config PL330_DMA config PL330_DMA
tristate "DMA API Driver for PL330" tristate "DMA API Driver for PL330"
select DMA_ENGINE select DMA_ENGINE
depends on PL330 depends on ARM_AMBA
select PL330
help help
Select if your platform has one or more PL330 DMACs. Select if your platform has one or more PL330 DMACs.
You need to provide platform specific settings via You need to provide platform specific settings via
......
This diff has been collapsed.
...@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) ...@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{ {
struct at_desc *desc, *_desc; struct at_desc *desc, *_desc;
struct at_desc *ret = NULL; struct at_desc *ret = NULL;
unsigned long flags;
unsigned int i = 0; unsigned int i = 0;
LIST_HEAD(tmp_list); LIST_HEAD(tmp_list);
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
i++; i++;
if (async_tx_test_ack(&desc->txd)) { if (async_tx_test_ack(&desc->txd)) {
...@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) ...@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
dev_dbg(chan2dev(&atchan->chan_common), dev_dbg(chan2dev(&atchan->chan_common),
"desc %p not ACKed\n", desc); "desc %p not ACKed\n", desc);
} }
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
dev_vdbg(chan2dev(&atchan->chan_common), dev_vdbg(chan2dev(&atchan->chan_common),
"scanned %u descriptors on freelist\n", i); "scanned %u descriptors on freelist\n", i);
...@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) ...@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
if (!ret) { if (!ret) {
ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
if (ret) { if (ret) {
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated++; atchan->descs_allocated++;
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} else { } else {
dev_err(chan2dev(&atchan->chan_common), dev_err(chan2dev(&atchan->chan_common),
"not enough descriptors available\n"); "not enough descriptors available\n");
...@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) ...@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{ {
if (desc) { if (desc) {
struct at_desc *child; struct at_desc *child;
unsigned long flags;
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node) list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&atchan->chan_common), dev_vdbg(chan2dev(&atchan->chan_common),
"moving child desc %p to freelist\n", "moving child desc %p to freelist\n",
...@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) ...@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
dev_vdbg(chan2dev(&atchan->chan_common), dev_vdbg(chan2dev(&atchan->chan_common),
"moving desc %p to freelist\n", desc); "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &atchan->free_list); list_add(&desc->desc_node, &atchan->free_list);
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} }
} }
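The at_hdmac.c hunks in this file replace spin_lock_bh()/spin_unlock_bh() with the irqsave variants. A generic sketch of that locking pattern (all names hypothetical): the _bh variants only block softirqs, so a lock that may also be taken from hard-IRQ context, or from callers that already run with interrupts disabled, has to use spin_lock_irqsave(), which saves the previous local interrupt state in 'flags' and restores it on unlock.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

/* Hypothetical data protected by a lock that is also taken from an ISR. */
static DEFINE_SPINLOCK(example_lock);
static unsigned int example_pending;

static void example_queue_work(void)
{
	unsigned long flags;

	/* Disables local IRQs and remembers their previous state in 'flags',
	 * so this is safe whether the caller runs in process, softirq or
	 * hard-IRQ context. */
	spin_lock_irqsave(&example_lock, flags);
	example_pending++;
	spin_unlock_irqrestore(&example_lock, flags);
}

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	if (example_pending)
		example_pending--;
	spin_unlock_irqrestore(&example_lock, flags);
	return IRQ_HANDLED;
}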
...@@ -299,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) ...@@ -299,7 +301,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* for cyclic transfers, /* for cyclic transfers,
* no need to replay callback function while stopping */ * no need to replay callback function while stopping */
if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) { if (!atc_chan_is_cyclic(atchan)) {
dma_async_tx_callback callback = txd->callback; dma_async_tx_callback callback = txd->callback;
void *param = txd->callback_param; void *param = txd->callback_param;
...@@ -471,16 +473,17 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan) ...@@ -471,16 +473,17 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
static void atc_tasklet(unsigned long data) static void atc_tasklet(unsigned long data)
{ {
struct at_dma_chan *atchan = (struct at_dma_chan *)data; struct at_dma_chan *atchan = (struct at_dma_chan *)data;
unsigned long flags;
spin_lock(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
atc_handle_error(atchan); atc_handle_error(atchan);
else if (test_bit(ATC_IS_CYCLIC, &atchan->status)) else if (atc_chan_is_cyclic(atchan))
atc_handle_cyclic(atchan); atc_handle_cyclic(atchan);
else else
atc_advance_work(atchan); atc_advance_work(atchan);
spin_unlock(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} }
static irqreturn_t at_dma_interrupt(int irq, void *dev_id) static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
...@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) ...@@ -539,8 +542,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
struct at_desc *desc = txd_to_at_desc(tx); struct at_desc *desc = txd_to_at_desc(tx);
struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
dma_cookie_t cookie; dma_cookie_t cookie;
unsigned long flags;
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
cookie = atc_assign_cookie(atchan, desc); cookie = atc_assign_cookie(atchan, desc);
if (list_empty(&atchan->active_list)) { if (list_empty(&atchan->active_list)) {
...@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) ...@@ -554,7 +558,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&desc->desc_node, &atchan->queue); list_add_tail(&desc->desc_node, &atchan->queue);
} }
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
return cookie; return cookie;
} }
...@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, ...@@ -927,28 +931,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device); struct at_dma *atdma = to_at_dma(chan->device);
int chan_id = atchan->chan_common.chan_id; int chan_id = atchan->chan_common.chan_id;
unsigned long flags;
LIST_HEAD(list); LIST_HEAD(list);
dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
if (cmd == DMA_PAUSE) { if (cmd == DMA_PAUSE) {
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
set_bit(ATC_IS_PAUSED, &atchan->status); set_bit(ATC_IS_PAUSED, &atchan->status);
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_RESUME) { } else if (cmd == DMA_RESUME) {
if (!test_bit(ATC_IS_PAUSED, &atchan->status)) if (!atc_chan_is_paused(atchan))
return 0; return 0;
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
clear_bit(ATC_IS_PAUSED, &atchan->status); clear_bit(ATC_IS_PAUSED, &atchan->status);
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_TERMINATE_ALL) { } else if (cmd == DMA_TERMINATE_ALL) {
struct at_desc *desc, *_desc; struct at_desc *desc, *_desc;
/* /*
...@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, ...@@ -957,7 +962,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* channel. We still have to poll the channel enable bit due * channel. We still have to poll the channel enable bit due
* to AHB/HSB limitations. * to AHB/HSB limitations.
*/ */
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
/* disabling channel: must also remove suspend state */ /* disabling channel: must also remove suspend state */
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
...@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, ...@@ -978,7 +983,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
/* if channel dedicated to cyclic operations, free it */ /* if channel dedicated to cyclic operations, free it */
clear_bit(ATC_IS_CYCLIC, &atchan->status); clear_bit(ATC_IS_CYCLIC, &atchan->status);
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} else { } else {
return -ENXIO; return -ENXIO;
} }
...@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan, ...@@ -1004,9 +1009,10 @@ atc_tx_status(struct dma_chan *chan,
struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_chan *atchan = to_at_dma_chan(chan);
dma_cookie_t last_used; dma_cookie_t last_used;
dma_cookie_t last_complete; dma_cookie_t last_complete;
unsigned long flags;
enum dma_status ret; enum dma_status ret;
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
last_complete = atchan->completed_cookie; last_complete = atchan->completed_cookie;
last_used = chan->cookie; last_used = chan->cookie;
...@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan, ...@@ -1021,7 +1027,7 @@ atc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used); ret = dma_async_is_complete(cookie, last_complete, last_used);
} }
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
if (ret != DMA_SUCCESS) if (ret != DMA_SUCCESS)
dma_set_tx_state(txstate, last_complete, last_used, dma_set_tx_state(txstate, last_complete, last_used,
...@@ -1029,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan, ...@@ -1029,7 +1035,7 @@ atc_tx_status(struct dma_chan *chan,
else else
dma_set_tx_state(txstate, last_complete, last_used, 0); dma_set_tx_state(txstate, last_complete, last_used, 0);
if (test_bit(ATC_IS_PAUSED, &atchan->status)) if (atc_chan_is_paused(atchan))
ret = DMA_PAUSED; ret = DMA_PAUSED;
dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
...@@ -1046,18 +1052,19 @@ atc_tx_status(struct dma_chan *chan, ...@@ -1046,18 +1052,19 @@ atc_tx_status(struct dma_chan *chan,
static void atc_issue_pending(struct dma_chan *chan) static void atc_issue_pending(struct dma_chan *chan)
{ {
struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_chan *atchan = to_at_dma_chan(chan);
unsigned long flags;
dev_vdbg(chan2dev(chan), "issue_pending\n"); dev_vdbg(chan2dev(chan), "issue_pending\n");
/* Not needed for cyclic transfers */ /* Not needed for cyclic transfers */
if (test_bit(ATC_IS_CYCLIC, &atchan->status)) if (atc_chan_is_cyclic(atchan))
return; return;
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
if (!atc_chan_is_enabled(atchan)) { if (!atc_chan_is_enabled(atchan)) {
atc_advance_work(atchan); atc_advance_work(atchan);
} }
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
} }
/** /**
...@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) ...@@ -1073,6 +1080,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
struct at_dma *atdma = to_at_dma(chan->device); struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc; struct at_desc *desc;
struct at_dma_slave *atslave; struct at_dma_slave *atslave;
unsigned long flags;
int i; int i;
u32 cfg; u32 cfg;
LIST_HEAD(tmp_list); LIST_HEAD(tmp_list);
...@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan) ...@@ -1116,11 +1124,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &tmp_list); list_add_tail(&desc->desc_node, &tmp_list);
} }
spin_lock_bh(&atchan->lock); spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated = i; atchan->descs_allocated = i;
list_splice(&tmp_list, &atchan->free_list); list_splice(&tmp_list, &atchan->free_list);
atchan->completed_cookie = chan->cookie = 1; atchan->completed_cookie = chan->cookie = 1;
spin_unlock_bh(&atchan->lock); spin_unlock_irqrestore(&atchan->lock, flags);
/* channel parameters */ /* channel parameters */
channel_writel(atchan, CFG, cfg); channel_writel(atchan, CFG, cfg);
...@@ -1260,12 +1268,11 @@ static int __init at_dma_probe(struct platform_device *pdev) ...@@ -1260,12 +1268,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* initialize channels related values */ /* initialize channels related values */
INIT_LIST_HEAD(&atdma->dma_common.channels); INIT_LIST_HEAD(&atdma->dma_common.channels);
for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) { for (i = 0; i < pdata->nr_channels; i++) {
struct at_dma_chan *atchan = &atdma->chan[i]; struct at_dma_chan *atchan = &atdma->chan[i];
atchan->chan_common.device = &atdma->dma_common; atchan->chan_common.device = &atdma->dma_common;
atchan->chan_common.cookie = atchan->completed_cookie = 1; atchan->chan_common.cookie = atchan->completed_cookie = 1;
atchan->chan_common.chan_id = i;
list_add_tail(&atchan->chan_common.device_node, list_add_tail(&atchan->chan_common.device_node,
&atdma->dma_common.channels); &atdma->dma_common.channels);
...@@ -1293,22 +1300,20 @@ static int __init at_dma_probe(struct platform_device *pdev) ...@@ -1293,22 +1300,20 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
/* controller can do slave DMA: can trigger cyclic transfers */
if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
atdma->dma_common.device_control = atc_control; atdma->dma_common.device_control = atc_control;
}
dma_writel(atdma, EN, AT_DMA_ENABLE); dma_writel(atdma, EN, AT_DMA_ENABLE);
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
atdma->dma_common.chancnt); pdata->nr_channels);
dma_async_device_register(&atdma->dma_common); dma_async_device_register(&atdma->dma_common);
...@@ -1377,27 +1382,112 @@ static void at_dma_shutdown(struct platform_device *pdev) ...@@ -1377,27 +1382,112 @@ static void at_dma_shutdown(struct platform_device *pdev)
clk_disable(atdma->clk); clk_disable(atdma->clk);
} }
static int at_dma_prepare(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev);
struct dma_chan *chan, *_chan;
list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
device_node) {
struct at_dma_chan *atchan = to_at_dma_chan(chan);
/* wait for transaction completion (except in cyclic case) */
if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
return -EAGAIN;
}
return 0;
}
static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
struct dma_chan *chan = &atchan->chan_common;
/* Channel should be paused by the user;
* do it anyway if that has not been done already */
if (!atc_chan_is_paused(atchan)) {
dev_warn(chan2dev(chan),
"cyclic channel not paused, should be done by channel user\n");
atc_control(chan, DMA_PAUSE, 0);
}
/* now preserve additional data for cyclic operations */
/* next descriptor address in the cyclic list */
atchan->save_dscr = channel_readl(atchan, DSCR);
vdbg_dump_regs(atchan);
}
static int at_dma_suspend_noirq(struct device *dev) static int at_dma_suspend_noirq(struct device *dev)
{ {
struct platform_device *pdev = to_platform_device(dev); struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev); struct at_dma *atdma = platform_get_drvdata(pdev);
struct dma_chan *chan, *_chan;
at_dma_off(platform_get_drvdata(pdev)); /* preserve data */
list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
device_node) {
struct at_dma_chan *atchan = to_at_dma_chan(chan);
if (atc_chan_is_cyclic(atchan))
atc_suspend_cyclic(atchan);
atchan->save_cfg = channel_readl(atchan, CFG);
}
atdma->save_imr = dma_readl(atdma, EBCIMR);
/* disable DMA controller */
at_dma_off(atdma);
clk_disable(atdma->clk); clk_disable(atdma->clk);
return 0; return 0;
} }
static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
/* restore channel status for cyclic descriptors list:
* next descriptor in the cyclic list at the time of suspend */
channel_writel(atchan, SADDR, 0);
channel_writel(atchan, DADDR, 0);
channel_writel(atchan, CTRLA, 0);
channel_writel(atchan, CTRLB, 0);
channel_writel(atchan, DSCR, atchan->save_dscr);
dma_writel(atdma, CHER, atchan->mask);
/* channel pause status should be removed by channel user
* We cannot take the initiative to do it here */
vdbg_dump_regs(atchan);
}
static int at_dma_resume_noirq(struct device *dev) static int at_dma_resume_noirq(struct device *dev)
{ {
struct platform_device *pdev = to_platform_device(dev); struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev); struct at_dma *atdma = platform_get_drvdata(pdev);
struct dma_chan *chan, *_chan;
/* bring back DMA controller */
clk_enable(atdma->clk); clk_enable(atdma->clk);
dma_writel(atdma, EN, AT_DMA_ENABLE); dma_writel(atdma, EN, AT_DMA_ENABLE);
/* clear any pending interrupt */
while (dma_readl(atdma, EBCISR))
cpu_relax();
/* restore saved data */
dma_writel(atdma, EBCIER, atdma->save_imr);
list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
device_node) {
struct at_dma_chan *atchan = to_at_dma_chan(chan);
channel_writel(atchan, CFG, atchan->save_cfg);
if (atc_chan_is_cyclic(atchan))
atc_resume_cyclic(atchan);
}
return 0; return 0;
} }
static const struct dev_pm_ops at_dma_dev_pm_ops = { static const struct dev_pm_ops at_dma_dev_pm_ops = {
.prepare = at_dma_prepare,
.suspend_noirq = at_dma_suspend_noirq, .suspend_noirq = at_dma_suspend_noirq,
.resume_noirq = at_dma_resume_noirq, .resume_noirq = at_dma_resume_noirq,
}; };
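The new .prepare callback above joins the existing noirq hooks. For these to run at all, the dev_pm_ops table has to be referenced from the driver structure; the fragment below is only a sketch of that wiring (the at_dma_remove/at_dma_shutdown references and driver name are assumptions based on the usual at_hdmac layout, not shown in this hunk).

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),	/* assumed remove handler */
	.shutdown	= at_dma_shutdown,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,	/* makes the callbacks above reachable */
	},
};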
......
...@@ -204,6 +204,9 @@ enum atc_status { ...@@ -204,6 +204,9 @@ enum atc_status {
* @status: transmit status information from irq/prep* functions * @status: transmit status information from irq/prep* functions
* to tasklet (use atomic operations) * to tasklet (use atomic operations)
* @tasklet: bottom half to finish transaction work * @tasklet: bottom half to finish transaction work
* @save_cfg: configuration register that is saved on suspend/resume cycle
* @save_dscr: for cyclic operations, preserve next descriptor address in
* the cyclic list on suspend/resume cycle
* @lock: serializes enqueue/dequeue operations to descriptors lists * @lock: serializes enqueue/dequeue operations to descriptors lists
* @completed_cookie: identifier for the most recently completed operation * @completed_cookie: identifier for the most recently completed operation
* @active_list: list of descriptors dmaengine is currently running on * @active_list: list of descriptors dmaengine is currently running on
...@@ -218,6 +221,8 @@ struct at_dma_chan { ...@@ -218,6 +221,8 @@ struct at_dma_chan {
u8 mask; u8 mask;
unsigned long status; unsigned long status;
struct tasklet_struct tasklet; struct tasklet_struct tasklet;
u32 save_cfg;
u32 save_dscr;
spinlock_t lock; spinlock_t lock;
...@@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan) ...@@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
* @chan_common: common dmaengine dma_device object members * @chan_common: common dmaengine dma_device object members
* @ch_regs: memory mapped register base * @ch_regs: memory mapped register base
* @clk: dma controller clock * @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
* @all_chan_mask: all channels available in a mask * @all_chan_mask: all channels available in a mask
* @dma_desc_pool: base of DMA descriptor region (DMA address) * @dma_desc_pool: base of DMA descriptor region (DMA address)
* @chan: channels table to store at_dma_chan structures * @chan: channels table to store at_dma_chan structures
...@@ -256,6 +262,7 @@ struct at_dma { ...@@ -256,6 +262,7 @@ struct at_dma {
struct dma_device dma_common; struct dma_device dma_common;
void __iomem *regs; void __iomem *regs;
struct clk *clk; struct clk *clk;
u32 save_imr;
u8 all_chan_mask; u8 all_chan_mask;
...@@ -355,6 +362,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan) ...@@ -355,6 +362,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
return !!(dma_readl(atdma, CHSR) & atchan->mask); return !!(dma_readl(atdma, CHSR) & atchan->mask);
} }
/**
* atc_chan_is_paused - test channel pause/resume status
* @atchan: channel whose status we want to test
*/
static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
{
return test_bit(ATC_IS_PAUSED, &atchan->status);
}
/**
* atc_chan_is_cyclic - test if given channel has cyclic property set
* @atchan: channel whose status we want to test
*/
static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
{
return test_bit(ATC_IS_CYCLIC, &atchan->status);
}
/** /**
* set_desc_eol - set end-of-link to descriptor so it will end transfer * set_desc_eol - set end-of-link to descriptor so it will end transfer
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/module.h> #include <linux/module.h>
...@@ -251,6 +252,7 @@ static int dmatest_func(void *data) ...@@ -251,6 +252,7 @@ static int dmatest_func(void *data)
int i; int i;
thread_name = current->comm; thread_name = current->comm;
set_freezable_with_signal();
ret = -ENOMEM; ret = -ENOMEM;
...@@ -305,7 +307,8 @@ static int dmatest_func(void *data) ...@@ -305,7 +307,8 @@ static int dmatest_func(void *data)
dma_addr_t dma_srcs[src_cnt]; dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt]; dma_addr_t dma_dsts[dst_cnt];
struct completion cmp; struct completion cmp;
unsigned long tmo = msecs_to_jiffies(timeout); unsigned long start, tmo, end = 0 /* compiler... */;
bool reload = true;
u8 align = 0; u8 align = 0;
total_tests++; total_tests++;
...@@ -404,7 +407,17 @@ static int dmatest_func(void *data) ...@@ -404,7 +407,17 @@ static int dmatest_func(void *data)
} }
dma_async_issue_pending(chan); dma_async_issue_pending(chan);
tmo = wait_for_completion_timeout(&cmp, tmo); do {
start = jiffies;
if (reload)
end = start + msecs_to_jiffies(timeout);
else if (end <= start)
end = start + 1;
tmo = wait_for_completion_interruptible_timeout(&cmp,
end - start);
reload = try_to_freeze();
} while (tmo == -ERESTARTSYS);
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
if (tmo == 0) { if (tmo == 0) {
...@@ -477,6 +490,8 @@ static int dmatest_func(void *data) ...@@ -477,6 +490,8 @@ static int dmatest_func(void *data)
pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
thread_name, total_tests, failed_tests, ret); thread_name, total_tests, failed_tests, ret);
/* terminate all transfers on specified channels */
chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
if (iterations > 0) if (iterations > 0)
while (!kthread_should_stop()) { while (!kthread_should_stop()) {
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
...@@ -499,6 +514,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) ...@@ -499,6 +514,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
list_del(&thread->node); list_del(&thread->node);
kfree(thread); kfree(thread);
} }
/* terminate all transfers on specified channels */
dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
kfree(dtc); kfree(dtc);
} }
......
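The dmatest hunk above makes the test thread freezable (set_freezable_with_signal()) and re-arms its completion wait whenever the freezer interrupts it with -ERESTARTSYS. A self-contained sketch of that wait pattern, with hypothetical names and a made-up timeout policy:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>

/* Hypothetical freezable worker helper: keep waiting for 'done', but let
 * the freezer interrupt and park the thread during suspend, then resume
 * the wait with a fresh timeout, mirroring the dmatest change above. */
static int example_wait(struct completion *done, unsigned int timeout_ms)
{
	unsigned long start, end = 0;
	bool reload = true;
	long tmo;

	do {
		start = jiffies;
		if (reload)
			end = start + msecs_to_jiffies(timeout_ms);
		else if (end <= start)
			end = start + 1;
		tmo = wait_for_completion_interruptible_timeout(done,
								end - start);
		/* returns true if the thread was just frozen and thawed */
		reload = try_to_freeze();
	} while (tmo == -ERESTARTSYS);

	return tmo > 0 ? 0 : -ETIMEDOUT;
}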
...@@ -1407,12 +1407,11 @@ static int __init dw_probe(struct platform_device *pdev) ...@@ -1407,12 +1407,11 @@ static int __init dw_probe(struct platform_device *pdev)
dw->all_chan_mask = (1 << pdata->nr_channels) - 1; dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
INIT_LIST_HEAD(&dw->dma.channels); INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) { for (i = 0; i < pdata->nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i]; struct dw_dma_chan *dwc = &dw->chan[i];
dwc->chan.device = &dw->dma; dwc->chan.device = &dw->dma;
dwc->chan.cookie = dwc->completed = 1; dwc->chan.cookie = dwc->completed = 1;
dwc->chan.chan_id = i;
if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
list_add_tail(&dwc->chan.device_node, list_add_tail(&dwc->chan.device_node,
&dw->dma.channels); &dw->dma.channels);
...@@ -1468,7 +1467,7 @@ static int __init dw_probe(struct platform_device *pdev) ...@@ -1468,7 +1467,7 @@ static int __init dw_probe(struct platform_device *pdev)
dma_writel(dw, CFG, DW_CFG_DMA_EN); dma_writel(dw, CFG, DW_CFG_DMA_EN);
printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
dev_name(&pdev->dev), dw->dma.chancnt); dev_name(&pdev->dev), pdata->nr_channels);
dma_async_device_register(&dw->dma); dma_async_device_register(&dw->dma);
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/slab.h> #include <linux/slab.h>
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
* http://www.gnu.org/copyleft/gpl.html * http://www.gnu.org/copyleft/gpl.html
*/ */
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
......
This diff has been collapsed.
This diff has been collapsed.
...@@ -741,7 +741,6 @@ static int __devinit mpc_dma_probe(struct platform_device *op) ...@@ -741,7 +741,6 @@ static int __devinit mpc_dma_probe(struct platform_device *op)
mchan = &mdma->channels[i]; mchan = &mdma->channels[i];
mchan->chan.device = dma; mchan->chan.device = dma;
mchan->chan.chan_id = i;
mchan->chan.cookie = 1; mchan->chan.cookie = 1;
mchan->completed_cookie = mchan->chan.cookie; mchan->completed_cookie = mchan->chan.cookie;
......
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.
...@@ -23,6 +23,12 @@ ...@@ -23,6 +23,12 @@
struct device; struct device;
enum dmae_pm_state {
DMAE_PM_ESTABLISHED,
DMAE_PM_BUSY,
DMAE_PM_PENDING,
};
struct sh_dmae_chan { struct sh_dmae_chan {
dma_cookie_t completed_cookie; /* The maximum cookie completed */ dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */ spinlock_t desc_lock; /* Descriptor operation lock */
...@@ -38,6 +44,7 @@ struct sh_dmae_chan { ...@@ -38,6 +44,7 @@ struct sh_dmae_chan {
u32 __iomem *base; u32 __iomem *base;
char dev_id[16]; /* unique name per DMAC of channel */ char dev_id[16]; /* unique name per DMAC of channel */
int pm_error; int pm_error;
enum dmae_pm_state pm_state;
}; };
struct sh_dmae_device { struct sh_dmae_device {
......
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.
...@@ -131,8 +131,6 @@ struct plat_sci_port { ...@@ -131,8 +131,6 @@ struct plat_sci_port {
struct plat_sci_port_ops *ops; struct plat_sci_port_ops *ops;
struct device *dma_dev;
unsigned int dma_slave_tx; unsigned int dma_slave_tx;
unsigned int dma_slave_rx; unsigned int dma_slave_rx;
}; };
......
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.