openeuler / raspberrypi-kernel

Commit f2889fee
Author:  Vinod Koul
Date:    May 25, 2011
Parents: 6221f222, aecb7b64

    Merge branch 'next' into for-linus

Showing 15 changed files with 609 additions and 234 deletions (+609 −234)
  MAINTAINERS                   +7    −0
  drivers/Makefile              +3    −1
  drivers/dma/Kconfig           +7    −5
  drivers/dma/at_hdmac.c        +291  −85
  drivers/dma/at_hdmac_regs.h   +25   −5
  drivers/dma/coh901318.c       +1    −1
  drivers/dma/dw_dmac.c         +181  −91
  drivers/dma/dw_dmac_regs.h    +2    −0
  drivers/dma/intel_mid_dma.c   +13   −4
  drivers/dma/iop-adma.c        +3    −3
  drivers/dma/mv_xor.c          +3    −3
  drivers/dma/pch_dma.c         +66   −30
  drivers/dma/ppc4xx/adma.c     +4    −4
  drivers/dma/ste_dma40.c       +2    −2
  include/linux/dw_dmac.h       +1    −0
MAINTAINERS

@@ -5401,6 +5401,13 @@ L:	linux-serial@vger.kernel.org
 S:	Maintained
 F:	drivers/tty/serial

+SYNOPSYS DESIGNWARE DMAC DRIVER
+M:	Viresh Kumar <viresh.kumar@st.com>
+S:	Maintained
+F:	include/linux/dw_dmac.h
+F:	drivers/dma/dw_dmac_regs.h
+F:	drivers/dma/dw_dmac.c
+
 TIMEKEEPING, NTP
 M:	John Stultz <johnstul@us.ibm.com>
 M:	Thomas Gleixner <tglx@linutronix.de>
drivers/Makefile

@@ -17,6 +17,9 @@ obj-$(CONFIG_SFI)		+= sfi/
 # was used and do nothing if so
 obj-$(CONFIG_PNP)		+= pnp/
 obj-$(CONFIG_ARM_AMBA)		+= amba/
+# Many drivers will want to use DMA so this has to be made available
+# really early.
+obj-$(CONFIG_DMA_ENGINE)	+= dma/

 obj-$(CONFIG_VIRTIO)		+= virtio/
 obj-$(CONFIG_XEN)		+= xen/
@@ -92,7 +95,6 @@ obj-$(CONFIG_EISA)		+= eisa/
 obj-y				+= lguest/
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle/
-obj-$(CONFIG_DMA_ENGINE)	+= dma/
 obj-$(CONFIG_MMC)		+= mmc/
 obj-$(CONFIG_MEMSTICK)		+= memstick/
 obj-$(CONFIG_NEW_LEDS)		+= leds/
drivers/dma/Kconfig

@@ -200,16 +200,18 @@ config PL330_DMA
 	  platform_data for a dma-pl330 device.

 config PCH_DMA
-	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
+	tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	help
 	  Enable support for Intel EG20T PCH DMA engine.

-	  This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/
-	  Output Hub) which is for IVI(In-Vehicle Infotainment) use.
-	  ML7213 is companion chip for Intel Atom E6xx series.
-	  ML7213 is completely compatible for Intel EG20T PCH.
+	  This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
+	  Output Hub), ML7213 and ML7223.
+	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
+	  for MP(Media Phone) use.
+	  ML7213/ML7223 is companion chip for Intel Atom E6xx series.
+	  ML7213/ML7223 is completely compatible for Intel EG20T PCH.

 config IMX_SDMA
 	tristate "i.MX SDMA support"
drivers/dma/at_hdmac.c

@@ -37,8 +37,8 @@

 #define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
 #define	ATC_DEFAULT_CTRLA	(0)
-#define	ATC_DEFAULT_CTRLB	(ATC_SIF(0)	\
-				|ATC_DIF(1))
+#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
+				|ATC_DIF(AT_DMA_MEM_IF))

 /*
  * Initial number of descriptors to allocate for each channel. This could
@@ -164,6 +164,29 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 	}
 }

+/**
+ * atc_desc_chain - build chain adding a descriptor
+ * @first: address of first descriptor of the chain
+ * @prev: address of previous descriptor of the chain
+ * @desc: descriptor to queue
+ *
+ * Called from prep_* functions
+ */
+static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
+			   struct at_desc *desc)
+{
+	if (!(*first)) {
+		*first = desc;
+	} else {
+		/* inform the HW lli about chaining */
+		(*prev)->lli.dscr = desc->txd.phys;
+		/* insert the link descriptor to the LD ring */
+		list_add_tail(&desc->desc_node,
+				&(*first)->tx_list);
+	}
+	*prev = desc;
+}
+
 /**
  * atc_assign_cookie - compute and assign new cookie
  * @atchan: channel we work on
@@ -237,16 +260,12 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 static void
 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 {
-	dma_async_tx_callback		callback;
-	void				*param;
 	struct dma_async_tx_descriptor	*txd = &desc->txd;

 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);

 	atchan->completed_cookie = txd->cookie;
-	callback = txd->callback;
-	param = txd->callback_param;

 	/* move children to free_list */
 	list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -278,12 +297,19 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 		}
 	}

-	/*
-	 * The API requires that no submissions are done from a
-	 * callback, so we don't need to drop the lock here
-	 */
-	if (callback)
-		callback(param);
+	/* for cyclic transfers,
+	 * no need to replay callback function while stopping */
+	if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+		dma_async_tx_callback	callback = txd->callback;
+		void			*param = txd->callback_param;
+
+		/*
+		 * The API requires that no submissions are done from a
+		 * callback, so we don't need to drop the lock here
+		 */
+		if (callback)
+			callback(param);
+	}

 	dma_run_dependencies(txd);
 }
@@ -419,6 +445,26 @@ static void atc_handle_error(struct at_dma_chan *atchan)
 	atc_chain_complete(atchan, bad_desc);
 }

+/**
+ * atc_handle_cyclic - at the end of a period, run callback function
+ * @atchan: channel used for cyclic operations
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_handle_cyclic(struct at_dma_chan *atchan)
+{
+	struct at_desc			*first = atc_first_active(atchan);
+	struct dma_async_tx_descriptor	*txd = &first->txd;
+	dma_async_tx_callback		callback = txd->callback;
+	void				*param = txd->callback_param;
+
+	dev_vdbg(chan2dev(&atchan->chan_common),
+			"new cyclic period llp 0x%08x\n",
+			channel_readl(atchan, DSCR));
+
+	if (callback)
+		callback(param);
+}

 /*--  IRQ & Tasklet  ---------------------------------------------------*/

@@ -426,16 +472,11 @@ static void atc_tasklet(unsigned long data)
 {
 	struct at_dma_chan *atchan = (struct at_dma_chan *)data;

-	/* Channel cannot be enabled here */
-	if (atc_chan_is_enabled(atchan)) {
-		dev_err(chan2dev(&atchan->chan_common),
-			"BUG: channel enabled in tasklet\n");
-		return;
-	}
-
 	spin_lock(&atchan->lock);
-	if (test_and_clear_bit(0, &atchan->error_status))
+	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
 		atc_handle_error(atchan);
+	else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+		atc_handle_cyclic(atchan);
 	else
 		atc_advance_work(atchan);

@@ -464,12 +505,13 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
 		for (i = 0; i < atdma->dma_common.chancnt; i++) {
 			atchan = &atdma->chan[i];
-			if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
+			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
 				if (pending & AT_DMA_ERR(i)) {
 					/* Disable channel on AHB error */
-					dma_writel(atdma, CHDR, atchan->mask);
+					dma_writel(atdma, CHDR,
+						AT_DMA_RES(i) | atchan->mask);
 					/* Give information to tasklet */
-					set_bit(0, &atchan->error_status);
+					set_bit(ATC_IS_ERROR, &atchan->status);
 				}
 				tasklet_schedule(&atchan->tasklet);
 				ret = IRQ_HANDLED;
@@ -549,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	}

 	ctrla =   ATC_DEFAULT_CTRLA;
-	ctrlb =   ATC_DEFAULT_CTRLB
+	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
 		| ATC_SRC_ADDR_MODE_INCR
 		| ATC_DST_ADDR_MODE_INCR
 		| ATC_FC_MEM2MEM;
@@ -584,16 +626,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,

 		desc->txd.cookie = 0;

-		if (!first) {
-			first = desc;
-		} else {
-			/* inform the HW lli about chaining */
-			prev->lli.dscr = desc->txd.phys;
-			/* insert the link descriptor to the LD ring */
-			list_add_tail(&desc->desc_node,
-					&first->tx_list);
-		}
-		prev = desc;
+		atc_desc_chain(&first, &prev, desc);
 	}

 	/* First descriptor of the chain embeds additional information */
@@ -639,7 +672,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct scatterlist	*sg;
 	size_t			total_len = 0;

-	dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
+	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
+			sg_len,
 			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
 			flags);

@@ -651,14 +685,15 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	reg_width = atslave->reg_width;

 	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
-	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
+	ctrlb = ATC_IEN;

 	switch (direction) {
 	case DMA_TO_DEVICE:
 		ctrla |=  ATC_DST_WIDTH(reg_width);
 		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
 			| ATC_SRC_ADDR_MODE_INCR
-			| ATC_FC_MEM2PER;
+			| ATC_FC_MEM2PER
+			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
 		reg = atslave->tx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct at_desc	*desc;
@@ -682,16 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 					| len >> mem_width;
 			desc->lli.ctrlb = ctrlb;

-			if (!first) {
-				first = desc;
-			} else {
-				/* inform the HW lli about chaining */
-				prev->lli.dscr = desc->txd.phys;
-				/* insert the link descriptor to the LD ring */
-				list_add_tail(&desc->desc_node,
-						&first->tx_list);
-			}
-			prev = desc;
+			atc_desc_chain(&first, &prev, desc);
 			total_len += len;
 		}
 		break;
@@ -699,7 +725,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		ctrla |=  ATC_SRC_WIDTH(reg_width);
 		ctrlb |=  ATC_DST_ADDR_MODE_INCR
 			| ATC_SRC_ADDR_MODE_FIXED
-			| ATC_FC_PER2MEM;
+			| ATC_FC_PER2MEM
+			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

 		reg = atslave->rx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
@@ -724,16 +751,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 					| len >> reg_width;
 			desc->lli.ctrlb = ctrlb;

-			if (!first) {
-				first = desc;
-			} else {
-				/* inform the HW lli about chaining */
-				prev->lli.dscr = desc->txd.phys;
-				/* insert the link descriptor to the LD ring */
-				list_add_tail(&desc->desc_node,
-						&first->tx_list);
-			}
-			prev = desc;
+			atc_desc_chain(&first, &prev, desc);
 			total_len += len;
 		}
 		break;
@@ -759,41 +777,211 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return NULL;
 }

+/**
+ * atc_dma_cyclic_check_values
+ * Check for too big/unaligned periods and unaligned DMA buffer
+ */
+static int
+atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
+		size_t period_len, enum dma_data_direction direction)
+{
+	if (period_len > (ATC_BTSIZE_MAX << reg_width))
+		goto err_out;
+	if (unlikely(period_len & ((1 << reg_width) - 1)))
+		goto err_out;
+	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+		goto err_out;
+	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+		goto err_out;
+
+	return 0;
+
+err_out:
+	return -EINVAL;
+}
+
+/**
+ * atc_dma_cyclic_fill_desc - Fill one period descriptor
+ */
+static int
+atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
+		unsigned int period_index, dma_addr_t buf_addr,
+		size_t period_len, enum dma_data_direction direction)
+{
+	u32		ctrla;
+	unsigned int	reg_width = atslave->reg_width;
+
+	/* prepare common CTRLA value */
+	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
+		| ATC_DST_WIDTH(reg_width)
+		| ATC_SRC_WIDTH(reg_width)
+		| period_len >> reg_width;
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		desc->lli.saddr = buf_addr + (period_len * period_index);
+		desc->lli.daddr = atslave->tx_reg;
+		desc->lli.ctrla = ctrla;
+		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
+				| ATC_SRC_ADDR_MODE_INCR
+				| ATC_FC_MEM2PER
+				| ATC_SIF(AT_DMA_MEM_IF)
+				| ATC_DIF(AT_DMA_PER_IF);
+		break;
+
+	case DMA_FROM_DEVICE:
+		desc->lli.saddr = atslave->rx_reg;
+		desc->lli.daddr = buf_addr + (period_len * period_index);
+		desc->lli.ctrla = ctrla;
+		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
+				| ATC_SRC_ADDR_MODE_FIXED
+				| ATC_FC_PER2MEM
+				| ATC_SIF(AT_DMA_PER_IF)
+				| ATC_DIF(AT_DMA_MEM_IF);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+		size_t period_len, enum dma_data_direction direction)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma_slave	*atslave = chan->private;
+	struct at_desc		*first = NULL;
+	struct at_desc		*prev = NULL;
+	unsigned long		was_cyclic;
+	unsigned int		periods = buf_len / period_len;
+	unsigned int		i;
+
+	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
+			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+			buf_addr,
+			periods, buf_len, period_len);
+
+	if (unlikely(!atslave || !buf_len || !period_len)) {
+		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
+		return NULL;
+	}
+
+	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
+	if (was_cyclic) {
+		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
+		return NULL;
+	}
+
+	/* Check for too big/unaligned periods and unaligned DMA buffer */
+	if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
+			period_len, direction))
+		goto err_out;
+
+	/* build cyclic linked list */
+	for (i = 0; i < periods; i++) {
+		struct at_desc	*desc;
+
+		desc = atc_desc_get(atchan);
+		if (!desc)
+			goto err_desc_get;
+
+		if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
+						period_len, direction))
+			goto err_desc_get;
+
+		atc_desc_chain(&first, &prev, desc);
+	}
+
+	/* let's make a cyclic list */
+	prev->lli.dscr = first->txd.phys;
+
+	/* First descriptor of the chain embeds additional information */
+	first->txd.cookie = -EBUSY;
+	first->len = buf_len;
+
+	return &first->txd;
+
+err_desc_get:
+	dev_err(chan2dev(chan), "not enough descriptors available\n");
+	atc_desc_put(atchan, first);
+err_out:
+	clear_bit(ATC_IS_CYCLIC, &atchan->status);
+	return NULL;
+}
+
 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		       unsigned long arg)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
-	struct at_desc		*desc, *_desc;
+	int			chan_id = atchan->chan_common.chan_id;
+
 	LIST_HEAD(list);

-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
+	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

-	/*
-	 * This is only called when something went wrong elsewhere, so
-	 * we don't really care about the data. Just disable the
-	 * channel. We still have to poll the channel enable bit due
-	 * to AHB/HSB limitations.
-	 */
-	spin_lock_bh(&atchan->lock);
+	if (cmd == DMA_PAUSE) {
+		spin_lock_bh(&atchan->lock);

-	dma_writel(atdma, CHDR, atchan->mask);
+		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
+		set_bit(ATC_IS_PAUSED, &atchan->status);

-	/* confirm that this channel is disabled */
-	while (dma_readl(atdma, CHSR) & atchan->mask)
-		cpu_relax();
+		spin_unlock_bh(&atchan->lock);
+	} else if (cmd == DMA_RESUME) {
+		if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+			return 0;

-	/* active_list entries will end up before queued entries */
-	list_splice_init(&atchan->queue, &list);
-	list_splice_init(&atchan->active_list, &list);
+		spin_lock_bh(&atchan->lock);

-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
+		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+		clear_bit(ATC_IS_PAUSED, &atchan->status);

-	spin_unlock_bh(&atchan->lock);
+		spin_unlock_bh(&atchan->lock);
+	} else if (cmd == DMA_TERMINATE_ALL) {
+		struct at_desc	*desc, *_desc;
+		/*
+		 * This is only called when something went wrong elsewhere, so
+		 * we don't really care about the data. Just disable the
+		 * channel. We still have to poll the channel enable bit due
+		 * to AHB/HSB limitations.
+		 */
+		spin_lock_bh(&atchan->lock);
+
+		/* disabling channel: must also remove suspend state */
+		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
+
+		/* confirm that this channel is disabled */
+		while (dma_readl(atdma, CHSR) & atchan->mask)
+			cpu_relax();
+
+		/* active_list entries will end up before queued entries */
+		list_splice_init(&atchan->queue, &list);
+		list_splice_init(&atchan->active_list, &list);
+
+		/* Flush all pending and queued descriptors */
+		list_for_each_entry_safe(desc, _desc, &list, desc_node)
+			atc_chain_complete(atchan, desc);
+
+		clear_bit(ATC_IS_PAUSED, &atchan->status);
+		/* if channel dedicated to cyclic operations, free it */
+		clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
+		spin_unlock_bh(&atchan->lock);
+	} else {
+		return -ENXIO;
+	}

 	return 0;
 }
@@ -835,9 +1023,17 @@ atc_tx_status(struct dma_chan *chan,

 	spin_unlock_bh(&atchan->lock);

-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-	dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
-		 cookie, last_complete ? last_complete : 0,
+	if (ret != DMA_SUCCESS)
+		dma_set_tx_state(txstate, last_complete, last_used,
+			atc_first_active(atchan)->len);
+	else
+		dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+	if (test_bit(ATC_IS_PAUSED, &atchan->status))
+		ret = DMA_PAUSED;
+
+	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
+		 ret, cookie, last_complete ? last_complete : 0,
 		 last_used ? last_used : 0);

 	return ret;
@@ -853,6 +1049,10 @@ static void atc_issue_pending(struct dma_chan *chan)

 	dev_vdbg(chan2dev(chan), "issue_pending\n");

+	/* Not needed for cyclic transfers */
+	if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+		return;
+
 	spin_lock_bh(&atchan->lock);
 	if (!atc_chan_is_enabled(atchan)) {
 		atc_advance_work(atchan);
@@ -959,6 +1159,7 @@ static void atc_free_chan_resources(struct dma_chan *chan)
 	}
 	list_splice_init(&atchan->free_list, &list);
 	atchan->descs_allocated = 0;
+	atchan->status = 0;

 	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }
@@ -1092,10 +1293,15 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

-	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
+	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
+
+	if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
+
+	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
+	    dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_control = atc_control;
-	}

 	dma_writel(atdma, EN, AT_DMA_ENABLE);
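The atc_prep_dma_cyclic callback added above is wired up through the dmaengine DMA_CYCLIC capability in the at_dma_probe hunk. For orientation, here is a minimal client-side sketch against the 2011-era slave API; it is illustrative only, not code from this commit, and it assumes a channel already requested with an at_dma_slave hung off chan->private, plus caller-provided buf/buf_len/period_len.

	#include <linux/dmaengine.h>

	/* Hypothetical callback: with at_hdmac it runs from the channel
	 * tasklet once per completed period (see atc_handle_cyclic). */
	static void period_done(void *arg)
	{
		/* refill or consume the period that just completed */
	}

	static int start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len)
	{
		struct dma_async_tx_descriptor *txd;

		txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
							   period_len,
							   DMA_TO_DEVICE);
		if (!txd)
			return -EBUSY;

		txd->callback = period_done;
		txd->callback_param = NULL;

		/* atc_issue_pending() above is a no-op for cyclic channels;
		 * for this driver, submitting on an idle channel starts it. */
		txd->tx_submit(txd);
		return 0;
	}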
drivers/dma/at_hdmac_regs.h

@@ -103,6 +103,10 @@
 /* Bitfields in CTRLB */
 #define	ATC_SIF(i)		(0x3 & (i))	/* Src tx done via AHB-Lite Interface i */
 #define	ATC_DIF(i)		((0x3 & (i)) << 4)	/* Dst tx done via AHB-Lite Interface i */
+				  /* Specify AHB interfaces */
+#define AT_DMA_MEM_IF		0 /* interface 0 as memory interface */
+#define AT_DMA_PER_IF		1 /* interface 1 as peripheral interface */
+
 #define	ATC_SRC_PIP		(0x1 << 8)	/* Source Picture-in-Picture enabled */
 #define	ATC_DST_PIP		(0x1 << 12)	/* Destination Picture-in-Picture enabled */
 #define	ATC_SRC_DSCR_DIS	(0x1 << 16)	/* Src Descriptor fetch disable */
@@ -180,13 +184,24 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)

 /*--  Channels  --------------------------------------------------------*/

+/**
+ * atc_status - information bits stored in channel status flag
+ *
+ * Manipulated with atomic operations.
+ */
+enum atc_status {
+	ATC_IS_ERROR = 0,
+	ATC_IS_PAUSED = 1,
+	ATC_IS_CYCLIC = 24,
+};
+
 /**
  * struct at_dma_chan - internal representation of an Atmel HDMAC channel
  * @chan_common: common dmaengine channel object members
  * @device: parent device
  * @ch_regs: memory mapped register base
  * @mask: channel index in a mask
- * @error_status: transmit error status information from irq handler
+ * @status: transmit status information from irq/prep* functions
  *                to tasklet (use atomic operations)
  * @tasklet: bottom half to finish transaction work
  * @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -201,7 +216,7 @@ struct at_dma_chan {
 	struct at_dma		*device;
 	void __iomem		*ch_regs;
 	u8			mask;
-	unsigned long		error_status;
+	unsigned long		status;
 	struct tasklet_struct	tasklet;

 	spinlock_t		lock;
@@ -309,8 +324,8 @@ static void atc_setup_irq(struct at_dma_chan *atchan, int on)
 	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);
 	u32		ebci;

-	/* enable interrupts on buffer chain completion & error */
-	ebci = AT_DMA_CBTC(atchan->chan_common.chan_id)
+	/* enable interrupts on buffer transfer completion & error */
+	ebci = AT_DMA_BTC(atchan->chan_common.chan_id)
 		| AT_DMA_ERR(atchan->chan_common.chan_id);
 	if (on)
 		dma_writel(atdma, EBCIER, ebci);
@@ -347,7 +362,12 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
  */
 static void set_desc_eol(struct at_desc *desc)
 {
-	desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+	u32 ctrlb = desc->lli.ctrlb;
+
+	ctrlb &= ~ATC_IEN;
+	ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+
+	desc->lli.ctrlb = ctrlb;
 	desc->lli.dscr = 0;
 }
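The AT_DMA_MEM_IF/AT_DMA_PER_IF values introduced here feed the pre-existing ATC_SIF()/ATC_DIF() bitfields, so each descriptor can name the AHB-Lite interface used on its source and destination side. A worked example of the encoding, derived directly from the macros above:

	/* Memory-to-peripheral: read via AHB interface 0 (memory side),
	 * write via AHB interface 1 (peripheral side). */
	u32 ctrlb = ATC_SIF(AT_DMA_MEM_IF)	/*  0x3 & 0       -> bits 1:0 = 0 */
		  | ATC_DIF(AT_DMA_PER_IF);	/* (0x3 & 1) << 4 -> bits 5:4 = 1 */

	/* The old ATC_DEFAULT_CTRLB hard-coded ATC_SIF(0) | ATC_DIF(1) for
	 * every transfer; at_hdmac.c now picks the pair per direction and
	 * uses AT_DMA_MEM_IF on both sides of a plain memcpy. */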
drivers/dma/coh901318.c

@@ -1610,7 +1610,7 @@ int __init coh901318_init(void)
 {
 	return platform_driver_probe(&coh901318_driver, coh901318_probe);
 }
-arch_initcall(coh901318_init);
+subsys_initcall(coh901318_init);

 void __exit coh901318_exit(void)
 {
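Both this driver and ste_dma40.c further down move their init from arch_initcall to subsys_initcall, one level later in the boot sequence; this pairs with the drivers/Makefile hunk above that links dma/ early. For reference, the initcall levels as defined in include/linux/init.h of that era (quoted from memory, worth verifying against the tree):

	/* initcalls run in ascending level order during boot */
	#define core_initcall(fn)	__define_initcall("1", fn, 1)
	#define postcore_initcall(fn)	__define_initcall("2", fn, 2)
	#define arch_initcall(fn)	__define_initcall("3", fn, 3)
	#define subsys_initcall(fn)	__define_initcall("4", fn, 4)
	#define fs_initcall(fn)		__define_initcall("5", fn, 5)
	#define device_initcall(fn)	__define_initcall("6", fn, 6)
	#define late_initcall(fn)	__define_initcall("7", fn, 7)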
drivers/dma/dw_dmac.c

(This diff was collapsed in the capture and not expanded; per the summary above it carries +181 −91.)
drivers/dma/dw_dmac_regs.h

@@ -2,6 +2,7 @@
  * Driver for the Synopsys DesignWare AHB DMA Controller
  *
  * Copyright (C) 2005-2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -138,6 +139,7 @@ struct dw_dma_chan {
 	void __iomem		*ch_regs;
 	u8			mask;
 	u8			priority;
+	bool			paused;

 	spinlock_t		lock;
drivers/dma/intel_mid_dma.c

@@ -1292,8 +1292,7 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_dma;

 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
-	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_allow(&pdev->dev);
 	return 0;
@@ -1322,6 +1321,9 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
 static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
 {
 	struct middma_device *device = pci_get_drvdata(pdev);
+
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_forbid(&pdev->dev);
 	middma_shutdown(pdev);
 	pci_dev_put(pdev);
 	kfree(device);
@@ -1385,13 +1387,20 @@ int dma_resume(struct pci_dev *pci)
 static int dma_runtime_suspend(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	return dma_suspend(pci_dev, PMSG_SUSPEND);
+	struct middma_device *device = pci_get_drvdata(pci_dev);
+
+	device->state = SUSPENDED;
+	return 0;
 }

 static int dma_runtime_resume(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
-	return dma_resume(pci_dev);
+	struct middma_device *device = pci_get_drvdata(pci_dev);
+
+	device->state = RUNNING;
+	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+	return 0;
 }

 static int dma_runtime_idle(struct device *dev)
drivers/dma/iop-adma.c

@@ -619,7 +619,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);

 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
 		__func__, len);
@@ -652,7 +652,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);

 	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
 		__func__, len);
@@ -686,7 +686,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);

 	dev_dbg(iop_chan->device->common.dev,
 		"%s src_cnt: %d len: %u flags: %lx\n",
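This file, and the matching mv_xor.c and ppc4xx/adma.c hunks below, all remove the same redundancy: BUG_ON() already wraps its condition in unlikely(), so BUG_ON(unlikely(x)) annotated the branch twice. The generic definition, as in 2011-era include/asm-generic/bug.h (quoted from memory; worth checking against the tree):

	#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)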
drivers/dma/mv_xor.c

@@ -671,7 +671,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
-	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

 	spin_lock_bh(&mv_chan->lock);
 	slot_cnt = mv_chan_memcpy_slot_count(len);
@@ -710,7 +710,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
-	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

 	spin_lock_bh(&mv_chan->lock);
 	slot_cnt = mv_chan_memset_slot_count(len);
@@ -744,7 +744,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
-	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

 	dev_dbg(mv_chan->device->common.dev,
 		"%s src_cnt: %d len: dest %x %u flags: %ld\n",
drivers/dma/pch_dma.c

@@ -77,10 +77,10 @@ struct pch_dma_regs {
 	u32	dma_ctl0;
 	u32	dma_ctl1;
 	u32	dma_ctl2;
-	u32	reserved1;
+	u32	dma_ctl3;
 	u32	dma_sts0;
 	u32	dma_sts1;
-	u32	reserved2;
+	u32	dma_sts2;
 	u32	reserved3;
 	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
 };
@@ -130,6 +130,7 @@ struct pch_dma {
 #define PCH_DMA_CTL0	0x00
 #define PCH_DMA_CTL1	0x04
 #define PCH_DMA_CTL2	0x08
+#define PCH_DMA_CTL3	0x0C
 #define PCH_DMA_STS0	0x10
 #define PCH_DMA_STS1	0x14
@@ -138,7 +139,8 @@ struct pch_dma {
 #define dma_writel(pd, name, val) \
 	writel((val), (pd)->membase + PCH_DMA_##name)

-static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
+static inline
+struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
 {
 	return container_of(txd, struct pch_dma_desc, txd);
 }
@@ -163,13 +165,15 @@ static inline struct device *chan2parent(struct dma_chan *chan)
 	return chan->dev->device.parent;
 }

-static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
+static inline
+struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
 {
 	return list_first_entry(&pd_chan->active_list,
 				struct pch_dma_desc, desc_node);
 }

-static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
+static inline
+struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
 {
 	return list_first_entry(&pd_chan->queue,
 				struct pch_dma_desc, desc_node);
@@ -199,16 +203,30 @@ static void pdc_set_dir(struct dma_chan *chan)
 	struct pch_dma *pd = to_pd(chan->device);
 	u32 val;

-	val = dma_readl(pd, CTL0);
+	if (chan->chan_id < 8) {
+		val = dma_readl(pd, CTL0);

-	if (pd_chan->dir == DMA_TO_DEVICE)
-		val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
-			       DMA_CTL0_DIR_SHIFT_BITS);
-	else
-		val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
-				 DMA_CTL0_DIR_SHIFT_BITS));
+		if (pd_chan->dir == DMA_TO_DEVICE)
+			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+				       DMA_CTL0_DIR_SHIFT_BITS);
+		else
+			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+					 DMA_CTL0_DIR_SHIFT_BITS));
+		dma_writel(pd, CTL0, val);
+	} else {
+		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */

-	dma_writel(pd, CTL0, val);
+		val = dma_readl(pd, CTL3);
+
+		if (pd_chan->dir == DMA_TO_DEVICE)
+			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
+				       DMA_CTL0_DIR_SHIFT_BITS);
+		else
+			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
+					 DMA_CTL0_DIR_SHIFT_BITS));
+		dma_writel(pd, CTL3, val);
+	}

 	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
 		chan->chan_id, val);
@@ -219,13 +237,26 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
 	struct pch_dma *pd = to_pd(chan->device);
 	u32 val;

-	val = dma_readl(pd, CTL0);
+	if (chan->chan_id < 8) {
+		val = dma_readl(pd, CTL0);

-	val &= ~(DMA_CTL0_MODE_MASK_BITS <<
-		(DMA_CTL0_BITS_PER_CH * chan->chan_id));
-	val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
+		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+			(DMA_CTL0_BITS_PER_CH * chan->chan_id));
+		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);

-	dma_writel(pd, CTL0, val);
+		dma_writel(pd, CTL0, val);
+	} else {
+		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
+
+		val = dma_readl(pd, CTL3);
+
+		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+			(DMA_CTL0_BITS_PER_CH * ch));
+		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
+
+		dma_writel(pd, CTL3, val);
+	}

 	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
 		chan->chan_id, val);
@@ -251,9 +282,6 @@ static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
 static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
 {
-	struct pch_dma *pd = to_pd(pd_chan->chan.device);
-	u32 val;
-
 	if (!pdc_is_idle(pd_chan)) {
 		dev_err(chan2dev(&pd_chan->chan),
 			"BUG: Attempt to start non-idle channel\n");
@@ -279,10 +307,6 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
 		channel_writel(pd_chan, NEXT, desc->txd.phys);
 		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
 	}
-
-	val = dma_readl(pd, CTL2);
-	val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
-	dma_writel(pd, CTL2, val);
 }

 static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
@@ -403,7 +427,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 {
 	struct pch_dma_desc *desc, *_d;
 	struct pch_dma_desc *ret = NULL;
-	int i;
+	int i = 0;

 	spin_lock(&pd_chan->lock);
 	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
@@ -478,7 +502,6 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
 	spin_unlock_bh(&pd_chan->lock);

 	pdc_enable_irq(chan, 1);
-	pdc_set_dir(chan);

 	return pd_chan->descs_allocated;
 }
@@ -561,6 +584,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
 	else
 		return NULL;

+	pd_chan->dir = direction;
+	pdc_set_dir(chan);
+
 	for_each_sg(sgl, sg, sg_len, i) {
 		desc = pdc_desc_get(pd_chan);
@@ -703,6 +729,7 @@ static void pch_dma_save_regs(struct pch_dma *pd)
 	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
 	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
 	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
+	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

 	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
 		pd_chan = to_pd_chan(chan);
@@ -725,6 +752,7 @@ static void pch_dma_restore_regs(struct pch_dma *pd)
 	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
 	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
 	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
+	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

 	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
 		pd_chan = to_pd_chan(chan);
@@ -850,8 +878,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
 		pd_chan->membase = &regs->desc[i];

-		pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
 		spin_lock_init(&pd_chan->lock);

 		INIT_LIST_HEAD(&pd_chan->active_list);
@@ -929,13 +955,23 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
 #define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
 #define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
 #define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
+#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
+#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
+#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
+#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
+#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B

-static const struct pci_device_id pch_dma_id_table[] = {
+DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
 	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */
 	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */
 	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12 }, /* I2S */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4 }, /* UART */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4 }, /* Video SPI */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4 }, /* Security */
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4 }, /* FPGA */
 	{ 0, },
 };
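The new ML7213 DMA4 device exposes twelve channels, but CTL0 packs only eight DMA_CTL0_BITS_PER_CH-wide fields, so channels 8-11 are programmed through the new CTL3 register with their index rebased to zero; pdc_set_dir() and pdc_set_mode() above each repeat that selection. A condensed sketch of the rule (the helper name is mine, not the driver's):

	/* Hypothetical helper: which control register and bit offset serve
	 * a given channel. Channels 0-7 live in CTL0; 8-11 in CTL3,
	 * rebased (ch8 -> field 0 ... ch11 -> field 3). */
	static u32 pdc_ctl_offset(int chan_id, unsigned int *bit_base)
	{
		int ch = (chan_id < 8) ? chan_id : chan_id - 8;

		*bit_base = DMA_CTL0_BITS_PER_CH * ch;
		return (chan_id < 8) ? PCH_DMA_CTL0 : PCH_DMA_CTL3;
	}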
drivers/dma/ppc4xx/adma.c

@@ -2313,7 +2313,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+	BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);

 	spin_lock_bh(&ppc440spe_chan->lock);
@@ -2354,7 +2354,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+	BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);

 	spin_lock_bh(&ppc440spe_chan->lock);
@@ -2397,7 +2397,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
 			    dma_dest, dma_src, src_cnt));
 	if (unlikely(!len))
 		return NULL;
-	BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);

 	dev_dbg(ppc440spe_chan->device->common.dev,
 		"ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
@@ -2887,7 +2887,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
 	ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
 				    dst, src, src_cnt));
 	BUG_ON(!len);
-	BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+	BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
 	BUG_ON(!src_cnt);

 	if (src_cnt == 1 && dst[1] == src[0]) {
drivers/dma/ste_dma40.c

@@ -1829,7 +1829,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
 {
 	struct stedma40_platform_data *plat = chan->base->plat_data;
 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
-	dma_addr_t addr;
+	dma_addr_t addr = 0;

 	if (chan->runtime_addr)
 		return chan->runtime_addr;
@@ -2962,4 +2962,4 @@ static int __init stedma40_init(void)
 {
 	return platform_driver_probe(&d40_driver, d40_probe);
 }
-arch_initcall(stedma40_init);
+subsys_initcall(stedma40_init);
include/linux/dw_dmac.h

@@ -3,6 +3,7 @@
  * AVR32 systems.)
  *
  * Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as