openanolis / cloud-kernel
Commit 80cc07af
Authored Feb 14, 2011 by Dan Williams

Merge branch 'dma40' into dmaengine

Parents: e19d1d49, 0c842b55
Showing 4 changed files with 761 additions and 947 deletions (+761 −947)
arch/arm/plat-nomadik/include/plat/ste_dma40.h	+3 −19
drivers/dma/ste_dma40.c	+650 −752
drivers/dma/ste_dma40_ll.c	+79 −139
drivers/dma/ste_dma40_ll.h	+29 −37
arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -104,6 +104,8 @@ struct stedma40_half_channel_info {
  *
  * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
  * @high_priority: true if high-priority
+ * @realtime: true if realtime mode is to be enabled. Only available on DMA40
+ * version 3+, i.e DB8500v2+
  * @mode: channel mode: physical, logical, or operation
  * @mode_opt: options for the chosen channel mode
  * @src_dev_type: Src device type
@@ -119,6 +121,7 @@ struct stedma40_half_channel_info {
 struct stedma40_chan_cfg {
 	enum stedma40_xfer_dir	 dir;
 	bool			 high_priority;
+	bool			 realtime;
 	enum stedma40_mode	 mode;
 	enum stedma40_mode_opt	 mode_opt;
 	int			 src_dev_type;
@@ -168,25 +171,6 @@ struct stedma40_platform_data {
 bool stedma40_filter(struct dma_chan *chan, void *data);
 
-/**
- * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from
- * scattergatter lists.
- *
- * @chan: dmaengine handle
- * @sgl_dst: Destination scatter list
- * @sgl_src: Source scatter list
- * @sgl_len: The length of each scatterlist. Both lists must be of equal length
- * and each element must match the corresponding element in the other scatter
- * list.
- * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
- */
-struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
-						   struct scatterlist *sgl_dst,
-						   struct scatterlist *sgl_src,
-						   unsigned int sgl_len,
-						   unsigned long flags);
-
 /**
  * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
  * (=device)
drivers/dma/ste_dma40.c
@@ -68,6 +68,7 @@ enum d40_command {
  * @base: Pointer to memory area when the pre_alloc_lli's are not large
  * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
  * pre_alloc_lli is used.
+ * @dma_addr: DMA address, if mapped
  * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
  * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
  * one buffer to one buffer.
@@ -75,6 +76,7 @@ enum d40_command {
 struct d40_lli_pool {
 	void	*base;
 	int	 size;
+	dma_addr_t	dma_addr;
 	/* Space for dst and src, plus an extra for padding */
 	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
 };
@@ -94,7 +96,6 @@ struct d40_lli_pool {
  * during a transfer.
  * @node: List entry.
  * @is_in_client_list: true if the client owns this descriptor.
- * @is_hw_linked: true if this job will automatically be continued for
- *	the previous one.
  *
  * This descriptor is used for both logical and physical transfers.
@@ -114,7 +115,7 @@ struct d40_desc {
 	struct list_head	 node;
 
 	bool			 is_in_client_list;
-	bool			 is_hw_linked;
+	bool			 cyclic;
 };
 
 /**
@@ -130,6 +131,7 @@ struct d40_desc {
  */
 struct d40_lcla_pool {
 	void		*base;
+	dma_addr_t	dma_addr;
 	void		*base_unaligned;
 	int		 pages;
 	spinlock_t	 lock;
@@ -303,9 +305,37 @@ struct d40_reg_val {
 	unsigned int val;
 };
 
-static int d40_pool_lli_alloc(struct d40_desc *d40d,
-			      int lli_len, bool is_log)
+static struct device *chan2dev(struct d40_chan *d40c)
+{
+	return &d40c->chan.dev->device;
+}
+
+static bool chan_is_physical(struct d40_chan *chan)
+{
+	return chan->log_num == D40_PHY_CHAN;
+}
+
+static bool chan_is_logical(struct d40_chan *chan)
+{
+	return !chan_is_physical(chan);
+}
+
+static void __iomem *chan_base(struct d40_chan *chan)
+{
+	return chan->base->virtbase + D40_DREG_PCBASE +
+	       chan->phy_chan->num * D40_DREG_PCDELTA;
+}
+
+#define d40_err(dev, format, arg...)		\
+	dev_err(dev, "[%s] " format, __func__, ## arg)
+
+#define chan_err(d40c, format, arg...)		\
+	d40_err(chan2dev(d40c), format, ## arg)
+
+static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
+			      int lli_len)
 {
+	bool is_log = chan_is_logical(d40c);
 	u32 align;
 	void *base;
@@ -319,7 +349,7 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
 		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
 		d40d->lli_pool.base = NULL;
 	} else {
-		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
+		d40d->lli_pool.size = lli_len * 2 * align;
 
 		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
 		d40d->lli_pool.base = base;
@@ -329,22 +359,37 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
 	}
 
 	if (is_log) {
-		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
-					      align);
-		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
-					      align);
+		d40d->lli_log.src = PTR_ALIGN(base, align);
+		d40d->lli_log.dst = d40d->lli_log.src + lli_len;
+
+		d40d->lli_pool.dma_addr = 0;
 	} else {
-		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
-					      align);
-		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
-					      align);
+		d40d->lli_phy.src = PTR_ALIGN(base, align);
+		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
+
+		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
+							 d40d->lli_phy.src,
+							 d40d->lli_pool.size,
+							 DMA_TO_DEVICE);
+
+		if (dma_mapping_error(d40c->base->dev,
+				      d40d->lli_pool.dma_addr)) {
+			kfree(d40d->lli_pool.base);
+			d40d->lli_pool.base = NULL;
+			d40d->lli_pool.dma_addr = 0;
+			return -ENOMEM;
+		}
 	}
 
 	return 0;
 }
 
-static void d40_pool_lli_free(struct d40_desc *d40d)
+static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
 {
+	if (d40d->lli_pool.dma_addr)
+		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
+				 d40d->lli_pool.size, DMA_TO_DEVICE);
+
 	kfree(d40d->lli_pool.base);
 	d40d->lli_pool.base = NULL;
 	d40d->lli_pool.size = 0;
@@ -391,7 +436,7 @@ static int d40_lcla_free_all(struct d40_chan *d40c,
 	int i;
 	int ret = -EINVAL;
 
-	if (d40c->log_num == D40_PHY_CHAN)
+	if (chan_is_physical(d40c))
 		return 0;
 
 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
@@ -430,7 +475,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 		list_for_each_entry_safe(d, _d, &d40c->client, node)
 			if (async_tx_test_ack(&d->txd)) {
-				d40_pool_lli_free(d);
+				d40_pool_lli_free(d40c, d);
 				d40_desc_remove(d);
 				desc = d;
 				memset(desc, 0, sizeof(*desc));
@@ -450,6 +495,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
 {
+	d40_pool_lli_free(d40c, d40d);
 	d40_lcla_free_all(d40c, d40d);
 	kmem_cache_free(d40c->base->desc_slab, d40d);
 }
@@ -459,57 +505,128 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
 	list_add_tail(&desc->node, &d40c->active);
 }
 
-static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
+{
+	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
+	struct d40_phy_lli *lli_src = desc->lli_phy.src;
+	void __iomem *base = chan_base(chan);
+
+	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
+	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
+	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
+	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
+
+	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
+	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
+	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
+	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
+}
+
+static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 {
-	int curr_lcla = -EINVAL, next_lcla;
+	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
+	struct d40_log_lli_bidir *lli = &desc->lli_log;
+	int lli_current = desc->lli_current;
+	int lli_len = desc->lli_len;
+	bool cyclic = desc->cyclic;
+	int curr_lcla = -EINVAL;
+	int first_lcla = 0;
+	bool linkback;
 
-	if (d40c->log_num == D40_PHY_CHAN) {
-		d40_phy_lli_write(d40c->base->virtbase,
-				  d40c->phy_chan->num,
-				  d40d->lli_phy.dst,
-				  d40d->lli_phy.src);
-		d40d->lli_current = d40d->lli_len;
-	} else {
-		if ((d40d->lli_len - d40d->lli_current) > 1)
-			curr_lcla = d40_lcla_alloc_one(d40c, d40d);
+	/*
+	 * We may have partially running cyclic transfers, in case we did't get
+	 * enough LCLA entries.
+	 */
+	linkback = cyclic && lli_current == 0;
 
-		d40_log_lli_lcpa_write(d40c->lcpa,
-				       &d40d->lli_log.dst[d40d->lli_current],
-				       &d40d->lli_log.src[d40d->lli_current],
-				       curr_lcla);
+	/*
+	 * For linkback, we need one LCLA even with only one link, because we
+	 * can't link back to the one in LCPA space
+	 */
+	if (linkback || (lli_len - lli_current > 1)) {
+		curr_lcla = d40_lcla_alloc_one(chan, desc);
+		first_lcla = curr_lcla;
+	}
 
-		d40d->lli_current++;
-		for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
-			struct d40_log_lli *lcla;
+	/*
+	 * For linkback, we normally load the LCPA in the loop since we need to
+	 * link it to the second LCLA and not the first.  However, if we
+	 * couldn't even get a first LCLA, then we have to run in LCPA and
+	 * reload manually.
+	 */
+	if (!linkback || curr_lcla == -EINVAL) {
+		unsigned int flags = 0;
 
-			if (d40d->lli_current + 1 < d40d->lli_len)
-				next_lcla = d40_lcla_alloc_one(d40c, d40d);
-			else
-				next_lcla = -EINVAL;
+		if (curr_lcla == -EINVAL)
+			flags |= LLI_TERM_INT;
 
-			lcla = d40c->base->lcla_pool.base +
-				d40c->phy_chan->num * 1024 +
-				8 * curr_lcla * 2;
+		d40_log_lli_lcpa_write(chan->lcpa,
+				       &lli->dst[lli_current],
+				       &lli->src[lli_current],
+				       curr_lcla,
+				       flags);
+		lli_current++;
+	}
 
-			d40_log_lli_lcla_write(lcla,
-					       &d40d->lli_log.dst[d40d->lli_current],
-					       &d40d->lli_log.src[d40d->lli_current],
-					       next_lcla);
+	if (curr_lcla < 0)
+		goto out;
 
-			(void) dma_map_single(d40c->base->dev, lcla,
-					      2 * sizeof(struct d40_log_lli),
-					      DMA_TO_DEVICE);
+	for (; lli_current < lli_len; lli_current++) {
+		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
+					   8 * curr_lcla * 2;
+		struct d40_log_lli *lcla = pool->base + lcla_offset;
+		unsigned int flags = 0;
+		int next_lcla;
 
-			curr_lcla = next_lcla;
+		if (lli_current + 1 < lli_len)
+			next_lcla = d40_lcla_alloc_one(chan, desc);
+		else
+			next_lcla = linkback ? first_lcla : -EINVAL;
 
-			if (curr_lcla == -EINVAL) {
-				d40d->lli_current++;
-				break;
-			}
+		if (cyclic || next_lcla == -EINVAL)
+			flags |= LLI_TERM_INT;
+
+		if (linkback && curr_lcla == first_lcla) {
+			/* First link goes in both LCPA and LCLA */
+			d40_log_lli_lcpa_write(chan->lcpa,
+					       &lli->dst[lli_current],
+					       &lli->src[lli_current],
+					       next_lcla, flags);
+		}
+
+		/*
+		 * One unused LCLA in the cyclic case if the very first
+		 * next_lcla fails...
+		 */
+		d40_log_lli_lcla_write(lcla,
+				       &lli->dst[lli_current],
+				       &lli->src[lli_current],
+				       next_lcla, flags);
+
+		dma_sync_single_range_for_device(chan->base->dev,
+					pool->dma_addr, lcla_offset,
+					2 * sizeof(struct d40_log_lli),
+					DMA_TO_DEVICE);
+
+		curr_lcla = next_lcla;
+
+		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
+			lli_current++;
+			break;
 		}
 	}
+
+out:
+	desc->lli_current = lli_current;
+}
+
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+	if (chan_is_physical(d40c)) {
+		d40_phy_lli_load(d40c, d40d);
+		d40d->lli_current = d40d->lli_len;
+	} else
+		d40_log_lli_to_lcxa(d40c, d40d);
 }
 
 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
@@ -543,18 +660,6 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
 	return d;
 }
 
-static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
-{
-	struct d40_desc *d;
-
-	if (list_empty(&d40c->queue))
-		return NULL;
-	list_for_each_entry(d, &d40c->queue, node)
-		if (list_is_last(&d->node, &d40c->queue))
-			break;
-	return d;
-}
-
 static int d40_psize_2_burst_size(bool is_log, int psize)
 {
 	if (is_log) {
@@ -666,9 +771,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
 		}
 
 		if (i == D40_SUSPEND_MAX_IT) {
-			dev_err(&d40c->chan.dev->device,
-				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
-				__func__, d40c->phy_chan->num, d40c->log_num,
+			chan_err(d40c,
+				"unable to suspend the chl %d (log: %d) status %x\n",
+				d40c->phy_chan->num, d40c->log_num,
 				status);
 			dump_stack();
 			ret = -EBUSY;
@@ -701,17 +806,45 @@ static void d40_term_all(struct d40_chan *d40c)
 	d40c->busy = false;
 }
 
+static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
+				   u32 event, int reg)
+{
+	void __iomem *addr = chan_base(d40c) + reg;
+	int tries;
+
+	if (!enable) {
+		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
+		       | ~D40_EVENTLINE_MASK(event), addr);
+		return;
+	}
+
+	/*
+	 * The hardware sometimes doesn't register the enable when src and dst
+	 * event lines are active on the same logical channel.  Retry to ensure
+	 * it does.  Usually only one retry is sufficient.
+	 */
+	tries = 100;
+	while (--tries) {
+		writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
+		       | ~D40_EVENTLINE_MASK(event), addr);
+
+		if (readl(addr) & D40_EVENTLINE_MASK(event))
+			break;
+	}
+
+	if (tries != 99)
+		dev_dbg(chan2dev(d40c),
+			"[%s] workaround enable S%cLNK (%d tries)\n",
+			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
+			100 - tries);
+
+	WARN_ON(!tries);
+}
+
 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
 {
-	u32 val;
 	unsigned long flags;
 
-	/* Notice, that disable requires the physical channel to be stopped */
-	if (do_enable)
-		val = D40_ACTIVATE_EVENTLINE;
-	else
-		val = D40_DEACTIVATE_EVENTLINE;
-
 	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
 
 	/* Enable event line connected to device (or memcpy) */
@@ -719,20 +852,15 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
 
-		writel((val << D40_EVENTLINE_POS(event)) |
-		       ~D40_EVENTLINE_MASK(event),
-		       d40c->base->virtbase + D40_DREG_PCBASE +
-		       d40c->phy_chan->num * D40_DREG_PCDELTA +
-		       D40_CHAN_REG_SSLNK);
+		__d40_config_set_event(d40c, do_enable, event,
+				       D40_CHAN_REG_SSLNK);
 	}
+
 	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
 
-		writel((val << D40_EVENTLINE_POS(event)) |
-		       ~D40_EVENTLINE_MASK(event),
-		       d40c->base->virtbase + D40_DREG_PCBASE +
-		       d40c->phy_chan->num * D40_DREG_PCDELTA +
-		       D40_CHAN_REG_SDLNK);
+		__d40_config_set_event(d40c, do_enable, event,
+				       D40_CHAN_REG_SDLNK);
 	}
 
 	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
@@ -740,15 +868,12 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
 static u32 d40_chan_has_events(struct d40_chan *d40c)
 {
+	void __iomem *chanbase = chan_base(d40c);
 	u32 val;
 
-	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
-		    d40c->phy_chan->num * D40_DREG_PCDELTA +
-		    D40_CHAN_REG_SSLNK);
+	val = readl(chanbase + D40_CHAN_REG_SSLNK);
+	val |= readl(chanbase + D40_CHAN_REG_SDLNK);
 
-	val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
-		     d40c->phy_chan->num * D40_DREG_PCDELTA +
-		     D40_CHAN_REG_SDLNK);
 	return val;
 }
@@ -771,7 +896,7 @@ static u32 d40_get_prmo(struct d40_chan *d40c)
 			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
 	};
 
-	if (d40c->log_num == D40_PHY_CHAN)
+	if (chan_is_physical(d40c))
 		return phy_map[d40c->dma_cfg.mode_opt];
 	else
 		return log_map[d40c->dma_cfg.mode_opt];
@@ -785,7 +910,7 @@ static void d40_config_write(struct d40_chan *d40c)
 	/* Odd addresses are even addresses + 4 */
 	addr_base = (d40c->phy_chan->num % 2) * 4;
 	/* Setup channel mode to logical or physical */
-	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
+	var = ((u32)(chan_is_logical(d40c)) + 1) <<
 		D40_CHAN_POS(d40c->phy_chan->num);
 	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
@@ -794,30 +919,18 @@ static void d40_config_write(struct d40_chan *d40c)
 	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
 
-	if (d40c->log_num != D40_PHY_CHAN) {
+	if (chan_is_logical(d40c)) {
+		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
+			   & D40_SREG_ELEM_LOG_LIDX_MASK;
+		void __iomem *chanbase = chan_base(d40c);
+
 		/* Set default config for CFG reg */
-		writel(d40c->src_def_cfg,
-		       d40c->base->virtbase + D40_DREG_PCBASE +
-		       d40c->phy_chan->num * D40_DREG_PCDELTA +
-		       D40_CHAN_REG_SSCFG);
-		writel(d40c->dst_def_cfg,
-		       d40c->base->virtbase + D40_DREG_PCBASE +
-		       d40c->phy_chan->num * D40_DREG_PCDELTA +
-		       D40_CHAN_REG_SDCFG);
+		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
+		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
 
 		/* Set LIDX for lcla */
-		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
-		       D40_SREG_ELEM_LOG_LIDX_MASK,
-		       d40c->base->virtbase + D40_DREG_PCBASE +
-		       d40c->phy_chan->num * D40_DREG_PCDELTA +
-		       D40_CHAN_REG_SDELT);
-
-		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
-		       D40_SREG_ELEM_LOG_LIDX_MASK,
-		       d40c->base->virtbase + D40_DREG_PCBASE +
-		       d40c->phy_chan->num * D40_DREG_PCDELTA +
-		       D40_CHAN_REG_SSELT);
+		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
+		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
 	}
 }
@@ -825,15 +938,15 @@ static u32 d40_residue(struct d40_chan *d40c)
 {
 	u32 num_elt;
 
-	if (d40c->log_num != D40_PHY_CHAN)
+	if (chan_is_logical(d40c))
 		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
 			>> D40_MEM_LCSP2_ECNT_POS;
-	else
-		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
-				 d40c->phy_chan->num * D40_DREG_PCDELTA +
-				 D40_CHAN_REG_SDELT) &
-			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
-			D40_SREG_ELEM_PHY_ECNT_POS;
+	else {
+		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
+		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
+			  >> D40_SREG_ELEM_PHY_ECNT_POS;
+	}
+
 	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
 }
@@ -841,20 +954,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c)
 {
 	bool is_link;
 
-	if (d40c->log_num != D40_PHY_CHAN)
+	if (chan_is_logical(d40c))
 		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
 	else
-		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
-				d40c->phy_chan->num * D40_DREG_PCDELTA +
-				D40_CHAN_REG_SDLNK) &
-			D40_SREG_LNK_PHYS_LNK_MASK;
+		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) &
+			  D40_SREG_LNK_PHYS_LNK_MASK;
+
 	return is_link;
 }
 
-static int d40_pause(struct dma_chan *chan)
+static int d40_pause(struct d40_chan *d40c)
 {
-	struct d40_chan *d40c =
-		container_of(chan, struct d40_chan, chan);
 	int res = 0;
 	unsigned long flags;
@@ -865,7 +975,7 @@ static int d40_pause(struct dma_chan *chan)
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
 	if (res == 0) {
-		if (d40c->log_num != D40_PHY_CHAN) {
+		if (chan_is_logical(d40c)) {
 			d40_config_set_event(d40c, false);
 			/* Resume the other logical channels if any */
 			if (d40_chan_has_events(d40c))
@@ -878,10 +988,8 @@ static int d40_pause(struct dma_chan *chan)
 	return res;
 }
 
-static int d40_resume(struct dma_chan *chan)
+static int d40_resume(struct d40_chan *d40c)
 {
-	struct d40_chan *d40c =
-		container_of(chan, struct d40_chan, chan);
 	int res = 0;
 	unsigned long flags;
@@ -891,7 +999,7 @@ static int d40_resume(struct dma_chan *chan)
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	if (d40c->base->rev == 0)
-		if (d40c->log_num != D40_PHY_CHAN) {
+		if (chan_is_logical(d40c)) {
 			res = d40_channel_execute_command(d40c,
 							  D40_DMA_SUSPEND_REQ);
 			goto no_suspend;
@@ -900,7 +1008,7 @@ static int d40_resume(struct dma_chan *chan)
 
 	/* If bytes left to transfer or linked tx resume job */
 	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
-		if (d40c->log_num != D40_PHY_CHAN)
+		if (chan_is_logical(d40c))
 			d40_config_set_event(d40c, true);
 
 		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
@@ -911,75 +1019,20 @@ static int d40_resume(struct dma_chan *chan)
 	return res;
 }
 
-static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
-{
-	/* TODO: Write */
-}
-
-static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
+static int d40_terminate_all(struct d40_chan *chan)
 {
-	struct d40_desc *d40d_prev = NULL;
-	int i;
-	u32 val;
-
-	if (!list_empty(&d40c->queue))
-		d40d_prev = d40_last_queued(d40c);
-	else if (!list_empty(&d40c->active))
-		d40d_prev = d40_first_active_get(d40c);
-
-	if (!d40d_prev)
-		return;
-
-	/* Here we try to join this job with previous jobs */
-	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
-		    d40c->phy_chan->num * D40_DREG_PCDELTA +
-		    D40_CHAN_REG_SSLNK);
-
-	/* Figure out which link we're currently transmitting */
-	for (i = 0; i < d40d_prev->lli_len; i++)
-		if (val == d40d_prev->lli_phy.src[i].reg_lnk)
-			break;
-
-	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
-		    d40c->phy_chan->num * D40_DREG_PCDELTA +
-		    D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
-
-	if (i == (d40d_prev->lli_len - 1) && val > 0) {
-		/* Change the current one */
-		writel(virt_to_phys(d40d->lli_phy.src),
-		       d40c->base->virtbase + D40_DREG_PCBASE +
-		       d40c->phy_chan->num * D40_DREG_PCDELTA +
-		       D40_CHAN_REG_SSLNK);
-		writel(virt_to_phys(d40d->lli_phy.dst),
-		       d40c->base->virtbase + D40_DREG_PCBASE +
-		       d40c->phy_chan->num * D40_DREG_PCDELTA +
-		       D40_CHAN_REG_SDLNK);
-
-		d40d->is_hw_linked = true;
-
-	} else if (i < d40d_prev->lli_len) {
-		(void) dma_unmap_single(d40c->base->dev,
-					virt_to_phys(d40d_prev->lli_phy.src),
-					d40d_prev->lli_pool.size,
-					DMA_TO_DEVICE);
+	unsigned long flags;
+	int ret = 0;
 
-		/* Keep the settings */
-		val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
-			~D40_SREG_LNK_PHYS_LNK_MASK;
-		d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
-			val | virt_to_phys(d40d->lli_phy.src);
+	ret = d40_pause(chan);
+	if (!ret && chan_is_physical(chan))
+		ret = d40_channel_execute_command(chan, D40_DMA_STOP);
 
-		val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
-			~D40_SREG_LNK_PHYS_LNK_MASK;
-		d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
-			val | virt_to_phys(d40d->lli_phy.dst);
+	spin_lock_irqsave(&chan->lock, flags);
+	d40_term_all(chan);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
-		(void) dma_map_single(d40c->base->dev,
-				      d40d_prev->lli_phy.src,
-				      d40d_prev->lli_pool.size,
-				      DMA_TO_DEVICE);
-		d40d->is_hw_linked = true;
-	}
+	return ret;
 }
 
 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -990,8 +1043,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
 	unsigned long flags;
 
-	(void) d40_pause(&d40c->chan);
-
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	d40c->chan.cookie++;
@@ -1001,17 +1052,10 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	d40d->txd.cookie = d40c->chan.cookie;
 
-	if (d40c->log_num == D40_PHY_CHAN)
-		d40_tx_submit_phy(d40c, d40d);
-	else
-		d40_tx_submit_log(d40c, d40d);
-
 	d40_desc_queue(d40c, d40d);
 
 	spin_unlock_irqrestore(&d40c->lock, flags);
 
-	(void) d40_resume(&d40c->chan);
-
 	return tx->cookie;
 }
@@ -1020,7 +1064,7 @@ static int d40_start(struct d40_chan *d40c)
 	if (d40c->base->rev == 0) {
 		int err;
 
-		if (d40c->log_num != D40_PHY_CHAN) {
+		if (chan_is_logical(d40c)) {
 			err = d40_channel_execute_command(d40c,
 							  D40_DMA_SUSPEND_REQ);
 			if (err)
@@ -1028,7 +1072,7 @@ static int d40_start(struct d40_chan *d40c)
 		}
 	}
 
-	if (d40c->log_num != D40_PHY_CHAN)
+	if (chan_is_logical(d40c))
 		d40_config_set_event(d40c, true);
 
 	return d40_channel_execute_command(d40c, D40_DMA_RUN);
@@ -1051,21 +1095,14 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
 	/* Add to active queue */
 	d40_desc_submit(d40c, d40d);
 
-	/*
-	 * If this job is already linked in hw,
-	 * do not submit it.
-	 */
+	/* Initiate DMA job */
+	d40_desc_load(d40c, d40d);
 
-	if (!d40d->is_hw_linked) {
-		/* Initiate DMA job */
-		d40_desc_load(d40c, d40d);
+	/* Start dma job */
+	err = d40_start(d40c);
 
-		/* Start dma job */
-		err = d40_start(d40c);
-
-		if (err)
-			return NULL;
-	}
+	if (err)
+		return NULL;
 
 	return d40d;
@@ -1082,17 +1119,36 @@ static void dma_tc_handle(struct d40_chan *d40c)
 	if (d40d == NULL)
 		return;
 
-	d40_lcla_free_all(d40c, d40d);
+	if (d40d->cyclic) {
+		/*
+		 * If this was a paritially loaded list, we need to reloaded
+		 * it, and only when the list is completed.  We need to check
+		 * for done because the interrupt will hit for every link, and
+		 * not just the last one.
+		 */
+		if (d40d->lli_current < d40d->lli_len
+		    && !d40_tx_is_linked(d40c)
+		    && !d40_residue(d40c)) {
+			d40_lcla_free_all(d40c, d40d);
+			d40_desc_load(d40c, d40d);
+			(void) d40_start(d40c);
 
-	if (d40d->lli_current < d40d->lli_len) {
-		d40_desc_load(d40c, d40d);
-		/* Start dma job */
-		(void) d40_start(d40c);
-		return;
-	}
+			if (d40d->lli_current == d40d->lli_len)
+				d40d->lli_current = 0;
+		}
+	} else {
+		d40_lcla_free_all(d40c, d40d);
 
-	if (d40_queue_start(d40c) == NULL)
-		d40c->busy = false;
+		if (d40d->lli_current < d40d->lli_len) {
+			d40_desc_load(d40c, d40d);
+			/* Start dma job */
+			(void) d40_start(d40c);
+			return;
+		}
+
+		if (d40_queue_start(d40c) == NULL)
+			d40c->busy = false;
+	}
 
 	d40c->pending_tx++;
 	tasklet_schedule(&d40c->tasklet);
@@ -1111,11 +1167,11 @@ static void dma_tasklet(unsigned long data)
 	/* Get first active entry from list */
 	d40d = d40_first_active_get(d40c);
 	if (d40d == NULL)
 		goto err;
 
-	d40c->completed = d40d->txd.cookie;
+	if (!d40d->cyclic)
+		d40c->completed = d40d->txd.cookie;
 
 	/*
 	 * If terminating a channel pending_tx is set to zero.
@@ -1130,16 +1186,18 @@ static void dma_tasklet(unsigned long data)
 	callback = d40d->txd.callback;
 	callback_param = d40d->txd.callback_param;
 
-	if (async_tx_test_ack(&d40d->txd)) {
-		d40_pool_lli_free(d40d);
-		d40_desc_remove(d40d);
-		d40_desc_free(d40c, d40d);
-	} else {
-		if (!d40d->is_in_client_list) {
-			d40_desc_remove(d40d);
-			d40_lcla_free_all(d40c, d40d);
-			list_add_tail(&d40d->node, &d40c->client);
-			d40d->is_in_client_list = true;
+	if (!d40d->cyclic) {
+		if (async_tx_test_ack(&d40d->txd)) {
+			d40_pool_lli_free(d40c, d40d);
+			d40_desc_remove(d40d);
+			d40_desc_free(d40c, d40d);
+		} else {
+			if (!d40d->is_in_client_list) {
+				d40_desc_remove(d40d);
+				d40_lcla_free_all(d40c, d40d);
+				list_add_tail(&d40d->node, &d40c->client);
+				d40d->is_in_client_list = true;
+			}
 		}
 	}
@@ -1216,9 +1274,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
 		if (!il[row].is_error)
 			dma_tc_handle(d40c);
 		else
-			dev_err(base->dev,
-				"[%s] IRQ chan: %ld offset %d idx %d\n",
-				__func__, chan, il[row].offset, idx);
+			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
+				chan, il[row].offset, idx);
 
 		spin_unlock(&d40c->lock);
 	}
@@ -1237,8 +1294,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
 	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
 
 	if (!conf->dir) {
-		dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
-			__func__);
+		chan_err(d40c, "Invalid direction.\n");
 		res = -EINVAL;
 	}
@@ -1246,46 +1302,40 @@ static int d40_validate_conf(struct d40_chan *d40c,
 	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
 	    d40c->runtime_addr == 0) {
 
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Invalid TX channel address (%d)\n",
-			__func__, conf->dst_dev_type);
+		chan_err(d40c, "Invalid TX channel address (%d)\n",
+			 conf->dst_dev_type);
 		res = -EINVAL;
 	}
 
 	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
 	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
 	    d40c->runtime_addr == 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Invalid RX channel address (%d)\n",
-			__func__, conf->src_dev_type);
+		chan_err(d40c, "Invalid RX channel address (%d)\n",
+			conf->src_dev_type);
 		res = -EINVAL;
 	}
 
 	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
 	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
-		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
-			__func__);
+		chan_err(d40c, "Invalid dst\n");
 		res = -EINVAL;
 	}
 
 	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
 	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
-		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
-			__func__);
+		chan_err(d40c, "Invalid src\n");
 		res = -EINVAL;
 	}
 
 	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
 	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] No event line\n", __func__);
+		chan_err(d40c, "No event line\n");
 		res = -EINVAL;
 	}
 
 	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
 	    (src_event_group != dst_event_group)) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Invalid event group\n", __func__);
+		chan_err(d40c, "Invalid event group\n");
 		res = -EINVAL;
 	}
@@ -1294,9 +1344,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
 		 * DMAC HW supports it. Will be added to this driver,
 		 * in case any dma client requires it.
 		 */
-		dev_err(&d40c->chan.dev->device,
-			"[%s] periph to periph not supported\n",
-			__func__);
+		chan_err(d40c, "periph to periph not supported\n");
 		res = -EINVAL;
 	}
@@ -1309,9 +1357,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
 		 * src (burst x width) == dst (burst x width)
 		 */
 
-		dev_err(&d40c->chan.dev->device,
-			"[%s] src (burst x width) != dst (burst x width)\n",
-			__func__);
+		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
 		res = -EINVAL;
 	}
@@ -1514,8 +1560,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
 		   dma_has_cap(DMA_SLAVE, cap)) {
 		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
 	} else {
-		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
-			__func__);
+		chan_err(d40c, "No memcpy\n");
 		return -EINVAL;
 	}
@@ -1540,21 +1585,19 @@ static int d40_free_dma(struct d40_chan *d40c)
 	/* Release client owned descriptors */
 	if (!list_empty(&d40c->client))
 		list_for_each_entry_safe(d, _d, &d40c->client, node) {
-			d40_pool_lli_free(d);
+			d40_pool_lli_free(d40c, d);
 			d40_desc_remove(d);
 			d40_desc_free(d40c, d);
 		}
 
 	if (phy == NULL) {
-		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
-			__func__);
+		chan_err(d40c, "phy == null\n");
 		return -EINVAL;
 	}
 
 	if (phy->allocated_src == D40_ALLOC_FREE &&
 	    phy->allocated_dst == D40_ALLOC_FREE) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] channel already free\n", __func__);
+		chan_err(d40c, "channel already free\n");
 		return -EINVAL;
 	}
@@ -1566,19 +1609,17 @@ static int d40_free_dma(struct d40_chan *d40c)
 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
 		is_src = true;
 	} else {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unknown direction\n", __func__);
+		chan_err(d40c, "Unknown direction\n");
 		return -EINVAL;
 	}
 
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
 	if (res) {
-		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
-			__func__);
+		chan_err(d40c, "suspend failed\n");
 		return res;
 	}
 
-	if (d40c->log_num != D40_PHY_CHAN) {
+	if (chan_is_logical(d40c)) {
 		/* Release logical channel, deactivate the event line */
 
 		d40_config_set_event(d40c, false);
@@ -1594,9 +1635,8 @@ static int d40_free_dma(struct d40_chan *d40c)
 			res = d40_channel_execute_command(d40c, D40_DMA_RUN);
 			if (res) {
-				dev_err(&d40c->chan.dev->device,
-					"[%s] Executing RUN command\n",
-					__func__);
+				chan_err(d40c,
+					 "Executing RUN command\n");
 				return res;
 			}
 		}
@@ -1609,8 +1649,7 @@ static int d40_free_dma(struct d40_chan *d40c)
 	/* Release physical channel */
 	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
 	if (res) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Failed to stop channel\n", __func__);
+		chan_err(d40c, "Failed to stop channel\n");
 		return res;
 	}
 	d40c->phy_chan = NULL;
@@ -1622,6 +1661,7 @@ static int d40_free_dma(struct d40_chan *d40c)
 
 static bool d40_is_paused(struct d40_chan *d40c)
 {
+	void __iomem *chanbase = chan_base(d40c);
 	bool is_paused = false;
 	unsigned long flags;
 	void __iomem *active_reg;
@@ -1630,7 +1670,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
 
 	spin_lock_irqsave(&d40c->lock, flags);
 
-	if (d40c->log_num == D40_PHY_CHAN) {
+	if (chan_is_physical(d40c)) {
 		if (d40c->phy_chan->num % 2 == 0)
 			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
 		else
@@ -1648,17 +1688,12 @@ static bool d40_is_paused(struct d40_chan *d40c)
 	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
 	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
-		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
-			       d40c->phy_chan->num * D40_DREG_PCDELTA +
-			       D40_CHAN_REG_SDLNK);
+		status = readl(chanbase + D40_CHAN_REG_SDLNK);
 	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
 		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
-		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
-			       d40c->phy_chan->num * D40_DREG_PCDELTA +
-			       D40_CHAN_REG_SSLNK);
+		status = readl(chanbase + D40_CHAN_REG_SSLNK);
 	} else {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unknown direction\n", __func__);
+		chan_err(d40c, "Unknown direction\n");
 		goto _exit;
 	}
@@ -1688,114 +1723,184 @@ static u32 stedma40_residue(struct dma_chan *chan)
 	return bytes_left;
 }
 
-struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
-						   struct scatterlist *sgl_dst,
-						   struct scatterlist *sgl_src,
-						   unsigned int sgl_len,
-						   unsigned long dma_flags)
-{
-	int res;
-	struct d40_desc *d40d;
-	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
-	unsigned long flags;
-
-	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unallocated channel.\n", __func__);
-		return ERR_PTR(-EINVAL);
-	}
-
-	spin_lock_irqsave(&d40c->lock, flags);
-	d40d = d40_desc_get(d40c);
-
-	if (d40d == NULL)
-		goto err;
-
-	d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
-					d40c->dma_cfg.src_info.data_width,
-					d40c->dma_cfg.dst_info.data_width);
-	if (d40d->lli_len < 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unaligned size\n", __func__);
-		goto err;
-	}
-
-	d40d->lli_current = 0;
-	d40d->txd.flags = dma_flags;
-
-	if (d40c->log_num != D40_PHY_CHAN) {
-		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
-			dev_err(&d40c->chan.dev->device,
-				"[%s] Out of memory\n", __func__);
-			goto err;
-		}
-
-		(void) d40_log_sg_to_lli(sgl_src, sgl_len,
-					 d40d->lli_log.src,
-					 d40c->log_def.lcsp1,
-					 d40c->dma_cfg.src_info.data_width,
-					 d40c->dma_cfg.dst_info.data_width);
-
-		(void) d40_log_sg_to_lli(sgl_dst, sgl_len,
-					 d40d->lli_log.dst,
-					 d40c->log_def.lcsp3,
-					 d40c->dma_cfg.dst_info.data_width,
-					 d40c->dma_cfg.src_info.data_width);
-	} else {
-		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
-			dev_err(&d40c->chan.dev->device,
-				"[%s] Out of memory\n", __func__);
-			goto err;
-		}
-
-		res = d40_phy_sg_to_lli(sgl_src, sgl_len, 0,
-					d40d->lli_phy.src,
-					virt_to_phys(d40d->lli_phy.src),
-					d40c->src_def_cfg,
-					d40c->dma_cfg.src_info.data_width,
-					d40c->dma_cfg.dst_info.data_width,
-					d40c->dma_cfg.src_info.psize);
-
-		if (res < 0)
-			goto err;
-
-		res = d40_phy_sg_to_lli(sgl_dst, sgl_len, 0,
-					d40d->lli_phy.dst,
-					virt_to_phys(d40d->lli_phy.dst),
-					d40c->dst_def_cfg,
-					d40c->dma_cfg.dst_info.data_width,
-					d40c->dma_cfg.src_info.data_width,
-					d40c->dma_cfg.dst_info.psize);
-
-		if (res < 0)
-			goto err;
-
-		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
-				      d40d->lli_pool.size, DMA_TO_DEVICE);
-	}
-
-	dma_async_tx_descriptor_init(&d40d->txd, chan);
-
-	d40d->txd.tx_submit = d40_tx_submit;
-
-	spin_unlock_irqrestore(&d40c->lock, flags);
-
-	return &d40d->txd;
-err:
-	if (d40d)
-		d40_desc_free(d40c, d40d);
-	spin_unlock_irqrestore(&d40c->lock, flags);
-	return NULL;
-}
-EXPORT_SYMBOL(stedma40_memcpy_sg);
+static int
+d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
+		struct scatterlist *sg_src, struct scatterlist *sg_dst,
+		unsigned int sg_len, dma_addr_t src_dev_addr,
+		dma_addr_t dst_dev_addr)
+{
+	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+	struct stedma40_half_channel_info *src_info = &cfg->src_info;
+	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
+	int ret;
+
+	ret = d40_log_sg_to_lli(sg_src, sg_len, src_dev_addr,
+				desc->lli_log.src,
+				chan->log_def.lcsp1,
+				src_info->data_width,
+				dst_info->data_width);
+
+	ret = d40_log_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
+				desc->lli_log.dst,
+				chan->log_def.lcsp3,
+				dst_info->data_width,
+				src_info->data_width);
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
+		struct scatterlist *sg_src, struct scatterlist *sg_dst,
+		unsigned int sg_len, dma_addr_t src_dev_addr,
+		dma_addr_t dst_dev_addr)
+{
+	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+	struct stedma40_half_channel_info *src_info = &cfg->src_info;
+	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
+	unsigned long flags = 0;
+	int ret;
+
+	if (desc->cyclic)
+		flags |= LLI_CYCLIC | LLI_TERM_INT;
+
+	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
+				desc->lli_phy.src,
+				virt_to_phys(desc->lli_phy.src),
+				chan->src_def_cfg,
+				src_info, dst_info, flags);
+
+	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
+				desc->lli_phy.dst,
+				virt_to_phys(desc->lli_phy.dst),
+				chan->dst_def_cfg,
+				dst_info, src_info, flags);
+
+	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
+				   desc->lli_pool.size, DMA_TO_DEVICE);
+
+	return ret < 0 ? ret : 0;
+}
+
+static struct d40_desc *
+d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
+	      unsigned int sg_len, unsigned long dma_flags)
+{
+	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+	struct d40_desc *desc;
+	int ret;
+
+	desc = d40_desc_get(chan);
+	if (!desc)
+		return NULL;
+
+	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
+					cfg->dst_info.data_width);
+	if (desc->lli_len < 0) {
+		chan_err(chan, "Unaligned size\n");
+		goto err;
+	}
+
+	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
+	if (ret < 0) {
+		chan_err(chan, "Could not allocate lli\n");
+		goto err;
+	}
+
+	desc->lli_current = 0;
+	desc->txd.flags = dma_flags;
+	desc->txd.tx_submit = d40_tx_submit;
+
+	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
+
+	return desc;
+
+err:
+	d40_desc_free(chan, desc);
+	return NULL;
+}
+
+static dma_addr_t
+d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
+{
+	struct stedma40_platform_data *plat = chan->base->plat_data;
+	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+	dma_addr_t addr;
+
+	if (chan->runtime_addr)
+		return chan->runtime_addr;
+
+	if (direction == DMA_FROM_DEVICE)
+		addr = plat->dev_rx[cfg->src_dev_type];
+	else if (direction == DMA_TO_DEVICE)
+		addr = plat->dev_tx[cfg->dst_dev_type];
+
+	return addr;
+}
+
+static struct dma_async_tx_descriptor *
+d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
+	    struct scatterlist *sg_dst, unsigned int sg_len,
+	    enum dma_data_direction direction, unsigned long dma_flags)
+{
+	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
+	dma_addr_t src_dev_addr = 0;
+	dma_addr_t dst_dev_addr = 0;
+	struct d40_desc *desc;
+	unsigned long flags;
+	int ret;
+
+	if (!chan->phy_chan) {
+		chan_err(chan, "Cannot prepare unallocated channel\n");
+		return NULL;
+	}
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
+	if (desc == NULL)
+		goto err;
+
+	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
+		desc->cyclic = true;
+
+	if (direction != DMA_NONE) {
+		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
+
+		if (direction == DMA_FROM_DEVICE)
+			src_dev_addr = dev_addr;
+		else if (direction == DMA_TO_DEVICE)
+			dst_dev_addr = dev_addr;
+	}
+
+	if (chan_is_logical(chan))
+		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
+				      sg_len, src_dev_addr, dst_dev_addr);
+	else
+		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
+				      sg_len, src_dev_addr, dst_dev_addr);
+
+	if (ret) {
+		chan_err(chan, "Failed to prepare %s sg job: %d\n",
+			 chan_is_logical(chan) ? "log" : "phy", ret);
+		goto err;
+	}
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return &desc->txd;
+
+err:
+	if (desc)
+		d40_desc_free(chan, desc);
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return NULL;
+}
 
 bool stedma40_filter(struct dma_chan *chan, void *data)
 {
@@ -1818,6 +1923,38 @@ bool stedma40_filter(struct dma_chan *chan, void *data)
 }
 EXPORT_SYMBOL(stedma40_filter);
 
+static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
+{
+	bool realtime = d40c->dma_cfg.realtime;
+	bool highprio = d40c->dma_cfg.high_priority;
+	u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
+	u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
+	u32 event = D40_TYPE_TO_EVENT(dev_type);
+	u32 group = D40_TYPE_TO_GROUP(dev_type);
+	u32 bit = 1 << event;
+
+	/* Destination event lines are stored in the upper halfword */
+	if (!src)
+		bit <<= 16;
+
+	writel(bit, d40c->base->virtbase + prioreg + group * 4);
+	writel(bit, d40c->base->virtbase + rtreg + group * 4);
+}
+
+static void d40_set_prio_realtime(struct d40_chan *d40c)
+{
+	if (d40c->base->rev < 3)
+		return;
+
+	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
+	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+		__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
+
+	if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
+	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
+		__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
+}
+
 /* DMA ENGINE functions */
 static int d40_alloc_chan_resources(struct dma_chan *chan)
 {
@@ -1834,9 +1971,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	if (!d40c->configured) {
 		err = d40_config_memcpy(d40c);
 		if (err) {
-			dev_err(&d40c->chan.dev->device,
-				"[%s] Failed to configure memcpy channel\n",
-				__func__);
+			chan_err(d40c, "Failed to configure memcpy channel\n");
 			goto fail;
 		}
 	}
@@ -1844,16 +1979,17 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	err = d40_allocate_channel(d40c);
 	if (err) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Failed to allocate channel\n", __func__);
+		chan_err(d40c, "Failed to allocate channel\n");
 		goto fail;
 	}
 
 	/* Fill in basic CFG register values */
 	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
-		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
+		    &d40c->dst_def_cfg, chan_is_logical(d40c));
 
-	if (d40c->log_num != D40_PHY_CHAN) {
+	d40_set_prio_realtime(d40c);
+
+	if (chan_is_logical(d40c)) {
 		d40_log_cfg(&d40c->dma_cfg,
 			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
unsigned
long
flags
;
if
(
d40c
->
phy_chan
==
NULL
)
{
dev_err
(
&
d40c
->
chan
.
dev
->
device
,
"[%s] Cannot free unallocated channel
\n
"
,
__func__
);
chan_err
(
d40c
,
"Cannot free unallocated channel
\n
"
);
return
;
}
...
...
@@ -1897,8 +2032,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
 	err = d40_free_dma(d40c);
 
 	if (err)
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Failed to free channel\n", __func__);
+		chan_err(d40c, "Failed to free channel\n");
 	spin_unlock_irqrestore(&d40c->lock, flags);
 }
@@ -1908,251 +2042,31 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 						       size_t size,
 						       unsigned long dma_flags)
 {
-	struct d40_desc *d40d;
-	struct d40_chan *d40c = container_of(chan, struct d40_chan,
-					     chan);
-	unsigned long flags;
-
-	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Channel is not allocated.\n", __func__);
-		return ERR_PTR(-EINVAL);
-	}
-
-	spin_lock_irqsave(&d40c->lock, flags);
-	d40d = d40_desc_get(d40c);
-
-	if (d40d == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Descriptor is NULL\n", __func__);
-		goto err;
-	}
-
-	d40d->txd.flags = dma_flags;
-	d40d->lli_len = d40_size_2_dmalen(size,
-					  d40c->dma_cfg.src_info.data_width,
-					  d40c->dma_cfg.dst_info.data_width);
-	if (d40d->lli_len < 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unaligned size\n", __func__);
-		goto err;
-	}
-
-	dma_async_tx_descriptor_init(&d40d->txd, chan);
-	d40d->txd.tx_submit = d40_tx_submit;
-
-	if (d40c->log_num != D40_PHY_CHAN) {
-		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
-			dev_err(&d40c->chan.dev->device,
-				"[%s] Out of memory\n", __func__);
-			goto err;
-		}
-		d40d->lli_current = 0;
-
-		if (d40_log_buf_to_lli(d40d->lli_log.src, src, size,
-				       d40c->log_def.lcsp1,
-				       d40c->dma_cfg.src_info.data_width,
-				       d40c->dma_cfg.dst_info.data_width,
-				       true) == NULL)
-			goto err;
-
-		if (d40_log_buf_to_lli(d40d->lli_log.dst, dst, size,
-				       d40c->log_def.lcsp3,
-				       d40c->dma_cfg.dst_info.data_width,
-				       d40c->dma_cfg.src_info.data_width,
-				       true) == NULL)
-			goto err;
-
-	} else {
-		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
-			dev_err(&d40c->chan.dev->device,
-				"[%s] Out of memory\n", __func__);
-			goto err;
-		}
-
-		if (d40_phy_buf_to_lli(d40d->lli_phy.src, src, size,
-				       d40c->dma_cfg.src_info.psize,
-				       0, d40c->src_def_cfg, true,
-				       d40c->dma_cfg.src_info.data_width,
-				       d40c->dma_cfg.dst_info.data_width,
-				       false) == NULL)
-			goto err;
-
-		if (d40_phy_buf_to_lli(d40d->lli_phy.dst, dst, size,
-				       d40c->dma_cfg.dst_info.psize,
-				       0, d40c->dst_def_cfg, true,
-				       d40c->dma_cfg.dst_info.data_width,
-				       d40c->dma_cfg.src_info.data_width,
-				       false) == NULL)
-			goto err;
-
-		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
-				      d40d->lli_pool.size, DMA_TO_DEVICE);
-	}
-
-	spin_unlock_irqrestore(&d40c->lock, flags);
-	return &d40d->txd;
-
-err:
-	if (d40d)
-		d40_desc_free(d40c, d40d);
-	spin_unlock_irqrestore(&d40c->lock, flags);
-	return NULL;
+	struct scatterlist dst_sg;
+	struct scatterlist src_sg;
+
+	sg_init_table(&dst_sg, 1);
+	sg_init_table(&src_sg, 1);
+
+	sg_dma_address(&dst_sg) = dst;
+	sg_dma_address(&src_sg) = src;
+
+	sg_dma_len(&dst_sg) = size;
+	sg_dma_len(&src_sg) = size;
+
+	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
 }
 
 static struct dma_async_tx_descriptor *
-d40_prep_sg(struct dma_chan *chan,
-	    struct scatterlist *dst_sg, unsigned int dst_nents,
-	    struct scatterlist *src_sg, unsigned int src_nents,
-	    unsigned long dma_flags)
+d40_prep_memcpy_sg(struct dma_chan *chan,
+		   struct scatterlist *dst_sg, unsigned int dst_nents,
+		   struct scatterlist *src_sg, unsigned int src_nents,
+		   unsigned long dma_flags)
 {
 	if (dst_nents != src_nents)
 		return NULL;
 
-	return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
-}
-
-static int d40_prep_slave_sg_log(struct d40_desc *d40d,
-				 struct d40_chan *d40c,
-				 struct scatterlist *sgl,
-				 unsigned int sg_len,
-				 enum dma_data_direction direction,
-				 unsigned long dma_flags)
-{
-	dma_addr_t dev_addr = 0;
-	int total_size;
-
-	d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
-					d40c->dma_cfg.src_info.data_width,
-					d40c->dma_cfg.dst_info.data_width);
-	if (d40d->lli_len < 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unaligned size\n", __func__);
-		return -EINVAL;
-	}
-
-	if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Out of memory\n", __func__);
-		return -ENOMEM;
-	}
-
-	d40d->lli_current = 0;
-
-	if (direction == DMA_FROM_DEVICE)
-		if (d40c->runtime_addr)
-			dev_addr = d40c->runtime_addr;
-		else
-			dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
-	else if (direction == DMA_TO_DEVICE)
-		if (d40c->runtime_addr)
-			dev_addr = d40c->runtime_addr;
-		else
-			dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
-	else
-		return -EINVAL;
-
-	total_size = d40_log_sg_to_dev(sgl, sg_len,
-				       &d40d->lli_log,
-				       &d40c->log_def,
-				       d40c->dma_cfg.src_info.data_width,
-				       d40c->dma_cfg.dst_info.data_width,
-				       direction, dev_addr);
-
-	if (total_size < 0)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
-				 struct d40_chan *d40c,
-				 struct scatterlist *sgl,
-				 unsigned int sgl_len,
-				 enum dma_data_direction direction,
-				 unsigned long dma_flags)
-{
-	dma_addr_t src_dev_addr;
-	dma_addr_t dst_dev_addr;
-	int res;
-
-	d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
-					d40c->dma_cfg.src_info.data_width,
-					d40c->dma_cfg.dst_info.data_width);
-	if (d40d->lli_len < 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Unaligned size\n", __func__);
-		return -EINVAL;
-	}
-
-	if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Out of memory\n", __func__);
-		return -ENOMEM;
-	}
-
-	d40d->lli_current = 0;
-
-	if (direction == DMA_FROM_DEVICE) {
-		dst_dev_addr = 0;
-		if (d40c->runtime_addr)
-			src_dev_addr = d40c->runtime_addr;
-		else
-			src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
-	} else if (direction == DMA_TO_DEVICE) {
-		if (d40c->runtime_addr)
-			dst_dev_addr = d40c->runtime_addr;
-		else
-			dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
-		src_dev_addr = 0;
-	} else
-		return -EINVAL;
-
-	res = d40_phy_sg_to_lli(sgl, sgl_len, src_dev_addr,
-				d40d->lli_phy.src,
-				virt_to_phys(d40d->lli_phy.src),
-				d40c->src_def_cfg,
-				d40c->dma_cfg.src_info.data_width,
-				d40c->dma_cfg.dst_info.data_width,
-				d40c->dma_cfg.src_info.psize);
-	if (res < 0)
-		return res;
-
-	res = d40_phy_sg_to_lli(sgl, sgl_len, dst_dev_addr,
-				d40d->lli_phy.dst,
-				virt_to_phys(d40d->lli_phy.dst),
-				d40c->dst_def_cfg,
-				d40c->dma_cfg.dst_info.data_width,
-				d40c->dma_cfg.src_info.data_width,
-				d40c->dma_cfg.dst_info.psize);
-	if (res < 0)
-		return res;
-
-	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
-			      d40d->lli_pool.size, DMA_TO_DEVICE);
-	return 0;
+	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
 }
 
 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
@@ -2161,52 +2075,40 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 					     enum dma_data_direction direction,
 					     unsigned long dma_flags)
 {
-	struct d40_desc *d40d;
-	struct d40_chan *d40c = container_of(chan, struct d40_chan,
-					     chan);
-	unsigned long flags;
-	int err;
-
-	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Cannot prepare unallocated channel\n", __func__);
-		return ERR_PTR(-EINVAL);
-	}
-
-	spin_lock_irqsave(&d40c->lock, flags);
-	d40d = d40_desc_get(d40c);
-
-	if (d40d == NULL)
-		goto err;
+	if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
+		return NULL;
 
-	if (d40c->log_num != D40_PHY_CHAN)
-		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
-					    direction, dma_flags);
-	else
-		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
-					    direction, dma_flags);
-	if (err) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Failed to prepare %s slave sg job: %d\n",
-			__func__,
-			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
-		goto err;
-	}
+	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
+}
 
-	d40d->txd.flags = dma_flags;
+static struct dma_async_tx_descriptor *
+dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+		     size_t buf_len, size_t period_len,
+		     enum dma_data_direction direction)
+{
+	unsigned int periods = buf_len / period_len;
+	struct dma_async_tx_descriptor *txd;
+	struct scatterlist *sg;
+	int i;
 
-	dma_async_tx_descriptor_init(&d40d->txd, chan);
+	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
+	for (i = 0; i < periods; i++) {
+		sg_dma_address(&sg[i]) = dma_addr;
+		sg_dma_len(&sg[i]) = period_len;
+		dma_addr += period_len;
+	}
 
-	d40d->txd.tx_submit = d40_tx_submit;
+	sg[periods].offset = 0;
+	sg[periods].length = 0;
+	sg[periods].page_link =
+		((unsigned long)sg | 0x01) & ~0x02;
 
-	spin_unlock_irqrestore(&d40c->lock, flags);
+	txd = d40_prep_sg(chan, sg, sg, periods, direction,
+			  DMA_PREP_INTERRUPT);
 
-	return &d40d->txd;
+	kfree(sg);
 
-err:
-	if (d40d)
-		d40_desc_free(d40c, d40d);
-	spin_unlock_irqrestore(&d40c->lock, flags);
-	return NULL;
+	return txd;
 }
 
 static enum dma_status d40_tx_status(struct dma_chan *chan,
@@ -2219,9 +2121,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
 	int ret;
 
 	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Cannot read status of unallocated channel\n",
-			__func__);
+		chan_err(d40c, "Cannot read status of unallocated channel\n");
 		return -EINVAL;
 	}
@@ -2245,8 +2145,7 @@ static void d40_issue_pending(struct dma_chan *chan)
 	unsigned long flags;
 
 	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Channel is not allocated!\n", __func__);
+		chan_err(d40c, "Channel is not allocated!\n");
 		return;
 	}
@@ -2339,7 +2238,7 @@ static void d40_set_runtime_config(struct dma_chan *chan,
 			return;
 	}
 
-	if (d40c->log_num != D40_PHY_CHAN) {
+	if (chan_is_logical(d40c)) {
 		if (config_maxburst >= 16)
 			psize = STEDMA40_PSIZE_LOG_16;
 		else if (config_maxburst >= 8)
@@ -2372,7 +2271,7 @@ static void d40_set_runtime_config(struct dma_chan *chan,
 	cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
 
 	/* Fill in register values */
-	if (d40c->log_num != D40_PHY_CHAN)
+	if (chan_is_logical(d40c))
 		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
 	else
 		d40_phy_cfg(cfg, &d40c->src_def_cfg,
@@ -2393,25 +2292,20 @@ static void d40_set_runtime_config(struct dma_chan *chan,
 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		       unsigned long arg)
 {
-	unsigned long flags;
 	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
 
 	if (d40c->phy_chan == NULL) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s] Channel is not allocated!\n", __func__);
+		chan_err(d40c, "Channel is not allocated!\n");
 		return -EINVAL;
 	}
 
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
-		spin_lock_irqsave(&d40c->lock, flags);
-		d40_term_all(d40c);
-		spin_unlock_irqrestore(&d40c->lock, flags);
-		return 0;
+		return d40_terminate_all(d40c);
 	case DMA_PAUSE:
-		return d40_pause(chan);
+		return d40_pause(d40c);
 	case DMA_RESUME:
-		return d40_resume(chan);
+		return d40_resume(d40c);
 	case DMA_SLAVE_CONFIG:
 		d40_set_runtime_config(chan,
 			(struct dma_slave_config *) arg);
@@ -2456,6 +2350,35 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
 	}
 }

+static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
+{
+	if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
+		dev->device_prep_slave_sg = d40_prep_slave_sg;
+
+	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
+		dev->device_prep_dma_memcpy = d40_prep_memcpy;
+
+		/*
+		 * This controller can only access address at even
+		 * 32bit boundaries, i.e. 2^2
+		 */
+		dev->copy_align = 2;
+	}
+
+	if (dma_has_cap(DMA_SG, dev->cap_mask))
+		dev->device_prep_dma_sg = d40_prep_memcpy_sg;
+
+	if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
+		dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
+
+	dev->device_alloc_chan_resources = d40_alloc_chan_resources;
+	dev->device_free_chan_resources = d40_free_chan_resources;
+	dev->device_issue_pending = d40_issue_pending;
+	dev->device_tx_status = d40_tx_status;
+	dev->device_control = d40_control;
+	dev->dev = base->dev;
+}
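As the comment in d40_ops_init notes, copy_align = 2 means memcpy clients must keep source, destination and length on 1 << 2 = 4-byte boundaries. A hypothetical helper (not part of the driver) sketching the check a caller could apply before issuing a memcpy:

	/* Sketch only: validate a request against copy_align = 2 (4 bytes). */
	static bool d40_memcpy_aligned(dma_addr_t src, dma_addr_t dst, size_t len)
	{
		const size_t align = 1 << 2;

		return !(src & (align - 1)) && !(dst & (align - 1)) &&
		       !(len & (align - 1));
	}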
static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
...
...
@@ -2466,23 +2389,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	dma_cap_zero(base->dma_slave.cap_mask);
 	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

-	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
-	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
-	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
-	base->dma_slave.device_prep_dma_sg = d40_prep_sg;
-	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
-	base->dma_slave.device_tx_status = d40_tx_status;
-	base->dma_slave.device_issue_pending = d40_issue_pending;
-	base->dma_slave.device_control = d40_control;
-	base->dma_slave.dev = base->dev;
+	d40_ops_init(base, &base->dma_slave);

 	err = dma_async_device_register(&base->dma_slave);

 	if (err) {
-		dev_err(base->dev,
-			"[%s] Failed to register slave channels\n",
-			__func__);
+		d40_err(base->dev, "Failed to register slave channels\n");
 		goto failure1;
 	}
...
...
@@ -2491,29 +2405,15 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	dma_cap_zero(base->dma_memcpy.cap_mask);
 	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
-	dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
-
-	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
-	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
-	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
-	base->dma_slave.device_prep_dma_sg = d40_prep_sg;
-	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
-	base->dma_memcpy.device_tx_status = d40_tx_status;
-	base->dma_memcpy.device_issue_pending = d40_issue_pending;
-	base->dma_memcpy.device_control = d40_control;
-	base->dma_memcpy.dev = base->dev;
-	/*
-	 * This controller can only access address at even
-	 * 32bit boundaries, i.e. 2^2
-	 */
-	base->dma_memcpy.copy_align = 2;
+	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
+
+	d40_ops_init(base, &base->dma_memcpy);

 	err = dma_async_device_register(&base->dma_memcpy);

 	if (err) {
-		dev_err(base->dev,
-			"[%s] Failed to regsiter memcpy only channels\n",
-			__func__);
+		d40_err(base->dev,
+			"Failed to regsiter memcpy only channels\n");
 		goto failure2;
 	}
...
...
@@ -2523,24 +2423,15 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	dma_cap_zero(base->dma_both.cap_mask);
 	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
 	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
-	dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
-
-	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
-	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
-	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
-	base->dma_slave.device_prep_dma_sg = d40_prep_sg;
-	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
-	base->dma_both.device_tx_status = d40_tx_status;
-	base->dma_both.device_issue_pending = d40_issue_pending;
-	base->dma_both.device_control = d40_control;
-	base->dma_both.dev = base->dev;
-	base->dma_both.copy_align = 2;
+	dma_cap_set(DMA_SG, base->dma_both.cap_mask);
+	dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
+
+	d40_ops_init(base, &base->dma_both);
 	err = dma_async_device_register(&base->dma_both);

 	if (err) {
-		dev_err(base->dev,
-			"[%s] Failed to register logical and physical capable channels\n",
-			__func__);
+		d40_err(base->dev,
+			"Failed to register logical and physical capable channels\n");
 		goto failure3;
 	}
 	return 0;
...
...
@@ -2616,9 +2507,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
 	/*
 	 * D40_DREG_PERIPHID2 Depends on HW revision:
-	 *  MOP500/HREF ED has 0x0008,
+	 *  DB8500ed has 0x0008,
 	 *  ? has 0x0018,
-	 *  HREF V1 has 0x0028
+	 *  DB8500v1 has 0x0028
+	 *  DB8500v2 has 0x0038
 	 */
 	{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
...
...
@@ -2642,8 +2534,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	clk = clk_get(&pdev->dev, NULL);

 	if (IS_ERR(clk)) {
-		dev_err(&pdev->dev, "[%s] No matching clock found\n",
-			__func__);
+		d40_err(&pdev->dev, "No matching clock found\n");
 		goto failure;
 	}
...
...
@@ -2666,9 +2557,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
 		if (dma_id_regs[i].val !=
 		    readl(virtbase + dma_id_regs[i].reg)) {
-			dev_err(&pdev->dev,
-				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
-				__func__,
+			d40_err(&pdev->dev,
+				"Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
 				dma_id_regs[i].val,
 				dma_id_regs[i].reg,
 				readl(virtbase + dma_id_regs[i].reg));
...
...
@@ -2681,9 +2571,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
 	    D40_HW_DESIGNER) {
-		dev_err(&pdev->dev,
-			"[%s] Unknown designer! Got %x wanted %x\n",
-			__func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
+		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
+			val & D40_DREG_PERIPHID2_DESIGNER_MASK,
 			D40_HW_DESIGNER);
 		goto failure;
 	}
...
...
@@ -2713,7 +2602,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		       sizeof(struct d40_chan), GFP_KERNEL);

 	if (base == NULL) {
-		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
+		d40_err(&pdev->dev, "Out of memory\n");
 		goto failure;
 	}
...
...
@@ -2860,6 +2749,7 @@ static void __init d40_hw_init(struct d40_base *base)
 static int __init d40_lcla_allocate(struct d40_base *base)
 {
+	struct d40_lcla_pool *pool = &base->lcla_pool;
 	unsigned long *page_list;
 	int i, j;
 	int ret = 0;
...
...
@@ -2885,9 +2775,8 @@ static int __init d40_lcla_allocate(struct d40_base *base)
 						base->lcla_pool.pages);
 		if (!page_list[i]) {

-			dev_err(base->dev,
-				"[%s] Failed to allocate %d pages.\n",
-				__func__, base->lcla_pool.pages);
+			d40_err(base->dev, "Failed to allocate %d pages.\n",
+				base->lcla_pool.pages);

 			for (j = 0; j < i; j++)
 				free_pages(page_list[j], base->lcla_pool.pages);
...
...
@@ -2925,6 +2814,15 @@ static int __init d40_lcla_allocate(struct d40_base *base)
 						 LCLA_ALIGNMENT);
 	}

+	pool->dma_addr = dma_map_single(base->dev, pool->base,
+					SZ_1K * base->num_phy_chans,
+					DMA_TO_DEVICE);
+	if (dma_mapping_error(base->dev, pool->dma_addr)) {
+		pool->dma_addr = 0;
+		ret = -ENOMEM;
+		goto failure;
+	}
+
 	writel(virt_to_phys(base->lcla_pool.base),
 	       base->virtbase + D40_DREG_LCLA);
 failure:
...
...
@@ -2957,9 +2855,7 @@ static int __init d40_probe(struct platform_device *pdev)
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
 	if (!res) {
 		ret = -ENOENT;
-		dev_err(&pdev->dev,
-			"[%s] No \"lcpa\" memory resource\n",
-			__func__);
+		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
 		goto failure;
 	}
 	base->lcpa_size = resource_size(res);
...
...
@@ -2968,9 +2864,9 @@ static int __init d40_probe(struct platform_device *pdev)
 	if (request_mem_region(res->start, resource_size(res),
 			       D40_NAME " I/O lcpa") == NULL) {
 		ret = -EBUSY;
-		dev_err(&pdev->dev,
-			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
-			__func__, res->start, res->end);
+		d40_err(&pdev->dev,
+			"Failed to request LCPA region 0x%x-0x%x\n",
+			res->start, res->end);
 		goto failure;
 	}
...
...
@@ -2986,16 +2882,13 @@ static int __init d40_probe(struct platform_device *pdev)
 	base->lcpa_base = ioremap(res->start, resource_size(res));
 	if (!base->lcpa_base) {
 		ret = -ENOMEM;
-		dev_err(&pdev->dev,
-			"[%s] Failed to ioremap LCPA region\n",
-			__func__);
+		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
 		goto failure;
 	}

 	ret = d40_lcla_allocate(base);
 	if (ret) {
-		dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
-			__func__);
+		d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
 		goto failure;
 	}
...
...
@@ -3004,9 +2897,8 @@ static int __init d40_probe(struct platform_device *pdev)
 	base->irq = platform_get_irq(pdev, 0);

 	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
 	if (ret) {
-		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
+		d40_err(&pdev->dev, "No IRQ defined\n");
 		goto failure;
 	}
...
...
@@ -3025,6 +2917,12 @@ static int __init d40_probe(struct platform_device *pdev)
 		kmem_cache_destroy(base->desc_slab);
 		if (base->virtbase)
 			iounmap(base->virtbase);

+		if (base->lcla_pool.dma_addr)
+			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
+					 SZ_1K * base->num_phy_chans,
+					 DMA_TO_DEVICE);
+
 		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
 			free_pages((unsigned long)base->lcla_pool.base,
 				   base->lcla_pool.pages);
...
...
@@ -3049,7 +2947,7 @@ static int __init d40_probe(struct platform_device *pdev)
 		kfree(base);
 	}

-	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
+	d40_err(&pdev->dev, "probe failed\n");
 	return ret;
 }
...
...
@@ -3060,7 +2958,7 @@ static struct platform_driver d40_driver = {
 	},
 };

-int __init stedma40_init(void)
+static int __init stedma40_init(void)
 {
 	return platform_driver_probe(&d40_driver, d40_probe);
 }
...
...
drivers/dma/ste_dma40_ll.c (view file @ 80cc07af)
...
...
@@ -125,13 +125,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
 static int d40_phy_fill_lli(struct d40_phy_lli *lli,
 			    dma_addr_t data,
 			    u32 data_size,
-			    int psize,
 			    dma_addr_t next_lli,
 			    u32 reg_cfg,
-			    bool term_int,
-			    u32 data_width,
-			    bool is_device)
+			    struct stedma40_half_channel_info *info,
+			    unsigned int flags)
 {
+	bool addr_inc = flags & LLI_ADDR_INC;
+	bool term_int = flags & LLI_TERM_INT;
+	unsigned int data_width = info->data_width;
+	int psize = info->psize;
 	int num_elems;

 	if (psize == STEDMA40_PSIZE_PHY_1)
...
...
@@ -154,7 +156,7 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli,
 	 * Distance to next element sized entry.
 	 * Usually the size of the element unless you want gaps.
 	 */
-	if (!is_device)
+	if (addr_inc)
 		lli->reg_elt |= (0x1 << data_width) <<
 			D40_SREG_ELEM_PHY_EIDX_POS;
...
...
@@ -198,47 +200,51 @@ static int d40_seg_size(int size, int data_width1, int data_width2)
 	return seg_max;
 }

-struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
-				       dma_addr_t addr,
-				       u32 size,
-				       int psize,
-				       dma_addr_t lli_phys,
-				       u32 reg_cfg,
-				       bool term_int,
-				       u32 data_width1,
-				       u32 data_width2,
-				       bool is_device)
+static struct d40_phy_lli *
+d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
+		   dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg,
+		   struct stedma40_half_channel_info *info,
+		   struct stedma40_half_channel_info *otherinfo,
+		   unsigned long flags)
 {
+	bool lastlink = flags & LLI_LAST_LINK;
+	bool addr_inc = flags & LLI_ADDR_INC;
+	bool term_int = flags & LLI_TERM_INT;
+	bool cyclic = flags & LLI_CYCLIC;
 	int err;
 	dma_addr_t next = lli_phys;
 	int size_rest = size;
 	int size_seg = 0;

+	/*
+	 * This piece may be split up based on d40_seg_size(); we only want the
+	 * term int on the last part.
+	 */
+	if (term_int)
+		flags &= ~LLI_TERM_INT;
+
 	do {
-		size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+		size_seg = d40_seg_size(size_rest, info->data_width,
+					otherinfo->data_width);
 		size_rest -= size_seg;

-		if (term_int && size_rest == 0)
-			next = 0;
+		if (size_rest == 0 && term_int)
+			flags |= LLI_TERM_INT;
+
+		if (size_rest == 0 && lastlink)
+			next = cyclic ? first_phys : 0;
 		else
 			next = ALIGN(next + sizeof(struct d40_phy_lli),
 				     D40_LLI_ALIGN);

-		err = d40_phy_fill_lli(lli,
-				       addr,
-				       size_seg,
-				       psize,
-				       next,
-				       reg_cfg,
-				       !next,
-				       data_width1,
-				       is_device);
+		err = d40_phy_fill_lli(lli, addr, size_seg, next,
+				       reg_cfg, info, flags);

 		if (err)
 			goto err;

 		lli++;
-		if (!is_device)
+		if (addr_inc)
 			addr += size_seg;
 	} while (size_rest);
...
...
@@ -254,39 +260,35 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
 		      struct d40_phy_lli *lli_sg,
 		      dma_addr_t lli_phys,
 		      u32 reg_cfg,
-		      u32 data_width1,
-		      u32 data_width2,
-		      int psize)
+		      struct stedma40_half_channel_info *info,
+		      struct stedma40_half_channel_info *otherinfo,
+		      unsigned long flags)
 {
 	int total_size = 0;
 	int i;
 	struct scatterlist *current_sg = sg;
-	dma_addr_t dst;
 	struct d40_phy_lli *lli = lli_sg;
 	dma_addr_t l_phys = lli_phys;

+	if (!target)
+		flags |= LLI_ADDR_INC;
+
 	for_each_sg(sg, current_sg, sg_len, i) {
+		dma_addr_t sg_addr = sg_dma_address(current_sg);
+		unsigned int len = sg_dma_len(current_sg);
+		dma_addr_t dst = target ?: sg_addr;
+
 		total_size += sg_dma_len(current_sg);

-		if (target)
-			dst = target;
-		else
-			dst = sg_phys(current_sg);
+		if (i == sg_len - 1)
+			flags |= LLI_TERM_INT | LLI_LAST_LINK;

 		l_phys = ALIGN(lli_phys + (lli - lli_sg) *
 			       sizeof(struct d40_phy_lli), D40_LLI_ALIGN);

-		lli = d40_phy_buf_to_lli(lli,
-					 dst,
-					 sg_dma_len(current_sg),
-					 psize,
-					 l_phys,
-					 reg_cfg,
-					 sg_len - 1 == i,
-					 data_width1,
-					 data_width2,
-					 target == dst);
+		lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys,
+					 reg_cfg, info, otherinfo, flags);
+
 		if (lli == NULL)
 			return -EINVAL;
 	}
...
...
@@ -295,45 +297,22 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
 }

-
-void d40_phy_lli_write(void __iomem *virtbase,
-		       u32 phy_chan_num,
-		       struct d40_phy_lli *lli_dst,
-		       struct d40_phy_lli *lli_src)
-{
-
-	writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
-	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
-	writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
-	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
-	writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
-	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
-	writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
-	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);
-
-	writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
-	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
-	writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
-	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
-	writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
-	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
-	writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
-	       phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
-
-}
-
 /* DMA logical lli operations */

 static void d40_log_lli_link(struct d40_log_lli *lli_dst,
 			     struct d40_log_lli *lli_src,
-			     int next)
+			     int next, unsigned int flags)
 {
+	bool interrupt = flags & LLI_TERM_INT;
 	u32 slos = 0;
 	u32 dlos = 0;

 	if (next != -EINVAL) {
 		slos = next * 2;
 		dlos = next * 2 + 1;
-	} else {
+	}
+
+	if (interrupt) {
 		lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
 		lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
 	}
...
...
@@ -348,9 +327,9 @@ static void d40_log_lli_link(struct d40_log_lli *lli_dst,
 void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
 			    struct d40_log_lli *lli_dst,
 			    struct d40_log_lli *lli_src,
-			    int next)
+			    int next, unsigned int flags)
 {
-	d40_log_lli_link(lli_dst, lli_src, next);
+	d40_log_lli_link(lli_dst, lli_src, next, flags);

 	writel(lli_src->lcsp02, &lcpa[0].lcsp0);
 	writel(lli_src->lcsp13, &lcpa[0].lcsp1);
...
...
@@ -361,9 +340,9 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
 void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
 			    struct d40_log_lli *lli_dst,
 			    struct d40_log_lli *lli_src,
-			    int next)
+			    int next, unsigned int flags)
 {
-	d40_log_lli_link(lli_dst, lli_src, next);
+	d40_log_lli_link(lli_dst, lli_src, next, flags);

 	writel(lli_src->lcsp02, &lcla[0].lcsp02);
 	writel(lli_src->lcsp13, &lcla[0].lcsp13);
...
...
@@ -375,8 +354,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
 			     dma_addr_t data, u32 data_size,
 			     u32 reg_cfg,
 			     u32 data_width,
-			     bool addr_inc)
+			     unsigned int flags)
 {
+	bool addr_inc = flags & LLI_ADDR_INC;
+
 	lli->lcsp13 = reg_cfg;

 	/* The number of elements to transfer */
...
...
@@ -395,67 +376,15 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
 }

-int d40_log_sg_to_dev(struct scatterlist *sg,
-		      int sg_len,
-		      struct d40_log_lli_bidir *lli,
-		      struct d40_def_lcsp *lcsp,
-		      u32 src_data_width,
-		      u32 dst_data_width,
-		      enum dma_data_direction direction,
-		      dma_addr_t dev_addr)
-{
-	int total_size = 0;
-	struct scatterlist *current_sg = sg;
-	int i;
-	struct d40_log_lli *lli_src = lli->src;
-	struct d40_log_lli *lli_dst = lli->dst;
-
-	for_each_sg(sg, current_sg, sg_len, i) {
-		total_size += sg_dma_len(current_sg);
-
-		if (direction == DMA_TO_DEVICE) {
-			lli_src =
-				d40_log_buf_to_lli(lli_src,
-						   sg_phys(current_sg),
-						   sg_dma_len(current_sg),
-						   lcsp->lcsp1, src_data_width,
-						   dst_data_width,
-						   true);
-			lli_dst =
-				d40_log_buf_to_lli(lli_dst,
-						   dev_addr,
-						   sg_dma_len(current_sg),
-						   lcsp->lcsp3, dst_data_width,
-						   src_data_width,
-						   false);
-		} else {
-			lli_dst =
-				d40_log_buf_to_lli(lli_dst,
-						   sg_phys(current_sg),
-						   sg_dma_len(current_sg),
-						   lcsp->lcsp3, dst_data_width,
-						   src_data_width,
-						   true);
-			lli_src =
-				d40_log_buf_to_lli(lli_src,
-						   dev_addr,
-						   sg_dma_len(current_sg),
-						   lcsp->lcsp1, src_data_width,
-						   dst_data_width,
-						   false);
-		}
-	}
-	return total_size;
-}
-
-struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
 				       dma_addr_t addr,
 				       int size,
 				       u32 lcsp13, /* src or dst*/
 				       u32 data_width1,
 				       u32 data_width2,
-				       bool addr_inc)
+				       unsigned int flags)
 {
+	bool addr_inc = flags & LLI_ADDR_INC;
 	struct d40_log_lli *lli = lli_sg;
 	int size_rest = size;
 	int size_seg = 0;
...
...
@@ -468,7 +397,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
 				 addr,
 				 size_seg,
 				 lcsp13, data_width1,
-				 addr_inc);
+				 flags);
 		if (addr_inc)
 			addr += size_seg;
 		lli++;
...
...
@@ -479,6 +408,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
 int d40_log_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
+		      dma_addr_t dev_addr,
 		      struct d40_log_lli *lli_sg,
 		      u32 lcsp13, /* src or dst*/
 		      u32 data_width1, u32 data_width2)
...
...
@@ -487,14 +417,24 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
 	struct scatterlist *current_sg = sg;
 	int i;
 	struct d40_log_lli *lli = lli_sg;
+	unsigned long flags = 0;
+
+	if (!dev_addr)
+		flags |= LLI_ADDR_INC;

 	for_each_sg(sg, current_sg, sg_len, i) {
+		dma_addr_t sg_addr = sg_dma_address(current_sg);
+		unsigned int len = sg_dma_len(current_sg);
+		dma_addr_t addr = dev_addr ?: sg_addr;
+
 		total_size += sg_dma_len(current_sg);
-		lli = d40_log_buf_to_lli(lli,
-					 sg_phys(current_sg),
-					 sg_dma_len(current_sg),
+
+		lli = d40_log_buf_to_lli(lli, addr, len,
 					 lcsp13,
-					 data_width1, data_width2, true);
+					 data_width1,
+					 data_width2,
+					 flags);
 	}
 	return total_size;
 }
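The flags arguments threaded through the lli helpers above replace the old boolean parameters; the bits are the d40_lli_flags values added to ste_dma40_ll.h below. An illustrative fragment mirroring how d40_log_sg_to_lli and d40_phy_sg_to_lli set them (dev_addr, i and sg_len stand in for the callers' own loop state):

	unsigned long flags = 0;

	if (!dev_addr)			/* memory side: advance the address */
		flags |= LLI_ADDR_INC;
	if (i == sg_len - 1)		/* only the final link raises the interrupt */
		flags |= LLI_TERM_INT | LLI_LAST_LINK;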
drivers/dma/ste_dma40_ll.h (view file @ 80cc07af)
...
...
@@ -163,6 +163,22 @@
#define D40_DREG_LCEIS1 0x0B4
#define D40_DREG_LCEIS2 0x0B8
#define D40_DREG_LCEIS3 0x0BC
#define D40_DREG_PSEG1 0x110
#define D40_DREG_PSEG2 0x114
#define D40_DREG_PSEG3 0x118
#define D40_DREG_PSEG4 0x11C
#define D40_DREG_PCEG1 0x120
#define D40_DREG_PCEG2 0x124
#define D40_DREG_PCEG3 0x128
#define D40_DREG_PCEG4 0x12C
#define D40_DREG_RSEG1 0x130
#define D40_DREG_RSEG2 0x134
#define D40_DREG_RSEG3 0x138
#define D40_DREG_RSEG4 0x13C
#define D40_DREG_RCEG1 0x140
#define D40_DREG_RCEG2 0x144
#define D40_DREG_RCEG3 0x148
#define D40_DREG_RCEG4 0x14C
#define D40_DREG_STFU 0xFC8
#define D40_DREG_ICFG 0xFCC
#define D40_DREG_PERIPHID0 0xFE0
...
...
@@ -277,6 +293,13 @@ struct d40_def_lcsp {
 /* Physical channels */

+enum d40_lli_flags {
+	LLI_ADDR_INC	= 1 << 0,
+	LLI_TERM_INT	= 1 << 1,
+	LLI_CYCLIC	= 1 << 2,
+	LLI_LAST_LINK	= 1 << 3,
+};
+
 void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
 		 u32 *src_cfg,
 		 u32 *dst_cfg,
...
...
@@ -292,46 +315,15 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
 		      struct d40_phy_lli *lli,
 		      dma_addr_t lli_phys,
 		      u32 reg_cfg,
-		      u32 data_width1,
-		      u32 data_width2,
-		      int psize);
-
-struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
-				       dma_addr_t data,
-				       u32 data_size,
-				       int psize,
-				       dma_addr_t next_lli,
-				       u32 reg_cfg,
-				       bool term_int,
-				       u32 data_width1,
-				       u32 data_width2,
-				       bool is_device);
-
-void d40_phy_lli_write(void __iomem *virtbase,
-		       u32 phy_chan_num,
-		       struct d40_phy_lli *lli_dst,
-		       struct d40_phy_lli *lli_src);
+		      struct stedma40_half_channel_info *info,
+		      struct stedma40_half_channel_info *otherinfo,
+		      unsigned long flags);

 /* Logical channels */

-struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
-				       dma_addr_t addr,
-				       int size,
-				       u32 lcsp13, /* src or dst*/
-				       u32 data_width1,
-				       u32 data_width2,
-				       bool addr_inc);
-
-int d40_log_sg_to_dev(struct scatterlist *sg,
-		      int sg_len,
-		      struct d40_log_lli_bidir *lli,
-		      struct d40_def_lcsp *lcsp,
-		      u32 src_data_width,
-		      u32 dst_data_width,
-		      enum dma_data_direction direction,
-		      dma_addr_t dev_addr);
-
 int d40_log_sg_to_lli(struct scatterlist *sg,
 		      int sg_len,
+		      dma_addr_t dev_addr,
 		      struct d40_log_lli *lli_sg,
 		      u32 lcsp13, /* src or dst*/
 		      u32 data_width1, u32 data_width2);
...
...
@@ -339,11 +331,11 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
 void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
 			    struct d40_log_lli *lli_dst,
 			    struct d40_log_lli *lli_src,
-			    int next);
+			    int next, unsigned int flags);

 void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
 			    struct d40_log_lli *lli_dst,
 			    struct d40_log_lli *lli_src,
-			    int next);
+			    int next, unsigned int flags);

 #endif /* STE_DMA40_LLI_H */