Commit c7960fc5 — openeuler / Kernel

Authored Nov 14, 2017 by Vinod Koul

    Merge branch 'topic/qcom' into for-linus

Parents: 4cd46d0c, 6b4faeac

Showing 1 changed file with 109 additions and 60 deletions:

drivers/dma/qcom/bam_dma.c  (+109, -60)
@@ -46,6 +46,7 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_dma.h>
+#include <linux/circ_buf.h>
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
 #include <linux/pm_runtime.h>
@@ -78,6 +79,8 @@ struct bam_async_desc {
 	struct bam_desc_hw *curr_desc;
 
+	/* list node for the desc in the bam_chan list of descriptors */
+	struct list_head desc_node;
 	enum dma_transfer_direction dir;
 	size_t length;
 	struct bam_desc_hw desc[0];
@@ -347,6 +350,8 @@ static const struct reg_offset_data bam_v1_7_reg_info[] = {
 #define BAM_DESC_FIFO_SIZE	SZ_32K
 #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
 #define BAM_FIFO_SIZE	(SZ_32K - 8)
+#define IS_BUSY(chan)	(CIRC_SPACE(bchan->tail, bchan->head,\
+			MAX_DESCRIPTORS + 1) == 0)
 
 struct bam_chan {
 	struct virt_dma_chan vc;
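The new IS_BUSY() macro is why <linux/circ_buf.h> is now included: the channel's descriptor FIFO is treated as a standard power-of-two circular buffer. As a reminder (not part of this patch), the circ_buf helpers are defined essentially as below; the size argument must be a power of two, which holds here since MAX_DESCRIPTORS + 1 == BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) == 4096 for the 8-byte BAM hardware descriptors.

	/* sketch of the <linux/circ_buf.h> helpers this patch relies on */
	#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))	/* filled slots */
	#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))	/* free slots */

With those definitions, IS_BUSY() is true exactly when the ring has no free slot left, i.e. tail has caught up to one slot behind head. (Note the macro body references the local bchan rather than its chan argument.)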
@@ -356,8 +361,6 @@ struct bam_chan {
 	/* configuration from device tree */
 	u32 id;
 
-	struct bam_async_desc *curr_txd;	/* current running dma */
-
 	/* runtime configuration */
 	struct dma_slave_config slave;
@@ -372,6 +375,8 @@ struct bam_chan {
 	unsigned int initialized;	/* is the channel hw initialized? */
 	unsigned int paused;		/* is the channel paused? */
 	unsigned int reconfigure;	/* new slave config? */
+	/* list of descriptors currently processed */
+	struct list_head desc_list;
 
 	struct list_head node;
 };
@@ -539,7 +544,7 @@ static void bam_free_chan(struct dma_chan *chan)
 	vchan_free_chan_resources(to_virt_chan(chan));
 
-	if (bchan->curr_txd) {
+	if (!list_empty(&bchan->desc_list)) {
 		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
 		goto err;
 	}
@@ -632,8 +637,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
 	if (flags & DMA_PREP_INTERRUPT)
 		async_desc->flags |= DESC_FLAG_EOT;
-	else
-		async_desc->flags |= DESC_FLAG_INT;
 
 	async_desc->num_desc = num_alloc;
 	async_desc->curr_desc = async_desc->desc;
@@ -684,14 +687,16 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
 static int bam_dma_terminate_all(struct dma_chan *chan)
 {
 	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_async_desc *async_desc, *tmp;
 	unsigned long flag;
 	LIST_HEAD(head);
 
 	/* remove all transactions, including active transaction */
 	spin_lock_irqsave(&bchan->vc.lock, flag);
-	if (bchan->curr_txd) {
-		list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
-		bchan->curr_txd = NULL;
+	list_for_each_entry_safe(async_desc, tmp,
+				 &bchan->desc_list, desc_node) {
+		list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
+		list_del(&async_desc->desc_node);
 	}
 
 	vchan_get_all_descriptors(&bchan->vc, &head);
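The walk over desc_list uses list_for_each_entry_safe() because the loop body unlinks the current entry; the _safe variant caches the successor in tmp before the body runs. A minimal sketch of the pattern (illustrative list and type names, not from the driver):

	struct item { struct list_head node; };
	struct item *it, *tmp;

	/* tmp holds the next entry, so list_del() on the current
	 * entry cannot break the traversal */
	list_for_each_entry_safe(it, tmp, &pending, node)
		list_del(&it->node);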
@@ -763,9 +768,9 @@ static int bam_resume(struct dma_chan *chan)
  */
 static u32 process_channel_irqs(struct bam_device *bdev)
 {
-	u32 i, srcs, pipe_stts;
+	u32 i, srcs, pipe_stts, offset, avail;
 	unsigned long flags;
-	struct bam_async_desc *async_desc;
+	struct bam_async_desc *async_desc, *tmp;
 
 	srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
@@ -785,27 +790,40 @@ static u32 process_channel_irqs(struct bam_device *bdev)
 		writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
 
 		spin_lock_irqsave(&bchan->vc.lock, flags);
-		async_desc = bchan->curr_txd;
 
-		if (async_desc) {
-			async_desc->num_desc -= async_desc->xfer_len;
-			async_desc->curr_desc += async_desc->xfer_len;
-			bchan->curr_txd = NULL;
+		offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
+				       P_SW_OFSTS_MASK;
+		offset /= sizeof(struct bam_desc_hw);
+
+		/* Number of bytes available to read */
+		avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
+
+		list_for_each_entry_safe(async_desc, tmp,
+					 &bchan->desc_list, desc_node) {
+			/* Not enough data to read */
+			if (avail < async_desc->xfer_len)
+				break;
 
 			/* manage FIFO */
 			bchan->head += async_desc->xfer_len;
 			bchan->head %= MAX_DESCRIPTORS;
 
+			async_desc->num_desc -= async_desc->xfer_len;
+			async_desc->curr_desc += async_desc->xfer_len;
+			avail -= async_desc->xfer_len;
+
 			/*
-			 * if complete, process cookie.  Otherwise
+			 * if complete, process cookie. Otherwise
 			 * push back to front of desc_issued so that
 			 * it gets restarted by the tasklet
 			 */
-			if (!async_desc->num_desc)
+			if (!async_desc->num_desc) {
 				vchan_cookie_complete(&async_desc->vd);
-			else
+			} else {
 				list_add(&async_desc->vd.node,
-					&bchan->vc.desc_issued);
+					 &bchan->vc.desc_issued);
+			}
+			list_del(&async_desc->desc_node);
 		}
 
 		spin_unlock_irqrestore(&bchan->vc.lock, flags);
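The interrupt handler now derives completion progress from the hardware read pointer (BAM_P_SW_OFSTS) instead of assuming a single in-flight descriptor. As a worked example with illustrative values: if the register reports byte offset 0x120, dividing by the 8-byte descriptor size yields slot 36; with bchan->head == 30 and a ring of MAX_DESCRIPTORS + 1 == 4096 slots, CIRC_CNT(36, 30, 4096) == 6, so six descriptors have completed, and the loop retires queued async descriptors until the remaining count is smaller than the next descriptor's xfer_len.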
@@ -867,6 +885,7 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 			       struct dma_tx_state *txstate)
 {
 	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_async_desc *async_desc;
 	struct virt_dma_desc *vd;
 	int ret;
 	size_t residue = 0;
@@ -882,11 +901,17 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	spin_lock_irqsave(&bchan->vc.lock, flags);
 	vd = vchan_find_desc(&bchan->vc, cookie);
-	if (vd)
+	if (vd) {
 		residue = container_of(vd, struct bam_async_desc, vd)->length;
-	else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
-		for (i = 0; i < bchan->curr_txd->num_desc; i++)
-			residue += bchan->curr_txd->curr_desc[i].size;
+	} else {
+		list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
+			if (async_desc->vd.tx.cookie != cookie)
+				continue;
+
+			for (i = 0; i < async_desc->num_desc; i++)
+				residue += async_desc->curr_desc[i].size;
+		}
+	}
 
 	spin_unlock_irqrestore(&bchan->vc.lock, flags);
@@ -927,63 +952,86 @@ static void bam_start_dma(struct bam_chan *bchan)
 {
 	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
 	struct bam_device *bdev = bchan->bdev;
-	struct bam_async_desc *async_desc;
+	struct bam_async_desc *async_desc = NULL;
 	struct bam_desc_hw *desc;
 	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
 					sizeof(struct bam_desc_hw));
 	int ret;
+	unsigned int avail;
+	struct dmaengine_desc_callback cb;
 
 	lockdep_assert_held(&bchan->vc.lock);
 
 	if (!vd)
 		return;
 
-	list_del(&vd->node);
-
-	async_desc = container_of(vd, struct bam_async_desc, vd);
-	bchan->curr_txd = async_desc;
-
 	ret = pm_runtime_get_sync(bdev->dev);
 	if (ret < 0)
 		return;
 
-	/* on first use, initialize the channel hardware */
-	if (!bchan->initialized)
-		bam_chan_init_hw(bchan, async_desc->dir);
-
-	/* apply new slave config changes, if necessary */
-	if (bchan->reconfigure)
-		bam_apply_new_config(bchan, async_desc->dir);
+	while (vd && !IS_BUSY(bchan)) {
+		list_del(&vd->node);
 
-	desc = bchan->curr_txd->curr_desc;
+		async_desc = container_of(vd, struct bam_async_desc, vd);
 
-	if (async_desc->num_desc > MAX_DESCRIPTORS)
-		async_desc->xfer_len = MAX_DESCRIPTORS;
-	else
-		async_desc->xfer_len = async_desc->num_desc;
+		/* on first use, initialize the channel hardware */
+		if (!bchan->initialized)
+			bam_chan_init_hw(bchan, async_desc->dir);
 
-	/* set any special flags on the last descriptor */
-	if (async_desc->num_desc == async_desc->xfer_len)
-		desc[async_desc->xfer_len - 1].flags |=
-					cpu_to_le16(async_desc->flags);
-	else
-		desc[async_desc->xfer_len - 1].flags |=
-					cpu_to_le16(DESC_FLAG_INT);
+		/* apply new slave config changes, if necessary */
+		if (bchan->reconfigure)
+			bam_apply_new_config(bchan, async_desc->dir);
 
-	if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
-		u32 partial = MAX_DESCRIPTORS - bchan->tail;
+		desc = async_desc->curr_desc;
+		avail = CIRC_SPACE(bchan->tail, bchan->head,
+				   MAX_DESCRIPTORS + 1);
 
-		memcpy(&fifo[bchan->tail], desc,
-				partial * sizeof(struct bam_desc_hw));
-		memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
-				sizeof(struct bam_desc_hw));
-	} else {
-		memcpy(&fifo[bchan->tail], desc,
-			async_desc->xfer_len * sizeof(struct bam_desc_hw));
-	}
+		if (async_desc->num_desc > avail)
+			async_desc->xfer_len = avail;
+		else
+			async_desc->xfer_len = async_desc->num_desc;
 
-	bchan->tail += async_desc->xfer_len;
-	bchan->tail %= MAX_DESCRIPTORS;
+		/* set any special flags on the last descriptor */
+		if (async_desc->num_desc == async_desc->xfer_len)
+			desc[async_desc->xfer_len - 1].flags |=
+						cpu_to_le16(async_desc->flags);
+
+		vd = vchan_next_desc(&bchan->vc);
+
+		dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);
+
+		/*
+		 * An interrupt is generated at this desc, if
+		 *  - FIFO is FULL.
+		 *  - No more descriptors to add.
+		 *  - If a callback completion was requested for this DESC,
+		 *    In this case, BAM will deliver the completion callback
+		 *    for this desc and continue processing the next desc.
+		 */
+		if (((avail <= async_desc->xfer_len) || !vd ||
+		     dmaengine_desc_callback_valid(&cb)) &&
+		    !(async_desc->flags & DESC_FLAG_EOT))
+			desc[async_desc->xfer_len - 1].flags |=
+				cpu_to_le16(DESC_FLAG_INT);
+
+		if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
+			u32 partial = MAX_DESCRIPTORS - bchan->tail;
+
+			memcpy(&fifo[bchan->tail], desc,
+			       partial * sizeof(struct bam_desc_hw));
+			memcpy(fifo, &desc[partial],
+			       (async_desc->xfer_len - partial) *
+			       sizeof(struct bam_desc_hw));
+		} else {
+			memcpy(&fifo[bchan->tail], desc,
+			       async_desc->xfer_len *
+			       sizeof(struct bam_desc_hw));
+		}
+
+		bchan->tail += async_desc->xfer_len;
+		bchan->tail %= MAX_DESCRIPTORS;
+		list_add_tail(&async_desc->desc_node, &bchan->desc_list);
+	}
 
 	/* ensure descriptor writes and dma start not reordered */
 	wmb();
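The descriptor copy into the hardware FIFO keeps its original shape: when a batch straddles the end of the ring it is split into two memcpy() calls, first filling the slots up to the end and then wrapping to slot 0; only the accounting around it changed. A self-contained userspace sketch of the same wrap-around split (hypothetical names, fixed slot count for illustration):

	#include <string.h>

	#define SLOTS 4096	/* ring capacity, mirrors MAX_DESCRIPTORS here */

	/* copy n entries of width esz into ring[], wrapping at SLOTS */
	static void ring_copy(char *ring, unsigned int tail,
			      const char *src, unsigned int n, size_t esz)
	{
		if (tail + n > SLOTS) {
			unsigned int partial = SLOTS - tail;

			memcpy(ring + (size_t)tail * esz, src,
			       (size_t)partial * esz);
			memcpy(ring, src + (size_t)partial * esz,
			       (size_t)(n - partial) * esz);
		} else {
			memcpy(ring + (size_t)tail * esz, src,
			       (size_t)n * esz);
		}
	}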
@@ -1012,7 +1060,7 @@ static void dma_tasklet(unsigned long data)
 		bchan = &bdev->channels[i];
 		spin_lock_irqsave(&bchan->vc.lock, flags);
 
-		if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
+		if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
 			bam_start_dma(bchan);
 		spin_unlock_irqrestore(&bchan->vc.lock, flags);
 	}
@@ -1033,7 +1081,7 @@ static void bam_issue_pending(struct dma_chan *chan)
 	spin_lock_irqsave(&bchan->vc.lock, flags);
 
 	/* if work pending and idle, start a transaction */
-	if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
+	if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
 		bam_start_dma(bchan);
 
 	spin_unlock_irqrestore(&bchan->vc.lock, flags);
@@ -1133,6 +1181,7 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
...
@@ -1133,6 +1181,7 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
vchan_init
(
&
bchan
->
vc
,
&
bdev
->
common
);
vchan_init
(
&
bchan
->
vc
,
&
bdev
->
common
);
bchan
->
vc
.
desc_free
=
bam_dma_free_desc
;
bchan
->
vc
.
desc_free
=
bam_dma_free_desc
;
INIT_LIST_HEAD
(
&
bchan
->
desc_list
);
}
}
static
const
struct
of_device_id
bam_of_match
[]
=
{
static
const
struct
of_device_id
bam_of_match
[]
=
{
...
...