OpenHarmony / kernel_linux

Commit 87fce2f5
Authored Jul 04, 2017 by Vinod Koul

Merge branch 'topic/mv_xor' into for-linus

Parents: 930a6348, ecfa7714
1 changed file with 91 additions and 56 deletions
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -42,6 +42,7 @@
 #define MV_XOR_V2_DMA_IMSG_THRD_OFF			0x018
 #define   MV_XOR_V2_DMA_IMSG_THRD_MASK			0x7FFF
 #define   MV_XOR_V2_DMA_IMSG_THRD_SHIFT			0x0
+#define   MV_XOR_V2_DMA_IMSG_TIMER_EN			BIT(18)
 #define MV_XOR_V2_DMA_DESQ_AWATTR_OFF			0x01C
   /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
 #define MV_XOR_V2_DMA_DESQ_ALLOC_OFF			0x04C
@@ -55,6 +56,9 @@
 #define MV_XOR_V2_DMA_DESQ_STOP_OFF			0x800
 #define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF			0x804
 #define MV_XOR_V2_DMA_DESQ_ADD_OFF			0x808
+#define MV_XOR_V2_DMA_IMSG_TMOT				0x810
+#define   MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK		0x1FFF
+#define   MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT		0

 /* XOR Global registers */
 #define MV_XOR_V2_GLOB_BW_CTRL				0x4
@@ -90,6 +94,13 @@
  */
 #define MV_XOR_V2_DESC_NUM				1024

+/*
+ * Threshold values for descriptors and timeout, determined by
+ * experimentation as giving a good level of performance.
+ */
+#define MV_XOR_V2_DONE_IMSG_THRD	0x14
+#define MV_XOR_V2_TIMER_THRD		0xB0
+
 /**
  * struct mv_xor_v2_descriptor - DMA HW descriptor
  * @desc_id: used by S/W and is not affected by H/W.
@@ -161,6 +172,7 @@ struct mv_xor_v2_device {
	struct mv_xor_v2_sw_desc *sw_desq;
	int desc_size;
	unsigned int npendings;
+	unsigned int hw_queue_idx;
 };

 /**
@@ -213,18 +225,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
	}
 }

-/*
- * Return the next available index in the DESQ.
- */
-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
-{
-	/* read the index for the next available descriptor in the DESQ */
-	u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
-
-	return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
-		& MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
-}
-
 /*
  * notify the engine of new descriptors, and update the available index.
  */
@@ -261,16 +261,23 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
  * Set the IMSG threshold
  */
 static inline
-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
+void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
 {
	u32 reg;

+	/* Configure threshold of number of descriptors, and enable timer */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
	reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-	reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+	reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+	reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+
+	/* Configure Timer Threshold */
+	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
+	reg &= (~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
+		MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
+	reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
+	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
 }

 static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
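The rewritten helper above is the heart of the interrupt coalescing change: rather than reprogramming the descriptor threshold around every interrupt, it is set once to MV_XOR_V2_DONE_IMSG_THRD descriptors together with the MV_XOR_V2_TIMER_THRD timeout, so an IMSG interrupt fires either when enough descriptors complete or when the timer expires. Below is a minimal user-space sketch of the same read-modify-write field update, assuming plain variables in place of the readl()/writel() MMIO pair (set_field() and the sample register value are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

#define IMSG_THRD_MASK   0x7FFFu      /* mirrors MV_XOR_V2_DMA_IMSG_THRD_MASK */
#define IMSG_THRD_SHIFT  0
#define IMSG_TIMER_EN    (1u << 18)   /* mirrors MV_XOR_V2_DMA_IMSG_TIMER_EN */

/* Clear the masked field, then OR in the new value: the same
 * read-modify-write pattern the driver performs on the register. */
static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
                          uint32_t val)
{
        reg &= ~(mask << shift);
        reg |= (val & mask) << shift;
        return reg;
}

int main(void)
{
        uint32_t reg = 0xdead0000;              /* pretend readl() result */

        reg = set_field(reg, IMSG_THRD_MASK, IMSG_THRD_SHIFT, 0x14);
        reg |= IMSG_TIMER_EN;                   /* arm the coalescing timer */
        printf("IMSG_THRD register: 0x%08x\n", reg); /* pretend writel() */
        return 0;
}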
@@ -288,12 +295,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
	if (!ndescs)
		return IRQ_NONE;

-	/*
-	 * Update IMSG threshold, to disable new IMSG interrupts until
-	 * end of the tasklet
-	 */
-	mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
-
	/* schedule a tasklet to handle descriptors callbacks */
	tasklet_schedule(&xor_dev->irq_tasklet);
@@ -306,7 +307,6 @@
 static dma_cookie_t
 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	int desq_ptr;
	void *dest_hw_desc;
	dma_cookie_t cookie;
	struct mv_xor_v2_sw_desc *sw_desc =
@@ -322,15 +322,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
	spin_lock_bh(&xor_dev->lock);
	cookie = dma_cookie_assign(tx);

-	/* get the next available slot in the DESQ */
-	desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
-
	/* copy the HW descriptor from the SW descriptor to the DESQ */
-	dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+	dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;

	memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);

	xor_dev->npendings++;
+	xor_dev->hw_queue_idx++;
+	if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
+		xor_dev->hw_queue_idx = 0;

	spin_unlock_bh(&xor_dev->lock);
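This hunk replaces a per-submit MMIO read of the hardware allocation pointer with a software-owned queue index that the submit path advances and wraps itself, removing a register read from the hot path. A minimal sketch of that advance-and-wrap, assuming the same 1024-entry ring (the driver does this under xor_dev->lock; locking is omitted here):

#include <stdio.h>

#define DESC_NUM 1024   /* mirrors MV_XOR_V2_DESC_NUM */

static unsigned int hw_queue_idx;       /* mirrors xor_dev->hw_queue_idx */

/* Claim the current slot, then advance and wrap the index. */
static unsigned int ring_claim_slot(void)
{
        unsigned int slot = hw_queue_idx;

        hw_queue_idx++;
        if (hw_queue_idx >= DESC_NUM)
                hw_queue_idx = 0;
        return slot;
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                printf("claimed slot %u\n", ring_claim_slot());
        return 0;
}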
@@ -344,6 +344,7 @@ static struct mv_xor_v2_sw_desc *
 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 {
	struct mv_xor_v2_sw_desc *sw_desc;
+	bool found = false;

	/* Lock the channel */
	spin_lock_bh(&xor_dev->lock);
@@ -355,19 +356,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
		return NULL;
	}

-	/* get a free SW descriptor from the SW DESQ */
-	sw_desc = list_first_entry(&xor_dev->free_sw_desc,
-				   struct mv_xor_v2_sw_desc, free_list);
+	list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
+		if (async_tx_test_ack(&sw_desc->async_tx)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		spin_unlock_bh(&xor_dev->lock);
+		return NULL;
+	}
+
	list_del(&sw_desc->free_list);

	/* Release the channel */
	spin_unlock_bh(&xor_dev->lock);

-	/* set the async tx descriptor */
-	dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
-	sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
-	async_tx_ack(&sw_desc->async_tx);
-
	return sw_desc;
 }
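The old list_first_entry() could hand out a descriptor whose previous transaction the async_tx client had not yet acknowledged, so its cookie and callback could still be in use; the replacement scans the free list for an entry that passes async_tx_test_ack() and returns NULL when none qualifies. A standalone sketch of that policy over a plain array standing in for the free list (the acked flag is a stand-in for the async_tx ACK bit):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sw_desc {
        int idx;
        bool acked;     /* stand-in for async_tx_test_ack() */
};

/* Return the first reusable descriptor, or NULL if none is acked yet:
 * the same "skip not-yet-acked entries" policy as the driver's scan. */
static struct sw_desc *find_acked(struct sw_desc *pool, size_t n)
{
        for (size_t i = 0; i < n; i++)
                if (pool[i].acked)
                        return &pool[i];
        return NULL;    /* caller must treat this as allocation failure */
}

int main(void)
{
        struct sw_desc pool[] = { {0, false}, {1, true}, {2, true} };
        struct sw_desc *d = find_acked(pool, 3);

        if (d)
                printf("got desc %d\n", d->idx);
        else
                printf("no free desc\n");
        return 0;
}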
@@ -389,6 +394,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
		__func__, len, &src, &dest, flags);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;

	sw_desc->async_tx.flags = flags;
@@ -443,6 +450,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		__func__, src_cnt, len, &dest, flags);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;

	sw_desc->async_tx.flags = flags;
@@ -491,6 +500,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
		container_of(chan, struct mv_xor_v2_device, dmachan);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;
@@ -524,9 +535,6 @@ static void mv_xor_v2_issue_pending(struct dma_chan *chan)
	mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
	xor_dev->npendings = 0;

-	/* Activate the channel */
-	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
-
	spin_unlock_bh(&xor_dev->lock);
 }
@@ -554,7 +562,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
 {
	struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
	int pending_ptr, num_of_pending, i;
-	struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
	struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;

	dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@@ -562,17 +569,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
	/* get the pending descriptors parameters */
	num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);

-	/* next HW descriptor */
-	next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
-
	/* loop over free descriptors */
	for (i = 0; i < num_of_pending; i++) {
-
-		if (pending_ptr > MV_XOR_V2_DESC_NUM)
-			pending_ptr = 0;
-
-		if (next_pending_sw_desc != NULL)
-			next_pending_hw_desc++;
+		struct mv_xor_v2_descriptor *next_pending_hw_desc =
+			xor_dev->hw_desq_virt + pending_ptr;

		/* get the SW descriptor related to the HW descriptor */
		next_pending_sw_desc =
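Two bugs are fixed here at once: the old code only reset pending_ptr when it was strictly greater than MV_XOR_V2_DESC_NUM (an off-by-one, since valid indices run 0..1023), and it bumped a separately maintained pointer that never wrapped along with the index. The new code derives the HW descriptor address from pending_ptr on every iteration, and the wrap moves to the bottom of the loop, as the following hunk shows. A sketch of index-derived addressing over a ring, assuming a tiny array as the queue:

#include <stdio.h>

#define DESC_NUM 4      /* tiny ring for the demo; the driver uses 1024 */

struct desc { int payload; };

int main(void)
{
        struct desc ring[DESC_NUM] = { {10}, {11}, {12}, {13} };
        unsigned int pending_ptr = 2;   /* first pending completion */
        int num_of_pending = 3;         /* walk wraps past the ring end */

        for (int i = 0; i < num_of_pending; i++) {
                /* recompute the element from the index each iteration,
                 * exactly like next_pending_hw_desc in the fix */
                struct desc *d = &ring[pending_ptr];

                printf("complete desc %u -> %d\n", pending_ptr, d->payload);

                pending_ptr++;
                if (pending_ptr >= DESC_NUM)    /* wrap at the bottom */
                        pending_ptr = 0;
        }
        return 0;
}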
@@ -608,15 +608,14 @@ static void mv_xor_v2_tasklet(unsigned long data)
		/* increment the next descriptor */
		pending_ptr++;
+		if (pending_ptr >= MV_XOR_V2_DESC_NUM)
+			pending_ptr = 0;
	}

	if (num_of_pending != 0) {
		/* free the descriptores */
		mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
	}
-
-	/* Update IMSG threshold, to enable new IMSG interrupts */
-	mv_xor_v2_set_imsg_thrd(xor_dev, 0);
 }

 /*
@@ -648,9 +647,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
	writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);

-	/* enable the DMA engine */
-	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
-
	/*
	 * This is a temporary solution, until we activate the
	 * SMMU. Set the attributes for reading & writing data buffers
@@ -694,6 +690,30 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
	reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);

+	/* enable the DMA engine */
+	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
	return 0;
 }

+static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
+{
+	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);
+
+	/* Set this bit to disable to stop the XOR unit. */
+	writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
+	return 0;
+}
+
+static int mv_xor_v2_resume(struct platform_device *dev)
+{
+	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);
+
+	mv_xor_v2_set_desc_size(xor_dev);
+	mv_xor_v2_enable_imsg_thrd(xor_dev);
+	mv_xor_v2_descq_init(xor_dev);
+
+	return 0;
+}
+
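Suspend only has to park the engine, since writing 1 to MV_XOR_V2_DMA_DESQ_STOP_OFF halts the DESQ; resume must assume register state was lost and replays the same configuration sequence as probe: descriptor size, interrupt coalescing, then mv_xor_v2_descq_init(), which after the hunk above finishes by writing 0 to the stop register to restart the engine. A compact model of that ordering contract, with stub functions standing in for the driver calls (the stubs and their names are illustrative only):

#include <stdio.h>

/* Stubs standing in for the driver's reconfiguration steps. */
static void set_desc_size(void)    { puts("program descriptor size"); }
static void enable_imsg_thrd(void) { puts("program interrupt coalescing"); }
static void descq_init(void)       { puts("program DESQ, then start engine"); }

/* Resume must reprogram everything before the engine restarts; the
 * engine start is deliberately the last step, inside descq_init(). */
static void xor_resume(void)
{
        set_desc_size();
        enable_imsg_thrd();
        descq_init();
}

int main(void)
{
        xor_resume();
        return 0;
}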
@@ -725,6 +745,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
	platform_set_drvdata(pdev, xor_dev);

+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret)
+		return ret;
+
	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
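The new dma_set_mask_and_coherent() call declares that the engine can address 40 bits of DMA address space, consistent with the high queue-base bits programmed into MV_XOR_V2_DMA_DESQ_BAHR_OFF in the descq_init hunk earlier. DMA_BIT_MASK(40) expands to a mask with the low 40 bits set; a quick check of that value (the macro body below matches the kernel's definition in include/linux/dma-mapping.h):

#include <stdio.h>

/* The kernel's definition (include/linux/dma-mapping.h). */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        /* 40 addressable bits = 1 TiB of DMA-reachable memory */
        printf("DMA_BIT_MASK(40) = 0x%llx\n",
               (unsigned long long)DMA_BIT_MASK(40));
        return 0;
}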
@@ -785,8 +809,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
	/* add all SW descriptors to the free list */
	for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
-		xor_dev->sw_desq[i].idx = i;
-		list_add(&xor_dev->sw_desq[i].free_list,
-			 &xor_dev->free_sw_desc);
+		struct mv_xor_v2_sw_desc *sw_desc =
+			xor_dev->sw_desq + i;
+		sw_desc->idx = i;
+		dma_async_tx_descriptor_init(&sw_desc->async_tx,
+					     &xor_dev->dmachan);
+		sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+		async_tx_ack(&sw_desc->async_tx);
+
+		list_add(&sw_desc->free_list,
+			 &xor_dev->free_sw_desc);
	}
@@ -816,6 +847,8 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
	list_add_tail(&xor_dev->dmachan.device_node, &dma_dev->channels);

+	mv_xor_v2_enable_imsg_thrd(xor_dev);
+
	mv_xor_v2_descq_init(xor_dev);

	ret = dma_async_device_register(dma_dev);
@@ -865,6 +898,8 @@ MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
 static struct platform_driver mv_xor_v2_driver = {
	.probe		= mv_xor_v2_probe,
+	.suspend	= mv_xor_v2_suspend,
+	.resume		= mv_xor_v2_resume,
	.remove		= mv_xor_v2_remove,
	.driver		= {
		.name	= "mv_xor_v2",