Commit 330542fc, authored Jan 31, 2018 by Vinod Koul
Merge branch 'topic/xilinx' into for-linus
Parents: b8e1a963, 0e847d44
Showing 2 changed files with 313 additions and 168 deletions
drivers/dma/xilinx/xilinx_dma.c  +173 −129
drivers/dma/xilinx/zynqmp_dma.c  +140 −39
drivers/dma/xilinx/xilinx_dma.c
...
...
@@ -99,7 +99,9 @@
 #define XILINX_DMA_REG_FRMPTR_STS	0x0024
 #define XILINX_DMA_REG_PARK_PTR		0x0028
 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
+#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
+#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
 #define XILINX_DMA_REG_VDMA_VERSION	0x002c

 /* Register Direct Mode Registers */
...
...
@@ -163,6 +165,7 @@
 #define XILINX_DMA_BD_SOP		BIT(27)
 #define XILINX_DMA_BD_EOP		BIT(26)
 #define XILINX_DMA_COALESCE_MAX		255
+#define XILINX_DMA_NUM_DESCS		255
 #define XILINX_DMA_NUM_APP_WORDS	5

 /* Multi-Channel DMA Descriptor offsets*/
...
...
@@ -211,8 +214,8 @@ struct xilinx_vdma_desc_hw {
  * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
  * @buf_addr: Buffer address @0x08
  * @buf_addr_msb: MSB of Buffer address @0x0C
- * @pad1: Reserved @0x10
- * @pad2: Reserved @0x14
+ * @mcdma_control: Control field for mcdma @0x10
+ * @vsize_stride: Vsize and Stride field for mcdma @0x14
  * @control: Control field @0x18
  * @status: Status field @0x1C
  * @app: APP Fields @0x20 - 0x30
...
...
@@ -232,11 +235,11 @@ struct xilinx_axidma_desc_hw {
 /**
  * struct xilinx_cdma_desc_hw - Hardware Descriptor
  * @next_desc: Next Descriptor Pointer @0x00
- * @next_descmsb: Next Descriptor Pointer MSB @0x04
+ * @next_desc_msb: Next Descriptor Pointer MSB @0x04
  * @src_addr: Source address @0x08
- * @src_addrmsb: Source address MSB @0x0C
+ * @src_addr_msb: Source address MSB @0x0C
  * @dest_addr: Destination address @0x10
- * @dest_addrmsb: Destination address MSB @0x14
+ * @dest_addr_msb: Destination address MSB @0x14
  * @control: Control field @0x18
  * @status: Status field @0x1C
  */
...
...
@@ -310,6 +313,7 @@ struct xilinx_dma_tx_descriptor {
  * @pending_list: Descriptors waiting
  * @active_list: Descriptors ready to submit
  * @done_list: Complete descriptors
+ * @free_seg_list: Free descriptors
  * @common: DMA common channel
  * @desc_pool: Descriptors pool
  * @dev: The dma device
...
...
@@ -321,6 +325,7 @@ struct xilinx_dma_tx_descriptor {
  * @cyclic: Check for cyclic transfers.
  * @genlock: Support genlock mode
  * @err: Channel has errors
+ * @idle: Check for channel idle
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
...
...
@@ -329,9 +334,12 @@ struct xilinx_dma_tx_descriptor {
  * @desc_submitcount: Descriptor h/w submitted count
  * @residue: Residue for AXI DMA
  * @seg_v: Statically allocated segments base
+ * @seg_p: Physical allocated segments base
  * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
+ * @cyclic_seg_p: Physical allocated segments base for cyclic dma
  * @start_transfer: Differentiate b/w DMA IP's transfer
  * @stop_transfer: Differentiate b/w DMA IP's quiesce
  * @tdest: TDEST value for mcdma
  */
 struct xilinx_dma_chan {
 	struct xilinx_dma_device *xdev;
...
...
@@ -341,6 +349,7 @@ struct xilinx_dma_chan {
 	struct list_head pending_list;
 	struct list_head active_list;
 	struct list_head done_list;
+	struct list_head free_seg_list;
 	struct dma_chan common;
 	struct dma_pool *desc_pool;
 	struct device *dev;
...
...
@@ -352,6 +361,7 @@ struct xilinx_dma_chan {
 	bool cyclic;
 	bool genlock;
 	bool err;
+	bool idle;
 	struct tasklet_struct tasklet;
 	struct xilinx_vdma_config config;
 	bool flush_on_fsync;
...
...
@@ -360,18 +370,20 @@ struct xilinx_dma_chan {
 	u32 desc_submitcount;
 	u32 residue;
 	struct xilinx_axidma_tx_segment *seg_v;
+	dma_addr_t seg_p;
 	struct xilinx_axidma_tx_segment *cyclic_seg_v;
+	dma_addr_t cyclic_seg_p;
 	void (*start_transfer)(struct xilinx_dma_chan *chan);
 	int (*stop_transfer)(struct xilinx_dma_chan *chan);
 	u16 tdest;
 };

 /**
- * enum xdma_ip_type: DMA IP type.
+ * enum xdma_ip_type - DMA IP type.
  *
- * XDMA_TYPE_AXIDMA: Axi dma ip.
- * XDMA_TYPE_CDMA: Axi cdma ip.
- * XDMA_TYPE_VDMA: Axi vdma ip.
+ * @XDMA_TYPE_AXIDMA: Axi dma ip.
+ * @XDMA_TYPE_CDMA: Axi cdma ip.
+ * @XDMA_TYPE_VDMA: Axi vdma ip.
  *
  */
 enum xdma_ip_type {
...
...
@@ -580,18 +592,32 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 static struct xilinx_axidma_tx_segment *
 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 {
-	struct xilinx_axidma_tx_segment *segment;
-	dma_addr_t phys;
-
-	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
-	if (!segment)
-		return NULL;
+	struct xilinx_axidma_tx_segment *segment = NULL;
+	unsigned long flags;

-	segment->phys = phys;
+	spin_lock_irqsave(&chan->lock, flags);
+	if (!list_empty(&chan->free_seg_list)) {
+		segment = list_first_entry(&chan->free_seg_list,
+					   struct xilinx_axidma_tx_segment,
+					   node);
+		list_del(&segment->node);
+	}
+	spin_unlock_irqrestore(&chan->lock, flags);

 	return segment;
 }

+static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
+{
+	u32 next_desc = hw->next_desc;
+	u32 next_desc_msb = hw->next_desc_msb;
+
+	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
+
+	hw->next_desc = next_desc;
+	hw->next_desc_msb = next_desc_msb;
+}
+
 /**
  * xilinx_dma_free_tx_segment - Free transaction segment
  * @chan: Driver specific DMA channel
...
...
@@ -600,7 +626,9 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
 				struct xilinx_axidma_tx_segment *segment)
 {
-	dma_pool_free(chan->desc_pool, segment, segment->phys);
+	xilinx_dma_clean_hw_desc(&segment->hw);
+
+	list_add_tail(&segment->node, &chan->free_seg_list);
 }

 /**
...
...
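The two hunks above replace per-call dma_pool allocation of AXI DMA tx segments with a pre-populated free list that segments are taken from, and returned to, under the channel lock. Below is a minimal, hedged user-space sketch of that take/return pattern; the names (seg, free_list, seg_alloc) are illustrative only, not the driver's identifiers.

/*
 * Sketch of a locked free-list allocator: entries are pre-populated once,
 * then handed out and recycled without touching a general-purpose allocator.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct seg {
	unsigned int id;
	struct seg *next;	/* singly linked free list */
};

static struct seg pool[8];
static struct seg *free_list;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void free_list_init(void)
{
	for (size_t i = 0; i < sizeof(pool) / sizeof(pool[0]); i++) {
		pool[i].id = (unsigned int)i;
		pool[i].next = free_list;
		free_list = &pool[i];
	}
}

static struct seg *seg_alloc(void)
{
	struct seg *s = NULL;

	pthread_mutex_lock(&lock);
	if (free_list) {		/* list may be empty: caller must check */
		s = free_list;
		free_list = s->next;
	}
	pthread_mutex_unlock(&lock);
	return s;
}

static void seg_free(struct seg *s)
{
	pthread_mutex_lock(&lock);
	s->next = free_list;		/* put the entry back for reuse */
	free_list = s;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	free_list_init();
	struct seg *a = seg_alloc();
	struct seg *b = seg_alloc();
	printf("got segments %u and %u\n", a->id, b->id);
	seg_free(a);
	seg_free(b);
	return 0;
}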
@@ -725,16 +753,31 @@ static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	unsigned long flags;

 	dev_dbg(chan->dev, "Free all channel resources.\n");

 	xilinx_dma_free_descriptors(chan);
+
 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-		xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
-		xilinx_dma_free_tx_segment(chan, chan->seg_v);
+		spin_lock_irqsave(&chan->lock, flags);
+		INIT_LIST_HEAD(&chan->free_seg_list);
+		spin_unlock_irqrestore(&chan->lock, flags);
+
+		/* Free memory that is allocated for BD */
+		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
+				  XILINX_DMA_NUM_DESCS, chan->seg_v,
+				  chan->seg_p);
+
+		/* Free Memory that is allocated for cyclic DMA Mode */
+		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
+				  chan->cyclic_seg_v, chan->cyclic_seg_p);
+	}
+
+	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
+		dma_pool_destroy(chan->desc_pool);
+		chan->desc_pool = NULL;
 	}
-	dma_pool_destroy(chan->desc_pool);
-	chan->desc_pool = NULL;
 }

 /**
...
...
@@ -817,6 +860,7 @@ static void xilinx_dma_do_tasklet(unsigned long data)
 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 {
 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	int i;

 	/* Has this channel already been allocated? */
 	if (chan->desc_pool)
...
...
@@ -827,11 +871,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 	 * for meeting Xilinx VDMA specification requirement.
 	 */
 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-		chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
-				  chan->dev,
-				  sizeof(struct xilinx_axidma_tx_segment),
-				  __alignof__(struct xilinx_axidma_tx_segment),
-				  0);
+		/* Allocate the buffer descriptors. */
+		chan->seg_v = dma_zalloc_coherent(chan->dev,
+						  sizeof(*chan->seg_v) *
+						  XILINX_DMA_NUM_DESCS,
+						  &chan->seg_p, GFP_KERNEL);
+		if (!chan->seg_v) {
+			dev_err(chan->dev,
+				"unable to allocate channel %d descriptors\n",
+				chan->id);
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
+			chan->seg_v[i].hw.next_desc =
+			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+				((i + 1) % XILINX_DMA_NUM_DESCS));
+			chan->seg_v[i].hw.next_desc_msb =
+			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+				((i + 1) % XILINX_DMA_NUM_DESCS));
+			chan->seg_v[i].phys = chan->seg_p +
+				sizeof(*chan->seg_v) * i;
+			list_add_tail(&chan->seg_v[i].node,
+				      &chan->free_seg_list);
+		}
 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
 				   chan->dev,
...
...
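The allocation hunk above carves XILINX_DMA_NUM_DESCS buffer descriptors out of one coherent allocation and links descriptor i to descriptor (i + 1) % XILINX_DMA_NUM_DESCS, so the last BD wraps back to the first. A small sketch of that address arithmetic follows; the base address and struct layout are made up for illustration, not the driver's.

/*
 * Sketch of circular BD ring linking: each entry's next_desc points at the
 * bus address of the following entry, modulo the ring size.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_DESCS	8

struct bd {
	uint32_t next_desc;	/* low 32 bits of the next BD's bus address */
	uint32_t next_desc_msb;	/* high 32 bits */
	uint64_t phys;		/* this BD's own bus address */
};

int main(void)
{
	struct bd ring[NUM_DESCS];
	uint64_t base = 0x40000000ULL;	/* pretend DMA address of the ring */

	for (unsigned int i = 0; i < NUM_DESCS; i++) {
		uint64_t next = base + sizeof(struct bd) *
				((i + 1) % NUM_DESCS);

		ring[i].phys = base + sizeof(struct bd) * i;
		ring[i].next_desc = (uint32_t)next;
		ring[i].next_desc_msb = (uint32_t)(next >> 32);
	}

	/* descriptor NUM_DESCS - 1 links back to descriptor 0 */
	printf("last BD at 0x%llx points back to 0x%llx\n",
	       (unsigned long long)ring[NUM_DESCS - 1].phys,
	       (unsigned long long)base);
	return 0;
}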
@@ -846,7 +909,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 				   0);
 	}

-	if (!chan->desc_pool) {
+	if (!chan->desc_pool &&
+	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
 		dev_err(chan->dev,
 			"unable to allocate channel %d descriptor pool\n",
 			chan->id);
...
...
@@ -854,23 +918,21 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 	}

 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-		/*
-		 * For AXI DMA case after submitting a pending_list, keep
-		 * an extra segment allocated so that the "next descriptor"
-		 * pointer on the tail descriptor always points to a
-		 * valid descriptor, even when paused after reaching taildesc.
-		 * This way, it is possible to issue additional
-		 * transfers without halting and restarting the channel.
-		 */
-		chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
-
 		/*
 		 * For cyclic DMA mode we need to program the tail Descriptor
 		 * register with a value which is not a part of the BD chain
 		 * so allocating a desc segment during channel allocation for
 		 * programming tail descriptor.
 		 */
-		chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+		chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
+					sizeof(*chan->cyclic_seg_v),
+					&chan->cyclic_seg_p, GFP_KERNEL);
+		if (!chan->cyclic_seg_v) {
+			dev_err(chan->dev,
+				"unable to allocate desc segment for cyclic DMA\n");
+			return -ENOMEM;
+		}
+		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
 	}

 	dma_cookie_init(dchan);
...
...
@@ -935,35 +997,11 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 	return ret;
 }

-/**
- * xilinx_dma_is_running - Check if DMA channel is running
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if running, '0' if not.
- */
-static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
-{
-	return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
-		 XILINX_DMA_DMASR_HALTED) &&
-		(dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
-		 XILINX_DMA_DMACR_RUNSTOP);
-}
-
-/**
- * xilinx_dma_is_idle - Check if DMA channel is idle
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if idle, '0' if not.
- */
-static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
-{
-	return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
-		XILINX_DMA_DMASR_IDLE;
-}
-
 /**
  * xilinx_dma_stop_transfer - Halt DMA channel
  * @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
  */
 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
 {
...
...
@@ -980,6 +1018,8 @@ static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
 /**
  * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
  * @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
  */
 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
 {
...
...
@@ -1022,13 +1062,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 {
 	struct xilinx_vdma_config *config = &chan->config;
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
-	u32 reg;
+	u32 reg, j;
 	struct xilinx_vdma_tx_segment *tail_segment;

 	/* This function was invoked with lock held */
 	if (chan->err)
 		return;

+	if (!chan->idle)
+		return;
+
 	if (list_empty(&chan->pending_list))
 		return;
...
...
@@ -1040,13 +1083,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_segment = list_last_entry(&tail_desc->segments,
 				       struct xilinx_vdma_tx_segment, node);

-	/* If it is SG mode and hardware is busy, cannot submit */
-	if (chan->has_sg && xilinx_dma_is_running(chan) &&
-	    !xilinx_dma_is_idle(chan)) {
-		dev_dbg(chan->dev, "DMA controller still busy\n");
-		return;
-	}
-
 	/*
 	 * If hardware is idle, then all descriptors on the running lists are
 	 * done, start new transfers
...
...
@@ -1063,10 +1099,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	else
 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

-	/* Configure channel to allow number frame buffers */
-	dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
-			chan->desc_pendingcount);
-
 	/*
 	 * With SG, start with circular mode, so that BDs can be fetched.
 	 * In direct register mode, if not parking, enable circular mode
...
...
@@ -1079,17 +1111,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

-	if (config->park && (config->park_frm >= 0) &&
-			(config->park_frm < chan->num_frms)) {
-		if (chan->direction == DMA_MEM_TO_DEV)
-			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
-				config->park_frm <<
-					XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
-		else
-			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
-				config->park_frm <<
-					XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
-	}
+	j = chan->desc_submitcount;
+	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+	if (chan->direction == DMA_MEM_TO_DEV) {
+		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+	} else {
+		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+	}
+	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);

 	/* Start the hardware */
 	xilinx_dma_start(chan);
...
...
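The park-pointer hunk above switches from writing the whole XILINX_DMA_REG_PARK_PTR register to a read-modify-write that touches only the RD_REF or WR_REF bit field, using the GENMASK()-based masks added at the top of the file. A plain-C sketch of the same masking, with the GENMASK values open-coded for illustration:

/*
 * Read-modify-write of one bit field: clear only that field, then OR in the
 * shifted new value, leaving the rest of the register untouched.
 */
#include <stdint.h>
#include <stdio.h>

#define PARK_PTR_WR_REF_SHIFT	8
#define PARK_PTR_WR_REF_MASK	(0x1fu << 8)	/* bits 12:8, like GENMASK(12, 8) */
#define PARK_PTR_RD_REF_SHIFT	0
#define PARK_PTR_RD_REF_MASK	0x1fu		/* bits 4:0, like GENMASK(4, 0) */

static uint32_t set_rd_ref(uint32_t reg, uint32_t frame)
{
	reg &= ~PARK_PTR_RD_REF_MASK;		/* clear only the read-frame field */
	reg |= (frame << PARK_PTR_RD_REF_SHIFT) & PARK_PTR_RD_REF_MASK;
	return reg;
}

static uint32_t set_wr_ref(uint32_t reg, uint32_t frame)
{
	reg &= ~PARK_PTR_WR_REF_MASK;		/* clear only the write-frame field */
	reg |= (frame << PARK_PTR_WR_REF_SHIFT) & PARK_PTR_WR_REF_MASK;
	return reg;
}

int main(void)
{
	uint32_t reg = 0x00000305;	/* pretend current register value */

	printf("rd_ref=3: 0x%08x\n", set_rd_ref(reg, 3));
	printf("wr_ref=7: 0x%08x\n", set_wr_ref(reg, 7));
	return 0;
}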
@@ -1101,6 +1132,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	if (chan->has_sg) {
 		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
 				tail_segment->phys);
+		list_splice_tail_init(&chan->pending_list, &chan->active_list);
+		chan->desc_pendingcount = 0;
 	} else {
 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
 		int i = 0;
...
...
@@ -1130,19 +1163,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
 				last->hw.stride);
 		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
-	}

-	if (!chan->has_sg) {
-		list_del(&desc->node);
-		list_add_tail(&desc->node, &chan->active_list);
 		chan->desc_submitcount++;
 		chan->desc_pendingcount--;
-
+		list_del(&desc->node);
+		list_add_tail(&desc->node, &chan->active_list);
 		if (chan->desc_submitcount == chan->num_frms)
 			chan->desc_submitcount = 0;
-	} else {
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
 	}

+	chan->idle = false;
 }

 /**
...
...
@@ -1158,6 +1188,9 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 	if (chan->err)
 		return;

+	if (!chan->idle)
+		return;
+
 	if (list_empty(&chan->pending_list))
 		return;
...
...
@@ -1176,6 +1209,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 	}

 	if (chan->has_sg) {
+		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+			     XILINX_CDMA_CR_SGMODE);
+
+		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+			     XILINX_CDMA_CR_SGMODE);
+
 		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
 			     head_desc->async_tx.phys);
...
...
@@ -1203,6 +1242,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)

 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
 	chan->desc_pendingcount = 0;
+	chan->idle = false;
 }

 /**
...
...
@@ -1212,7 +1252,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 {
 	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
-	struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
+	struct xilinx_axidma_tx_segment *tail_segment;
 	u32 reg;

 	if (chan->err)
...
...
@@ -1221,12 +1261,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 	if (list_empty(&chan->pending_list))
 		return;

-	/* If it is SG mode and hardware is busy, cannot submit */
-	if (chan->has_sg && xilinx_dma_is_running(chan) &&
-	    !xilinx_dma_is_idle(chan)) {
-		dev_dbg(chan->dev, "DMA controller still busy\n");
+	if (!chan->idle)
 		return;
-	}

 	head_desc = list_first_entry(&chan->pending_list,
 				     struct xilinx_dma_tx_descriptor, node);
...
...
@@ -1235,21 +1271,6 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_segment = list_last_entry(&tail_desc->segments,
 				       struct xilinx_axidma_tx_segment, node);

-	if (chan->has_sg && !chan->xdev->mcdma) {
-		old_head = list_first_entry(&head_desc->segments,
-					struct xilinx_axidma_tx_segment, node);
-		new_head = chan->seg_v;
-		/* Copy Buffer Descriptor fields. */
-		new_head->hw = old_head->hw;
-
-		/* Swap and save new reserve */
-		list_replace_init(&old_head->node, &new_head->node);
-		chan->seg_v = old_head;
-
-		tail_segment->hw.next_desc = chan->seg_v->phys;
-		head_desc->async_tx.phys = new_head->phys;
-	}
-
 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

 	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
...
...
@@ -1324,6 +1345,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)

 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
 	chan->desc_pendingcount = 0;
+	chan->idle = false;
 }

 /**
...
...
@@ -1388,6 +1410,8 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
 	}

 	chan->err = false;
+	chan->idle = true;
+	chan->desc_submitcount = 0;

 	return err;
 }
...
...
@@ -1469,6 +1493,7 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
 	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
 		spin_lock(&chan->lock);
 		xilinx_dma_complete_descriptor(chan);
+		chan->idle = true;
 		chan->start_transfer(chan);
 		spin_unlock(&chan->lock);
 	}
...
...
@@ -1591,7 +1616,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 {
 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 	struct xilinx_dma_tx_descriptor *desc;
-	struct xilinx_vdma_tx_segment *segment, *prev = NULL;
+	struct xilinx_vdma_tx_segment *segment;
 	struct xilinx_vdma_desc_hw *hw;

 	if (!is_slave_direction(xt->dir))
...
...
@@ -1645,8 +1670,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	/* Insert the segment into the descriptor segments list. */
 	list_add_tail(&segment->node, &desc->segments);

-	prev = segment;
-
 	/* Link the last hardware descriptor with the first. */
 	segment = list_first_entry(&desc->segments,
 				   struct xilinx_vdma_tx_segment, node);
...
...
@@ -1733,7 +1756,7 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 {
 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 	struct xilinx_dma_tx_descriptor *desc;
-	struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
+	struct xilinx_axidma_tx_segment *segment = NULL;
 	u32 *app_w = (u32 *)context;
 	struct scatterlist *sg;
 	size_t copy;
...
...
@@ -1784,10 +1807,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 					XILINX_DMA_NUM_APP_WORDS);
 			}

-			if (prev)
-				prev->hw.next_desc = segment->phys;
-
-			prev = segment;
 			sg_used += copy;

 			/*
...
...
@@ -1801,7 +1820,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 	segment = list_first_entry(&desc->segments,
 				   struct xilinx_axidma_tx_segment, node);
 	desc->async_tx.phys = segment->phys;
-	prev->hw.next_desc = segment->phys;

 	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
 	if (chan->direction == DMA_MEM_TO_DEV) {
...
...
@@ -1821,11 +1839,14 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 /**
  * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffers
+ * @period_len: length of individual cyclic buffer
  * @direction: DMA direction
  * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
  */
 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
 	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
...
...
@@ -2009,7 +2030,9 @@ xilinx_dma_prep_interleaved(struct dma_chan *dchan,
 /**
  * xilinx_dma_terminate_all - Halt the channel and free descriptors
- * @chan: Driver specific DMA Channel pointer
+ * @dchan: Driver specific DMA Channel pointer
+ *
+ * Return: '0' always.
  */
 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 {
...
...
@@ -2029,6 +2052,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)

 	/* Remove and free all of the descriptors in the lists */
 	xilinx_dma_free_descriptors(chan);
+	chan->idle = true;

 	if (chan->cyclic) {
 		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
...
...
@@ -2037,6 +2061,10 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 		chan->cyclic = false;
 	}

+	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+			     XILINX_CDMA_CR_SGMODE);
+
 	return 0;
 }
...
...
@@ -2323,6 +2351,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
  *
  * @xdev: Driver specific device structure
  * @node: Device node
+ * @chan_id: DMA Channel id
  *
  * Return: '0' on success and failure value on error
  */
...
...
@@ -2344,11 +2373,18 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	chan->has_sg = xdev->has_sg;
 	chan->desc_pendingcount = 0x0;
 	chan->ext_addr = xdev->ext_addr;
+	/* This variable ensures that descriptors are not
+	 * Submitted when dma engine is in progress. This variable is
+	 * Added to avoid polling for a bit in the status register to
+	 * Know dma state in the driver hot path.
+	 */
+	chan->idle = true;

 	spin_lock_init(&chan->lock);
 	INIT_LIST_HEAD(&chan->pending_list);
 	INIT_LIST_HEAD(&chan->done_list);
 	INIT_LIST_HEAD(&chan->active_list);
+	INIT_LIST_HEAD(&chan->free_seg_list);

 	/* Retrieve the channel properties from the device tree */
 	has_dre = of_property_read_bool(node, "xlnx,include-dre");
...
...
@@ -2379,6 +2415,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+			chan->config.park = 1;

 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
...
...
@@ -2395,6 +2432,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+			chan->config.park = 1;

 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
...
...
@@ -2459,7 +2497,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
  * Return: 0 always.
  */
 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
-				    struct device_node *node) {
+				    struct device_node *node)
+{
 	int ret, i, nr_channels = 1;

 	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
...
...
@@ -2654,7 +2693,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		goto error;
 	}

-	dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
+	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
+	else
+		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

 	return 0;
...
...
drivers/dma/xilinx/zynqmp_dma.c
...
...
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/clk.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pm_runtime.h>

 #include "../dmaengine.h"
...
...
@@ -47,6 +48,7 @@
 #define ZYNQMP_DMA_SRC_START_MSB	0x15C
 #define ZYNQMP_DMA_DST_START_LSB	0x160
 #define ZYNQMP_DMA_DST_START_MSB	0x164
+#define ZYNQMP_DMA_TOTAL_BYTE		0x188
 #define ZYNQMP_DMA_RATE_CTRL		0x18C
 #define ZYNQMP_DMA_IRQ_SRC_ACCT		0x190
 #define ZYNQMP_DMA_IRQ_DST_ACCT		0x194
...
...
@@ -138,6 +140,8 @@
 #define ZYNQMP_DMA_BUS_WIDTH_64		64
 #define ZYNQMP_DMA_BUS_WIDTH_128	128

+#define ZDMA_PM_TIMEOUT			100
+
 #define ZYNQMP_DMA_DESC_SIZE(chan)	(chan->desc_size)

 #define to_chan(chan)		container_of(chan, struct zynqmp_dma_chan, \
...
...
@@ -211,8 +215,6 @@ struct zynqmp_dma_desc_sw {
  * @bus_width: Bus width
  * @src_burst_len: Source burst length
  * @dst_burst_len: Dest burst length
- * @clk_main: Pointer to main clock
- * @clk_apb: Pointer to apb clock
  */
 struct zynqmp_dma_chan {
 	struct zynqmp_dma_device *zdev;
...
...
@@ -237,8 +239,6 @@ struct zynqmp_dma_chan {
 	u32 bus_width;
 	u32 src_burst_len;
 	u32 dst_burst_len;
-	struct clk *clk_main;
-	struct clk *clk_apb;
 };

 /**
...
...
@@ -246,11 +246,15 @@ struct zynqmp_dma_chan {
  * @dev: Device Structure
  * @common: DMA device structure
  * @chan: Driver specific DMA channel
+ * @clk_main: Pointer to main clock
+ * @clk_apb: Pointer to apb clock
  */
 struct zynqmp_dma_device {
 	struct device *dev;
 	struct dma_device common;
 	struct zynqmp_dma_chan *chan;
+	struct clk *clk_main;
+	struct clk *clk_apb;
 };

 static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
...
...
@@ -461,7 +465,11 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
 	struct zynqmp_dma_desc_sw *desc;
-	int i;
+	int i, ret;
+
+	ret = pm_runtime_get_sync(chan->dev);
+	if (ret < 0)
+		return ret;

 	chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
 				     GFP_KERNEL);
...
...
@@ -506,6 +514,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
 static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
 {
 	writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
+	writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
 	chan->idle = false;
 	writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
 }
...
...
@@ -517,12 +526,12 @@ static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
  */
 static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
 {
-	u32 val;
-
+	if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL)
+		writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
 	if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
-		val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+		readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
 	if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
-		val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+		readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
 }

 static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
...
...
@@ -545,6 +554,8 @@ static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
  * zynqmp_dma_device_config - Zynqmp dma device configuration
  * @dchan: DMA channel
  * @config: DMA device config
+ *
+ * Return: 0 always
  */
 static int zynqmp_dma_device_config(struct dma_chan *dchan,
 				    struct dma_slave_config *config)
...
@@ -640,7 +651,7 @@ static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
/**
* zynqmp_dma_free_descriptors - Free channel descriptors
* @
dchan:
DMA channel pointer
* @
chan: ZynqMP
DMA channel pointer
*/
static
void
zynqmp_dma_free_descriptors
(
struct
zynqmp_dma_chan
*
chan
)
{
...
...
@@ -664,6 +675,8 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
 			  (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
 			  chan->desc_pool_v, chan->desc_pool_p);
 	kfree(chan->sw_desc_pool);
+	pm_runtime_mark_last_busy(chan->dev);
+	pm_runtime_put_autosuspend(chan->dev);
 }

 /**
...
...
@@ -715,7 +728,7 @@ static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)

 	if (status & ZYNQMP_DMA_INT_OVRFL) {
 		zynqmp_dma_handle_ovfl_int(chan, status);
-		dev_info(chan->dev, "Channel %p overflow interrupt\n", chan);
+		dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan);
 		ret = IRQ_HANDLED;
 	}
...
...
@@ -838,11 +851,10 @@ static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
 	if (!chan)
 		return;

-	devm_free_irq(chan->zdev->dev, chan->irq, chan);
+	if (chan->irq)
+		devm_free_irq(chan->zdev->dev, chan->irq, chan);
 	tasklet_kill(&chan->tasklet);
 	list_del(&chan->common.device_node);
-	clk_disable_unprepare(chan->clk_apb);
-	clk_disable_unprepare(chan->clk_main);
 }

 /**
...
...
@@ -907,30 +919,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
 			       "zynqmp-dma", chan);
 	if (err)
 		return err;

-	chan->clk_main = devm_clk_get(&pdev->dev, "clk_main");
-	if (IS_ERR(chan->clk_main)) {
-		dev_err(&pdev->dev, "main clock not found.\n");
-		return PTR_ERR(chan->clk_main);
-	}
-
-	chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
-	if (IS_ERR(chan->clk_apb)) {
-		dev_err(&pdev->dev, "apb clock not found.\n");
-		return PTR_ERR(chan->clk_apb);
-	}
-
-	err = clk_prepare_enable(chan->clk_main);
-	if (err) {
-		dev_err(&pdev->dev, "Unable to enable main clock.\n");
-		return err;
-	}
-
-	err = clk_prepare_enable(chan->clk_apb);
-	if (err) {
-		clk_disable_unprepare(chan->clk_main);
-		dev_err(&pdev->dev, "Unable to enable apb clock.\n");
-		return err;
-	}
-
 	chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
 	chan->idle = true;
...
...
@@ -952,6 +940,87 @@ static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
 	return dma_get_slave_channel(&zdev->chan->common);
 }

+/**
+ * zynqmp_dma_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_suspend(struct device *dev)
+{
+	if (!device_may_wakeup(dev))
+		return pm_runtime_force_suspend(dev);
+
+	return 0;
+}
+
+/**
+ * zynqmp_dma_resume - Resume from suspend
+ * @dev: Address of the device structure
+ *
+ * Resume operation after suspend.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_resume(struct device *dev)
+{
+	if (!device_may_wakeup(dev))
+		return pm_runtime_force_resume(dev);
+
+	return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 always
+ */
+static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev)
+{
+	struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(zdev->clk_main);
+	clk_disable_unprepare(zdev->clk_apb);
+
+	return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_resume - Runtime suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 always
+ */
+static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev)
+{
+	struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+	int err;
+
+	err = clk_prepare_enable(zdev->clk_main);
+	if (err) {
+		dev_err(dev, "Unable to enable main clock.\n");
+		return err;
+	}
+
+	err = clk_prepare_enable(zdev->clk_apb);
+	if (err) {
+		dev_err(dev, "Unable to enable apb clock.\n");
+		clk_disable_unprepare(zdev->clk_main);
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume)
+	SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend,
+			   zynqmp_dma_runtime_resume, NULL)
+};
+
 /**
  * zynqmp_dma_probe - Driver probe function
  * @pdev: Pointer to the platform_device structure
...
...
@@ -984,12 +1053,33 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 	p->device_config = zynqmp_dma_device_config;
 	p->dev = &pdev->dev;

+	zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
+	if (IS_ERR(zdev->clk_main)) {
+		dev_err(&pdev->dev, "main clock not found.\n");
+		return PTR_ERR(zdev->clk_main);
+	}
+
+	zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
+	if (IS_ERR(zdev->clk_apb)) {
+		dev_err(&pdev->dev, "apb clock not found.\n");
+		return PTR_ERR(zdev->clk_apb);
+	}
+
 	platform_set_drvdata(pdev, zdev);
+	pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
+	pm_runtime_use_autosuspend(zdev->dev);
+	pm_runtime_enable(zdev->dev);
+	pm_runtime_get_sync(zdev->dev);
+	if (!pm_runtime_enabled(zdev->dev)) {
+		ret = zynqmp_dma_runtime_resume(zdev->dev);
+		if (ret)
+			return ret;
+	}

 	ret = zynqmp_dma_chan_probe(zdev, pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "Probing channel failed\n");
-		goto free_chan_resources;
+		goto err_disable_pm;
 	}

 	p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
...
...
@@ -1005,12 +1095,19 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 		goto free_chan_resources;
 	}

+	pm_runtime_mark_last_busy(zdev->dev);
+	pm_runtime_put_sync_autosuspend(zdev->dev);
+
 	dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n");

 	return 0;

 free_chan_resources:
 	zynqmp_dma_chan_remove(zdev->chan);
+err_disable_pm:
+	if (!pm_runtime_enabled(zdev->dev))
+		zynqmp_dma_runtime_suspend(zdev->dev);
+	pm_runtime_disable(zdev->dev);
 	return ret;
 }
...
...
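The probe hunks above hand clock management over to runtime PM: pm_runtime_get_sync() keeps the device powered while channel resources are in use, and pm_runtime_put_sync_autosuspend() lets it drop to low power only after ZDMA_PM_TIMEOUT milliseconds of idleness. Below is a rough user-space model of that usage-count-plus-autosuspend idea; it is a conceptual sketch, not the kernel's pm_runtime implementation.

/*
 * Model: a usage counter plus a "last busy" timestamp; suspend is allowed
 * only when the count is zero and the autosuspend delay has elapsed.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define AUTOSUSPEND_DELAY_MS	100	/* mirrors ZDMA_PM_TIMEOUT */

struct dev_pm {
	int usage_count;
	long long last_busy_ms;
	bool suspended;
};

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static void pm_get(struct dev_pm *pm)
{
	pm->usage_count++;
	pm->suspended = false;		/* "resume" the device */
}

static void pm_put_autosuspend(struct dev_pm *pm)
{
	pm->usage_count--;
	pm->last_busy_ms = now_ms();	/* analogous to pm_runtime_mark_last_busy() */
}

/* called later, e.g. from a timer: suspend only if idle long enough */
static void pm_try_suspend(struct dev_pm *pm)
{
	if (pm->usage_count == 0 &&
	    now_ms() - pm->last_busy_ms >= AUTOSUSPEND_DELAY_MS)
		pm->suspended = true;
}

int main(void)
{
	struct dev_pm pm = { .suspended = true };

	pm_get(&pm);			/* channel resources allocated */
	printf("suspended=%d\n", pm.suspended);
	pm_put_autosuspend(&pm);	/* resources freed */
	pm_try_suspend(&pm);		/* too early: delay not yet elapsed */
	printf("suspended=%d\n", pm.suspended);
	return 0;
}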
@@ -1028,6 +1125,9 @@ static int zynqmp_dma_remove(struct platform_device *pdev)

 	dma_async_device_unregister(&zdev->common);
 	zynqmp_dma_chan_remove(zdev->chan);
+	pm_runtime_disable(zdev->dev);
+	if (!pm_runtime_enabled(zdev->dev))
+		zynqmp_dma_runtime_suspend(zdev->dev);

 	return 0;
 }
...
...
@@ -1042,6 +1142,7 @@ static struct platform_driver zynqmp_dma_driver = {
 	.driver = {
 		.name = "xilinx-zynqmp-dma",
 		.of_match_table = zynqmp_dma_of_match,
+		.pm = &zynqmp_dma_dev_pm_ops,
 	},
 	.probe = zynqmp_dma_probe,
 	.remove = zynqmp_dma_remove,
...
...