OpenHarmony / kernel_linux
Commit 2ffb850e
Authored Apr 10, 2018 by Vinod Koul
Merge branch 'topic/stm' into for-linus
Parents: 3a7b854d 9df3bd55
Showing 2 changed files with 243 additions and 50 deletions (+243 -50)
Documentation/devicetree/bindings/dma/stm32-dma.txt    +3 -3
drivers/dma/stm32-dma.c    +240 -47
Documentation/devicetree/bindings/dma/stm32-dma.txt

@@ -62,14 +62,14 @@ channel: a phandle to the DMA controller plus the following four integer cells:
 	0x1: medium
 	0x2: high
 	0x3: very high
-4. A 32bit mask specifying the DMA FIFO threshold configuration which are device
-   dependent:
- -bit 0-1: Fifo threshold
+4. A 32bit bitfield value specifying DMA features which are device dependent:
+ -bit 0-1: DMA FIFO threshold selection
 	0x0: 1/4 full FIFO
 	0x1: 1/2 full FIFO
 	0x2: 3/4 full FIFO
 	0x3: full FIFO

 Example:

 	usart1: serial@40011000 {
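For illustration only: a minimal userspace sketch of how a consumer of this binding could decode the new fourth cell, assuming nothing beyond what the text above states (bits 0-1 select the FIFO threshold). The macro and variable names here are made up for the example and are not part of the binding or the driver.

#include <stdint.h>
#include <stdio.h>

/* Bits 0-1 of the fourth "features" cell select the FIFO threshold. */
#define DMA_FEATURES_FIFO_THRESHOLD_MASK 0x3u

static const char *const fifo_threshold_names[] = {
	"1/4 full FIFO", "1/2 full FIFO", "3/4 full FIFO", "full FIFO",
};

int main(void)
{
	uint32_t features = 0x3;	/* hypothetical fourth-cell value from a dmas entry */
	uint32_t fth = features & DMA_FEATURES_FIFO_THRESHOLD_MASK;

	printf("FIFO threshold selection: %s\n", fifo_threshold_names[fth]);
	return 0;
}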
drivers/dma/stm32-dma.c

@@ -5,6 +5,7 @@
  *
  * Copyright (C) M'boumba Cedric Madianga 2015
  * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
  *
  * License terms: GNU General Public License (GPL), version 2
  */

@@ -33,9 +34,14 @@
 #define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
 #define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
 #define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
+#define STM32_DMA_HTI			BIT(4) /* Half Transfer Interrupt */
 #define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
 #define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
 #define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */
+#define STM32_DMA_MASKI			(STM32_DMA_TCI \
+					 | STM32_DMA_TEI \
+					 | STM32_DMA_DMEI \
+					 | STM32_DMA_FEI)

 /* DMA Stream x Configuration Register */
 #define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */

@@ -60,7 +66,8 @@
 #define STM32_DMA_SCR_PINC		BIT(9) /* Peripheral increment mode */
 #define STM32_DMA_SCR_CIRC		BIT(8) /* Circular mode */
 #define STM32_DMA_SCR_PFCTRL		BIT(5) /* Peripheral Flow Controller */
-#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Cplete Int Enable*/
+#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Complete Int Enable
+					*/
 #define STM32_DMA_SCR_TEIE		BIT(2) /* Transfer Error Int Enable */
 #define STM32_DMA_SCR_DMEIE		BIT(1) /* Direct Mode Err Int Enable */
 #define STM32_DMA_SCR_EN		BIT(0) /* Stream Enable */

@@ -111,11 +118,24 @@
 #define STM32_DMA_FIFO_THRESHOLD_FULL		0x03

 #define STM32_DMA_MAX_DATA_ITEMS	0xffff
+/*
+ * Valid transfer starts from @0 to @0xFFFE leading to unaligned scatter
+ * gather at boundary. Thus it's safer to round down this value on FIFO
+ * size (16 Bytes)
+ */
+#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS	\
+	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
 #define STM32_DMA_MAX_CHANNELS		0x08
 #define STM32_DMA_MAX_REQUEST_ID	0x08
 #define STM32_DMA_MAX_DATA_PARAM	0x03
+#define STM32_DMA_FIFO_SIZE		16	/* FIFO is 16 bytes */
+#define STM32_DMA_MIN_BURST		4
 #define STM32_DMA_MAX_BURST		16

+/* DMA Features */
+#define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
+#define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)
+
 enum stm32_dma_width {
 	STM32_DMA_BYTE,
 	STM32_DMA_HALF_WORD,
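A quick note on the STM32_DMA_ALIGNED_MAX_DATA_ITEMS macro above: rounding 0xffff down to the 16-byte FIFO size yields 0xfff0. The snippet below is a standalone sketch using a simplified stand-in for the kernel's ALIGN_DOWN() (valid here because 16 is a power of two); it is not the kernel definition.

#include <stdio.h>

/* Power-of-two stand-in for the kernel's ALIGN_DOWN() helper. */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	/* 0xffff data items rounded down to the 16-byte FIFO size -> 0xfff0. */
	printf("aligned max data items: 0x%x\n", ALIGN_DOWN(0xffffu, 16u));
	return 0;
}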
@@ -129,11 +149,18 @@ enum stm32_dma_burst_size {
 	STM32_DMA_BURST_INCR16,
 };

+/**
+ * struct stm32_dma_cfg - STM32 DMA custom configuration
+ * @channel_id: channel ID
+ * @request_line: DMA request
+ * @stream_config: 32bit mask specifying the DMA channel configuration
+ * @features: 32bit mask specifying the DMA Feature list
+ */
 struct stm32_dma_cfg {
 	u32 channel_id;
 	u32 request_line;
 	u32 stream_config;
-	u32 threshold;
+	u32 features;
 };

 struct stm32_dma_chan_reg {
@@ -171,6 +198,9 @@ struct stm32_dma_chan {
 	u32 next_sg;
 	struct dma_slave_config	dma_sconfig;
 	struct stm32_dma_chan_reg chan_reg;
+	u32 threshold;
+	u32 mem_burst;
+	u32 mem_width;
 };

 struct stm32_dma_device {
@@ -235,6 +265,85 @@ static int stm32_dma_get_width(struct stm32_dma_chan *chan,
 	}
 }

+static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
+						       u32 threshold)
+{
+	enum dma_slave_buswidth max_width;
+
+	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
+		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	else
+		max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+	while ((buf_len < max_width || buf_len % max_width) &&
+	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
+		max_width = max_width >> 1;
+
+	return max_width;
+}
+
+static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
+						enum dma_slave_buswidth width)
+{
+	u32 remaining;
+
+	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+		if (burst != 0) {
+			/*
+			 * If number of beats fit in several whole bursts
+			 * this configuration is allowed.
+			 */
+			remaining = ((STM32_DMA_FIFO_SIZE / width) *
+				     (threshold + 1) / 4) % burst;
+
+			if (remaining == 0)
+				return true;
+		} else {
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
+{
+	switch (threshold) {
+	case STM32_DMA_FIFO_THRESHOLD_FULL:
+		if (buf_len >= STM32_DMA_MAX_BURST)
+			return true;
+		else
+			return false;
+	case STM32_DMA_FIFO_THRESHOLD_HALFFULL:
+		if (buf_len >= STM32_DMA_MAX_BURST / 2)
+			return true;
+		else
+			return false;
+	default:
+		return false;
+	}
+}
+
+static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
+				    enum dma_slave_buswidth width)
+{
+	u32 best_burst = max_burst;
+
+	if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
+		return 0;
+
+	while ((buf_len < best_burst * width && best_burst > 1) ||
+	       !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
+						    width)) {
+		if (best_burst > STM32_DMA_MIN_BURST)
+			best_burst = best_burst >> 1;
+		else
+			best_burst = 0;
+	}
+
+	return best_burst;
+}
+
 static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
 {
 	switch (maxburst) {
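The helpers added above pick a burst length that both fits the buffer and divides evenly into the beats available at the chosen FIFO threshold. The following is a standalone sketch (our own names and main(), not driver code) that mirrors that arithmetic; it omits the stm32_dma_is_burst_possible() feasibility pre-check for brevity. For a full-FIFO threshold and 4-byte beats, 16 bytes / 4 bytes = 4 beats are available, so the search halves 16 -> 8 -> 4 and settles on a burst of 4 beats.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FIFO_SIZE	16	/* bytes */
#define MAX_BURST	16	/* beats */
#define MIN_BURST	4	/* beats */
#define THRESHOLD_FULL	0x03

/* Mirrors stm32_dma_fifo_threshold_is_allowed(): the beats available at the
 * selected threshold must be a whole multiple of the burst length. */
static bool threshold_allows(uint32_t burst, uint32_t threshold, uint32_t width)
{
	uint32_t beats = (FIFO_SIZE / width) * (threshold + 1) / 4;

	return burst ? (beats % burst) == 0 : true;
}

/* Simplified version of the halving search in stm32_dma_get_best_burst(). */
static uint32_t best_burst(uint32_t buf_len, uint32_t threshold, uint32_t width)
{
	uint32_t burst = MAX_BURST;

	while ((buf_len < burst * width && burst > 1) ||
	       !threshold_allows(burst, threshold, width)) {
		if (burst > MIN_BURST)
			burst >>= 1;
		else
			return 0;	/* fall back to single transfers */
	}
	return burst;
}

int main(void)
{
	/* 4 KiB buffer, 4-byte beats, full-FIFO threshold -> 4-beat burst. */
	printf("best burst: %u beats\n",
	       (unsigned int)best_burst(4096, THRESHOLD_FULL, 4));
	return 0;
}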
@@ -254,12 +363,12 @@ static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
 	}
 }

 static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
-				      u32 src_maxburst, u32 dst_maxburst)
+				      u32 src_burst, u32 dst_burst)
 {
 	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
 	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

-	if ((!src_maxburst) && (!dst_maxburst)) {
+	if (!src_burst && !dst_burst) {
 		/* Using direct mode */
 		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
 	} else {

@@ -300,7 +409,7 @@ static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
 	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

-	return flags;
+	return flags & STM32_DMA_MASKI;
 }

 static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
@@ -315,6 +424,7 @@ static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
 	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
 	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
 	 */
+	flags &= STM32_DMA_MASKI;
 	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

 	if (chan->id & 4)
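The shift used in both stm32_dma_irq_status() and stm32_dma_irq_clear() packs four channels per ISR/IFCR register at bit offsets 0, 6, 16 and 22, with chan->id & 4 selecting the low or high register. A throwaway table generator (plain C, not driver code) makes the mapping explicit:

#include <stdio.h>

int main(void)
{
	for (unsigned int id = 0; id < 8; id++) {
		unsigned int shift = ((id & 2) << 3) | ((id & 1) * 6);

		/* id & 4 picks the low or high ISR/IFCR register. */
		printf("channel %u: %s register, bit offset %2u\n",
		       id, (id & 4) ? "high" : "low", shift);
	}
	return 0;
}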
@@ -429,6 +539,8 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
 	dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n", sfcr);
 }

+static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);
+
 static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
 {
 	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

@@ -471,6 +583,9 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
 	if (status)
 		stm32_dma_irq_clear(chan, status);

+	if (chan->desc->cyclic)
+		stm32_dma_configure_next_sg(chan);
+
 	stm32_dma_dump_reg(chan);

 	/* Start DMA */
@@ -541,13 +656,29 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
 	status = stm32_dma_irq_status(chan);
 	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));

-	if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
+	if (status & STM32_DMA_TCI) {
 		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
-		stm32_dma_handle_chan_done(chan);
-
-	} else {
+		if (scr & STM32_DMA_SCR_TCIE)
+			stm32_dma_handle_chan_done(chan);
+		status &= ~STM32_DMA_TCI;
+	}
+	if (status & STM32_DMA_HTI) {
+		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
+		status &= ~STM32_DMA_HTI;
+	}
+	if (status & STM32_DMA_FEI) {
+		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
+		status &= ~STM32_DMA_FEI;
+		if (!(scr & STM32_DMA_SCR_EN))
+			dev_err(chan2dev(chan), "FIFO Error\n");
+		else
+			dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+	}
+	if (status) {
 		stm32_dma_irq_clear(chan, status);
 		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
+		if (!(scr & STM32_DMA_SCR_EN))
+			dev_err(chan2dev(chan), "chan disabled by HW\n");
 	}

 	spin_unlock(&chan->vchan.lock);
@@ -564,45 +695,59 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
 	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
 		dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
 		stm32_dma_start_transfer(chan);
-		if (chan->desc->cyclic)
-			stm32_dma_configure_next_sg(chan);
+
 	}
 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
 }

 static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
 				    enum dma_transfer_direction direction,
-				    enum dma_slave_buswidth *buswidth)
+				    enum dma_slave_buswidth *buswidth,
+				    u32 buf_len)
 {
 	enum dma_slave_buswidth src_addr_width, dst_addr_width;
 	int src_bus_width, dst_bus_width;
 	int src_burst_size, dst_burst_size;
-	u32 src_maxburst, dst_maxburst;
-	u32 dma_scr = 0;
+	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
+	u32 dma_scr, threshold;

 	src_addr_width = chan->dma_sconfig.src_addr_width;
 	dst_addr_width = chan->dma_sconfig.dst_addr_width;
 	src_maxburst = chan->dma_sconfig.src_maxburst;
 	dst_maxburst = chan->dma_sconfig.dst_maxburst;
+	threshold = chan->threshold;

 	switch (direction) {
 	case DMA_MEM_TO_DEV:
+		/* Set device data size */
 		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
 		if (dst_bus_width < 0)
 			return dst_bus_width;

-		dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst);
+		/* Set device burst size */
+		dst_best_burst = stm32_dma_get_best_burst(buf_len,
+							  dst_maxburst,
+							  threshold,
+							  dst_addr_width);
+
+		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
 		if (dst_burst_size < 0)
 			return dst_burst_size;

-		if (!src_addr_width)
-			src_addr_width = dst_addr_width;
-
+		/* Set memory data size */
+		src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+		chan->mem_width = src_addr_width;
 		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
 		if (src_bus_width < 0)
 			return src_bus_width;

-		src_burst_size = stm32_dma_get_burst(chan, src_maxburst);
+		/* Set memory burst size */
+		src_maxburst = STM32_DMA_MAX_BURST;
+		src_best_burst = stm32_dma_get_best_burst(buf_len,
+							  src_maxburst,
+							  threshold,
+							  src_addr_width);
+		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
 		if (src_burst_size < 0)
 			return src_burst_size;
@@ -612,27 +757,46 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
 			STM32_DMA_SCR_PBURST(dst_burst_size) |
 			STM32_DMA_SCR_MBURST(src_burst_size);

+		/* Set FIFO threshold */
+		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
+		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+
+		/* Set peripheral address */
 		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
 		*buswidth = dst_addr_width;
 		break;

 	case DMA_DEV_TO_MEM:
+		/* Set device data size */
 		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
 		if (src_bus_width < 0)
 			return src_bus_width;

-		src_burst_size = stm32_dma_get_burst(chan, src_maxburst);
+		/* Set device burst size */
+		src_best_burst = stm32_dma_get_best_burst(buf_len,
+							  src_maxburst,
+							  threshold,
+							  src_addr_width);
+		chan->mem_burst = src_best_burst;
+		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
 		if (src_burst_size < 0)
 			return src_burst_size;

-		if (!dst_addr_width)
-			dst_addr_width = src_addr_width;
-
+		/* Set memory data size */
+		dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+		chan->mem_width = dst_addr_width;
 		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
 		if (dst_bus_width < 0)
 			return dst_bus_width;

-		dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst);
+		/* Set memory burst size */
+		dst_maxburst = STM32_DMA_MAX_BURST;
+		dst_best_burst = stm32_dma_get_best_burst(buf_len,
+							  dst_maxburst,
+							  threshold,
+							  dst_addr_width);
+		chan->mem_burst = dst_best_burst;
+		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
 		if (dst_burst_size < 0)
 			return dst_burst_size;

@@ -642,6 +806,11 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
 			STM32_DMA_SCR_PBURST(src_burst_size) |
 			STM32_DMA_SCR_MBURST(dst_burst_size);

+		/* Set FIFO threshold */
+		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
+		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+
+		/* Set peripheral address */
 		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
 		*buswidth = chan->dma_sconfig.src_addr_width;
 		break;

@@ -651,8 +820,9 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
 		return -EINVAL;
 	}

-	stm32_dma_set_fifo_config(chan, src_maxburst, dst_maxburst);
+	stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);

+	/* Set DMA control register */
 	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
 			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
 			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
@@ -692,10 +862,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
 	if (!desc)
 		return NULL;

-	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth);
-	if (ret < 0)
-		goto err;
-
 	/* Set peripheral flow controller */
 	if (chan->dma_sconfig.device_fc)
 		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;

@@ -703,10 +869,15 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
 		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

 	for_each_sg(sgl, sg, sg_len, i) {
+		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
+					       sg_dma_len(sg));
+		if (ret < 0)
+			goto err;
+
 		desc->sg_req[i].len = sg_dma_len(sg);

 		nb_data_items = desc->sg_req[i].len / buswidth;
-		if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) {
+		if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
 			dev_err(chan2dev(chan), "nb items not supported\n");
 			goto err;
 		}
@@ -767,12 +938,12 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
 		return NULL;
 	}

-	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth);
+	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
 	if (ret < 0)
 		return NULL;

 	nb_data_items = period_len / buswidth;
-	if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) {
+	if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
 		dev_err(chan2dev(chan), "number of items not supported\n");
 		return NULL;
 	}
@@ -816,35 +987,45 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
 	dma_addr_t src, size_t len, unsigned long flags)
 {
 	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-	u32 num_sgs;
+	enum dma_slave_buswidth max_width;
 	struct stm32_dma_desc *desc;
 	size_t xfer_count, offset;
+	u32 num_sgs, best_burst, dma_burst, threshold;
 	int i;

-	num_sgs = DIV_ROUND_UP(len, STM32_DMA_MAX_DATA_ITEMS);
+	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
 	desc = stm32_dma_alloc_desc(num_sgs);
 	if (!desc)
 		return NULL;

+	threshold = chan->threshold;
+
 	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
 		xfer_count = min_t(size_t, len - offset,
-				   STM32_DMA_MAX_DATA_ITEMS);
+				   STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

-		desc->sg_req[i].len = xfer_count;
+		/* Compute best burst size */
+		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
+						      threshold, max_width);
+		dma_burst = stm32_dma_get_burst(chan, best_burst);

 		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
 		desc->sg_req[i].chan_reg.dma_scr =
 			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
+			STM32_DMA_SCR_PBURST(dma_burst) |
+			STM32_DMA_SCR_MBURST(dma_burst) |
 			STM32_DMA_SCR_MINC |
 			STM32_DMA_SCR_PINC |
 			STM32_DMA_SCR_TCIE |
 			STM32_DMA_SCR_TEIE;
-		desc->sg_req[i].chan_reg.dma_sfcr = STM32_DMA_SFCR_DMDIS |
-			STM32_DMA_SFCR_FTH(STM32_DMA_FIFO_THRESHOLD_FULL) |
-			STM32_DMA_SFCR_FEIE;
+		desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
+		desc->sg_req[i].chan_reg.dma_sfcr |=
+			STM32_DMA_SFCR_FTH(threshold);
 		desc->sg_req[i].chan_reg.dma_spar = src + offset;
 		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
 		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
+		desc->sg_req[i].len = xfer_count;
 	}

 	desc->num_sgs = num_sgs;
@@ -869,6 +1050,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
 				     struct stm32_dma_desc *desc,
 				     u32 next_sg)
 {
+	u32 modulo, burst_size;
 	u32 residue = 0;
 	int i;

@@ -876,8 +1058,10 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
 	 * In cyclic mode, for the last period, residue = remaining bytes from
 	 * NDTR
 	 */
-	if (chan->desc->cyclic && next_sg == 0)
-		return stm32_dma_get_remaining_bytes(chan);
+	if (chan->desc->cyclic && next_sg == 0) {
+		residue = stm32_dma_get_remaining_bytes(chan);
+		goto end;
+	}

 	/*
 	 * For all other periods in cyclic mode, and in sg mode,
@@ -888,6 +1072,15 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
 		residue += desc->sg_req[i].len;
 	residue += stm32_dma_get_remaining_bytes(chan);

+end:
+	if (!chan->mem_burst)
+		return residue;
+
+	burst_size = chan->mem_burst * chan->mem_width;
+	modulo = residue % burst_size;
+	if (modulo)
+		residue = residue - modulo + burst_size;
+
 	return residue;
 }
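Since the memory port may now use bursts, a residue read back mid-burst is rounded up to the next burst boundary by the new end: block above. A small standalone sketch of that rounding, with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Round a raw residue up to the next memory-burst boundary, mirroring the
 * end: label added to stm32_dma_desc_residue(). */
static uint32_t round_residue(uint32_t residue, uint32_t mem_burst,
			      uint32_t mem_width)
{
	uint32_t burst_size, modulo;

	if (!mem_burst)
		return residue;

	burst_size = mem_burst * mem_width;	/* bytes per burst */
	modulo = residue % burst_size;
	return modulo ? residue - modulo + burst_size : residue;
}

int main(void)
{
	/* 4-beat burst of 4-byte beats = 16-byte granularity:
	 * a raw residue of 100 bytes is reported as 112. */
	printf("rounded residue: %u bytes\n",
	       (unsigned int)round_residue(100, 4, 4));
	return 0;
}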
@@ -902,7 +1095,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
 	u32 residue = 0;

 	status = dma_cookie_status(c, cookie, state);
-	if ((status == DMA_COMPLETE) || (!state))
+	if (status == DMA_COMPLETE || !state)
 		return status;

 	spin_lock_irqsave(&chan->vchan.lock, flags);

@@ -966,7 +1159,7 @@ static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
 }

 static void stm32_dma_set_config(struct stm32_dma_chan *chan,
 				 struct stm32_dma_cfg *cfg)
 {
 	stm32_dma_clear_reg(&chan->chan_reg);

@@ -976,7 +1169,7 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan,
 	/* Enable Interrupts */
 	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

-	chan->chan_reg.dma_sfcr = cfg->threshold & STM32_DMA_SFCR_FTH_MASK;
+	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
 }

 static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
@@ -996,10 +1189,10 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
 	cfg.channel_id = dma_spec->args[0];
 	cfg.request_line = dma_spec->args[1];
 	cfg.stream_config = dma_spec->args[2];
-	cfg.threshold = dma_spec->args[3];
+	cfg.features = dma_spec->args[3];

-	if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) ||
-	    (cfg.request_line >= STM32_DMA_MAX_REQUEST_ID)) {
+	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
+	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
 		dev_err(dev, "Bad channel and/or request id\n");
 		return NULL;
 	}