Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
openanolis
cloud-kernel
提交
2e332fec
cloud-kernel
项目概览
openanolis
/
cloud-kernel
1 年多 前同步成功
通知
160
Star
36
Fork
7
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
10
列表
看板
标记
里程碑
合并请求
2
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
cloud-kernel
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
10
Issue
10
列表
看板
标记
里程碑
合并请求
2
合并请求
2
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
2e332fec
编写于
7月 18, 2017
作者:
V
Vineet Gupta
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
ARC: dma: implement dma_unmap_page and sg variant
Signed-off-by:
N
Vineet Gupta
<
vgupta@synopsys.com
>
上级
b37174d9
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
45 additions
and
0 deletions
+45
-0
arch/arc/mm/dma.c
arch/arc/mm/dma.c
+45
-0
未找到文件。
arch/arc/mm/dma.c
浏览文件 @
2e332fec
...
@@ -153,6 +153,19 @@ static void _dma_cache_sync(phys_addr_t paddr, size_t size,
...
@@ -153,6 +153,19 @@ static void _dma_cache_sync(phys_addr_t paddr, size_t size,
}
}
}
}
/*
* arc_dma_map_page - map a portion of a page for streaming DMA
*
* Ensure that any data held in the cache is appropriately discarded
* or written back.
*
* The device owns this memory once this call has completed. The CPU
* can regain ownership by calling dma_unmap_page().
*
* Note: while it takes struct page as arg, caller can "abuse" it to pass
* a region larger than PAGE_SIZE, provided it is physically contiguous
* and this still works correctly
*/
static
dma_addr_t
arc_dma_map_page
(
struct
device
*
dev
,
struct
page
*
page
,
static
dma_addr_t
arc_dma_map_page
(
struct
device
*
dev
,
struct
page
*
page
,
unsigned
long
offset
,
size_t
size
,
enum
dma_data_direction
dir
,
unsigned
long
offset
,
size_t
size
,
enum
dma_data_direction
dir
,
unsigned
long
attrs
)
unsigned
long
attrs
)
...
@@ -165,6 +178,24 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
...
@@ -165,6 +178,24 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
return
plat_phys_to_dma
(
dev
,
paddr
);
return
plat_phys_to_dma
(
dev
,
paddr
);
}
}
/*
 * arc_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there: unless the caller opted out via
 * DMA_ATTR_SKIP_CPU_SYNC, the CPU cache lines covering the buffer are
 * synchronized for @dir before ownership returns to the CPU.
 *
 * Note: historically this routine was not implemented for ARC
 */
static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	/* Caller explicitly asked to skip the CPU-side cache maintenance. */
	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	/* Translate the bus address back to physical and sync the cache. */
	_dma_cache_sync(plat_dma_to_phys(dev, handle), size, dir);
}
static
int
arc_dma_map_sg
(
struct
device
*
dev
,
struct
scatterlist
*
sg
,
static
int
arc_dma_map_sg
(
struct
device
*
dev
,
struct
scatterlist
*
sg
,
int
nents
,
enum
dma_data_direction
dir
,
unsigned
long
attrs
)
int
nents
,
enum
dma_data_direction
dir
,
unsigned
long
attrs
)
{
{
...
@@ -178,6 +209,18 @@ static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
...
@@ -178,6 +209,18 @@ static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
return
nents
;
return
nents
;
}
}
/*
 * arc_dma_unmap_sg - unmap a scatter-gather list mapped by dma_map_sg()
 *
 * Walks every entry of the scatterlist and undoes the per-page mapping,
 * delegating the cache maintenance decision (including the
 * DMA_ATTR_SKIP_CPU_SYNC attribute) to arc_dma_unmap_page().
 */
static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir,
			     unsigned long attrs)
{
	struct scatterlist *sgent;
	int idx;

	for_each_sg(sg, sgent, nents, idx)
		arc_dma_unmap_page(dev, sg_dma_address(sgent),
				   sg_dma_len(sgent), dir, attrs);
}
static
void
arc_dma_sync_single_for_cpu
(
struct
device
*
dev
,
static
void
arc_dma_sync_single_for_cpu
(
struct
device
*
dev
,
dma_addr_t
dma_handle
,
size_t
size
,
enum
dma_data_direction
dir
)
dma_addr_t
dma_handle
,
size_t
size
,
enum
dma_data_direction
dir
)
{
{
...
@@ -223,7 +266,9 @@ const struct dma_map_ops arc_dma_ops = {
...
@@ -223,7 +266,9 @@ const struct dma_map_ops arc_dma_ops = {
.
free
=
arc_dma_free
,
.
free
=
arc_dma_free
,
.
mmap
=
arc_dma_mmap
,
.
mmap
=
arc_dma_mmap
,
.
map_page
=
arc_dma_map_page
,
.
map_page
=
arc_dma_map_page
,
.
unmap_page
=
arc_dma_unmap_page
,
.
map_sg
=
arc_dma_map_sg
,
.
map_sg
=
arc_dma_map_sg
,
.
unmap_sg
=
arc_dma_unmap_sg
,
.
sync_single_for_device
=
arc_dma_sync_single_for_device
,
.
sync_single_for_device
=
arc_dma_sync_single_for_device
,
.
sync_single_for_cpu
=
arc_dma_sync_single_for_cpu
,
.
sync_single_for_cpu
=
arc_dma_sync_single_for_cpu
,
.
sync_sg_for_cpu
=
arc_dma_sync_sg_for_cpu
,
.
sync_sg_for_cpu
=
arc_dma_sync_sg_for_cpu
,
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录