openeuler / raspberrypi-kernel · Commit 0fd53cfb
Authored Oct 24, 2013 by Thomas Hellstrom
drm/vmwgfx: Use the linux DMA api also for MOBs
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Parent: 4b9e45e6
Showing 3 changed files with 142 additions and 41 deletions (+142 -41)
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c  +60 -1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     +6  -2
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c     +76 -38
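A note on the shape of the change before the per-file diffs: the MOB (memory object) page-table code previously stored struct page pointers and handed the device page frame numbers via page_to_pfn(). This commit routes everything through the driver's vmw_piter iterator and dma_addr_t values, so MOB page tables remain correct when the Linux DMA API remaps pages (bounce buffers, IOMMU). A minimal sketch of the iteration pattern the patch builds on, using only helpers that appear in the diff; the function name walk_bo_dma_addrs is hypothetical, and the addresses are only valid while the buffer object is reserved or pinned and vmw_bo_map_dma() has succeeded:

/* Sketch only, not part of the commit: walk a BO's backing pages and
 * read the device-visible address of each one. */
static void walk_bo_dma_addrs(struct ttm_buffer_object *bo)
{
        const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
        struct vmw_piter iter;

        vmw_piter_start(&iter, vsgt, 0);        /* start at page offset 0 */
        while (vmw_piter_next(&iter)) {
                dma_addr_t addr = vmw_piter_dma_addr(&iter);

                (void)addr; /* addr >> PAGE_SHIFT is the PPN the device expects */
        }
}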
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -272,6 +272,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
 		viter->dma_address = &__vmw_piter_dma_addr;
 		viter->page = &__vmw_piter_non_sg_page;
 		viter->addrs = vsgt->addrs;
+		viter->pages = vsgt->pages;
 		break;
 	case vmw_dma_map_populate:
 	case vmw_dma_map_bind:
@@ -452,6 +453,63 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
 	vmw_tt->mapped = false;
 }
 
+
+/**
+ * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ * Note that the buffer object must be either pinned or reserved before
+ * calling this function.
+ */
+int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	return vmw_ttm_map_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ */
+void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	vmw_ttm_unmap_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+ * TTM buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Returns a pointer to a struct vmw_sg_table object. The object should
+ * not be freed after use.
+ * Note that for the device addresses to be valid, the buffer object must
+ * either be reserved or pinned.
+ */
+const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	return &vmw_tt->vsgt;
+}
+
+
 static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
 	struct vmw_ttm_tt *vmw_be =
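For reference, the intended calling pattern for these three wrappers, following the kernel-doc above (reserve or pin first, then map). This is a hedged sketch, not code from the commit; use_bo_dma is a hypothetical caller:

/* Sketch: make a BO's pages device-visible, use its sg table, clean up. */
static int use_bo_dma(struct ttm_buffer_object *bo)
{
        const struct vmw_sg_table *vsgt;
        int ret;

        ret = ttm_bo_reserve(bo, false, true, false, 0);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_bo_map_dma(bo);       /* build/refresh the DMA mappings */
        if (unlikely(ret != 0))
                goto out_unreserve;

        vsgt = vmw_bo_sg_table(bo);     /* borrowed pointer; do not free */
        /* ... hand vsgt to vmw_mob_bind() or iterate it with a vmw_piter ... */

        vmw_bo_unmap_dma(bo);
out_unreserve:
        ttm_bo_unreserve(bo);
        return ret;
}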
@@ -478,7 +536,7 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 	}
 
 	return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
-			    ttm->pages, ttm->num_pages,
+			    &vmw_be->vsgt, ttm->num_pages,
 			    vmw_be->gmr_id);
 	default:
 		BUG();
@@ -526,6 +584,7 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
 	kfree(vmw_be);
 }
 
+
 static int vmw_ttm_populate(struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_tt =
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -708,6 +708,10 @@ extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_placement vmw_mob_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
+extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
+extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
+extern const struct vmw_sg_table *
+vmw_bo_sg_table(struct ttm_buffer_object *bo);
 extern void vmw_piter_start(struct vmw_piter *viter,
 			    const struct vmw_sg_table *vsgt,
 			    unsigned long p_offs);
@@ -919,8 +923,8 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
  */
 struct vmw_mob;
 extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
-			struct page **data_pages, unsigned long num_data_pages,
-			int32_t mob_id);
+			const struct vmw_sg_table *vsgt,
+			unsigned long num_data_pages, int32_t mob_id);
 extern void vmw_mob_unbind(struct vmw_private *dev_priv,
 			   struct vmw_mob *mob);
 extern void vmw_mob_destroy(struct vmw_mob *mob);
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -41,13 +41,13 @@
  *
  * @num_pages Number of pages that make up the page table.
  * @pt_level The indirection level of the page table. 0-2.
- * @pt_root_page Pointer to the level 0 page of the page table.
+ * @pt_root_page DMA address of the level 0 page of the page table.
  */
 struct vmw_mob {
 	struct ttm_buffer_object *pt_bo;
 	unsigned long num_pages;
 	unsigned pt_level;
-	struct page *pt_root_page;
+	dma_addr_t pt_root_page;
 	uint32_t id;
 };
@@ -65,7 +65,7 @@ struct vmw_otable {
 static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 			       struct vmw_mob *mob);
 static void vmw_mob_pt_setup(struct vmw_mob *mob,
-			     struct page **data_pages,
+			     struct vmw_piter data_iter,
 			     unsigned long num_data_pages);
 
 /*
@@ -89,13 +89,17 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetOTableBase body;
 	} *cmd;
-	struct page **pages = dev_priv->otable_bo->ttm->pages +
-		(offset >> PAGE_SHIFT);
 	struct vmw_mob *mob;
+	const struct vmw_sg_table *vsgt;
+	struct vmw_piter iter;
 	int ret;
 
 	BUG_ON(otable->page_table != NULL);
 
+	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
+	WARN_ON(!vmw_piter_next(&iter));
+
 	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
 	if (unlikely(mob == NULL)) {
 		DRM_ERROR("Failed creating OTable page table.\n");
@@ -103,15 +107,17 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	}
 
 	if (otable->size <= PAGE_SIZE) {
-		mob->pt_level = 0;
-		mob->pt_root_page = pages[0];
+		mob->pt_level = SVGA3D_MOBFMT_PTDEPTH_0;
+		mob->pt_root_page = vmw_piter_dma_addr(&iter);
+	} else if (vsgt->num_regions == 1) {
+		mob->pt_level = SVGA3D_MOBFMT_RANGE;
+		mob->pt_root_page = vmw_piter_dma_addr(&iter);
 	} else {
 		ret = vmw_mob_pt_populate(dev_priv, mob);
 		if (unlikely(ret != 0))
 			goto out_no_populate;
 
-		vmw_mob_pt_setup(mob, pages,
-				 otable->size >> PAGE_SHIFT);
+		vmw_mob_pt_setup(mob, iter,
+				 otable->size >> PAGE_SHIFT);
 	}
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
@@ -124,7 +130,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.type = type;
-	cmd->body.baseAddress = page_to_pfn(mob->pt_root_page);
+	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = otable->size;
 	cmd->body.validSizeInBytes = 0;
 	cmd->body.ptDepth = mob->pt_level;
@@ -244,9 +250,13 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
 	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, false);
 	BUG_ON(ret != 0);
 	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
-	ttm_bo_unreserve(dev_priv->otable_bo);
 	if (unlikely(ret != 0))
-		goto out_no_setup;
+		goto out_unreserve;
+	ret = vmw_bo_map_dma(dev_priv->otable_bo);
+	if (unlikely(ret != 0))
+		goto out_unreserve;
+
+	ttm_bo_unreserve(dev_priv->otable_bo);
 
 	offset = 0;
 	for (i = 0; i < SVGA_OTABLE_COUNT; ++i) {
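The point of the reordering above: the buffer object now stays reserved across vmw_bo_map_dma(), so a mapping failure can unwind through the new out_unreserve label before out_no_setup tears down the otables. The same reserve → populate → map → unreserve sequence is applied to the MOB page-table BO in vmw_mob_pt_populate() further down.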
@@ -260,6 +270,8 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
 	dev_priv->otables = otables;
 	return 0;
 
+out_unreserve:
+	ttm_bo_unreserve(dev_priv->otable_bo);
 out_no_setup:
 	for (i = 0; i < SVGA_OTABLE_COUNT; ++i)
 		vmw_takedown_otable_base(dev_priv, i, &otables[i]);
@@ -365,9 +377,19 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 	BUG_ON(ret != 0);
 	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
-	ttm_bo_unreserve(mob->pt_bo);
 	if (unlikely(ret != 0))
-		ttm_bo_unref(&mob->pt_bo);
+		goto out_unreserve;
+
+	ret = vmw_bo_map_dma(mob->pt_bo);
+	if (unlikely(ret != 0))
+		goto out_unreserve;
+
+	ttm_bo_unreserve(mob->pt_bo);
+
+	return 0;
+
+out_unreserve:
+	ttm_bo_unreserve(mob->pt_bo);
+	ttm_bo_unref(&mob->pt_bo);
 
 	return ret;
 }
@@ -376,7 +398,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 /*
  * vmw_mob_build_pt - Build a pagetable
  *
- * @data_pages: Array of page pointers to the underlying buffer
+ * @data_addr: Array of DMA addresses to the underlying buffer
  * object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_pages: Array of page pointers to the page table pages.
@@ -384,26 +406,31 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
  * Returns the number of page table pages actually used.
  * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
  */
-static unsigned long vmw_mob_build_pt(struct page **data_pages,
+static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
 				      unsigned long num_data_pages,
-				      struct page **pt_pages)
+				      struct vmw_piter *pt_iter)
 {
 	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
 	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
-	unsigned long pt_page, data_page;
+	unsigned long pt_page;
 	uint32_t *addr, *save_addr;
 	unsigned long i;
+	struct page *page;
 
-	data_page = 0;
 	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
-		save_addr = addr = kmap_atomic(pt_pages[pt_page]);
+		page = vmw_piter_page(pt_iter);
+
+		save_addr = addr = kmap_atomic(page);
 
 		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
-			*addr++ = page_to_pfn(data_pages[data_page++]);
-			if (unlikely(data_page >= num_data_pages))
+			u32 tmp = vmw_piter_dma_addr(data_iter) >> PAGE_SHIFT;
+			*addr++ = tmp;
+			if (unlikely(--num_data_pages == 0))
 				break;
+			WARN_ON(!vmw_piter_next(data_iter));
 		}
 		kunmap_atomic(save_addr);
+		vmw_piter_next(pt_iter);
 	}
 
 	return num_pt_pages;
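The sizing here is worth making concrete. Each page-table page holds PAGE_SIZE / VMW_PPN_SIZE entries; assuming 4 KiB pages and 4-byte entries (the u32 PPNs written above), that is 4096 / 4 = 1024 entries per page, so a 10000-page buffer needs DIV_ROUND_UP(10000 * 4, 4096) = 10 page-table pages. Note also the control-flow subtlety the rewrite introduces: num_data_pages is now decremented in place, the inner loop breaks as soon as it reaches zero, and the WARN_ON fires if the data iterator runs out of pages before the count does.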
@@ -413,38 +440,41 @@ static unsigned long vmw_mob_build_pt(struct page **data_pages,
  * vmw_mob_build_pt - Set up a multilevel mob pagetable
  *
  * @mob: Pointer to a mob whose page table needs setting up.
- * @data_pages Array of page pointers to the buffer object's data
+ * @data_addr Array of DMA addresses to the buffer object's data
  * pages.
  * @num_data_pages: Number of buffer object data pages.
  *
  * Uses tail recursion to set up a multilevel mob page table.
  */
 static void vmw_mob_pt_setup(struct vmw_mob *mob,
-			     struct page **data_pages,
+			     struct vmw_piter data_iter,
 			     unsigned long num_data_pages)
 {
-	struct page **pt_pages;
 	unsigned long num_pt_pages = 0;
 	struct ttm_buffer_object *bo = mob->pt_bo;
+	struct vmw_piter save_pt_iter;
+	struct vmw_piter pt_iter;
+	const struct vmw_sg_table *vsgt;
 	int ret;
 
 	ret = ttm_bo_reserve(bo, false, true, false, 0);
 	BUG_ON(ret != 0);
 
-	pt_pages = bo->ttm->pages;
+	vsgt = vmw_bo_sg_table(bo);
+	vmw_piter_start(&pt_iter, vsgt, 0);
+	BUG_ON(!vmw_piter_next(&pt_iter));
 	mob->pt_level = 0;
 	while (likely(num_data_pages > 1)) {
 		++mob->pt_level;
 		BUG_ON(mob->pt_level > 2);
-
-		pt_pages += num_pt_pages;
-		num_pt_pages = vmw_mob_build_pt(data_pages, num_data_pages,
-						pt_pages);
-		data_pages = pt_pages;
+		save_pt_iter = pt_iter;
+		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
+						&pt_iter);
+		data_iter = save_pt_iter;
 		num_data_pages = num_pt_pages;
 	}
 
-	mob->pt_root_page = *pt_pages;
+	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
 	ttm_bo_unreserve(bo);
 }
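This loop also explains the BUG_ON(mob->pt_level > 2): each iteration shrinks the page count by a factor of PAGE_SIZE / VMW_PPN_SIZE (1024, under the same 4 KiB-page, 4-byte-entry assumption), so one level covers up to 1024 data pages (4 MiB) and two levels up to 1024^2 = 1048576 pages (4 GiB); anything larger would need a third level, which this code rules out.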
@@ -506,7 +536,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
  *
  * @dev_priv: Pointer to a device private.
  * @mob: Pointer to the mob we're making visible.
- * @data_pages: Array of pointers to the data pages of the underlying
+ * @data_addr: Array of DMA addresses to the data pages of the underlying
  * buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 * object.
@@ -517,27 +547,35 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
  */
 int vmw_mob_bind(struct vmw_private *dev_priv,
 		 struct vmw_mob *mob,
-		 struct page **data_pages,
+		 const struct vmw_sg_table *vsgt,
 		 unsigned long num_data_pages,
 		 int32_t mob_id)
 {
 	int ret;
 	bool pt_set_up = false;
+	struct vmw_piter data_iter;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDefineGBMob body;
 	} *cmd;
 
 	mob->id = mob_id;
+	vmw_piter_start(&data_iter, vsgt, 0);
+	if (unlikely(!vmw_piter_next(&data_iter)))
+		return 0;
+
 	if (likely(num_data_pages == 1)) {
-		mob->pt_level = 0;
-		mob->pt_root_page = *data_pages;
+		mob->pt_level = SVGA3D_MOBFMT_PTDEPTH_0;
+		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+	} else if (vsgt->num_regions == 1) {
+		mob->pt_level = SVGA3D_MOBFMT_RANGE;
+		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
 	} else if (unlikely(mob->pt_bo == NULL)) {
 		ret = vmw_mob_pt_populate(dev_priv, mob);
 		if (unlikely(ret != 0))
 			return ret;
 
-		vmw_mob_pt_setup(mob, data_pages, num_data_pages);
+		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
 		pt_set_up = true;
 	}
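Taken together, vmw_mob_bind() can now describe a MOB to the device in three ways: a single page (SVGA3D_MOBFMT_PTDEPTH_0, the root is that page's DMA address), a DMA-contiguous run (SVGA3D_MOBFMT_RANGE, when the sg table collapses to one region), or a real page table built by vmw_mob_pt_setup() for the scattered case. The early return 0 when the iterator yields no pages simply skips binding an empty buffer.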
@@ -554,7 +592,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.mobid = mob_id;
 	cmd->body.ptDepth = mob->pt_level;
-	cmd->body.base = page_to_pfn(mob->pt_root_page);
+	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));