openanolis / cloud-kernel
605f9ccd
编写于
5月 17, 2016
作者:
B
Ben Skeggs
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
drm/nouveau: s/mem/reg/ for struct ttm_mem_reg variables
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Parent: 1167c6bc
Showing 5 changed files with 169 additions and 169 deletions (+169 −169)
drivers/gpu/drm/nouveau/nouveau_bo.c     +133 −133
drivers/gpu/drm/nouveau/nouveau_sgdma.c  +6   −6
drivers/gpu/drm/nouveau/nouveau_ttm.c    +24  −24
drivers/gpu/drm/nouveau/nv17_fence.c     +3   −3
drivers/gpu/drm/nouveau/nv50_fence.c     +3   −3
drivers/gpu/drm/nouveau/nouveau_bo.c
This diff is collapsed and not shown.
drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -24,10 +24,10 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
 }
 
 static int
-nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct nvkm_mem *node = mem->mm_node;
+	struct nvkm_mem *node = reg->mm_node;
 
 	if (ttm->sg) {
 		node->sg = ttm->sg;
@@ -36,7 +36,7 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 		node->sg = NULL;
 		node->pages = nvbe->ttm.dma_address;
 	}
-	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+	node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
 
 	nvkm_vm_map(&node->vma[0], node);
 	nvbe->node = node;
@@ -58,10 +58,10 @@ static struct ttm_backend_func nv04_sgdma_backend = {
 };
 
 static int
-nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct nvkm_mem *node = mem->mm_node;
+	struct nvkm_mem *node = reg->mm_node;
 
 	/* noop: bound in move_notify() */
 	if (ttm->sg) {
@@ -71,7 +71,7 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 		node->sg = NULL;
 		node->pages = nvbe->ttm.dma_address;
 	}
-	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+	node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
 	return 0;
 }
drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -64,19 +64,19 @@ nvkm_mem_node_cleanup(struct nvkm_mem *node)
 
 static void
 nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *mem)
+			 struct ttm_mem_reg *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
 	struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
-	nvkm_mem_node_cleanup(mem->mm_node);
-	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
+	nvkm_mem_node_cleanup(reg->mm_node);
+	ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
 }
 
 static int
 nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_mem_reg *mem)
+			 struct ttm_mem_reg *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
 	struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
@@ -91,18 +91,18 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
 		size_nc = 1 << nvbo->page_shift;
 
-	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
-			     mem->page_alignment << PAGE_SHIFT, size_nc,
+	ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
+			     reg->page_alignment << PAGE_SHIFT, size_nc,
 			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
 	if (ret) {
-		mem->mm_node = NULL;
+		reg->mm_node = NULL;
 		return (ret == -ENOSPC) ? 0 : ret;
 	}
 
 	node->page_shift = nvbo->page_shift;
 
-	mem->mm_node = node;
-	mem->start   = node->offset >> PAGE_SHIFT;
+	reg->mm_node = node;
+	reg->start   = node->offset >> PAGE_SHIFT;
 
 	return 0;
 }
@@ -127,18 +127,18 @@ nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
 
 static void
 nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *mem)
+			 struct ttm_mem_reg *reg)
 {
-	nvkm_mem_node_cleanup(mem->mm_node);
-	kfree(mem->mm_node);
-	mem->mm_node = NULL;
+	nvkm_mem_node_cleanup(reg->mm_node);
+	kfree(reg->mm_node);
+	reg->mm_node = NULL;
 }
 
 static int
 nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_mem_reg *mem)
+			 struct ttm_mem_reg *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -173,8 +173,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 		break;
 	}
 
-	mem->mm_node = node;
-	mem->start   = 0;
+	reg->mm_node = node;
+	reg->start   = 0;
 	return 0;
 }
@@ -215,20 +215,20 @@ nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
 }
 
 static void
-nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
+nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *node = mem->mm_node;
+	struct nvkm_mem *node = reg->mm_node;
 	if (node->vma[0].node)
 		nvkm_vm_put(&node->vma[0]);
-	kfree(mem->mm_node);
-	mem->mm_node = NULL;
+	kfree(reg->mm_node);
+	reg->mm_node = NULL;
 }
 
 static int
 nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
-		      struct ttm_mem_reg *mem)
+		      struct ttm_mem_reg *reg)
 {
 	struct nvkm_mem *node;
 	int ret;
@@ -239,15 +239,15 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 
 	node->page_shift = 12;
 
-	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
+	ret = nvkm_vm_get(man->priv, reg->num_pages << 12, node->page_shift,
 			  NV_MEM_ACCESS_RW, &node->vma[0]);
 	if (ret) {
 		kfree(node);
 		return ret;
 	}
 
-	mem->mm_node = node;
-	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
+	reg->mm_node = node;
+	reg->start   = node->vma[0].offset >> PAGE_SHIFT;
 	return 0;
 }
drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,9 +76,9 @@ nv17_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx;
-	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
-	u32 start = mem->start * PAGE_SIZE;
-	u32 limit = start + mem->size - 1;
+	struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+	u32 start = reg->start * PAGE_SIZE;
+	u32 limit = start + reg->size - 1;
 	int ret = 0;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
drivers/gpu/drm/nouveau/nv50_fence.c
@@ -37,9 +37,9 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx;
-	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
-	u32 start = mem->start * PAGE_SIZE;
-	u32 limit = start + mem->size - 1;
+	struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+	u32 start = reg->start * PAGE_SIZE;
+	u32 limit = start + reg->size - 1;
 	int ret;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);