openeuler / Kernel
Commit b571fe21
Authored Nov 16, 2010 by Ben Skeggs

drm/nv50: tidy up PCIEGART implementation

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

Parent: 5f6fdca5
Showing 5 changed files with 79 additions and 95 deletions:

drivers/gpu/drm/nouveau/nouveau_bo.c       +3  -4
drivers/gpu/drm/nouveau/nouveau_drv.h      +1  -3
drivers/gpu/drm/nouveau/nouveau_object.c   +1  -3
drivers/gpu/drm/nouveau/nouveau_sgdma.c    +71 -76
drivers/gpu/drm/nouveau/nv50_instmem.c     +3  -9
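The hunks below touch two GART schemes. Before this commit, both pre-NV50 and NV50 chips went through a hand-built ctxdma page table in nouveau_sgdma.c, with NV50 entries occupying two 32-bit words; afterwards, only pre-NV50 keeps that table, while NV50 GART traffic is routed through the shared channel VM (nouveau_vm_map_sg into a vma reserved at init). As orientation, here is a rough user-space model of the pre-NV50 layout the diff settles on; the plain uint32_t array and the constants below are illustrative stand-ins for the kernel's gpuobj and NV_CTXDMA_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define NV_CTXDMA_PAGE_SHIFT 12                      /* assumed: 4 KiB GART pages */
    #define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)

    /* PTE index for a GART byte offset: one 32-bit entry per GART page,
     * skipping the two-word ctxdma header -- the "+ 2" that replaces
     * nouveau_sgdma_pte() in nouveau_sgdma_bind() below. */
    static unsigned gart_pte(uint64_t offset)
    {
        return (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
    }

    int main(void)
    {
        uint32_t pagetable[16] = { 0 };                /* stand-in for the gpuobj */
        uint64_t dma_addr = 0x12345000;                /* a page's bus address */
        uint64_t gart_off = 2 * NV_CTXDMA_PAGE_SIZE;   /* third GART page */

        /* "| 3" marks the entry valid, as in the kernel's nv_wo32() writes */
        pagetable[gart_pte(gart_off)] = (uint32_t)dma_addr | 3;
        printf("pte[%u] = 0x%08x\n", gart_pte(gart_off),
               pagetable[gart_pte(gart_off)]);
        return 0;
    }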
drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -425,7 +425,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
 		man->func = &ttm_bo_manager_func;
@@ -441,13 +440,13 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 					 TTM_MEMTYPE_FLAG_CMA;
 			man->available_caching = TTM_PL_MASK_CACHING;
 			man->default_caching = TTM_PL_FLAG_CACHED;
+			man->gpu_offset = dev_priv->gart_info.aper_base;
 			break;
 		default:
 			NV_ERROR(dev, "Unknown GART type: %d\n",
 				 dev_priv->gart_info.type);
 			return -EINVAL;
 		}
-
-		man->gpu_offset = dev_priv->vm_gart_base;
 		break;
 	default:
 		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
@@ -531,12 +530,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (old_mem->mem_type == TTM_PL_VRAM)
 			src_offset = nvbo->vma.offset;
 		else
-			src_offset += dev_priv->vm_gart_base;
+			src_offset += dev_priv->gart_info.aper_base;
 		if (new_mem->mem_type == TTM_PL_VRAM)
 			dst_offset = nvbo->vma.offset;
 		else
-			dst_offset += dev_priv->vm_gart_base;
+			dst_offset += dev_priv->gart_info.aper_base;
 	}

 	ret = RING_SPACE(chan, 3);
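The net effect in nouveau_bo.c: a TT (GART) buffer's GPU-visible base now comes from dev_priv->gart_info.aper_base, which on NV50 is the offset of the GART vma, instead of the removed vm_gart_base. A minimal sketch of the address resolution in nv50_bo_move_m2mf after this change (gpu_offset() is a hypothetical helper, not a kernel function):

    #include <stdint.h>
    #include <stdbool.h>

    /* Simplified model: VRAM buffers use their per-BO VM mapping, GART
     * buffers are rebased on the aperture base (gart_info.aper_base). */
    static uint64_t gpu_offset(bool is_vram, uint64_t vma_offset,
                               uint64_t bo_offset, uint64_t aper_base)
    {
        if (is_vram)
            return vma_offset;        /* nvbo->vma.offset */
        return aper_base + bo_offset; /* was: vm_gart_base + bo_offset */
    }

With both source and destination resolved this way, the same copy path serves VRAM-to-GART and GART-to-VRAM moves.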
drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -248,7 +248,6 @@ struct nouveau_channel {
 	/* NV50 VM */
 	struct nouveau_vm *vm;
 	struct nouveau_gpuobj *vm_pd;
-	struct nouveau_gpuobj *vm_gart_pt;

 	/* Objects */
 	struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -684,6 +683,7 @@ struct drm_nouveau_private {
 		uint64_t aper_free;

 		struct nouveau_gpuobj *sg_ctxdma;
+		struct nouveau_vma vma;
 	} gart_info;

 	/* nv10-nv40 tiling regions */
@@ -709,8 +709,6 @@ struct drm_nouveau_private {
 	/* G8x/G9x virtual address space */
 	struct nouveau_vm *chan_vm;
-	uint64_t vm_gart_base;
-	uint64_t vm_gart_size;

 	struct nvbios vbios;
drivers/gpu/drm/nouveau/nouveau_object.c

@@ -433,7 +433,7 @@ nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
 		flags0 |= 0x00030000;
 		break;
 	case NV_MEM_TARGET_GART:
-		base += dev_priv->vm_gart_base;
+		base += dev_priv->gart_info.aper_base;
 	default:
 		flags0 &= ~0x00100000;
 		break;
@@ -801,7 +801,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 			return ret;

 		nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
-		chan->vm->map_pgt(chan->vm_pd, 12, 1, dev_priv->gart_info.sg_ctxdma);
 	}

 	/* RAMHT */
@@ -889,7 +888,6 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
-	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);

 	if (chan->ramin_heap.free_stack.next)
 		drm_mm_takedown(&chan->ramin_heap);
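One detail worth noting in the nv50_gpuobj_dma_init hunk above: case NV_MEM_TARGET_GART has no break, so after rebasing `base` it falls through into default, which also clears the 0x00100000 bit in flags0. A minimal standalone illustration of that pattern (toy values, not the kernel's flag encoding):

    #include <stdio.h>

    int main(void)
    {
        unsigned flags = 0x00130000;
        int target = 1; /* 1 plays the role of NV_MEM_TARGET_GART here */

        switch (target) {
        case 1:
            flags |= 0x3;           /* GART-specific adjustment */
            /* fall through */
        default:
            flags &= ~0x00100000;   /* applies to GART and default alike */
            break;
        }
        printf("flags = %#x\n", flags); /* prints 0x30003 */
        return 0;
    }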
drivers/gpu/drm/nouveau/nouveau_sgdma.c

@@ -14,7 +14,7 @@ struct nouveau_sgdma_be {
 	dma_addr_t *pages;
 	unsigned nr_pages;

-	unsigned pte_start;
+	u64 offset;
 	bool bound;
 };
@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 	}
 }

-static inline unsigned
-nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
-
-	if (dev_priv->card_type < NV_50)
-		return pte + 2;
-
-	return pte << 1;
-}
-
 static int
 nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

-	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
-	nvbe->pte_start = pte;
+	nvbe->offset = mem->start << PAGE_SHIFT;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = nvbe->pages[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);
-		uint32_t offset_h = upper_32_bits(dma_offset);
-
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
-				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
-				pte += 2;
-			}

+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
 	dev_priv->engine.instmem.flush(nvbe->dev);

-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
-	}
-
 	nvbe->bound = true;
 	return 0;
@@ -142,24 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 	if (!nvbe->bound)
 		return 0;

-	pte = nvbe->pte_start;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
-				pte += 2;
-			}
-		}
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 	dev_priv->engine.instmem.flush(nvbe->dev);

-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
-	}
-
 	nvbe->bound = false;
@@ -182,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be)
 	}
 }

+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	nvbe->offset = mem->start << PAGE_SHIFT;
+
+	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
+			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+	nvbe->bound = true;
+	return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	if (!nvbe->bound)
+		return 0;
+
+	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
+			    nvbe->nr_pages << PAGE_SHIFT);
+	nvbe->bound = false;
+	return 0;
+}
+
 static struct ttm_backend_func nouveau_sgdma_backend = {
 	.populate		= nouveau_sgdma_populate,
 	.clear			= nouveau_sgdma_clear,
@@ -190,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = {
 	.destroy		= nouveau_sgdma_destroy
 };

+static struct ttm_backend_func nv50_sgdma_backend = {
+	.populate		= nouveau_sgdma_populate,
+	.clear			= nouveau_sgdma_clear,
+	.bind			= nv50_sgdma_bind,
+	.unbind			= nv50_sgdma_unbind,
+	.destroy		= nouveau_sgdma_destroy
+};
+
 struct ttm_backend *
 nouveau_sgdma_init_ttm(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_sgdma_be *nvbe;

-	if (!dev_priv->gart_info.sg_ctxdma)
-		return NULL;
-
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
 	if (!nvbe)
 		return NULL;

 	nvbe->dev = dev;

-	nvbe->backend.func = &nouveau_sgdma_backend;
-
+	if (dev_priv->card_type < NV_50)
+		nvbe->backend.func = &nouveau_sgdma_backend;
+	else
+		nvbe->backend.func = &nv50_sgdma_backend;
 	return &nvbe->backend;
 }
@@ -226,21 +221,15 @@ nouveau_sgdma_init(struct drm_device *dev)
 		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
 		obj_size += 8; /* ctxdma header */
-	} else {
-		/* 1 entire VM page table */
-		aper_size = (512 * 1024 * 1024);
-		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
-	}

-	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
-				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
-				 &gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
-		return ret;
-	}
-
-	if (dev_priv->card_type < NV_50) {
+		ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+					 NVOBJ_FLAG_ZERO_ALLOC |
+					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+		if (ret) {
+			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+			return ret;
+		}
+
 		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
 				   (1 << 12) /* PT present */ |
 				   (0 << 13) /* PT *not* linear */ |
@@ -249,18 +238,23 @@ nouveau_sgdma_init(struct drm_device *dev)
 		nv_wo32(gpuobj, 4, aper_size - 1);
 		for (i = 2; i < 2 + (aper_size >> 12); i++)
 			nv_wo32(gpuobj, i * 4, 0x00000000);
-	} else {
-		for (i = 0; i < obj_size; i += 8) {
-			nv_wo32(gpuobj, i + 0, 0x00000000);
-			nv_wo32(gpuobj, i + 4, 0x00000000);
-		}
+
+		dev_priv->gart_info.sg_ctxdma = gpuobj;
+		dev_priv->gart_info.aper_base = 0;
+		dev_priv->gart_info.aper_size = aper_size;
+	} else
+	if (dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+				     12, NV_MEM_ACCESS_RW,
+				     &dev_priv->gart_info.vma);
+		if (ret)
+			return ret;
+
+		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
 	}

 	dev_priv->engine.instmem.flush(dev);
-
 	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
-	dev_priv->gart_info.aper_base = 0;
-	dev_priv->gart_info.aper_size = aper_size;
-	dev_priv->gart_info.sg_ctxdma = gpuobj;
 	return 0;
 }
@@ -270,6 +264,7 @@ nouveau_sgdma_takedown(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;

 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
+	nouveau_vm_put(&dev_priv->gart_info.vma);
 }

 int
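The heart of the tidy-up is above: the NV50 backend no longer hand-writes two-word PTEs or flushes the FIFO/GRAPH TLBs itself; nv50_sgdma_bind() and nv50_sgdma_unbind() simply map and unmap the page array through the shared VM. A compact standalone model of that lifecycle, with the VM calls stubbed out (the _stub names and be_model struct are invented for this sketch):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified model of the NV50 backend state machine in this diff:
     * bind maps the page list at a VM offset, unbind reverses it, and
     * the "bound" flag keeps the pair idempotent. */
    struct be_model {
        uint64_t offset;
        unsigned nr_pages;
        bool     bound;
    };

    #define PAGE_SHIFT_MODEL 12  /* assumed 4 KiB system pages */

    static void vm_map_sg_stub(uint64_t off, uint64_t len)
    { printf("map   %#llx +%#llx\n", (unsigned long long)off, (unsigned long long)len); }
    static void vm_unmap_at_stub(uint64_t off, uint64_t len)
    { printf("unmap %#llx +%#llx\n", (unsigned long long)off, (unsigned long long)len); }

    static int bind(struct be_model *be, uint64_t mem_start_page)
    {
        be->offset = mem_start_page << PAGE_SHIFT_MODEL; /* mem->start << PAGE_SHIFT */
        vm_map_sg_stub(be->offset, (uint64_t)be->nr_pages << PAGE_SHIFT_MODEL);
        be->bound = true;
        return 0;
    }

    static int unbind(struct be_model *be)
    {
        if (!be->bound)
            return 0;                                    /* same guard as the diff */
        vm_unmap_at_stub(be->offset, (uint64_t)be->nr_pages << PAGE_SHIFT_MODEL);
        be->bound = false;
        return 0;
    }

    int main(void)
    {
        struct be_model be = { .nr_pages = 4 };
        bind(&be, 0x100);   /* GART page 0x100 -> byte offset 0x100000 */
        unbind(&be);
        unbind(&be);        /* no-op: already unbound */
        return 0;
    }

The bound flag preserves the guarantee the old code had: a second unbind is a no-op.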
drivers/gpu/drm/nouveau/nv50_instmem.c

@@ -131,7 +131,6 @@ nv50_instmem_init(struct drm_device *dev)
 	struct nouveau_channel *chan;
 	struct nouveau_vm *vm;
 	int ret, i;
-	u64 nongart_o;
 	u32 tmp;

 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -216,15 +215,10 @@ nv50_instmem_init(struct drm_device *dev)
 	for (i = 0; i < 8; i++)
 		nv_wr32(dev, 0x1900 + (i*4), 0);

-	/* Create shared channel VM, space is reserved for GART mappings at
-	 * the beginning of this address space, it's managed separately
-	 * because TTM makes life painful
+	/* Create shared channel VM, space is reserved at the beginning
+	 * to catch "NULL pointer" references
 	 */
-	dev_priv->vm_gart_base = 0x0020000000ULL;
-	dev_priv->vm_gart_size = 512 * 1024 * 1024;
-	nongart_o = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
-
-	ret = nouveau_vm_new(dev, 0, (1ULL << 40), nongart_o,
+	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
 			     29, 12, 16, &dev_priv->chan_vm);
 	if (ret)
 		return ret;
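A quick check on the constants in this last hunk: the low reservation passed to nouveau_vm_new, 0x0020000000ULL, equals 512 MiB, the same size nouveau_sgdma_init now requests from chan_vm for the GART vma, though the two regions are distinct (per the new comment, the low reservation exists to catch "NULL pointer" references). Illustrative arithmetic only:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long long reserved = 0x0020000000ULL;      /* low VM reservation */
        unsigned long long gart = 512ULL * 1024 * 1024;     /* vma size in nouveau_sgdma_init */

        assert(reserved == gart);                           /* 0x20000000 == 512 MiB */
        printf("VM span: %llu GiB, reserved low: %llu MiB\n",
               (1ULL << 40) >> 30, reserved >> 20);         /* 1024 GiB, 512 MiB */
        return 0;
    }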