Commit 03b0ba7b (openeuler/raspberrypi-kernel)

Authored Nov 01, 2017 by Ben Skeggs

drm/nouveau/mmu/nv44: implement vmm on top of new base

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

Parent: 77783435
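In rough terms (a reader's sketch condensed from the hunks below, not part of the patch itself): the dummy-page bookkeeping that the nv04_mmu wrapper used to carry moves into the per-VM object, and the nv44 MMU registers a VMM constructor in its function table so the GART page table is built by the common vmm code.

/*
 * Sketch only, condensed from the hunks below; not a compilable unit on its
 * own.  struct nvkm_vm gains the dummy-page fields, and nv44_mmu points at
 * the new nv44_vmm_new() constructor.
 */
struct nvkm_vm {
        /* ... existing members ... */
        dma_addr_t null;        /* DMA address of the 16 KiB dummy-page buffer */
        void *nullp;            /* CPU mapping, freed in nvkm_vmm_dtor() */
};

static const struct nvkm_mmu_func
nv44_mmu = {
        /* ... */
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
};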
Showing 10 changed files with 96 additions and 91 deletions (+96 -91)
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h    +3  -0
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild       +1  -0
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c       +1  -4
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c       +0  -27
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h       +0  -17
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c       +19 -42
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h       +0  -1
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c        +5  -0
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h        +2  -0
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c    +65 -0
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h

@@ -48,6 +48,9 @@ struct nvkm_vm {
        bool bootstrapped;
        atomic_t engref[NVKM_SUBDEV_NR];
+
+       dma_addr_t null;
+       void *nullp;
 };
 
 int nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild

@@ -15,3 +15,4 @@ nvkm-y += nvkm/subdev/mmu/gp10b.o
 nvkm-y += nvkm/subdev/mmu/vmm.o
 nvkm-y += nvkm/subdev/mmu/vmmnv04.o
 nvkm-y += nvkm/subdev/mmu/vmmnv41.o
+nvkm-y += nvkm/subdev/mmu/vmmnv44.o
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c

@@ -786,14 +786,11 @@ static void *
 nvkm_mmu_dtor(struct nvkm_subdev *subdev)
 {
        struct nvkm_mmu *mmu = nvkm_mmu(subdev);
-       void *data = mmu;
-       if (mmu->func->dtor)
-               data = mmu->func->dtor(mmu);
 
        nvkm_vm_ref(NULL, &mmu->vmm, NULL);
 
        nvkm_mmu_ptc_fini(mmu);
-       return data;
+       return mmu;
 }
 
 static const struct nvkm_subdev_func
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c

@@ -21,7 +21,6 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
 #include "vmm.h"
 
 #include <nvif/class.h>
@@ -81,32 +80,6 @@ nv04_mmu_oneinit(struct nvkm_mmu *mmu)
        return 0;
 }
 
-void *
-nv04_mmu_dtor(struct nvkm_mmu *base)
-{
-       struct nv04_mmu *mmu = nv04_mmu(base);
-       struct nvkm_device *device = mmu->base.subdev.device;
-       if (mmu->base.vmm)
-               nvkm_memory_unref(&mmu->base.vmm->pgt[0].mem[0]);
-       if (mmu->nullp) {
-               dma_free_coherent(device->dev, 16 * 1024,
-                                 mmu->nullp, mmu->null);
-       }
-       return mmu;
-}
-
-int
-nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
-              int index, struct nvkm_mmu **pmmu)
-{
-       struct nv04_mmu *mmu;
-       if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL)))
-               return -ENOMEM;
-       *pmmu = &mmu->base;
-       nvkm_mmu_ctor(func, device, index, &mmu->base);
-       return 0;
-}
-
 const struct nvkm_mmu_func
 nv04_mmu = {
        .oneinit = nv04_mmu_oneinit,
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h (deleted, 100644 → 0)

-#ifndef __NV04_MMU_PRIV__
-#define __NV04_MMU_PRIV__
-#define nv04_mmu(p) container_of((p), struct nv04_mmu, base)
-#include "priv.h"
-
-struct nv04_mmu {
-       struct nvkm_mmu base;
-       dma_addr_t null;
-       void *nullp;
-};
-
-int nv04_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
-                 int index, struct nvkm_mmu **);
-void *nv04_mmu_dtor(struct nvkm_mmu *);
-
-extern const struct nvkm_mmu_func nv04_mmu;
-#endif
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c

@@ -21,12 +21,13 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "vmm.h"
+
 #include <core/gpuobj.h>
 #include <core/option.h>
 #include <subdev/timer.h>
 
 #include <nvif/class.h>
 
 #define NV44_GART_SIZE (512 * 1024 * 1024)
 #define NV44_GART_PAGE (  4 * 1024)
@@ -84,7 +85,6 @@ static void
 nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
               struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
-       struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
        u32 tmp[4];
        int i;
 
@@ -92,7 +92,7 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
        if (pte & 3) {
                u32  max = 4 - (pte & 3);
                u32 part = (cnt > max) ? max : cnt;
-               nv44_vm_fill(pgt, mmu->null, list, pte, part);
+               nv44_vm_fill(pgt, vma->vm->null, list, pte, part);
                pte  += part;
                list += part;
                cnt  -= part;
@@ -109,20 +109,18 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
        }
 
        if (cnt)
-               nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
+               nv44_vm_fill(pgt, vma->vm->null, list, pte, cnt);
        nvkm_done(pgt);
 }
 
 static void
 nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
-       struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
-
        nvkm_kmap(pgt);
        if (pte & 3) {
                u32  max = 4 - (pte & 3);
                u32 part = (cnt > max) ? max : cnt;
-               nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
+               nv44_vm_fill(pgt, vma->vm->null, NULL, pte, part);
                pte += part;
                cnt -= part;
        }
@@ -136,16 +134,15 @@ nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
        }
 
        if (cnt)
-               nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
+               nv44_vm_fill(pgt, vma->vm->null, NULL, pte, cnt);
        nvkm_done(pgt);
 }
 
 static void
 nv44_vm_flush(struct nvkm_vm *vm)
 {
-       struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
-       struct nvkm_device *device = mmu->base.subdev.device;
-       nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
+       struct nvkm_device *device = vm->mmu->subdev.device;
+       nvkm_wr32(device, 0x100814, vm->mmu->limit - NV44_GART_PAGE);
        nvkm_wr32(device, 0x100808, 0x00000020);
        nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x100808) & 0x00000001)
@@ -159,38 +156,18 @@ nv44_vm_flush(struct nvkm_vm *vm)
  ******************************************************************************/
 
 static int
-nv44_mmu_oneinit(struct nvkm_mmu *base)
+nv44_mmu_oneinit(struct nvkm_mmu *mmu)
 {
-       struct nv04_mmu *mmu = nv04_mmu(base);
-       struct nvkm_device *device = mmu->base.subdev.device;
-       int ret;
-
-       mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
-                                       &mmu->null, GFP_KERNEL);
-       if (!mmu->nullp) {
-               nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
-               mmu->null = 0;
-       }
-
-       ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
-                            &mmu->base.vmm);
-       if (ret)
-               return ret;
-
-       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
-                             (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
-                             512 * 1024, true,
-                             &mmu->base.vmm->pgt[0].mem[0]);
-       mmu->base.vmm->pgt[0].refcount[0] = 1;
-       return ret;
+       mmu->vmm->pgt[0].mem[0] = mmu->vmm->pd->pt[0]->memory;
+       mmu->vmm->pgt[0].refcount[0] = 1;
+       return 0;
 }
 
 static void
-nv44_mmu_init(struct nvkm_mmu *base)
+nv44_mmu_init(struct nvkm_mmu *mmu)
 {
-       struct nv04_mmu *mmu = nv04_mmu(base);
-       struct nvkm_device *device = mmu->base.subdev.device;
-       struct nvkm_memory *gart = mmu->base.vmm->pgt[0].mem[0];
+       struct nvkm_device *device = mmu->subdev.device;
+       struct nvkm_memory *gart = mmu->vmm->pgt[0].mem[0];
        u32 addr;
 
        /* calculate vram address of this PRAMIN block, object must be
@@ -201,7 +178,7 @@ nv44_mmu_init(struct nvkm_mmu *base)
        addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
 
        nvkm_wr32(device, 0x100850, 0x80000000);
-       nvkm_wr32(device, 0x100818, mmu->null);
+       nvkm_wr32(device, 0x100818, mmu->vmm->null);
        nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
        nvkm_wr32(device, 0x100850, 0x00008000);
        nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
@@ -212,7 +189,6 @@ nv44_mmu_init(struct nvkm_mmu *base)
 
 static const struct nvkm_mmu_func
 nv44_mmu = {
-       .dtor = nv04_mmu_dtor,
        .oneinit = nv44_mmu_oneinit,
        .init = nv44_mmu_init,
        .limit = NV44_GART_SIZE,
@@ -223,6 +199,7 @@ nv44_mmu = {
        .map_sg = nv44_vm_map_sg,
        .unmap = nv44_vm_unmap,
        .flush = nv44_vm_flush,
+       .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
 };
 
 int
@@ -232,5 +209,5 @@ nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
            !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
                return nv04_mmu_new(device, index, pmmu);
 
-       return nv04_mmu_new_(&nv44_mmu, device, index, pmmu);
+       return nvkm_mmu_new_(&nv44_mmu, device, index, pmmu);
 }
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h

@@ -9,7 +9,6 @@ int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
                  int index, struct nvkm_mmu **);
 
 struct nvkm_mmu_func {
-       void *(*dtor)(struct nvkm_mmu *);
        int (*oneinit)(struct nvkm_mmu *);
        void (*init)(struct nvkm_mmu *);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c

@@ -70,6 +70,11 @@ nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
 void
 nvkm_vmm_dtor(struct nvkm_vmm *vmm)
 {
+       if (vmm->nullp) {
+               dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
+                                 vmm->nullp, vmm->null);
+       }
+
        if (vmm->pd) {
                nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
                nvkm_vmm_pt_del(&vmm->pd);
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h

@@ -110,4 +110,6 @@ int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
                 struct lock_class_key *, const char *, struct nvkm_vmm **);
 int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
                 struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+                struct lock_class_key *, const char *, struct nvkm_vmm **);
 #endif
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c (new file, 0 → 100644)

/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

static const struct nvkm_vmm_desc_func
nv44_vmm_desc_pgt = {
};

static const struct nvkm_vmm_desc
nv44_vmm_desc_12[] = {
       { PGT, 17, 4, 0x80000, &nv44_vmm_desc_pgt },
       {}
};

static const struct nvkm_vmm_func
nv44_vmm = {
       .page = {
               { 12, &nv44_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
               {}
       }
};

int
nv44_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
             struct lock_class_key *key, const char *name,
             struct nvkm_vmm **pvmm)
{
       struct nvkm_subdev *subdev = &mmu->subdev;
       struct nvkm_vmm *vmm;
       int ret;

       ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, addr, size, argv, argc,
                           key, name, &vmm);
       *pvmm = vmm;
       if (ret)
               return ret;

       vmm->nullp = dma_alloc_coherent(subdev->device->dev, 16 * 1024,
                                       &vmm->null, GFP_KERNEL);
       if (!vmm->nullp) {
               nvkm_warn(subdev, "unable to allocate dummy pages\n");
               vmm->null = 0;
       }

       return 0;
}
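A note on how the per-VM dummy page is consumed (condensed from the nv44.c hunks above; shown only for orientation): the nv44 GART updates PTEs in aligned groups of four, so groups that a mapping only partially covers are padded with the dummy page, which after this patch lives on the VM (vma->vm->null) rather than on the nv04_mmu wrapper.

/* Condensed from nv44_vm_map_sg() above; illustration only. */
if (pte & 3) {
        u32  max = 4 - (pte & 3);
        u32 part = (cnt > max) ? max : cnt;
        /* pad the partially covered 4-PTE group with the per-VM dummy page */
        nv44_vm_fill(pgt, vma->vm->null, list, pte, part);
        pte  += part;
        list += part;
        cnt  -= part;
}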