openeuler / Kernel · Commit 920d2b5e
Authored Nov 01, 2017 by Ben Skeggs
drm/nouveau/mmu: define user interfaces to mmu vmm operations
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Parent: c83c4097
Showing 8 changed files with 647 additions and 0 deletions (+647 -0)
drivers/gpu/drm/nouveau/include/nvif/if000c.h   +61  -0
drivers/gpu/drm/nouveau/include/nvif/vmm.h      +42  -0
drivers/gpu/drm/nouveau/nvif/Kbuild             +1   -0
drivers/gpu/drm/nouveau/nvif/vmm.c              +167 -0
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild  +1   -0
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c  +9   -0
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c  +352 -0
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h  +14  -0
drivers/gpu/drm/nouveau/include/nvif/if000c.h
#ifndef __NVIF_IF000C_H__
#define __NVIF_IF000C_H__
struct nvif_vmm_v0 {
	__u8  version;
	__u8  page_nr;
	__u8  pad02[6];
	__u64 addr;
	__u64 size;
	__u8  data[];
};

#define NVIF_VMM_V0_PAGE 0x00
#define NVIF_VMM_V0_GET 0x01
#define NVIF_VMM_V0_PUT 0x02
#define NVIF_VMM_V0_MAP 0x03
#define NVIF_VMM_V0_UNMAP 0x04

struct nvif_vmm_page_v0 {
	__u8  version;
	__u8  index;
	__u8  shift;
	__u8  sparse;
	__u8  vram;
	__u8  host;
	__u8  comp;
	__u8  pad07[1];
};

struct nvif_vmm_get_v0 {
	__u8  version;
#define NVIF_VMM_GET_V0_ADDR 0x00
#define NVIF_VMM_GET_V0_PTES 0x01
#define NVIF_VMM_GET_V0_LAZY 0x02
	__u8  type;
	__u8  sparse;
	__u8  page;
	__u8  align;
	__u8  pad05[3];
	__u64 size;
	__u64 addr;
};

struct nvif_vmm_put_v0 {
	__u8  version;
	__u8  pad01[7];
	__u64 addr;
};

struct nvif_vmm_map_v0 {
	__u8  version;
	__u8  pad01[7];
	__u64 addr;
	__u64 size;
	__u64 memory;
	__u64 offset;
	__u8  data[];
};

struct nvif_vmm_unmap_v0 {
	__u8  version;
	__u8  pad01[7];
	__u64 addr;
};
#endif
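
The argument structs above follow the usual NVIF convention: a leading version byte, explicit padding, and (for PAGE and MAP) a trailing data[] array carrying backend-specific arguments. A caller selects an operation with one of the NVIF_VMM_V0_* method IDs and passes the matching struct to nvif_object_mthd(); for example, the unmap path in nvif/vmm.c below boils down to this fragment:

	/* Issue the UNMAP method with a compound-literal argument struct;
	 * only the address of the mapping needs to be supplied.  This
	 * mirrors the body of nvif_vmm_unmap() further down. */
	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_UNMAP,
			       &(struct nvif_vmm_unmap_v0) { .addr = addr },
			       sizeof(struct nvif_vmm_unmap_v0));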
drivers/gpu/drm/nouveau/include/nvif/vmm.h (new file, mode 100644)
#ifndef __NVIF_VMM_H__
#define __NVIF_VMM_H__
#include <nvif/object.h>
struct nvif_mem;
struct nvif_mmu;

enum nvif_vmm_get {
	ADDR,
	PTES,
	LAZY
};

struct nvif_vma {
	u64 addr;
	u64 size;
};

struct nvif_vmm {
	struct nvif_object object;
	u64 start;
	u64 limit;

	struct {
		u8 shift;
		bool sparse:1;
		bool vram:1;
		bool host:1;
		bool comp:1;
	} *page;
	int page_nr;
};

int nvif_vmm_init(struct nvif_mmu *, s32 oclass, u64 addr, u64 size,
		  void *argv, u32 argc, struct nvif_vmm *);
void nvif_vmm_fini(struct nvif_vmm *);
int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
		 u8 page, u8 align, u64 size, struct nvif_vma *);
void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
		 struct nvif_mem *, u64 offset);
int nvif_vmm_unmap(struct nvif_vmm *, u64);
#endif
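
Taken together, these entry points give a client a simple allocate/map/unmap/free lifecycle. A minimal usage sketch (illustrative only, error handling elided; it assumes an already-initialised struct nvif_mmu mmu, a backing struct nvif_mem mem, and a suitable oclass, none of which are set up in this commit):

	struct nvif_vmm vmm;
	struct nvif_vma vma;

	/* Create the address space; on success vmm.start/limit and the
	 * per-index page[] properties are filled in from the kernel side. */
	nvif_vmm_init(&mmu, oclass, 0, 0, NULL, 0, &vmm);

	/* Allocate a 64KiB window of address space; ADDR/PTES/LAZY select
	 * how page tables are referenced (see nvkm_uvmm_mthd_get()). */
	nvif_vmm_get(&vmm, LAZY, false, 0, 0, 0x10000, &vma);

	/* Back the whole VMA with memory, then tear everything down. */
	nvif_vmm_map(&vmm, vma.addr, vma.size, NULL, 0, &mem, 0);
	nvif_vmm_unmap(&vmm, vma.addr);
	nvif_vmm_put(&vmm, &vma);
	nvif_vmm_fini(&vmm);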
drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -5,3 +5,4 @@ nvif-y += nvif/driver.o
 nvif-y += nvif/mem.o
 nvif-y += nvif/mmu.o
 nvif-y += nvif/notify.o
+nvif-y += nvif/vmm.o
drivers/gpu/drm/nouveau/nvif/vmm.c (new file, mode 100644)
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/vmm.h>
#include <nvif/mem.h>

#include <nvif/if000c.h>

int
nvif_vmm_unmap(struct nvif_vmm *vmm, u64 addr)
{
	return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_UNMAP,
				&(struct nvif_vmm_unmap_v0) { .addr = addr },
				sizeof(struct nvif_vmm_unmap_v0));
}

int
nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
	     struct nvif_mem *mem, u64 offset)
{
	struct nvif_vmm_map_v0 *args;
	u8 stack[16];
	int ret;

	if (sizeof(*args) + argc > sizeof(stack)) {
		if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
			return -ENOMEM;
	} else {
		args = (void *)stack;
	}

	args->version = 0;
	args->addr = addr;
	args->size = size;
	args->memory = nvif_handle(&mem->object);
	args->offset = offset;
	memcpy(args->data, argv, argc);

	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP,
			       args, sizeof(*args) + argc);

	if (args != (void *)stack)
		kfree(args);
	return ret;
}

void
nvif_vmm_put(struct nvif_vmm *vmm, struct nvif_vma *vma)
{
	if (vma->size) {
		WARN_ON(nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PUT,
					 &(struct nvif_vmm_put_v0) {
						.addr = vma->addr,
					 }, sizeof(struct nvif_vmm_put_v0)));
		vma->size = 0;
	}
}

int
nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
	     u8 page, u8 align, u64 size, struct nvif_vma *vma)
{
	struct nvif_vmm_get_v0 args;
	int ret;

	args.version = vma->size = 0;
	args.sparse = sparse;
	args.page = page;
	args.align = align;
	args.size = size;

	switch (type) {
	case ADDR: args.type = NVIF_VMM_GET_V0_ADDR; break;
	case PTES: args.type = NVIF_VMM_GET_V0_PTES; break;
	case LAZY: args.type = NVIF_VMM_GET_V0_LAZY; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET,
			       &args, sizeof(args));
	if (ret == 0) {
		vma->addr = args.addr;
		vma->size = args.size;
	}
	return ret;
}

void
nvif_vmm_fini(struct nvif_vmm *vmm)
{
	kfree(vmm->page);
	nvif_object_fini(&vmm->object);
}

int
nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, u64 addr, u64 size,
	      void *argv, u32 argc, struct nvif_vmm *vmm)
{
	struct nvif_vmm_v0 *args;
	u32 argn = sizeof(*args) + argc;
	int ret = -ENOSYS, i;

	vmm->object.client = NULL;
	vmm->page = NULL;

	if (!(args = kmalloc(argn, GFP_KERNEL)))
		return -ENOMEM;
	args->version = 0;
	args->addr = addr;
	args->size = size;
	memcpy(args->data, argv, argc);

	ret = nvif_object_init(&mmu->object, 0, oclass, args, argn,
			       &vmm->object);
	if (ret)
		goto done;

	vmm->start = args->addr;
	vmm->limit = args->size;

	vmm->page_nr = args->page_nr;
	vmm->page = kmalloc(sizeof(*vmm->page) * vmm->page_nr, GFP_KERNEL);
	if (!vmm->page) {
		ret = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vmm->page_nr; i++) {
		struct nvif_vmm_page_v0 args = { .index = i };

		ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
				       &args, sizeof(args));
		if (ret)
			break;

		vmm->page[i].shift = args.shift;
		vmm->page[i].sparse = args.sparse;
		vmm->page[i].vram = args.vram;
		vmm->page[i].host = args.host;
		vmm->page[i].comp = args.comp;
	}

done:
	if (ret)
		nvif_vmm_fini(vmm);
	kfree(args);
	return ret;
}
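
Two details of this file are worth noting. nvif_vmm_map() marshals its variable-length argument struct in a small on-stack buffer and only falls back to kmalloc() when the backend-specific argc payload does not fit, which keeps the common map call allocation-free. And nvif_vmm_put() is written to be safely re-callable: it only issues the PUT method while vma->size is non-zero and clears the size afterwards, and nvif_vmm_get() zeroes vma->size up front, so a failed allocation is also safe to put. A hypothetical shared cleanup path can therefore be naive:

	/* vma.size was zeroed by nvif_vmm_get() on entry and is zeroed
	 * again by the first successful nvif_vmm_put(), so reaching this
	 * from both an error path and a destructor is harmless. */
	nvif_vmm_put(&vmm, &vma);
	nvif_vmm_put(&vmm, &vma);	/* no-op: vma.size == 0 */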
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -32,3 +32,4 @@ nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
 nvkm-y += nvkm/subdev/mmu/umem.o
 nvkm-y += nvkm/subdev/mmu/ummu.o
+nvkm-y += nvkm/subdev/mmu/uvmm.o
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
@@ -21,6 +21,7 @@
  */
 #include "ummu.h"
 #include "umem.h"
+#include "uvmm.h"
 
 #include <core/client.h>
@@ -41,6 +42,14 @@ nvkm_ummu_sclass(struct nvkm_object *object, int index,
 		}
 	}
 
+	if (mmu->func->vmm.user.oclass) {
+		if (index-- == 0) {
+			oclass->base = mmu->func->vmm.user;
+			oclass->ctor = nvkm_uvmm_new;
+			return 0;
+		}
+	}
+
 	return -EINVAL;
 }
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c (new file, mode 100644)
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "uvmm.h"
#include "umem.h"
#include "ummu.h"
#include <core/client.h>
#include <core/memory.h>
#include <nvif/if000c.h>
#include <nvif/unpack.h>
static
const
struct
nvkm_object_func
nvkm_uvmm
;
struct
nvkm_vmm
*
nvkm_uvmm_search
(
struct
nvkm_client
*
client
,
u64
handle
)
{
struct
nvkm_object
*
object
;
object
=
nvkm_object_search
(
client
,
handle
,
&
nvkm_uvmm
);
if
(
IS_ERR
(
object
))
return
(
void
*
)
object
;
return
nvkm_uvmm
(
object
)
->
vmm
;
}
static
int
nvkm_uvmm_mthd_unmap
(
struct
nvkm_uvmm
*
uvmm
,
void
*
argv
,
u32
argc
)
{
struct
nvkm_client
*
client
=
uvmm
->
object
.
client
;
union
{
struct
nvif_vmm_unmap_v0
v0
;
}
*
args
=
argv
;
struct
nvkm_vmm
*
vmm
=
uvmm
->
vmm
;
struct
nvkm_vma
*
vma
;
int
ret
=
-
ENOSYS
;
u64
addr
;
if
(
!
(
ret
=
nvif_unpack
(
ret
,
&
argv
,
&
argc
,
args
->
v0
,
0
,
0
,
false
)))
{
addr
=
args
->
v0
.
addr
;
}
else
return
ret
;
mutex_lock
(
&
vmm
->
mutex
);
vma
=
nvkm_vmm_node_search
(
vmm
,
addr
);
if
(
ret
=
-
ENOENT
,
!
vma
||
vma
->
addr
!=
addr
)
{
VMM_DEBUG
(
vmm
,
"lookup %016llx: %016llx"
,
addr
,
vma
?
vma
->
addr
:
~
0ULL
);
goto
done
;
}
if
(
ret
=
-
ENOENT
,
(
!
vma
->
user
&&
!
client
->
super
)
||
vma
->
busy
)
{
VMM_DEBUG
(
vmm
,
"denied %016llx: %d %d %d"
,
addr
,
vma
->
user
,
!
client
->
super
,
vma
->
busy
);
goto
done
;
}
if
(
ret
=
-
EINVAL
,
!
vma
->
memory
)
{
VMM_DEBUG
(
vmm
,
"unmapped"
);
goto
done
;
}
nvkm_vmm_unmap_locked
(
vmm
,
vma
);
ret
=
0
;
done:
mutex_unlock
(
&
vmm
->
mutex
);
return
ret
;
}
static
int
nvkm_uvmm_mthd_map
(
struct
nvkm_uvmm
*
uvmm
,
void
*
argv
,
u32
argc
)
{
struct
nvkm_client
*
client
=
uvmm
->
object
.
client
;
union
{
struct
nvif_vmm_map_v0
v0
;
}
*
args
=
argv
;
u64
addr
,
size
,
handle
,
offset
;
struct
nvkm_vmm
*
vmm
=
uvmm
->
vmm
;
struct
nvkm_vma
*
vma
;
struct
nvkm_memory
*
memory
;
int
ret
=
-
ENOSYS
;
if
(
!
(
ret
=
nvif_unpack
(
ret
,
&
argv
,
&
argc
,
args
->
v0
,
0
,
0
,
true
)))
{
addr
=
args
->
v0
.
addr
;
size
=
args
->
v0
.
size
;
handle
=
args
->
v0
.
memory
;
offset
=
args
->
v0
.
offset
;
}
else
return
ret
;
if
(
IS_ERR
((
memory
=
nvkm_umem_search
(
client
,
handle
))))
{
VMM_DEBUG
(
vmm
,
"memory %016llx %ld
\n
"
,
handle
,
PTR_ERR
(
memory
));
return
PTR_ERR
(
memory
);
}
mutex_lock
(
&
vmm
->
mutex
);
if
(
ret
=
-
ENOENT
,
!
(
vma
=
nvkm_vmm_node_search
(
vmm
,
addr
)))
{
VMM_DEBUG
(
vmm
,
"lookup %016llx"
,
addr
);
goto
fail
;
}
if
(
ret
=
-
ENOENT
,
(
!
vma
->
user
&&
!
client
->
super
)
||
vma
->
busy
)
{
VMM_DEBUG
(
vmm
,
"denied %016llx: %d %d %d"
,
addr
,
vma
->
user
,
!
client
->
super
,
vma
->
busy
);
goto
fail
;
}
if
(
ret
=
-
EINVAL
,
vma
->
addr
!=
addr
||
vma
->
size
!=
size
)
{
if
(
addr
+
size
>
vma
->
addr
+
vma
->
size
||
vma
->
memory
||
(
vma
->
refd
==
NVKM_VMA_PAGE_NONE
&&
!
vma
->
mapref
))
{
VMM_DEBUG
(
vmm
,
"split %d %d %d "
"%016llx %016llx %016llx %016llx"
,
!!
vma
->
memory
,
vma
->
refd
,
vma
->
mapref
,
addr
,
size
,
vma
->
addr
,
(
u64
)
vma
->
size
);
goto
fail
;
}
if
(
vma
->
addr
!=
addr
)
{
const
u64
tail
=
vma
->
size
+
vma
->
addr
-
addr
;
if
(
ret
=
-
ENOMEM
,
!
(
vma
=
nvkm_vma_tail
(
vma
,
tail
)))
goto
fail
;
vma
->
part
=
true
;
nvkm_vmm_node_insert
(
vmm
,
vma
);
}
if
(
vma
->
size
!=
size
)
{
const
u64
tail
=
vma
->
size
-
size
;
struct
nvkm_vma
*
tmp
;
if
(
ret
=
-
ENOMEM
,
!
(
tmp
=
nvkm_vma_tail
(
vma
,
tail
)))
{
nvkm_vmm_unmap_region
(
vmm
,
vma
);
goto
fail
;
}
tmp
->
part
=
true
;
nvkm_vmm_node_insert
(
vmm
,
tmp
);
}
}
vma
->
busy
=
true
;
mutex_unlock
(
&
vmm
->
mutex
);
ret
=
nvkm_memory_map
(
memory
,
offset
,
vmm
,
vma
,
argv
,
argc
);
if
(
ret
==
0
)
{
/* Successful map will clear vma->busy. */
nvkm_memory_unref
(
&
memory
);
return
0
;
}
mutex_lock
(
&
vmm
->
mutex
);
vma
->
busy
=
false
;
nvkm_vmm_unmap_region
(
vmm
,
vma
);
fail:
mutex_unlock
(
&
vmm
->
mutex
);
nvkm_memory_unref
(
&
memory
);
return
ret
;
}
static
int
nvkm_uvmm_mthd_put
(
struct
nvkm_uvmm
*
uvmm
,
void
*
argv
,
u32
argc
)
{
struct
nvkm_client
*
client
=
uvmm
->
object
.
client
;
union
{
struct
nvif_vmm_put_v0
v0
;
}
*
args
=
argv
;
struct
nvkm_vmm
*
vmm
=
uvmm
->
vmm
;
struct
nvkm_vma
*
vma
;
int
ret
=
-
ENOSYS
;
u64
addr
;
if
(
!
(
ret
=
nvif_unpack
(
ret
,
&
argv
,
&
argc
,
args
->
v0
,
0
,
0
,
false
)))
{
addr
=
args
->
v0
.
addr
;
}
else
return
ret
;
mutex_lock
(
&
vmm
->
mutex
);
vma
=
nvkm_vmm_node_search
(
vmm
,
args
->
v0
.
addr
);
if
(
ret
=
-
ENOENT
,
!
vma
||
vma
->
addr
!=
addr
||
vma
->
part
)
{
VMM_DEBUG
(
vmm
,
"lookup %016llx: %016llx %d"
,
addr
,
vma
?
vma
->
addr
:
~
0ULL
,
vma
?
vma
->
part
:
0
);
goto
done
;
}
if
(
ret
=
-
ENOENT
,
(
!
vma
->
user
&&
!
client
->
super
)
||
vma
->
busy
)
{
VMM_DEBUG
(
vmm
,
"denied %016llx: %d %d %d"
,
addr
,
vma
->
user
,
!
client
->
super
,
vma
->
busy
);
goto
done
;
}
nvkm_vmm_put_locked
(
vmm
,
vma
);
ret
=
0
;
done:
mutex_unlock
(
&
vmm
->
mutex
);
return
ret
;
}
static
int
nvkm_uvmm_mthd_get
(
struct
nvkm_uvmm
*
uvmm
,
void
*
argv
,
u32
argc
)
{
struct
nvkm_client
*
client
=
uvmm
->
object
.
client
;
union
{
struct
nvif_vmm_get_v0
v0
;
}
*
args
=
argv
;
struct
nvkm_vmm
*
vmm
=
uvmm
->
vmm
;
struct
nvkm_vma
*
vma
;
int
ret
=
-
ENOSYS
;
bool
getref
,
mapref
,
sparse
;
u8
page
,
align
;
u64
size
;
if
(
!
(
ret
=
nvif_unpack
(
ret
,
&
argv
,
&
argc
,
args
->
v0
,
0
,
0
,
false
)))
{
getref
=
args
->
v0
.
type
==
NVIF_VMM_GET_V0_PTES
;
mapref
=
args
->
v0
.
type
==
NVIF_VMM_GET_V0_ADDR
;
sparse
=
args
->
v0
.
sparse
;
page
=
args
->
v0
.
page
;
align
=
args
->
v0
.
align
;
size
=
args
->
v0
.
size
;
}
else
return
ret
;
mutex_lock
(
&
vmm
->
mutex
);
ret
=
nvkm_vmm_get_locked
(
vmm
,
getref
,
mapref
,
sparse
,
page
,
align
,
size
,
&
vma
);
mutex_unlock
(
&
vmm
->
mutex
);
if
(
ret
)
return
ret
;
args
->
v0
.
addr
=
vma
->
addr
;
vma
->
user
=
!
client
->
super
;
return
ret
;
}
static
int
nvkm_uvmm_mthd_page
(
struct
nvkm_uvmm
*
uvmm
,
void
*
argv
,
u32
argc
)
{
union
{
struct
nvif_vmm_page_v0
v0
;
}
*
args
=
argv
;
const
struct
nvkm_vmm_page
*
page
;
int
ret
=
-
ENOSYS
;
u8
type
,
index
,
nr
;
page
=
uvmm
->
vmm
->
func
->
page
;
for
(
nr
=
0
;
page
[
nr
].
shift
;
nr
++
);
if
(
!
(
ret
=
nvif_unpack
(
ret
,
&
argv
,
&
argc
,
args
->
v0
,
0
,
0
,
false
)))
{
if
((
index
=
args
->
v0
.
index
)
>=
nr
)
return
-
EINVAL
;
type
=
page
[
index
].
type
;
args
->
v0
.
shift
=
page
[
index
].
shift
;
args
->
v0
.
sparse
=
!!
(
type
&
NVKM_VMM_PAGE_SPARSE
);
args
->
v0
.
vram
=
!!
(
type
&
NVKM_VMM_PAGE_VRAM
);
args
->
v0
.
host
=
!!
(
type
&
NVKM_VMM_PAGE_HOST
);
args
->
v0
.
comp
=
!!
(
type
&
NVKM_VMM_PAGE_COMP
);
}
else
return
-
ENOSYS
;
return
0
;
}
static
int
nvkm_uvmm_mthd
(
struct
nvkm_object
*
object
,
u32
mthd
,
void
*
argv
,
u32
argc
)
{
struct
nvkm_uvmm
*
uvmm
=
nvkm_uvmm
(
object
);
switch
(
mthd
)
{
case
NVIF_VMM_V0_PAGE
:
return
nvkm_uvmm_mthd_page
(
uvmm
,
argv
,
argc
);
case
NVIF_VMM_V0_GET
:
return
nvkm_uvmm_mthd_get
(
uvmm
,
argv
,
argc
);
case
NVIF_VMM_V0_PUT
:
return
nvkm_uvmm_mthd_put
(
uvmm
,
argv
,
argc
);
case
NVIF_VMM_V0_MAP
:
return
nvkm_uvmm_mthd_map
(
uvmm
,
argv
,
argc
);
case
NVIF_VMM_V0_UNMAP
:
return
nvkm_uvmm_mthd_unmap
(
uvmm
,
argv
,
argc
);
default:
break
;
}
return
-
EINVAL
;
}
static
void
*
nvkm_uvmm_dtor
(
struct
nvkm_object
*
object
)
{
struct
nvkm_uvmm
*
uvmm
=
nvkm_uvmm
(
object
);
nvkm_vmm_unref
(
&
uvmm
->
vmm
);
return
uvmm
;
}
static
const
struct
nvkm_object_func
nvkm_uvmm
=
{
.
dtor
=
nvkm_uvmm_dtor
,
.
mthd
=
nvkm_uvmm_mthd
,
};
int
nvkm_uvmm_new
(
const
struct
nvkm_oclass
*
oclass
,
void
*
argv
,
u32
argc
,
struct
nvkm_object
**
pobject
)
{
struct
nvkm_mmu
*
mmu
=
nvkm_ummu
(
oclass
->
parent
)
->
mmu
;
const
bool
more
=
oclass
->
base
.
maxver
>=
0
;
union
{
struct
nvif_vmm_v0
v0
;
}
*
args
=
argv
;
const
struct
nvkm_vmm_page
*
page
;
struct
nvkm_uvmm
*
uvmm
;
int
ret
=
-
ENOSYS
;
u64
addr
,
size
;
if
(
!
(
ret
=
nvif_unpack
(
ret
,
&
argv
,
&
argc
,
args
->
v0
,
0
,
0
,
more
)))
{
addr
=
args
->
v0
.
addr
;
size
=
args
->
v0
.
size
;
}
else
return
ret
;
if
(
!
(
uvmm
=
kzalloc
(
sizeof
(
*
uvmm
),
GFP_KERNEL
)))
return
-
ENOMEM
;
nvkm_object_ctor
(
&
nvkm_uvmm
,
oclass
,
&
uvmm
->
object
);
*
pobject
=
&
uvmm
->
object
;
if
(
!
mmu
->
vmm
)
{
ret
=
mmu
->
func
->
vmm
.
ctor
(
mmu
,
addr
,
size
,
argv
,
argc
,
NULL
,
"user"
,
&
uvmm
->
vmm
);
if
(
ret
)
return
ret
;
uvmm
->
vmm
->
debug
=
max
(
uvmm
->
vmm
->
debug
,
oclass
->
client
->
debug
);
}
else
{
if
(
size
)
return
-
EINVAL
;
uvmm
->
vmm
=
nvkm_vmm_ref
(
mmu
->
vmm
);
}
page
=
uvmm
->
vmm
->
func
->
page
;
args
->
v0
.
page_nr
=
0
;
while
(
page
&&
(
page
++
)
->
shift
)
args
->
v0
.
page_nr
++
;
args
->
v0
.
addr
=
uvmm
->
vmm
->
start
;
args
->
v0
.
size
=
uvmm
->
vmm
->
limit
;
return
0
;
}
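
The subtlest path above is the partial-map split in nvkm_uvmm_mthd_map(): when the requested range lies strictly inside a larger allocation, nvkm_vma_tail() is applied up to twice so that only the requested window is mapped, while the leftover pieces remain tracked as separate nodes flagged part = true. A hypothetical worked example with made-up addresses:

	/* Existing node:  addr = 0x1000, size = 0x4000  -> [0x1000, 0x5000)
	 * Map request:    addr = 0x2000, size = 0x1000
	 *
	 * 1. vma->addr != addr: split off a tail of
	 *    vma->size + vma->addr - addr = 0x3000 bytes, leaving
	 *      [0x1000, 0x2000)  original head, unmapped
	 *      [0x2000, 0x5000)  new vma, part = true
	 * 2. vma->size != size: split again with tail = 0x2000, leaving
	 *      [0x2000, 0x3000)  the window that actually gets mapped
	 *      [0x3000, 0x5000)  tmp, part = true
	 */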
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h (new file, mode 100644)
#ifndef __NVKM_UVMM_H__
#define __NVKM_UVMM_H__
#define nvkm_uvmm(p) container_of((p), struct nvkm_uvmm, object)
#include <core/object.h>
#include "vmm.h"

struct nvkm_uvmm {
	struct nvkm_object object;
	struct nvkm_vmm *vmm;
};

int nvkm_uvmm_new(const struct nvkm_oclass *, void *argv, u32 argc,
		  struct nvkm_object **);
#endif