Commit a11c3198
Authored Aug 27, 2010 by Ben Skeggs

drm/nv50: import new vm code

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

Parent: 573a2a37

Showing 11 changed files with 714 additions and 22 deletions (+714, -22)
drivers/gpu/drm/nouveau/Makefile          +3   -2
drivers/gpu/drm/nouveau/nouveau_drv.h     +5   -4
drivers/gpu/drm/nouveau/nouveau_mem.c     +3   -2
drivers/gpu/drm/nouveau/nouveau_object.c  +1   -1
drivers/gpu/drm/nouveau/nouveau_vm.c      +421 -0
drivers/gpu/drm/nouveau/nouveau_vm.h      +107 -0
drivers/gpu/drm/nouveau/nv50_fifo.c       +2   -1
drivers/gpu/drm/nouveau/nv50_graph.c      +3   -2
drivers/gpu/drm/nouveau/nv50_instmem.c    +3   -9
drivers/gpu/drm/nouveau/nv50_vm.c         +164 -0
drivers/gpu/drm/nouveau/nv84_crypt.c      +2   -1
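Taken together, the new files form a self-contained VM layer: nouveau_vm.c holds the generation-independent page-table bookkeeping, nouveau_vm.h its interface, and nv50_vm.c the nv50-specific PDE/PTE writers, while the remaining files are converted from the old nv50_vm_flush(dev, engine) helper to the renamed nv50_vm_flush_engine(). For orientation, a caller would drive the new interface roughly as follows. This is an illustrative sketch, not code from the commit: it assumes a drm_device *dev, an allocation size, and a nouveau_vram *vram are in scope, and the address-space parameters passed to nouveau_vm_new() are made-up example values.

        /* Illustrative lifecycle of the new API (not part of this patch).
         * The 40-bit space / 29-bit PDE span / 4KiB page parameters are
         * assumed example values. */
        struct nouveau_vm *vm = NULL;
        struct nouveau_vma vma = {};
        int ret;

        ret = nouveau_vm_new(dev, 0, 1ULL << 40, 0, 29, 12, 16, &vm);
        if (ret)
                return ret;

        ret = nouveau_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &vma);
        if (ret == 0) {
                nouveau_vm_map(&vma, vram);   /* write PTEs, then vm->flush() */
                /* ... use the mapping ... */
                nouveau_vm_unmap(&vma);       /* clear PTEs, then vm->flush() */
                nouveau_vm_put(&vma);         /* release VA, drop PGT refs */
        }

        nouveau_vm_ref(NULL, &vm, NULL);      /* refcount 1 -> 0, frees the VM */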
drivers/gpu/drm/nouveau/Makefile

@@ -9,8 +9,9 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
-             nouveau_dp.o nouveau_ramht.o nouveau_mm.o \
+             nouveau_dp.o nouveau_ramht.o \
              nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
+             nouveau_mm.o nouveau_vm.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -27,7 +28,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv10_gpio.o nv50_gpio.o \
              nv50_calc.o \
              nv04_pm.o nv50_pm.o nva3_pm.o \
-             nv50_vram.o
+             nv50_vram.o nv50_vm.o

 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -153,6 +153,7 @@ enum nouveau_flags {
 #define NVOBJ_ENGINE_DISPLAY	0xcafe0001
 #define NVOBJ_ENGINE_INT	0xdeadbeef
+#define NVOBJ_FLAG_DONT_MAP	(1 << 0)
 #define NVOBJ_FLAG_ZERO_ALLOC	(1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE	(1 << 2)
@@ -1213,7 +1214,6 @@ extern int nv50_instmem_map(struct nouveau_gpuobj *);
 extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv50_instmem_flush(struct drm_device *);
 extern void nv84_instmem_flush(struct drm_device *);
-extern void nv50_vm_flush(struct drm_device *, int engine);

 /* nvc0_instmem.c */
 extern int nvc0_instmem_init(struct drm_device *);
@@ -1564,10 +1564,11 @@ nv_match_device(struct drm_device *dev, unsigned device,
 }

 /* memory type/access flags, do not match hardware values */
-#define NV_MEM_ACCESS_RO 1
-#define NV_MEM_ACCESS_WO 2
+#define NV_MEM_ACCESS_RO  1
+#define NV_MEM_ACCESS_WO  2
 #define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
-#define NV_MEM_ACCESS_VM 4
+#define NV_MEM_ACCESS_SYS 4
+#define NV_MEM_ACCESS_VM  8

 #define NV_MEM_TARGET_VRAM 0
 #define NV_MEM_TARGET_PCI  1
drivers/gpu/drm/nouveau/nouveau_mem.c

@@ -37,6 +37,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
 #include "nouveau_mm.h"
+#include "nouveau_vm.h"

 /*
  * NV10-NV40 tiling helpers
@@ -201,7 +202,7 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 	dev_priv->engine.instmem.flush(dev);
 	dev_priv->engine.fifo.tlb_flush(dev);
 	dev_priv->engine.graph.tlb_flush(dev);
-	nv50_vm_flush(dev, 6);
+	nv50_vm_flush_engine(dev, 6);
 	return 0;
 }
@@ -234,7 +235,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 	dev_priv->engine.instmem.flush(dev);
 	dev_priv->engine.fifo.tlb_flush(dev);
 	dev_priv->engine.graph.tlb_flush(dev);
-	nv50_vm_flush(dev, 6);
+	nv50_vm_flush_engine(dev, 6);
 }

 /*
drivers/gpu/drm/nouveau/nouveau_object.c

@@ -213,7 +213,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	}

 	ret = -ENOSYS;
-	if (dev_priv->ramin_available)
+	if (!(flags & NVOBJ_FLAG_DONT_MAP))
 		ret = instmem->map(gpuobj);
 	if (ret)
 		gpuobj->pinst = ~0;
drivers/gpu/drm/nouveau/nouveau_vm.c
new file mode 100644

/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_mm_node *r;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vm->pgt_bits - bits);
        u32 end, len;

        list_for_each_entry(r, &vram->regions, rl_entry) {
                u64 phys = (u64)r->offset << 12;
                u32 num  = r->length >> bits;

                while (num) {
                        struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;

                        end = (pte + num);
                        if (unlikely(end >= max))
                                end = max;
                        len = end - pte;

                        vm->map(vma, pgt, vram, pte, len, phys);

                        num -= len;
                        pte += len;
                        if (unlikely(end >= max)) {
                                pde++;
                                pte = 0;
                        }
                }
        }

        vm->flush(vm);
}

void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
{
        nouveau_vm_map_at(vma, 0, vram);
}

void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
                  dma_addr_t *list)
{
        struct nouveau_vm *vm = vma->vm;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vm->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                vm->map_sg(vma, pgt, pte, list, len);

                num  -= len;
                pte  += len;
                list += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        vm->flush(vm);
}

void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
        struct nouveau_vm *vm = vma->vm;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vm->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                vm->unmap(pgt, pte, len);

                num -= len;
                pte += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        vm->flush(vm);
}

void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
        nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde)
{
        struct nouveau_vm_pgd *vpgd;
        struct nouveau_vm_pgt *vpgt;
        struct nouveau_gpuobj *pgt;
        u32 pde;

        for (pde = fpde; pde <= lpde; pde++) {
                vpgt = &vm->pgt[pde - vm->fpde];
                if (--vpgt->refcount)
                        continue;

                list_for_each_entry(vpgd, &vm->pgd_list, head) {
                        vm->unmap_pgt(vpgd->obj, pde);
                }

                pgt = vpgt->obj;
                vpgt->obj = NULL;

                mutex_unlock(&vm->mm->mutex);
                nouveau_gpuobj_ref(NULL, &pgt);
                mutex_lock(&vm->mm->mutex);
        }
}

static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
        struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
        struct nouveau_vm_pgd *vpgd;
        struct nouveau_gpuobj *pgt;
        u32 pgt_size;
        int ret;

        pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
        pgt_size *= 8;

        mutex_unlock(&vm->mm->mutex);
        ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
        mutex_lock(&vm->mm->mutex);
        if (unlikely(ret))
                return ret;

        /* someone beat us to filling the PDE while we didn't have the lock */
        if (unlikely(vpgt->refcount++)) {
                mutex_unlock(&vm->mm->mutex);
                nouveau_gpuobj_ref(NULL, &pgt);
                mutex_lock(&vm->mm->mutex);
                return 0;
        }

        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                vm->map_pgt(vpgd->obj, type, pde, pgt);
        }

        vpgt->page_shift = type;
        vpgt->obj = pgt;
        return 0;
}

int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
               u32 access, struct nouveau_vma *vma)
{
        u32 align = (1 << page_shift) >> 12;
        u32 msize = size >> 12;
        u32 fpde, lpde, pde;
        int ret;

        mutex_lock(&vm->mm->mutex);
        ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
        if (unlikely(ret != 0)) {
                mutex_unlock(&vm->mm->mutex);
                return ret;
        }

        fpde = (vma->node->offset >> vm->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
        for (pde = fpde; pde <= lpde; pde++) {
                struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];

                if (likely(vpgt->refcount)) {
                        vpgt->refcount++;
                        continue;
                }

                ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
                if (ret) {
                        if (pde != fpde)
                                nouveau_vm_unmap_pgt(vm, fpde, pde - 1);
                        nouveau_mm_put(vm->mm, vma->node);
                        mutex_unlock(&vm->mm->mutex);
                        vma->node = NULL;
                        return ret;
                }
        }
        mutex_unlock(&vm->mm->mutex);

        vma->vm     = vm;
        vma->offset = (u64)vma->node->offset << 12;
        vma->access = access;
        return 0;
}

void
nouveau_vm_put(struct nouveau_vma *vma)
{
        struct nouveau_vm *vm = vma->vm;
        u32 fpde, lpde;

        if (unlikely(vma->node == NULL))
                return;
        fpde = (vma->node->offset >> vm->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

        mutex_lock(&vm->mm->mutex);
        nouveau_mm_put(vm->mm, vma->node);
        vma->node = NULL;
        nouveau_vm_unmap_pgt(vm, fpde, lpde);
        mutex_unlock(&vm->mm->mutex);
}

int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
               u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
               struct nouveau_vm **pvm)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_vm *vm;
        u64 mm_length = (offset + length) - mm_offset;
        u32 block;
        int ret;

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        if (dev_priv->card_type == NV_50) {
                vm->map_pgt = nv50_vm_map_pgt;
                vm->unmap_pgt = nv50_vm_unmap_pgt;
                vm->map = nv50_vm_map;
                vm->map_sg = nv50_vm_map_sg;
                vm->unmap = nv50_vm_unmap;
                vm->flush = nv50_vm_flush;
        } else {
                kfree(vm);
                return -ENOSYS;
        }

        vm->fpde = offset >> pgt_bits;
        vm->lpde = (offset + length - 1) >> pgt_bits;
        vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
        if (!vm->pgt) {
                kfree(vm);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&vm->pgd_list);
        vm->dev = dev;
        vm->refcount = 1;
        vm->pgt_bits = pgt_bits - 12;
        vm->spg_shift = spg_shift;
        vm->lpg_shift = lpg_shift;

        block = (1 << pgt_bits);
        if (length < block)
                block = length;

        ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
                              block >> 12);
        if (ret) {
                kfree(vm);
                return ret;
        }

        *pvm = vm;
        return 0;
}

static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm_pgd *vpgd;
        int i;

        if (!pgd)
                return 0;

        vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
        if (!vpgd)
                return -ENOMEM;

        nouveau_gpuobj_ref(pgd, &vpgd->obj);

        mutex_lock(&vm->mm->mutex);
        for (i = vm->fpde; i <= vm->lpde; i++) {
                struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde];

                if (!vpgt->obj) {
                        vm->unmap_pgt(pgd, i);
                        continue;
                }

                vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj);
        }
        list_add(&vpgd->head, &vm->pgd_list);
        mutex_unlock(&vm->mm->mutex);
        return 0;
}

static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm_pgd *vpgd, *tmp;

        if (!pgd)
                return;

        mutex_lock(&vm->mm->mutex);
        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                if (vpgd->obj != pgd)
                        continue;

                list_del(&vpgd->head);
                nouveau_gpuobj_ref(NULL, &vpgd->obj);
                kfree(vpgd);
        }
        mutex_unlock(&vm->mm->mutex);
}

static void
nouveau_vm_del(struct nouveau_vm *vm)
{
        struct nouveau_vm_pgd *vpgd, *tmp;

        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                nouveau_vm_unlink(vm, vpgd->obj);
        }
        WARN_ON(nouveau_mm_fini(&vm->mm) != 0);

        kfree(vm->pgt);
        kfree(vm);
}

int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
               struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm *vm;
        int ret;

        vm = ref;
        if (vm) {
                ret = nouveau_vm_link(vm, pgd);
                if (ret)
                        return ret;

                vm->refcount++;
        }

        vm = *ptr;
        *ptr = ref;

        if (vm) {
                nouveau_vm_unlink(vm, pgd);

                if (--vm->refcount == 0)
                        nouveau_vm_del(vm);
        }

        return 0;
}
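A note on the index arithmetic repeated in the mapping loops above: offsets are tracked in units of 4 KiB pages; the high bits select a page directory entry and the remaining low bits, rescaled by the page size, select a PTE within that table. A worked example with assumed values (pgt_bits = 17, i.e. a 29-bit PDE span after the "- 12" adjustment in nouveau_vm_new(), and 4 KiB pages, node->type = 12); the concrete numbers are illustrative, not from the patch:

        u32 pgt_bits = 17;        /* assumed: each PDE spans 2^17 4KiB pages */
        u32 bits     = 12 - 12;   /* node->type - 12: 0 for 4KiB pages */
        u32 offset   = 0x25000;   /* VM address 0x25000000 >> 12 */

        u32 pde = offset >> pgt_bits;                        /* = 1 */
        u32 pte = (offset & ((1 << pgt_bits) - 1)) >> bits;  /* = 0x5000 */
        /* vm->pgt[] is then indexed as pde - vm->fpde, rebasing to the
         * first directory entry this VM actually covers. */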
drivers/gpu/drm/nouveau/nouveau_vm.h
new file mode 100644

/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#ifndef __NOUVEAU_VM_H__
#define __NOUVEAU_VM_H__
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
struct nouveau_vm_pgt {
        struct nouveau_gpuobj *obj;
        u32 page_shift;
        u32 refcount;
};

struct nouveau_vm_pgd {
        struct list_head head;
        struct nouveau_gpuobj *obj;
};

struct nouveau_vma {
        struct nouveau_vm *vm;
        struct nouveau_mm_node *node;
        u64 offset;
        u32 access;
};

struct nouveau_vm {
        struct drm_device *dev;
        struct nouveau_mm *mm;
        int refcount;

        struct list_head pgd_list;
        atomic_t pgraph_refs;
        atomic_t pcrypt_refs;

        struct nouveau_vm_pgt *pgt;
        u32 fpde;
        u32 lpde;

        u32 pgt_bits;
        u8  spg_shift;
        u8  lpg_shift;

        void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
                        struct nouveau_gpuobj *pgt);
        void (*unmap_pgt)(struct nouveau_gpuobj *pgd, u32 pde);
        void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
                    struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
        void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
                       u32 pte, dma_addr_t *, u32 cnt);
        void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
        void (*flush)(struct nouveau_vm *);
};

/* nouveau_vm.c */
int  nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
                    u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
                    struct nouveau_vm **);
int  nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
                    struct nouveau_gpuobj *pgd);
int  nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
                    u32 access, struct nouveau_vma *);
void nouveau_vm_put(struct nouveau_vma *);
void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
                       dma_addr_t *);

/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
                     struct nouveau_gpuobj *pgt);
void nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde);
void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
                 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *, u32 pte,
                    dma_addr_t *, u32 cnt);
void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nv50_vm_flush(struct nouveau_vm *);
void nv50_vm_flush_engine(struct drm_device *, int engine);

#endif
drivers/gpu/drm/nouveau/nv50_fifo.c

@@ -28,6 +28,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
+#include "nouveau_vm.h"

 static void
 nv50_fifo_playlist_update(struct drm_device *dev)
@@ -498,5 +499,5 @@ nv50_fifo_unload_context(struct drm_device *dev)
 void
 nv50_fifo_tlb_flush(struct drm_device *dev)
 {
-	nv50_vm_flush(dev, 5);
+	nv50_vm_flush_engine(dev, 5);
 }
drivers/gpu/drm/nouveau/nv50_graph.c

@@ -30,6 +30,7 @@
 #include "nouveau_ramht.h"
 #include "nouveau_grctx.h"
 #include "nouveau_dma.h"
+#include "nouveau_vm.h"
 #include "nv50_evo.h"

 static int nv50_graph_register(struct drm_device *);
@@ -468,7 +469,7 @@ nv50_graph_register(struct drm_device *dev)
 void
 nv50_graph_tlb_flush(struct drm_device *dev)
 {
-	nv50_vm_flush(dev, 0);
+	nv50_vm_flush_engine(dev, 0);
 }

 void
@@ -511,7 +512,7 @@ nv86_graph_tlb_flush(struct drm_device *dev)
 			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
 	}

-	nv50_vm_flush(dev, 0);
+	nv50_vm_flush_engine(dev, 0);

 	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
drivers/gpu/drm/nouveau/nv50_instmem.c

@@ -27,7 +27,9 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_vm.h"

 struct nv50_instmem_priv {
 	uint32_t save1700[5]; /* 0x1700->0x1710 */
@@ -404,7 +406,7 @@ nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
 	}
 	dev_priv->engine.instmem.flush(dev);

-	nv50_vm_flush(dev, 6);
+	nv50_vm_flush_engine(dev, 6);

 	node->ramin = ramin;
 	gpuobj->pinst = ramin->start;
@@ -454,11 +456,3 @@ nv84_instmem_flush(struct drm_device *dev)
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
 }
-
-void
-nv50_vm_flush(struct drm_device *dev, int engine)
-{
-	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
-	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
-		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-}
drivers/gpu/drm/nouveau/nv50_vm.c
new file mode 100644

/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_vm.h"
void
nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde,
                struct nouveau_gpuobj *pgt)
{
        struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
        u32 coverage = (pgt->size >> 3) << type;
        u64 phys;

        phys  = pgt->vinst;
        phys |= 0x01; /* present */
        phys |= (type == 12) ? 0x02 : 0x00; /* 4KiB pages */
        if (dev_priv->vram_sys_base) {
                phys += dev_priv->vram_sys_base;
                phys |= 0x30;
        }

        if (coverage <= 32 * 1024 * 1024)
                phys |= 0x60;
        else if (coverage <= 64 * 1024 * 1024)
                phys |= 0x40;
        else if (coverage < 128 * 1024 * 1024)
                phys |= 0x20;

        nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
        nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
}

void
nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde)
{
        nv_wo32(pgd, (pde * 8) + 0, 0x00000000);
        nv_wo32(pgd, (pde * 8) + 4, 0xdeadcafe);
}

static inline u64
nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
             u64 phys, u32 memtype, u32 target)
{
        struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;

        phys |= 1; /* present */
        phys |= (u64)memtype << 40;

        /* IGPs don't have real VRAM, re-target to stolen system memory */
        if (target == 0 && dev_priv->vram_sys_base) {
                phys  += dev_priv->vram_sys_base;
                target = 3;
        }
        phys |= target << 4;

        if (vma->access & NV_MEM_ACCESS_SYS)
                phys |= (1 << 6);

        if (!(vma->access & NV_MEM_ACCESS_WO))
                phys |= (1 << 3);

        return phys;
}

void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
            struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
{
        u32 block, i;

        phys  = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
        pte <<= 3;
        cnt <<= 3;

        while (cnt) {
                u32 offset_h = upper_32_bits(phys);
                u32 offset_l = lower_32_bits(phys);

                for (i = 7; i >= 0; i--) {
                        block = 1 << (i + 3);
                        if (cnt >= block && !(pte & (block - 1)))
                                break;
                }
                offset_l |= (i << 7);

                phys += block << (vma->node->type - 3);
                cnt  -= block;

                while (block) {
                        nv_wo32(pgt, pte + 0, offset_l);
                        nv_wo32(pgt, pte + 4, offset_h);
                        pte += 8;
                        block -= 8;
                }
        }
}

void
nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
               u32 pte, dma_addr_t *list, u32 cnt)
{
        pte <<= 3;
        while (cnt--) {
                u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
                nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                nv_wo32(pgt, pte + 4, upper_32_bits(phys));
                pte += 8;
        }
}

void
nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
        pte <<= 3;
        while (cnt--) {
                nv_wo32(pgt, pte + 0, 0x00000000);
                nv_wo32(pgt, pte + 4, 0x00000000);
                pte += 8;
        }
}

void
nv50_vm_flush(struct nouveau_vm *vm)
{
        struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;

        pinstmem->flush(vm->dev);

        nv50_vm_flush_engine(vm->dev, 6);
}

void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
        nv_wr32(dev, 0x100c80, (engine << 16) | 1);
        if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
                NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
}
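Reading nv50_vm_addr() back, the PTE bit usage this file relies on can be summarized as below. The macro names are hypothetical, written here only to document the code above; they are not part of the patch:

        /* Hypothetical names summarizing the bits nv50_vm_addr() sets. */
        #define NV50_PTE_PRESENT     (1ULL << 0)    /* phys |= 1 */
        #define NV50_PTE_READ_ONLY   (1ULL << 3)    /* set when !(access & NV_MEM_ACCESS_WO) */
        #define NV50_PTE_TARGET(t)   ((u64)(t) << 4)  /* 0 VRAM, 2 SG, 3 stolen sysmem */
        #define NV50_PTE_SYS         (1ULL << 6)    /* set when access & NV_MEM_ACCESS_SYS */
        #define NV50_PTE_MEMTYPE(m)  ((u64)(m) << 40)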
drivers/gpu/drm/nouveau/nv84_crypt.c

@@ -25,6 +25,7 @@
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_util.h"
+#include "nouveau_vm.h"

 static void nv84_crypt_isr(struct drm_device *);
@@ -84,7 +85,7 @@ nv84_crypt_destroy_context(struct nouveau_channel *chan)
 void
 nv84_crypt_tlb_flush(struct drm_device *dev)
 {
-	nv50_vm_flush(dev, 0x0a);
+	nv50_vm_flush_engine(dev, 0x0a);
 }

 int