openanolis / cloud-kernel
Commit 5444e770
Authored Aug 20, 2015 by Ben Skeggs
drm/nouveau/fifo: switch to gpuobj accessor macros
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

Parent: 3f532ef1
Showing 8 changed files with 220 additions and 158 deletions (+220, -158)
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c    +50 -42
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c  +43 -28
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c  +41 -26
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c   +11 -7
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c   +6 -4
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c   +6 -4
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c   +13 -7
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c   +50 -40
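The conversion applied across all eight files follows one mechanical pattern: raw nv_wo32()/nv_ro32() accesses to a gpuobj become the nvkm_wo32()/nvkm_ro32() accessor macros, and each run of accesses is bracketed by nvkm_kmap()/nvkm_done(). A minimal before/after sketch of that pattern; here "gpuobj" and "offset" are stand-ins for whichever object and value a given hunk touches (e.g. base->eng, base->ramfc, fifo->ramfc), not identifiers from the patch itself:

        /* before: raw accessors, no explicit map/unmap around the writes */
        nv_wo32(gpuobj, 0x08, lower_32_bits(offset));
        nv_wo32(gpuobj, 0x0c, upper_32_bits(offset));

        /* after: map the object once, write through the nvkm accessors, then release */
        nvkm_kmap(gpuobj);
        nvkm_wo32(gpuobj, 0x08, lower_32_bits(offset));
        nvkm_wo32(gpuobj, 0x0c, upper_32_bits(offset));
        nvkm_done(gpuobj);

The bar->flush(bar) calls are left in place by this commit; only the accessor style and the kmap/done bracketing change.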
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c  (view file @ 5444e770)

@@ -65,14 +65,16 @@ g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
         }
 
         nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-        nv_wo32(base->eng, addr + 0x00, 0x00190000);
-        nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
-        nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
-        nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
-                                        upper_32_bits(start));
-        nv_wo32(base->eng, addr + 0x10, 0x00000000);
-        nv_wo32(base->eng, addr + 0x14, 0x00000000);
+        nvkm_kmap(base->eng);
+        nvkm_wo32(base->eng, addr + 0x00, 0x00190000);
+        nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
+        nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start));
+        nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
+                                          upper_32_bits(start));
+        nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
+        nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
         bar->flush(bar);
+        nvkm_done(base->eng);
         return 0;
 }

@@ -119,13 +121,15 @@ g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                 return -EBUSY;
         }
 
-        nv_wo32(base->eng, addr + 0x00, 0x00000000);
-        nv_wo32(base->eng, addr + 0x04, 0x00000000);
-        nv_wo32(base->eng, addr + 0x08, 0x00000000);
-        nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-        nv_wo32(base->eng, addr + 0x10, 0x00000000);
-        nv_wo32(base->eng, addr + 0x14, 0x00000000);
+        nvkm_kmap(base->eng);
+        nvkm_wo32(base->eng, addr + 0x00, 0x00000000);
+        nvkm_wo32(base->eng, addr + 0x04, 0x00000000);
+        nvkm_wo32(base->eng, addr + 0x08, 0x00000000);
+        nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
+        nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
+        nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
         bar->flush(bar);
+        nvkm_done(base->eng);
         return 0;
 }

@@ -216,23 +220,25 @@ g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
         nv_parent(chan)->object_attach = g84_fifo_object_attach;
         nv_parent(chan)->object_detach = nv50_fifo_object_detach;
 
-        nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
-        nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-        nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
-        nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
-        nv_wo32(base->ramfc, 0x3c, 0x003f6078);
-        nv_wo32(base->ramfc, 0x44, 0x01003fff);
-        nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-        nv_wo32(base->ramfc, 0x4c, 0xffffffff);
-        nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-        nv_wo32(base->ramfc, 0x78, 0x00000000);
-        nv_wo32(base->ramfc, 0x7c, 0x30000001);
-        nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-                                   (4 << 24) /* SEARCH_FULL */ |
-                                   (chan->ramht->gpuobj.node->offset >> 4));
-        nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
-        nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
+        nvkm_kmap(base->ramfc);
+        nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
+        nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+        nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
+        nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
+        nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
+        nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
+        nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+        nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
+        nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
+        nvkm_wo32(base->ramfc, 0x78, 0x00000000);
+        nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
+        nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+                                     (4 << 24) /* SEARCH_FULL */ |
+                                     (chan->ramht->gpuobj.node->offset >> 4));
+        nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
+        nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
         bar->flush(bar);
+        nvkm_done(base->ramfc);
         return 0;
 }

@@ -294,20 +300,22 @@ g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
         ioffset = args->v0.ioffset;
         ilength = order_base_2(args->v0.ilength / 8);
 
-        nv_wo32(base->ramfc, 0x3c, 0x403f6078);
-        nv_wo32(base->ramfc, 0x44, 0x01003fff);
-        nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-        nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
-        nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
-        nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-        nv_wo32(base->ramfc, 0x78, 0x00000000);
-        nv_wo32(base->ramfc, 0x7c, 0x30000001);
-        nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-                                   (4 << 24) /* SEARCH_FULL */ |
-                                   (chan->ramht->gpuobj.node->offset >> 4));
-        nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
-        nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
+        nvkm_kmap(base->ramfc);
+        nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
+        nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
+        nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+        nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
+        nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+        nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
+        nvkm_wo32(base->ramfc, 0x78, 0x00000000);
+        nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
+        nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+                                     (4 << 24) /* SEARCH_FULL */ |
+                                     (chan->ramht->gpuobj.node->offset >> 4));
+        nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
+        nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
         bar->flush(bar);
+        nvkm_done(base->ramfc);
         return 0;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c  (view file @ 5444e770)

@@ -86,15 +86,17 @@ gf100_fifo_runlist_update(struct gf100_fifo *fifo)
         cur = fifo->runlist.mem[fifo->runlist.active];
         fifo->runlist.active = !fifo->runlist.active;
 
+        nvkm_kmap(cur);
         for (i = 0, p = 0; i < 128; i++) {
                 struct gf100_fifo_chan *chan = (void *)fifo->base.channel[i];
                 if (chan && chan->state == RUNNING) {
-                        nv_wo32(cur, p + 0, i);
-                        nv_wo32(cur, p + 4, 0x00000004);
+                        nvkm_wo32(cur, p + 0, i);
+                        nvkm_wo32(cur, p + 4, 0x00000004);
                         p += 8;
                 }
         }
         bar->flush(bar);
+        nvkm_done(cur);
 
         nvkm_wr32(device, 0x002270, cur->addr >> 12);
         nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3));

@@ -112,6 +114,7 @@ gf100_fifo_context_attach(struct nvkm_object *parent,
 {
         struct nvkm_bar *bar = nvkm_bar(parent);
         struct gf100_fifo_base *base = (void *)parent->parent;
+        struct nvkm_gpuobj *engn = &base->base.gpuobj;
         struct nvkm_engctx *ectx = (void *)object;
         u32 addr;
         int ret;

@@ -137,9 +140,11 @@ gf100_fifo_context_attach(struct nvkm_object *parent,
                 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
         }
 
-        nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
-        nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+        nvkm_kmap(engn);
+        nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+        nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
         bar->flush(bar);
+        nvkm_done(engn);
         return 0;
 }

@@ -150,6 +155,7 @@ gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
         struct gf100_fifo *fifo = (void *)parent->engine;
         struct gf100_fifo_base *base = (void *)parent->parent;
         struct gf100_fifo_chan *chan = (void *)parent;
+        struct nvkm_gpuobj *engn = &base->base.gpuobj;
         struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
         struct nvkm_device *device = subdev->device;
         struct nvkm_bar *bar = device->bar;

@@ -178,9 +184,11 @@ gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                 return -EBUSY;
         }
 
-        nv_wo32(base, addr + 0x00, 0x00000000);
-        nv_wo32(base, addr + 0x04, 0x00000000);
+        nvkm_kmap(engn);
+        nvkm_wo32(engn, addr + 0x00, 0x00000000);
+        nvkm_wo32(engn, addr + 0x04, 0x00000000);
         bar->flush(bar);
+        nvkm_done(engn);
         return 0;
 }

@@ -196,6 +204,7 @@ gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
         struct gf100_fifo *fifo = (void *)engine;
         struct gf100_fifo_base *base = (void *)parent;
         struct gf100_fifo_chan *chan;
+        struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
         u64 usermem, ioffset, ilength;
         int ret, i;

@@ -231,26 +240,30 @@ gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
         ioffset = args->v0.ioffset;
         ilength = order_base_2(args->v0.ilength / 8);
 
+        nvkm_kmap(fifo->user.mem);
         for (i = 0; i < 0x1000; i += 4)
-                nv_wo32(fifo->user.mem, usermem + i, 0x00000000);
-
-        nv_wo32(base, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
-        nv_wo32(base, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
-        nv_wo32(base, 0x10, 0x0000face);
-        nv_wo32(base, 0x30, 0xfffff902);
-        nv_wo32(base, 0x48, lower_32_bits(ioffset));
-        nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
-        nv_wo32(base, 0x54, 0x00000002);
-        nv_wo32(base, 0x84, 0x20400000);
-        nv_wo32(base, 0x94, 0x30000001);
-        nv_wo32(base, 0x9c, 0x00000100);
-        nv_wo32(base, 0xa4, 0x1f1f1f1f);
-        nv_wo32(base, 0xa8, 0x1f1f1f1f);
-        nv_wo32(base, 0xac, 0x0000001f);
-        nv_wo32(base, 0xb8, 0xf8000000);
-        nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
-        nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+                nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+        nvkm_done(fifo->user.mem);
+
+        nvkm_kmap(ramfc);
+        nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
+        nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
+        nvkm_wo32(ramfc, 0x10, 0x0000face);
+        nvkm_wo32(ramfc, 0x30, 0xfffff902);
+        nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
+        nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+        nvkm_wo32(ramfc, 0x54, 0x00000002);
+        nvkm_wo32(ramfc, 0x84, 0x20400000);
+        nvkm_wo32(ramfc, 0x94, 0x30000001);
+        nvkm_wo32(ramfc, 0x9c, 0x00000100);
+        nvkm_wo32(ramfc, 0xa4, 0x1f1f1f1f);
+        nvkm_wo32(ramfc, 0xa8, 0x1f1f1f1f);
+        nvkm_wo32(ramfc, 0xac, 0x0000001f);
+        nvkm_wo32(ramfc, 0xb8, 0xf8000000);
+        nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
+        nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
         bar->flush(bar);
+        nvkm_done(ramfc);
         return 0;
 }

@@ -341,10 +354,12 @@ gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
         if (ret)
                 return ret;
 
-        nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
-        nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
-        nv_wo32(base, 0x0208, 0xffffffff);
-        nv_wo32(base, 0x020c, 0x000000ff);
+        nvkm_kmap(&base->base.gpuobj);
+        nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
+        nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
+        nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
+        nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
+        nvkm_done(&base->base.gpuobj);
 
         ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
         if (ret)
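Note that in the gf100.c hunks above, and in the gk104.c hunks that follow, the conversion is not purely textual: the attach/detach and constructor paths gain local struct nvkm_gpuobj *engn / *ramfc pointers to &base->base.gpuobj, and the writes target that gpuobj rather than base itself, since the nvkm accessors operate on the embedded gpuobj. A rough sketch of the shape (fragment only, names as used in the hunks):

        struct nvkm_gpuobj *ramfc = &base->base.gpuobj;   /* local added by the patch */

        nvkm_kmap(ramfc);
        nvkm_wo32(ramfc, 0x10, 0x0000face);               /* writes now take the gpuobj, not base */
        nvkm_done(ramfc);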
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c  (view file @ 5444e770)

@@ -106,15 +106,17 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
         cur = engn->runlist[engn->cur_runlist];
         engn->cur_runlist = !engn->cur_runlist;
 
+        nvkm_kmap(cur);
         for (i = 0, p = 0; i < fifo->base.max; i++) {
                 struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i];
                 if (chan && chan->state == RUNNING && chan->engine == engine) {
-                        nv_wo32(cur, p + 0, i);
-                        nv_wo32(cur, p + 4, 0x00000000);
+                        nvkm_wo32(cur, p + 0, i);
+                        nvkm_wo32(cur, p + 4, 0x00000000);
                         p += 8;
                 }
         }
         bar->flush(bar);
+        nvkm_done(cur);
 
         nvkm_wr32(device, 0x002270, cur->addr >> 12);
         nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));

@@ -132,6 +134,7 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
 {
         struct nvkm_bar *bar = nvkm_bar(parent);
         struct gk104_fifo_base *base = (void *)parent->parent;
+        struct nvkm_gpuobj *engn = &base->base.gpuobj;
         struct nvkm_engctx *ectx = (void *)object;
         u32 addr;
         int ret;

@@ -161,9 +164,11 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
                 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
         }
 
-        nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
-        nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+        nvkm_kmap(engn);
+        nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+        nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
         bar->flush(bar);
+        nvkm_done(engn);
         return 0;
 }

@@ -195,6 +200,7 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
         struct nvkm_bar *bar = nvkm_bar(parent);
         struct gk104_fifo_base *base = (void *)parent->parent;
         struct gk104_fifo_chan *chan = (void *)parent;
+        struct nvkm_gpuobj *engn = &base->base.gpuobj;
         u32 addr;
         int ret;

@@ -216,9 +222,11 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                 return ret;
 
         if (addr) {
-                nv_wo32(base, addr + 0x00, 0x00000000);
-                nv_wo32(base, addr + 0x04, 0x00000000);
+                nvkm_kmap(engn);
+                nvkm_wo32(engn, addr + 0x00, 0x00000000);
+                nvkm_wo32(engn, addr + 0x04, 0x00000000);
                 bar->flush(bar);
+                nvkm_done(engn);
         }
 
         return 0;

@@ -237,6 +245,7 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
         struct gk104_fifo_base *base = (void *)parent;
         struct gk104_fifo_chan *chan;
         struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+        struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
         u64 usermem, ioffset, ilength;
         int ret, i;

@@ -282,24 +291,28 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
         ioffset = args->v0.ioffset;
         ilength = order_base_2(args->v0.ilength / 8);
 
+        nvkm_kmap(fifo->user.mem);
         for (i = 0; i < 0x200; i += 4)
-                nv_wo32(fifo->user.mem, usermem + i, 0x00000000);
-
-        nv_wo32(base, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
-        nv_wo32(base, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
-        nv_wo32(base, 0x10, 0x0000face);
-        nv_wo32(base, 0x30, 0xfffff902);
-        nv_wo32(base, 0x48, lower_32_bits(ioffset));
-        nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
-        nv_wo32(base, 0x84, 0x20400000);
-        nv_wo32(base, 0x94, 0x30000001);
-        nv_wo32(base, 0x9c, 0x00000100);
-        nv_wo32(base, 0xac, 0x0000001f);
-        nv_wo32(base, 0xe8, chan->base.chid);
-        nv_wo32(base, 0xb8, 0xf8000000);
-        nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
-        nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+                nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+        nvkm_done(fifo->user.mem);
+
+        nvkm_kmap(ramfc);
+        nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
+        nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
+        nvkm_wo32(ramfc, 0x10, 0x0000face);
+        nvkm_wo32(ramfc, 0x30, 0xfffff902);
+        nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
+        nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+        nvkm_wo32(ramfc, 0x84, 0x20400000);
+        nvkm_wo32(ramfc, 0x94, 0x30000001);
+        nvkm_wo32(ramfc, 0x9c, 0x00000100);
+        nvkm_wo32(ramfc, 0xac, 0x0000001f);
+        nvkm_wo32(ramfc, 0xe8, chan->base.chid);
+        nvkm_wo32(ramfc, 0xb8, 0xf8000000);
+        nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
+        nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
         bar->flush(bar);
+        nvkm_done(ramfc);
         return 0;
 }

@@ -387,10 +400,12 @@ gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
         if (ret)
                 return ret;
 
-        nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
-        nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
-        nv_wo32(base, 0x0208, 0xffffffff);
-        nv_wo32(base, 0x020c, 0x000000ff);
+        nvkm_kmap(&base->base.gpuobj);
+        nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
+        nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
+        nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
+        nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
+        nvkm_done(&base->base.gpuobj);
 
         ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
         if (ret)
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c  (view file @ 5444e770)

@@ -142,16 +142,18 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent,
         nv_parent(chan)->context_attach = nv04_fifo_context_attach;
         chan->ramfc = chan->base.chid * 32;
 
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x10,
-                     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-                     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+        nvkm_kmap(fifo->ramfc);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x10,
+                       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+                       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 #ifdef __BIG_ENDIAN
-                     NV_PFIFO_CACHE1_BIG_ENDIAN |
+                       NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
-                     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+                       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+        nvkm_done(fifo->ramfc);
         return 0;
 }

@@ -162,9 +164,11 @@ nv04_fifo_chan_dtor(struct nvkm_object *object)
         struct nv04_fifo_chan *chan = (void *)object;
         struct ramfc_desc *c = fifo->ramfc_desc;
 
+        nvkm_kmap(fifo->ramfc);
         do {
-                nv_wo32(fifo->ramfc, chan->ramfc + c->ctxp, 0x00000000);
+                nvkm_wo32(fifo->ramfc, chan->ramfc + c->ctxp, 0x00000000);
         } while ((++c)->bits);
+        nvkm_done(fifo->ramfc);
 
         nvkm_fifo_channel_destroy(&chan->base);
 }

@@ -217,8 +221,8 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
                 u32 rm = ((1ULL << c->bits) - 1) << c->regs;
                 u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
                 u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
-                u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
-                nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
+                u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
+                nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
         } while ((++c)->bits);
 
         c = fifo->ramfc_desc;
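The nv04_fifo_chan_fini() hunk above also shows the read side of the switch: nv_ro32() becomes nvkm_ro32(), so a live register field can still be folded back into the context image as a read-modify-write. The shape of that update, as a fragment with the names used in the hunk:

        u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;   /* field read from the register */
        u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);        /* context word with the field cleared */
        nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));   /* merged value written back */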
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c  (view file @ 5444e770)

@@ -86,16 +86,18 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent,
         nv_parent(chan)->context_attach = nv04_fifo_context_attach;
         chan->ramfc = chan->base.chid * 32;
 
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x14,
-                     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-                     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+        nvkm_kmap(fifo->ramfc);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x14,
+                       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+                       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 #ifdef __BIG_ENDIAN
-                     NV_PFIFO_CACHE1_BIG_ENDIAN |
+                       NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
-                     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+                       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+        nvkm_done(fifo->ramfc);
         return 0;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c  (view file @ 5444e770)

@@ -93,16 +93,18 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent,
         nv_parent(chan)->context_attach = nv04_fifo_context_attach;
         chan->ramfc = chan->base.chid * 64;
 
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x14,
-                     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-                     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+        nvkm_kmap(fifo->ramfc);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x14,
+                       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+                       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 #ifdef __BIG_ENDIAN
-                     NV_PFIFO_CACHE1_BIG_ENDIAN |
+                       NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
-                     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+                       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+        nvkm_done(fifo->ramfc);
         return 0;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c  (view file @ 5444e770)

@@ -130,7 +130,9 @@ nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
         if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
                 nvkm_wr32(device, reg, nv_engctx(engctx)->addr);
-        nv_wo32(fifo->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
+        nvkm_kmap(fifo->ramfc);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
+        nvkm_done(fifo->ramfc);
 
         nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
         spin_unlock_irqrestore(&fifo->base.lock, flags);

@@ -167,7 +169,9 @@ nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
         if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
                 nvkm_wr32(device, reg, 0x00000000);
-        nv_wo32(fifo->ramfc, chan->ramfc + ctx, 0x00000000);
+        nvkm_kmap(fifo->ramfc);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + ctx, 0x00000000);
+        nvkm_done(fifo->ramfc);
 
         nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
         spin_unlock_irqrestore(&fifo->base.lock, flags);

@@ -212,17 +216,19 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
         nv_parent(chan)->object_detach = nv04_fifo_object_detach;
         chan->ramfc = chan->base.chid * 128;
 
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x18, 0x30000000 |
-                     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-                     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+        nvkm_kmap(fifo->ramfc);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x18, 0x30000000 |
+                       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+                       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 #ifdef __BIG_ENDIAN
-                     NV_PFIFO_CACHE1_BIG_ENDIAN |
+                       NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
-                     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-        nv_wo32(fifo->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
+                       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+        nvkm_wo32(fifo->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
+        nvkm_done(fifo->ramfc);
         return 0;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c  (view file @ 5444e770)

@@ -49,12 +49,13 @@ nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
         cur = fifo->playlist[fifo->cur_playlist];
         fifo->cur_playlist = !fifo->cur_playlist;
 
+        nvkm_kmap(cur);
         for (i = fifo->base.min, p = 0; i < fifo->base.max; i++) {
                 if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
-                        nv_wo32(cur, p++ * 4, i);
+                        nvkm_wo32(cur, p++ * 4, i);
         }
         bar->flush(bar);
+        nvkm_done(cur);
 
         nvkm_wr32(device, 0x0032f4, cur->addr >> 12);
         nvkm_wr32(device, 0x0032ec, p);

@@ -88,14 +89,17 @@ nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
         }
 
         nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-        nv_wo32(base->eng, addr + 0x00, 0x00190000);
-        nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
-        nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
-        nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
-                                        upper_32_bits(start));
-        nv_wo32(base->eng, addr + 0x10, 0x00000000);
-        nv_wo32(base->eng, addr + 0x14, 0x00000000);
+
+        nvkm_kmap(base->eng);
+        nvkm_wo32(base->eng, addr + 0x00, 0x00190000);
+        nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
+        nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start));
+        nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
+                                          upper_32_bits(start));
+        nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
+        nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
         bar->flush(bar);
+        nvkm_done(base->eng);
         return 0;
 }

@@ -148,13 +152,15 @@ nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
         nvkm_wr32(device, 0x00b860, me);
 
         if (ret == 0) {
-                nv_wo32(base->eng, addr + 0x00, 0x00000000);
-                nv_wo32(base->eng, addr + 0x04, 0x00000000);
-                nv_wo32(base->eng, addr + 0x08, 0x00000000);
-                nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-                nv_wo32(base->eng, addr + 0x10, 0x00000000);
-                nv_wo32(base->eng, addr + 0x14, 0x00000000);
+                nvkm_kmap(base->eng);
+                nvkm_wo32(base->eng, addr + 0x00, 0x00000000);
+                nvkm_wo32(base->eng, addr + 0x04, 0x00000000);
+                nvkm_wo32(base->eng, addr + 0x08, 0x00000000);
+                nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
+                nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
+                nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
                 bar->flush(bar);
+                nvkm_done(base->eng);
         }
 
         return ret;

@@ -234,21 +240,23 @@ nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
         if (ret)
                 return ret;
 
-        nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
-        nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-        nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
-        nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
-        nv_wo32(base->ramfc, 0x3c, 0x003f6078);
-        nv_wo32(base->ramfc, 0x44, 0x01003fff);
-        nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-        nv_wo32(base->ramfc, 0x4c, 0xffffffff);
-        nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-        nv_wo32(base->ramfc, 0x78, 0x00000000);
-        nv_wo32(base->ramfc, 0x7c, 0x30000001);
-        nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-                                   (4 << 24) /* SEARCH_FULL */ |
-                                   (chan->ramht->gpuobj.node->offset >> 4));
+        nvkm_kmap(base->ramfc);
+        nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
+        nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+        nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
+        nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
+        nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
+        nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
+        nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+        nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
+        nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
+        nvkm_wo32(base->ramfc, 0x78, 0x00000000);
+        nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
+        nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+                                     (4 << 24) /* SEARCH_FULL */ |
+                                     (chan->ramht->gpuobj.node->offset >> 4));
         bar->flush(bar);
+        nvkm_done(base->ramfc);
         return 0;
 }

@@ -300,18 +308,20 @@ nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
         ioffset = args->v0.ioffset;
         ilength = order_base_2(args->v0.ilength / 8);
 
-        nv_wo32(base->ramfc, 0x3c, 0x403f6078);
-        nv_wo32(base->ramfc, 0x44, 0x01003fff);
-        nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-        nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
-        nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
-        nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-        nv_wo32(base->ramfc, 0x78, 0x00000000);
-        nv_wo32(base->ramfc, 0x7c, 0x30000001);
-        nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-                                   (4 << 24) /* SEARCH_FULL */ |
-                                   (chan->ramht->gpuobj.node->offset >> 4));
+        nvkm_kmap(base->ramfc);
+        nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
+        nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
+        nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+        nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
+        nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+        nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
+        nvkm_wo32(base->ramfc, 0x78, 0x00000000);
+        nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
+        nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+                                     (4 << 24) /* SEARCH_FULL */ |
+                                     (chan->ramht->gpuobj.node->offset >> 4));
         bar->flush(bar);
+        nvkm_done(base->ramfc);
         return 0;
 }