openanolis / cloud-kernel

Commit c78ec30b
Authored 14 years ago by Chris Wilson
drm/i915: Merge ring flushing and lazy requests
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Parent: 53640e1d
Showing 3 changed files with 24 additions and 32 deletions.
drivers/gpu/drm/i915/i915_drv.h       +1  -0
drivers/gpu/drm/i915/i915_gem.c       +22 -31
drivers/gpu/drm/i915/intel_display.c  +1  -1
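In brief, this commit folds the "emit the outstanding lazy request" step into i915_gem_flush_ring(), which gains a struct drm_file *file_priv parameter so the request can be attributed to the caller; kernel-internal paths pass NULL. The program below is only a toy sketch of that pattern, condensed from the i915_gem.c diff further down: its types and helpers (struct ring, ring_flush, add_request, flush_ring) are simplified stand-ins, not the real i915 API.

/* Toy model (not kernel code): flushing a ring also retires its pending
 * "lazy" request, so callers no longer need a separate add-request pass.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring {
	const char *name;
	bool outstanding_lazy_request;
};

/* Stand-in for ring->flush() + i915_gem_process_flushing_list(). */
static void ring_flush(struct ring *ring, unsigned invalidate, unsigned flush)
{
	printf("%s: flush invalidate=%#x flush=%#x\n", ring->name, invalidate, flush);
}

/* Stand-in for i915_add_request(): emits the deferred request. */
static void add_request(struct ring *ring)
{
	printf("%s: emit request\n", ring->name);
}

/* Mirrors the new i915_gem_flush_ring(): flush, then emit the lazy request. */
static void flush_ring(struct ring *ring, unsigned invalidate, unsigned flush)
{
	ring_flush(ring, invalidate, flush);
	if (ring->outstanding_lazy_request) {
		add_request(ring);
		ring->outstanding_lazy_request = false;
	}
}

int main(void)
{
	struct ring render = { "render", true };

	/* One call now covers what previously took a flush plus a separate
	 * lazy-request pass in the execbuffer path. */
	flush_ring(&render, ~0u, ~0u);
	return 0;
}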
drivers/gpu/drm/i915/i915_drv.h

@@ -1003,6 +1003,7 @@ void i915_gem_reset_flushing_list(struct drm_device *dev);
 void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 void i915_gem_flush_ring(struct drm_device *dev,
+			 struct drm_file *file_priv,
 			 struct intel_ring_buffer *ring,
 			 uint32_t invalidate_domains,
 			 uint32_t flush_domains);
drivers/gpu/drm/i915/i915_gem.c

@@ -1910,16 +1910,23 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
 
 void
 i915_gem_flush_ring(struct drm_device *dev,
+		    struct drm_file *file_priv,
 		    struct intel_ring_buffer *ring,
 		    uint32_t invalidate_domains,
 		    uint32_t flush_domains)
 {
 	ring->flush(dev, ring, invalidate_domains, flush_domains);
 	i915_gem_process_flushing_list(dev, flush_domains, ring);
+
+	if (ring->outstanding_lazy_request) {
+		(void)i915_add_request(dev, file_priv, NULL, ring);
+		ring->outstanding_lazy_request = false;
+	}
 }
 
 static void
 i915_gem_flush(struct drm_device *dev,
+	       struct drm_file *file_priv,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains,
	       uint32_t flush_rings)
@@ -1931,11 +1938,11 @@ i915_gem_flush(struct drm_device *dev,
 
 	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
 		if (flush_rings & RING_RENDER)
-			i915_gem_flush_ring(dev,
+			i915_gem_flush_ring(dev, file_priv,
 					    &dev_priv->render_ring,
 					    invalidate_domains, flush_domains);
 		if (flush_rings & RING_BSD)
-			i915_gem_flush_ring(dev,
+			i915_gem_flush_ring(dev, file_priv,
 					    &dev_priv->bsd_ring,
 					    invalidate_domains, flush_domains);
 	}
@@ -2054,6 +2061,7 @@ i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
+	u32 seqno;
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
@@ -2064,24 +2072,18 @@ i915_gpu_idle(struct drm_device *dev)
 		return 0;
 
 	/* Flush everything onto the inactive list. */
-	i915_gem_flush_ring(dev,
-			    &dev_priv->render_ring,
-			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	ret = i915_wait_request(dev,
-				i915_gem_next_request_seqno(dev,
-							    &dev_priv->render_ring),
-				&dev_priv->render_ring);
+	seqno = i915_gem_next_request_seqno(dev, &dev_priv->render_ring);
+	i915_gem_flush_ring(dev, NULL, &dev_priv->render_ring,
+			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = i915_wait_request(dev, seqno, &dev_priv->render_ring);
 	if (ret)
 		return ret;
 
 	if (HAS_BSD(dev)) {
-		i915_gem_flush_ring(dev,
-				    &dev_priv->bsd_ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-		ret = i915_wait_request(dev,
-					i915_gem_next_request_seqno(dev,
-								    &dev_priv->bsd_ring),
-					&dev_priv->bsd_ring);
+		seqno = i915_gem_next_request_seqno(dev, &dev_priv->render_ring);
+		i915_gem_flush_ring(dev, NULL, &dev_priv->bsd_ring,
+				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		ret = i915_wait_request(dev, seqno, &dev_priv->bsd_ring);
 		if (ret)
 			return ret;
 	}
@@ -2651,7 +2653,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
 
 	/* Queue the GPU write cache flushing we need. */
 	old_write_domain = obj->write_domain;
-	i915_gem_flush_ring(dev,
+	i915_gem_flush_ring(dev, NULL,
 			    to_intel_bo(obj)->ring,
 			    0, obj->write_domain);
 	BUG_ON(obj->write_domain);
@@ -2780,7 +2782,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
 	i915_gem_object_flush_cpu_write_domain(obj);
 
 	old_read_domains = obj->read_domains;
-	obj->read_domains = I915_GEM_DOMAIN_GTT;
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
@@ -2837,7 +2839,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 * need to be invalidated at next use.
 	 */
 	if (write) {
-		obj->read_domains &= I915_GEM_DOMAIN_CPU;
+		obj->read_domains = I915_GEM_DOMAIN_CPU;
 		obj->write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
@@ -3762,21 +3764,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			  dev->invalidate_domains,
 			  dev->flush_domains);
 #endif
-		i915_gem_flush(dev,
+		i915_gem_flush(dev, file_priv,
 			       dev->invalidate_domains,
 			       dev->flush_domains,
 			       dev_priv->mm.flush_rings);
 	}
 
-	if (dev_priv->render_ring.outstanding_lazy_request) {
-		(void)i915_add_request(dev, file_priv, NULL, &dev_priv->render_ring);
-		dev_priv->render_ring.outstanding_lazy_request = false;
-	}
-	if (dev_priv->bsd_ring.outstanding_lazy_request) {
-		(void)i915_add_request(dev, file_priv, NULL, &dev_priv->bsd_ring);
-		dev_priv->bsd_ring.outstanding_lazy_request = false;
-	}
-
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
 		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -4232,12 +4225,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * use this buffer rather sooner than later, so issuing the required
 	 * flush earlier is beneficial.
 	 */
-	if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
-		i915_gem_flush_ring(dev,
+	if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+		i915_gem_flush_ring(dev, file_priv,
 				    obj_priv->ring,
 				    0, obj->write_domain);
-		(void)i915_add_request(dev, file_priv, NULL, obj_priv->ring);
-	}
 
 	/* Update the active list for the hardware's current position.
 	 * Otherwise this only updates on a delayed timer or when irqs
drivers/gpu/drm/i915/intel_display.c

@@ -5058,7 +5058,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	/* Schedule the pipelined flush */
 	if (was_dirty)
-		i915_gem_flush_ring(dev, obj_priv->ring, 0, was_dirty);
+		i915_gem_flush_ring(dev, NULL, obj_priv->ring, 0, was_dirty);
 
 	if (IS_GEN3(dev) || IS_GEN2(dev)) {
 		u32 flip_mask;