openeuler / raspberrypi-kernel
Commit e384eafc
Authored Nov 23, 2010 by Chris Wilson
Merge branch 'drm-intel-fixes' into drm-intel-next
Parents: faa60c41, bcf50e27

Showing 2 changed files with 307 additions and 179 deletions:

  drivers/gpu/drm/i915/i915_gem.c    +293  -168
  drivers/gpu/drm/i915/intel_sdvo.c   +14   -11

drivers/gpu/drm/i915/i915_gem.c

@@ -3373,192 +3373,230 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	return 0;
 }
 
 /**
  * Pin an object to the GTT and evaluate the relocations landing in it.
  */
 static int
-i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
-			     struct drm_file *file_priv,
-			     struct drm_i915_gem_exec_object2 *entry)
-{
-	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	struct drm_gem_object *target_obj = NULL;
-	uint32_t target_handle = 0;
-	int i, ret = 0;
-
-	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
-	for (i = 0; i < entry->relocation_count; i++) {
-		struct drm_i915_gem_relocation_entry reloc;
-		uint32_t target_offset;
-
-		if (__copy_from_user_inatomic(&reloc,
-					      user_relocs+i,
-					      sizeof(reloc))) {
-			ret = -EFAULT;
-			break;
-		}
-
-		if (reloc.target_handle != target_handle) {
-			drm_gem_object_unreference(target_obj);
-
-			target_obj = drm_gem_object_lookup(dev, file_priv,
-							   reloc.target_handle);
-			if (target_obj == NULL) {
-				ret = -ENOENT;
-				break;
-			}
-
-			target_handle = reloc.target_handle;
-		}
-		target_offset = to_intel_bo(target_obj)->gtt_offset;
-
-#if WATCH_RELOC
-		DRM_INFO("%s: obj %p offset %08x target %d "
-			 "read %08x write %08x gtt %08x "
-			 "presumed %08x delta %08x\n",
-			 __func__,
-			 obj,
-			 (int) reloc.offset,
-			 (int) reloc.target_handle,
-			 (int) reloc.read_domains,
-			 (int) reloc.write_domain,
-			 (int) target_offset,
-			 (int) reloc.presumed_offset,
-			 reloc.delta);
-#endif
-
-		/* The target buffer should have appeared before us in the
-		 * exec_object list, so it should have a GTT space bound by now.
-		 */
-		if (target_offset == 0) {
-			DRM_ERROR("No GTT space found for object %d\n",
-				  reloc.target_handle);
-			ret = -EINVAL;
-			break;
-		}
-
-		/* Validate that the target is in a valid r/w GPU domain */
-		if (reloc.write_domain & (reloc.write_domain-1)) {
-			DRM_ERROR("reloc with multiple write domains: "
-				  "obj %p target %d offset %d "
-				  "read %08x write %08x",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.read_domains,
-				  reloc.write_domain);
-			ret = -EINVAL;
-			break;
-		}
-		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
-			DRM_ERROR("reloc with read/write CPU domains: "
-				  "obj %p target %d offset %d "
-				  "read %08x write %08x",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.read_domains,
-				  reloc.write_domain);
-			ret = -EINVAL;
-			break;
-		}
-		if (reloc.write_domain && target_obj->pending_write_domain &&
-		    reloc.write_domain != target_obj->pending_write_domain) {
-			DRM_ERROR("Write domain conflict: "
-				  "obj %p target %d offset %d "
-				  "new %08x old %08x\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.write_domain,
-				  target_obj->pending_write_domain);
-			ret = -EINVAL;
-			break;
-		}
-
-		target_obj->pending_read_domains |= reloc.read_domains;
-		target_obj->pending_write_domain |= reloc.write_domain;
-
-		/* If the relocation already has the right value in it, no
-		 * more work needs to be done.
-		 */
-		if (target_offset == reloc.presumed_offset)
-			continue;
-
-		/* Check that the relocation address is valid... */
-		if (reloc.offset > obj->base.size - 4) {
-			DRM_ERROR("Relocation beyond object bounds: "
-				  "obj %p target %d offset %d size %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset, (int) obj->base.size);
-			ret = -EINVAL;
-			break;
-		}
-		if (reloc.offset & 3) {
-			DRM_ERROR("Relocation not 4-byte aligned: "
-				  "obj %p target %d offset %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset);
-			ret = -EINVAL;
-			break;
-		}
-
-		/* and points to somewhere within the target object. */
-		if (reloc.delta >= target_obj->size) {
-			DRM_ERROR("Relocation beyond target object bounds: "
-				  "obj %p target %d delta %d size %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.delta, (int) target_obj->size);
-			ret = -EINVAL;
-			break;
-		}
-
-		reloc.delta += target_offset;
-		if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
-			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
-			char *vaddr;
-
-			vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
-			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
-			kunmap_atomic(vaddr);
-		} else {
-			uint32_t __iomem *reloc_entry;
-			void __iomem *reloc_page;
-
-			ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
-			if (ret)
-				break;
-
-			/* Map the page containing the relocation we're going to perform. */
-			reloc.offset += obj->gtt_offset;
-			reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-							      reloc.offset & PAGE_MASK);
-			reloc_entry = (uint32_t __iomem *)
-				(reloc_page + (reloc.offset & ~PAGE_MASK));
-			iowrite32(reloc.delta, reloc_entry);
-			io_mapping_unmap_atomic(reloc_page);
-		}
-
-		/* and update the user's relocation entry */
-		reloc.presumed_offset = target_offset;
-		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
-					    &reloc.presumed_offset,
-					    sizeof(reloc.presumed_offset))) {
-			ret = -EFAULT;
-			break;
-		}
-	}
-
-	drm_gem_object_unreference(target_obj);
-	return ret;
-}
-
-static int
-i915_gem_execbuffer_pin(struct drm_device *dev,
-			struct drm_file *file,
-			struct drm_gem_object **object_list,
-			struct drm_i915_gem_exec_object2 *exec_list,
-			int count)
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+				   struct drm_file *file_priv,
+				   struct drm_i915_gem_exec_object2 *entry,
+				   struct drm_i915_gem_relocation_entry *reloc)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_gem_object *target_obj;
+	uint32_t target_offset;
+	int ret = -EINVAL;
+
+	target_obj = drm_gem_object_lookup(dev, file_priv,
+					   reloc->target_handle);
+	if (target_obj == NULL)
+		return -ENOENT;
+
+	target_offset = to_intel_bo(target_obj)->gtt_offset;
+
+#if WATCH_RELOC
+	DRM_INFO("%s: obj %p offset %08x target %d "
+		 "read %08x write %08x gtt %08x "
+		 "presumed %08x delta %08x\n",
+		 __func__,
+		 obj,
+		 (int) reloc->offset,
+		 (int) reloc->target_handle,
+		 (int) reloc->read_domains,
+		 (int) reloc->write_domain,
+		 (int) target_offset,
+		 (int) reloc->presumed_offset,
+		 reloc->delta);
+#endif
+
+	/* The target buffer should have appeared before us in the
+	 * exec_object list, so it should have a GTT space bound by now.
+	 */
+	if (target_offset == 0) {
+		DRM_ERROR("No GTT space found for object %d\n",
+			  reloc->target_handle);
+		goto err;
+	}
+
+	/* Validate that the target is in a valid r/w GPU domain */
+	if (reloc->write_domain & (reloc->write_domain-1)) {
+		DRM_ERROR("reloc with multiple write domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		goto err;
+	}
+	if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+	    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+		DRM_ERROR("reloc with read/write CPU domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		goto err;
+	}
+	if (reloc->write_domain && target_obj->pending_write_domain &&
+	    reloc->write_domain != target_obj->pending_write_domain) {
+		DRM_ERROR("Write domain conflict: "
+			  "obj %p target %d offset %d "
+			  "new %08x old %08x\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->write_domain,
+			  target_obj->pending_write_domain);
+		goto err;
+	}
+
+	target_obj->pending_read_domains |= reloc->read_domains;
+	target_obj->pending_write_domain |= reloc->write_domain;
+
+	/* If the relocation already has the right value in it, no
+	 * more work needs to be done.
+	 */
+	if (target_offset == reloc->presumed_offset)
+		goto out;
+
+	/* Check that the relocation address is valid... */
+	if (reloc->offset > obj->base.size - 4) {
+		DRM_ERROR("Relocation beyond object bounds: "
+			  "obj %p target %d offset %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset, (int) obj->base.size);
+		goto err;
+	}
+	if (reloc->offset & 3) {
+		DRM_ERROR("Relocation not 4-byte aligned: "
+			  "obj %p target %d offset %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset);
+		goto err;
+	}
+
+	/* and points to somewhere within the target object. */
+	if (reloc->delta >= target_obj->size) {
+		DRM_ERROR("Relocation beyond target object bounds: "
+			  "obj %p target %d delta %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->delta, (int) target_obj->size);
+		goto err;
+	}
+
+	reloc->delta += target_offset;
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+		char *vaddr;
+
+		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+		kunmap_atomic(vaddr);
+	} else {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		uint32_t __iomem *reloc_entry;
+		void __iomem *reloc_page;
+
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+		if (ret)
+			goto err;
+
+		/* Map the page containing the relocation we're going to perform. */
+		reloc->offset += obj->gtt_offset;
+		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+						      reloc->offset & PAGE_MASK);
+		reloc_entry = (uint32_t __iomem *)
+			(reloc_page + (reloc->offset & ~PAGE_MASK));
+		iowrite32(reloc->delta, reloc_entry);
+		io_mapping_unmap_atomic(reloc_page);
+	}
+
+	/* and update the user's relocation entry */
+	reloc->presumed_offset = target_offset;
+
+out:
+	ret = 0;
+err:
+	drm_gem_object_unreference(target_obj);
+	return ret;
+}
+
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+				    struct drm_file *file_priv,
+				    struct drm_i915_gem_exec_object2 *entry)
+{
+	struct drm_i915_gem_relocation_entry __user *user_relocs;
+	int i, ret;
+
+	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+	for (i = 0; i < entry->relocation_count; i++) {
+		struct drm_i915_gem_relocation_entry reloc;
+
+		if (__copy_from_user_inatomic(&reloc,
+					      user_relocs+i,
+					      sizeof(reloc)))
+			return -EFAULT;
+
+		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
+		if (ret)
+			return ret;
+
+		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+					    &reloc.presumed_offset,
+					    sizeof(reloc.presumed_offset)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+					 struct drm_file *file_priv,
+					 struct drm_i915_gem_exec_object2 *entry,
+					 struct drm_i915_gem_relocation_entry *relocs)
+{
+	int i, ret;
+
+	for (i = 0; i < entry->relocation_count; i++) {
+		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+			     struct drm_file *file,
+			     struct drm_gem_object **object_list,
+			     struct drm_i915_gem_exec_object2 *exec_list,
+			     int count)
+{
+	int i, ret;
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate_object(obj, file,
+							  &exec_list[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct drm_device *dev,
+			    struct drm_file *file,
+			    struct drm_gem_object **object_list,
+			    struct drm_i915_gem_exec_object2 *exec_list,
+			    int count)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i, retry;
@@ -3625,6 +3663,87 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
 	} while (1);
 }
 
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+				  struct drm_file *file,
+				  struct drm_gem_object **object_list,
+				  struct drm_i915_gem_exec_object2 *exec_list,
+				  int count)
+{
+	struct drm_i915_gem_relocation_entry *reloc;
+	int i, total, ret;
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->in_execbuffer = false;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	total = 0;
+	for (i = 0; i < count; i++)
+		total += exec_list[i].relocation_count;
+
+	reloc = drm_malloc_ab(total, sizeof(*reloc));
+	if (reloc == NULL) {
+		mutex_lock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+		if (copy_from_user(reloc+total, user_relocs,
+				   exec_list[i].relocation_count *
+				   sizeof(*reloc))) {
+			ret = -EFAULT;
+			mutex_lock(&dev->struct_mutex);
+			goto err;
+		}
+
+		total += exec_list[i].relocation_count;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		mutex_lock(&dev->struct_mutex);
+		goto err;
+	}
+
+	ret = i915_gem_execbuffer_reserve(dev, file,
+					  object_list, exec_list,
+					  count);
+	if (ret)
+		goto err;
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+							       &exec_list[i],
+							       reloc + total);
+		if (ret)
+			goto err;
+
+		total += exec_list[i].relocation_count;
+	}
+
+	/* Leave the user relocations as are, this is the painfully slow path,
+	 * and we want to avoid the complication of dropping the lock whilst
+	 * having buffers reserved in the aperture and so causing spurious
+	 * ENOSPC for random operations.
+	 */
+
+err:
+	drm_free_large(reloc);
+	return ret;
+}
+
 static int
 i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
 				struct drm_file *file,
@@ -3902,18 +4021,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
-	ret = i915_gem_execbuffer_pin(dev, file,
-				      object_list, exec_list,
-				      args->buffer_count);
+	ret = i915_gem_execbuffer_reserve(dev, file,
+					  object_list, exec_list,
+					  args->buffer_count);
 	if (ret)
 		goto err;
 
 	/* The objects are in their final locations, apply the relocations. */
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
-		ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+	ret = i915_gem_execbuffer_relocate(dev, file,
+					   object_list, exec_list,
+					   args->buffer_count);
+	if (ret) {
+		if (ret == -EFAULT) {
+			ret = i915_gem_execbuffer_relocate_slow(dev, file,
+								object_list,
+								exec_list,
+								args->buffer_count);
+			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+		}
 		if (ret)
 			goto err;
 	}
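
The i915_gem.c hunks above split the old all-in-one i915_gem_execbuffer_relocate() loop into a per-entry helper (i915_gem_execbuffer_relocate_entry) driven by a fast path that copies each relocation from user space with __copy_from_user_inatomic while struct_mutex is held, and a slow path (i915_gem_execbuffer_relocate_slow) that drops the lock, copies the whole relocation tables with copy_from_user, re-reserves the buffers and replays the relocations. The sketch below is a minimal, self-contained userspace model of that fast-path/slow-path fallback, not kernel code; the names apply_reloc_fast, apply_reloc_slow and the simulated fault flag are illustrative only and do not appear in the sources.

/* Simplified userspace model of the fast/slow relocation split above. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct reloc { unsigned offset; unsigned delta; };

/* Fast path: pretend the atomic user copy may fail with -EFAULT, as
 * __copy_from_user_inatomic can while the GEM mutex is held. */
static int apply_reloc_fast(const struct reloc *user, struct reloc *out, int simulate_fault)
{
	if (simulate_fault)
		return -EFAULT;	/* caller must fall back to the slow path */
	memcpy(out, user, sizeof(*out));
	return 0;
}

/* Slow path analogue: "drop the lock", copy everything up front, then replay. */
static int apply_reloc_slow(const struct reloc *user, struct reloc *out)
{
	memcpy(out, user, sizeof(*out));	/* plain copy_from_user analogue */
	return 0;
}

int main(void)
{
	struct reloc user = { .offset = 0x40, .delta = 0x1000 };
	struct reloc copy;

	int ret = apply_reloc_fast(&user, &copy, /*simulate_fault=*/1);
	if (ret == -EFAULT)
		ret = apply_reloc_slow(&user, &copy);
	if (ret)
		return EXIT_FAILURE;

	printf("relocation applied: offset=%#x delta=%#x\n", copy.offset, copy.delta);
	return EXIT_SUCCESS;
}
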
drivers/gpu/drm/i915/intel_sdvo.c

@@ -107,7 +107,8 @@ struct intel_sdvo {
 	 * This is set if we treat the device as HDMI, instead of DVI.
 	 */
 	bool is_hdmi;
-	bool has_audio;
+	bool has_hdmi_monitor;
+	bool has_hdmi_audio;
 
 	/**
 	 * This is set if we detect output of sdvo device as LVDS and
@@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	if (!intel_sdvo_set_target_input(intel_sdvo))
 		return;
 
-	if (intel_sdvo->is_hdmi &&
+	if (intel_sdvo->has_hdmi_monitor &&
 	    !intel_sdvo_set_avi_infoframe(intel_sdvo))
 		return;
@@ -1063,7 +1064,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	}
 	if (intel_crtc->pipe == 1)
 		sdvox |= SDVO_PIPE_B_SELECT;
-	if (intel_sdvo->has_audio)
+	if (intel_sdvo->has_hdmi_audio)
 		sdvox |= SDVO_AUDIO_ENABLE;
 
 	if (INTEL_INFO(dev)->gen >= 4) {
@@ -1388,8 +1389,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 		/* DDC bus is shared, match EDID to connector type */
 		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
 			status = connector_status_connected;
-			intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
-			intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
+			if (intel_sdvo->is_hdmi) {
+				intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+				intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+			}
 		}
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
@@ -1398,7 +1401,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 	if (status == connector_status_connected) {
 		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 		if (intel_sdvo_connector->force_audio)
-			intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
+			intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
 	}
 
 	return status;
@@ -1713,12 +1716,12 @@ intel_sdvo_set_property(struct drm_connector *connector,
 		intel_sdvo_connector->force_audio = val;
 
-		if (val > 0 && intel_sdvo->has_audio)
+		if (val > 0 && intel_sdvo->has_hdmi_audio)
 			return 0;
 
-		if (val < 0 && !intel_sdvo->has_audio)
+		if (val < 0 && !intel_sdvo->has_hdmi_audio)
 			return 0;
 
-		intel_sdvo->has_audio = val > 0;
+		intel_sdvo->has_hdmi_audio = val > 0;
 		goto done;
 	}
@@ -2070,6 +2073,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 		intel_sdvo_set_colorimetry(intel_sdvo,
 					   SDVO_COLORIMETRY_RGB256);
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+		intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+		intel_sdvo->is_hdmi = true;
 	}
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -2077,8 +2082,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
-	intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
-
 	return true;
 }
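
The intel_sdvo.c hunks replace the single has_audio flag with has_hdmi_monitor and has_hdmi_audio, so the AVI infoframe and SDVO_AUDIO_ENABLE are only applied when the EDID of an HDMI-capable sink reports them, while the force_audio connector property can still override the detected value. Below is a minimal sketch of that selection logic as a standalone C program; the helper name sdvo_audio_enabled is hypothetical and is not part of the driver.

/* Hypothetical model of the audio-selection rule shown in the diff above. */
#include <stdbool.h>
#include <stdio.h>

/* force_audio: <0 forces audio off, 0 keeps auto-detect, >0 forces audio on */
static bool sdvo_audio_enabled(bool has_hdmi_audio, int force_audio)
{
	if (force_audio)
		return force_audio > 0;	/* user override wins */
	return has_hdmi_audio;		/* otherwise follow the EDID of the HDMI sink */
}

int main(void)
{
	printf("%d %d %d\n",
	       sdvo_audio_enabled(true, 0),    /* auto: follow EDID -> 1 */
	       sdvo_audio_enabled(true, -1),   /* user forced off -> 0 */
	       sdvo_audio_enabled(false, 1));  /* user forced on -> 1 */
	return 0;
}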