openanolis / cloud-kernel
Commit cff5c133, authored Oct 06, 2010 by Ben Skeggs

drm/nouveau: add more fine-grained locking to channel list + structures

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Parent: 6a6b73f2

Showing 19 changed files with 236 additions and 161 deletions (+236, -161)
drivers/gpu/drm/nouveau/nouveau_channel.c    +110 -76
drivers/gpu/drm/nouveau/nouveau_drv.c          +3  -4
drivers/gpu/drm/nouveau/nouveau_drv.h          +8 -15
drivers/gpu/drm/nouveau/nouveau_fence.c       +10  -0
drivers/gpu/drm/nouveau/nouveau_gem.c         +19  -9
drivers/gpu/drm/nouveau/nouveau_irq.c         +28 -14
drivers/gpu/drm/nouveau/nouveau_notifier.c     +5  -5
drivers/gpu/drm/nouveau/nouveau_object.c      +22 -12
drivers/gpu/drm/nouveau/nouveau_state.c        +4  -4
drivers/gpu/drm/nouveau/nv04_fifo.c            +2  -2
drivers/gpu/drm/nouveau/nv04_graph.c           +2  -2
drivers/gpu/drm/nouveau/nv10_fifo.c            +1  -1
drivers/gpu/drm/nouveau/nv10_graph.c           +2  -2
drivers/gpu/drm/nouveau/nv40_fifo.c            +1  -1
drivers/gpu/drm/nouveau/nv40_graph.c           +1  -1
drivers/gpu/drm/nouveau/nv50_fb.c              +5  -1
drivers/gpu/drm/nouveau/nv50_fifo.c            +5  -4
drivers/gpu/drm/nouveau/nv50_graph.c           +1  -1
drivers/gpu/drm/nouveau/nv50_instmem.c         +7  -7
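
The heart of the patch is the reference-counted channel lookup introduced in nouveau_channel.c and declared in nouveau_drv.h below: nouveau_channel_get() takes the new dev_priv->channels.lock, bumps chan->refcount and returns with chan->mutex held, while nouveau_channel_put() drops both and tears the channel down once the last reference goes away. A minimal sketch of how a caller is expected to use the pair after this patch (the surrounding function and the hard-coded channel id are illustrative only, not part of the diff):

static int example_channel_op(struct drm_device *dev, struct drm_file *file_priv)
{
	struct nouveau_channel *chan;
	int ret;

	/* looks the id up in dev_priv->channels.ptr[] under channels.lock,
	 * takes a reference and returns with chan->mutex held */
	chan = nouveau_channel_get(dev, file_priv, 0 /* illustrative id */);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = 0;
	/* ... operate on the channel while it is locked ... */

	/* drops chan->mutex and the reference; the channel is freed once
	 * the last reference is put */
	nouveau_channel_put(&chan);
	return ret;
}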

drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -107,54 +107,54 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
 int
 nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
-		      uint32_t vram_handle, uint32_t tt_handle)
+		      uint32_t vram_handle, uint32_t gart_handle)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
-	int channel, user;
-	int ret;
-
-	/*
-	 * Alright, here is the full story
-	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
-	 * no complicated crash-prone context switches)
-	 * We allocate a new context for each app and let it write to it
-	 * directly (woo, full userspace command submission !)
-	 * When there are no more contexts, you lost
-	 */
-	for (channel = 0; channel < pfifo->channels; channel++) {
-		if (dev_priv->fifos[channel] == NULL)
-			break;
-	}
-
-	/* no more fifos. you lost. */
-	if (channel == pfifo->channels)
-		return -EINVAL;
+	unsigned long flags;
+	int user, ret;
 
-	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
-					   GFP_KERNEL);
-	if (!dev_priv->fifos[channel])
+	/* allocate and lock channel structure */
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
 		return -ENOMEM;
-	chan = dev_priv->fifos[channel];
-	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
-	INIT_LIST_HEAD(&chan->fence.pending);
 	chan->dev = dev;
-	chan->id = channel;
 	chan->file_priv = file_priv;
 	chan->vram_handle = vram_handle;
-	chan->gart_handle = tt_handle;
-
-	NV_INFO(dev, "Allocating FIFO number %d\n", channel);
+	chan->gart_handle = gart_handle;
+
+	atomic_set(&chan->refcount, 1);
+	mutex_init(&chan->mutex);
+	mutex_lock(&chan->mutex);
+
+	/* allocate hw channel id */
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
+		if (!dev_priv->channels.ptr[chan->id]) {
+			dev_priv->channels.ptr[chan->id] = chan;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+	if (chan->id == pfifo->channels) {
+		mutex_unlock(&chan->mutex);
+		kfree(chan);
+		return -ENODEV;
+	}
+
+	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
+	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+	INIT_LIST_HEAD(&chan->fence.pending);
 
 	/* Allocate DMA push buffer */
 	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
 	if (!chan->pushbuf_bo) {
 		ret = -ENOMEM;
 		NV_ERROR(dev, "pushbuf %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
@@ -162,18 +162,18 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	/* Locate channel's user control regs */
 	if (dev_priv->card_type < NV_40)
-		user = NV03_USER(channel);
+		user = NV03_USER(chan->id);
 	else
 	if (dev_priv->card_type < NV_50)
-		user = NV40_USER(channel);
+		user = NV40_USER(chan->id);
 	else
-		user = NV50_USER(channel);
+		user = NV50_USER(chan->id);
 
 	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user, PAGE_SIZE);
 	if (!chan->user) {
 		NV_ERROR(dev, "ioremap of regs failed.\n");
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return -ENOMEM;
 	}
 	chan->user_put = 0x40;
@@ -183,15 +183,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	ret = nouveau_notifier_init_channel(chan);
 	if (ret) {
 		NV_ERROR(dev, "ntfy %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	/* Setup channel's default objects */
-	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
 	if (ret) {
 		NV_ERROR(dev, "gpuobj %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
@@ -199,7 +199,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
 	if (ret) {
 		NV_ERROR(dev, "pbctxdma %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
@@ -209,14 +209,14 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	/* Create a graphics context for new channel */
 	ret = pgraph->create_context(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	/* Construct inital RAMFC for new channel */
 	ret = pfifo->create_context(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
@@ -226,33 +226,70 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	if (!ret)
 		ret = nouveau_fence_channel_init(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	nouveau_debugfs_channel_init(chan);
 
-	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
+	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
 	*chan_ret = chan;
 	return 0;
 }
 
-/* stops a fifo */
+struct nouveau_channel *
+nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = ERR_PTR(-ENODEV);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	chan = dev_priv->channels.ptr[id];
+
+	if (unlikely(!chan || atomic_read(&chan->refcount) == 0)) {
+		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (unlikely(file_priv && chan->file_priv != file_priv)) {
+		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+		return ERR_PTR(-EINVAL);
+	}
+
+	atomic_inc(&chan->refcount);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+	mutex_lock(&chan->mutex);
+	return chan;
+}
+
 void
-nouveau_channel_free(struct nouveau_channel *chan)
+nouveau_channel_put(struct nouveau_channel **pchan)
 {
+	struct nouveau_channel *chan = *pchan;
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	unsigned long flags;
 	int ret;
 
-	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
+	/* unlock the channel */
+	mutex_unlock(&chan->mutex);
+
+	/* decrement the refcount, and we're done if there's still refs */
+	if (likely(!atomic_dec_and_test(&chan->refcount))) {
+		*pchan = NULL;
+		return;
+	}
 
+	/* noone wants the channel anymore */
+	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
 	nouveau_debugfs_channel_fini(chan);
+	*pchan = NULL;
 
-	/* Give outstanding push buffers a chance to complete */
+	/* give it chance to idle */
 	nouveau_fence_update(chan);
 	if (chan->fence.sequence != chan->fence.sequence_ack) {
 		struct nouveau_fence *fence = NULL;
@@ -267,13 +304,13 @@ nouveau_channel_free(struct nouveau_channel *chan)
 			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
 	}
 
-	/* Ensure all outstanding fences are signaled.  They should be if the
+	/* ensure all outstanding fences are signaled.  they should be if the
 	 * above attempts at idling were OK, but if we failed this'll tell TTM
 	 * we're done with the buffers.
 	 */
 	nouveau_fence_channel_fini(chan);
 
-	/* This will prevent pfifo from switching channels. */
+	/* boot it off the hardware */
 	pfifo->reassign(dev, false);
 
 	/* We want to give pgraph a chance to idle and get rid of all potential
@@ -302,7 +339,14 @@ nouveau_channel_free(struct nouveau_channel *chan)
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
-	/* Release the channel's resources */
+	/* aside from its resources, the channel should now be dead,
+	 * remove it from the channel list
+	 */
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	dev_priv->channels.ptr[chan->id] = NULL;
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+	/* destroy any resources the channel owned */
 	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
 	if (chan->pushbuf_bo) {
 		nouveau_bo_unmap(chan->pushbuf_bo);
@@ -314,7 +358,6 @@ nouveau_channel_free(struct nouveau_channel *chan)
 	if (chan->user)
 		iounmap(chan->user);
 
-	dev_priv->fifos[chan->id] = NULL;
 	kfree(chan);
 }
@@ -324,31 +367,20 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_channel *chan;
 	int i;
 
 	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
 	for (i = 0; i < engine->fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		chan = nouveau_channel_get(dev, file_priv, i);
+		if (IS_ERR(chan))
+			continue;
 
-		if (chan && chan->file_priv == file_priv)
-			nouveau_channel_free(chan);
+		atomic_dec(&chan->refcount);
+		nouveau_channel_put(&chan);
 	}
 }
 
-int
-nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
-		      int channel)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-
-	if (channel >= engine->fifo.channels)
-		return 0;
-	if (dev_priv->fifos[channel] == NULL)
-		return 0;
-
-	return (dev_priv->fifos[channel]->file_priv == file_priv);
-}
-
 /***********************************
  * ioctls wrapping the functions
@@ -396,24 +428,26 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
 	/* Named memory object area */
 	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
 				    &init->notifier_handle);
-	if (ret) {
-		nouveau_channel_free(chan);
-		return ret;
-	}
 
-	return 0;
+	if (ret == 0)
+		atomic_inc(&chan->refcount); /* userspace reference */
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 static int
 nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
-	struct drm_nouveau_channel_free *cfree = data;
+	struct drm_nouveau_channel_free *req = data;
 	struct nouveau_channel *chan;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
-	nouveau_channel_free(chan);
+	atomic_dec(&chan->refcount);
+	nouveau_channel_put(&chan);
 	return 0;
 }

drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -195,9 +195,8 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 	for (i = 0; i < pfifo->channels; i++) {
 		struct nouveau_fence *fence = NULL;
 
-		chan = dev_priv->fifos[i];
-		if (!chan || (dev_priv->card_type >= NV_50 &&
-			      chan == dev_priv->fifos[0]))
+		chan = dev_priv->channels.ptr[i];
+		if (!chan || !chan->pushbuf_bo)
 			continue;
 
 		ret = nouveau_fence_new(chan, &fence, true);
@@ -313,7 +312,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
 		int j;
 
 		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			chan = dev_priv->fifos[i];
+			chan = dev_priv->channels.ptr[i];
 			if (!chan || !chan->pushbuf_bo)
 				continue;

drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -166,6 +166,7 @@ struct nouveau_channel {
 	struct drm_device *dev;
 	int id;
+	atomic_t refcount;
+	struct mutex mutex;
 
 	/* owner of this fifo */
@@ -607,8 +608,10 @@ struct drm_nouveau_private {
 		struct nouveau_bo *bo;
 	} fence;
 
-	int fifo_alloc_count;
-	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
+	struct {
+		spinlock_t lock;
+		struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
+	} channels;
 
 	struct nouveau_engine engine;
 	struct nouveau_channel *channel;
@@ -721,16 +724,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
 	return 0;
 }
 
-#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do {    \
-	struct drm_nouveau_private *nv = dev->dev_private;       \
-	if (!nouveau_channel_owner(dev, (cl), (id))) {            \
-		NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
-			 DRM_CURRENTPID, (id));                   \
-		return -EPERM;                                    \
-	}                                                         \
-	(ch) = nv->fifos[(id)];                                   \
-} while (0)
-
 /* nouveau_drv.c */
 extern int nouveau_agpmode;
 extern int nouveau_duallink;
@@ -805,13 +798,13 @@ extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
 extern struct drm_ioctl_desc nouveau_ioctls[];
 extern int nouveau_max_ioctl;
 extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
-extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
-				 int channel);
 extern int nouveau_channel_alloc(struct drm_device *dev,
 				 struct nouveau_channel **chan,
 				 struct drm_file *file_priv,
 				 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
-extern void nouveau_channel_free(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
+extern void nouveau_channel_put(struct nouveau_channel **);
 
 /* nouveau_object.c */
 extern int nouveau_gpuobj_early_init(struct drm_device *);
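
Everywhere below that still needs to walk the whole channel list (the IRQ handlers, the per-generation FIFO init code, the NV50 VM trap decoder) now follows the same pattern: take the new dev_priv->channels.lock spinlock, index channels.ptr[] instead of the old fifos[] array, and drop the lock before doing anything that might sleep. A condensed sketch of that pattern, with the loop body left illustrative:

	struct nouveau_channel *chan;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan)
			continue;
		/* ... inspect the channel; no sleeping while the lock is held ... */
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);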

drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -393,8 +393,18 @@ nouveau_fence_sync(struct nouveau_fence *fence,
 		return nouveau_fence_wait(fence, NULL, false, false);
 	}
 
+	/* try to take wchan's mutex, if we can't take it right away
+	 * we have to fallback to software sync to prevent locking
+	 * order issues
+	 */
+	if (!mutex_trylock(&wchan->mutex)) {
+		free_semaphore(&sema->ref);
+		return nouveau_fence_wait(fence, NULL, false, false);
+	}
+
 	/* Make wchan wait until it gets signalled */
 	ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
+	mutex_unlock(&wchan->mutex);
 	if (ret)
 		goto out;

drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -146,11 +146,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
 		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
 
-	if (req->channel_hint) {
-		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
-						     file_priv, chan);
-	}
-
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
 		flags |= TTM_PL_FLAG_VRAM;
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
@@ -161,10 +156,18 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
 		return -EINVAL;
 
+	if (req->channel_hint) {
+		chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
+		if (IS_ERR(chan))
+			return PTR_ERR(chan);
+	}
+
 	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
 			      req->info.tile_mode, req->info.tile_flags, false,
 			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
 			      &nvbo);
+	if (chan)
+		nouveau_channel_put(&chan);
 	if (ret)
 		return ret;
@@ -341,9 +344,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 			return -EINVAL;
 		}
 
-		mutex_unlock(&drm_global_mutex);
 		ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-		mutex_lock(&drm_global_mutex);
 		if (ret) {
 			NV_ERROR(dev, "fail wait_cpu\n");
 			return ret;
@@ -585,7 +586,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	struct nouveau_fence *fence = NULL;
 	int i, j, ret = 0, do_reloc = 0;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	req->vram_available = dev_priv->fb_aper_free;
 	req->gart_available = dev_priv->gart_info.aper_free;
@@ -595,28 +598,34 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
 		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
 		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
 		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
-	if (IS_ERR(push))
+	if (IS_ERR(push)) {
+		nouveau_channel_put(&chan);
 		return PTR_ERR(push);
+	}
 
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 	if (IS_ERR(bo)) {
 		kfree(push);
+		nouveau_channel_put(&chan);
 		return PTR_ERR(bo);
 	}
@@ -750,6 +759,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		req->suffix1 = 0x00000000;
 	}
 
+	nouveau_channel_put(&chan);
 	return ret;
 }

drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -113,15 +113,17 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = NULL;
 	struct nouveau_gpuobj *obj;
+	unsigned long flags;
 	const int subc = (addr >> 13) & 0x7;
 	const int mthd = addr & 0x1ffc;
 	bool handled = false;
 	u32 engine;
 
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
 	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
-		chan = dev_priv->fifos[chid];
+		chan = dev_priv->channels.ptr[chid];
 	if (unlikely(!chan))
-		return false;
+		goto out;
 
 	switch (mthd) {
 	case 0x0000: /* bind object to subchannel */
@@ -146,6 +148,8 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
 		break;
 	}
 
+out:
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 	return handled;
 }
@@ -398,6 +402,8 @@ static int
 nouveau_graph_chid_from_grctx(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	unsigned long flags;
 	uint32_t inst;
 	int i;
@@ -407,27 +413,29 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
 	if (dev_priv->card_type < NV_50) {
 		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
 
+		spin_lock_irqsave(&dev_priv->channels.lock, flags);
 		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			struct nouveau_channel *chan = dev_priv->fifos[i];
-
+			chan = dev_priv->channels.ptr[i];
 			if (!chan || !chan->ramin_grctx)
 				continue;
 
 			if (inst == chan->ramin_grctx->pinst)
 				break;
 		}
+		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 	} else {
 		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
 
+		spin_lock_irqsave(&dev_priv->channels.lock, flags);
 		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			struct nouveau_channel *chan = dev_priv->fifos[i];
-
+			chan = dev_priv->channels.ptr[i];
 			if (!chan || !chan->ramin)
 				continue;
 
 			if (inst == chan->ramin->vinst)
 				break;
 		}
+		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 	}
@@ -449,7 +457,8 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
 	else
 		channel = nouveau_graph_chid_from_grctx(dev);
 
-	if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
+	if (channel >= engine->fifo.channels ||
+	    !dev_priv->channels.ptr[channel]) {
 		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
 		return -EINVAL;
 	}
@@ -532,14 +541,19 @@ nouveau_pgraph_intr_swmthd(struct drm_device *dev,
 			   struct nouveau_pgraph_trap *trap)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	if (trap->channel > 0 &&
+	    trap->channel < dev_priv->engine.fifo.channels &&
+	    dev_priv->channels.ptr[trap->channel]) {
+		ret = nouveau_call_method(dev_priv->channels.ptr[trap->channel],
+					  trap->class, trap->mthd, trap->data);
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
-	if (trap->channel < 0 ||
-	    trap->channel >= dev_priv->engine.fifo.channels ||
-	    !dev_priv->fifos[trap->channel])
-		return -ENODEV;
-
-	return nouveau_call_method(dev_priv->fifos[trap->channel],
-				   trap->class, trap->mthd, trap->data);
+	return ret;
 }
 
 static inline void

drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -185,11 +185,11 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
 	struct nouveau_channel *chan;
 	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, na->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
-	if (ret)
-		return ret;
-
-	return 0;
+	nouveau_channel_put(&chan);
+	return ret;
 }

drivers/gpu/drm/nouveau/nouveau_object.c
@@ -876,8 +876,6 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 	struct nouveau_channel *chan;
 	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
-
 	if (init->handle == ~0)
 		return -EINVAL;
@@ -893,8 +891,14 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 		return -EPERM;
 	}
 
-	if (nouveau_ramht_find(chan, init->handle))
-		return -EEXIST;
+	chan = nouveau_channel_get(dev, file_priv, init->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
+
+	if (nouveau_ramht_find(chan, init->handle)) {
+		ret = -EEXIST;
+		goto out;
+	}
 
 	if (!grc->software)
 		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
@@ -903,7 +907,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 	if (ret) {
 		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
 			 ret, init->channel, init->handle);
-		return ret;
+		goto out;
 	}
 
 	ret = nouveau_ramht_insert(chan, init->handle, gr);
@@ -911,10 +915,11 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 	if (ret) {
 		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
 			 ret, init->channel, init->handle);
-		return ret;
 	}
 
-	return 0;
+out:
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
@@ -923,15 +928,20 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
 	struct drm_nouveau_gpuobj_free *objfree = data;
 	struct nouveau_gpuobj *gpuobj;
 	struct nouveau_channel *chan;
+	int ret = -ENOENT;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, objfree->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	gpuobj = nouveau_ramht_find(chan, objfree->handle);
-	if (!gpuobj)
-		return -ENOENT;
+	if (gpuobj) {
+		nouveau_ramht_remove(chan, objfree->handle);
+		ret = 0;
+	}
 
-	nouveau_ramht_remove(chan, objfree->handle);
-	return 0;
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 u32

drivers/gpu/drm/nouveau/nouveau_state.c
@@ -516,11 +516,11 @@ nouveau_card_init_channel(struct drm_device *dev)
 	if (ret)
 		goto out_err;
 
+	mutex_unlock(&dev_priv->channel->mutex);
 	return 0;
 
 out_err:
-	nouveau_channel_free(dev_priv->channel);
-	dev_priv->channel = NULL;
+	nouveau_channel_put(&dev_priv->channel);
 	return ret;
 }
@@ -567,6 +567,7 @@ nouveau_card_init(struct drm_device *dev)
 	if (ret)
 		goto out;
 	engine = &dev_priv->engine;
+	spin_lock_init(&dev_priv->channels.lock);
 	spin_lock_init(&dev_priv->context_switch_lock);
 
 	/* Make the CRTCs and I2C buses accessible */
@@ -713,8 +714,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
 	if (!engine->graph.accel_blocked) {
 		nouveau_fence_fini(dev);
-		nouveau_channel_free(dev_priv->channel);
-		dev_priv->channel = NULL;
+		nouveau_channel_put(&dev_priv->channel);
 	}
 
 	if (!nouveau_noaccel) {

drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -208,7 +208,7 @@ nv04_fifo_unload_context(struct drm_device *dev)
 	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
 		return 0;
 
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (!chan) {
 		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
 		return -EINVAL;
@@ -289,7 +289,7 @@ nv04_fifo_init(struct drm_device *dev)
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}

drivers/gpu/drm/nouveau/nv04_graph.c
@@ -357,7 +357,7 @@ nv04_graph_channel(struct drm_device *dev)
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
 void
@@ -376,7 +376,7 @@ nv04_graph_context_switch(struct drm_device *dev)
 	/* Load context for next channel */
 	chid = dev_priv->engine.fifo.channel_id(dev);
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan)
 		nv04_graph_load_context(chan);

drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -241,7 +241,7 @@ nv10_fifo_init(struct drm_device *dev)
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}

drivers/gpu/drm/nouveau/nv10_graph.c
@@ -802,7 +802,7 @@ nv10_graph_context_switch(struct drm_device *dev)
 	/* Load context for next channel */
 	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan && chan->pgraph_ctx)
 		nv10_graph_load_context(chan);
@@ -833,7 +833,7 @@ nv10_graph_channel(struct drm_device *dev)
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
 int
 nv10_graph_create_context(struct nouveau_channel *chan)

drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -301,7 +301,7 @@ nv40_fifo_init(struct drm_device *dev)
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}

drivers/gpu/drm/nouveau/nv40_graph.c
@@ -42,7 +42,7 @@ nv40_graph_channel(struct drm_device *dev)
 	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 
 		if (chan && chan->ramin_grctx &&
 		    chan->ramin_grctx->pinst == inst)

drivers/gpu/drm/nouveau/nv50_fb.c
@@ -42,6 +42,7 @@ void
 nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 	u32 trap[6], idx, chinst;
 	int i, ch;
@@ -60,8 +61,10 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
 		return;
 	chinst = (trap[2] << 16) | trap[1];
 
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
 	for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
-		struct nouveau_channel *chan = dev_priv->fifos[ch];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
 
 		if (!chan || !chan->ramin)
 			continue;
@@ -69,6 +72,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
 		if (chinst == chan->ramin->vinst >> 12)
 			break;
 	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
 	NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
 		     "channel %d (0x%08x)\n",

drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -44,7 +44,8 @@ nv50_fifo_playlist_update(struct drm_device *dev)
 	/* We never schedule channel 0 or 127 */
 	for (i = 1, nr = 0; i < 127; i++) {
-		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
+		if (dev_priv->channels.ptr[i] &&
+		    dev_priv->channels.ptr[i]->ramfc) {
 			nv_wo32(cur, (nr * 4), i);
 			nr++;
 		}
@@ -60,7 +61,7 @@ static void
 nv50_fifo_channel_enable(struct drm_device *dev, int channel)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->fifos[channel];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
 	uint32_t inst;
 
 	NV_DEBUG(dev, "ch%d\n", channel);
@@ -118,7 +119,7 @@ nv50_fifo_init_context_table(struct drm_device *dev)
 	NV_DEBUG(dev, "\n");
 
 	for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
-		if (dev_priv->fifos[i])
+		if (dev_priv->channels.ptr[i])
 			nv50_fifo_channel_enable(dev, i);
 		else
 			nv50_fifo_channel_disable(dev, i);
@@ -392,7 +393,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
 	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
 		return 0;
 
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (!chan) {
 		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
 		return -EINVAL;

drivers/gpu/drm/nouveau/nv50_graph.c
@@ -190,7 +190,7 @@ nv50_graph_channel(struct drm_device *dev)
 	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 
 		if (chan && chan->ramin && chan->ramin->vinst == inst)
 			return chan;

drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -131,10 +131,10 @@ nv50_instmem_init(struct drm_device *dev)
 	}
 
 	/* we need a channel to plug into the hw to control the BARs */
-	ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]);
+	ret = nv50_channel_new(dev, 128*1024, &dev_priv->channels.ptr[0]);
 	if (ret)
 		return ret;
-	chan = dev_priv->fifos[127] = dev_priv->fifos[0];
+	chan = dev_priv->channels.ptr[127] = dev_priv->channels.ptr[0];
 
 	/* allocate page table for PRAMIN BAR */
 	ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8,
@@ -240,7 +240,7 @@ nv50_instmem_takedown(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
 	int i;
 
 	NV_DEBUG(dev, "\n");
@@ -264,8 +264,8 @@ nv50_instmem_takedown(struct drm_device *dev)
 			nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
 		dev_priv->vm_vram_pt_nr = 0;
 
-		nv50_channel_del(&dev_priv->fifos[0]);
-		dev_priv->fifos[127] = NULL;
+		nv50_channel_del(&dev_priv->channels.ptr[0]);
+		dev_priv->channels.ptr[127] = NULL;
 	}
 
 	dev_priv->engine.instmem.priv = NULL;
@@ -276,7 +276,7 @@ int
 nv50_instmem_suspend(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
 	struct nouveau_gpuobj *ramin = chan->ramin;
 	int i;
@@ -294,7 +294,7 @@ nv50_instmem_resume(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
 	struct nouveau_gpuobj *ramin = chan->ramin;
 	int i;