Commit d1b167e1 authored by Ben Skeggs

drm/nouveau/ttm: untangle code to support accelerated buffer moves

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Parent 78df3a1c
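The heart of this change is the new nouveau_bo_move_init(): instead of picking a copy path with an if/else chain on dev_priv->card_type inside nouveau_bo_move_m2mf(), the driver now walks a priority-ordered table of { name, object class, exec, init } entries, and the first entry whose class can be created and whose init succeeds is recorded in dev_priv->ttm.move; if none succeed, nouveau_bo_move() falls back to ttm_bo_move_memcpy(). The sketch below mirrors only that selection loop as a self-contained C program so the control flow is easy to follow; struct channel, create_object() and the probe functions are hypothetical stand-ins, not the real nouveau or TTM APIs.

/*
 * Standalone sketch (not nouveau/TTM code) of the selection pattern the
 * patch introduces in nouveau_bo_move_init(): walk a priority-ordered table
 * of copy methods, keep the first one whose object creation and init both
 * succeed, and fall back to a CPU copy when none do.  struct channel,
 * create_object() and the init/exec probe functions are hypothetical
 * stand-ins used only to make the control flow compile on its own.
 */
#include <stdio.h>

struct channel;				/* stand-in for struct nouveau_channel */
struct buffer;				/* stand-in for struct ttm_buffer_object */

typedef int (*move_fn)(struct channel *, struct buffer *);
typedef int (*init_fn)(struct channel *, unsigned int handle);

/* Pretend the newest class (COPY, 0xa0b5) is not supported by this board. */
static int create_object(struct channel *chan, unsigned int oclass)
{
	(void)chan;
	return oclass == 0xa0b5 ? -1 : 0;
}

static int copy_init(struct channel *c, unsigned int h) { (void)c; (void)h; return 0; }
static int m2mf_init(struct channel *c, unsigned int h) { (void)c; (void)h; return 0; }
static int copy_exec(struct channel *c, struct buffer *b) { (void)c; (void)b; return 0; }
static int m2mf_exec(struct channel *c, struct buffer *b) { (void)c; (void)b; return 0; }

static move_fn selected_move;		/* plays the role of dev_priv->ttm.move */

static void select_move_method(struct channel *chan)
{
	static const struct {
		const char  *name;
		unsigned int oclass;
		move_fn      exec;
		init_fn      init;
	} methods[] = {
		{ "COPY", 0xa0b5, copy_exec, copy_init },	/* preferred engine first */
		{ "M2MF", 0x9039, m2mf_exec, m2mf_init },
		{ NULL, 0, NULL, NULL }				/* sentinel ends the walk */
	}, *mthd = methods;
	const char *name = "CPU";

	do {
		if (create_object(chan, mthd->oclass) == 0 &&
		    mthd->init(chan, mthd->oclass) == 0) {
			selected_move = mthd->exec;		/* first working method wins */
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	/* With no method selected, callers use a memcpy-style CPU fallback. */
	printf("MM: using %s for buffer copies\n", name);
}

int main(void)
{
	select_move_method(NULL);
	return selected_move ? 0 : 1;
}

In the actual patch the probe step is nouveau_gpuobj_gr_new(), which is presumably why the "illegal object class" error print is dropped from nouveau_object.c further down: failing to create a class is now an expected outcome while probing.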
@@ -36,6 +36,7 @@
 #include "nouveau_mm.h"
 #include "nouveau_vm.h"
 #include "nouveau_fence.h"
+#include "nouveau_ramht.h"
 #include <linux/log2.h>
 #include <linux/slab.h>
@@ -510,6 +511,17 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	return ret;
 }

+static int
+nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = RING_SPACE(chan, 2);
+	if (ret == 0) {
+		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+		OUT_RING (chan, handle);
+	}
+	return ret;
+}
+
 static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
@@ -528,17 +540,17 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;

-		BEGIN_NVC0(chan, NvSubM2MF, 0x0238, 2);
+		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
 		OUT_RING (chan, upper_32_bits(dst_offset));
 		OUT_RING (chan, lower_32_bits(dst_offset));
-		BEGIN_NVC0(chan, NvSubM2MF, 0x030c, 6);
+		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
 		OUT_RING (chan, upper_32_bits(src_offset));
 		OUT_RING (chan, lower_32_bits(src_offset));
 		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
 		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
 		OUT_RING (chan, PAGE_SIZE); /* line_length */
 		OUT_RING (chan, line_count);
-		BEGIN_NVC0(chan, NvSubM2MF, 0x0300, 1);
+		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
 		OUT_RING (chan, 0x00100110);

 		page_count -= line_count;
@@ -549,6 +561,28 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	return 0;
 }
+static int
+nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+					 &chan->m2mf_ntfy);
+	if (ret == 0) {
+		ret = RING_SPACE(chan, 6);
+		if (ret == 0) {
+			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+			OUT_RING (chan, handle);
+			BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+			OUT_RING (chan, NvNotify0);
+			OUT_RING (chan, NvDmaFB);
+			OUT_RING (chan, NvDmaFB);
+		} else {
+			nouveau_ramht_remove(chan, NvNotify0);
+		}
+	}
+
+	return ret;
+}
+
 static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
@@ -573,7 +607,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 			if (ret)
 				return ret;

-			BEGIN_NV04(chan, NvSubM2MF, 0x0200, 7);
+			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
 			OUT_RING (chan, 0);
 			OUT_RING (chan, 0);
 			OUT_RING (chan, stride);
@@ -586,7 +620,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 			if (ret)
 				return ret;

-			BEGIN_NV04(chan, NvSubM2MF, 0x0200, 1);
+			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
 			OUT_RING (chan, 1);
 		}
 		if (old_mem->mem_type == TTM_PL_VRAM &&
@@ -595,7 +629,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 			if (ret)
 				return ret;

-			BEGIN_NV04(chan, NvSubM2MF, 0x021c, 7);
+			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
 			OUT_RING (chan, 0);
 			OUT_RING (chan, 0);
 			OUT_RING (chan, stride);
@@ -608,7 +642,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 			if (ret)
 				return ret;

-			BEGIN_NV04(chan, NvSubM2MF, 0x021c, 1);
+			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
 			OUT_RING (chan, 1);
 		}
@@ -616,10 +650,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;

-		BEGIN_NV04(chan, NvSubM2MF, 0x0238, 2);
+		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
 		OUT_RING (chan, upper_32_bits(src_offset));
 		OUT_RING (chan, upper_32_bits(dst_offset));
-		BEGIN_NV04(chan, NvSubM2MF, 0x030c, 8);
+		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
 		OUT_RING (chan, lower_32_bits(src_offset));
 		OUT_RING (chan, lower_32_bits(dst_offset));
 		OUT_RING (chan, stride);
@@ -628,7 +662,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		OUT_RING (chan, height);
 		OUT_RING (chan, 0x00000101);
 		OUT_RING (chan, 0x00000000);
-		BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
 		OUT_RING (chan, 0);

 		length -= amount;
@@ -639,6 +673,24 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	return 0;
 }
+static int
+nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+					 &chan->m2mf_ntfy);
+	if (ret == 0) {
+		ret = RING_SPACE(chan, 4);
+		if (ret == 0) {
+			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+			OUT_RING (chan, handle);
+			BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+			OUT_RING (chan, NvNotify0);
+		}
+	}
+
+	return ret;
+}
+
 static inline uint32_t
 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
 		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
@@ -661,7 +713,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	if (ret)
 		return ret;

-	BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
 	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
 	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
@@ -673,7 +725,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;

-		BEGIN_NV04(chan, NvSubM2MF,
+		BEGIN_NV04(chan, NvSubCopy,
 			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
 		OUT_RING (chan, src_offset);
 		OUT_RING (chan, dst_offset);
@@ -683,7 +735,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		OUT_RING (chan, line_count);
 		OUT_RING (chan, 0x00000101);
 		OUT_RING (chan, 0x00000000);
-		BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
 		OUT_RING (chan, 0);

 		page_count -= line_count;
@@ -743,16 +795,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		goto out;
 	}

-	if (dev_priv->card_type < NV_50)
-		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-	else
-	if (dev_priv->card_type < NV_C0)
-		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-	else
-	if (dev_priv->card_type < NV_E0)
-		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-	else
-		ret = nve0_bo_move_copy(chan, bo, &bo->mem, new_mem);
+	ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
 	if (ret == 0) {
 		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
 						    no_wait_reserve,
@@ -764,6 +807,42 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	return ret;
 }
+void
+nouveau_bo_move_init(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	static const struct {
+		const char *name;
+		u32 oclass;
+		int (*exec)(struct nouveau_channel *,
+			    struct ttm_buffer_object *,
+			    struct ttm_mem_reg *, struct ttm_mem_reg *);
+		int (*init)(struct nouveau_channel *, u32 handle);
+	} _methods[] = {
+		{ "COPY", 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+		{ "M2MF", 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
+		{ "M2MF", 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
+		{ "M2MF", 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
+		{}
+	}, *mthd = _methods;
+	const char *name = "CPU";
+	int ret;
+
+	do {
+		ret = nouveau_gpuobj_gr_new(chan, mthd->oclass, mthd->oclass);
+		if (ret == 0) {
+			ret = mthd->init(chan, mthd->oclass);
+			if (ret == 0) {
+				dev_priv->ttm.move = mthd->exec;
+				name = mthd->name;
+				break;
+			}
+		}
+	} while ((++mthd)->exec);
+
+	NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
+}
+
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 		      bool no_wait_reserve, bool no_wait_gpu,
@@ -920,8 +999,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 		goto out;
 	}

-	/* Software copy if the card isn't up and running yet. */
-	if (!dev_priv->channel) {
+	/* CPU copy if we have no accelerated method available */
+	if (!dev_priv->ttm.move) {
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 		goto out;
 	}
......
@@ -355,7 +355,7 @@ nouveau_channel_ref(struct nouveau_channel *chan,
 	*pchan = chan;
 }

-void
+int
 nouveau_channel_idle(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
@@ -370,6 +370,7 @@ nouveau_channel_idle(struct nouveau_channel *chan)
 	if (ret)
 		NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	return ret;
 }

 /* cleans up all the fifos from file_priv */
......
@@ -48,13 +48,12 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,

 /* Hardcoded object assignments to subchannels (subchannel id). */
 enum {
-	NvSubM2MF	= 0,
+	NvSubCtxSurf2D	= 0,
 	NvSubSw		= 1,
-	NvSubCtxSurf2D	= 2,
+	NvSubImageBlit	= 2,
 	NvSub2D		= 3,
 	NvSubGdiRect	= 3,
 	NvSubCopy	= 4,
-	NvSubImageBlit	= 4
 };

 /* Object handles. */
@@ -74,6 +73,7 @@ enum {
 	NvSema		= 0x8000000f,
 	NvEvoSema0	= 0x80000010,
 	NvEvoSema1	= 0x80000011,
+	NvNotify1	= 0x80000012,

 	/* G80+ display objects */
 	NvEvoVRAM	= 0x01000000,
......
@@ -693,6 +693,9 @@ struct drm_nouveau_private {
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
 		atomic_t validate_sequence;
+		int (*move)(struct nouveau_channel *,
+			    struct ttm_buffer_object *,
+			    struct ttm_mem_reg *, struct ttm_mem_reg *);
 	} ttm;

 	struct {
@@ -930,7 +933,7 @@ extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
 extern void nouveau_channel_put(struct nouveau_channel **);
 extern void nouveau_channel_ref(struct nouveau_channel *chan,
 				struct nouveau_channel **pchan);
-extern void nouveau_channel_idle(struct nouveau_channel *chan);
+extern int nouveau_channel_idle(struct nouveau_channel *chan);

 /* nouveau_object.c */
 #define NVOBJ_ENGINE_ADD(d, e, p) do { \
@@ -1322,6 +1325,7 @@ extern int nv04_crtc_create(struct drm_device *, int index);

 /* nouveau_bo.c */
 extern struct ttm_bo_driver nouveau_bo_driver;
+extern void nouveau_bo_move_init(struct nouveau_channel *);
 extern int nouveau_bo_new(struct drm_device *, int size, int align,
 			  uint32_t flags, uint32_t tile_mode,
 			  uint32_t tile_flags,
......
@@ -508,7 +508,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
 		return eng->object_new(chan, oc->engine, handle, class);
 	}

-	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
 	return -EINVAL;
 }
......
@@ -509,73 +509,16 @@ nouveau_card_channel_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan;
-	int ret, oclass;
+	int ret;

 	ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
 	dev_priv->channel = chan;
 	if (ret)
 		return ret;
 	mutex_unlock(&dev_priv->channel->mutex);

-	if (dev_priv->card_type <= NV_50) {
-		if (dev_priv->card_type < NV_50)
-			oclass = 0x0039;
-		else
-			oclass = 0x5039;
-
-		ret = nouveau_gpuobj_gr_new(chan, NvM2MF, oclass);
-		if (ret)
-			goto error;
-
-		ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
-					     &chan->m2mf_ntfy);
-		if (ret)
-			goto error;
-
-		ret = RING_SPACE(chan, 6);
-		if (ret)
-			goto error;
-
-		BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
-		OUT_RING (chan, NvM2MF);
-		BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
-		OUT_RING (chan, NvNotify0);
-		OUT_RING (chan, chan->vram_handle);
-		OUT_RING (chan, chan->gart_handle);
-	} else
-	if (dev_priv->card_type <= NV_D0) {
-		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
-		if (ret)
-			goto error;
-
-		ret = RING_SPACE(chan, 2);
-		if (ret)
-			goto error;
-
-		BEGIN_NVC0(chan, NvSubM2MF, 0x0000, 1);
-		OUT_RING (chan, 0x00009039);
-	} else
-	if (dev_priv->card_type <= NV_E0) {
-		/* not used, but created to get a graph context */
-		ret = nouveau_gpuobj_gr_new(chan, 0xa040, 0xa040);
-		if (ret)
-			goto error;
-
-		/* bind strange copy engine to subchannel 4 (fixed...) */
-		ret = RING_SPACE(chan, 2);
-		if (ret)
-			goto error;
-
-		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING (chan, 0x0000a0b5);
-	}
-
-	FIRE_RING (chan);
-error:
-	if (ret)
-		nouveau_card_channel_fini(dev);
-	return ret;
+	nouveau_bo_move_init(chan);
+	return 0;
 }

 static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
......
@@ -201,8 +201,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
 	BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
 	OUT_RING(chan, Nv2D);
-	BEGIN_NV04(chan, NvSub2D, 0x0180, 4);
-	OUT_RING(chan, NvNotify0);
+	BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
 	OUT_RING(chan, chan->vram_handle);
 	OUT_RING(chan, chan->vram_handle);
 	OUT_RING(chan, chan->vram_handle);
......
@@ -822,7 +822,7 @@ nve0_graph_create(struct drm_device *dev)
 	NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
 	NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
 	NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
-	//NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
+	NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */

 	return 0;
 error:
......