gk104.c 29.6 KB
Newer Older
1
/*
2
 * Copyright 2012 Red Hat Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
24
#include "gk104.h"
25

26 27 28
#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
29
#include <core/handle.h>
30
#include <subdev/bar.h>
31
#include <subdev/fb.h>
32
#include <subdev/mmu.h>
33
#include <subdev/timer.h>
34

35 36
#include <nvif/class.h>
#include <nvif/unpack.h>
37

38
/* Table of host engines, indexed by runlist number.  Each entry records the
 * engine serviced by that runlist plus a mask of every engine sharing it
 * (on these chipsets GR shares its runlist with SW and CE2).
 */
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
	u64 subdev;	/* NVDEV_ENGINE_* index of the engine */
	u64 mask;	/* bitmask of all engines on this runlist */
} fifo_engine[] = {
	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
				 (1ULL << NVDEV_ENGINE_CE2)),
	_(NVDEV_ENGINE_MSPDEC  , 0),
	_(NVDEV_ENGINE_MSPPP   , 0),
	_(NVDEV_ENGINE_MSVLD   , 0),
	_(NVDEV_ENGINE_CE0     , 0),
	_(NVDEV_ENGINE_CE1     , 0),
	_(NVDEV_ENGINE_MSENC   , 0),
};
#undef _
/* Number of runlists/engine slots managed by this FIFO. */
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)

55 56
/* Per-runlist state: double-buffered runlist memory plus a waitqueue that
 * the runlist-update interrupt handler wakes (see gk104_fifo_intr_runlist).
 */
struct gk104_fifo_engn {
	struct nvkm_gpuobj *runlist[2];	/* double-buffered runlist objects */
	int cur_runlist;		/* which of runlist[] was last submitted */
	wait_queue_head_t wait;		/* woken when a runlist update completes */
};

61 62
/* Private state for the GK104 PFIFO engine. */
struct gk104_fifo_priv {
	struct nvkm_fifo base;

	struct work_struct fault;	/* deferred engine-recovery work */
	u64 mask;			/* engines pending recovery (by NVDEV engidx) */

	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
	struct {
		struct nvkm_gpuobj *mem;	/* backing store, 0x200 bytes/channel */
		struct nvkm_vma bar;		/* BAR mapping of the above */
	} user;
	int spoon_nr;			/* number of enabled PBDMA units */
};

75 76 77 78
/* Per-client FIFO context: instance block with its page directory and VM. */
struct gk104_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;	/* page directory object */
	struct nvkm_vm *vm;		/* address space referencing pgd */
};

81 82
/* A GPFIFO channel, tracking the runlist it belongs to and its life-cycle
 * state (KILLED channels are excluded from runlist rebuilds).
 */
struct gk104_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 engine;		/* index into fifo_engine[]/priv->engine[] */
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

91 92 93 94
/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

95
/* Rebuild and submit the runlist for the given runlist index.  Collects all
 * RUNNING channels bound to that runlist into the inactive runlist buffer,
 * submits it to hardware, and waits (up to 2s) for the update to complete.
 * Serialized by the subdev mutex.
 */
static void
gk104_fifo_runlist_update(struct gk104_fifo_priv *priv, u32 engine)
{
	struct nvkm_bar *bar = nvkm_bar(priv);
	struct gk104_fifo_engn *engn = &priv->engine[engine];
	struct nvkm_gpuobj *cur;
	int i, p;

	mutex_lock(&nv_subdev(priv)->mutex);
	/* Flip to the buffer not currently in use by hardware. */
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	/* One 8-byte entry per runnable channel on this runlist. */
	for (i = 0, p = 0; i < priv->base.max; i++) {
		struct gk104_fifo_chan *chan = (void *)priv->base.channel[i];
		if (chan && chan->state == RUNNING && chan->engine == engine) {
			nv_wo32(cur, p + 0, i);
			nv_wo32(cur, p + 4, 0x00000000);
			p += 8;
		}
	}
	bar->flush(bar);

	/* Submit: runlist base (4KiB-aligned) and entry count. */
	nv_wr32(priv, 0x002270, cur->addr >> 12);
	nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));

	/* engn->wait is woken by gk104_fifo_intr_runlist() when the
	 * pending bit clears.
	 */
	if (wait_event_timeout(engn->wait, !(nv_rd32(priv, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
				msecs_to_jiffies(2000)) == 0)
		nv_error(priv, "runlist %d update timeout\n", engine);
	mutex_unlock(&nv_subdev(priv)->mutex);
}

127
/* Bind an engine context to a channel's instance block.  Maps the context
 * into the channel's VM (first attach only) and writes the pointer at the
 * engine-specific offset.  SW needs no binding; the copy engines only need
 * the instance-block address.  Returns 0 or a negative errno.
 */
static int
gk104_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   :
		return 0;
	case NVDEV_ENGINE_CE0:
	case NVDEV_ENGINE_CE1:
	case NVDEV_ENGINE_CE2:
		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
		return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	/* Map the engine context into the channel VM once. */
	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	/* Write the context pointer (low word carries valid bit 2). */
	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	return 0;
}

168
/* Unbind an engine context from a channel.  Kicks the channel off the
 * engine first (register 0x002634), then clears the context pointer in the
 * instance block.  Returns -EBUSY on kick timeout during suspend.
 */
static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_priv *priv = (void *)parent->engine;
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct gk104_fifo_chan *chan = (void *)parent;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_CE0   :
	case NVDEV_ENGINE_CE1   :
	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;	/* nothing to clear */
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	/* Kick the channel; hardware echoes the chid back when done. */
	nv_wr32(priv, 0x002634, chan->base.chid);
	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
		nv_error(priv, "channel %d [%s] kick timeout\n",
			 chan->base.chid, nvkm_client_name(chan));
		/* Only fatal when suspending; otherwise carry on. */
		if (suspend)
			return -EBUSY;
	}

	if (addr) {
		nv_wo32(base, addr + 0x00, 0x00000000);
		nv_wo32(base, addr + 0x04, 0x00000000);
		bar->flush(bar);
	}

	return 0;
}

/* Constructor for a KEPLER_CHANNEL_GPFIFO_A channel object.  Validates the
 * ioctl arguments, picks the first requested engine that actually exists,
 * creates the base channel, and initializes the channel's instance block
 * (USERD pointer, GPFIFO base/length, and assorted hardware defaults).
 */
static int
gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_priv *priv = (void *)engine;
	struct gk104_fifo_base *base = (void *)parent;
	struct gk104_fifo_chan *chan;
	u64 usermem, ioffset, ilength;
	int ret, i;

	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
	/* NOTE: nvif_unpack() assigns 'ret' as a side effect; the bare
	 * "return ret" below returns the unpack error, not garbage.
	 */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
				 "ioffset %016llx ilength %08x engine %08x\n",
			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			 args->v0.ilength, args->v0.engine);
	} else
		return ret;

	/* Select the first requested engine that is present on this chip,
	 * and narrow args->v0.engine to just that bit.
	 */
	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		if (args->v0.engine & (1 << i)) {
			if (nvkm_engine(parent, fifo_engine[i].subdev)) {
				args->v0.engine = (1 << i);
				break;
			}
		}
	}

	if (i == FIFO_ENGINE_NR) {
		nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine);
		return -ENODEV;
	}

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       priv->user.bar.offset, 0x200,
				       args->v0.pushbuf,
				       fifo_engine[i].mask, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
	chan->engine = i;

	/* Each channel owns a 0x200-byte USERD slot; clear it. */
	usermem = chan->base.chid * 0x200;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	for (i = 0; i < 0x200; i += 4)
		nv_wo32(priv->user.mem, usermem + i, 0x00000000);

	/* Instance block setup: USERD address, GPFIFO base and log2 size,
	 * plus hardware defaults (values as used by the driver; exact
	 * register semantics are undocumented).
	 */
	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
	nv_wo32(base, 0x10, 0x0000face);
	nv_wo32(base, 0x30, 0xfffff902);
	nv_wo32(base, 0x48, lower_32_bits(ioffset));
	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base, 0x84, 0x20400000);
	nv_wo32(base, 0x94, 0x30000001);
	nv_wo32(base, 0x9c, 0x00000100);
	nv_wo32(base, 0xac, 0x0000001f);
	nv_wo32(base, 0xe8, chan->base.chid);
	nv_wo32(base, 0xb8, 0xf8000000);
	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	return 0;
}
284

285
/* Bring a channel online: point the channel register at its instance
 * block, mark it runnable, and rebuild the runlist.  The assignment inside
 * the condition transitions STOPPED -> RUNNING in one expression.
 */
static int
gk104_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gk104_fifo_priv *priv = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	/* Select the channel's runlist and set its instance pointer. */
	nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
	nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);

	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
		gk104_fifo_runlist_update(priv, chan->engine);
		nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	}

	return 0;
}
309

310
/* Take a channel offline: mark it stopped, remove it from the runlist,
 * then clear its channel register.  Mirrors gk104_fifo_chan_init().
 */
static int
gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo_priv *priv = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;

	/* RUNNING -> STOPPED transition folded into the condition. */
	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
		gk104_fifo_runlist_update(priv, chan->engine);
	}

	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
325

326 327 328 329 330 331 332 333 334 335
/* Object functions for GPFIFO channel objects; generic fifo-channel
 * helpers are used for everything but create/init/fini.
 */
static struct nvkm_ofuncs
gk104_fifo_ofuncs = {
	.ctor = gk104_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gk104_fifo_chan_init,
	.fini = gk104_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
337

338 339 340
/* Channel classes exposed to clients by this FIFO implementation. */
static struct nvkm_oclass
gk104_fifo_sclass[] = {
	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/
347

348
/* Construct a FIFO context: a 4KiB zeroed instance block containing a page
 * directory (written at 0x200/0x204 with address limits at 0x208/0x20c),
 * with the client's VM attached to that page directory.
 */
static int
gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gk104_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0208, 0xffffffff);
	nv_wo32(base, 0x020c, 0x000000ff);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

379
/* Destroy a FIFO context, releasing the VM reference and page directory
 * in reverse order of construction.
 */
static void
gk104_fifo_context_dtor(struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

388 389
/* Context class: per-channel instmem heap and VM setup. */
static struct nvkm_oclass
gk104_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xe0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_context_ctor,
		.dtor = gk104_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

405
static inline int
406
gk104_fifo_engidx(struct gk104_fifo_priv *priv, u32 engn)
407 408
{
	switch (engn) {
409 410 411 412 413 414 415 416
	case NVDEV_ENGINE_GR    :
	case NVDEV_ENGINE_CE2   : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	case NVDEV_ENGINE_MSENC : engn = 6; break;
417 418 419 420 421 422 423
	default:
		return -1;
	}

	return engn;
}

424 425
/* Resolve a runlist index to its engine object; NULL when out of range. */
static inline struct nvkm_engine *
gk104_fifo_engine(struct gk104_fifo_priv *priv, u32 engn)
{
	return (engn < ARRAY_SIZE(fifo_engine)) ?
	       nvkm_engine(priv, fifo_engine[engn].subdev) : NULL;
}

432
static void
433
gk104_fifo_recover_work(struct work_struct *work)
434
{
435 436
	struct gk104_fifo_priv *priv = container_of(work, typeof(*priv), fault);
	struct nvkm_object *engine;
437 438 439 440 441 442 443 444 445 446
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&priv->base.lock, flags);
	mask = priv->mask;
	priv->mask = 0ULL;
	spin_unlock_irqrestore(&priv->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
447
		engm |= 1 << gk104_fifo_engidx(priv, engn);
448 449 450
	nv_mask(priv, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
451
		if ((engine = (void *)nvkm_engine(priv, engn))) {
452 453 454
			nv_ofuncs(engine)->fini(engine, false);
			WARN_ON(nv_ofuncs(engine)->init(engine));
		}
455
		gk104_fifo_runlist_update(priv, gk104_fifo_engidx(priv, engn));
456 457 458 459 460 461 462
	}

	nv_wr32(priv, 0x00262c, engm);
	nv_mask(priv, 0x002630, engm, 0x00000000);
}

/* Begin recovery of a faulted channel: disable the channel, mark it
 * KILLED (so runlist rebuilds skip it), record the engine in priv->mask
 * and schedule the recovery worker.  Called from interrupt context.
 */
static void
gk104_fifo_recover(struct gk104_fifo_priv *priv, struct nvkm_engine *engine,
		  struct gk104_fifo_chan *chan)
{
	u32 chid = chan->base.chid;
	unsigned long flags;

	nv_error(priv, "%s engine fault on channel %d, recovering...\n",
		       nv_subdev(engine)->name, chid);

	nv_mask(priv, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	chan->state = KILLED;

	spin_lock_irqsave(&priv->base.lock, flags);
	priv->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&priv->base.lock, flags);
	schedule_work(&priv->fault);
}

481
/* Try to handle a software method for a channel: look up the channel's
 * software-class (0x906e) object and forward the method call to it.
 * Returns 0 on success, -EINVAL if no channel/binding handled it.
 */
static int
gk104_fifo_swmthd(struct gk104_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
	struct gk104_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&priv->base.lock, flags);
	if (likely(chid >= priv->base.min && chid <= priv->base.max))
		chan = (void *)priv->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
	if (likely(bind)) {
		/* mthd == 0 is used as a pure "is bound" probe. */
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return ret;
}

507 508
/* Decode table for BIND_ERROR reason codes (register 0x00252c). */
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

/* Report a BIND_ERROR interrupt, decoding the reason code where known. */
static void
gk104_fifo_intr_bind(struct gk104_fifo_priv *priv)
{
	u32 intr = nv_rd32(priv, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;
	char enunk[6] = "";

	en = nvkm_enum_find(gk104_fifo_bind_reason, code);
	if (!en)
		snprintf(enunk, sizeof(enunk), "UNK%02x", code);

	nv_error(priv, "BIND_ERROR [ %s ]\n", en ? en->name : enunk);
}

533 534
/* Decode table for SCHED_ERROR codes (register 0x00254c). */
static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

539
/* Handle a CTXSW_TIMEOUT scheduling error: scan every runlist's status
 * register and trigger recovery for any channel stuck mid context-switch
 * (busy with the channel-switch flag set).
 */
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv)
{
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
		u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;	/* incoming chid */
		u32 chsw = (stat & 0x00008000);		/* ctxsw in progress */
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);		/* outgoing chid */
		u32 chid = load ? next : prev;
		(void)save;

		if (busy && chsw) {
			if (!(chan = (void *)priv->base.channel[chid]))
				continue;
			if (!(engine = gk104_fifo_engine(priv, engn)))
				continue;
			gk104_fifo_recover(priv, engine, chan);
		}
	}
}

567
/* Report a SCHED_ERROR interrupt and dispatch known codes (only
 * CTXSW_TIMEOUT, 0x0a, gets active handling).
 */
static void
gk104_fifo_intr_sched(struct gk104_fifo_priv *priv)
{
	u32 intr = nv_rd32(priv, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;
	char enunk[6] = "";

	en = nvkm_enum_find(gk104_fifo_sched_reason, code);
	if (!en)
		snprintf(enunk, sizeof(enunk), "UNK%02x", code);

	nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(priv);
		break;
	default:
		break;
	}
}

static void
591
gk104_fifo_intr_chsw(struct gk104_fifo_priv *priv)
592 593 594 595 596 597 598
{
	u32 stat = nv_rd32(priv, 0x00256c);
	nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
	nv_wr32(priv, 0x00256c, stat);
}

static void
599
gk104_fifo_intr_dropped_fault(struct gk104_fifo_priv *priv)
600 601 602 603 604
{
	u32 stat = nv_rd32(priv, 0x00259c);
	nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
}

605 606
/* MMU fault unit decode; data2 maps the unit back to an NVDEV subdev so
 * the fault handler can find the owning engine.
 */
static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
	{}
};

626 627
/* MMU fault reason decode (low nibble of the fault status register). */
static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

647 648
/* Fault client decode for HUB-side clients. */
static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

684 685
/* Fault client decode for GPC-side clients. */
static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

710
/* Handle an MMU fault from the given fault unit: decode and log the fault,
 * poke stalled BAR/IFB units to retry, and — when the faulting context
 * belongs to a GPFIFO channel — trigger channel recovery.
 */
static void
gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit)
{
	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char erunk[6] = "";
	char euunk[6] = "";
	char ecunk[6] = "";
	char gpcid[3] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	if (!er)
		snprintf(erunk, sizeof(erunk), "UNK%02X", reason);

	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	if (eu) {
		switch (eu->data2) {
		/* Zero-masks are dummy writes that un-stall the unit. */
		case NVDEV_SUBDEV_BAR:
			nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			/* Locate the engine context from the instance addr. */
			engine = nvkm_engine(priv, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	} else {
		snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
	}

	/* The client id decodes differently for HUB vs GPC faults. */
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "%d", gpc);
	}

	if (!ec)
		snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);

	nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
		       "channel 0x%010llx [%s]\n", write ? "write" : "read",
		 (u64)vahi << 32 | valo, er ? er->name : erunk,
		 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
		 ec ? ec->name : ecunk, (u64)inst << 12,
		 nvkm_client_name(engctx));

	/* Walk up the object tree looking for the owning GPFIFO channel. */
	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case KEPLER_CHANNEL_GPFIFO_A:
			gk104_fifo_recover(priv, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

786
/* Bitfield decode for PBDMA interrupt status register 0x040108. */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
819

820
/* Service the primary interrupt status of one PBDMA unit.  DEVICE
 * interrupts (bit 23) may be software methods; those handled by
 * gk104_fifo_swmthd() are suppressed from the error report.  All other
 * pending bits are logged with method/data context, then acknowledged.
 */
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo_priv *priv, int unit)
{
	u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000));
	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;

	if (stat & 0x00800000) {
		if (!gk104_fifo_swmthd(priv, chid, mthd, data))
			show &= ~0x00800000;
		/* Resume method processing past the trapped method. */
		nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nv_error(priv, "PBDMA%d:", unit);
		nvkm_bitfield_print(gk104_fifo_pbdma_intr_0, show);
		pr_cont("\n");
		nv_error(priv,
			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
			 unit, chid,
			 nvkm_client_name_for_fifo_chid(&priv->base, chid),
			 subc, mthd, data);
	}

	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}

852
/* Bitfield decode for the secondary (HCE) PBDMA status register 0x040148. */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

/* Service the secondary (HCE) interrupt status of one PBDMA unit: log any
 * pending bits with auxiliary registers, then acknowledge them.
 */
static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo_priv *priv, int unit)
{
	u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000));
	u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;

	if (stat) {
		nv_error(priv, "PBDMA%d:", unit);
		nvkm_bitfield_print(gk104_fifo_pbdma_intr_1, stat);
		pr_cont("\n");
		nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
			 nv_rd32(priv, 0x040150 + (unit * 0x2000)),
			 nv_rd32(priv, 0x040154 + (unit * 0x2000)));
	}

	nv_wr32(priv, 0x040148 + (unit * 0x2000), stat);
}

B
Ben Skeggs 已提交
880
static void
881
gk104_fifo_intr_runlist(struct gk104_fifo_priv *priv)
B
Ben Skeggs 已提交
882 883 884 885 886 887 888 889 890 891
{
	u32 mask = nv_rd32(priv, 0x002a00);
	while (mask) {
		u32 engn = __ffs(mask);
		wake_up(&priv->engine[engn].wait);
		nv_wr32(priv, 0x002a00, 1 << engn);
		mask &= ~(1 << engn);
	}
}

B
Ben Skeggs 已提交
892
/* Engine (non-stall) interrupt: forward to the fifo user-event machinery. */
static void
gk104_fifo_intr_engine(struct gk104_fifo_priv *priv)
{
	nvkm_fifo_uevent(&priv->base);
}

898
/* Top-level PFIFO interrupt handler.  Reads the masked status (0x002100 &
 * 0x002140), dispatches each known source, acknowledges it, and clears its
 * bit from 'stat'.  Anything left unhandled is logged and masked off so it
 * cannot storm.
 */
static void
gk104_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gk104_fifo_priv *priv = (void *)subdev;
	u32 mask = nv_rd32(priv, 0x002140);
	u32 stat = nv_rd32(priv, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(priv);
		nv_wr32(priv, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nv_error(priv, "PIO_ERROR\n");
		nv_wr32(priv, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(priv);
		nv_wr32(priv, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(priv);
		nv_wr32(priv, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nv_error(priv, "FB_FLUSH_TIMEOUT\n");
		nv_wr32(priv, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nv_error(priv, "LB_ERROR\n");
		nv_wr32(priv, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(priv);
		nv_wr32(priv, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	/* MMU faults: one pending bit per fault unit in 0x00259c. */
	if (stat & 0x10000000) {
		u32 mask = nv_rd32(priv, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(priv, unit);
			nv_wr32(priv, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	/* PBDMA interrupts: one pending bit per unit in 0x0025a0. */
	if (stat & 0x20000000) {
		u32 mask = nv_rd32(priv, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(priv, unit);
			gk104_fifo_intr_pbdma_1(priv, unit);
			nv_wr32(priv, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(priv);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nv_wr32(priv, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(priv);
		stat &= ~0x80000000;
	}

	/* Unknown sources: report once, then mask to avoid a storm. */
	if (stat) {
		nv_error(priv, "INTR 0x%08x\n", stat);
		nv_mask(priv, 0x002140, stat, 0x00000000);
		nv_wr32(priv, 0x002100, stat);
	}
}
987

988
/* Enable the engine (non-stall) interrupt when the first user event
 * subscriber appears.
 */
static void
gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
}

/* Disable the engine (non-stall) interrupt when the last user event
 * subscriber goes away.
 */
static void
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
}

1002
/* Event functions for the fifo user-event (non-stall interrupt) source. */
static const struct nvkm_event_func
gk104_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gk104_fifo_uevent_init,
	.fini = gk104_fifo_uevent_fini,
};

1009
/* Suspend/teardown hook.  Shared with derived chipsets (non-static).
 * MMU fault interrupts remain enabled even while the fifo is down.
 */
int
gk104_fifo_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo_priv *priv = (void *)object;
	int ret;

	ret = nvkm_fifo_fini(&priv->base, suspend);
	if (ret)
		return ret;

	/* allow mmu fault interrupts, even when we're not using fifo */
	nv_mask(priv, 0x002140, 0x10000000, 0x10000000);
	return 0;
}

B
Ben Skeggs 已提交
1024
/* Hardware init.  Shared with derived chipsets (non-static).  Enables all
 * PBDMA units, programs their interrupt masks, points hardware at the
 * USERD area, and unmasks all FIFO interrupt sources except the engine
 * (non-stall) event, which is enabled on demand by the uevent hooks.
 */
int
gk104_fifo_init(struct nvkm_object *object)
{
	struct gk104_fifo_priv *priv = (void *)object;
	int ret, i;

	ret = nvkm_fifo_init(&priv->base);
	if (ret)
		return ret;

	/* enable all available PBDMA units */
	nv_wr32(priv, 0x000204, 0xffffffff);
	priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
	nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < priv->spoon_nr; i++) {
		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < priv->spoon_nr; i++) {
		nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	/* USERD base address (4KiB aligned). */
	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);

	nv_wr32(priv, 0x002100, 0xffffffff);
	nv_wr32(priv, 0x002140, 0x7fffffff);
	return 0;
}

/* Destructor.  Shared with derived chipsets (non-static).  Releases the
 * USERD mapping/memory and both runlist buffers of every runlist.
 */
void
gk104_fifo_dtor(struct nvkm_object *object)
{
	struct gk104_fifo_priv *priv = (void *)object;
	int i;

	nvkm_gpuobj_unmap(&priv->user.bar);
	nvkm_gpuobj_ref(NULL, &priv->user.mem);

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
		nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
	}

	nvkm_fifo_destroy(&priv->base);
}

/* Constructor.  Shared with derived chipsets (non-static).  Allocates the
 * double-buffered runlists for every runlist index, the per-channel USERD
 * area and its BAR mapping, sets up the user-event source, and wires in
 * the interrupt handler and object classes.
 */
int
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gk104_fifo_impl *impl = (void *)oclass;
	struct gk104_fifo_priv *priv;
	int ret, i;

	ret = nvkm_fifo_create(parent, engine, oclass, 0,
			       impl->channels - 1, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	INIT_WORK(&priv->fault, gk104_fifo_recover_work);

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
				      0, &priv->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
				      0, &priv->engine[i].runlist[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&priv->engine[i].wait);
	}

	/* USERD: 0x200 bytes per channel, zeroed, mapped through BAR. */
	ret = nvkm_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200,
			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
			      &priv->user.bar);
	if (ret)
		return ret;

	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &priv->base.uevent);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = gk104_fifo_intr;
	nv_engine(priv)->cclass = &gk104_fifo_cclass;
	nv_engine(priv)->sclass = gk104_fifo_sclass;
	return 0;
}

1128 1129
/* GK104 FIFO implementation descriptor: 4096 channels. */
struct nvkm_oclass *
gk104_fifo_oclass = &(struct gk104_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0xe0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_ctor,
		.dtor = gk104_fifo_dtor,
		.init = gk104_fifo_init,
		.fini = gk104_fifo_fini,
	},
	.channels = 4096,
}.base;