gk104.c 31.4 KB
Newer Older
1
/*
2
 * Copyright 2012 Red Hat Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
24
#include "gk104.h"
25

26 27 28
#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
29
#include <core/handle.h>
30
#include <subdev/bar.h>
31
#include <subdev/fb.h>
32
#include <subdev/mmu.h>
33
#include <subdev/timer.h>
34

35 36
#include <nvif/class.h>
#include <nvif/unpack.h>
37

38
/* Table describing each runlist-visible engine: which subdev owns it, and
 * the mask of engines that share its context (the engine's own bit is
 * always included by the _() macro).  GR additionally covers SW and CE2. */
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
	u64 subdev;	/* NVDEV_ENGINE_* index of the owning subdev */
	u64 mask;	/* engines sharing this context (incl. self) */
} fifo_engine[] = {
	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
				 (1ULL << NVDEV_ENGINE_CE2)),
	_(NVDEV_ENGINE_MSPDEC  , 0),
	_(NVDEV_ENGINE_MSPPP   , 0),
	_(NVDEV_ENGINE_MSVLD   , 0),
	_(NVDEV_ENGINE_CE0     , 0),
	_(NVDEV_ENGINE_CE1     , 0),
	_(NVDEV_ENGINE_MSENC   , 0),
};
#undef _
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)

55 56
/* Per-runlist state: double-buffered runlist memory plus a waitqueue used
 * to wait for the hardware to finish processing a runlist commit. */
struct gk104_fifo_engn {
	struct nvkm_gpuobj *runlist[2];	/* two buffers, swapped on each update */
	int cur_runlist;		/* index of buffer last submitted */
	wait_queue_head_t wait;		/* woken from runlist interrupt */
};

/* Device-level FIFO state for GK104-class hardware. */
struct gk104_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;	/* deferred engine-recovery work */
	u64 mask;			/* engines pending recovery, by nv_engidx */

	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
	struct {
		struct nvkm_gpuobj *mem;	/* per-channel USERD pages */
		struct nvkm_vma bar;		/* BAR mapping of the above */
	} user;
	int spoon_nr;			/* number of enabled PBDMA units */
};

/* Per-channel-group context: page directory + VM for the channel. */
struct gk104_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

/* A single GPFIFO channel and its scheduling state. */
struct gk104_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 engine;			/* index into fifo_engine[] / runlists */
	enum {
		STOPPED,
		RUNNING,
		KILLED			/* faulted; excluded from runlists */
	} state;
};

91 92 93 94
/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

95
/* Rebuild and commit the runlist for one engine.  Fills the inactive
 * runlist buffer with all RUNNING channels belonging to the engine,
 * points the hardware at it, and waits (up to 2s) for the commit to be
 * acknowledged.  Serialised by the subdev mutex so concurrent updates
 * cannot race over the double-buffer swap. */
static void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
	struct gk104_fifo_engn *engn = &fifo->engine[engine];
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_gpuobj *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	/* swap to the buffer not currently in use by the hardware */
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	/* one 8-byte entry (chid, 0) per runnable channel on this engine */
	for (i = 0, p = 0; i < fifo->base.max; i++) {
		struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING && chan->engine == engine) {
			nv_wo32(cur, p + 0, i);
			nv_wo32(cur, p + 4, 0x00000000);
			p += 8;
		}
	}
	bar->flush(bar);

	/* commit: base address (4KiB units) and runlist id + entry count */
	nvkm_wr32(device, 0x002270, cur->addr >> 12);
	nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));

	/* bit 20 of 0x002284+n*8 appears to be a "pending" flag cleared by
	 * hardware; the runlist interrupt wakes this queue */
	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
				msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", engine);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

129
/* Attach an engine context to a channel: map the engine's context object
 * into the channel's VM (once), and record its address in the channel's
 * instance block at an engine-specific offset.  SW and copy engines need
 * no per-engine context slot.  Returns 0 or a negative errno. */
static int
gk104_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   :
		return 0;
	case NVDEV_ENGINE_CE0:
	case NVDEV_ENGINE_CE1:
	case NVDEV_ENGINE_CE2:
		/* copy engines only need the channel's instance address */
		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
		return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	/* map the engine context into the channel VM on first attach */
	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	/* low word carries a valid bit (| 4) per hardware layout */
	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	return 0;
}

B
Ben Skeggs 已提交
170 171 172 173 174
/* Force the channel off the hardware (preempt) by writing its chid to the
 * kick register, then poll (up to 2ms) for the pending bit to clear.
 * Returns 0 on success, -EBUSY on timeout. */
static int
gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
{
	struct nvkm_object *obj = (void *)chan;
	struct gk104_fifo *fifo = (void *)obj->engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		return -EBUSY;
	}

	return 0;
}

191
/* Detach an engine context from a channel: preempt the channel off the
 * hardware, then clear the engine's context pointer in the instance block
 * (addr == 0 means the engine has no slot to clear, e.g. copy engines).
 * On suspend a failed kick aborts the detach; otherwise it is ignored. */
static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct gk104_fifo_chan *chan = (void *)parent;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_CE0   :
	case NVDEV_ENGINE_CE1   :
	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	ret = gk104_fifo_chan_kick(chan);
	if (ret && suspend)
		return ret;

	if (addr) {
		nv_wo32(base, addr + 0x00, 0x00000000);
		nv_wo32(base, addr + 0x04, 0x00000000);
		bar->flush(bar);
	}

	return 0;
}

/* Create a KEPLER_CHANNEL_GPFIFO_A channel.  Unpacks the nvif arguments,
 * picks the first requested engine that actually exists on this device,
 * allocates the channel object, clears its USERD page, and programs the
 * channel's instance block (USERD address, GPFIFO base/size and various
 * fixed hardware control words). */
static int
gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo *fifo = (void *)engine;
	struct gk104_fifo_base *base = (void *)parent;
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	u64 usermem, ioffset, ilength;
	int ret, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	/* nvif_unpack() assigns 'ret' as a side effect of the macro */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
				   "ioffset %016llx ilength %08x engine %08x\n",
			   args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			   args->v0.ilength, args->v0.engine);
	} else
		return ret;

	/* select the first requested engine present on this device; the
	 * chosen bit is written back so userspace sees what it got */
	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		if (args->v0.engine & (1 << i)) {
			if (nvkm_engine(parent, fifo_engine[i].subdev)) {
				args->v0.engine = (1 << i);
				break;
			}
		}
	}

	if (i == FIFO_ENGINE_NR) {
		nvkm_error(subdev, "unsupported engines %08x\n",
			   args->v0.engine);
		return -ENODEV;
	}

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x200,
				       args->v0.pushbuf,
				       fifo_engine[i].mask, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
	chan->engine = i;

	/* each channel owns a 0x200-byte USERD slot; zero it out */
	usermem = chan->base.chid * 0x200;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	for (i = 0; i < 0x200; i += 4)
		nv_wo32(fifo->user.mem, usermem + i, 0x00000000);

	/* instance block setup; magic values follow the hardware layout */
	nv_wo32(base, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
	nv_wo32(base, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
	nv_wo32(base, 0x10, 0x0000face);
	nv_wo32(base, 0x30, 0xfffff902);
	nv_wo32(base, 0x48, lower_32_bits(ioffset));
	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base, 0x84, 0x20400000);
	nv_wo32(base, 0x94, 0x30000001);
	nv_wo32(base, 0x9c, 0x00000100);
	nv_wo32(base, 0xac, 0x0000001f);
	nv_wo32(base, 0xe8, chan->base.chid);
	nv_wo32(base, 0xb8, 0xf8000000);
	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	return 0;
}
305

306
/* Bring a channel online: bind it to its engine/runlist, enable it in the
 * per-channel control registers, and insert it into the runlist. */
static int
gk104_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	/* select runlist engine and point the channel at its inst block */
	nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
	nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);

	/* the && arm transitions state to RUNNING as a side effect */
	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		/* NOTE(review): bit 10 is set both before and after the
		 * runlist update — presumably intentional (matches the
		 * original), but worth confirming against hw docs */
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
		gk104_fifo_runlist_update(fifo, chan->engine);
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	}

	return 0;
}
331

332
/* Take a channel offline: mark it STOPPED, drop it from the runlist, and
 * clear its channel control register before the generic fini. */
static int
gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	/* the && arm transitions state to STOPPED as a side effect */
	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		/* bit 11: disable the channel before rebuilding the runlist */
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
		gk104_fifo_runlist_update(fifo, chan->engine);
	}

	nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
348

349 350
/* Object functions for GPFIFO channels; shared with later chipsets
 * (non-static for that reason), generic helpers cover everything but
 * ctor/init/fini. */
struct nvkm_ofuncs
gk104_fifo_chan_ofuncs = {
	.ctor = gk104_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gk104_fifo_chan_init,
	.fini = gk104_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
360

361 362
/* Channel classes exposed to userspace by this FIFO implementation. */
static struct nvkm_oclass
gk104_fifo_sclass[] = {
	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/
370

371
/* Create the per-channel context: a 4KiB instance block, a page directory,
 * and a VM reference.  The PDE pointer and address limits are written into
 * the instance block at the layout the hardware expects. */
static int
gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gk104_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	/* page directory address + VM limit (0xff_ffffffff) */
	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0208, 0xffffffff);
	nv_wo32(base, 0x020c, 0x000000ff);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

402
/* Destroy a channel context: release VM ref, page directory, then base. */
static void
gk104_fifo_context_dtor(struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

411 412
/* Context class for this FIFO; only ctor/dtor are chip-specific. */
static struct nvkm_oclass
gk104_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xe0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_context_ctor,
		.dtor = gk104_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

428
static inline int
B
Ben Skeggs 已提交
429
gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn)
430 431
{
	switch (engn) {
432 433 434 435 436 437 438 439
	case NVDEV_ENGINE_GR    :
	case NVDEV_ENGINE_CE2   : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	case NVDEV_ENGINE_MSENC : engn = 6; break;
440 441 442 443 444 445 446
	default:
		return -1;
	}

	return engn;
}

447
static inline struct nvkm_engine *
B
Ben Skeggs 已提交
448
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
449 450 451
{
	if (engn >= ARRAY_SIZE(fifo_engine))
		return NULL;
B
Ben Skeggs 已提交
452
	return nvkm_engine(fifo, fifo_engine[engn].subdev);
453 454
}

455
static void
456
gk104_fifo_recover_work(struct work_struct *work)
457
{
B
Ben Skeggs 已提交
458
	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
459
	struct nvkm_device *device = fifo->base.engine.subdev.device;
460
	struct nvkm_object *engine;
461 462 463 464
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

B
Ben Skeggs 已提交
465 466 467 468
	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);
469 470

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
B
Ben Skeggs 已提交
471
		engm |= 1 << gk104_fifo_engidx(fifo, engn);
472
	nvkm_mask(device, 0x002630, engm, engm);
473 474

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
B
Ben Skeggs 已提交
475
		if ((engine = (void *)nvkm_engine(fifo, engn))) {
476 477 478
			nv_ofuncs(engine)->fini(engine, false);
			WARN_ON(nv_ofuncs(engine)->init(engine));
		}
B
Ben Skeggs 已提交
479
		gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn));
480 481
	}

482 483
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
484 485 486
}

/* Initiate recovery after an engine fault on 'chan': disable the channel,
 * mark it KILLED, record the engine in fifo->mask under the fifo lock and
 * schedule the recovery work.  Callable from interrupt context (the heavy
 * lifting happens in gk104_fifo_recover_work). */
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		  struct gk104_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nv_subdev(engine)->name, chid);

	/* bit 11: disable the channel so it cannot be rescheduled */
	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

507
/* Try to handle a software method for channel 'chid' by forwarding it to
 * the channel's bound 0x906e (software) object.  Returns 0 when handled,
 * -EINVAL when there is no channel / no binding / the call failed. */
static int
gk104_fifo_swmthd(struct gk104_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gk104_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
	if (likely(bind)) {
		/* mthd == 0 is used as a "does a sw object exist" probe */
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}

533 534
/* Human-readable decode of the BIND_ERROR code in register 0x00252c. */
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

/* Report a BIND_ERROR interrupt (decode the reason code and log it). */
static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

557 558
/* Decode of the SCHED_ERROR code; only CTXSW_TIMEOUT is handled. */
static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

563
/* Handle a context-switch timeout: scan each engine's status register and,
 * where an engine is busy mid-channel-switch, recover the channel that is
 * stuck on it. */
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
		/* decode engine status: busy/chsw flags plus the previous
		 * and next channel ids; the relevant chid depends on
		 * whether a load is in progress */
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		u32 chid = load ? next : prev;
		(void)save;

		if (busy && chsw) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gk104_fifo_engine(fifo, engn)))
				continue;
			gk104_fifo_recover(fifo, engine, chan);
		}
	}
}

592
/* Report a SCHED_ERROR interrupt; CTXSW_TIMEOUT (0x0a) additionally
 * triggers per-engine recovery. */
static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
B
Ben Skeggs 已提交
614
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
615
{
616 617
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
618
	u32 stat = nvkm_rd32(device, 0x00256c);
619
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
620
	nvkm_wr32(device, 0x00256c, stat);
621 622 623
}

static void
B
Ben Skeggs 已提交
624
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
625
{
626 627
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
628
	u32 stat = nvkm_rd32(device, 0x00259c);
629
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
630 631
}

632 633
/* MMU fault decode tables.  'data2' in the engine table is the subdev
 * index used by the fault handler to locate the engine for recovery;
 * entries without it (PERF, PMU) are log-only. */
static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
	{}
};

/* Fault reason codes (low nibble of the fault status register). */
static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

/* Client ids for faults originating from HUB units. */
static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

/* Client ids for faults originating from within a GPC. */
static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

737
/* Handle an MMU fault from fault unit 'unit': decode the fault registers,
 * poke the appropriate engine (BAR/IFB get a dummy register write that
 * appears to re-arm them), log the fault, and — if the faulting context
 * belongs to a GPFIFO channel — trigger recovery for it. */
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	/* client id decodes differently for HUB vs GPC faults */
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		/* the no-op masks below trigger a write cycle that seems
		 * to re-arm the faulting unit (BAR1/BAR3/IFB) */
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	/* walk up the object tree looking for the owning GPFIFO channel */
	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case KEPLER_CHANNEL_GPFIFO_A:
		case MAXWELL_CHANNEL_GPFIFO_A:
			gk104_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

806
/* Bit names for PBDMA interrupt status register 0 (0x040108 + n*0x2000). */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
839

840
/* Handle PBDMA interrupt status 0 for unit 'unit'.  DEVICE interrupts
 * (bit 23) may be software methods; if the sw handler consumes one the
 * bit is suppressed from the log.  All handled bits are acked at the end
 * (the status register is write-to-clear). */
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (!gk104_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
		/* ack the method slot so the PBDMA can continue */
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

873
/* Bit names for PBDMA interrupt status register 1 (0x040148 + n*0x2000). */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

/* Handle PBDMA interrupt status 1 for unit 'unit' (HCE errors): log any
 * enabled pending bits with two auxiliary registers, then ack. */
static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

B
Ben Skeggs 已提交
903
/* Runlist-update-complete interrupt: wake every waiter whose runlist bit
 * is pending, acking each bit (0x002a00 is write-to-clear per bit). */
static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		u32 engn = __ffs(mask);
		wake_up(&fifo->engine[engn].wait);
		nvkm_wr32(device, 0x002a00, 1 << engn);
		mask &= ~(1 << engn);
	}
}

B
Ben Skeggs 已提交
916
/* Engine (non-stall) interrupt: forward to the fifo uevent machinery. */
static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

922
/* Top-level FIFO interrupt dispatcher.  Reads pending-and-enabled status
 * from 0x002100/0x002140, handles each known source (acking via a write
 * to 0x002100 where required), and masks off any source it does not
 * recognise so it cannot interrupt-storm. */
static void
gk104_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gk104_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	/* MMU fault: one sub-status bit per fault unit in 0x00259c */
	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	/* PBDMA: one sub-status bit per unit in 0x0025a0 */
	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	/* unknown sources: log once, then mask them off */
	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}
1012

1013
static void
1014
gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
1015
{
1016
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
1017 1018
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
1019 1020 1021
}

static void
1022
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
1023
{
1024
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
1025 1026
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
1027 1028
}

/* Event hooks for the FIFO non-stall interrupt: the common nvkm ctor plus
 * the GK104-specific enable/disable of the hardware interrupt bit.
 */
static const struct nvkm_event_func
gk104_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gk104_fifo_uevent_init,
	.fini = gk104_fifo_uevent_fini,
};

1036
int
1037
gk104_fifo_fini(struct nvkm_object *object, bool suspend)
1038
{
B
Ben Skeggs 已提交
1039
	struct gk104_fifo *fifo = (void *)object;
1040
	struct nvkm_device *device = fifo->base.engine.subdev.device;
1041 1042
	int ret;

B
Ben Skeggs 已提交
1043
	ret = nvkm_fifo_fini(&fifo->base, suspend);
1044 1045 1046 1047
	if (ret)
		return ret;

	/* allow mmu fault interrupts, even when we're not using fifo */
1048
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
1049 1050 1051
	return 0;
}

/* Bring the PFIFO engine up: enable the PBDMA units, clear and unmask
 * their interrupts, point the hardware at the USERD area, then unmask
 * the top-level PFIFO interrupts.  Register write order follows the
 * hardware bring-up sequence and must not be rearranged.
 */
int
gk104_fifo_init(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	/* enable all available PBDMA units */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	/* read back which units the hardware actually has */
	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* PBDMA[n]: clear pending interrupts, then enable them
	 * (0xfffffeff — one source, presumably bit 8, is kept masked) */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE: clear pending interrupts, then enable all */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	/* point hardware at the USERD BAR mapping (page-aligned address) */
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	/* ack any stale PFIFO interrupts, then unmask everything except
	 * the non-stall bit (31), which is enabled on demand via uevents */
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	return 0;
}

/* Destructor: release the USERD mapping/memory and the per-engine
 * runlist buffers, then run the common FIFO destructor.  The unmap
 * must precede dropping the memory reference.
 */
void
gk104_fifo_dtor(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	int i;

	nvkm_gpuobj_unmap(&fifo->user.bar);
	nvkm_gpuobj_ref(NULL, &fifo->user.mem);

	/* drop both runlist buffers for every engine */
	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[1]);
		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[0]);
	}

	nvkm_fifo_destroy(&fifo->base);
}

/* Constructor: allocate the FIFO object, double-buffered runlists for
 * each engine, the USERD backing memory and its BAR mapping, and wire
 * up interrupt/class hooks.  On a failed allocation we return early;
 * partially-constructed state is torn down by the framework calling
 * gk104_fifo_dtor() on the object stored in *pobject.
 */
int
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gk104_fifo_impl *impl = (void *)oclass;
	struct gk104_fifo *fifo;
	int ret, i;

	/* channel IDs run 0..impl->channels-1 */
	ret = nvkm_fifo_create(parent, engine, oclass, 0,
			       impl->channels - 1, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	/* deferred engine-recovery handler, scheduled from IRQ context */
	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);

	/* two runlist buffers per engine so one can be rebuilt while the
	 * other is in use by hardware */
	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->engine[i].wait);
	}

	/* USERD: 0x200 bytes per channel, zeroed */
	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, impl->channels * 0x200,
			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
			      &fifo->user.bar);
	if (ret)
		return ret;

	/* single event type, single index: the non-stall interrupt */
	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100; /* PMC interrupt bit for PFIFO */
	nv_subdev(fifo)->intr = gk104_fifo_intr;
	nv_engine(fifo)->cclass = &gk104_fifo_cclass;
	nv_engine(fifo)->sclass = gk104_fifo_sclass;
	return 0;
}

/* Class descriptor for the GK104 (0xe0) PFIFO implementation:
 * object lifecycle hooks plus the 4096-channel limit for this chip.
 */
struct nvkm_oclass *
gk104_fifo_oclass = &(struct gk104_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0xe0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_ctor,
		.dtor = gk104_fifo_dtor,
		.init = gk104_fifo_init,
		.fini = gk104_fifo_fini,
	},
	.channels = 4096,
}.base;