gk104.c 32.0 KB
Newer Older
1
/*
2
 * Copyright 2012 Red Hat Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
24
#include "gk104.h"
25

26 27 28
#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
29
#include <core/handle.h>
30
#include <subdev/bar.h>
31
#include <subdev/fb.h>
32
#include <subdev/mmu.h>
33
#include <subdev/timer.h>
34

35
#include <nvif/class.h>
36
#include <nvif/ioctl.h>
37
#include <nvif/unpack.h>
38

39
/* Table of runlists: each entry gives the primary engine subdev owning a
 * runlist, plus a mask of all engines whose contexts ride on that runlist
 * (the engine itself is always included via the 1ULL << (a) term). */
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
	u64 subdev;	/* NVDEV_ENGINE_* index of the runlist's owner */
	u64 mask;	/* bitmask of engine indices sharing this runlist */
} fifo_engine[] = {
	/* GR shares its runlist with SW and the GRCE copy engine (CE2) */
	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
				 (1ULL << NVDEV_ENGINE_CE2)),
	_(NVDEV_ENGINE_MSPDEC  , 0),
	_(NVDEV_ENGINE_MSPPP   , 0),
	_(NVDEV_ENGINE_MSVLD   , 0),
	_(NVDEV_ENGINE_CE0     , 0),
	_(NVDEV_ENGINE_CE1     , 0),
	_(NVDEV_ENGINE_MSENC   , 0),
};
#undef _
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)

56 57
/* Per-runlist state. */
struct gk104_fifo_engn {
	struct nvkm_gpuobj *runlist[2];	/* double-buffered runlist memory */
	int cur_runlist;		/* which of the two buffers was last submitted */
	wait_queue_head_t wait;		/* woken by the runlist-update interrupt */
};

B
Ben Skeggs 已提交
62
/* GK104 PFIFO engine state. */
struct gk104_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;	/* deferred engine-recovery work */
	u64 mask;			/* engines pending recovery, guarded by base.lock */

	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
	struct {
		struct nvkm_gpuobj *mem;	/* per-channel USERD backing memory */
		struct nvkm_vma bar;		/* BAR mapping of the above */
	} user;
	int spoon_nr;	/* number of PBDMA units */
};

76 77 78 79
/* Per-client FIFO context: instance block plus its page directory / VM. */
struct gk104_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;	/* page directory */
	struct nvkm_vm *vm;		/* virtual address space using pgd */
};

82 83
/* GK104 FIFO channel. */
struct gk104_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 engine;	/* index into fifo_engine[]: the runlist this channel is on */
	/* Lifecycle state; KILLED channels are excluded from runlists and
	 * never restarted. */
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

92 93 94 95
/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

96
/* Rebuild and submit the runlist for the given runlist index: collect all
 * RUNNING channels assigned to it into the inactive runlist buffer, point
 * the hardware at it (0x002270/0x002274), and wait for the pending bit in
 * 0x002284+engine*8 to clear.  Serialised by the subdev mutex. */
static void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
	struct gk104_fifo_engn *engn = &fifo->engine[engine];
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_gpuobj *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	/* flip to the buffer not currently in use by the hardware */
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	nvkm_kmap(cur);
	for (i = 0, p = 0; i < fifo->base.max; i++) {
		struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING && chan->engine == engine) {
			/* one 8-byte runlist entry per channel: chid + 0 */
			nvkm_wo32(cur, p + 0, i);
			nvkm_wo32(cur, p + 4, 0x00000000);
			p += 8;
		}
	}
	bar->flush(bar);
	nvkm_done(cur);

	/* submit: base address (4KiB-aligned) and runlist id + entry count */
	nvkm_wr32(device, 0x002270, cur->addr >> 12);
	nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));

	/* engn->wait is woken by gk104_fifo_intr_runlist() */
	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
				msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", engine);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

132
/* Bind an engine context to a channel: map the engctx into the channel's
 * VM (if not already mapped) and write its address into the per-engine
 * slot of the channel's instance block.
 *
 * Returns 0 on success, -EINVAL for engines without a context slot, or a
 * negative error from the VM mapping. */
static int
gk104_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   :
		return 0;	/* software engine has no hardware context */
	case NVDEV_ENGINE_CE0:
	case NVDEV_ENGINE_CE1:
	case NVDEV_ENGINE_CE2:
		/* copy engines only need the instance-block address */
		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
		return 0;
	/* instance-block offsets of each engine's context pointer */
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	/* low word carries valid bit (|4), high word the upper address bits */
	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	nvkm_done(engn);
	return 0;
}

B
Ben Skeggs 已提交
176 177 178 179 180
/* Kick (preempt) a channel off the hardware by writing its chid to
 * 0x002634 and polling for the pending bit to clear.
 *
 * Returns 0 on success, -EBUSY if the preempt doesn't complete in 2s. */
static int
gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
{
	struct nvkm_object *obj = (void *)chan;
	struct gk104_fifo *fifo = (void *)obj->engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		return -EBUSY;
	}

	return 0;
}

197
/* Unbind an engine context from a channel: kick the channel off the
 * hardware, then clear its context pointer in the instance block.
 *
 * A failed kick only aborts the detach during suspend; otherwise the
 * pointers are cleared regardless. */
static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct gk104_fifo_chan *chan = (void *)parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_CE0   :
	case NVDEV_ENGINE_CE1   :
	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;	/* no slot to clear */
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	ret = gk104_fifo_chan_kick(chan);
	if (ret && suspend)
		return ret;

	if (addr) {
		nvkm_kmap(engn);
		nvkm_wo32(engn, addr + 0x00, 0x00000000);
		nvkm_wo32(engn, addr + 0x04, 0x00000000);
		bar->flush(bar);
		nvkm_done(engn);
	}

	return 0;
}

/* Create a GPFIFO channel: validate the requested engine mask against the
 * engines actually present, create the base channel object, clear the
 * channel's USERD page, and initialise its RAMFC (channel state block).
 *
 * Returns 0 on success, -ENODEV if no requested engine exists, or a
 * negative error from unpack/channel creation. */
static int
gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo *fifo = (void *)engine;
	struct gk104_fifo_base *base = (void *)parent;
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
	u64 usermem, ioffset, ilength;
	int ret, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %llx "
				   "ioffset %016llx ilength %08x engine %08x\n",
			   args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			   args->v0.ilength, args->v0.engine);
	} else
		return ret;	/* ret is assigned by the nvif_unpack() macro */

	/* pick the first requested engine that this device actually has */
	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		if (args->v0.engine & (1 << i)) {
			if (nvkm_engine(parent, fifo_engine[i].subdev)) {
				args->v0.engine = (1 << i);
				break;
			}
		}
	}

	if (i == FIFO_ENGINE_NR) {
		nvkm_error(subdev, "unsupported engines %08x\n",
			   args->v0.engine);
		return -ENODEV;
	}

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x200,
				       args->v0.pushbuf,
				       fifo_engine[i].mask, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
	chan->engine = i;

	/* each channel owns a 0x200-byte USERD slot; clear it */
	usermem = chan->base.chid * 0x200;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x200; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);

	/* RAMFC setup: USERD address, GPFIFO base/size, misc control words
	 * (magic values match the hardware-expected defaults) */
	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
	nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
	nvkm_wo32(ramfc, 0x10, 0x0000face);
	nvkm_wo32(ramfc, 0x30, 0xfffff902);
	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nvkm_wo32(ramfc, 0x84, 0x20400000);
	nvkm_wo32(ramfc, 0x94, 0x30000001);
	nvkm_wo32(ramfc, 0x9c, 0x00000100);
	nvkm_wo32(ramfc, 0xac, 0x0000001f);
	nvkm_wo32(ramfc, 0xe8, chan->base.chid);
	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	nvkm_done(ramfc);
	return 0;
}
319

320
/* Start a channel: program its runlist assignment and instance-block
 * address into the per-channel control registers (0x800000+chid*8), then
 * enable it and insert it into the runlist. */
static int
gk104_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
	nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);

	/* transition STOPPED -> RUNNING (the assignment inside the condition
	 * is intentional) and publish the channel on its runlist */
	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
		gk104_fifo_runlist_update(fifo, chan->engine);
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	}

	return 0;
}
345

346
/* Stop a channel: mark it STOPPED, remove it from the runlist, and clear
 * its instance-block pointer register. */
static int
gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	/* transition RUNNING -> STOPPED (assignment in condition intentional);
	 * KILLED channels are left as-is */
	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
		gk104_fifo_runlist_update(fifo, chan->engine);
	}

	nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
362

363 364
/* Object functions for GK104 GPFIFO channels; non-static so later chips
 * (e.g. GM-family) can reuse them. */
struct nvkm_ofuncs
gk104_fifo_chan_ofuncs = {
	.ctor = gk104_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gk104_fifo_chan_init,
	.fini = gk104_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
374

375 376
/* Channel classes exposed to userspace by this FIFO implementation. */
static struct nvkm_oclass
gk104_fifo_sclass[] = {
	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/
384

385
/* Create a FIFO context: allocate the instance block, a page directory,
 * write the PD address and address limit into the instance block, and
 * reference the client's VM against the new PD. */
static int
gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gk104_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	/* 0x0200/0x0204: PD address; 0x0208/0x020c: VM address limit */
	nvkm_kmap(&base->base.gpuobj);
	nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
	nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
	nvkm_done(&base->base.gpuobj);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

418
/* Destroy a FIFO context: drop the VM reference, free the page directory,
 * then tear down the base context. */
static void
gk104_fifo_context_dtor(struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

427 428
/* Context class for GK104 FIFO channels. */
static struct nvkm_oclass
gk104_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xe0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_context_ctor,
		.dtor = gk104_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

444
/* Map an NVDEV_ENGINE_* index to its runlist index (the position of its
 * owning entry in fifo_engine[]); returns -1 for engines with no runlist. */
static inline int
gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    :
	case NVDEV_ENGINE_CE2   : engn = 0; break;	/* CE2 shares GR's runlist */
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	case NVDEV_ENGINE_MSENC : engn = 6; break;
	default:
		return -1;
	}

	return engn;
}

463
/* Look up the engine object owning the given runlist index, or NULL if
 * the index is out of range. */
static inline struct nvkm_engine *
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
{
	if (engn >= ARRAY_SIZE(fifo_engine))
		return NULL;
	return nvkm_engine(fifo, fifo_engine[engn].subdev);
}

471
static void
472
gk104_fifo_recover_work(struct work_struct *work)
473
{
B
Ben Skeggs 已提交
474
	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
475
	struct nvkm_device *device = fifo->base.engine.subdev.device;
476
	struct nvkm_object *engine;
477 478 479 480
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

B
Ben Skeggs 已提交
481 482 483 484
	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);
485 486

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
B
Ben Skeggs 已提交
487
		engm |= 1 << gk104_fifo_engidx(fifo, engn);
488
	nvkm_mask(device, 0x002630, engm, engm);
489 490

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
B
Ben Skeggs 已提交
491
		if ((engine = (void *)nvkm_engine(fifo, engn))) {
492 493 494
			nv_ofuncs(engine)->fini(engine, false);
			WARN_ON(nv_ofuncs(engine)->init(engine));
		}
B
Ben Skeggs 已提交
495
		gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn));
496 497
	}

498 499
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
500 501 502
}

/* Begin recovery of a faulted channel/engine: disable the channel, mark
 * it KILLED (so runlist rebuilds drop it), record the engine in the
 * pending-recovery mask and schedule the recovery work.  Called from
 * interrupt context. */
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		  struct gk104_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nv_subdev(engine)->name, chid);

	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

523
/* Try to handle a software method: look up the channel's bound SW-class
 * object and forward the method call to it.
 *
 * Returns 0 if the method was handled, -EINVAL otherwise. */
static int
gk104_fifo_swmthd(struct gk104_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gk104_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), NVIF_IOCTL_NEW_V0_SW_GF100);
	if (likely(bind)) {
		/* mthd == 0 means "object bound" is enough to succeed */
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}

549 550
/* Decodes for BIND_ERROR interrupt reason codes (register 0x00252c). */
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

/* Report a BIND_ERROR interrupt: read the error code and log a decoded
 * message. */
static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

573 574
/* Decodes for SCHED_ERROR interrupt reason codes (register 0x00254c). */
static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

579
/* Handle a context-switch timeout: scan each runlist's status register
 * (0x002640+engn*4), and for any engine that is busy mid-switch, trigger
 * recovery of the channel involved. */
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		/* loading -> the incoming channel is at fault, else outgoing */
		u32 chid = load ? next : prev;
		(void)save;

		if (busy && chsw) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gk104_fifo_engine(fifo, engn)))
				continue;
			gk104_fifo_recover(fifo, engine, chan);
		}
	}
}

608
/* Report a SCHED_ERROR interrupt and dispatch known codes; only
 * CTXSW_TIMEOUT (0x0a) currently gets active handling. */
static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

/* Report and acknowledge a CHSW_ERROR interrupt (register 0x00256c). */
static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

/* Report a DROPPED_MMU_FAULT interrupt (register 0x00259c). */
static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

648 649
/* MMU fault unit decodes; data2 carries the nvkm subdev/engine index used
 * to route recovery (entries without data2 are report-only). */
static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
	{}
};

669 670
/* MMU fault reason decodes. */
static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

690 691
/* MMU fault client decodes for HUB (non-GPC) clients. */
static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

727 728
/* MMU fault client decodes for GPC clients. */
static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

753
/* Handle an MMU fault from the given fault unit: decode the fault info
 * registers (0x002800+unit*0x10), log a human-readable message, poke the
 * affected BAR/IFB bind register to unstick it where applicable, and if
 * the fault came from a channel's engine context, trigger recovery. */
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		/* the no-op masks below presumably retrigger the bind of the
		 * faulting unit — NOTE(review): confirm against hw docs */
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	/* walk up from the engctx to find the owning channel object */
	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case KEPLER_CHANNEL_GPFIFO_A:
		case MAXWELL_CHANNEL_GPFIFO_A:
			gk104_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

822
/* Bitfield decodes for PBDMA interrupt status register 0 (0x040108). */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
855

856
/* Handle PBDMA interrupt status register 0 for one unit: DEVICE (bit 23)
 * interrupts may be software methods — try gk104_fifo_swmthd() and
 * suppress the report if handled; anything left is logged, then all
 * handled status bits are acknowledged. */
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (!gk104_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
		/* pop the method from the PBDMA so it can continue */
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

889
/* Bitfield decodes for PBDMA interrupt status register 1 (0x040148). */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

/* Handle PBDMA interrupt status register 1 for one unit: log any masked
 * status bits along with two auxiliary registers, then acknowledge. */
static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

B
Ben Skeggs 已提交
919
/* Runlist-update-complete interrupt: wake any waiter (see
 * gk104_fifo_runlist_update) for each signalled runlist and acknowledge. */
static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		u32 engn = __ffs(mask);
		wake_up(&fifo->engine[engn].wait);
		nvkm_wr32(device, 0x002a00, 1 << engn);
		mask &= ~(1 << engn);
	}
}

B
Ben Skeggs 已提交
932
/* Engine (non-stall) interrupt: forward to the FIFO uevent notifiers. */
static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

/* Top-level FIFO interrupt dispatcher.
 *
 * Reads the pending-interrupt status (0x002100) masked by the enable
 * register (0x002140), then handles each recognised bit in turn.  Most
 * bits are acknowledged by writing the bit back to 0x002100 after the
 * specific handler has run; the runlist (0x40000000) and fault/PBDMA
 * summary bits are acked in their per-unit registers instead.  Any bit
 * left set at the end is unknown: it is logged, masked off in 0x002140
 * so it cannot interrupt-storm, and acked.
 */
static void
gk104_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gk104_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		/* channel bind error */
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		/* scheduler error (e.g. channel timeout) */
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		/* channel-switch error */
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		/* MMU fault: one bit per faulting unit in 0x00259c. */
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		/* PBDMA: one bit per pending unit in 0x0025a0; run both
		 * interrupt-word handlers before acking the summary bit.
		 */
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		/* runlist update complete; acked per-engine in 0x002a00 */
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		/* engine (non-stall) interrupt; acked before dispatch */
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		/* Unknown bits: log, disable, and ack to avoid a storm. */
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

1029
static void
1030
gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
1031
{
1032
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
1033 1034
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
1035 1036 1037
}

static void
1038
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
1039
{
1040
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
1041 1042
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
1043 1044
}

/* Callbacks for the FIFO user-event (non-stall interrupt) source:
 * common ctor, with init/fini toggling the hardware interrupt enable.
 */
static const struct nvkm_event_func
gk104_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gk104_fifo_uevent_init,
	.fini = gk104_fifo_uevent_fini,
};

1052
int
1053
gk104_fifo_fini(struct nvkm_object *object, bool suspend)
1054
{
B
Ben Skeggs 已提交
1055
	struct gk104_fifo *fifo = (void *)object;
1056
	struct nvkm_device *device = fifo->base.engine.subdev.device;
1057 1058
	int ret;

B
Ben Skeggs 已提交
1059
	ret = nvkm_fifo_fini(&fifo->base, suspend);
1060 1061 1062 1063
	if (ret)
		return ret;

	/* allow mmu fault interrupts, even when we're not using fifo */
1064
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
1065 1066 1067
	return 0;
}

/* Hardware bring-up for the gk104 FIFO.
 *
 * Runs the common fifo init, enables every PBDMA unit present,
 * programs per-unit interrupt/ack registers, points the hardware at
 * the USERD area, then clears and enables the top-level interrupts.
 * Returns 0 on success or the error from nvkm_fifo_init().
 */
int
gk104_fifo_init(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	/* enable all available PBDMA units */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	/* read back: the hardware leaves only the units that exist set */
	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	/* point hardware at the USERD block (4KiB-page address) */
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	/* clear any stale interrupts, then enable all but the engine
	 * (non-stall) bit, which uevent_init turns on per subscriber */
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	return 0;
}

void
1106
gk104_fifo_dtor(struct nvkm_object *object)
B
Ben Skeggs 已提交
1107
{
B
Ben Skeggs 已提交
1108
	struct gk104_fifo *fifo = (void *)object;
B
Ben Skeggs 已提交
1109 1110
	int i;

B
Ben Skeggs 已提交
1111 1112
	nvkm_gpuobj_unmap(&fifo->user.bar);
	nvkm_gpuobj_ref(NULL, &fifo->user.mem);
B
Ben Skeggs 已提交
1113 1114

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
B
Ben Skeggs 已提交
1115 1116
		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[1]);
		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[0]);
B
Ben Skeggs 已提交
1117 1118
	}

B
Ben Skeggs 已提交
1119
	nvkm_fifo_destroy(&fifo->base);
B
Ben Skeggs 已提交
1120 1121 1122
}

/* Constructor: allocate the gk104 FIFO object and its resources.
 *
 * Creates the base fifo with impl->channels channel ids, sets up the
 * fault-recovery work item, allocates the double-buffered runlist for
 * every engine, allocates and maps the USERD area (0x200 bytes per
 * channel), registers the user-event source, and wires up the
 * interrupt handler and object classes.
 *
 * Returns 0 on success or a negative error code; on failure the
 * partially-constructed object is released by the core via the dtor.
 */
int
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gk104_fifo_impl *impl = (void *)oclass;
	struct gk104_fifo *fifo;
	int ret, i;

	ret = nvkm_fifo_create(parent, engine, oclass, 0,
			       impl->channels - 1, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);

	/* two runlist buffers per engine, so one can be rebuilt while
	 * the other is in flight */
	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->engine[i].wait);
	}

	/* USERD backing store: 0x200 bytes per channel, zeroed */
	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, impl->channels * 0x200,
			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
			      &fifo->user.bar);
	if (ret)
		return ret;

	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;	/* PMC enable bit */
	nv_subdev(fifo)->intr = gk104_fifo_intr;
	nv_engine(fifo)->cclass = &gk104_fifo_cclass;
	nv_engine(fifo)->sclass = gk104_fifo_sclass;
	return 0;
}

/* Object class description for the gk104 (0xe0) FIFO engine:
 * lifecycle callbacks plus the 4096-channel limit for this chipset.
 */
struct nvkm_oclass *
gk104_fifo_oclass = &(struct gk104_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0xe0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_ctor,
		.dtor = gk104_fifo_dtor,
		.init = gk104_fifo_init,
		.fini = gk104_fifo_fini,
	},
	.channels = 4096,
}.base;