gk104.c 32.1 KB
Newer Older
1
/*
2
 * Copyright 2012 Red Hat Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
24
#include "gk104.h"
25

26 27 28
#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
29
#include <core/handle.h>
30
#include <subdev/bar.h>
31
#include <subdev/fb.h>
32
#include <subdev/mmu.h>
33
#include <subdev/timer.h>
34

35
#include <nvif/class.h>
36
#include <nvif/ioctl.h>
37
#include <nvif/unpack.h>
38

39
/* Table of runlists: one entry per hardware runlist, giving the primary
 * engine that owns it plus a mask of every engine whose channels are
 * scheduled through it (the owner's bit is OR'd in by the _() macro). */
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
	u64 subdev;	/* NVDEV_ENGINE_* index of the runlist's owner */
	u64 mask;	/* all engines reachable via this runlist */
} fifo_engine[] = {
	/* GR shares its runlist with SW and the graphics copy engine. */
	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
				 (1ULL << NVDEV_ENGINE_CE2)),
	_(NVDEV_ENGINE_MSPDEC  , 0),
	_(NVDEV_ENGINE_MSPPP   , 0),
	_(NVDEV_ENGINE_MSVLD   , 0),
	_(NVDEV_ENGINE_CE0     , 0),
	_(NVDEV_ENGINE_CE1     , 0),
	_(NVDEV_ENGINE_MSENC   , 0),
};
#undef _
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)

56 57
/* Per-runlist software state. */
struct gk104_fifo_engn {
	struct nvkm_gpuobj *runlist[2];	/* double-buffered runlist objects */
	int cur_runlist;		/* which buffer was submitted last */
	wait_queue_head_t wait;		/* woken by the runlist-done interrupt */
};

B
Ben Skeggs 已提交
62
struct gk104_fifo {
63
	struct nvkm_fifo base;
64 65 66 67

	struct work_struct fault;
	u64 mask;

68
	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
69
	struct {
70 71
		struct nvkm_gpuobj *mem;
		struct nvkm_vma bar;
72 73 74 75
	} user;
	int spoon_nr;
};

76 77 78 79
struct gk104_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
80 81
};

82 83
struct gk104_fifo_chan {
	struct nvkm_fifo_chan base;
84
	u32 engine;
85 86 87 88 89
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
90 91
};

92 93 94 95
/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

96
/* Rebuild and commit the runlist for the given engine (runlist index).
 * Writes every RUNNING channel on that engine into the inactive runlist
 * buffer, submits it to hardware, and waits for the commit to complete. */
static void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
	struct gk104_fifo_engn *engn = &fifo->engine[engine];
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_gpuobj *cur;
	int i, p;

	/* Serialise runlist updates; flip to the buffer not in use by HW. */
	mutex_lock(&nv_subdev(fifo)->mutex);
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	/* One 8-byte entry per runnable channel on this runlist. */
	nvkm_kmap(cur);
	for (i = 0, p = 0; i < fifo->base.max; i++) {
		struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING && chan->engine == engine) {
			nvkm_wo32(cur, p + 0, i);
			nvkm_wo32(cur, p + 4, 0x00000000);
			p += 8;
		}
	}
	bar->flush(bar);
	nvkm_done(cur);

	/* Submit: base address (4KiB units), runlist id and entry count. */
	nvkm_wr32(device, 0x002270, cur->addr >> 12);
	nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));

	/* Wait for the hardware's "pending" bit to clear. */
	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
				msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", engine);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

132
/* Attach an engine context to a channel: map the context into the
 * channel's VM (where required) and point the instance block at it. */
static int
gk104_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	/* Per-engine offset of the context pointer in the instance block. */
	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   :
		return 0;
	case NVDEV_ENGINE_CE0:
	case NVDEV_ENGINE_CE1:
	case NVDEV_ENGINE_CE2:
		/* Copy engines need no context slot, only the inst address. */
		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
		return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	/* Map the context into the channel VM on first attach. */
	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	/* Write the context's VM address (low word tagged valid). */
	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	nvkm_done(engn);
	return 0;
}

B
Ben Skeggs 已提交
176 177 178 179 180
/* Force the channel off the hardware (preempt), waiting up to 2s for
 * the pending bit to clear.  Returns -EBUSY on timeout. */
static int
gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
{
	struct nvkm_object *obj = (void *)chan;
	struct gk104_fifo *fifo = (void *)obj->engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		return -EBUSY;
	}

	return 0;
}

197
/* Detach an engine context: preempt the channel, then clear the engine's
 * context pointer in the instance block.  A failed kick only aborts the
 * detach during suspend; otherwise teardown continues regardless. */
static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct gk104_fifo_chan *chan = (void *)parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	u32 addr;
	int ret;

	/* Same per-engine offsets as context_attach; 0 means "no slot". */
	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_CE0   :
	case NVDEV_ENGINE_CE1   :
	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	/* The channel must be off the hardware before touching its inst. */
	ret = gk104_fifo_chan_kick(chan);
	if (ret && suspend)
		return ret;

	if (addr) {
		nvkm_kmap(engn);
		nvkm_wo32(engn, addr + 0x00, 0x00000000);
		nvkm_wo32(engn, addr + 0x04, 0x00000000);
		bar->flush(bar);
		nvkm_done(engn);
	}

	return 0;
}

/* Create a KEPLER_CHANNEL_GPFIFO_A channel object.
 *
 * If the caller passed engine==0, no channel is created: the supported
 * engine mask is returned in args->v0.engine (probe mode) along with a
 * dummy object.  Otherwise the first requested+supported runlist is
 * chosen, the channel is created against it, and its RAMFC (instance
 * block) is initialised with USERD and GPFIFO parameters. */
static int
gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo *fifo = (void *)engine;
	struct gk104_fifo_base *base = (void *)parent;
	struct gk104_fifo_chan *chan;
	struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
	u64 usermem, ioffset, ilength;
	u32 engines;
	int ret, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %llx "
				   "ioffset %016llx ilength %08x engine %08x\n",
			   args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			   args->v0.ilength, args->v0.engine);
	} else
		return ret;	/* ret is set by the nvif_unpack() macro */

	/* Mask of runlists whose owning engine is actually present. */
	for (i = 0, engines = 0; i < FIFO_ENGINE_NR; i++) {
		if (!nvkm_engine(parent, fifo_engine[i].subdev))
			continue;
		engines |= (1 << i);
	}

	/* Probe mode: report supported engines, return a dummy object. */
	if (!args->v0.engine) {
		static struct nvkm_oclass oclass = {
			.ofuncs = &nvkm_object_ofuncs,
		};
		args->v0.engine = engines;
		return nvkm_object_ctor(parent, engine, &oclass, NULL, 0, pobject);
	}

	engines &= args->v0.engine;
	if (!engines) {
		nvif_ioctl(parent, "unsupported engines %08x\n",
			   args->v0.engine);
		return -ENODEV;
	}
	i = __ffs(engines);	/* first requested+supported runlist */

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x200,
				       args->v0.pushbuf,
				       fifo_engine[i].mask, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
	chan->engine = i;

	/* 0x200 bytes of USERD per channel; ilength as log2 of entries. */
	usermem = chan->base.chid * 0x200;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	/* Clear this channel's USERD slice. */
	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x200; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);

	/* Initialise RAMFC: USERD address, GPFIFO base/size, HW defaults. */
	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
	nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
	nvkm_wo32(ramfc, 0x10, 0x0000face);
	nvkm_wo32(ramfc, 0x30, 0xfffff902);
	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nvkm_wo32(ramfc, 0x84, 0x20400000);
	nvkm_wo32(ramfc, 0x94, 0x30000001);
	nvkm_wo32(ramfc, 0x9c, 0x00000100);
	nvkm_wo32(ramfc, 0xac, 0x0000001f);
	nvkm_wo32(ramfc, 0xe8, chan->base.chid);
	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	nvkm_done(ramfc);
	return 0;
}
326

327
/* Bring a channel online: bind its instance block and runlist engine,
 * then (if previously stopped) mark it RUNNING and commit the runlist. */
static int
gk104_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	/* Select the channel's runlist engine and set its inst address. */
	nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
	nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);

	/* Transition STOPPED -> RUNNING and schedule the channel. */
	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
		gk104_fifo_runlist_update(fifo, chan->engine);
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	}

	return 0;
}
352

353
/* Take a channel offline: mark it STOPPED, remove it from the runlist,
 * then clear its channel-control register. */
static int
gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	/* Transition RUNNING -> STOPPED (KILLED channels stay killed). */
	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
		gk104_fifo_runlist_update(fifo, chan->engine);
	}

	nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
369

370 371
/* Object functions for GPFIFO channels; shared with later chipsets
 * (non-static so e.g. gm204 can reuse it). */
struct nvkm_ofuncs
gk104_fifo_chan_ofuncs = {
	.ctor = gk104_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gk104_fifo_chan_init,
	.fini = gk104_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
381

382 383
/* Channel classes exposed to userspace by this FIFO. */
static struct nvkm_oclass
gk104_fifo_sclass[] = {
	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/
391

392
/* Create a channel context: instance block, page directory, and a
 * reference on the client's VM bound to that page directory. */
static int
gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gk104_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	/* 64KiB page directory. */
	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	/* Point the instance block at the PD and set the address limit. */
	nvkm_kmap(&base->base.gpuobj);
	nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
	nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
	nvkm_done(&base->base.gpuobj);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

425
/* Destroy a channel context; drop the VM reference before freeing the
 * page directory it was bound to. */
static void
gk104_fifo_context_dtor(struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

434 435
/* Context class for channels on this FIFO. */
static struct nvkm_oclass
gk104_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xe0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_context_ctor,
		.dtor = gk104_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

451
static inline int
B
Ben Skeggs 已提交
452
gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn)
453 454
{
	switch (engn) {
455 456 457 458 459 460 461 462
	case NVDEV_ENGINE_GR    :
	case NVDEV_ENGINE_CE2   : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	case NVDEV_ENGINE_MSENC : engn = 6; break;
463 464 465 466 467 468 469
	default:
		return -1;
	}

	return engn;
}

470
static inline struct nvkm_engine *
B
Ben Skeggs 已提交
471
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
472 473 474
{
	if (engn >= ARRAY_SIZE(fifo_engine))
		return NULL;
B
Ben Skeggs 已提交
475
	return nvkm_engine(fifo, fifo_engine[engn].subdev);
476 477
}

478
static void
479
gk104_fifo_recover_work(struct work_struct *work)
480
{
B
Ben Skeggs 已提交
481
	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
482
	struct nvkm_device *device = fifo->base.engine.subdev.device;
483
	struct nvkm_object *engine;
484 485 486 487
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

B
Ben Skeggs 已提交
488 489 490 491
	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);
492 493

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
B
Ben Skeggs 已提交
494
		engm |= 1 << gk104_fifo_engidx(fifo, engn);
495
	nvkm_mask(device, 0x002630, engm, engm);
496 497

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
B
Ben Skeggs 已提交
498
		if ((engine = (void *)nvkm_engine(fifo, engn))) {
499 500 501
			nv_ofuncs(engine)->fini(engine, false);
			WARN_ON(nv_ofuncs(engine)->init(engine));
		}
B
Ben Skeggs 已提交
502
		gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn));
503 504
	}

505 506
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
507 508 509
}

/* Begin recovery of a faulted channel: disable and kill the channel,
 * then queue the heavy engine reset/runlist rebuild to a workqueue
 * (this may be called from interrupt context). */
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		  struct gk104_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nv_subdev(engine)->name, chid);

	/* Disable the channel; KILLED prevents it ever being rescheduled. */
	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

530
/* Try to handle a software method on the given channel by forwarding it
 * to the channel's bound SW class object.  Returns 0 if handled,
 * -EINVAL otherwise (unknown channel, no SW object, or call failed). */
static int
gk104_fifo_swmthd(struct gk104_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gk104_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), NVIF_IOCTL_NEW_V0_SW_GF100);
	if (likely(bind)) {
		/* mthd==0 is just a probe for the SW object's existence. */
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}

556 557
/* Decode table for BIND_ERROR interrupt reason codes. */
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

/* Handle and log a BIND_ERROR interrupt. */
static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

580 581
/* Decode table for SCHED_ERROR interrupt reason codes. */
static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

586
/* Handle a CTXSW_TIMEOUT scheduling error: for every engine stuck
 * mid-context-switch, recover the channel involved. */
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
		/* Per-runlist context-switch status register. */
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		u32 chid = load ? next : prev;	/* channel being switched */
		(void)save;

		if (busy && chsw) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gk104_fifo_engine(fifo, engn)))
				continue;
			gk104_fifo_recover(fifo, engine, chan);
		}
	}
}

615
/* Handle and log a SCHED_ERROR interrupt; CTXSW_TIMEOUT (0x0a) triggers
 * channel recovery, other codes are only reported. */
static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
B
Ben Skeggs 已提交
637
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
638
{
639 640
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
641
	u32 stat = nvkm_rd32(device, 0x00256c);
642
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
643
	nvkm_wr32(device, 0x00256c, stat);
644 645 646
}

/* Log a dropped MMU fault (fault FIFO overflow); nothing to recover. */
static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

655 656
/* MMU fault "unit" decode: maps fault-unit ids to names and, where
 * applicable, the NVDEV engine/subdev to use for recovery (data2). */
static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
	{}
};

676 677
/* MMU fault reason-code decode. */
static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

697 698
/* Decode of HUB client ids for MMU faults (used when the fault's HUB
 * bit is set). */
static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

734 735
/* Decode of GPC client ids for MMU faults (used when the fault's HUB
 * bit is clear). */
static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

760
/* Handle an MMU fault reported by fault unit "unit": decode and log the
 * fault, unstick stalled BAR/IFB engines, and recover the faulting
 * channel when one can be identified from the engine context. */
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	/* Per-unit fault info registers: instance, VA lo/hi, status. */
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	/* HUB faults and GPC faults use different client-id namespaces. */
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		/* Writes below poke the unit to retry/unstick it. */
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	/* Walk up the context's parents looking for the channel object. */
	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case KEPLER_CHANNEL_GPFIFO_A:
		case MAXWELL_CHANNEL_GPFIFO_A:
			gk104_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

829
/* Bitfield names for the PBDMA_INTR_0 status register. */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
862

863
/* Handle PBDMA_INTR_0 for one PBDMA unit.  DEVICE interrupts (bit 23)
 * may be software methods; if handled, they're suppressed from the log.
 * All latched status is acknowledged at the end. */
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		/* Possibly a software method; don't log it if handled. */
		if (!gk104_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

896
/* Bitfield names for the PBDMA_INTR_1 (HCE) status register. */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

/* Handle PBDMA_INTR_1 for one PBDMA unit: log any latched HCE errors
 * and acknowledge them. */
static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

B
Ben Skeggs 已提交
926
static void
B
Ben Skeggs 已提交
927
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
B
Ben Skeggs 已提交
928
{
929 930
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
B
Ben Skeggs 已提交
931 932
	while (mask) {
		u32 engn = __ffs(mask);
B
Ben Skeggs 已提交
933
		wake_up(&fifo->engine[engn].wait);
934
		nvkm_wr32(device, 0x002a00, 1 << engn);
B
Ben Skeggs 已提交
935 936 937 938
		mask &= ~(1 << engn);
	}
}

B
Ben Skeggs 已提交
939
/* Non-stall engine interrupt: forward as a uevent to listeners. */
static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

945
static void
946
gk104_fifo_intr(struct nvkm_subdev *subdev)
947
{
B
Ben Skeggs 已提交
948
	struct gk104_fifo *fifo = (void *)subdev;
949 950 951
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;
952

953
	if (stat & 0x00000001) {
B
Ben Skeggs 已提交
954
		gk104_fifo_intr_bind(fifo);
955
		nvkm_wr32(device, 0x002100, 0x00000001);
956 957 958 959
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
960
		nvkm_error(subdev, "PIO_ERROR\n");
961
		nvkm_wr32(device, 0x002100, 0x00000010);
962 963 964
		stat &= ~0x00000010;
	}

965
	if (stat & 0x00000100) {
B
Ben Skeggs 已提交
966
		gk104_fifo_intr_sched(fifo);
967
		nvkm_wr32(device, 0x002100, 0x00000100);
968 969 970
		stat &= ~0x00000100;
	}

971
	if (stat & 0x00010000) {
B
Ben Skeggs 已提交
972
		gk104_fifo_intr_chsw(fifo);
973
		nvkm_wr32(device, 0x002100, 0x00010000);
974 975 976 977
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
978
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
979
		nvkm_wr32(device, 0x002100, 0x00800000);
980 981 982 983
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
984
		nvkm_error(subdev, "LB_ERROR\n");
985
		nvkm_wr32(device, 0x002100, 0x01000000);
986 987 988 989
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
B
Ben Skeggs 已提交
990
		gk104_fifo_intr_dropped_fault(fifo);
991
		nvkm_wr32(device, 0x002100, 0x08000000);
992 993 994
		stat &= ~0x08000000;
	}

995
	if (stat & 0x10000000) {
996
		u32 mask = nvkm_rd32(device, 0x00259c);
997 998
		while (mask) {
			u32 unit = __ffs(mask);
B
Ben Skeggs 已提交
999
			gk104_fifo_intr_fault(fifo, unit);
1000
			nvkm_wr32(device, 0x00259c, (1 << unit));
1001
			mask &= ~(1 << unit);
1002 1003 1004 1005 1006
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
1007
		u32 mask = nvkm_rd32(device, 0x0025a0);
1008 1009
		while (mask) {
			u32 unit = __ffs(mask);
B
Ben Skeggs 已提交
1010 1011
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
1012
			nvkm_wr32(device, 0x0025a0, (1 << unit));
1013
			mask &= ~(1 << unit);
1014 1015 1016 1017 1018
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
B
Ben Skeggs 已提交
1019
		gk104_fifo_intr_runlist(fifo);
1020 1021 1022
		stat &= ~0x40000000;
	}

1023
	if (stat & 0x80000000) {
1024
		nvkm_wr32(device, 0x002100, 0x80000000);
B
Ben Skeggs 已提交
1025
		gk104_fifo_intr_engine(fifo);
1026 1027 1028
		stat &= ~0x80000000;
	}

1029
	if (stat) {
1030
		nvkm_error(subdev, "INTR %08x\n", stat);
1031 1032
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
1033 1034
	}
}
1035

1036
static void
1037
gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
1038
{
1039
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
1040 1041
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
1042 1043 1044
}

static void
1045
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
1046
{
1047
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
1048 1049
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
1050 1051
}

1052
static const struct nvkm_event_func
1053 1054 1055 1056
gk104_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gk104_fifo_uevent_init,
	.fini = gk104_fifo_uevent_fini,
1057 1058
};

1059
int
1060
gk104_fifo_fini(struct nvkm_object *object, bool suspend)
1061
{
B
Ben Skeggs 已提交
1062
	struct gk104_fifo *fifo = (void *)object;
1063
	struct nvkm_device *device = fifo->base.engine.subdev.device;
1064 1065
	int ret;

B
Ben Skeggs 已提交
1066
	ret = nvkm_fifo_fini(&fifo->base, suspend);
1067 1068 1069 1070
	if (ret)
		return ret;

	/* allow mmu fault interrupts, even when we're not using fifo */
1071
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
1072 1073 1074
	return 0;
}

B
Ben Skeggs 已提交
1075
int
1076
gk104_fifo_init(struct nvkm_object *object)
B
Ben Skeggs 已提交
1077
{
B
Ben Skeggs 已提交
1078
	struct gk104_fifo *fifo = (void *)object;
1079 1080
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
B
Ben Skeggs 已提交
1081 1082
	int ret, i;

B
Ben Skeggs 已提交
1083
	ret = nvkm_fifo_init(&fifo->base);
B
Ben Skeggs 已提交
1084 1085 1086
	if (ret)
		return ret;

B
Ben Skeggs 已提交
1087
	/* enable all available PBDMA units */
1088 1089
	nvkm_wr32(device, 0x000204, 0xffffffff);
	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
1090
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
B
Ben Skeggs 已提交
1091

B
Ben Skeggs 已提交
1092
	/* PBDMA[n] */
B
Ben Skeggs 已提交
1093
	for (i = 0; i < fifo->spoon_nr; i++) {
1094 1095 1096
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
B
Ben Skeggs 已提交
1097 1098
	}

1099
	/* PBDMA[n].HCE */
B
Ben Skeggs 已提交
1100
	for (i = 0; i < fifo->spoon_nr; i++) {
1101 1102
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
1103 1104
	}

1105
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
B
Ben Skeggs 已提交
1106

1107 1108
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
B
Ben Skeggs 已提交
1109 1110 1111 1112
	return 0;
}

void
1113
gk104_fifo_dtor(struct nvkm_object *object)
B
Ben Skeggs 已提交
1114
{
B
Ben Skeggs 已提交
1115
	struct gk104_fifo *fifo = (void *)object;
B
Ben Skeggs 已提交
1116 1117
	int i;

B
Ben Skeggs 已提交
1118 1119
	nvkm_gpuobj_unmap(&fifo->user.bar);
	nvkm_gpuobj_ref(NULL, &fifo->user.mem);
B
Ben Skeggs 已提交
1120 1121

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
B
Ben Skeggs 已提交
1122 1123
		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[1]);
		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[0]);
B
Ben Skeggs 已提交
1124 1125
	}

B
Ben Skeggs 已提交
1126
	nvkm_fifo_destroy(&fifo->base);
B
Ben Skeggs 已提交
1127 1128 1129
}

int
1130 1131 1132
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
1133
{
1134
	struct gk104_fifo_impl *impl = (void *)oclass;
B
Ben Skeggs 已提交
1135
	struct gk104_fifo *fifo;
1136
	int ret, i;
1137

1138
	ret = nvkm_fifo_create(parent, engine, oclass, 0,
B
Ben Skeggs 已提交
1139 1140
			       impl->channels - 1, &fifo);
	*pobject = nv_object(fifo);
1141 1142 1143
	if (ret)
		return ret;

B
Ben Skeggs 已提交
1144
	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
1145

1146
	for (i = 0; i < FIFO_ENGINE_NR; i++) {
B
Ben Skeggs 已提交
1147 1148
		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[0]);
1149 1150 1151
		if (ret)
			return ret;

B
Ben Skeggs 已提交
1152 1153
		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[1]);
1154 1155
		if (ret)
			return ret;
B
Ben Skeggs 已提交
1156

B
Ben Skeggs 已提交
1157
		init_waitqueue_head(&fifo->engine[i].wait);
1158 1159
	}

B
Ben Skeggs 已提交
1160 1161
	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, impl->channels * 0x200,
			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &fifo->user.mem);
1162 1163 1164
	if (ret)
		return ret;

B
Ben Skeggs 已提交
1165 1166
	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
			      &fifo->user.bar);
1167 1168 1169
	if (ret)
		return ret;

B
Ben Skeggs 已提交
1170
	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent);
1171 1172
	if (ret)
		return ret;
1173

B
Ben Skeggs 已提交
1174 1175 1176 1177
	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = gk104_fifo_intr;
	nv_engine(fifo)->cclass = &gk104_fifo_cclass;
	nv_engine(fifo)->sclass = gk104_fifo_sclass;
1178 1179 1180
	return 0;
}

1181 1182
struct nvkm_oclass *
gk104_fifo_oclass = &(struct gk104_fifo_impl) {
B
Ben Skeggs 已提交
1183
	.base.handle = NV_ENGINE(FIFO, 0xe0),
1184 1185 1186 1187 1188
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_ctor,
		.dtor = gk104_fifo_dtor,
		.init = gk104_fifo_init,
		.fini = gk104_fifo_fini,
1189
	},
B
Ben Skeggs 已提交
1190 1191
	.channels = 4096,
}.base;