gk104.c 32.1 KB
Newer Older
1
/*
2
 * Copyright 2012 Red Hat Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
24
#include "gk104.h"
25

26 27 28
#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
29
#include <core/handle.h>
30
#include <subdev/bar.h>
31
#include <subdev/fb.h>
32
#include <subdev/mmu.h>
33
#include <subdev/timer.h>
34

35
#include <nvif/class.h>
36
#include <nvif/ioctl.h>
37
#include <nvif/unpack.h>
38

39
/* Table of host engines, in runlist order.  .subdev is the engine's NVDEV
 * index; .mask is the bitmask of NVDEV indices whose context lives in this
 * engine's channel (always including the engine itself, via the _() macro). */
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
	u64 subdev;
	u64 mask;
} fifo_engine[] = {
	/* GR shares its context slot with SW and the GRCE copy engine */
	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
				 (1ULL << NVDEV_ENGINE_CE2)),
	_(NVDEV_ENGINE_MSPDEC  , 0),
	_(NVDEV_ENGINE_MSPPP   , 0),
	_(NVDEV_ENGINE_MSVLD   , 0),
	_(NVDEV_ENGINE_CE0     , 0),
	_(NVDEV_ENGINE_CE1     , 0),
	_(NVDEV_ENGINE_MSENC   , 0),
};
#undef _
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)

56
/* Per-runlist state. */
struct gk104_fifo_engn {
	struct nvkm_memory *runlist[2];	/* double-buffered runlist memory */
	int cur_runlist;		/* which of the two buffers is live */
	wait_queue_head_t wait;		/* woken by the runlist-update interrupt */
};

B
Ben Skeggs 已提交
62
/* GK104 PFIFO engine state. */
struct gk104_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;	/* deferred engine-recovery work */
	u64 mask;			/* engines pending recovery, by NVDEV index bit */

	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
	struct {
		struct nvkm_memory *mem;	/* per-channel user control pages */
		struct nvkm_vma bar;		/* BAR mapping of the above */
	} user;
	int spoon_nr;	/* NOTE(review): presumably the PBDMA unit count — confirm at init site */
};

76 77 78 79
/* Per-channel FIFO context: instance block plus its page directory and VM. */
struct gk104_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;	/* page directory, pointed at by the inst block */
	struct nvkm_vm *vm;		/* address space referencing pgd */
};

82 83
/* A single GPFIFO channel. */
struct gk104_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 engine;		/* index into fifo_engine[], i.e. runlist id */
	enum {
		STOPPED,	/* not on a runlist */
		RUNNING,	/* on its engine's runlist */
		KILLED		/* removed after a fault; never re-added */
	} state;
};

92 93 94 95
/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

96
/* Rebuild and commit the runlist for the given engine/runlist index.
 *
 * Fills the inactive runlist buffer with every channel that is RUNNING on
 * this engine, submits it to the hardware, and waits (up to 2s) for the
 * pending bit to clear.  Serialised by the subdev mutex since both channel
 * init/fini and fault recovery call in here. */
static void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
	struct gk104_fifo_engn *engn = &fifo->engine[engine];
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	/* flip to the buffer not currently in use by the hardware */
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	nvkm_kmap(cur);
	for (i = 0, p = 0; i < fifo->base.max; i++) {
		struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING && chan->engine == engine) {
			/* 8-byte runlist entry: chid + reserved word */
			nvkm_wo32(cur, p + 0, i);
			nvkm_wo32(cur, p + 4, 0x00000000);
			p += 8;
		}
	}
	nvkm_done(cur);

	/* submit: base address (4KiB units) and (runlist, entry count) */
	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
	nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));

	/* wait for the hardware to consume the new runlist; the runlist
	 * interrupt wakes engn->wait */
	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
				msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", engine);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

130
/* Attach an engine context to a channel: map the context into the channel's
 * VM (if not already mapped) and write its address into the engine's slot
 * in the channel's instance block.  SW and the copy engines need no slot. */
static int
gk104_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   :
		return 0;
	case NVDEV_ENGINE_CE0:
	case NVDEV_ENGINE_CE1:
	case NVDEV_ENGINE_CE2:
		/* copy engines use the channel's inst block directly */
		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
		return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	/* point the inst-block slot at the mapped context (|4: valid flag,
	 * presumably — matches detach writing zeroes) */
	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
	nvkm_done(engn);
	return 0;
}

B
Ben Skeggs 已提交
172 173 174 175 176
/* Kick (preempt) a channel off its PBDMA and wait for the preemption to
 * complete.  Returns -EBUSY on timeout (2s). */
static int
gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
{
	struct nvkm_object *obj = (void *)chan;
	struct gk104_fifo *fifo = (void *)obj->engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* write chid to the preempt register, poll the pending bit */
	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		return -EBUSY;
	}

	return 0;
}

193
/* Detach an engine context from a channel: preempt the channel first, then
 * clear the engine's slot in the instance block.  On suspend a failed kick
 * aborts; otherwise detach proceeds regardless. */
static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct gk104_fifo_chan *chan = (void *)parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_CE0   :
	case NVDEV_ENGINE_CE1   :
	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;	/* no slot to clear */
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	ret = gk104_fifo_chan_kick(chan);
	if (ret && suspend)
		return ret;

	if (addr) {
		nvkm_kmap(engn);
		nvkm_wo32(engn, addr + 0x00, 0x00000000);
		nvkm_wo32(engn, addr + 0x04, 0x00000000);
		nvkm_done(engn);
	}

	return 0;
}

/* Construct a KEPLER_CHANNEL_GPFIFO_A channel object.
 *
 * Validates the ioctl args, picks the first requested-and-present engine,
 * allocates the channel, zeroes its user control page, and fills in the
 * RAMFC (channel state) in the instance block.  Note: nvif_unpack() is a
 * macro that assigns `ret` as a side effect, so the `else return ret;`
 * below is not an uninitialised read. */
static int
gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	struct gk104_fifo *fifo = (void *)engine;
	struct gk104_fifo_base *base = (void *)parent;
	struct gk104_fifo_chan *chan;
	struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
	u64 usermem, ioffset, ilength;
	u32 engines;
	int ret, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d "
				   "ioffset %016llx ilength %08x engine %08x\n",
			   args->v0.version, args->v0.ioffset,
			   args->v0.ilength, args->v0.engine);
		if (args->v0.vm)
			return -ENOENT;
	} else
		return ret;

	/* bitmask of fifo_engine[] entries actually present on this device */
	for (i = 0, engines = 0; i < FIFO_ENGINE_NR; i++) {
		if (!nvkm_engine(parent, fifo_engine[i].subdev))
			continue;
		engines |= (1 << i);
	}

	/* engine == 0 is a query: report available engines, return a dummy */
	if (!args->v0.engine) {
		static struct nvkm_oclass oclass = {
			.ofuncs = &nvkm_object_ofuncs,
		};
		args->v0.engine = engines;
		return nvkm_object_old(parent, engine, &oclass, NULL, 0, pobject);
	}

	engines &= args->v0.engine;
	if (!engines) {
		nvif_ioctl(parent, "unsupported engines %08x\n",
			   args->v0.engine);
		return -ENODEV;
	}
	i = __ffs(engines);	/* first supported requested engine */

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x200, 0,
				       fifo_engine[i].mask, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
	chan->engine = i;

	/* each channel gets a 0x200-byte slice of the user control area */
	usermem = chan->base.chid * 0x200;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x200; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);
	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;

	/* RAMFC setup; magic values mirror what the hardware expects */
	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
	nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
	nvkm_wo32(ramfc, 0x10, 0x0000face);
	nvkm_wo32(ramfc, 0x30, 0xfffff902);
	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nvkm_wo32(ramfc, 0x84, 0x20400000);
	nvkm_wo32(ramfc, 0x94, 0x30000001);
	nvkm_wo32(ramfc, 0x9c, 0x00000100);
	nvkm_wo32(ramfc, 0xac, 0x0000001f);
	nvkm_wo32(ramfc, 0xe8, chan->base.chid);
	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
	nvkm_done(ramfc);
	return 0;
}
320

321
/* Bring a channel online: program its HW channel-control registers and,
 * on the first init (STOPPED -> RUNNING), add it to its engine's runlist.
 * The `(chan->state = RUNNING) == RUNNING` idiom sets the state inside
 * the condition so the transition happens exactly once. */
static int
gk104_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	/* runlist select + inst block address (4KiB units), enable bit */
	nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
	nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);

	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
		gk104_fifo_runlist_update(fifo, chan->engine);
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	}

	return 0;
}
346

347
/* Take a channel offline: disable it, drop it from the runlist (only on
 * the RUNNING -> STOPPED transition) and clear its control register. */
static int
gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
		gk104_fifo_runlist_update(fifo, chan->engine);
	}

	nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
363

364 365
/* Object functions for GPFIFO channels; shared with later chipsets
 * (non-static so e.g. gm204 code can reuse it). */
struct nvkm_ofuncs
gk104_fifo_chan_ofuncs = {
	.ctor = gk104_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gk104_fifo_chan_init,
	.fini = gk104_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
375

376 377
/* Classes user space may instantiate on this FIFO. */
static struct nvkm_oclass
gk104_fifo_sclass[] = {
	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/
385

386
/* Construct the per-channel FIFO context: a 4KiB instance block, a page
 * directory, and a VM referencing the pgd; the pgd address and VA limit
 * are written into the instance block. */
static int
gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gk104_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	/* 0x0200/0x0204: pgd address; 0x0208/0x020c: VA limit */
	nvkm_kmap(&base->base.gpuobj);
	nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
	nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
	nvkm_done(&base->base.gpuobj);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

419
/* Tear down a FIFO context: drop the VM reference, free the pgd, then the
 * base context (reverse order of construction). */
static void
gk104_fifo_context_dtor(struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

428 429
/* Context class for GK104 FIFO channel groups. */
static struct nvkm_oclass
gk104_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xe0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_context_ctor,
		.dtor = gk104_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

445
/* Map an NVDEV engine index to its runlist index (position matching
 * fifo_engine[], with GR and CE2 sharing runlist 0), or -1 if the engine
 * has no runlist. */
static inline int
gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    :
	case NVDEV_ENGINE_CE2   : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	case NVDEV_ENGINE_MSENC : engn = 6; break;
	default:
		return -1;
	}

	return engn;
}

464
/* Look up the nvkm_engine behind a runlist index, or NULL if out of range. */
static inline struct nvkm_engine *
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
{
	if (engn >= ARRAY_SIZE(fifo_engine))
		return NULL;
	return nvkm_engine(fifo, fifo_engine[engn].subdev);
}

472
static void
473
gk104_fifo_recover_work(struct work_struct *work)
474
{
B
Ben Skeggs 已提交
475
	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
476
	struct nvkm_device *device = fifo->base.engine.subdev.device;
477
	struct nvkm_engine *engine;
478 479 480 481
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

B
Ben Skeggs 已提交
482 483 484 485
	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);
486 487

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
B
Ben Skeggs 已提交
488
		engm |= 1 << gk104_fifo_engidx(fifo, engn);
489
	nvkm_mask(device, 0x002630, engm, engm);
490 491

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
492 493 494
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
495
		}
B
Ben Skeggs 已提交
496
		gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn));
497 498
	}

499 500
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
501 502 503
}

/* Begin recovery after a fault: disable the faulting channel, mark it
 * KILLED so runlist rebuilds skip it, flag the engine in fifo->mask and
 * schedule the recovery work.  Called from interrupt context, hence the
 * irqsave lock and deferred work. */
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		  struct gk104_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[nv_subdev(engine)->index], chid);

	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

524
/* Try to handle a software method on a channel: look up the channel's
 * bound SW class object and invoke the method on it.  Returns 0 if the
 * method was handled, -EINVAL otherwise. */
static int
gk104_fifo_swmthd(struct gk104_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gk104_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), NVIF_IOCTL_NEW_V0_SW_GF100);
	if (likely(bind)) {
		/* mthd == 0 means "object bound" check only */
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}

550 551
/* Decode table for BIND_ERROR interrupt reason codes. */
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

/* Report a BIND_ERROR interrupt, decoding the reason code when known. */
static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

574 575
/* Decode table for SCHED_ERROR interrupt reason codes. */
static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

580
/* Handle a context-switch timeout: scan every engine's status register and
 * trigger recovery for any channel stuck mid-switch (busy && chsw). */
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		u32 chid = load ? next : prev;	/* channel being switched in/out */
		(void)save;

		if (busy && chsw) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gk104_fifo_engine(fifo, engn)))
				continue;
			gk104_fifo_recover(fifo, engine, chan);
		}
	}
}

609
/* Report a SCHED_ERROR interrupt; CTXSW_TIMEOUT (0x0a) additionally
 * triggers engine recovery. */
static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

/* Report and acknowledge a CHSW_ERROR (channel switch) interrupt. */
static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

/* Report a DROPPED_MMU_FAULT interrupt (fault queue overflow). */
static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

649 650
/* MMU fault unit decode table; .data2 carries the NVDEV index used by
 * gk104_fifo_intr_fault() to locate the engine for recovery. */
static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
	{}
};

670 671
/* MMU fault reason decode table. */
static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

691 692
/* MMU fault client decode table for HUB (non-GPC) clients. */
static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

728 729
/* MMU fault client decode table for GPC clients. */
static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

754
/* Handle an MMU fault from fault unit `unit`: decode the fault registers,
 * nudge the BAR/INSTMEM/IFB flush registers for faults from those units
 * (the zero-mask writes appear to be read-modify-write pokes to retrigger
 * the unit — NOTE(review): confirm intent), log the fault, and recover
 * any channel object found in the faulting engine context's parent chain. */
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	/* walk up the object tree looking for the owning GPFIFO channel */
	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case KEPLER_CHANNEL_GPFIFO_A:
		case MAXWELL_CHANNEL_GPFIFO_A:
			gk104_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

823
/* Bitfield names for the PBDMA_INTR_0 status register. */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
856

857
/* Handle PBDMA_INTR_0 for one PBDMA unit.  DEVICE (0x00800000) interrupts
 * may be software methods; if the SW class handles one, it is suppressed
 * from the error report.  Remaining bits are logged, then all acked. */
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (!gk104_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
		/* skip the offending method so the PBDMA can continue */
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

890
/* Bitfield names for the PBDMA_INTR_1 (host copy engine) status register. */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

/* Handle PBDMA_INTR_1 for one PBDMA unit: log masked status with two
 * auxiliary registers, then acknowledge. */
static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

B
Ben Skeggs 已提交
920
/* Runlist-update-complete interrupt: wake waiters (see
 * gk104_fifo_runlist_update) and acknowledge each runlist's bit. */
static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		u32 engn = __ffs(mask);
		wake_up(&fifo->engine[engn].wait);
		nvkm_wr32(device, 0x002a00, 1 << engn);
		mask &= ~(1 << engn);
	}
}

B
Ben Skeggs 已提交
933
/* Non-stall engine interrupt: forward as a FIFO user event. */
static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

939
/* Top-level PFIFO interrupt handler.  Pending status (0x002100) is
 * filtered by the enable mask (0x002140); each recognised bit is
 * dispatched to its handler, acknowledged by writing the bit back to
 * 0x002100, and cleared from the local copy.  Any bits still set at
 * the end are unknown: they are reported once and then masked off so
 * they cannot cause an interrupt storm.
 */
static void
gk104_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gk104_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		/* bind error */
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		/* scheduler error */
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		/* channel-switch error */
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		/* MMU fault: 0x00259c holds one bit per faulting unit;
		 * each is handled and acked individually. */
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		/* PBDMA interrupt: 0x0025a0 holds one bit per unit;
		 * both PBDMA status registers are serviced per unit. */
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		/* runlist event; acked per-engine via 0x002a00 inside
		 * the handler, so no 0x002100 write here */
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		/* engine event: ack first, then signal uevent waiters */
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		/* unknown interrupt: report, mask it off, and ack */
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}
1029

1030
static void
1031
gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
1032
{
1033
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
1034 1035
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
1036 1037 1038
}

static void
1039
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
1040
{
1041
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
1042 1043
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
1044 1045
}

1046
/* User-event hooks: init/fini toggle the top-level engine-event
 * interrupt enable; the ctor is the common fifo uevent constructor.
 */
static const struct nvkm_event_func
gk104_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gk104_fifo_uevent_init,
	.fini = gk104_fifo_uevent_fini,
};

1053
/* Shut down (or suspend) the fifo engine.  After the common teardown,
 * the MMU fault interrupt (bit 28 of 0x002140) is deliberately left
 * enabled so faults are still reported while the fifo is down.
 * Returns 0 on success or the error from nvkm_fifo_fini().
 */
int
gk104_fifo_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_fifo_fini(&fifo->base, suspend);
	if (ret)
		return ret;

	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
	return 0;
}

B
Ben Skeggs 已提交
1069
/* Bring up the fifo engine: enable every PBDMA unit the hardware
 * reports, clear and enable their interrupts, point the hardware at
 * the channel user area, then clear and enable the top-level fifo
 * interrupts.  Returns 0 on success or the error from nvkm_fifo_init().
 */
int
gk104_fifo_init(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	/* enable all available PBDMA units */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	/* re-read: the register reflects how many units actually exist */
	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	/* user area base address, in 4KiB units (offset >> 12) */
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	/* clear all pending interrupts, then enable everything except
	 * bit 31 (engine event), which uevent_init/fini toggle on demand */
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	return 0;
}

void
1107
gk104_fifo_dtor(struct nvkm_object *object)
B
Ben Skeggs 已提交
1108
{
B
Ben Skeggs 已提交
1109
	struct gk104_fifo *fifo = (void *)object;
B
Ben Skeggs 已提交
1110 1111
	int i;

1112 1113
	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);
B
Ben Skeggs 已提交
1114 1115

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
1116 1117
		nvkm_memory_del(&fifo->engine[i].runlist[1]);
		nvkm_memory_del(&fifo->engine[i].runlist[0]);
B
Ben Skeggs 已提交
1118 1119
	}

B
Ben Skeggs 已提交
1120
	nvkm_fifo_destroy(&fifo->base);
B
Ben Skeggs 已提交
1121 1122 1123
}

/* Constructor: create the base fifo object with impl->channels channels,
 * allocate two runlist buffers per engine plus a 0x200-byte-per-channel
 * user area mapped through BAR, register the user-event source, and hook
 * up the interrupt handler and context/object classes.
 * Returns 0 on success or a negative error code; on failure the partially
 * constructed object is torn down by the caller via the dtor.
 */
int
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct nvkm_device *device = (void *)parent;
	struct nvkm_bar *bar = device->bar;
	struct gk104_fifo_impl *impl = (void *)oclass;
	struct gk104_fifo *fifo;
	int ret, i;

	ret = nvkm_fifo_create(parent, engine, oclass, 0,
			       impl->channels - 1, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	/* deferred recovery of faulting channels/engines */
	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);

	/* two runlist buffers per engine (runlist[0]/runlist[1]) */
	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;

		/* woken by gk104_fifo_intr_runlist() */
		init_waitqueue_head(&fifo->engine[i].wait);
	}

	/* per-channel user area: 0x200 bytes per channel */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      impl->channels * 0x200, 0x1000,
			      true, &fifo->user.mem);
	if (ret)
		return ret;

	/* NOTE(review): 12 presumably selects a 4KiB page shift for the
	 * BAR mapping — confirm against the bar->umap contract */
	ret = bar->umap(bar, impl->channels * 0x200, 12, &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);

	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = gk104_fifo_intr;
	nv_engine(fifo)->cclass = &gk104_fifo_cclass;
	nv_engine(fifo)->sclass = gk104_fifo_sclass;
	return 0;
}

1181 1182
/* GK104 (0xe0) fifo engine descriptor: 4096 channels, wired to the
 * ctor/dtor/init/fini implementations above.
 */
struct nvkm_oclass *
gk104_fifo_oclass = &(struct gk104_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0xe0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_ctor,
		.dtor = gk104_fifo_dtor,
		.init = gk104_fifo_init,
		.fini = gk104_fifo_fini,
	},
	.channels = 4096,
}.base;