/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <engine/fifo.h>

#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

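/*
 * Driver state for the gf100 (Fermi) PFIFO: a double-buffered runlist,
 * a block of per-channel user-visible memory (USERD), and bookkeeping for
 * the fault-recovery work item.
 */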
struct gf100_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;
	u64 mask;

	struct {
		struct nvkm_gpuobj *mem[2];
		int active;
		wait_queue_head_t wait;
	} runlist;

	struct {
		struct nvkm_gpuobj *mem;
		struct nvkm_vma bar;
	} user;
	int spoon_nr;
};

struct gf100_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gf100_fifo_chan {
	struct nvkm_fifo_chan base;
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

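/*
 * Rebuild the runlist from scratch: channels in the RUNNING state are
 * written into the inactive runlist buffer, which is then submitted via
 * 0x002270/0x002274, and we wait for the update to be consumed (signalled
 * through the runlist interrupt) before returning.
 */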
static void
gf100_fifo_runlist_update(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_gpuobj *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	cur = fifo->runlist.mem[fifo->runlist.active];
	fifo->runlist.active = !fifo->runlist.active;

	nvkm_kmap(cur);
	for (i = 0, p = 0; i < 128; i++) {
		struct gf100_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING) {
			nvkm_wo32(cur, p + 0, i);
			nvkm_wo32(cur, p + 4, 0x00000004);
			p += 8;
		}
	}
	bar->flush(bar);
	nvkm_done(cur);

	nvkm_wr32(device, 0x002270, cur->addr >> 12);
	nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3));

	if (wait_event_timeout(fifo->runlist.wait,
			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist update timeout\n");
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

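/*
 * Attach an engine context to a channel: map the context into the channel's
 * VM on first use, then write its virtual address into the engine's slot in
 * the channel's instance block.
 */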
static int
gf100_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	nvkm_done(engn);
	return 0;
}

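/*
 * Detach an engine context: kick the channel off the hardware (0x002634),
 * wait for the kick to complete, then clear the engine's slot in the
 * instance block.  A kick timeout during suspend is fatal (-EBUSY).
 */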
static int
gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)parent->engine;
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct gf100_fifo_chan *chan = (void *)parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		if (suspend)
			return -EBUSY;
	}

	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, 0x00000000);
	nvkm_wo32(engn, addr + 0x04, 0x00000000);
	bar->flush(bar);
	nvkm_done(engn);
	return 0;
}

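/*
 * GPFIFO channel constructor: unpack the ioctl arguments, create the channel
 * with access to every engine a Fermi channel can address, clear its page of
 * USERD, and fill in the RAMFC (USERD address, ib buffer offset/length and a
 * set of default control values).
 */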
static int
gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct nv50_channel_gpfifo_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gf100_fifo *fifo = (void *)engine;
	struct gf100_fifo_base *base = (void *)parent;
	struct gf100_fifo_chan *chan;
	struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
	u64 usermem, ioffset, ilength;
	int ret, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %llx "
				   "ioffset %016llx ilength %08x\n",
			   args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			   args->v0.ilength);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x1000,
				       args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_CE0) |
				       (1ULL << NVDEV_ENGINE_CE1) |
				       (1ULL << NVDEV_ENGINE_MSVLD) |
				       (1ULL << NVDEV_ENGINE_MSPDEC) |
				       (1ULL << NVDEV_ENGINE_MSPPP), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gf100_fifo_context_attach;
	nv_parent(chan)->context_detach = gf100_fifo_context_detach;

	usermem = chan->base.chid * 0x1000;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x1000; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);

	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
	nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
	nvkm_wo32(ramfc, 0x10, 0x0000face);
	nvkm_wo32(ramfc, 0x30, 0xfffff902);
	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nvkm_wo32(ramfc, 0x54, 0x00000002);
	nvkm_wo32(ramfc, 0x84, 0x20400000);
	nvkm_wo32(ramfc, 0x94, 0x30000001);
	nvkm_wo32(ramfc, 0x9c, 0x00000100);
	nvkm_wo32(ramfc, 0xa4, 0x1f1f1f1f);
	nvkm_wo32(ramfc, 0xa8, 0x1f1f1f1f);
	nvkm_wo32(ramfc, 0xac, 0x0000001f);
	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	nvkm_done(ramfc);
	return 0;
}

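/*
 * Channel init/fini: bind the channel's instance block via 0x003000, toggle
 * the enable bit at 0x003004, and rebuild the runlist whenever the channel
 * moves between the STOPPED and RUNNING states.
 */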
static int
gf100_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gf100_fifo *fifo = (void *)object->engine;
	struct gf100_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);

	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nvkm_wr32(device, 0x003004 + (chid * 8), 0x001f0001);
		gf100_fifo_runlist_update(fifo);
	}

	return 0;
}

static void gf100_fifo_intr_engine(struct gf100_fifo *fifo);

static int
gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gf100_fifo *fifo = (void *)object->engine;
	struct gf100_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nvkm_mask(device, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
		gf100_fifo_runlist_update(fifo);
	}

	gf100_fifo_intr_engine(fifo);

	nvkm_wr32(device, 0x003000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
gf100_fifo_ofuncs = {
	.ctor = gf100_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gf100_fifo_chan_init,
	.fini = gf100_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
gf100_fifo_sclass[] = {
	{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/

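/*
 * FIFO context constructor: allocate the instance block and a page
 * directory, point the instance block at the page directory (PDE base and
 * VM limit at 0x200-0x20c), and reference the client's VM against it.
 */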
static int
gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gf100_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC |
				       NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	nvkm_kmap(&base->base.gpuobj);
	nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
	nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
	nvkm_done(&base->base.gpuobj);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

static void
gf100_fifo_context_dtor(struct nvkm_object *object)
{
	struct gf100_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}
static struct nvkm_oclass
gf100_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_context_ctor,
		.dtor = gf100_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

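/*
 * Translate between NVDEV engine indices and the fixed engine numbering the
 * gf100 scheduler uses, and back again.
 */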
static inline int
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case 0: engn = NVDEV_ENGINE_GR; break;
	case 1: engn = NVDEV_ENGINE_MSVLD; break;
	case 2: engn = NVDEV_ENGINE_MSPPP; break;
	case 3: engn = NVDEV_ENGINE_MSPDEC; break;
	case 4: engn = NVDEV_ENGINE_CE0; break;
	case 5: engn = NVDEV_ENGINE_CE1; break;
	default:
		return NULL;
	}

	return nvkm_engine(fifo, engn);
}

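/*
 * Deferred recovery: for every engine flagged in fifo->mask, block it from
 * being scheduled (0x002630), cycle it through fini/init, then rebuild the
 * runlist and unblock scheduling.
 */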
static void
gf100_fifo_recover_work(struct work_struct *work)
{
	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_object *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
		engm |= 1 << gf100_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
		if ((engine = (void *)nvkm_engine(fifo, engn))) {
			nv_ofuncs(engine)->fini(engine, false);
			WARN_ON(nv_ofuncs(engine)->init(engine));
		}
	}

	gf100_fifo_runlist_update(fifo);
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

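/*
 * Immediate fault response: disable the faulting channel, mark it KILLED,
 * note the engine in fifo->mask and kick off the recovery work item.
 */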
static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
		   struct gf100_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   engine->subdev.name, chid);

	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

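/*
 * Try to handle a method trapped by PBDMA in software, by forwarding it to
 * the software object (class 0x906e) bound to the channel, if any.
 */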
static int
gf100_fifo_swmthd(struct gf100_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gf100_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
	if (likely(bind)) {
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

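/*
 * CTXSW_TIMEOUT handling: scan the per-engine status registers at 0x002640
 * and recover any channel whose engine looks stuck mid context-switch.  The
 * individual status bits are only partially understood (hence the unk names).
 */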
static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gf100_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < 6; engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;

		if (busy && unk0 && unk1) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gf100_fifo_engine(fifo, engn)))
				continue;
			gf100_fifo_recover(fifo, engine, chan);
		}
	}
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;

	en = nvkm_enum_find(gf100_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gf100_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PDAEMON" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PMSPPP" },
	{ 0x0d, "PMSVLD" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PDAEMON" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};

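/*
 * Decode an MMU fault from the per-unit fault registers, translate the
 * engine/client/reason fields through the tables above, log it, and hand
 * GPFIFO channels over to the recovery path.
 */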
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case FERMI_CHANNEL_GPFIFO:
			gf100_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

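/*
 * PBDMA interrupt: give the software-method handler first shot at the
 * trapped method, log anything that remains, then ack the interrupt (the
 * 0x0400c0 write appears to discard the offending method).
 */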
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (!gf100_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x002a00);

	if (intr & 0x10000000) {
		wake_up(&fifo->runlist.wait);
		nvkm_wr32(device, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nvkm_error(subdev, "RUNLIST %08x\n", intr);
		nvkm_wr32(device, 0x002a00, intr);
	}
}

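/*
 * Per-unit engine (non-stall) interrupt: bit 0 of each nibble signals a FIFO
 * event used for user notification; any other bits are unexpected and get
 * reported, then masked off in 0x002628.
 */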
static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
	u32 inte = nvkm_rd32(device, 0x002628);
	u32 unkn;

	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_fifo_uevent(&fifo->base);
			ints &= ~1;
		}
		if (ints) {
			nvkm_error(subdev, "ENGINE %d %d %01x",
				   engn, unkn, ints);
			nvkm_mask(device, 0x002628, ints, 0);
		}
	}
}

static void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x0025a4);
	while (mask) {
		u32 unit = __ffs(mask);
		gf100_fifo_intr_engine_unit(fifo, unit);
		mask &= ~(1 << unit);
	}
}

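/*
 * Top-level PFIFO interrupt handler: each bit of 0x002100 is dispatched to
 * one of the handlers above; anything left unhandled is reported and masked
 * off so it cannot storm.
 */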
static void
gf100_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gf100_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		u32 intr = nvkm_rd32(device, 0x00252c);
		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nvkm_rd32(device, 0x00256c);
		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nvkm_rd32(device, 0x00258c);
		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_pbdma(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

static void
gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static const struct nvkm_event_func
gf100_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gf100_fifo_uevent_init,
	.fini = gf100_fifo_uevent_fini,
};

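/*
 * Engine constructor: 128 channels, two runlist buffers, 128 pages of USERD
 * mapped for user access, the non-stall event source, and the recovery work
 * item.
 */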
static int
gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gf100_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x1000, 0x1000, 0,
			      &fifo->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x1000, 0x1000, 0,
			      &fifo->runlist.mem[1]);
	if (ret)
		return ret;

	init_waitqueue_head(&fifo->runlist.wait);

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 0x1000, 0x1000, 0,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
			      &fifo->user.bar);
	if (ret)
		return ret;

	ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = gf100_fifo_intr;
	nv_engine(fifo)->cclass = &gf100_fifo_cclass;
	nv_engine(fifo)->sclass = gf100_fifo_sclass;
	return 0;
}

static void
gf100_fifo_dtor(struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)object;

	nvkm_gpuobj_unmap(&fifo->user.bar);
	nvkm_gpuobj_ref(NULL, &fifo->user.mem);
	nvkm_gpuobj_ref(NULL, &fifo->runlist.mem[0]);
	nvkm_gpuobj_ref(NULL, &fifo->runlist.mem[1]);

	nvkm_fifo_destroy(&fifo->base);
}

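/*
 * Hardware init: enable every PBDMA unit, assign engines to them (register
 * layout per the comments below), arm the per-PBDMA and global interrupts,
 * and point the hardware at the USERD area (0x002254).
 */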
static int
gf100_fifo_init(struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)object;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x000204, 0xffffffff);
	nvkm_wr32(device, 0x002204, 0xffffffff);

	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* assign engines to PBDMAs */
	if (fifo->spoon_nr >= 3) {
		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
	return 0;
}

struct nvkm_oclass *
gf100_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_ctor,
		.dtor = gf100_fifo_dtor,
		.init = gf100_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};