/*
 * Copyright 2012 Red Hat Inc.
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <engine/fifo.h>

#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>

struct gf100_fifo {
	struct nvkm_fifo base;

	struct work_struct fault;
	u64 mask;

	struct {
		struct nvkm_memory *mem[2];
		int active;
		wait_queue_head_t wait;
	} runlist;

	struct {
		struct nvkm_memory *mem;
		struct nvkm_vma bar;
	} user;
	int spoon_nr;
};

struct gf100_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gf100_fifo_chan {
	struct nvkm_fifo_chan base;
	enum {
		STOPPED,
		RUNNING,
		KILLED
	} state;
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

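/* Rebuild the channel runlist: write every RUNNING channel into the
 * currently inactive runlist buffer, flip buffers, submit the new list to
 * PFIFO (0x002270/0x002274) and wait, with a 2s timeout, for the hardware
 * to signal completion (see gf100_fifo_intr_runlist).
 */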
static void
gf100_fifo_runlist_update(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	cur = fifo->runlist.mem[fifo->runlist.active];
	fifo->runlist.active = !fifo->runlist.active;

	nvkm_kmap(cur);
	for (i = 0, p = 0; i < 128; i++) {
		struct gf100_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING) {
			nvkm_wo32(cur, p + 0, i);
			nvkm_wo32(cur, p + 4, 0x00000004);
			p += 8;
		}
	}
	nvkm_done(cur);

	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
	nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3));

	if (wait_event_timeout(fifo->runlist.wait,
			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist update timeout\n");
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

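/* Attach an engine context to a channel: map it into the channel's VM on
 * first use and write its GPU virtual address into the engine's slot of the
 * channel's instance block.
 */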
static int
gf100_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
				      NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
	nvkm_done(engn);
	return 0;
}

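/* Detach an engine context: kick the channel (write its chid to 0x002634
 * and wait for it to read back, 2s timeout), then clear the engine's slot
 * in the channel's instance block.
 */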
static int
gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)parent->engine;
	struct gf100_fifo_base *base = (void *)parent->parent;
	struct gf100_fifo_chan *chan = (void *)parent;
	struct nvkm_gpuobj *engn = &base->base.gpuobj;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		if (suspend)
			return -EBUSY;
	}

	nvkm_kmap(engn);
	nvkm_wo32(engn, addr + 0x00, 0x00000000);
	nvkm_wo32(engn, addr + 0x04, 0x00000000);
	nvkm_done(engn);
	return 0;
}

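/* GPFIFO channel constructor: unpack the creation args, clear the channel's
 * page in the user-visible area and fill in RAMFC with the user page
 * address, the ib buffer offset/size order and (presumably fixed) defaults.
 */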
static int
gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct fermi_channel_gpfifo_v0 v0;
	} *args = data;
	struct gf100_fifo *fifo = (void *)engine;
	struct gf100_fifo_base *base = (void *)parent;
	struct gf100_fifo_chan *chan;
	struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
	u64 usermem, ioffset, ilength;
	int ret, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d "
				   "ioffset %016llx ilength %08x\n",
			   args->v0.version, args->v0.ioffset,
			   args->v0.ilength);
		if (args->v0.vm)
			return -ENOENT;
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x1000, 0,
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_CE0) |
				       (1ULL << NVDEV_ENGINE_CE1) |
				       (1ULL << NVDEV_ENGINE_MSVLD) |
				       (1ULL << NVDEV_ENGINE_MSPDEC) |
				       (1ULL << NVDEV_ENGINE_MSPPP), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gf100_fifo_context_attach;
	nv_parent(chan)->context_detach = gf100_fifo_context_detach;

	usermem = chan->base.chid * 0x1000;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x1000; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);
	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;

	nvkm_kmap(ramfc);
	nvkm_wo32(ramfc, 0x08, lower_32_bits(usermem));
	nvkm_wo32(ramfc, 0x0c, upper_32_bits(usermem));
	nvkm_wo32(ramfc, 0x10, 0x0000face);
	nvkm_wo32(ramfc, 0x30, 0xfffff902);
	nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nvkm_wo32(ramfc, 0x54, 0x00000002);
	nvkm_wo32(ramfc, 0x84, 0x20400000);
	nvkm_wo32(ramfc, 0x94, 0x30000001);
	nvkm_wo32(ramfc, 0x9c, 0x00000100);
	nvkm_wo32(ramfc, 0xa4, 0x1f1f1f1f);
	nvkm_wo32(ramfc, 0xa8, 0x1f1f1f1f);
	nvkm_wo32(ramfc, 0xac, 0x0000001f);
	nvkm_wo32(ramfc, 0xb8, 0xf8000000);
	nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
	nvkm_done(ramfc);
	return 0;
}

static int
gf100_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gf100_fifo *fifo = (void *)object->engine;
	struct gf100_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);

	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nvkm_wr32(device, 0x003004 + (chid * 8), 0x001f0001);
		gf100_fifo_runlist_update(fifo);
	}

	return 0;
}

static void gf100_fifo_intr_engine(struct gf100_fifo *fifo);

static int
gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gf100_fifo *fifo = (void *)object->engine;
	struct gf100_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nvkm_mask(device, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
		gf100_fifo_runlist_update(fifo);
	}

	gf100_fifo_intr_engine(fifo);

	nvkm_wr32(device, 0x003000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
gf100_fifo_ofuncs = {
	.ctor = gf100_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gf100_fifo_chan_init,
	.fini = gf100_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
gf100_fifo_sclass[] = {
	{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/

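/* Per-channel instance block: allocate a page directory, point the instance
 * block at it (0x0200-0x020c) and reference the client's VM on top of it.
 */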
static int
gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_engine(engine)->subdev.device;
	struct gf100_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC |
				       NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
	if (ret)
		return ret;

	nvkm_kmap(&base->base.gpuobj);
	nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
	nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
	nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
	nvkm_done(&base->base.gpuobj);

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

static void
gf100_fifo_context_dtor(struct nvkm_object *object)
{
	struct gf100_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_del(&base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}

static struct nvkm_oclass
gf100_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_context_ctor,
		.dtor = gf100_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

static inline int
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case 0: engn = NVDEV_ENGINE_GR; break;
	case 1: engn = NVDEV_ENGINE_MSVLD; break;
	case 2: engn = NVDEV_ENGINE_MSPPP; break;
	case 3: engn = NVDEV_ENGINE_MSPDEC; break;
	case 4: engn = NVDEV_ENGINE_CE0; break;
	case 5: engn = NVDEV_ENGINE_CE1; break;
	default:
		return NULL;
	}

	return nvkm_engine(fifo, engn);
}

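/* Deferred fault recovery: for each engine flagged in fifo->mask, block it
 * from the scheduler (0x002630), reset it with a subdev fini/init cycle,
 * then rebuild the runlist (the killed channel drops out of it).
 */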
static void
gf100_fifo_recover_work(struct work_struct *work)
{
	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
		engm |= 1 << gf100_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	gf100_fifo_runlist_update(fifo);
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

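/* Immediate fault response: disable the channel, mark it KILLED, flag the
 * engine in fifo->mask and schedule the recovery worker above.
 */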
static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
		   struct gf100_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[engine->subdev.index], chid);

	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}

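/* Try to handle a method in software: look up the channel's GF100 SW class
 * object and call it; returns -EINVAL if nothing consumed the method.
 */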
static int
gf100_fifo_swmthd(struct gf100_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gf100_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	bind = nvkm_namedb_get_class(nv_namedb(chan), NVIF_IOCTL_NEW_V0_SW_GF100);
	if (likely(bind)) {
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gf100_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < 6; engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;

		if (busy && unk0 && unk1) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gf100_fifo_engine(fifo, engn)))
				continue;
			gf100_fifo_recover(fifo, engine, chan);
		}
	}
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;

	en = nvkm_enum_find(gf100_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gf100_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PDAEMON" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PMSPPP" },
	{ 0x0d, "PMSVLD" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PDAEMON" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};

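/* MMU fault handler: decode the faulting unit/client/reason with the tables
 * above, log it, and recover the offending GPFIFO channel if one can be
 * found via the faulting engine's context.
 */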
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case FERMI_CHANNEL_GPFIFO:
			gf100_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

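/* PBDMA interrupt: if the EMPTY_SUBC bit is set, first try to execute the
 * method in software; whatever remains is logged, then the unit is acked.
 */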
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (!gf100_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x002a00);

	if (intr & 0x10000000) {
		wake_up(&fifo->runlist.wait);
		nvkm_wr32(device, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nvkm_error(subdev, "RUNLIST %08x\n", intr);
		nvkm_wr32(device, 0x002a00, intr);
	}
}

static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
	u32 inte = nvkm_rd32(device, 0x002628);
	u32 unkn;

	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_fifo_uevent(&fifo->base);
			ints &= ~1;
		}
		if (ints) {
			nvkm_error(subdev, "ENGINE %d %d %01x",
				   engn, unkn, ints);
			nvkm_mask(device, 0x002628, ints, 0);
		}
	}
}

static void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x0025a4);
	while (mask) {
		u32 unit = __ffs(mask);
		gf100_fifo_intr_engine_unit(fifo, unit);
		mask &= ~(1 << unit);
	}
}

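/* Top-level PFIFO interrupt handler: dispatch each pending status bit to its
 * handler and mask off anything unexpected after logging it.
 */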
static void
gf100_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gf100_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		u32 intr = nvkm_rd32(device, 0x00252c);
		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nvkm_rd32(device, 0x00256c);
		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nvkm_rd32(device, 0x00258c);
		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_pbdma(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

static void
gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static const struct nvkm_event_func
gf100_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gf100_fifo_uevent_init,
	.fini = gf100_fifo_uevent_fini,
};

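/* Engine constructor: allocate the double-buffered runlist and the
 * per-channel user area (mapped through BAR1), set up the uevent source and
 * install the interrupt handler.
 */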
static int
gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct nvkm_device *device = (void *)parent;
	struct nvkm_bar *bar = device->bar;
	struct gf100_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[1]);
	if (ret)
		return ret;

	init_waitqueue_head(&fifo->runlist.wait);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
			      0x1000, false, &fifo->user.mem);
	if (ret)
		return ret;

	ret = bar->umap(bar, 128 * 0x1000, 12, &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);

	ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = gf100_fifo_intr;
	nv_engine(fifo)->cclass = &gf100_fifo_cclass;
	nv_engine(fifo)->sclass = gf100_fifo_sclass;
	return 0;
}

static void
gf100_fifo_dtor(struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)object;

	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);
	nvkm_memory_del(&fifo->runlist.mem[0]);
	nvkm_memory_del(&fifo->runlist.mem[1]);

	nvkm_fifo_destroy(&fifo->base);
}

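/* Hardware init: enable the PBDMA units, route engines to PBDMAs, program
 * the BAR1 address of the user area (0x002254) and unmask the interrupts
 * handled above.
 */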
static int
gf100_fifo_init(struct nvkm_object *object)
{
	struct gf100_fifo *fifo = (void *)object;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, 0x000204, 0xffffffff);
	nvkm_wr32(device, 0x002204, 0xffffffff);

	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* assign engines to PBDMAs */
	if (fifo->spoon_nr >= 3) {
		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
	return 0;
}

struct nvkm_oclass *
gf100_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0xc0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf100_fifo_ctor,
		.dtor = gf100_fifo_dtor,
		.init = gf100_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};