gf100.c 17.8 KB
Newer Older
1
/*
2
 * Copyright 2012 Red Hat Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
24 25
#include "gf100.h"
#include "changf100.h"
26

27 28
#include <core/client.h>
#include <core/enum.h>
29
#include <core/gpuobj.h>
30
#include <subdev/bar.h>
31
#include <engine/sw.h>
32

33
#include <nvif/class.h>
34

35
/* Enable delivery of non-stall (uevent) interrupts by setting bit 31 of
 * the PFIFO interrupt enable mask; gf100_fifo_intr() dispatches that bit
 * to gf100_fifo_intr_engine().
 */
static void
gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}
41

42
/* Disable delivery of non-stall (uevent) interrupts by clearing bit 31
 * of the PFIFO interrupt enable mask.
 */
static void
gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
48

49
/* Rebuild the runlist from the software channel list and hand it to the
 * hardware.  Serialised by the subdev mutex; blocks up to two seconds
 * waiting for the hardware to consume the update.
 */
void
gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
{
	struct gf100_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int nr = 0;
	int target;

	mutex_lock(&subdev->mutex);
	/* Double-buffered: write into the buffer the hardware is not
	 * currently using, then flip the active index. */
	cur = fifo->runlist.mem[fifo->runlist.active];
	fifo->runlist.active = !fifo->runlist.active;

	/* One 8-byte entry per channel: chid, then 0x00000004 (entry
	 * flags — meaning not established here). */
	nvkm_kmap(cur);
	list_for_each_entry(chan, &fifo->chan, head) {
		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
		nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
		nr++;
	}
	nvkm_done(cur);

	/* Target aperture: 0x3 when the buffer lives in host memory,
	 * 0x0 otherwise (VRAM). */
	target = (nvkm_memory_target(cur) == NVKM_MEM_TARGET_HOST) ? 0x3 : 0x0;

	/* Program runlist base (4KiB units) + target, then the entry
	 * count, which kicks off the update. */
	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, 0x01f00000 | nr);

	/* Wait for 0x00227c bit 20 (presumably a pending/busy flag) to
	 * clear; the waitqueue is woken from gf100_fifo_intr_runlist(). */
	if (wait_event_timeout(fifo->runlist.wait,
			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist update timeout\n");
	mutex_unlock(&subdev->mutex);
}
83

84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99
void
gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_del_init(&chan->head);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{
	mutex_lock(&fifo->base.engine.subdev.mutex);
	list_add_tail(&chan->head, &fifo->chan);
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

100
static inline int
B
Ben Skeggs 已提交
101
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
102 103
{
	switch (engn) {
104 105 106 107 108 109
	case NVKM_ENGINE_GR    : engn = 0; break;
	case NVKM_ENGINE_MSVLD : engn = 1; break;
	case NVKM_ENGINE_MSPPP : engn = 2; break;
	case NVKM_ENGINE_MSPDEC: engn = 3; break;
	case NVKM_ENGINE_CE0   : engn = 4; break;
	case NVKM_ENGINE_CE1   : engn = 5; break;
110 111 112 113 114 115 116
	default:
		return -1;
	}

	return engn;
}

117
static inline struct nvkm_engine *
B
Ben Skeggs 已提交
118
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
119
{
120 121
	struct nvkm_device *device = fifo->base.engine.subdev.device;

122
	switch (engn) {
123 124 125 126 127 128
	case 0: engn = NVKM_ENGINE_GR; break;
	case 1: engn = NVKM_ENGINE_MSVLD; break;
	case 2: engn = NVKM_ENGINE_MSPPP; break;
	case 3: engn = NVKM_ENGINE_MSPDEC; break;
	case 4: engn = NVKM_ENGINE_CE0; break;
	case 5: engn = NVKM_ENGINE_CE1; break;
129 130 131 132
	default:
		return NULL;
	}

133
	return nvkm_device_engine(device, engn);
134 135 136
}

static void
137
gf100_fifo_recover_work(struct work_struct *work)
138
{
B
Ben Skeggs 已提交
139
	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
140
	struct nvkm_device *device = fifo->base.engine.subdev.device;
141
	struct nvkm_engine *engine;
142 143 144 145
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

B
Ben Skeggs 已提交
146 147 148 149
	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);
150 151

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
B
Ben Skeggs 已提交
152
		engm |= 1 << gf100_fifo_engidx(fifo, engn);
153
	nvkm_mask(device, 0x002630, engm, engm);
154 155

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
156 157 158
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
159 160 161
		}
	}

162
	gf100_fifo_runlist_commit(fifo);
163 164
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
165 166 167
}

/* Begin recovery after <engine> faulted on <chan>: stop the channel,
 * mark it dead, and schedule the heavyweight engine reset performed by
 * gf100_fifo_recover_work().  Caller must hold fifo->base.lock.
 */
static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
		   struct gf100_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[engine->subdev.index], chid);
	assert_spin_locked(&fifo->base.lock);

	/* Clear the channel's enable bit in its control register. */
	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	/* Drop it from the software runlist and poison it against reuse. */
	list_del_init(&chan->head);
	chan->killed = true;

	/* Accumulate the engine for the recovery worker. */
	fifo->mask |= 1ULL << engine->subdev.index;
	schedule_work(&fifo->fault);
}

187 188
/* Decode table for SCHED_ERROR codes (read from 0x00254c); only
 * CTXSW_TIMEOUT gets special handling in gf100_fifo_intr_sched().
 */
static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

193
/* CTXSW_TIMEOUT handling: scan the six engine status registers for an
 * engine stuck busy mid context-switch, and recover the channel it is
 * stuck on.
 */
static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gf100_fifo_chan *chan;
	unsigned long flags;
	u32 engn;

	spin_lock_irqsave(&fifo->base.lock, flags);
	for (engn = 0; engn < 6; engn++) {
		/* Per-engine scheduler status; field meanings are only
		 * partially understood (hence unk0/unk1). */
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;	/* decoded but currently unused */

		if (busy && unk0 && unk1) {
			/* Find the channel for this chid; recover it if
			 * the scheduler slot maps to a known engine. */
			list_for_each_entry(chan, &fifo->chan, head) {
				if (chan->base.chid == chid) {
					engine = gf100_fifo_engine(fifo, engn);
					if (!engine)
						break;
					gf100_fifo_recover(fifo, engine, chan);
					break;
				}
			}
		}
	}
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

B
Ben Skeggs 已提交
227
static void
B
Ben Skeggs 已提交
228
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
B
Ben Skeggs 已提交
229
{
230 231
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
232
	u32 intr = nvkm_rd32(device, 0x00254c);
B
Ben Skeggs 已提交
233
	u32 code = intr & 0x000000ff;
234
	const struct nvkm_enum *en;
B
Ben Skeggs 已提交
235

236
	en = nvkm_enum_find(gf100_fifo_sched_reason, code);
B
Ben Skeggs 已提交
237

238
	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
239 240 241

	switch (code) {
	case 0x0a:
B
Ben Skeggs 已提交
242
		gf100_fifo_intr_sched_ctxsw(fifo);
243 244 245 246
		break;
	default:
		break;
	}
B
Ben Skeggs 已提交
247 248
}

249 250
/* MMU fault: which unit faulted.  Where data2 names an engine/subdev,
 * gf100_fifo_intr_fault() uses it to resolve an engine for recovery,
 * or to poke the matching flush register for BAR/IFB faults.
 */
static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
	{ 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{}
};

266 267
/* MMU fault: why the access faulted. */
static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

280 281
/* MMU fault: HUB client ids (used when the hub bit of the fault status
 * is set).
 */
static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PMSPPP" },
	{ 0x0d, "PMSVLD" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PMU" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};

299 300
/* MMU fault: GPC client ids (used when the hub bit of the fault status
 * is clear).
 */
static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};

308
/* Decode and report an MMU fault raised by fault unit <unit>, then
 * attempt channel recovery when the fault resolves to an engine.
 */
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	/* Per-unit fault info registers: instance, virtual address
	 * lo/hi, and status. */
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	const struct nvkm_enum *er, *eu, *ec;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char gpcid[8] = "";

	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
	/* The client id namespace differs for HUB vs GPC clients. */
	if (hub) {
		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		/* BAR/IFB faults are acked by touching the respective
		 * flush registers; other units resolve to an engine
		 * that gets recovered below. */
		case NVKM_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, eu->data2);
			break;
		}
	}

	/* inst is the channel's instance address in 4KiB units. */
	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", chan ? chan->chid : -1,
		   (u64)inst << 12,
		   chan ? chan->object.client->name : "unknown");

	if (engine && chan)
		gf100_fifo_recover(fifo, engine, (void *)chan);
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

370 371
/* Known PBDMA interrupt status bits; unnamed bits are reported raw by
 * gf100_fifo_intr_pbdma().
 */
static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};
377

378
/* Handle a PBDMA unit interrupt: let the software-method engine try to
 * consume EMPTY_SUBC faults first, report whatever remains, then ack
 * the method and the status bits.
 */
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	/* Per-unit registers: status, faulting method addr/data, chid. */
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	u32 show= stat;
	char msg[128];

	/* EMPTY_SUBC may actually be a software method; suppress the
	 * report if the sw engine handled it. */
	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	/* Ack (drop) the offending method and clear the status bits. */
	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

B
Ben Skeggs 已提交
416
/* Runlist interrupt: bit 28 signals a completed runlist update (wakes
 * the waiter in gf100_fifo_runlist_commit()); any other bit is
 * unexpected and just reported + acked.
 */
static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x002a00);

	if (intr & 0x10000000) {
		wake_up(&fifo->runlist.wait);
		nvkm_wr32(device, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nvkm_error(subdev, "RUNLIST %08x\n", intr);
		nvkm_wr32(device, 0x002a00, intr);
	}
}

B
Ben Skeggs 已提交
435
/* Per-engine-unit interrupt: the status register packs eight 4-bit
 * groups; bit 0 of each enabled group raises a uevent, any other
 * enabled bit is reported and then masked off to prevent a storm.
 */
static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
	u32 inte = nvkm_rd32(device, 0x002628);
	u32 unkn;

	/* Ack everything up front. */
	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_fifo_uevent(&fifo->base);
			ints &= ~1;
		}
		if (ints) {
			nvkm_error(subdev, "ENGINE %d %d %01x",
				   engn, unkn, ints);
			nvkm_mask(device, 0x002628, ints, 0);
		}
	}
}

460
void
B
Ben Skeggs 已提交
461
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
B
Ben Skeggs 已提交
462
{
463 464
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x0025a4);
B
Ben Skeggs 已提交
465 466
	while (mask) {
		u32 unit = __ffs(mask);
B
Ben Skeggs 已提交
467
		gf100_fifo_intr_engine_unit(fifo, unit);
B
Ben Skeggs 已提交
468 469 470 471
		mask &= ~(1 << unit);
	}
}

472
/* Top-level PFIFO interrupt handler: read the pending status (filtered
 * by the enable mask), dispatch every known bit, and mask off anything
 * left unrecognised so it cannot storm.
 */
static void
gf100_fifo_intr(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		/* Unknown interrupt source; report the detail register
		 * and ack. */
		u32 intr = nvkm_rd32(device, 0x00252c);
		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		/* SCHED_ERROR (may trigger channel recovery). */
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nvkm_rd32(device, 0x00256c);
		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nvkm_rd32(device, 0x00258c);
		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		/* MMU fault: one bit per fault unit in 0x00259c. */
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		/* PBDMA interrupt: one bit per unit in 0x0025a0. */
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_pbdma(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		/* Unhandled bits: report once, then disable them. */
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}
546

547
/* One-time setup: allocate both runlist buffers, the USERD backing
 * memory for all 128 channels, and a BAR mapping for it.
 * Returns 0 on success or a negative error code.
 */
static int
gf100_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	/* Double-buffered runlist: two 4KiB, 4KiB-aligned buffers. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[1]);
	if (ret)
		return ret;

	init_waitqueue_head(&fifo->runlist.wait);

	/* USERD: one 4KiB page per channel, 128 channels. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
			      0x1000, false, &fifo->user.mem);
	if (ret)
		return ret;

	/* Map USERD through the BAR so userspace/host can reach it. */
	ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
	return 0;
}

/* Teardown/suspend hook: make sure any scheduled fault-recovery work
 * has completed before the engine goes away.
 */
static void
gf100_fifo_fini(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	flush_work(&fifo->fault);
}

/* Hardware init: bring up the PBDMA units, route engines to them,
 * program USERD, and enable interrupts.
 */
static void
gf100_fifo_init(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	/* Enable all PBDMA units, then count how many the HW exposes. */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	nvkm_wr32(device, 0x002204, 0xffffffff);

	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* assign engines to PBDMAs */
	if (fifo->spoon_nr >= 3) {
		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n]: clear stale state, ack and enable interrupts. */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
	/* Point the hardware at USERD via its BAR offset (4KiB units). */
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	/* Ack stale interrupts, enable all but bit 31 (uevents are
	 * enabled on demand via gf100_fifo_uevent_init()). */
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
}

625 626
/* Destructor: release the BAR mapping and memory allocations made in
 * gf100_fifo_oneinit(), and return the object for the core to kfree().
 */
static void *
gf100_fifo_dtor(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);
	nvkm_memory_del(&fifo->runlist.mem[0]);
	nvkm_memory_del(&fifo->runlist.mem[1]);
	return fifo;
}
635

636
/* Method table hooking this implementation into the nvkm fifo core. */
static const struct nvkm_fifo_func
gf100_fifo = {
	.dtor = gf100_fifo_dtor,
	.oneinit = gf100_fifo_oneinit,
	.init = gf100_fifo_init,
	.fini = gf100_fifo_fini,
	.intr = gf100_fifo_intr,
	.uevent_init = gf100_fifo_uevent_init,
	.uevent_fini = gf100_fifo_uevent_fini,
	.chan = {
		&gf100_fifo_gpfifo_oclass,
		NULL
	},
};

651 652
/* Constructor: allocate the gf100 PFIFO object (128 channels) and
 * register it with the core.  Returns 0 or a negative error code;
 * *pfifo is set before the ctor so the core can tear down on failure.
 */
int
gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	struct gf100_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	INIT_LIST_HEAD(&fifo->chan);
	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
}