gf100.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "changf100.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <engine/sw.h>

#include <nvif/class.h>

static void
gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

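/*
 * Rebuild and submit the channel runlist. The two runlist buffers are used
 * alternately: the inactive one is filled with one 8-byte entry per active
 * channel, its address is written to 0x002270, and the write of the entry
 * count to 0x002274 appears to kick processing. We then wait (woken by the
 * runlist interrupt) for the pending flag in 0x00227c to clear, or time out
 * after two seconds.
 */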
void
gf100_fifo_runlist_update(struct gf100_fifo *fifo)
{
	struct gf100_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int nr = 0;

	mutex_lock(&subdev->mutex);
	cur = fifo->runlist.mem[fifo->runlist.active];
	fifo->runlist.active = !fifo->runlist.active;

	nvkm_kmap(cur);
	list_for_each_entry(chan, &fifo->chan, head) {
		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
		nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
		nr++;
	}
	nvkm_done(cur);

	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
	nvkm_wr32(device, 0x002274, 0x01f00000 | nr);

	if (wait_event_timeout(fifo->runlist.wait,
			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist update timeout\n");
	mutex_unlock(&subdev->mutex);
}

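/*
 * Translate between NVKM engine indices and the fixed engine slots PFIFO
 * uses on GF100 (0=GR, 1=MSVLD, 2=MSPPP, 3=MSPDEC, 4=CE0, 5=CE1), in both
 * directions.
 */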
static inline int
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVKM_ENGINE_GR    : engn = 0; break;
	case NVKM_ENGINE_MSVLD : engn = 1; break;
	case NVKM_ENGINE_MSPPP : engn = 2; break;
	case NVKM_ENGINE_MSPDEC: engn = 3; break;
	case NVKM_ENGINE_CE0   : engn = 4; break;
	case NVKM_ENGINE_CE1   : engn = 5; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;

	switch (engn) {
	case 0: engn = NVKM_ENGINE_GR; break;
	case 1: engn = NVKM_ENGINE_MSVLD; break;
	case 2: engn = NVKM_ENGINE_MSPPP; break;
	case 3: engn = NVKM_ENGINE_MSPDEC; break;
	case 4: engn = NVKM_ENGINE_CE0; break;
	case 5: engn = NVKM_ENGINE_CE1; break;
	default:
		return NULL;
	}

	return nvkm_device_engine(device, engn);
}

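/*
 * Deferred half of engine recovery, run from the workqueue. For every engine
 * flagged in fifo->mask: block its runlist processing (0x002630 presumably
 * acts as a per-engine disable mask), reset the engine by cycling its subdev
 * fini/init, rebuild the runlist, then re-enable.
 */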
static void
gf100_fifo_recover_work(struct work_struct *work)
{
	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
		engm |= 1 << gf100_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	gf100_fifo_runlist_update(fifo);
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

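/*
 * Immediate half of recovery, called with fifo->base.lock held: disable the
 * faulting channel (bit 0 of 0x003004 + chid*8 appears to be a channel
 * enable), drop it from the software channel list, and schedule the deferred
 * engine reset above.
 */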
static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
		   struct gf100_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[engine->subdev.index], chid);
	assert_spin_locked(&fifo->base.lock);

	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	list_del_init(&chan->head);
	chan->killed = true;

	fifo->mask |= 1ULL << engine->subdev.index;
	schedule_work(&fifo->fault);
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

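/*
 * SCHED_ERROR 0x0a (CTXSW_TIMEOUT): scan the per-engine status registers for
 * an engine that appears stuck busy in a context switch, and recover the
 * channel it is stuck on.
 */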
static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gf100_fifo_chan *chan;
	unsigned long flags;
	u32 engn;

	spin_lock_irqsave(&fifo->base.lock, flags);
	for (engn = 0; engn < 6; engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;

		if (busy && unk0 && unk1) {
			list_for_each_entry(chan, &fifo->chan, head) {
				if (chan->base.chid == chid) {
					engine = gf100_fifo_engine(fifo, engn);
					if (!engine)
						break;
					gf100_fifo_recover(fifo, engine, chan);
					break;
				}
			}
		}
	}
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;

	en = nvkm_enum_find(gf100_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gf100_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

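/* Decode tables for the MMU fault status fields logged below. */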
static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
	{ 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PDAEMON" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PMSPPP" },
	{ 0x0d, "PMSVLD" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PDAEMON" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};

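/*
 * Decode and log an MMU fault from the per-unit fault info registers
 * (0x002800 + unit*0x10: instance, fault address, status). For BAR1/BAR3/IFB
 * faults the bind register is rewritten (presumably to nudge the unit back
 * into action); for engine faults the offending channel is recovered.
 */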
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	const struct nvkm_enum *er, *eu, *ec;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char gpcid[8] = "";

	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, eu->data2);
			break;
		}
	}

	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", chan ? chan->chid : -1,
		   (u64)inst << 12,
		   chan ? chan->object.client->name : "unknown");

	if (engine && chan)
		gf100_fifo_recover(fifo, engine, (void *)chan);
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

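/*
 * PBDMA (host) interrupt: fetch the offending method/subchannel/data, let
 * the SW engine try to handle bit 23 (reported as EMPTY_SUBC) as a software
 * method, log whatever remains, then ack the method and the interrupt
 * status.
 */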
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

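/*
 * Runlist interrupt: bit 28 signals that runlist processing finished, which
 * wakes any waiter in gf100_fifo_runlist_update(); anything else is
 * unexpected and just logged.
 */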
static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x002a00);

	if (intr & 0x10000000) {
		wake_up(&fifo->runlist.wait);
		nvkm_wr32(device, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nvkm_error(subdev, "RUNLIST %08x\n", intr);
		nvkm_wr32(device, 0x002a00, intr);
	}
}

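/*
 * Per-unit engine (non-stall) interrupt: bit 0 of each nibble appears to be
 * the completion event used for uevents; any unknown bits are logged and
 * masked off in 0x002628 so they cannot storm.
 */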
static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
	u32 inte = nvkm_rd32(device, 0x002628);
	u32 unkn;

	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_fifo_uevent(&fifo->base);
			ints &= ~1;
		}
		if (ints) {
			nvkm_error(subdev, "ENGINE %d %d %01x",
				   engn, unkn, ints);
			nvkm_mask(device, 0x002628, ints, 0);
		}
	}
}

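/* Dispatch the engine (non-stall) interrupt to each unit flagged in 0x0025a4. */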
void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x0025a4);
	while (mask) {
		u32 unit = __ffs(mask);
		gf100_fifo_intr_engine_unit(fifo, unit);
		mask &= ~(1 << unit);
	}
}

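/*
 * Top-level PFIFO interrupt handler: read the status (0x002100) masked by
 * the enable register (0x002140) and dispatch each known bit to the handlers
 * above, acking as we go; any leftover bits are logged and masked off to
 * avoid an interrupt storm.
 */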
static void
gf100_fifo_intr(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		u32 intr = nvkm_rd32(device, 0x00252c);
		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nvkm_rd32(device, 0x00256c);
		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nvkm_rd32(device, 0x00258c);
		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_pbdma(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

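/*
 * One-time setup: allocate the two runlist buffers that
 * gf100_fifo_runlist_update() flips between, plus backing memory for the
 * per-channel user pages of all 128 channels, and map the latter through
 * BAR so it can be exposed to userspace.
 */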
static int
gf100_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[1]);
	if (ret)
		return ret;

	init_waitqueue_head(&fifo->runlist.wait);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
			      0x1000, false, &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
	return 0;
}

static void
gf100_fifo_fini(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	flush_work(&fifo->fault);
}

static void
gf100_fifo_init(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

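	/* enable all PBDMA units (0x000204/0x002204 appear to be per-unit
	 * enable masks), then count how many units the hardware reports */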
	nvkm_wr32(device, 0x000204, 0xffffffff);
	nvkm_wr32(device, 0x002204, 0xffffffff);

	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* assign engines to PBDMAs */
	if (fifo->spoon_nr >= 3) {
		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
}

static void *
gf100_fifo_dtor(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);
	nvkm_memory_del(&fifo->runlist.mem[0]);
	nvkm_memory_del(&fifo->runlist.mem[1]);
	return fifo;
}

static const struct nvkm_fifo_func
gf100_fifo = {
	.dtor = gf100_fifo_dtor,
	.oneinit = gf100_fifo_oneinit,
	.init = gf100_fifo_init,
	.fini = gf100_fifo_fini,
	.intr = gf100_fifo_intr,
	.uevent_init = gf100_fifo_uevent_init,
	.uevent_fini = gf100_fifo_uevent_fini,
	.chan = {
		&gf100_fifo_gpfifo_oclass,
		NULL
	},
};

int
gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	struct gf100_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	INIT_LIST_HEAD(&fifo->chan);
	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
}