/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gf100.h"
#include "changf100.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <engine/sw.h>

#include <nvif/class.h>

static void
gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

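/* Rebuild the channel runlist in the currently-inactive buffer of the
 * double-buffered pair, flip which buffer is active, submit its base and
 * entry count to the hardware (0x002270/0x002274), and wait for the runlist
 * engine to finish processing it.
 */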
void
gf100_fifo_runlist_update(struct gf100_fifo *fifo)
{
	struct gf100_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int nr = 0;

	mutex_lock(&subdev->mutex);
	cur = fifo->runlist.mem[fifo->runlist.active];
	fifo->runlist.active = !fifo->runlist.active;

	nvkm_kmap(cur);
	list_for_each_entry(chan, &fifo->chan, head) {
		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
		nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
		nr++;
	}
	nvkm_done(cur);

	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
	nvkm_wr32(device, 0x002274, 0x01f00000 | nr);

	if (wait_event_timeout(fifo->runlist.wait,
			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist update timeout\n");
	mutex_unlock(&subdev->mutex);
}

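/* The two helpers below translate between NVDEV engine indices and the FIFO
 * scheduler's fixed engine slots (GR=0, MSVLD=1, MSPPP=2, MSPDEC=3, CE0=4,
 * CE1=5); they are inverses of each other.
 */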
static inline int
gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	default:
		return -1;
	}

	return engn;
}

static inline struct nvkm_engine *
gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;

	switch (engn) {
	case 0: engn = NVDEV_ENGINE_GR; break;
	case 1: engn = NVDEV_ENGINE_MSVLD; break;
	case 2: engn = NVDEV_ENGINE_MSPPP; break;
	case 3: engn = NVDEV_ENGINE_MSPDEC; break;
	case 4: engn = NVDEV_ENGINE_CE0; break;
	case 5: engn = NVDEV_ENGINE_CE1; break;
	default:
		return NULL;
	}

	return nvkm_device_engine(device, engn);
}

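/* Recovery worker kicked by gf100_fifo_recover(): take the mask of faulting
 * engines under the fifo lock, set their bits in 0x002630, reset each engine
 * with a fini/init cycle, rebuild the runlist, then clear the bits again.
 */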
static void
gf100_fifo_recover_work(struct work_struct *work)
{
	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
		engm |= 1 << gf100_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	gf100_fifo_runlist_update(fifo);
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}

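/* Called with the fifo lock held when an engine faults on a channel: disable
 * the channel (0x003004 bit 0), drop it from the runlist bookkeeping, mark it
 * killed, record the engine in fifo->mask and schedule the recovery worker.
 */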
static void
gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
		   struct gf100_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[engine->subdev.index], chid);
	assert_spin_locked(&fifo->base.lock);

	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
	list_del_init(&chan->head);
	chan->killed = true;

	fifo->mask |= 1ULL << engine->subdev.index;
	schedule_work(&fifo->fault);
}

static const struct nvkm_enum
gf100_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

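/* SCHED_ERROR code 0x0a (CTXSW_TIMEOUT): scan the six engine status slots
 * and, for any engine that appears stuck mid context-switch (the unk0/unk1
 * bits are not fully understood), find the channel it is stuck on and
 * trigger recovery for it.
 */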
static void
gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gf100_fifo_chan *chan;
	unsigned long flags;
	u32 engn;

	spin_lock_irqsave(&fifo->base.lock, flags);
	for (engn = 0; engn < 6; engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 save = (stat & 0x00100000); /* maybe? */
		u32 unk0 = (stat & 0x00040000);
		u32 unk1 = (stat & 0x00001000);
		u32 chid = (stat & 0x0000007f);
		(void)save;

		if (busy && unk0 && unk1) {
			list_for_each_entry(chan, &fifo->chan, head) {
				if (chan->base.chid == chid) {
					engine = gf100_fifo_engine(fifo, engn);
					if (!engine)
						break;
					gf100_fifo_recover(fifo, engine, chan);
					break;
				}
			}
		}
	}
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static void
gf100_fifo_intr_sched(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en;

	en = nvkm_enum_find(gf100_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gf100_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PCOUNTER" },
	{ 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PDAEMON" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_hubclient[] = {
	{ 0x01, "PCOPY0" },
	{ 0x02, "PCOPY1" },
	{ 0x04, "DISPATCH" },
	{ 0x05, "CTXCTL" },
	{ 0x06, "PFIFO" },
	{ 0x07, "BAR_READ" },
	{ 0x08, "BAR_WRITE" },
	{ 0x0b, "PVP" },
	{ 0x0c, "PMSPPP" },
	{ 0x0d, "PMSVLD" },
	{ 0x11, "PCOUNTER" },
	{ 0x12, "PDAEMON" },
	{ 0x14, "CCACHE" },
	{ 0x15, "CCACHE_POST" },
	{}
};

static const struct nvkm_enum
gf100_fifo_fault_gpcclient[] = {
	{ 0x01, "TEX" },
	{ 0x0c, "ESETUP" },
	{ 0x0e, "CTXCTL" },
	{ 0x0f, "PROP" },
	{}
};

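/* Decode an MMU fault from the given fault unit: read the instance, address
 * and status registers, translate the engine/client/reason fields via the
 * enum tables above, log the fault, and hand the affected engine/channel
 * pair to gf100_fifo_recover() where possible.
 */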
static void
gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	const struct nvkm_enum *er, *eu, *ec;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char gpcid[8] = "";

	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, eu->data2);
			break;
		}
	}

	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", chan ? chan->chid : -1,
		   (u64)inst << 12,
		   chan ? chan->object.client->name : "unknown");

	if (engine && chan)
		gf100_fifo_recover(fifo, engine, (void *)chan);
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/*	{ 0x00008000, "" }	seen with null ib push */
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};

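/* Per-PBDMA interrupt: the 0x00800000 status is first offered to the SW
 * engine as a software method and suppressed if it handles it; anything
 * still pending is decoded against gf100_fifo_pbdma_intr and logged, then
 * the method and interrupt state are cleared.
 */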
static void
gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static void
gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x002a00);

	if (intr & 0x10000000) {
		wake_up(&fifo->runlist.wait);
		nvkm_wr32(device, 0x002a00, 0x10000000);
		intr &= ~0x10000000;
	}

	if (intr) {
		nvkm_error(subdev, "RUNLIST %08x\n", intr);
		nvkm_wr32(device, 0x002a00, intr);
	}
}

static void
gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
	u32 inte = nvkm_rd32(device, 0x002628);
	u32 unkn;

	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);

	for (unkn = 0; unkn < 8; unkn++) {
		u32 ints = (intr >> (unkn * 0x04)) & inte;
		if (ints & 0x1) {
			nvkm_fifo_uevent(&fifo->base);
			ints &= ~1;
		}
		if (ints) {
			nvkm_error(subdev, "ENGINE %d %d %01x",
				   engn, unkn, ints);
			nvkm_mask(device, 0x002628, ints, 0);
		}
	}
}

void
gf100_fifo_intr_engine(struct gf100_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x0025a4);
	while (mask) {
		u32 unit = __ffs(mask);
		gf100_fifo_intr_engine_unit(fifo, unit);
		mask &= ~(1 << unit);
	}
}

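/* Top-level FIFO interrupt handler: read the pending status (0x002100)
 * masked by the enable register (0x002140) and dispatch each pending bit to
 * its handler, clearing bits as they are serviced; anything left over is
 * logged and its enable bit cleared.
 */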
static void
gf100_fifo_intr(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		u32 intr = nvkm_rd32(device, 0x00252c);
		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000100) {
		gf100_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		u32 intr = nvkm_rd32(device, 0x00256c);
		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x01000000) {
		u32 intr = nvkm_rd32(device, 0x00258c);
		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gf100_fifo_intr_pbdma(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gf100_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		gf100_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

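/* One-time setup: allocate the two runlist buffers used for double
 * buffering, plus the 128-page fifo->user.mem area and its BAR mapping.
 */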
static int
gf100_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[0]);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
			      false, &fifo->runlist.mem[1]);
	if (ret)
		return ret;

	init_waitqueue_head(&fifo->runlist.wait);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
			      0x1000, false, &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
	return 0;
}

static void
gf100_fifo_fini(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	flush_work(&fifo->fault);
}

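/* Bring up the FIFO: enable all PBDMA units, count them (spoon_nr), route
 * engines to PBDMAs, clear and unmask the per-PBDMA and top-level interrupt
 * registers, and point the hardware at the user-memory BAR offset.
 */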
static void
gf100_fifo_init(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	nvkm_wr32(device, 0x000204, 0xffffffff);
	nvkm_wr32(device, 0x002204, 0xffffffff);

	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* assign engines to PBDMAs */
	if (fifo->spoon_nr >= 3) {
		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
	}

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
}

static void *
gf100_fifo_dtor(struct nvkm_fifo *base)
{
	struct gf100_fifo *fifo = gf100_fifo(base);
	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);
	nvkm_memory_del(&fifo->runlist.mem[0]);
	nvkm_memory_del(&fifo->runlist.mem[1]);
	return fifo;
}

static const struct nvkm_fifo_func
gf100_fifo = {
	.dtor = gf100_fifo_dtor,
	.oneinit = gf100_fifo_oneinit,
	.init = gf100_fifo_init,
	.fini = gf100_fifo_fini,
	.intr = gf100_fifo_intr,
	.uevent_init = gf100_fifo_uevent_init,
	.uevent_fini = gf100_fifo_uevent_fini,
	.chan = {
		&gf100_fifo_gpfifo_oclass,
		NULL
	},
};

int
gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	struct gf100_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	INIT_LIST_HEAD(&fifo->chan);
	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
}