gk104.c 20.7 KB
Newer Older
1
/*
2
 * Copyright 2012 Red Hat Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
24
#include "gk104.h"
25
#include "changk104.h"
26

27 28
#include <core/client.h>
#include <core/enum.h>
29
#include <core/gpuobj.h>
30
#include <subdev/bar.h>
31
#include <engine/sw.h>
32

33
#include <nvif/class.h>
34

35 36
void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
37 38 39 40
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
41

42 43
void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
44 45 46 47
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}
48

49
/* Rebuild and submit the runlist for one runlist engine.
 *
 * Runlists are double-buffered (runlist[0]/runlist[1]); we write the new
 * list into the buffer the hardware is NOT currently using, then point
 * the hardware at it.  Serialised by the subdev mutex so concurrent
 * channel create/destroy can't interleave runlist submissions.
 */
void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
	struct gk104_fifo_engn *engn = &fifo->engine[engine];
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int nr = 0;

	mutex_lock(&subdev->mutex);
	/* Flip to the inactive runlist buffer before filling it. */
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	/* One 8-byte entry per active channel: chid in the low word. */
	nvkm_kmap(cur);
	list_for_each_entry(chan, &engn->chan, head) {
		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
		nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
		nr++;
	}
	nvkm_done(cur);

	/* Submit: base address (4KiB units) + (engine, entry count). */
	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
	nvkm_wr32(device, 0x002274, (engine << 20) | nr);

	/* Hardware clears the PENDING bit (0x00100000) when the update
	 * completes; the runlist interrupt wakes engn->wait.
	 */
	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
				msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", engine);
	mutex_unlock(&subdev->mutex);
}

81
static inline struct nvkm_engine *
B
Ben Skeggs 已提交
82
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
83
{
84
	struct nvkm_device *device = fifo->base.engine.subdev.device;
85 86
	u64 subdevs = gk104_fifo_engine_subdev(engn);
	if (subdevs)
87
		return nvkm_device_engine(device, __ffs(subdevs));
88
	return NULL;
89 90
}

91
static void
92
gk104_fifo_recover_work(struct work_struct *work)
93
{
B
Ben Skeggs 已提交
94
	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
95
	struct nvkm_device *device = fifo->base.engine.subdev.device;
96
	struct nvkm_engine *engine;
97 98 99 100
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

B
Ben Skeggs 已提交
101 102 103 104
	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);
105 106

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
107
		engm |= 1 << gk104_fifo_subdev_engine(engn);
108
	nvkm_mask(device, 0x002630, engm, engm);
109 110

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
111 112 113
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
114
		}
115
		gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn));
116 117
	}

118 119
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
120 121 122
}

/* Begin recovery of a faulting channel on the given engine.
 *
 * Must be called with fifo->base.lock held (asserted below).  Disables
 * the channel, removes it from its runlist list, marks it killed, and
 * schedules the deferred recovery work to reset the engine outside of
 * interrupt context.
 */
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		  struct gk104_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[engine->subdev.index], chid);
	assert_spin_locked(&fifo->base.lock);

	/* Set the channel's DISABLE bit so it can't be rescheduled. */
	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	list_del_init(&chan->head);
	chan->killed = true;

	/* Record the faulting engine; the work item consumes this mask. */
	fifo->mask |= 1ULL << engine->subdev.index;
	schedule_work(&fifo->fault);
}

142 143
/* Reason codes reported by the BIND_ERROR interrupt (low byte of 0x00252c). */
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
B
Ben Skeggs 已提交
154
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
B
Ben Skeggs 已提交
155
{
156 157
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
158
	u32 intr = nvkm_rd32(device, 0x00252c);
B
Ben Skeggs 已提交
159
	u32 code = intr & 0x000000ff;
160 161
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);
B
Ben Skeggs 已提交
162

163
	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
B
Ben Skeggs 已提交
164 165
}

166 167
/* Reason codes reported by the SCHED_ERROR interrupt (low byte of 0x00254c). */
static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

172
static void
B
Ben Skeggs 已提交
173
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
174
{
175
	struct nvkm_device *device = fifo->base.engine.subdev.device;
176 177
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
178
	unsigned long flags;
179 180
	u32 engn;

181
	spin_lock_irqsave(&fifo->base.lock, flags);
182
	for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
183
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
184 185 186 187 188 189 190 191 192 193
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		u32 chid = load ? next : prev;
		(void)save;

		if (busy && chsw) {
194 195 196 197 198 199 200 201 202
			list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
				if (chan->base.chid == chid) {
					engine = gk104_fifo_engine(fifo, engn);
					if (!engine)
						break;
					gk104_fifo_recover(fifo, engine, chan);
					break;
				}
			}
203 204
		}
	}
205
	spin_unlock_irqrestore(&fifo->base.lock, flags);
206 207
}

208
static void
B
Ben Skeggs 已提交
209
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
210
{
211 212
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
213
	u32 intr = nvkm_rd32(device, 0x00254c);
214
	u32 code = intr & 0x000000ff;
215 216
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);
217

218
	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
219 220 221

	switch (code) {
	case 0x0a:
B
Ben Skeggs 已提交
222
		gk104_fifo_intr_sched_ctxsw(fifo);
223 224 225 226
		break;
	default:
		break;
	}
227 228 229
}

static void
B
Ben Skeggs 已提交
230
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
231
{
232 233
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
234
	u32 stat = nvkm_rd32(device, 0x00256c);
235
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
236
	nvkm_wr32(device, 0x00256c, stat);
237 238 239
}

static void
B
Ben Skeggs 已提交
240
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
241
{
242 243
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
244
	u32 stat = nvkm_rd32(device, 0x00259c);
245
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
246 247
}

248 249
/* MMU fault source units; data2 maps the unit to the subdev/engine used
 * for recovery (entries without data2 are informational only).
 */
static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
	{}
};

269 270
/* MMU fault reason codes (low nibble of the fault status register). */
static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

290 291
/* HUB client IDs for MMU faults (used when the HUB bit is set in status). */
static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

327 328
/* GPC client IDs for MMU faults (used when the HUB bit is clear). */
static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

353
/* Handle an MMU fault reported by fault buffer `unit`.
 *
 * Reads and decodes the four fault registers (instance, VA lo/hi,
 * status), resolves symbolic names for the reason/engine/client, logs
 * a detailed message, and triggers channel recovery when the fault can
 * be attributed to both an engine and a channel.
 */
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	const struct nvkm_enum *er, *eu, *ec;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	/* Client ID namespace depends on whether the fault came from
	 * the HUB or from a GPC.
	 */
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		/* BAR/IFB faults: poke the relevant bind register to
		 * un-wedge the unit; no engine recovery needed.
		 */
		case NVKM_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, eu->data2);
			break;
		}
	}

	/* Look up the channel owning the faulting instance block. */
	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", chan ? chan->chid : -1,
		   (u64)inst << 12,
		   chan ? chan->object.client->name : "unknown");

	if (engine && chan)
		gk104_fifo_recover(fifo, engine, (void *)chan);
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

415
/* Bit names for PBDMA interrupt status register 0 (0x040108 + n*0x2000). */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
448

449
/* Handle PBDMA interrupt status register 0 for unit `unit`.
 *
 * DEVICE interrupts (bit 23) may be software methods; those are routed
 * to the SW engine and suppressed from the error log when handled.
 * Anything left in `show` is logged with decoded bit names, then the
 * whole status is acked.
 */
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;	/* subchannel */
	u32 mthd = (addr & 0x00003ffc);		/* method offset */
	u32 show = stat;			/* bits still to report */
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		/* Try the software-method engine first. */
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
		/* Resume method processing on this PBDMA. */
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

488
/* Bit names for PBDMA interrupt status register 1 (0x040148 + n*0x2000). */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static void
B
Ben Skeggs 已提交
498
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
499
{
500 501
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
502 503 504
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
505
	char msg[128];
506 507

	if (stat) {
508 509 510 511 512
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
513 514
	}

515
	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
516 517
}

B
Ben Skeggs 已提交
518
static void
B
Ben Skeggs 已提交
519
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
B
Ben Skeggs 已提交
520
{
521 522
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
B
Ben Skeggs 已提交
523 524
	while (mask) {
		u32 engn = __ffs(mask);
B
Ben Skeggs 已提交
525
		wake_up(&fifo->engine[engn].wait);
526
		nvkm_wr32(device, 0x002a00, 1 << engn);
B
Ben Skeggs 已提交
527 528 529 530
		mask &= ~(1 << engn);
	}
}

B
Ben Skeggs 已提交
531
/* Handle the engine (non-stall) interrupt: forward to the generic
 * fifo uevent notification machinery.
 */
static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

537 538
/* Top-level PFIFO interrupt dispatcher.
 *
 * Reads the enabled, pending interrupt bits and handles each known
 * source in turn, acking it in 0x002100 and clearing it from `stat`.
 * Any bits left over are unknown: they are logged, masked off so they
 * can't storm, and acked.
 */
void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		/* BIND_ERROR */
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		/* SCHED_ERROR (may trigger channel recovery) */
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		/* CHSW_ERROR */
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		/* DROPPED_MMU_FAULT */
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		/* MMU_FAULT: one bit per fault buffer unit in 0x00259c */
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		/* PBDMA: one bit per unit in 0x0025a0 */
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		/* RUNLIST update complete (acked per-engine inside) */
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		/* ENGINE (non-stall) uevent */
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		/* Unknown source: log, mask so it can't storm, and ack. */
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}
628

629 630 631 632 633 634 635 636 637 638
/* Suspend/teardown hook: drain outstanding recovery work, then keep MMU
 * fault interrupts enabled so faults are still reported while the fifo
 * itself is down.
 */
void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *dev = fifo->base.engine.subdev.device;

	flush_work(&fifo->fault);
	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(dev, 0x002140, 0x10000000, 0x10000000);
}

639
/* One-time setup: allocate both runlist buffers and the wait/chan state
 * for every runlist engine, then allocate and BAR-map the per-channel
 * USER pages (0x200 bytes each).
 *
 * Returns 0 on success or a negative errno from the failing allocation;
 * partially-allocated resources are released later by gk104_fifo_dtor().
 */
int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
		/* Double-buffered runlists: 0x8000 bytes, 4KiB aligned. */
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->engine[i].wait);
		INIT_LIST_HEAD(&fifo->engine[i].chan);
	}

	/* USER area: one 0x200-byte page per channel. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
			    &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
	return 0;
}

678 679
/* Hardware init: enable PBDMA units, program their interrupt state,
 * point the hardware at the USER area, and unmask PFIFO interrupts.
 */
void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	/* enable all available PBDMA units */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	/* Read back to learn how many units the hardware actually has. */
	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	/* Point hardware at the BAR-mapped USER area (4KiB units). */
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	/* Ack any stale interrupts, then enable all but bit 31 (uevent,
	 * enabled on demand by gk104_fifo_uevent_init()).
	 */
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

710 711
/* Destructor: release the USER BAR mapping/memory and every runlist
 * buffer.  Returns the gk104_fifo pointer so the caller can kfree() it.
 */
void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int i;

	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);

	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
		nvkm_memory_del(&fifo->engine[i].runlist[1]);
		nvkm_memory_del(&fifo->engine[i].runlist[0]);
	}

	return fifo;
}

/* Common constructor for gk104-family fifos: allocate the wrapper
 * object, hook up the recovery work item, and run the base fifo
 * constructor.  *pfifo is set before construction so callers can tear
 * down via the standard path even if nvkm_fifo_ctor() fails.
 */
int
gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
		int index, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);

	if (!fifo)
		return -ENOMEM;

	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
	*pfifo = &fifo->base;
	return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
}

741
/* GK104 fifo implementation: method table bound by gk104_fifo_new(). */
static const struct nvkm_fifo_func
gk104_fifo = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.chan = {
		&gk104_fifo_gpfifo_oclass,
		NULL
	},
};

B
Ben Skeggs 已提交
756
/* Public constructor: create a GK104 fifo with 4096 channels. */
int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}