/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "changk104.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <engine/sw.h>

#include <nvif/class.h>
35

36 37
void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
38 39 40 41
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
42

43 44
void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
45 46 47 48
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}
49

50
/* Rebuild the runlist for one engine/runlist index and submit it to hardware.
 *
 * Writes every channel currently on the engine's channel list into the
 * inactive runlist buffer (two buffers are ping-ponged), then points the
 * hardware at it via 0x002270/0x002274 and waits for the pending bit in
 * 0x002284+engine*8 to clear.  Serialised against other runlist updates by
 * the subdev mutex.
 */
void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
	struct gk104_fifo_engn *engn = &fifo->engine[engine];
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *cur;
	int nr = 0;

	mutex_lock(&subdev->mutex);
	/* Double-buffered: build into the buffer not currently in use. */
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	/* One 8-byte entry per channel: chid in the low word. */
	nvkm_kmap(cur);
	list_for_each_entry(chan, &engn->chan, head) {
		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
		nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
		nr++;
	}
	nvkm_done(cur);

	/* Submit: base address (4KiB-aligned) and entry count. */
	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
	nvkm_wr32(device, 0x002274, (engine << 20) | nr);

	/* Wait for the hardware to finish processing the new runlist;
	 * gk104_fifo_intr_runlist() wakes this queue. */
	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
				msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", engine);
	mutex_unlock(&subdev->mutex);
}

82
static inline struct nvkm_engine *
B
Ben Skeggs 已提交
83
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
84
{
85
	struct nvkm_device *device = fifo->base.engine.subdev.device;
86 87
	u64 subdevs = gk104_fifo_engine_subdev(engn);
	if (subdevs)
88
		return nvkm_device_engine(device, __ffs(subdevs));
89
	return NULL;
90 91
}

92
static void
93
gk104_fifo_recover_work(struct work_struct *work)
94
{
B
Ben Skeggs 已提交
95
	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
96
	struct nvkm_device *device = fifo->base.engine.subdev.device;
97
	struct nvkm_engine *engine;
98 99 100 101
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

B
Ben Skeggs 已提交
102 103 104 105
	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);
106 107

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
108
		engm |= 1 << gk104_fifo_subdev_engine(engn);
109
	nvkm_mask(device, 0x002630, engm, engm);
110 111

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
112 113 114
		if ((engine = nvkm_device_engine(device, engn))) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
115
		}
116
		gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn));
117 118
	}

119 120
	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
121 122 123
}

/* Begin recovery of a channel that faulted on an engine.
 *
 * Caller must hold fifo->base.lock.  Kicks the channel off the hardware
 * (0x800004 bit 11), removes it from the runlist channel list, marks it
 * killed, and schedules gk104_fifo_recover_work() to reset the engine.
 */
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		  struct gk104_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nvkm_subdev_name[engine->subdev.index], chid);
	assert_spin_locked(&fifo->base.lock);

	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	list_del_init(&chan->head);
	chan->killed = true;

	/* Record the faulted engine; the work item processes the mask. */
	fifo->mask |= 1ULL << engine->subdev.index;
	schedule_work(&fifo->fault);
}

143 144
/* Decode table for BIND_ERROR reason codes (low byte of 0x00252c). */
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

/* Handle a BIND_ERROR interrupt: read 0x00252c, decode the low-byte reason
 * via gk104_fifo_bind_reason[], and log it. */
static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

167 168
/* Decode table for SCHED_ERROR codes (low byte of 0x00254c). */
static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

173
/* Handle a CTXSW_TIMEOUT scheduling error.
 *
 * For each runlist engine, reads its status register (0x002640+engn*4) and,
 * if the engine is busy mid-context-switch, looks up the stuck channel on
 * that engine's list and hands it to gk104_fifo_recover().
 */
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
	unsigned long flags;
	u32 engn;

	spin_lock_irqsave(&fifo->base.lock, flags);
	for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		/* The stuck channel is the incoming one if a load is in
		 * progress, otherwise the outgoing one. */
		u32 chid = load ? next : prev;
		(void)save; /* decoded for completeness, currently unused */

		if (busy && chsw) {
			list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
				if (chan->base.chid == chid) {
					engine = gk104_fifo_engine(fifo, engn);
					if (!engine)
						break;
					gk104_fifo_recover(fifo, engine, chan);
					break;
				}
			}
		}
	}
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

209
/* Handle a SCHED_ERROR interrupt: log the code from 0x00254c and dispatch
 * CTXSW_TIMEOUT (0x0a) to the context-switch recovery path. */
static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
B
Ben Skeggs 已提交
231
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
232
{
233 234
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
235
	u32 stat = nvkm_rd32(device, 0x00256c);
236
	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
237
	nvkm_wr32(device, 0x00256c, stat);
238 239 240
}

static void
B
Ben Skeggs 已提交
241
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
242
{
243 244
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
245
	u32 stat = nvkm_rd32(device, 0x00259c);
246
	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
247 248
}

249 250
/* MMU fault unit decode: fault-unit id -> name, with data2 carrying the
 * NVDEV index used by gk104_fifo_intr_fault() to pick a recovery action. */
static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
	{}
};

270 271
/* MMU fault reason decode (low nibble of the fault status register). */
static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

291 292
/* MMU fault client decode for HUB-side clients (fault status bit 6 set). */
static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

328 329
/* MMU fault client decode for GPC-side clients (fault status bit 6 clear). */
static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

354
/* Handle an MMU fault reported by fault unit 'unit'.
 *
 * Reads the per-unit fault info registers (0x002800 block), decodes the
 * faulting engine/client/reason via the tables above, logs a full report,
 * and — for faults attributable to a real engine with a known channel —
 * kicks off channel recovery.  BAR1/BAR3/IFB faults are instead cleared by
 * poking the relevant bind register.
 */
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	const struct nvkm_enum *er, *eu, *ec;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	/* Client id namespace depends on whether the fault came from the
	 * HUB or a GPC. */
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, eu->data2);
			break;
		}
	}

	/* Look up the channel owning the faulting instance block (takes a
	 * reference; released via nvkm_fifo_chan_put() below). */
	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", chan ? chan->chid : -1,
		   (u64)inst << 12,
		   chan ? chan->object.client->name : "unknown");

	if (engine && chan)
		gk104_fifo_recover(fifo, engine, (void *)chan);
	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}

416
/* Bit names for PBDMA interrupt status register 0 (0x040108+unit*0x2000). */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
449

450
/* Handle interrupt status register 0 of PBDMA 'unit'.
 *
 * DEVICE interrupts (bit 23) carry a software method; these are offered to
 * the SW engine first and suppressed from the log if handled.  Anything
 * still set is decoded and logged with the offending channel, then the
 * whole status is acknowledged.
 */
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		/* Software method: let the SW engine consume it. */
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	/* Acknowledge everything we saw, handled or not. */
	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

489
/* Bit names for PBDMA interrupt status register 1 (0x040148+unit*0x2000). */
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

/* Handle interrupt status register 1 (HCE errors) of PBDMA 'unit':
 * decode, log with the current channel id, and acknowledge. */
static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

B
Ben Skeggs 已提交
519
static void
B
Ben Skeggs 已提交
520
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
B
Ben Skeggs 已提交
521
{
522 523
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
B
Ben Skeggs 已提交
524 525
	while (mask) {
		u32 engn = __ffs(mask);
B
Ben Skeggs 已提交
526
		wake_up(&fifo->engine[engn].wait);
527
		nvkm_wr32(device, 0x002a00, 1 << engn);
B
Ben Skeggs 已提交
528 529 530 531
		mask &= ~(1 << engn);
	}
}

B
Ben Skeggs 已提交
532
/* Handle the non-stall engine interrupt by forwarding it as a uevent to
 * anyone listening on the fifo (e.g. channel waiters). */
static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

538 539
/* Top-level FIFO interrupt handler.
 *
 * Reads pending-and-enabled status (0x002100 & 0x002140), dispatches each
 * known bit to its handler, acknowledges it in 0x002100, and clears it from
 * the local copy.  Anything left over is logged and masked off so an
 * unknown source cannot wedge the machine in an interrupt storm.
 */
void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	/* MMU faults: one bit per fault unit in 0x00259c. */
	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	/* PBDMA interrupts: one bit per unit in 0x0025a0. */
	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	/* Unknown sources: log once and mask so they can't storm. */
	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}
629

630 631 632 633 634 635 636 637 638 639
/* Suspend/teardown hook: wait for any in-flight recovery work, then keep
 * MMU fault interrupts enabled even while the fifo itself is down. */
void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	flush_work(&fifo->fault);
	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

640
/* One-time initialisation: allocate both runlist buffers and the wait/chan
 * state for every runlist engine, then allocate and BAR-map the USERD
 * memory (0x200 bytes per channel).  Returns 0 or a negative errno;
 * partially-allocated memory is released by gk104_fifo_dtor(). */
int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
		/* Two runlist buffers per engine, ping-ponged by
		 * gk104_fifo_runlist_update(). */
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;

		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      0x8000, 0x1000, false,
				      &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;

		init_waitqueue_head(&fifo->engine[i].wait);
		INIT_LIST_HEAD(&fifo->engine[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
			    &fifo->user.bar);
	if (ret)
		return ret;

	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
	return 0;
}

679 680
/* Hardware init: enable all PBDMA units, clear and enable their interrupt
 * sources, point the hardware at the USERD BAR mapping, and enable the
 * top-level FIFO interrupts. */
void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int i;

	/* enable all available PBDMA units */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	/* Read back: hardware leaves only the units that actually exist. */
	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	/* USERD base address (4KiB-aligned BAR offset). */
	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	/* Ack any stale interrupts, then enable all sources. */
	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

711 712
/* Destructor: release the USERD BAR mapping and memory, and both runlist
 * buffers of every engine.  Returns the object for the caller to kfree. */
void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int i;

	nvkm_vm_put(&fifo->user.bar);
	nvkm_memory_del(&fifo->user.mem);

	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
		nvkm_memory_del(&fifo->engine[i].runlist[1]);
		nvkm_memory_del(&fifo->engine[i].runlist[0]);
	}

	return fifo;
}

/* Common constructor for GK104-family fifos.
 *
 * Allocates the gk104_fifo wrapper, wires up the recovery work item, and
 * hands off to the base nvkm_fifo constructor.  *pfifo is set before the
 * base ctor runs so the caller can destroy via the normal path on error.
 */
int
gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
		int index, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
}

742
/* GK104 fifo implementation table (hooks called by the nvkm fifo core). */
static const struct nvkm_fifo_func
gk104_fifo = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.chan = {
		&gk104_fifo_gpfifo_oclass,
		NULL
	},
};

B
Ben Skeggs 已提交
757
/* Public constructor: GK104 fifo with 4096 channels. */
int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}