/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
#include <subdev/bar.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/
/* Rebuild the runlist from scratch and hand it to PFIFO.
 *
 * Caller must hold the subdev mutex (see nv50_fifo_playlist_update()).
 * Playlists are double-buffered: we fill the currently-inactive buffer
 * and flip cur_playlist so the next rebuild uses the other one.
 */
static void
nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
{
	struct nvkm_bar *bar = nvkm_bar(priv);
	struct nvkm_gpuobj *cur;
	int i, p;

	/* select the inactive buffer, flip for next time */
	cur = priv->playlist[priv->cur_playlist];
	priv->cur_playlist = !priv->cur_playlist;

	/* include every channel whose 0x002600 entry has bit 31 set
	 * (set by nv50_fifo_chan_init(), cleared by chan_fini()) */
	for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
		if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
			nv_wo32(cur, p++ * 4, i);
	}

	bar->flush(bar);

	/* point PFIFO at the new list (address, entry count) and kick it */
	nv_wr32(priv, 0x0032f4, cur->addr >> 12);
	nv_wr32(priv, 0x0032ec, p);
	nv_wr32(priv, 0x002500, 0x00000101);
}

void
nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
{
	mutex_lock(&nv_subdev(priv)->mutex);
	nv50_fifo_playlist_update_locked(priv);
68
	mutex_unlock(&nv_subdev(priv)->mutex);
69 70
}

/* Record an engine context's location in the channel's engine-context
 * table (base->eng) so PFIFO can save/restore it on channel switch.
 * Returns 0 on success, -EINVAL for engines with no slot here.
 */
static int
nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nvkm_gpuobj *ectx = (void *)object;
	u64 limit = ectx->addr + ectx->size - 1;
	u64 start = ectx->addr;
	u32 addr;

	/* per-engine slot offset inside the eng object */
	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   : return 0;	/* software engine: no hw context */
	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
	default:
		return -EINVAL;
	}

	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	/* context DMA-style entry: flags, limit/start split low/high */
	nv_wo32(base->eng, addr + 0x00, 0x00190000);
	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
					upper_32_bits(start));
	nv_wo32(base->eng, addr + 0x10, 0x00000000);
	nv_wo32(base->eng, addr + 0x14, 0x00000000);
	bar->flush(bar);
	return 0;
}

/* Detach an engine context from the channel: force PFIFO to unload the
 * channel's context, then clear its slot in the engine-context table.
 * On unload timeout, returns -EBUSY if we're suspending (the context
 * could not be saved); otherwise the slot is left untouched.
 */
static int
nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			 struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_priv *priv = (void *)parent->engine;
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nv50_fifo_chan *chan = (void *)parent;
	u32 addr, me;
	int ret = 0;

	/* per-engine slot offset, must match nv50_fifo_context_attach() */
	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   : return 0;
	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
	default:
		return -EINVAL;
	}

	/* HW bug workaround:
	 *
	 * PFIFO will hang forever if the connected engines don't report
	 * that they've processed the context switch request.
	 *
	 * In order for the kickoff to work, we need to ensure all the
	 * connected engines are in a state where they can answer.
	 *
	 * Newer chipsets don't seem to suffer from this issue, and well,
	 * there's also a "ignore these engines" bitmask reg we can use
	 * if we hit the issue there..
	 */
	me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);

	/* do the kickoff... */
	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
	if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
		nv_error(priv, "channel %d [%s] unload timeout\n",
			 chan->base.chid, nvkm_client_name(chan));
		if (suspend)
			ret = -EBUSY;
	}
	nv_wr32(priv, 0x00b860, me);	/* restore saved workaround reg value */

	/* only scrub the context-table slot if the unload succeeded */
	if (ret == 0) {
		nv_wo32(base->eng, addr + 0x00, 0x00000000);
		nv_wo32(base->eng, addr + 0x04, 0x00000000);
		nv_wo32(base->eng, addr + 0x08, 0x00000000);
		nv_wo32(base->eng, addr + 0x0c, 0x00000000);
		nv_wo32(base->eng, addr + 0x10, 0x00000000);
		nv_wo32(base->eng, addr + 0x14, 0x00000000);
		bar->flush(bar);
	}

	return ret;
}

/* Insert an object into the channel's RAMHT so hardware methods can
 * reference it by handle.  The context word combines the object's
 * instance offset with an engine identifier in the upper bits.
 * Returns the RAMHT cookie (later passed to nv50_fifo_object_detach())
 * or a negative error code.
 */
static int
nv50_fifo_object_attach(struct nvkm_object *parent,
			struct nvkm_object *object, u32 handle)
{
	struct nv50_fifo_chan *chan = (void *)parent;
	u32 context;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->node->offset >> 4;
	else
		context = 0x00000004; /* just non-zero */

	/* engine id encoded in bits 20-21 */
	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
	case NVDEV_ENGINE_MPEG  : context |= 0x00200000; break;
	default:
		return -EINVAL;
	}

	return nvkm_ramht_insert(chan->ramht, 0, handle, context);
}

void
182
nv50_fifo_object_detach(struct nvkm_object *parent, int cookie)
183 184
{
	struct nv50_fifo_chan *chan = (void *)parent;
185
	nvkm_ramht_remove(chan->ramht, cookie);
186 187
}

/* Constructor for NV50_CHANNEL_DMA objects: unpack the creation args,
 * create the base fifo channel, allocate its RAMHT, and program the
 * RAMFC for a DMA-mode (non-gpfifo) pushbuffer.
 */
static int
nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	int ret;

	/* NOTE: nvif_unpack() assigns 'ret' as a side-effect, so the
	 * 'return ret' below is not returning an uninitialized value */
	nv_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
				 "offset %016llx\n", args->v0.version,
			 args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x2000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* report the allocated channel id back to the caller */
	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
			     &chan->ramht);
	if (ret)
		return ret;

	/* RAMFC setup; 0x08-0x14 presumably DMA put/get initialised to
	 * the requested offset — TODO confirm against PFIFO docs */
	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
	nv_wo32(base->ramfc, 0x44, 0x01003fff);
	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
	nv_wo32(base->ramfc, 0x78, 0x00000000);
	nv_wo32(base->ramfc, 0x7c, 0x30000001);
	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				   (4 << 24) /* SEARCH_FULL */ |
				   (chan->ramht->gpuobj.node->offset >> 4));
	bar->flush(bar);
	return 0;
}

/* Constructor for NV50_CHANNEL_GPFIFO objects: like the DMA variant,
 * but the RAMFC is programmed for an indirect pushbuffer described by
 * ioffset/ilength.
 */
static int
nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	union {
		struct nv50_channel_gpfifo_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	u64 ioffset, ilength;
	int ret;

	/* NOTE: nvif_unpack() assigns 'ret' as a side-effect, so the
	 * 'return ret' below is not returning an uninitialized value */
	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
				 "ioffset %016llx ilength %08x\n",
			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			 args->v0.ilength);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x2000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* report the allocated channel id back to the caller */
	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
			     &chan->ramht);
	if (ret)
		return ret;

	/* hardware takes log2 of the entry count; entries are 8 bytes */
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
	nv_wo32(base->ramfc, 0x44, 0x01003fff);
	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
	nv_wo32(base->ramfc, 0x78, 0x00000000);
	nv_wo32(base->ramfc, 0x7c, 0x30000001);
	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				   (4 << 24) /* SEARCH_FULL */ |
				   (chan->ramht->gpuobj.node->offset >> 4));
	bar->flush(bar);
	return 0;
}

312
void
313
nv50_fifo_chan_dtor(struct nvkm_object *object)
314 315
{
	struct nv50_fifo_chan *chan = (void *)object;
316 317
	nvkm_ramht_ref(NULL, &chan->ramht);
	nvkm_fifo_channel_destroy(&chan->base);
318 319
}

/* Enable the channel: publish its RAMFC address in PFIFO's channel
 * table and rebuild the playlist so it gets scheduled.
 */
static int
nv50_fifo_chan_init(struct nvkm_object *object)
{
	struct nv50_fifo_priv *priv = (void *)object->engine;
	struct nv50_fifo_base *base = (void *)object->parent;
	struct nv50_fifo_chan *chan = (void *)object;
	struct nvkm_gpuobj *ramfc = base->ramfc;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	/* bit 31 marks the channel active for playlist rebuilds */
	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
	nv50_fifo_playlist_update(priv);
	return 0;
}

/* Disable the channel: drop it from the playlist (hardware unloads the
 * context as a result), then clear its channel-table entry entirely.
 */
int
nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_fifo_priv *priv = (void *)object->engine;
	struct nv50_fifo_chan *chan = (void *)object;
	u32 chid = chan->base.chid;

	/* remove channel from playlist, fifo will unload context */
	nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
	nv50_fifo_playlist_update(priv);
	nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

354
static struct nvkm_ofuncs
355 356 357 358 359
nv50_fifo_ofuncs_dma = {
	.ctor = nv50_fifo_chan_ctor_dma,
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
360 361 362 363
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
364 365
};

366
static struct nvkm_ofuncs
367 368
nv50_fifo_ofuncs_ind = {
	.ctor = nv50_fifo_chan_ctor_ind,
369 370 371
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
372 373 374 375
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
376 377
};

/* Channel classes exposed to clients. */
static struct nvkm_oclass
nv50_fifo_sclass[] = {
	{ NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
	{ NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
	{}	/* sentinel */
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

/* Channel context constructor: reserve the channel's instmem block and
 * carve out the RAMFC, engine-context table and page directory, then
 * attach the client's VM to the page directory.
 */
static int
nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv50_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	/* RAMFC: fifo context block, must start zeroed */
	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
	if (ret)
		return ret;

	/* engine-context table, filled by context_attach/detach */
	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
			      NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
	if (ret)
		return ret;

	/* page directory for the channel's address space */
	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
			      &base->pgd);
	if (ret)
		return ret;

	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}

425
void
426
nv50_fifo_context_dtor(struct nvkm_object *object)
427 428
{
	struct nv50_fifo_base *base = (void *)object;
429 430 431 432 433 434
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_gpuobj_ref(NULL, &base->eng);
	nvkm_gpuobj_ref(NULL, &base->ramfc);
	nvkm_gpuobj_ref(NULL, &base->cache);
	nvkm_fifo_context_destroy(&base->base);
435 436
}

/* Channel-context class: uses the local ctor/dtor above, and the
 * generic nvkm fifo-context implementations for everything else.
 */
static struct nvkm_oclass
nv50_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_fifo_context_ctor,
		.dtor = nv50_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

/* PFIFO engine constructor: create the fifo with channel ids 1..127,
 * allocate both playlist buffers, and hook up the nv04-compatible
 * interrupt handler and pause/start methods.
 */
static int
nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv50_fifo_priv *priv;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* double-buffered playlists, 128 entries of 4 bytes each */
	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
			      &priv->playlist[0]);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
			      &priv->playlist[1]);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000100;	/* PMC enable bit for PFIFO */
	nv_subdev(priv)->intr = nv04_fifo_intr;
	nv_engine(priv)->cclass = &nv50_fifo_cclass;
	nv_engine(priv)->sclass = nv50_fifo_sclass;
	priv->base.pause = nv04_fifo_pause;
	priv->base.start = nv04_fifo_start;
	return 0;
}

void
487
nv50_fifo_dtor(struct nvkm_object *object)
488 489 490
{
	struct nv50_fifo_priv *priv = (void *)object;

491 492
	nvkm_gpuobj_ref(NULL, &priv->playlist[1]);
	nvkm_gpuobj_ref(NULL, &priv->playlist[0]);
493

494
	nvkm_fifo_destroy(&priv->base);
495
}
496 497

/* Bring up PFIFO: reset the unit, program initial state, clear the
 * channel table, and enable fetch/pull.
 */
int
nv50_fifo_init(struct nvkm_object *object)
{
	struct nv50_fifo_priv *priv = (void *)object;
	int ret, i;

	ret = nvkm_fifo_init(&priv->base);
	if (ret)
		return ret;

	/* pulse PFIFO's bit in the master enable register to reset it */
	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(priv, 0x00250c, 0x6f3cfc34);
	nv_wr32(priv, 0x002044, 0x01003fff);

	/* 0x002100/0x002140 presumably INTR ack / INTR enable — the same
	 * pair nv04_fifo_intr operates on; confirm against hw docs */
	nv_wr32(priv, 0x002100, 0xffffffff);
	nv_wr32(priv, 0x002140, 0xbfffffff);

	/* no channels resident yet; build an (empty) playlist */
	for (i = 0; i < 128; i++)
		nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
	nv50_fifo_playlist_update_locked(priv);

	nv_wr32(priv, 0x003200, 0x00000001);
	nv_wr32(priv, 0x003250, 0x00000001);
	nv_wr32(priv, 0x002500, 0x00000001);
	return 0;
}

/* Public PFIFO engine class for NV50-family devices. */
struct nvkm_oclass *
nv50_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_fifo_ctor,
		.dtor = nv50_fifo_dtor,
		.init = nv50_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};