/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cl5070.h>
#include <nvif/cl507a.h>
#include <nvif/cl507b.h>
#include <nvif/cl507c.h>
#include <nvif/cl507d.h>
#include <nvif/cl507e.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_fence.h"
#include "nv50_display.h"

#define EVO_DMA_NR 9
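
/* EVO_DMA_NR counts the DMA-mode channels (one core + four base + four
 * overlay).  The defines below are fixed channel indices: 0 is the core
 * (master) channel, and each head c also gets a base/flip, overlay,
 * overlay-immediate and cursor channel at these offsets. */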

#define EVO_MASTER  (0x00)
#define EVO_FLIP(c) (0x01 + (c))
#define EVO_OVLY(c) (0x05 + (c))
#define EVO_OIMM(c) (0x09 + (c))
#define EVO_CURS(c) (0x0d + (c))

/* offsets in shared sync bo of various structures */
#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
#define EVO_MAST_NTFY     EVO_SYNC(      0, 0x00)
#define EVO_FLIP_SEM0(c)  EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c)  EVO_SYNC((c) + 1, 0x10)

/******************************************************************************
 * Atomic state
 *****************************************************************************/
#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)

struct nv50_head_atom {
	struct drm_crtc_state state;

	struct {
		u16 iW;
		u16 iH;
		u16 oW;
		u16 oH;
	} view;

	struct nv50_head_mode {
		bool interlace;
		u32 clock;
		struct {
			u16 active;
			u16 synce;
			u16 blanke;
			u16 blanks;
		} h;
		struct {
			u32 active;
			u16 synce;
			u16 blanke;
			u16 blanks;
			u16 blank2s;
			u16 blank2e;
			u16 blankus;
		} v;
	} mode;

	struct {
		u32 handle;
		u64 offset:40;
	} lut;

	struct {
		bool visible;
		u32 handle;
		u64 offset:40;
		u8  format;
		u8  kind:7;
		u8  layout:1;
		u8  block:4;
		u32 pitch:20;
		u16 x;
		u16 y;
		u16 w;
		u16 h;
	} core;

	struct {
		bool visible;
		u32 handle;
		u64 offset:40;
		u8  layout:1;
		u8  format:1;
	} curs;

	struct {
		u8  depth;
		u8  cpp;
		u16 x;
		u16 y;
		u16 w;
		u16 h;
	} base;

	struct {
		u8 cpp;
	} ovly;

	struct {
		bool enable:1;
		u8 bits:2;
		u8 mode:4;
	} dither;
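
	/* Bitmasks of state blocks that the next flush must disable (clr)
	 * or program (set) on the hardware; the anonymous structs alias
	 * the per-block flags onto a single mask. */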

	union {
		struct {
			bool core:1;
			bool curs:1;
		};
		u8 mask;
	} clr;

	union {
		struct {
			bool core:1;
			bool curs:1;
			bool view:1;
			bool mode:1;
			bool base:1;
			bool ovly:1;
			bool dither:1;
		};
		u16 mask;
	} set;
};

/******************************************************************************
 * EVO channel
 *****************************************************************************/

struct nv50_chan {
	struct nvif_object user;
	struct nvif_device *device;
};
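
/* oclass is a zero-terminated list of candidate channel classes, ordered
 * newest-first; the first one the display engine also advertises is
 * instantiated, and its user registers are mapped. */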

static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_chan *chan)
{
	struct nvif_sclass *sclass;
	int ret, i, n;

	chan->device = device;

	ret = n = nvif_object_sclass_get(disp, &sclass);
	if (ret < 0)
		return ret;

	while (oclass[0]) {
		for (i = 0; i < n; i++) {
			if (sclass[i].oclass == oclass[0]) {
				ret = nvif_object_init(disp, 0, oclass[0],
						       data, size, &chan->user);
				if (ret == 0)
					nvif_object_map(&chan->user);
				nvif_object_sclass_put(&sclass);
				return ret;
			}
		}
		oclass++;
	}

	nvif_object_sclass_put(&sclass);
	return -ENOSYS;
}

static void
nv50_chan_destroy(struct nv50_chan *chan)
{
	nvif_object_fini(&chan->user);
}

/******************************************************************************
 * PIO EVO channel
 *****************************************************************************/

struct nv50_pioc {
	struct nv50_chan base;
};

static void
nv50_pioc_destroy(struct nv50_pioc *pioc)
{
	nv50_chan_destroy(&pioc->base);
}

static int
nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_pioc *pioc)
{
	return nv50_chan_create(device, disp, oclass, head, data, size,
				&pioc->base);
}

/******************************************************************************
 * Cursor Immediate
 *****************************************************************************/

struct nv50_curs {
	struct nv50_pioc base;
};

static int
nv50_curs_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, struct nv50_curs *curs)
{
	struct nv50_disp_cursor_v0 args = {
		.head = head,
	};
	static const s32 oclass[] = {
		GK104_DISP_CURSOR,
		GF110_DISP_CURSOR,
		GT214_DISP_CURSOR,
		G82_DISP_CURSOR,
		NV50_DISP_CURSOR,
		0
	};

	return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
				&curs->base);
}

/******************************************************************************
 * Overlay Immediate
 *****************************************************************************/

struct nv50_oimm {
	struct nv50_pioc base;
};

static int
nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, struct nv50_oimm *oimm)
{
	struct nv50_disp_cursor_v0 args = {
		.head = head,
	};
	static const s32 oclass[] = {
		GK104_DISP_OVERLAY,
		GF110_DISP_OVERLAY,
		GT214_DISP_OVERLAY,
		G82_DISP_OVERLAY,
		NV50_DISP_OVERLAY,
		0
	};

	return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
				&oimm->base);
}

/******************************************************************************
 * DMA EVO channel
 *****************************************************************************/

struct nv50_dmac {
	struct nv50_chan base;
	dma_addr_t handle;
	u32 *ptr;

	struct nvif_object sync;
	struct nvif_object vram;

	/* Protects against concurrent pushbuf access to this channel; the
	 * lock is taken by evo_wait (if the pushbuf reservation succeeds)
	 * and dropped again by evo_kick. */
	struct mutex lock;
};

static void
nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
{
	struct nvif_device *device = dmac->base.device;

	nvif_object_fini(&dmac->vram);
	nvif_object_fini(&dmac->sync);

	nv50_chan_destroy(&dmac->base);

	if (dmac->ptr) {
		struct device *dev = nvxx_device(device)->dev;
		dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
	}
}
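
/* A DMA-mode EVO channel: methods are staged by the CPU in a page of
 * coherent system memory (ptr/handle) that the hardware fetches from, and
 * sync/vram are DMA objects (covering the shared sync buffer and all of
 * VRAM) that method data can reference by handle. */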

static int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
		 struct nv50_dmac *dmac)
{
	struct nv50_disp_core_channel_dma_v0 *args = data;
	struct nvif_object pushbuf;
	int ret;

	mutex_init(&dmac->lock);

	dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
				       &dmac->handle, GFP_KERNEL);
	if (!dmac->ptr)
		return -ENOMEM;

	ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_PCI_US,
					.access = NV_DMA_V0_ACCESS_RD,
					.start = dmac->handle + 0x0000,
					.limit = dmac->handle + 0x0fff,
			       }, sizeof(struct nv_dma_v0), &pushbuf);
	if (ret)
		return ret;

	args->pushbuf = nvif_handle(&pushbuf);

	ret = nv50_chan_create(device, disp, oclass, head, data, size,
			       &dmac->base);
	nvif_object_fini(&pushbuf);
	if (ret)
		return ret;

	ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = syncbuf + 0x0000,
					.limit = syncbuf + 0x0fff,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->sync);
	if (ret)
		return ret;

	ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = 0,
					.limit = device->info.ram_user - 1,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->vram);
	if (ret)
		return ret;

	return ret;
}

/******************************************************************************
 * Core
 *****************************************************************************/

struct nv50_mast {
	struct nv50_dmac base;
};

static int
nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
		 u64 syncbuf, struct nv50_mast *core)
{
	struct nv50_disp_core_channel_dma_v0 args = {
		.pushbuf = 0xb0007d00,
	};
	static const s32 oclass[] = {
		GP104_DISP_CORE_CHANNEL_DMA,
		GP100_DISP_CORE_CHANNEL_DMA,
		GM200_DISP_CORE_CHANNEL_DMA,
		GM107_DISP_CORE_CHANNEL_DMA,
		GK110_DISP_CORE_CHANNEL_DMA,
		GK104_DISP_CORE_CHANNEL_DMA,
		GF110_DISP_CORE_CHANNEL_DMA,
		GT214_DISP_CORE_CHANNEL_DMA,
		GT206_DISP_CORE_CHANNEL_DMA,
		GT200_DISP_CORE_CHANNEL_DMA,
		G82_DISP_CORE_CHANNEL_DMA,
		NV50_DISP_CORE_CHANNEL_DMA,
		0
	};

	return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
				syncbuf, &core->base);
}

/******************************************************************************
 * Base
 *****************************************************************************/

struct nv50_sync {
	struct nv50_dmac base;
	u32 addr;
	u32 data;
};

static int
nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, u64 syncbuf, struct nv50_sync *base)
{
	struct nv50_disp_base_channel_dma_v0 args = {
		.pushbuf = 0xb0007c00 | head,
		.head = head,
	};
	static const s32 oclass[] = {
		GK110_DISP_BASE_CHANNEL_DMA,
		GK104_DISP_BASE_CHANNEL_DMA,
		GF110_DISP_BASE_CHANNEL_DMA,
		GT214_DISP_BASE_CHANNEL_DMA,
		GT200_DISP_BASE_CHANNEL_DMA,
		G82_DISP_BASE_CHANNEL_DMA,
		NV50_DISP_BASE_CHANNEL_DMA,
		0
	};

	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
				syncbuf, &base->base);
}

/******************************************************************************
 * Overlay
 *****************************************************************************/

struct nv50_ovly {
	struct nv50_dmac base;
};

static int
nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, u64 syncbuf, struct nv50_ovly *ovly)
{
	struct nv50_disp_overlay_channel_dma_v0 args = {
		.pushbuf = 0xb0007e00 | head,
		.head = head,
	};
	static const s32 oclass[] = {
		GK104_DISP_OVERLAY_CONTROL_DMA,
		GF110_DISP_OVERLAY_CONTROL_DMA,
		GT214_DISP_OVERLAY_CHANNEL_DMA,
		GT200_DISP_OVERLAY_CHANNEL_DMA,
		G82_DISP_OVERLAY_CHANNEL_DMA,
		NV50_DISP_OVERLAY_CHANNEL_DMA,
		0
	};

	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
				syncbuf, &ovly->base);
}

struct nv50_head {
	struct nouveau_crtc base;
	struct nouveau_bo *image;
	struct nv50_curs curs;
	struct nv50_sync sync;
	struct nv50_ovly ovly;
	struct nv50_oimm oimm;

	struct nv50_head_atom arm;
	struct nv50_head_atom asy;
};

#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
#define nv50_curs(c) (&nv50_head(c)->curs)
#define nv50_sync(c) (&nv50_head(c)->sync)
#define nv50_ovly(c) (&nv50_head(c)->ovly)
#define nv50_oimm(c) (&nv50_head(c)->oimm)
#define nv50_chan(c) (&(c)->base.base)
#define nv50_vers(c) nv50_chan(c)->user.oclass

struct nv50_fbdma {
	struct list_head head;
	struct nvif_object core;
	struct nvif_object base[4];
};

struct nv50_disp {
	struct nvif_object *disp;
	struct nv50_mast mast;

	struct list_head fbdma;

	struct nouveau_bo *sync;
};

static struct nv50_disp *
nv50_disp(struct drm_device *dev)
{
	return nouveau_display(dev)->priv;
}

#define nv50_mast(d) (&nv50_disp(d)->mast)

static struct drm_crtc *
nv50_display_crtc_get(struct drm_encoder *encoder)
{
	return nouveau_encoder(encoder)->crtc;
}

/******************************************************************************
 * EVO channel helpers
 *****************************************************************************/
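
/* evo_wait() reserves room for nr dwords in a channel's pushbuf and returns
 * a pointer to write methods at; when too little space remains before the
 * end of the page it writes a jump back to offset 0 and waits for the
 * hardware to consume the buffer.  evo_kick() publishes the new PUT offset
 * and drops the channel lock taken by evo_wait(). */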
static u32 *
evo_wait(void *evoc, int nr)
{
	struct nv50_dmac *dmac = evoc;
	struct nvif_device *device = dmac->base.device;
	u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;

	mutex_lock(&dmac->lock);
	if (put + nr >= (PAGE_SIZE / 4) - 8) {
		dmac->ptr[put] = 0x20000000;

		nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
		if (nvif_msec(device, 2000,
			if (!nvif_rd32(&dmac->base.user, 0x0004))
				break;
		) < 0) {
			mutex_unlock(&dmac->lock);
			printk(KERN_ERR "nouveau: evo channel stalled\n");
			return NULL;
		}

		put = 0;
	}

	return dmac->ptr + put;
}

static void
evo_kick(u32 *push, void *evoc)
{
	struct nv50_dmac *dmac = evoc;
	nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
	mutex_unlock(&dmac->lock);
}

#define evo_mthd(p,m,s) do {                                                   \
	const u32 _m = (m), _s = (s);                                          \
	if (drm_debug & DRM_UT_KMS)                                            \
		printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__);             \
	*((p)++) = ((_s << 18) | _m);                                          \
} while(0)

#define evo_data(p,d) do {                                                     \
	const u32 _d = (d);                                                    \
	if (drm_debug & DRM_UT_KMS)                                            \
		printk(KERN_ERR "\t%08x\n", _d);                               \
	*((p)++) = _d;                                                         \
} while(0)

static bool
evo_sync_wait(void *data)
{
	if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000)
		return true;
	usleep_range(1, 2);
	return false;
}
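
/* Ask the core channel for a completion notification in the shared sync
 * buffer, then poll (for up to two seconds) until the display engine has
 * written it. */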

static int
evo_sync(struct drm_device *dev)
{
	struct nvif_device *device = &nouveau_drm(dev)->device;
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_mast *mast = nv50_mast(dev);
	u32 *push = evo_wait(mast, 8);
	if (push) {
		nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
		evo_mthd(push, 0x0084, 1);
		evo_data(push, 0x80000000 | EVO_MAST_NTFY);
		evo_mthd(push, 0x0080, 2);
		evo_data(push, 0x00000000);
		evo_data(push, 0x00000000);
		evo_kick(push, mast);
		if (nvif_msec(device, 2000,
			if (evo_sync_wait(disp->sync))
				break;
		) >= 0)
			return 0;
	}

	return -EBUSY;
}

/******************************************************************************
 * Page flipping channel
 *****************************************************************************/
struct nouveau_bo *
nv50_display_crtc_sema(struct drm_device *dev, int crtc)
{
	return nv50_disp(dev)->sync;
}

struct nv50_display_flip {
	struct nv50_disp *disp;
	struct nv50_sync *chan;
};

static bool
nv50_display_flip_wait(void *data)
{
	struct nv50_display_flip *flip = data;
	if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
					      flip->chan->data)
		return true;
	usleep_range(1, 2);
	return false;
}

void
nv50_display_flip_stop(struct drm_crtc *crtc)
{
	struct nvif_device *device = &nouveau_drm(crtc->dev)->device;
	struct nv50_display_flip flip = {
		.disp = nv50_disp(crtc->dev),
		.chan = nv50_sync(crtc),
	};
	u32 *push;

	push = evo_wait(flip.chan, 8);
	if (push) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x0094, 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x0080, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, flip.chan);
	}

	nvif_msec(device, 2000,
		if (nv50_display_flip_wait(&flip))
			break;
	);
}
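
/* Queue a page flip on the head's base channel.  If a rendering channel is
 * given, a semaphore pair in the shared sync buffer interlocks the two: the
 * rendering channel releases a new value when it finishes, and the flip is
 * held back until that value appears, so the new framebuffer is never
 * scanned out before rendering completes. */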

int
nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
		       struct nouveau_channel *chan, u32 swap_interval)
{
	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_sync *sync = nv50_sync(crtc);
	u32 *push;
	int ret;

	if (crtc->primary->fb->width != fb->width ||
	    crtc->primary->fb->height != fb->height)
		return -EINVAL;

	swap_interval <<= 4;
	if (swap_interval == 0)
		swap_interval |= 0x100;
	if (chan == NULL)
		evo_sync(crtc->dev);

	push = evo_wait(sync, 128);
	if (unlikely(push == NULL))
		return -EBUSY;

	if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) {
		ret = RING_SPACE(chan, 8);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
		OUT_RING  (chan, NvEvoSema0 + nv_crtc->index);
		OUT_RING  (chan, sync->addr ^ 0x10);
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
		OUT_RING  (chan, sync->data + 1);
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
		OUT_RING  (chan, sync->addr);
		OUT_RING  (chan, sync->data);
	} else
	if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
		u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING  (chan, chan->vram.handle);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(addr ^ 0x10));
		OUT_RING  (chan, lower_32_bits(addr ^ 0x10));
		OUT_RING  (chan, sync->data + 1);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(addr));
		OUT_RING  (chan, lower_32_bits(addr));
		OUT_RING  (chan, sync->data);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
	} else
	if (chan) {
		u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
		ret = RING_SPACE(chan, 10);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(addr ^ 0x10));
		OUT_RING  (chan, lower_32_bits(addr ^ 0x10));
		OUT_RING  (chan, sync->data + 1);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
				 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(addr));
		OUT_RING  (chan, lower_32_bits(addr));
		OUT_RING  (chan, sync->data);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
				 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
	}

	if (chan) {
		sync->addr ^= 0x10;
		sync->data++;
		FIRE_RING (chan);
	}

	/* queue the flip */
	evo_mthd(push, 0x0100, 1);
	evo_data(push, 0xfffe0000);
	evo_mthd(push, 0x0084, 1);
	evo_data(push, swap_interval);
	if (!(swap_interval & 0x00000100)) {
		evo_mthd(push, 0x00e0, 1);
		evo_data(push, 0x40000000);
	}
	evo_mthd(push, 0x0088, 4);
	evo_data(push, sync->addr);
	evo_data(push, sync->data++);
	evo_data(push, sync->data);
	evo_data(push, sync->base.sync.handle);
	evo_mthd(push, 0x00a0, 2);
	evo_data(push, 0x00000000);
	evo_data(push, 0x00000000);
	evo_mthd(push, 0x00c0, 1);
	evo_data(push, nv_fb->r_handle);
	evo_mthd(push, 0x0110, 2);
	evo_data(push, 0x00000000);
	evo_data(push, 0x00000000);
	if (nv50_vers(sync) < GF110_DISP_BASE_CHANNEL_DMA) {
		evo_mthd(push, 0x0800, 5);
		evo_data(push, nv_fb->nvbo->bo.offset >> 8);
		evo_data(push, 0);
		evo_data(push, (fb->height << 16) | fb->width);
		evo_data(push, nv_fb->r_pitch);
		evo_data(push, nv_fb->r_format);
	} else {
		evo_mthd(push, 0x0400, 5);
		evo_data(push, nv_fb->nvbo->bo.offset >> 8);
		evo_data(push, 0);
		evo_data(push, (fb->height << 16) | fb->width);
		evo_data(push, nv_fb->r_pitch);
		evo_data(push, nv_fb->r_format);
	}
	evo_mthd(push, 0x0080, 1);
	evo_data(push, 0x00000000);
	evo_kick(push, sync);

	nouveau_bo_ref(nv_fb->nvbo, &head->image);
	return 0;
}

/******************************************************************************
 * Head
 *****************************************************************************/
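/* The nv50_head_* helpers below program one head's state through the core
 * channel; the method offsets differ between display generations, selected
 * here by comparing the core channel's class. */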
static void
nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
		else
		if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
		else
			evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
		evo_data(push, (asyh->dither.mode << 3) |
			       (asyh->dither.bits << 1) |
			        asyh->dither.enable);
		evo_kick(push, core);
	}
}

static void
nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;
	}

	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}

static void
nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		case 1: bounds |= 0x00000000; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;
	}

	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}

static void
nv50_head_curs_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
			evo_data(push, 0x05000000);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
			evo_data(push, 0x05000000);
			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
			evo_data(push, 0x05000000);
			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, core);
	}
}

static void
nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 5))) {
		if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
						    (asyh->curs.format << 24));
			evo_data(push, asyh->curs.offset >> 8);
		} else
		if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
						    (asyh->curs.format << 24));
			evo_data(push, asyh->curs.offset >> 8);
			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
			evo_data(push, asyh->curs.handle);
		} else {
			evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
			evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
						    (asyh->curs.format << 24));
			evo_data(push, asyh->curs.offset >> 8);
			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
			evo_data(push, asyh->curs.handle);
		}
		evo_kick(push, core);
	}
}

static void
nv50_head_core_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}

static void
nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 9))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 20 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.kind << 16 |
				       asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 20 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		} else {
			evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 24 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		}
		evo_kick(push, core);
	}
}

static void
nv50_head_lut_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
			evo_data(push, 0x40000000);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
			evo_data(push, 0x40000000);
			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
			evo_data(push, 0x03000000);
			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, core);
	}
}

static void
nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 7))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
			evo_data(push, 0xc0000000);
			evo_data(push, asyh->lut.offset >> 8);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
			evo_data(push, 0xc0000000);
			evo_data(push, asyh->lut.offset >> 8);
			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
			evo_data(push, asyh->lut.handle);
		} else {
			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
			evo_data(push, 0x83000000);
			evo_data(push, asyh->lut.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
			evo_data(push, asyh->lut.handle);
		}
		evo_kick(push, core);
	}
}

static void
nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	struct nv50_head_mode *m = &asyh->mode;
	u32 *push;
	if ((push = evo_wait(core, 14))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
			evo_data(push, 0x00800000 | m->clock);
			evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
			evo_mthd(push, 0x0810 + (head->base.index * 0x400), 6);
			evo_data(push, 0x00000000);
			evo_data(push, (m->v.active  << 16) | m->h.active );
			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
			evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
			evo_data(push, 0x00000000);
			evo_data(push, (m->v.active  << 16) | m->h.active );
			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
			evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
			evo_data(push, 0x00000000); /* ??? */
			evo_data(push, 0xffffff00);
			evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
			evo_data(push, m->clock * 1000);
			evo_data(push, 0x00200000); /* ??? */
			evo_data(push, m->clock * 1000);
		}
		evo_kick(push, core);
	}
}

static void
nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 10))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
			evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
		} else {
			evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
			evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
		}
		evo_kick(push, core);
	}
}

static void
nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
{
	if (asyh->clr.core && (!asyh->set.core || y))
		nv50_head_lut_clr(head);
	if (asyh->clr.core && (!asyh->set.core || y))
		nv50_head_core_clr(head);
	if (asyh->clr.curs && (!asyh->set.curs || y))
		nv50_head_curs_clr(head);
}

static void
nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	if (asyh->set.view   ) nv50_head_view    (head, asyh);
	if (asyh->set.mode   ) nv50_head_mode    (head, asyh);
	if (asyh->set.core   ) nv50_head_lut_set (head, asyh);
	if (asyh->set.core   ) nv50_head_core_set(head, asyh);
	if (asyh->set.curs   ) nv50_head_curs_set(head, asyh);
	if (asyh->set.base   ) nv50_head_base    (head, asyh);
	if (asyh->set.ovly   ) nv50_head_ovly    (head, asyh);
	if (asyh->set.dither ) nv50_head_dither  (head, asyh);
}
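
/* Resolve the connector's dither properties into the packed enable/bits/
 * mode fields; AUTO picks dynamic 2x2 dithering only when the framebuffer
 * carries more depth than the sink can display. */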

static void
nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
			      struct nv50_head_atom *asyh,
			      struct nouveau_conn_atom *asyc)
{
	struct drm_connector *connector = asyc->state.connector;
	u32 mode = 0x00;

	if (asyc->dither.mode == DITHERING_MODE_AUTO) {
		if (asyh->base.depth > connector->display_info.bpc * 3)
			mode = DITHERING_MODE_DYNAMIC2X2;
	} else {
		mode = asyc->dither.mode;
	}

	if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
		if (connector->display_info.bpc >= 8)
			mode |= DITHERING_DEPTH_8BPC;
	} else {
		mode |= asyc->dither.depth;
	}

	asyh->dither.enable = mode;
	asyh->dither.bits = mode >> 1;
	asyh->dither.mode = mode >> 3;
	asyh->set.dither = true;
}
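
/* Derive the viewport: iW/iH come from the user mode, oW/oH from the
 * backend mode, then underscan borders and CENTER/ASPECT scaling trim the
 * output size while preserving the aspect ratio where required. */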

static void
nv50_head_atomic_check_view(struct nv50_head_atom *armh,
			    struct nv50_head_atom *asyh,
			    struct nouveau_conn_atom *asyc)
{
	struct drm_connector *connector = asyc->state.connector;
	struct drm_display_mode *omode = &asyh->state.adjusted_mode;
	struct drm_display_mode *umode = &asyh->state.mode;
	int mode = asyc->scaler.mode;
	struct edid *edid;

	if (connector->edid_blob_ptr)
		edid = (struct edid *)connector->edid_blob_ptr->data;
	else
		edid = NULL;

	if (!asyc->scaler.full) {
		if (mode == DRM_MODE_SCALE_NONE)
			omode = umode;
	} else {
		/* Non-EDID LVDS/eDP mode. */
		mode = DRM_MODE_SCALE_FULLSCREEN;
	}

	asyh->view.iW = umode->hdisplay;
	asyh->view.iH = umode->vdisplay;
	asyh->view.oW = omode->hdisplay;
	asyh->view.oH = omode->vdisplay;
	if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
		asyh->view.oH *= 2;

	/* Add overscan compensation if necessary, will keep the aspect
	 * ratio the same as the backend mode unless overridden by the
	 * user setting both hborder and vborder properties.
	 */
	if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
	    (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
	     drm_detect_hdmi_monitor(edid)))) {
		u32 bX = asyc->scaler.underscan.hborder;
		u32 bY = asyc->scaler.underscan.vborder;
		u32 r = (asyh->view.oH << 19) / asyh->view.oW;

		if (bX) {
			asyh->view.oW -= (bX * 2);
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		} else {
			asyh->view.oW -= (asyh->view.oW >> 4) + 32;
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
	}

	/* Handle CENTER/ASPECT scaling, taking into account the areas
	 * removed already for overscan compensation.
	 */
	switch (mode) {
	case DRM_MODE_SCALE_CENTER:
		asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
		asyh->view.oH = min((u16)umode->vdisplay, asyh->view.oH);
		/* fall-through */
	case DRM_MODE_SCALE_ASPECT:
		if (asyh->view.oH < asyh->view.oW) {
			u32 r = (asyh->view.iW << 19) / asyh->view.iH;
			asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
		} else {
			u32 r = (asyh->view.iH << 19) / asyh->view.iW;
			asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
		break;
	default:
		break;
	}

	asyh->set.view = true;
}
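
/* Convert the adjusted DRM mode into raw EVO timing parameters; blankus is
 * a conservative estimate of the vblank period in microseconds, used for
 * the pre-GF110 raster vblank DMI method. */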

static void
nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	u32 ilace   = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
	u32 vscan   = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
	u32 hbackp  =  mode->htotal - mode->hsync_end;
	u32 vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
	u32 hfrontp =  mode->hsync_start - mode->hdisplay;
	u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
	struct nv50_head_mode *m = &asyh->mode;

	m->h.active = mode->htotal;
	m->h.synce  = mode->hsync_end - mode->hsync_start - 1;
	m->h.blanke = m->h.synce + hbackp;
	m->h.blanks = mode->htotal - hfrontp - 1;

	m->v.active = mode->vtotal * vscan / ilace;
	m->v.synce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
	m->v.blanke = m->v.synce + vbackp;
	m->v.blanks = m->v.active - vfrontp - 1;

	/*XXX: Safe underestimate, even "0" works */
	m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
	m->v.blankus *= 1000;
	m->v.blankus /= mode->clock;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		m->v.blank2e =  m->v.active + m->v.synce + vbackp;
		m->v.blank2s =  m->v.blank2e + (mode->vdisplay * vscan / ilace);
		m->v.active  = (m->v.active * 2) + 1;
		m->interlace = true;
	} else {
		m->v.blank2e = 0;
		m->v.blank2s = 1;
		m->interlace = false;
	}
	m->clock = mode->clock;

	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
	asyh->set.mode = true;
}

static int
nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
	struct nv50_disp *disp = nv50_disp(crtc->dev);
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_head_atom *armh = &head->arm;
	struct nv50_head_atom *asyh = nv50_head_atom(state);

	NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
	asyh->clr.mask = 0;
	asyh->set.mask = 0;

	if (asyh->state.active) {
		if (asyh->state.mode_changed)
			nv50_head_atomic_check_mode(head, asyh);

		if ((asyh->core.visible = (asyh->base.cpp != 0))) {
			asyh->core.x = asyh->base.x;
			asyh->core.y = asyh->base.y;
			asyh->core.w = asyh->base.w;
			asyh->core.h = asyh->base.h;
		} else
		if ((asyh->core.visible = asyh->curs.visible)) {
			/*XXX: We need to either find some way of having the
			 *     primary base layer appear black, while still
			 *     being able to display the other layers, or we
			 *     need to allocate a dummy black surface here.
			 */
			asyh->core.x = 0;
			asyh->core.y = 0;
			asyh->core.w = asyh->state.mode.hdisplay;
			asyh->core.h = asyh->state.mode.vdisplay;
		}
		asyh->core.handle = disp->mast.base.vram.handle;
		asyh->core.offset = 0;
		asyh->core.format = 0xcf;
		asyh->core.kind = 0;
		asyh->core.layout = 1;
		asyh->core.block = 0;
		asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
		asyh->lut.handle = disp->mast.base.vram.handle;
		asyh->lut.offset = head->base.lut.nvbo->bo.offset;
		asyh->set.base = armh->base.cpp != asyh->base.cpp;
		asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
	} else {
		asyh->core.visible = false;
		asyh->curs.visible = false;
		asyh->base.cpp = 0;
		asyh->ovly.cpp = 0;
	}

	if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
		if (asyh->core.visible) {
			if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
				asyh->set.core = true;
		} else
		if (armh->core.visible) {
			asyh->clr.core = true;
		}

		if (asyh->curs.visible) {
			if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
				asyh->set.curs = true;
		} else
		if (armh->curs.visible) {
			asyh->clr.curs = true;
		}
	} else {
		asyh->clr.core = armh->core.visible;
		asyh->clr.curs = armh->curs.visible;
		asyh->set.core = asyh->core.visible;
		asyh->set.curs = asyh->curs.visible;
	}

	memcpy(armh, asyh, sizeof(*asyh));
	asyh->state.mode_changed = 0;
	return 0;
}

/******************************************************************************
 * CRTC
 *****************************************************************************/
static int
nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
	struct nv50_head *head = nv50_head(&nv_crtc->base);
	struct nv50_head_atom *asyh = &head->asy;
	struct nouveau_connector *nv_connector;
	struct nouveau_conn_atom asyc;
	u32 *push;

	nv_connector = nouveau_crtc_connector_get(nv_crtc);

	asyc.state.connector = &nv_connector->base;
	asyc.dither.mode = nv_connector->dithering_mode;
	asyc.dither.depth = nv_connector->dithering_depth;
	asyh->state.crtc = &nv_crtc->base;
	nv50_head_atomic_check(&head->base.base, &asyh->state);
	nv50_head_atomic_check_dither(&head->arm, asyh, &asyc);
	nv50_head_flush_set(head, asyh);

	if (update) {
		if ((push = evo_wait(mast, 2))) {
			evo_mthd(push, 0x0080, 1);
			evo_data(push, 0x00000000);
			evo_kick(push, mast);
		}
	}

	return 0;
}

static int
nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
{
	struct nv50_head *head = nv50_head(&nv_crtc->base);
	struct nv50_head_atom *asyh = &head->asy;
	struct drm_crtc *crtc = &nv_crtc->base;
	struct nouveau_connector *nv_connector;
	struct nouveau_conn_atom asyc;

	nv_connector = nouveau_crtc_connector_get(nv_crtc);

	asyc.state.connector = &nv_connector->base;
	asyc.scaler.mode = nv_connector->scaling_mode;
	asyc.scaler.full = nv_connector->scaling_full;
	asyc.scaler.underscan.mode = nv_connector->underscan;
	asyc.scaler.underscan.hborder = nv_connector->underscan_hborder;
	asyc.scaler.underscan.vborder = nv_connector->underscan_vborder;
	nv50_head_atomic_check(&head->base.base, &asyh->state);
	nv50_head_atomic_check_view(&head->arm, asyh, &asyc);
	nv50_head_flush_set(head, asyh);

	if (update) {
		nv50_display_flip_stop(crtc);
		nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
	}

	return 0;
}

static int
nv50_crtc_set_raster_vblank_dmi(struct nouveau_crtc *nv_crtc, u32 usec)
{
	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
	u32 *push;

	push = evo_wait(mast, 8);
	if (!push)
		return -ENOMEM;

	evo_mthd(push, 0x0828 + (nv_crtc->index * 0x400), 1);
	evo_data(push, usec);
	evo_kick(push, mast);
	return 0;
}

static int
nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
{
	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
	u32 *push, hue, vib;
	int adj;

	adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
	vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
	hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;

	push = evo_wait(mast, 16);
	if (push) {
		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
			evo_data(push, (hue << 20) | (vib << 8));
		} else {
			evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
			evo_data(push, (hue << 20) | (vib << 8));
		}

		if (update) {
			evo_mthd(push, 0x0080, 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, mast);
	}

	return 0;
}

static int
nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
		    int x, int y, bool update)
{
	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
	struct nv50_head *head = nv50_head(&nv_crtc->base);
	struct nv50_head_atom *asyh = &head->asy;
	const struct drm_format_info *info;

	info = drm_format_info(nvfb->base.pixel_format);
	if (!info || !info->depth)
		return -EINVAL;

	asyh->base.depth = info->depth;
	asyh->base.cpp = info->cpp[0];
	asyh->base.x = x;
	asyh->base.y = y;
	asyh->base.w = nvfb->base.width;
	asyh->base.h = nvfb->base.height;
	nv50_head_atomic_check(&head->base.base, &asyh->state);
	nv50_head_flush_set(head, asyh);

	if (update) {
		struct nv50_mast *core = nv50_mast(nv_crtc->base.dev);
		u32 *push = evo_wait(core, 2);
		if (push) {
			evo_mthd(push, 0x0080, 1);
			evo_data(push, 0x00000000);
			evo_kick(push, core);
		}
	}

	nv_crtc->fb.handle = nvfb->r_handle;
	return 0;
}

static void
nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
{
	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
	struct nv50_head *head = nv50_head(&nv_crtc->base);
	struct nv50_head_atom *asyh = &head->asy;

	asyh->curs.visible = true;
	asyh->curs.handle = mast->base.vram.handle;
	asyh->curs.offset = nv_crtc->cursor.nvbo->bo.offset;
	asyh->curs.layout = 1;
	asyh->curs.format = 1;
	nv50_head_atomic_check(&head->base.base, &asyh->state);
	nv50_head_flush_set(head, asyh);
}

static void
nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
{
	struct nv50_head *head = nv50_head(&nv_crtc->base);
	struct nv50_head_atom *asyh = &head->asy;

	asyh->curs.visible = false;
	nv50_head_atomic_check(&head->base.base, &asyh->state);
	nv50_head_flush_clr(head, asyh, false);
}

static void
nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);

	if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
		nv50_crtc_cursor_show(nv_crtc);
	else
		nv50_crtc_cursor_hide(nv_crtc);

	if (update) {
		u32 *push = evo_wait(mast, 2);
		if (push) {
			evo_mthd(push, 0x0080, 1);
			evo_data(push, 0x00000000);
			evo_kick(push, mast);
		}
	}
}

static void
nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}

static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_head_atom *asyh = &head->asy;

	nv50_display_flip_stop(crtc);

	asyh->state.active = false;
	nv50_head_atomic_check(&head->base.base, &asyh->state);
	nv50_head_flush_clr(head, asyh, false);
}

static void
nv50_crtc_commit(struct drm_crtc *crtc)
{
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_head_atom *asyh = &head->asy;

	asyh->state.active = true;
	nv50_head_atomic_check(&head->base.base, &asyh->state);
	nv50_head_flush_set(head, asyh);

	nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
}

static bool
nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
		     struct drm_display_mode *adjusted_mode)
{
	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
	return true;
}

static int
nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
{
	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
	struct nv50_head *head = nv50_head(crtc);
	int ret;

	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret == 0) {
		if (head->image)
			nouveau_bo_unpin(head->image);
		nouveau_bo_ref(nvfb->nvbo, &head->image);
	}

	return ret;
}

static int
nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
		   struct drm_display_mode *mode, int x, int y,
		   struct drm_framebuffer *old_fb)
{
	struct nv50_mast *mast = nv50_mast(crtc->dev);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nouveau_connector *nv_connector;
	int ret;
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_head_atom *asyh = &head->asy;

	memcpy(&asyh->state.mode, umode, sizeof(*umode));
	memcpy(&asyh->state.adjusted_mode, mode, sizeof(*mode));
	asyh->state.active = true;
	asyh->state.mode_changed = true;
	nv50_head_atomic_check(&head->base.base, &asyh->state);

	ret = nv50_crtc_swap_fbs(crtc, old_fb);
	if (ret)
		return ret;

	nv50_head_flush_set(head, asyh);

	nv_connector = nouveau_crtc_connector_get(nv_crtc);
	nv50_crtc_set_dither(nv_crtc, false);
	nv50_crtc_set_scale(nv_crtc, false);

	/* G94 only accepts this after setting scale */
	if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA)
		nv50_crtc_set_raster_vblank_dmi(nv_crtc, asyh->mode.v.blankus);

	nv50_crtc_set_color_vibrance(nv_crtc, false);
	nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false);
	return 0;
}

static int
nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
			struct drm_framebuffer *old_fb)
{
	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	int ret;

	if (!crtc->primary->fb) {
		NV_DEBUG(drm, "No FB bound\n");
		return 0;
	}

	ret = nv50_crtc_swap_fbs(crtc, old_fb);
	if (ret)
		return ret;

	nv50_display_flip_stop(crtc);
	nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, true);
	nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
	return 0;
}

static int
nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
			       struct drm_framebuffer *fb, int x, int y,
			       enum mode_set_atomic state)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	nv50_display_flip_stop(crtc);
	nv50_crtc_set_image(nv_crtc, fb, x, y, true);
	return 0;
}

static void
nv50_crtc_lut_load(struct drm_crtc *crtc)
{
	struct nv50_disp *disp = nv50_disp(crtc->dev);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
	int i;

	for (i = 0; i < 256; i++) {
		u16 r = nv_crtc->lut.r[i] >> 2;
		u16 g = nv_crtc->lut.g[i] >> 2;
		u16 b = nv_crtc->lut.b[i] >> 2;

		if (disp->disp->oclass < GF110_DISP) {
			writew(r + 0x0000, lut + (i * 0x08) + 0);
			writew(g + 0x0000, lut + (i * 0x08) + 2);
			writew(b + 0x0000, lut + (i * 0x08) + 4);
		} else {
			writew(r + 0x6000, lut + (i * 0x20) + 0);
			writew(g + 0x6000, lut + (i * 0x20) + 2);
			writew(b + 0x6000, lut + (i * 0x20) + 4);
		}
	}
}

static void
nv50_crtc_disable(struct drm_crtc *crtc)
{
	struct nv50_head *head = nv50_head(crtc);
	evo_sync(crtc->dev);
	if (head->image)
		nouveau_bo_unpin(head->image);
	nouveau_bo_ref(NULL, &head->image);
}

static int
nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
		     uint32_t handle, uint32_t width, uint32_t height)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_gem_object *gem = NULL;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (handle) {
		if (width != 64 || height != 64)
			return -EINVAL;

		gem = drm_gem_object_lookup(file_priv, handle);
		if (unlikely(!gem))
			return -ENOENT;
		nvbo = nouveau_gem_object(gem);

		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
	}

	if (ret == 0) {
		if (nv_crtc->cursor.nvbo)
			nouveau_bo_unpin(nv_crtc->cursor.nvbo);
		nouveau_bo_ref(nvbo, &nv_crtc->cursor.nvbo);
	}
	drm_gem_object_unreference_unlocked(gem);

	nv50_crtc_cursor_show_hide(nv_crtc, true, true);
	return ret;
}

static int
nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nv50_curs *curs = nv50_curs(crtc);
	struct nv50_chan *chan = nv50_chan(curs);
	nvif_wr32(&chan->user, 0x0084, (y << 16) | (x & 0xffff));
	nvif_wr32(&chan->user, 0x0080, 0x00000000);

	nv_crtc->cursor_saved_x = x;
	nv_crtc->cursor_saved_y = y;
	return 0;
}

static int
nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
		    uint32_t size)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	u32 i;

	for (i = 0; i < size; i++) {
		nv_crtc->lut.r[i] = r[i];
		nv_crtc->lut.g[i] = g[i];
		nv_crtc->lut.b[i] = b[i];
	}

	nv50_crtc_lut_load(crtc);

	return 0;
}

static void
nv50_crtc_cursor_restore(struct nouveau_crtc *nv_crtc, int x, int y)
{
	nv50_crtc_cursor_move(&nv_crtc->base, x, y);

	nv50_crtc_cursor_show_hide(nv_crtc, true, true);
}

static void
nv50_crtc_destroy(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nv50_disp *disp = nv50_disp(crtc->dev);
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_fbdma *fbdma;

	list_for_each_entry(fbdma, &disp->fbdma, head) {
		nvif_object_fini(&fbdma->base[nv_crtc->index]);
	}

	nv50_dmac_destroy(&head->ovly.base, disp->disp);
	nv50_pioc_destroy(&head->oimm.base);
	nv50_dmac_destroy(&head->sync.base, disp->disp);
	nv50_pioc_destroy(&head->curs.base);

	/*XXX: this shouldn't be necessary, but the core doesn't call
	 *     disconnect() during the cleanup paths
	 */
	if (head->image)
		nouveau_bo_unpin(head->image);
	nouveau_bo_ref(NULL, &head->image);

	/*XXX: ditto */
	if (nv_crtc->cursor.nvbo)
		nouveau_bo_unpin(nv_crtc->cursor.nvbo);
	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);

	nouveau_bo_unmap(nv_crtc->lut.nvbo);
	if (nv_crtc->lut.nvbo)
		nouveau_bo_unpin(nv_crtc->lut.nvbo);
	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);

	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
	.dpms = nv50_crtc_dpms,
	.prepare = nv50_crtc_prepare,
	.commit = nv50_crtc_commit,
	.mode_fixup = nv50_crtc_mode_fixup,
	.mode_set = nv50_crtc_mode_set,
	.mode_set_base = nv50_crtc_mode_set_base,
	.mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
	.load_lut = nv50_crtc_lut_load,
	.disable = nv50_crtc_disable,
};

static const struct drm_crtc_funcs nv50_crtc_func = {
	.cursor_set = nv50_crtc_cursor_set,
	.cursor_move = nv50_crtc_cursor_move,
	.gamma_set = nv50_crtc_gamma_set,
	.set_config = nouveau_crtc_set_config,
	.destroy = nv50_crtc_destroy,
	.page_flip = nouveau_crtc_page_flip,
};

static int
nv50_crtc_create(struct drm_device *dev, int index)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->device;
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_head *head;
	struct drm_crtc *crtc;
	int ret, i;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOMEM;

	head->base.index = index;
	head->base.color_vibrance = 50;
	head->base.vibrant_hue = 0;
	head->base.cursor.set_pos = nv50_crtc_cursor_restore;
	for (i = 0; i < 256; i++) {
		head->base.lut.r[i] = i << 8;
		head->base.lut.g[i] = i << 8;
		head->base.lut.b[i] = i << 8;
	}

	crtc = &head->base.base;
	drm_crtc_init(dev, crtc, &nv50_crtc_func);
	drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
	drm_mode_crtc_set_gamma_size(crtc, 256);

	ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true);
		if (!ret) {
			ret = nouveau_bo_map(head->base.lut.nvbo);
			if (ret)
				nouveau_bo_unpin(head->base.lut.nvbo);
		}
		if (ret)
			nouveau_bo_ref(NULL, &head->base.lut.nvbo);
	}

	if (ret)
		goto out;

	/* allocate cursor resources */
	ret = nv50_curs_create(device, disp->disp, index, &head->curs);
	if (ret)
		goto out;

	/* allocate page flip / sync resources */
	ret = nv50_base_create(device, disp->disp, index, disp->sync->bo.offset,
			       &head->sync);
	if (ret)
		goto out;

	head->sync.addr = EVO_FLIP_SEM0(index);
	head->sync.data = 0x00000000;

	/* allocate overlay resources */
	ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
	if (ret)
		goto out;

	ret = nv50_ovly_create(device, disp->disp, index, disp->sync->bo.offset,
			       &head->ovly);
	if (ret)
		goto out;

out:
	if (ret)
		nv50_crtc_destroy(crtc);
	return ret;
}

/******************************************************************************
 * Encoder helpers
 *****************************************************************************/
static bool
nv50_encoder_mode_fixup(struct drm_encoder *encoder,
			const struct drm_display_mode *mode,
			struct drm_display_mode *adjusted_mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_connector *nv_connector;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (nv_connector && nv_connector->native_mode) {
		nv_connector->scaling_full = false;
		if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) {
			switch (nv_connector->type) {
			case DCB_CONNECTOR_LVDS:
			case DCB_CONNECTOR_LVDS_SPWG:
			case DCB_CONNECTOR_eDP:
				/* force use of scaler for non-edid modes */
				if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
					return true;
				nv_connector->scaling_full = true;
				break;
			default:
				return true;
			}
		}

		drm_mode_copy(adjusted_mode, nv_connector->native_mode);
	}

	return true;
}

/******************************************************************************
 * DAC
 *****************************************************************************/
static void
nv50_dac_dpms(struct drm_encoder *encoder, int mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_dac_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_DAC_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
		.pwr.state = 1,
		.pwr.data  = 1,
		.pwr.vsync = (mode != DRM_MODE_DPMS_SUSPEND &&
			      mode != DRM_MODE_DPMS_OFF),
		.pwr.hsync = (mode != DRM_MODE_DPMS_STANDBY &&
			      mode != DRM_MODE_DPMS_OFF),
	};

	nvif_mthd(disp->disp, 0, &args, sizeof(args));
}

static void
nv50_dac_commit(struct drm_encoder *encoder)
{
}

static void
nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	u32 *push;

	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);

	push = evo_wait(mast, 8);
	if (push) {
		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
			u32 syncs = 0x00000000;

			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				syncs |= 0x00000001;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				syncs |= 0x00000002;

			evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
			evo_data(push, 1 << nv_crtc->index);
			evo_data(push, syncs);
		} else {
			u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
			u32 syncs = 0x00000001;

			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				syncs |= 0x00000008;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				syncs |= 0x00000010;

			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
				magic |= 0x00000001;

			evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
			evo_data(push, syncs);
			evo_data(push, magic);
			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
			evo_data(push, 1 << nv_crtc->index);
		}

		evo_kick(push, mast);
	}

	nv_encoder->crtc = encoder->crtc;
}

static void
nv50_dac_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	const int or = nv_encoder->or;
	u32 *push;

	if (nv_encoder->crtc) {
		nv50_crtc_prepare(nv_encoder->crtc);

		push = evo_wait(mast, 4);
		if (push) {
			if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
				evo_mthd(push, 0x0400 + (or * 0x080), 1);
				evo_data(push, 0x00000000);
			} else {
				evo_mthd(push, 0x0180 + (or * 0x020), 1);
				evo_data(push, 0x00000000);
			}
			evo_kick(push, mast);
		}
	}

	nv_encoder->crtc = NULL;
}

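/* Sense for an analog load on the DAC.  The test value comes from the
 * VBIOS (dactestval) when present, falling back to 340 when the table
 * supplies nothing.
 */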
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_dac_load_v0 load;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
	};
	int ret;

	args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
	if (args.load.data == 0)
		args.load.data = 340;

	ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
	if (ret || !args.load.load)
		return connector_status_disconnected;

	return connector_status_connected;
}

static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
	.dpms = nv50_dac_dpms,
	.mode_fixup = nv50_encoder_mode_fixup,
	.prepare = nv50_dac_disconnect,
	.commit = nv50_dac_commit,
	.mode_set = nv50_dac_mode_set,
	.disable = nv50_dac_disconnect,
	.get_crtc = nv50_display_crtc_get,
	.detect = nv50_dac_detect
};

static const struct drm_encoder_funcs nv50_dac_func = {
	.destroy = nv50_dac_destroy,
};

static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
	struct nvkm_i2c_bus *bus;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type = DRM_MODE_ENCODER_DAC;

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->or = ffs(dcbe->or) - 1;

	bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
	if (bus)
		nv_encoder->i2c = &bus->i2c;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
			 "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_dac_hfunc);

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}

/******************************************************************************
 * Audio
 *****************************************************************************/
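/* Build an ELD from the connector's EDID and hand it to NVKM for the
 * HDA codec.  The head index is folded into bits 8-11 of hashm (the
 * 0xf0ff mask clears them first) to select the head this SOR drives.
 */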
static void
nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct __packed {
		struct {
			struct nv50_disp_mthd_v1 mthd;
			struct nv50_disp_sor_hda_eld_v0 eld;
		} base;
		u8 data[sizeof(nv_connector->base.eld)];
	} args = {
		.base.mthd.version = 1,
		.base.mthd.method  = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.mthd.hasht   = nv_encoder->dcb->hasht,
		.base.mthd.hashm   = (0xf0ff & nv_encoder->dcb->hashm) |
				     (0x0100 << nv_crtc->index),
	};

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_monitor_audio(nv_connector->edid))
		return;

	drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
	memcpy(args.data, nv_connector->base.eld, sizeof(args.data));

	nvif_mthd(disp->disp, 0, &args,
		  sizeof(args.base) + drm_eld_size(args.data));
}

static void
nv50_audio_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hda_eld_v0 eld;
	} args = {
		.base.version = 1,
		.base.method  = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.hasht   = nv_encoder->dcb->hasht,
		.base.hashm   = (0xf0ff & nv_encoder->dcb->hashm) |
				(0x0100 << nv_crtc->index),
	};

	nvif_mthd(disp->disp, 0, &args, sizeof(args));
}

/******************************************************************************
 * HDMI
 *****************************************************************************/
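/* Enable HDMI power/infoframes on the SOR.  max_ac_packet is the
 * horizontal blanking budget (htotal - hdisplay) minus the rekey value
 * and an 18-pixel constant, expressed in 32-pixel units.
 */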
static void
nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
			       (0x0100 << nv_crtc->index),
		.pwr.state = 1,
		.pwr.rekey = 56, /* binary driver, and tegra, constant */
	};
	struct nouveau_connector *nv_connector;
	u32 max_ac_packet;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_hdmi_monitor(nv_connector->edid))
		return;

	max_ac_packet  = mode->htotal - mode->hdisplay;
	max_ac_packet -= args.pwr.rekey;
	max_ac_packet -= 18; /* constant from tegra */
	args.pwr.max_ac_packet = max_ac_packet / 32;

	nvif_mthd(disp->disp, 0, &args, sizeof(args));
	nv50_audio_mode_set(encoder, mode);
}

static void
nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
			       (0x0100 << nv_crtc->index),
	};

	nvif_mthd(disp->disp, 0, &args, sizeof(args));
}

/******************************************************************************
 * MST
 *****************************************************************************/
struct nv50_mstm {
	struct nouveau_encoder *outp;

	struct drm_dp_mst_topology_mgr mgr;
};

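/* Toggle the sink's DP_MSTM_CTRL register (DPCD 1.2 and newer only) and
 * mirror the new state into NVKM via the SOR_DP_MST_LINK method, so the
 * hardware and the sink agree on whether MST is active.
 */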
static int
nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
{
	struct nouveau_encoder *outp = mstm->outp;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_link_v0 mst;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
		.base.hasht = outp->dcb->hasht,
		.base.hashm = outp->dcb->hashm,
		.mst.state = state,
	};
	struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
	struct nvif_object *disp = &drm->display->disp;
	int ret;

	if (dpcd >= 0x12) {
		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
		if (ret < 0)
			return ret;

		dpcd &= ~DP_MST_EN;
		if (state)
			dpcd |= DP_MST_EN;

		ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
		if (ret < 0)
			return ret;
	}

	return nvif_mthd(disp, 0, &args, sizeof(args));
}

int
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
	int ret, state = 0;

	if (!mstm)
		return 0;

	if (dpcd[0] >= 0x12 && allow) {
		ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
		if (ret < 0)
			return ret;

		state = dpcd[1] & DP_MST_CAP;
	}

	ret = nv50_mstm_enable(mstm, dpcd[0], state);
	if (ret)
		return ret;

	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
	if (ret)
		return nv50_mstm_enable(mstm, dpcd[0], 0);

	return mstm->mgr.mst_state;
}

static void
nv50_mstm_del(struct nv50_mstm **pmstm)
{
	struct nv50_mstm *mstm = *pmstm;
	if (mstm) {
		kfree(*pmstm);
		*pmstm = NULL;
	}
}

static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
	      int conn_base_id, struct nv50_mstm **pmstm)
{
	const int max_payloads = hweight8(outp->dcb->heads);
	struct drm_device *dev = outp->base.base.dev;
	struct nv50_mstm *mstm;
	int ret;

	if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
		return -ENOMEM;
	mstm->outp = outp;

	ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev->dev, aux, aux_max,
					   max_payloads, conn_base_id);
	if (ret)
		return ret;

	return 0;
}

/******************************************************************************
 * SOR
 *****************************************************************************/
static void
nv50_sor_dpms(struct drm_encoder *encoder, int mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
		.pwr.state = mode == DRM_MODE_DPMS_ON,
	};
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_pwr_v0 pwr;
	} link = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
		.pwr.state = mode == DRM_MODE_DPMS_ON,
	};
	struct drm_device *dev = encoder->dev;
	struct drm_encoder *partner;

	nv_encoder->last_dpms = mode;

	list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_partner = nouveau_encoder(partner);

		if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
			continue;

		if (nv_partner != nv_encoder &&
		    nv_partner->dcb->or == nv_encoder->dcb->or) {
			if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
				return;
			break;
		}
	}

	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
		args.pwr.state = 1;
		nvif_mthd(disp->disp, 0, &args, sizeof(args));
		nvif_mthd(disp->disp, 0, &link, sizeof(link));
	} else {
		nvif_mthd(disp->disp, 0, &args, sizeof(args));
	}
}

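/* Read-modify-write cache of the SOR control method: only the bits in
 * 'mask' are replaced by 'data', and the method is pushed to the core
 * channel only when the cached value actually changes.
 */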
static void
nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data)
{
	struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev);
	u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push;
	if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) {
		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
			evo_data(push, (nv_encoder->ctrl = temp));
		} else {
			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
			evo_data(push, (nv_encoder->ctrl = temp));
		}
		evo_kick(push, mast);
	}
}

static void
nv50_sor_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);

	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
	nv_encoder->crtc = NULL;

	if (nv_crtc) {
		nv50_crtc_prepare(&nv_crtc->base);
		nv50_sor_ctrl(nv_encoder, 1 << nv_crtc->index, 0);
		nv50_audio_disconnect(encoder, nv_crtc);
		nv50_hdmi_disconnect(&nv_encoder->base.base, nv_crtc);
	}
}

static void
nv50_sor_commit(struct drm_encoder *encoder)
{
}

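/* Program the SOR for a new mode: select the protocol and pixel depth
 * from the DCB entry and connector (TMDS single/dual link, LVDS script
 * flags, DP bpc), then attach the SOR to the head via nv50_sor_ctrl().
 */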
static void
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
		  struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_lvds_script_v0 lvds;
	} lvds = {
		.base.version = 1,
		.base.method  = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
		.base.hasht   = nv_encoder->dcb->hasht,
		.base.hashm   = nv_encoder->dcb->hashm,
	};
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	struct drm_device *dev = encoder->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_connector *nv_connector;
	struct nvbios *bios = &drm->vbios;
	u32 mask, ctrl;
	u8 owner = 1 << nv_crtc->index;
	u8 proto = 0xf;
	u8 depth = 0x0;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	nv_encoder->crtc = encoder->crtc;

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
		if (nv_encoder->dcb->sorconf.link & 1) {
			proto = 0x1;
			/* Only enable dual-link if:
			 *  - Need to (i.e. rate > 165MHz)
			 *  - DCB says we can
			 *  - Not an HDMI monitor, since there's no dual-link
			 *    on HDMI.
			 */
			if (mode->clock >= 165000 &&
			    nv_encoder->dcb->duallink_possible &&
			    !drm_detect_hdmi_monitor(nv_connector->edid))
				proto |= 0x4;
		} else {
			proto = 0x2;
		}

		nv50_hdmi_mode_set(&nv_encoder->base.base, mode);
		break;
	case DCB_OUTPUT_LVDS:
		proto = 0x0;

		if (bios->fp_no_ddc) {
			if (bios->fp.dual_link)
				lvds.lvds.script |= 0x0100;
			if (bios->fp.if_is_24bit)
				lvds.lvds.script |= 0x0200;
		} else {
			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
				if (((u8 *)nv_connector->edid)[121] == 2)
					lvds.lvds.script |= 0x0100;
			} else
			if (mode->clock >= bios->fp.duallink_transition_clk) {
				lvds.lvds.script |= 0x0100;
			}

			if (lvds.lvds.script & 0x0100) {
				if (bios->fp.strapless_is_24bit & 2)
					lvds.lvds.script |= 0x0200;
			} else {
				if (bios->fp.strapless_is_24bit & 1)
					lvds.lvds.script |= 0x0200;
			}

			if (nv_connector->base.display_info.bpc == 8)
				lvds.lvds.script |= 0x0200;
		}

		nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
		break;
	case DCB_OUTPUT_DP:
		if (nv_connector->base.display_info.bpc == 6) {
			nv_encoder->dp.datarate = mode->clock * 18 / 8;
			depth = 0x2;
		} else
		if (nv_connector->base.display_info.bpc == 8) {
			nv_encoder->dp.datarate = mode->clock * 24 / 8;
			depth = 0x5;
		} else {
			nv_encoder->dp.datarate = mode->clock * 30 / 8;
			depth = 0x6;
		}

		if (nv_encoder->dcb->sorconf.link & 1)
			proto = 0x8;
		else
			proto = 0x9;
		nv50_audio_mode_set(encoder, mode);
		break;
	default:
		BUG_ON(1);
		break;
	}

	nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON);

	if (nv50_vers(mast) >= GF110_DISP) {
		u32 *push = evo_wait(mast, 3);
		if (push) {
			u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
			u32 syncs = 0x00000001;

			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				syncs |= 0x00000008;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				syncs |= 0x00000010;

			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
				magic |= 0x00000001;

			evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
			evo_data(push, syncs | (depth << 6));
			evo_data(push, magic);
			evo_kick(push, mast);
		}

		ctrl = proto << 8;
		mask = 0x00000f00;
	} else {
		ctrl = (depth << 16) | (proto << 8);
		if (mode->flags & DRM_MODE_FLAG_NHSYNC)
			ctrl |= 0x00001000;
		if (mode->flags & DRM_MODE_FLAG_NVSYNC)
			ctrl |= 0x00002000;
		mask = 0x000f3f00;
	}

	nv50_sor_ctrl(nv_encoder, mask | owner, ctrl | owner);
}

static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	nv50_mstm_del(&nv_encoder->dp.mstm);
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
	.dpms = nv50_sor_dpms,
	.mode_fixup = nv50_encoder_mode_fixup,
	.prepare = nv50_sor_disconnect,
	.commit = nv50_sor_commit,
	.mode_set = nv50_sor_mode_set,
	.disable = nv50_sor_disconnect,
	.get_crtc = nv50_display_crtc_get,
};

static const struct drm_encoder_funcs nv50_sor_func = {
	.destroy = nv50_sor_destroy,
};

static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type, ret;

	switch (dcbe->type) {
	case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
	default:
		type = DRM_MODE_ENCODER_TMDS;
		break;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->or = ffs(dcbe->or) - 1;
	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
			 "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_sor_hfunc);

	drm_mode_connector_attach_encoder(connector, encoder);

	if (dcbe->type == DCB_OUTPUT_DP) {
		struct nvkm_i2c_aux *aux =
			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
		if (aux) {
			nv_encoder->i2c = &aux->i2c;
			nv_encoder->aux = aux;
		}

		/*TODO: Use DP Info Table to check for support. */
		if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) {
			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
					    nv_connector->base.base.id,
					    &nv_encoder->dp.mstm);
			if (ret)
				return ret;
		}
	} else {
		struct nvkm_i2c_bus *bus =
			nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
		if (bus)
			nv_encoder->i2c = &bus->i2c;
	}

	return 0;
}

/******************************************************************************
 * PIOR
 *****************************************************************************/

static void
nv50_pior_dpms(struct drm_encoder *encoder, int mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_pior_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_PIOR_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
		.pwr.state = mode == DRM_MODE_DPMS_ON,
		.pwr.type = nv_encoder->dcb->type,
	};

	nvif_mthd(disp->disp, 0, &args, sizeof(args));
}

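/* Same as the common encoder fixup, except that the mode clock handed
 * to the hardware is doubled for external (PIOR) encoders.
 */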
static bool
nv50_pior_mode_fixup(struct drm_encoder *encoder,
		     const struct drm_display_mode *mode,
		     struct drm_display_mode *adjusted_mode)
{
	if (!nv50_encoder_mode_fixup(encoder, mode, adjusted_mode))
		return false;
	adjusted_mode->clock *= 2;
	return true;
}

static void
nv50_pior_commit(struct drm_encoder *encoder)
{
}

static void
nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		   struct drm_display_mode *adjusted_mode)
{
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	u8 owner = 1 << nv_crtc->index;
	u8 proto, depth;
	u32 *push;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	switch (nv_connector->base.display_info.bpc) {
	case 10: depth = 0x6; break;
	case  8: depth = 0x5; break;
	case  6: depth = 0x2; break;
	default: depth = 0x0; break;
	}

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
		proto = 0x0;
		break;
	default:
		BUG_ON(1);
		break;
	}

	nv50_pior_dpms(encoder, DRM_MODE_DPMS_ON);

	push = evo_wait(mast, 8);
	if (push) {
		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
			u32 ctrl = (depth << 16) | (proto << 8) | owner;
			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				ctrl |= 0x00001000;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				ctrl |= 0x00002000;
			evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
			evo_data(push, ctrl);
		}

		evo_kick(push, mast);
	}

	nv_encoder->crtc = encoder->crtc;
}

static void
nv50_pior_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	const int or = nv_encoder->or;
	u32 *push;

	if (nv_encoder->crtc) {
		nv50_crtc_prepare(nv_encoder->crtc);

		push = evo_wait(mast, 4);
		if (push) {
			if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
				evo_mthd(push, 0x0700 + (or * 0x040), 1);
				evo_data(push, 0x00000000);
			}
			evo_kick(push, mast);
		}
	}

	nv_encoder->crtc = NULL;
}

static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_helper_funcs nv50_pior_hfunc = {
	.dpms = nv50_pior_dpms,
	.mode_fixup = nv50_pior_mode_fixup,
	.prepare = nv50_pior_disconnect,
	.commit = nv50_pior_commit,
	.mode_set = nv50_pior_mode_set,
	.disable = nv50_pior_disconnect,
	.get_crtc = nv50_display_crtc_get,
};

static const struct drm_encoder_funcs nv50_pior_func = {
	.destroy = nv50_pior_destroy,
};

static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
	struct nvkm_i2c_bus *bus = NULL;
	struct nvkm_i2c_aux *aux = NULL;
	struct i2c_adapter *ddc;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type;

	switch (dcbe->type) {
	case DCB_OUTPUT_TMDS:
		bus  = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
		ddc  = bus ? &bus->i2c : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	case DCB_OUTPUT_DP:
		aux  = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
		ddc  = aux ? &aux->i2c : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	default:
		return -ENODEV;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->or = ffs(dcbe->or) - 1;
	nv_encoder->i2c = ddc;
	nv_encoder->aux = aux;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
			 "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_pior_hfunc);

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}

/******************************************************************************
 * Framebuffer
 *****************************************************************************/

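/* A nv50_fbdma bundles the DMA objects that make one VRAM range visible
 * to the display engine: one object per head's sync channel plus one for
 * the core channel, all created with the same handle.
 */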
static void
nv50_fbdma_fini(struct nv50_fbdma *fbdma)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(fbdma->base); i++)
		nvif_object_fini(&fbdma->base[i]);
	nvif_object_fini(&fbdma->core);
	list_del(&fbdma->head);
	kfree(fbdma);
}

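/* Create (or reuse, if the handle already exists) the DMA objects for a
 * VRAM range/kind pair.  The ctxdma arguments differ per generation:
 * <0x80 plain nv50 args, <0xc0 nv50 args plus memory kind, <0xd0 the
 * gf100 layout, and the gf119 layout for everything newer.
 */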
static int
nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kind)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_mast *mast = nv50_mast(dev);
	struct __attribute__ ((packed)) {
		struct nv_dma_v0 base;
		union {
			struct nv50_dma_v0 nv50;
			struct gf100_dma_v0 gf100;
			struct gf119_dma_v0 gf119;
		};
	} args = {};
	struct nv50_fbdma *fbdma;
	struct drm_crtc *crtc;
	u32 size = sizeof(args.base);
	int ret;

	list_for_each_entry(fbdma, &disp->fbdma, head) {
		if (fbdma->core.handle == name)
			return 0;
	}

	fbdma = kzalloc(sizeof(*fbdma), GFP_KERNEL);
	if (!fbdma)
		return -ENOMEM;
	list_add(&fbdma->head, &disp->fbdma);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start = offset;
	args.base.limit = offset + length - 1;

	if (drm->device.info.chipset < 0x80) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		size += sizeof(args.nv50);
	} else
	if (drm->device.info.chipset < 0xc0) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		args.nv50.kind = kind;
		size += sizeof(args.nv50);
	} else
	if (drm->device.info.chipset < 0xd0) {
		args.gf100.kind = kind;
		size += sizeof(args.gf100);
	} else {
		args.gf119.page = GF119_DMA_V0_PAGE_LP;
		args.gf119.kind = kind;
		size += sizeof(args.gf119);
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct nv50_head *head = nv50_head(crtc);
		int ret = nvif_object_init(&head->sync.base.base.user, name,
					   NV_DMA_IN_MEMORY, &args, size,
					   &fbdma->base[head->base.index]);
		if (ret) {
			nv50_fbdma_fini(fbdma);
			return ret;
		}
	}

	ret = nvif_object_init(&mast->base.base.user, name, NV_DMA_IN_MEMORY,
			       &args, size, &fbdma->core);
	if (ret) {
		nv50_fbdma_fini(fbdma);
		return ret;
	}

	return 0;
}

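/* Per-framebuffer hook: nv50_fb_ctor() precomputes what the EVO channels
 * need to scan out this fb, r_format from the depth, r_pitch (the linear
 * vs. tiled encoding differs per display class), and r_handle naming the
 * ctxdma set up by nv50_fbdma_init().
 */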
static void
nv50_fb_dtor(struct drm_framebuffer *fb)
{
}

static int
nv50_fb_ctor(struct drm_framebuffer *fb)
{
	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
	struct nouveau_drm *drm = nouveau_drm(fb->dev);
	struct nouveau_bo *nvbo = nv_fb->nvbo;
	struct nv50_disp *disp = nv50_disp(fb->dev);
	u8 kind = nouveau_bo_tile_layout(nvbo) >> 8;
	u8 tile = nvbo->tile_mode;

	if (drm->device.info.chipset >= 0xc0)
		tile >>= 4; /* yep.. */

	switch (fb->depth) {
	case  8: nv_fb->r_format = 0x1e00; break;
	case 15: nv_fb->r_format = 0xe900; break;
	case 16: nv_fb->r_format = 0xe800; break;
	case 24:
	case 32: nv_fb->r_format = 0xcf00; break;
	case 30: nv_fb->r_format = 0xd100; break;
	default:
		 NV_ERROR(drm, "unknown depth %d\n", fb->depth);
		 return -EINVAL;
	}

	if (disp->disp->oclass < G82_DISP) {
		nv_fb->r_pitch   = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
					    (fb->pitches[0] | 0x00100000);
		nv_fb->r_format |= kind << 16;
	} else
	if (disp->disp->oclass < GF110_DISP) {
		nv_fb->r_pitch  = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
					   (fb->pitches[0] | 0x00100000);
	} else {
		nv_fb->r_pitch  = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
					   (fb->pitches[0] | 0x01000000);
	}
	nv_fb->r_handle = 0xffff0000 | kind;

	return nv50_fbdma_init(fb->dev, nv_fb->r_handle, 0,
			       drm->device.info.ram_user, kind);
}

/******************************************************************************
 * Init
 *****************************************************************************/

void
nv50_display_fini(struct drm_device *dev)
{
}

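/* Bring the display back up: reload each head's gamma LUT, re-arm the
 * page-flip semaphores in the shared sync buffer, and rebind the core
 * channel's notifier ctxdma (method 0x0088) to that buffer.
 */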
int
nv50_display_init(struct drm_device *dev)
{
	struct nv50_disp *disp = nv50_disp(dev);
	struct drm_crtc *crtc;
	u32 *push;

	push = evo_wait(nv50_mast(dev), 32);
	if (!push)
		return -EBUSY;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct nv50_sync *sync = nv50_sync(crtc);

		nv50_crtc_lut_load(crtc);
		nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
	}

	evo_mthd(push, 0x0088, 1);
	evo_data(push, nv50_mast(dev)->base.sync.handle);
	evo_kick(push, nv50_mast(dev));
	return 0;
}

void
nv50_display_destroy(struct drm_device *dev)
{
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_fbdma *fbdma, *fbtmp;

	list_for_each_entry_safe(fbdma, fbtmp, &disp->fbdma, head) {
		nv50_fbdma_fini(fbdma);
	}

	nv50_dmac_destroy(&disp->mast.base, disp->disp);

	nouveau_bo_unmap(disp->sync);
	if (disp->sync)
		nouveau_bo_unpin(disp->sync);
	nouveau_bo_ref(NULL, &disp->sync);

	nouveau_display(dev)->priv = NULL;
	kfree(disp);
}

int
nv50_display_create(struct drm_device *dev)
{
	struct nvif_device *device = &nouveau_drm(dev)->device;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct dcb_table *dcb = &drm->vbios.dcb;
	struct drm_connector *connector, *tmp;
	struct nv50_disp *disp;
	struct dcb_output *dcbe;
	int crtcs, ret, i;

	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;
	INIT_LIST_HEAD(&disp->fbdma);

	nouveau_display(dev)->priv = disp;
	nouveau_display(dev)->dtor = nv50_display_destroy;
	nouveau_display(dev)->init = nv50_display_init;
	nouveau_display(dev)->fini = nv50_display_fini;
	nouveau_display(dev)->fb_ctor = nv50_fb_ctor;
	nouveau_display(dev)->fb_dtor = nv50_fb_dtor;
	disp->disp = &nouveau_display(dev)->disp;

	/* small shared memory area we use for notifiers and semaphores */
	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
			     0, 0x0000, NULL, NULL, &disp->sync);
	if (!ret) {
		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
		if (!ret) {
			ret = nouveau_bo_map(disp->sync);
			if (ret)
				nouveau_bo_unpin(disp->sync);
		}
		if (ret)
			nouveau_bo_ref(NULL, &disp->sync);
	}

	if (ret)
		goto out;

	/* allocate master evo channel */
	ret = nv50_core_create(device, disp->disp, disp->sync->bo.offset,
			       &disp->mast);
	if (ret)
		goto out;

	/* create crtc objects to represent the hw heads */
	if (disp->disp->oclass >= GF110_DISP)
		crtcs = nvif_rd32(&device->object, 0x022448);
	else
		crtcs = 2;

	for (i = 0; i < crtcs; i++) {
		ret = nv50_crtc_create(dev, i);
		if (ret)
			goto out;
	}

	/* create encoder/connector objects based on VBIOS DCB table */
	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
		connector = nouveau_connector_create(dev, dcbe->connector);
		if (IS_ERR(connector))
			continue;

		if (dcbe->location == DCB_LOC_ON_CHIP) {
			switch (dcbe->type) {
			case DCB_OUTPUT_TMDS:
			case DCB_OUTPUT_LVDS:
			case DCB_OUTPUT_DP:
				ret = nv50_sor_create(connector, dcbe);
				break;
			case DCB_OUTPUT_ANALOG:
				ret = nv50_dac_create(connector, dcbe);
				break;
			default:
				ret = -ENODEV;
				break;
			}
		} else {
			ret = nv50_pior_create(connector, dcbe);
		}

		if (ret) {
			NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
				     dcbe->location, dcbe->type,
				     ffs(dcbe->or) - 1, ret);
			ret = 0;
		}
	}

	/* cull any connectors we created that don't have an encoder */
	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
		if (connector->encoder_ids[0])
			continue;

		NV_WARN(drm, "%s has no encoders, removing\n",
			connector->name);
		connector->funcs->destroy(connector);
	}

out:
	if (ret)
		nv50_display_destroy(dev);
	return ret;
}