/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp4_kms.h"

#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	int ovlp;
	enum mdp4_dma dma;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;
		bool stale;
		uint32_t width, height;
		uint32_t x, y;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;


	/* if there is a pending flip, this will be non-null: */
	struct drm_pending_vblank_event *event;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)

static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}

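/* Flag pending work (cursor update and/or page-flip completion) and arm
 * the vblank irq; the flagged work is completed from mdp4_crtc_vblank_irq()
 * on the next vblank.
 */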
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	atomic_or(pending, &mdp4_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}

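/* Collect the flush bits for every pipe currently on this crtc plus its
 * overlay engine, and write them to OVERLAY_FLUSH so the queued register
 * updates take effect.
 */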
static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	uint32_t flush = 0;

	for_each_plane_on_crtc(crtc, plane) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		flush |= pipe2flush(pipe_id);
	}

	flush |= ovlp2flush(mdp4_crtc->ovlp);

	DBG("%s: flush=%08x", mdp4_crtc->name, flush);

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}

/* if file!=NULL, this is the potential cancel-flip path from preclose */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp4_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp4_crtc->event = NULL;
			DBG("%s: send event: %p", mdp4_crtc->name, event);
			drm_send_vblank_event(dev, mdp4_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

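/* Runs from the flip-work queue once a cursor bo is no longer scanned out:
 * drop the iova reference and the gem object reference held on it.
 */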
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_cursor_work);
	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);

	msm_gem_put_iova(val, mdp4_kms->id);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp4_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);

	kfree(mdp4_crtc);
}

static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	bool enabled = (mode == DRM_MODE_DPMS_ON);

	DBG("%s: mode=%d", mdp4_crtc->name, mode);

	if (enabled != mdp4_crtc->enabled) {
		if (enabled) {
			mdp4_enable(mdp4_kms);
			mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
		} else {
			mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
			mdp4_disable(mdp4_kms);
		}
		mdp4_crtc->enabled = enabled;
	}
}

static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}

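/* Set up the layer mixer and per-stage blend ops: each plane's pipe is
 * mapped to a fixed mixer stage (z-order), per-pixel alpha is enabled for
 * stages whose fb format carries alpha, and the shadowed mixer_cfg is
 * written back to LAYERMIXER_IN_CFG.
 */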
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	int i, ovlp = mdp4_crtc->ovlp;
	uint32_t mixer_cfg = 0;
	static const enum mdp_mixer_stage_id stages[] = {
			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
	};
	/* statically (for now) map planes to mixer stage (z-order): */
	static const int idxs[] = {
			[VG1]  = 1,
			[VG2]  = 2,
			[RGB1] = 0,
			[RGB2] = 0,
			[RGB3] = 0,
			[VG3]  = 3,
			[VG4]  = 4,
	};
	bool alpha[4] = { false, false, false, false };

	/* Don't rely on the value read back from hw, but instead use our
	 * own shadowed value.  Possibly a disable/re-enable loses the
	 * previous value and goes back to the power-on default?
	 */
	mixer_cfg = mdp4_kms->mixer_cfg;

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	for_each_plane_on_crtc(crtc, plane) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		int idx = idxs[pipe_id];
		if (idx > 0) {
			const struct mdp_format *format =
					to_mdp_format(msm_framebuffer_format(plane->fb));
			alpha[idx-1] = format->alpha_enable;
		}
		mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
				pipe_id, stages[idx]);
	}

	/* this shouldn't happen.. and seems to cause underflow: */
	WARN_ON(!mixer_cfg);

	for (i = 0; i < 4; i++) {
		uint32_t op;

		if (alpha[i]) {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
		} else {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
		}

		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	mdp4_kms->mixer_cfg = mixer_cfg;
	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}

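/* Program the DMA and overlay engines for the adjusted mode in crtc->state;
 * the source base/stride stay zero because scanout data is taken from the
 * pipe rather than fetched directly.
 */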
static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ovlp = mdp4_crtc->ovlp;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp4_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}
}

static void mdp4_crtc_prepare(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s", mdp4_crtc->name);
	/* make sure we hold a ref to mdp clks while setting up mode: */
	drm_crtc_vblank_get(crtc);
	mdp4_enable(get_kms(crtc));
	mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void mdp4_crtc_commit(struct drm_crtc *crtc)
{
	mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	crtc_flush(crtc);
	/* drop the ref to mdp clks that we got in prepare: */
	mdp4_disable(get_kms(crtc));
	drm_crtc_vblank_put(crtc);
}

static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
{
}

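/* Reject a new atomic update while a flip is still pending on this crtc. */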
static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;

	DBG("%s: check", mdp4_crtc->name);

	if (mdp4_crtc->event) {
		dev_err(dev->dev, "already pending flip!\n");
		return -EBUSY;
	}

	// TODO anything else to check?

	return 0;
}

static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: begin", mdp4_crtc->name);
}

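/* Latch the new state: stash the completion event (sent later from the
 * vblank irq), re-program blending, flush the hw, and mark a flip pending.
 */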
static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: flush", mdp4_crtc->name);

	WARN_ON(mdp4_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp4_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush(crtc);
	request_pending(crtc, PENDING_FLIP);
}

static int mdp4_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}

#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

/* called from IRQ to update cursor-related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint32_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
			drm_gem_object_reference(next_bo);
			msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
					mdp4_kms->blank_cursor_iova);
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}

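/* Stash the new cursor bo/size under the cursor lock and mark the cursor
 * stale; the registers are only written from update_cursor() at vblank
 * (see comment above).
 */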
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint32_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo   = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width     = width;
	mdp4_crtc->cursor.height    = height;
	mdp4_crtc->cursor.stale     = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
	}

	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_unreference_unlocked(cursor_bo);
	return ret;
}

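/* Only record the new position here; DMA_CURSOR_POS is written from
 * update_cursor() at the next vblank.
 */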
static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	mdp4_crtc->cursor.x = x;
	mdp4_crtc->cursor.y = y;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	crtc_flush(crtc);
	request_pending(crtc, PENDING_CURSOR);

	return 0;
}

static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp4_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = mdp4_crtc_set_property,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.dpms = mdp4_crtc_dpms,
	.mode_fixup = mdp4_crtc_mode_fixup,
	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
	.mode_set = drm_helper_crtc_mode_set,
	.mode_set_base = drm_helper_crtc_mode_set_base,
	.prepare = mdp4_crtc_prepare,
	.commit = mdp4_crtc_commit,
	.load_lut = mdp4_crtc_load_lut,
	.atomic_check = mdp4_crtc_atomic_check,
	.atomic_begin = mdp4_crtc_atomic_begin,
	.atomic_flush = mdp4_crtc_atomic_flush,
};

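/* One-shot vblank handler: unregister ourselves, then complete whatever
 * request_pending() flagged: send the page-flip event and/or apply the
 * stale cursor state, and commit the flip-work so old cursor bo's are
 * unreferenced from the workqueue.
 */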
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);

	pending = atomic_xchg(&mdp4_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR) {
		update_cursor(crtc);
		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
	}
}

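/* Error irq: log the status and kick another flush of the crtc. */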
static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
	crtc_flush(crtc);
}

uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	return mdp4_crtc->vblank.irqmask;
}

void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: cancel: %p", mdp4_crtc->name, file);
	complete_flip(crtc, file);
}

/* set dma config, i.e. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}

/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
	}

	mdp4_crtc->mixer = mixer;

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}

static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};

/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp4_crtc *mdp4_crtc;

	mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
	if (!mdp4_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp4_crtc->base;

	mdp4_crtc->id = id;

	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;

	mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
	mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;

	mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
	mdp4_crtc->err.irq = mdp4_crtc_err_irq;

	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
			dma_names[dma_id], ovlp_id);

	spin_lock_init(&mdp4_crtc->cursor.lock);

	drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
	plane->crtc = crtc;

	mdp4_plane_install_properties(plane, &crtc->base);

	return crtc;
}