/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp4_kms.h"

#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	int ovlp;
	enum mdp4_dma dma;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;
		bool stale;
		uint32_t width, height;
		uint32_t x, y;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;

	/* if there is a pending flip, this will be non-null: */
	struct drm_pending_vblank_event *event;

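	/* bitmask of pending updates (PENDING_CURSOR/PENDING_FLIP), set by
	 * request_pending() and serviced from the vblank irq in
	 * mdp4_crtc_vblank_irq():
	 */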
#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)

static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}

static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	atomic_or(pending, &mdp4_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}

static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	uint32_t flush = 0;

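	/* collect the flush bits for each attached plane's pipe, plus this
	 * crtc's overlay engine, and kick them via the OVERLAY_FLUSH register:
	 */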
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		flush |= pipe2flush(pipe_id);
	}

	flush |= ovlp2flush(mdp4_crtc->ovlp);

	DBG("%s: flush=%08x", mdp4_crtc->name, flush);

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}

/* if file!=NULL, this is the potential cancel-flip path from preclose */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp4_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp4_crtc->event = NULL;
			DBG("%s: send event: %p", mdp4_crtc->name, event);
			drm_send_vblank_event(dev, mdp4_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_cursor_work);
	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);

	msm_gem_put_iova(val, mdp4_kms->id);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp4_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);

	kfree(mdp4_crtc);
}

static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	bool enabled = (mode == DRM_MODE_DPMS_ON);

	DBG("%s: mode=%d", mdp4_crtc->name, mode);

	if (enabled != mdp4_crtc->enabled) {
		if (enabled) {
			mdp4_enable(mdp4_kms);
			mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
		} else {
			mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
			mdp4_disable(mdp4_kms);
		}
		mdp4_crtc->enabled = enabled;
	}
}

static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}

/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
		[VG1]  = 1,
		[VG2]  = 2,
		[RGB1] = 0,
		[RGB2] = 0,
		[RGB3] = 0,
		[VG3]  = 3,
		[VG4]  = 4,
};

/* setup mixer config, for which we need to consider all crtc's and
 * the planes attached to them
 *
 * TODO may possibly need some extra locking here
 */
static void setup_mixer(struct mdp4_kms *mdp4_kms)
{
	struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
	struct drm_crtc *crtc;
	uint32_t mixer_cfg = 0;
	static const enum mdp_mixer_stage_id stages[] = {
			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
	};

	list_for_each_entry(crtc, &config->crtc_list, head) {
		struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
		struct drm_plane *plane;

		drm_atomic_crtc_for_each_plane(plane, crtc) {
			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
			int idx = idxs[pipe_id];
			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
					pipe_id, stages[idx]);
		}
	}

	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}

static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	int i, ovlp = mdp4_crtc->ovlp;
	bool alpha[4] = { false, false, false, false };

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		int idx = idxs[pipe_id];
		if (idx > 0) {
			const struct mdp_format *format =
					to_mdp_format(msm_framebuffer_format(plane->fb));
			alpha[idx-1] = format->alpha_enable;
		}
	}

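	/* set the blend op for each mixer stage: use the foreground pixel
	 * alpha if the staged plane's format has an alpha channel, otherwise
	 * fall back to constant alpha:
	 */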
	for (i = 0; i < 4; i++) {
		uint32_t op;

		if (alpha[i]) {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
		} else {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
		}

		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	setup_mixer(mdp4_kms);
}

static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ovlp = mdp4_crtc->ovlp;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp4_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}
}

static void mdp4_crtc_prepare(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s", mdp4_crtc->name);
	/* make sure we hold a ref to mdp clks while setting up mode: */
	drm_crtc_vblank_get(crtc);
	mdp4_enable(get_kms(crtc));
	mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void mdp4_crtc_commit(struct drm_crtc *crtc)
{
	mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	crtc_flush(crtc);
	/* drop the ref to mdp clk's that we got in prepare: */
	mdp4_disable(get_kms(crtc));
	drm_crtc_vblank_put(crtc);
}

static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
{
}

static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;

	DBG("%s: check", mdp4_crtc->name);

	if (mdp4_crtc->event) {
		dev_err(dev->dev, "already pending flip!\n");
		return -EBUSY;
	}

	// TODO anything else to check?

	return 0;
}

static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: begin", mdp4_crtc->name);
}

static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: flush", mdp4_crtc->name);

	WARN_ON(mdp4_crtc->event);

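	/* stash the completion event (if any) from the new state; it is
	 * sent out by complete_flip() once the flip actually happens:
	 */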
	spin_lock_irqsave(&dev->event_lock, flags);
	mdp4_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush(crtc);
	request_pending(crtc, PENDING_FLIP);
}

static int mdp4_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}

#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint32_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
			drm_gem_object_reference(next_bo);
			msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
					mdp4_kms->blank_cursor_iova);
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}

static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint32_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo   = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width     = width;
	mdp4_crtc->cursor.height    = height;
	mdp4_crtc->cursor.stale     = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
	}

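	/* the cursor registers are updated from the vblank irq (see
	 * update_cursor()), so just flag the update as pending here:
	 */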
	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_unreference_unlocked(cursor_bo);
	return ret;
}

static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	mdp4_crtc->cursor.x = x;
	mdp4_crtc->cursor.y = y;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	crtc_flush(crtc);
	request_pending(crtc, PENDING_CURSOR);

	return 0;
}

static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp4_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = mdp4_crtc_set_property,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.dpms = mdp4_crtc_dpms,
	.mode_fixup = mdp4_crtc_mode_fixup,
	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
	.mode_set = drm_helper_crtc_mode_set,
	.mode_set_base = drm_helper_crtc_mode_set_base,
	.prepare = mdp4_crtc_prepare,
	.commit = mdp4_crtc_commit,
	.load_lut = mdp4_crtc_load_lut,
	.atomic_check = mdp4_crtc_atomic_check,
	.atomic_begin = mdp4_crtc_atomic_begin,
	.atomic_flush = mdp4_crtc_atomic_flush,
};

static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

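	/* the vblank irq is only kept registered while an update is pending
	 * (see request_pending()); drop it and service the queued work:
	 */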
	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);

	pending = atomic_xchg(&mdp4_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR) {
		update_cursor(crtc);
		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
	}
}

static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
	crtc_flush(crtc);
}

uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	return mdp4_crtc->vblank.irqmask;
}

void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: cancel: %p", mdp4_crtc->name, file);
	complete_flip(crtc, file);
}

/* set dma config, i.e. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}

/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
	}

	mdp4_crtc->mixer = mixer;

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}

static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};

/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp4_crtc *mdp4_crtc;

	mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
	if (!mdp4_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp4_crtc->base;

	mdp4_crtc->id = id;

	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;

	mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
	mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;

	mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
	mdp4_crtc->err.irq = mdp4_crtc_err_irq;

	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
			dma_names[dma_id], ovlp_id);

	spin_lock_init(&mdp4_crtc->cursor.lock);

	drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
	plane->crtc = crtc;

	mdp4_plane_install_properties(plane, &crtc->base);

	return crtc;
}