/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

struct mdp5_crtc {
	struct drm_crtc base;
	int id;
	bool enabled;

	/* layer mixer used for this CRTC (+ its lock): */
#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
	int lm;
	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* CTL used for this CRTC: */
	struct mdp5_ctl *ctl;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that have been flushed at the last commit, used to decide
	 * if a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	bool cmd_mode;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint32_t width, height;
		uint32_t x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

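/* Mark cursor/flip work as pending and register the vblank irq, so that
 * the pending work is completed on the next vblank.
 */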
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

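/* Re-arm the ping-pong done completion before a command mode commit, so
 * that a later wait blocks until the next pp_done irq.
 */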
static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	reinit_completion(&mdp5_crtc->pp_completion);
}

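/* Commit the given flush mask through this CRTC's CTL; the returned bits
 * are what was actually flushed, and are later checked against the CTL
 * commit status in mdp5_crtc_wait_for_flush_done().
 */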
static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	DBG("%s: flush=%08x", crtc->name, flush_mask);
	return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}

/*
 * Flush updates, to make sure the hw is updated to the new scanout fb,
 * so that we can safely queue an unref of the current fb (i.e. on the
 * next vblank we know the hw is done with the previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_crtc->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);

	return crtc_flush(crtc, flush_mask);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp5_crtc->event = NULL;
			DBG("%s: send event: %p", crtc->name, event);
			drm_crtc_send_vblank_event(crtc, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (mdp5_crtc->ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
		mdp5_crtc->ctl = NULL;
	}
}

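/* flip_work callback: drop the iova and the reference of a cursor bo that
 * is no longer being scanned out.
 */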
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);

	msm_gem_put_iova(val, mdp5_kms->id);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}

static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
	switch (stage) {
	case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
	case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
	case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
	case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
	case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
	case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
	case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
	default:
		return 0;
	}
}

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	uint32_t lm = mdp5_crtc->lm;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
#define blender(stage)	((stage) - STAGE0)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	if (!mdp5_crtc->ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage] = mdp5_plane_pipe(plane);
		plane_cnt++;
	}

	if (!pstates[STAGE_BASE]) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

	/* Set up the blend configuration for each staged plane */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable && pstates[i]->premultiplied) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
				       MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
	}

	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode);

	mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);

out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

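/* Program the layer mixer output size from the CRTC's adjusted mode. */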
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

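/* Unregister this CRTC's irqs and power down via mdp5_disable(). */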
static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	if (mdp5_crtc->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	mdp5_disable(mdp5_kms);

	mdp5_crtc->enabled = false;
}

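/* Power up via mdp5_enable() and register this CRTC's error (and, in
 * command mode, ping-pong done) irqs.
 */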
static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	mdp5_enable(mdp5_kms);
	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_crtc->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}

struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

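/* sort() comparator: order plane states by zpos, lowest first */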
static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}

/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
		struct drm_plane_state *pstate)
{
	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}

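/* Validate the plane configuration for this CRTC: planes are sorted by
 * zpos, the base stage is reserved for border color when the bottom
 * plane is not fullscreen, and each plane is then assigned its mixer
 * stage (with the cursor plane, if any, on the topmost stage).
 */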
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	bool cursor_plane = false;
	int cnt = 0, base = 0, i;

	DBG("%s: check", crtc->name);

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* if the bottom-most layer is not fullscreen, reserve the base
	 * stage for solid (border) color:
	 */
	if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
		base++;

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	if ((cnt + base) >= hw_cfg->lm.nb_stages) {
		dev_err(dev->dev, "too many planes! cnt=%d, base=%d\n", cnt, base);
		return -EINVAL;
	}

	for (i = 0; i < cnt; i++) {
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = STAGE_BASE + i + base;
		DBG("%s: assign pipe %s on stage=%d", crtc->name,
				pstates[i].plane->name,
				pstates[i].state->stage);
	}

	return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	DBG("%s: begin", crtc->name);
}

static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!mdp5_crtc->ctl))
		return;

	blend_setup(crtc);

	/* The PP_DONE irq is only used by command mode for now.
	 * It is better to request pp_done pending before the FLUSH and
	 * START triggers, to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before the SW
	 * trigger in command mode.
	 */
	if (mdp5_crtc->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	request_pending(crtc, PENDING_FLIP);
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * The cursor Region Of Interest (ROI) is the part of the cursor
	 * buffer that is read out and rendered. The ROI is determined by
	 * the visibility of the cursor point. In the default cursor image
	 * the cursor point is at the top left, unless specified otherwise
	 * via the hotspot feature.
	 *
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres-x) will be new cursor width when x > (xres - cursor.width)
	 * (yres-y) will be new cursor height when y > (yres - cursor.height)
	 */
	*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
}

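/* Legacy cursor update: pin the new cursor bo, program the LM cursor
 * registers, and queue the old bo to be unreferenced once scanout of it
 * has completed (on the next vblank).
 */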
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	uint32_t blendcfg, stride;
	uint64_t cursor_addr;
	int ret, lm;
	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w, roi_h;
	bool cursor_enable = true;
	unsigned long flags;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (NULL == mdp5_crtc->ctl)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
	if (ret)
		return -EINVAL;

	lm = mdp5_crtc->lm;
	stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	get_roi(crtc, &roi_w, &roi_h);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
	if (ret) {
		dev_err(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	mdp5_crtc->cursor.x = x = max(x, 0);
	mdp5_crtc->cursor.y = y = max(y, 0);

	get_roi(crtc, &roi_w, &roi_h);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	return 0;
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = drm_atomic_helper_crtc_set_property,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
};

static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = drm_atomic_helper_crtc_set_property,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.disable = mdp5_crtc_disable,
	.enable = mdp5_crtc_enable,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
};

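/* vblank irq handler: completes a pending page flip and/or commits the
 * queued cursor-unref work, then unregisters itself until re-armed by
 * request_pending().
 */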
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete(&mdp5_crtc->pp_completion);
}

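/* In command mode, wait (up to 50ms) for the ping-pong done irq that
 * signals the frame has been written out.
 */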
static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
						msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);
}

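/* Wait (up to 50ms) for the hw to clear the flush bits from the last
 * commit, i.e. for the new configuration to be latched at vblank.
 */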
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!mdp5_crtc->ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

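/* Attach this CRTC to an interface and CTL: derive its error/vblank irq
 * masks, set up ping-pong done handling for DSI command mode, and
 * program the CTL pipeline.
 */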
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	int lm = mdp5_crtc_get_lm(crtc);

	/* now that we know what irqs we want: */
	mdp5_crtc->err.irqmask = intf2err(intf->num);
	mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);

	if ((intf->type == INTF_DSI) &&
		(intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
		mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
		mdp5_crtc->cmd_mode = true;
	} else {
		mdp5_crtc->pp_done.irqmask = 0;
		mdp5_crtc->pp_done.irq = NULL;
		mdp5_crtc->cmd_mode = false;
	}

	mdp_irq_update(&mdp5_kms->base);

	mdp5_crtc->ctl = ctl;
	mdp5_ctl_set_pipeline(ctl, intf, lm);
}

int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
}

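/* Block until the last commit has taken effect: pp_done in command mode,
 * flush/vblank completion otherwise.
 */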
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	if (mdp5_crtc->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
				struct drm_plane *plane,
				struct drm_plane *cursor_plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;
	mdp5_crtc->lm = GET_LM_ID(id);

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;

	if (cursor_plane)
		drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
					  &mdp5_crtc_no_lm_cursor_funcs, NULL);
	else
		drm_crtc_init_with_planes(dev, crtc, plane, NULL,
					  &mdp5_crtc_funcs, NULL);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
	plane->crtc = crtc;

	return crtc;
}