mdp5_crtc.c 34.7 KB
Newer Older
R
Rob Clark 已提交
1
/*
2
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
R
Rob Clark 已提交
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

R
Rob Clark 已提交
19
#include <linux/sort.h>
R
Rob Clark 已提交
20
#include <drm/drm_mode.h>
21 22 23 24 25
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>

#include "mdp5_kms.h"
R
Rob Clark 已提交
26

27 28 29
#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

R
Rob Clark 已提交
30 31 32 33 34
/*
 * Per-CRTC private state for the MDP5 display controller.
 * Embeds the DRM core CRTC object (base) so to_mdp5_crtc() can
 * recover it with container_of().
 */
struct mdp5_crtc {
	struct drm_crtc base;
	int id;                 /* CRTC index within the mdp5 kms */
	bool enabled;           /* guarded enable/disable bookkeeping */

	spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits have been flushed at the last commit,
	 * used to decide if a vsync has happened since last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	/* bitmask of PENDING_* work to dispatch from the vblank irq */
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	/* irq callbacks registered with the shared mdp irq dispatcher: */
	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	/* signalled from the pp_done irq; waited on in command mode */
	struct completion pp_completion;

	/* true when the legacy LM-based (non-plane) cursor is in use */
	bool lm_cursor_enabled;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint64_t iova;
		uint32_t width, height;
		/* may be negative when the cursor overlaps the top/left edge */
		int x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

73 74
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);

R
Rob Clark 已提交
75 76 77 78 79 80 81 82 83 84 85 86 87 88
/* Resolve the mdp5_kms instance that owns this CRTC. */
static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;

	priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

/*
 * Mark cursor and/or flip work as pending and arm the vblank irq so
 * the pending bits get dispatched from the vblank handler.
 */
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->vblank);
}

89 90 91 92 93 94
static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	reinit_completion(&mdp5_crtc->pp_completion);
}

95
/*
 * Commit the given flush bits through the CTL block. A deferred start
 * (defer_start) suppresses the START trigger for exactly one commit.
 */
static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc_state *cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &cstate->pipeline;
	struct mdp5_ctl *ctl = cstate->ctl;
	bool start;

	/* consume a one-shot deferred start, if requested */
	start = !cstate->defer_start;
	cstate->defer_start = false;

	DBG("%s: flush=%08x", crtc->name, flush_mask);

	return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
}

/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (ie. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
114
static u32 crtc_flush_all(struct drm_crtc *crtc)
R
Rob Clark 已提交
115
{
116
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
117
	struct mdp5_hw_mixer *mixer, *r_mixer;
118
	struct drm_plane *plane;
119 120
	uint32_t flush_mask = 0;

121
	/* this should not happen: */
122
	if (WARN_ON(!mdp5_cstate->ctl))
123
		return 0;
R
Rob Clark 已提交
124

125
	drm_atomic_crtc_for_each_plane(plane, crtc) {
126 127
		if (!plane->state->visible)
			continue;
128
		flush_mask |= mdp5_plane_get_flush(plane);
R
Rob Clark 已提交
129
	}
130

131
	mixer = mdp5_cstate->pipeline.mixer;
132
	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
133

134 135 136 137
	r_mixer = mdp5_cstate->pipeline.r_mixer;
	if (r_mixer)
		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

138
	return crtc_flush(crtc, flush_mask);
R
Rob Clark 已提交
139 140 141 142 143
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
144
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
145
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
R
Rob Clark 已提交
146
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
147
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
R
Rob Clark 已提交
148 149
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
150
	unsigned long flags;
R
Rob Clark 已提交
151 152 153 154

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
155 156 157
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
R
Rob Clark 已提交
158 159 160
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

161
	if (ctl && !crtc->state->enable) {
162
		/* set STAGE_UNUSED for all layers */
163
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
164 165
		/* XXX: What to do here? */
		/* mdp5_crtc->ctl = NULL; */
166
	}
R
Rob Clark 已提交
167 168
}

169 170 171 172 173
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
174
	struct msm_kms *kms = &mdp5_kms->base.base;
175

176
	msm_gem_unpin_iova(val, kms->aspace);
177
	drm_gem_object_put_unlocked(val);
178 179
}

R
Rob Clark 已提交
180 181 182 183 184
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
185
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
R
Rob Clark 已提交
186 187 188 189

	kfree(mdp5_crtc);
}

190 191 192 193 194 195 196 197 198 199 200 201 202 203 204
/*
 * Map a blend stage to its FG_ALPHA bit in the LM BLEND_COLOR_OUT
 * register; unknown stages map to 0 (no bit set).
 */
static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
	u32 mask;

	switch (stage) {
	case STAGE0:
		mask = MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
		break;
	case STAGE1:
		mask = MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
		break;
	case STAGE2:
		mask = MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
		break;
	case STAGE3:
		mask = MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
		break;
	case STAGE4:
		mask = MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
		break;
	case STAGE5:
		mask = MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
		break;
	case STAGE6:
		mask = MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
		break;
	default:
		mask = 0;
		break;
	}

	return mask;
}

205 206 207 208 209 210
/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */
#define PIPE_LEFT	0
#define PIPE_RIGHT	1

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 *
 * All LM register writes happen under lm_lock; the final CTL blend
 * configuration is handed off to mdp5_ctl_blend().
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
	u32 val;
/* map a stage id to the 0-based blender index used by the LM registers */
#define blender(stage)	((stage) - STAGE0)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	/* XXX: Can this happen now? */
	if (!ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp5_pipe right_pipe;

		if (!plane->state->visible)
			continue;

		/* index plane states by their pre-computed blend stage */
		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
						mdp5_plane_pipe(plane);
		/*
		 * if we have a right pipe (i.e, the plane comprises of 2
		 * hwpipes, then stage the right pipe on the right side of both
		 * the layer mixers
		 */
		right_pipe = mdp5_plane_right_pipe(plane);
		if (right_pipe) {
			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
		}

		plane_cnt++;
	}

	/* no base layer -> let the LM generate border color as the base */
	if (!pstates[STAGE_BASE]) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

	/* The reset for blending */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		/* default: constant alpha for both foreground and background */
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable && pstates[i]->premultiplied) {
			/* premultiplied: FG stays const, BG follows FG pixel alpha */
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			/* straight alpha: both FG and BG use per-pixel FG alpha */
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
				       MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		/* program this stage's blender on the left (and right) mixer */
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		if (r_mixer) {
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}

	/* OR the accumulated per-stage FG_ALPHA bits into BLEND_COLOR_OUT */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);
out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

R
Rob Clark 已提交
370
/*
 * Program the layer mixer output size from the adjusted mode, and the
 * left/right split assignment when a right mixer is in use (source
 * split mode). All LM register writes happen under lm_lock.
 */
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));

	/* in source split mode each mixer covers half the display width */
	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
			MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* Assign mixer to LEFT side in source split mode */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
			   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* Assign mixer to RIGHT side in source split mode */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

419 420
/*
 * Atomic disable: quiesce vblank handling, unregister our irqs, drop
 * the runtime-PM reference, and deliver any event the core attached to
 * a disabling commit (no vblank will fire to deliver it for us).
 */
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	/* pp_done is only registered while in command mode */
	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	pm_runtime_put_sync(dev);

	/* complete the commit's event ourselves since no vblank will come */
	if (crtc->state->event && !crtc->state->active) {
		WARN_ON(mdp5_crtc->event);
		spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
	}

	mdp5_crtc->enabled = false;
}

453 454
/*
 * Atomic enable: take a runtime-PM reference, restore the (legacy LM)
 * cursor state that may have been lost across suspend, re-enable vblank
 * handling, reprogram the mode, and register our irq callbacks.
 */
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	/* hardware must be powered before any register access below */
	pm_runtime_get_sync(dev);

	if (mdp5_crtc->lm_cursor_enabled) {
		/*
		 * Restore LM cursor state, as it might have been lost
		 * with suspend:
		 */
		if (mdp5_crtc->cursor.iova) {
			unsigned long flags;

			spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
			mdp5_crtc_restore_cursor(crtc);
			spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, true);
		} else {
			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, false);
		}
	}

	/* Restore vblank irq handling after power is enabled */
	drm_crtc_vblank_on(crtc);

	mdp5_crtc_mode_set_nofb(crtc);

	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	/* pp_done is only needed for command-mode panels */
	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}

501
/*
 * (Re)assign the hardware mixer(s) for this CRTC's pipeline and derive
 * the irq masks from the interface the encoder picked.
 *
 * A new mixer assignment is needed when none exists yet, or when the
 * need for a right mixer (source split) has changed. Returns 0 on
 * success or a negative error code from mdp5_mixer_assign().
 */
int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_crtc_state,
			     bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	new_mixer = !pipeline->mixer;

	/* right-mixer requirement changed -> must reassign */
	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		/* release previously-held mixers back to the pool */
		mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (old_r_mixer) {
			mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

	/*
	 * these should have been already set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check_modeset)
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	/* command-mode DSI panels signal frame completion via PP_DONE */
	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}

R
Rob Clark 已提交
562 563 564 565 566 567
struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
R
Rob Clark 已提交
568
{
R
Rob Clark 已提交
569 570 571
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
R
Rob Clark 已提交
572 573
}

574 575 576 577 578 579 580 581 582
/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
		struct drm_plane_state *pstate)
{
	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}

583
static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605
					struct drm_crtc_state *new_crtc_state,
					struct drm_plane_state *bpstate)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);

	/*
	 * if we're in source split mode, it's mandatory to have
	 * border out on the base stage
	 */
	if (mdp5_cstate->pipeline.r_mixer)
		return STAGE0;

	/* if the bottom-most layer is not fullscreen, we need to use
	 * it for solid-color:
	 */
	if (!is_fullscreen(new_crtc_state, bpstate))
		return STAGE0;

	return STAGE_BASE;
}

R
Rob Clark 已提交
606 607
/*
 * Atomic check: collect the visible planes, (re)assign mixers as
 * needed, and hand out blend stages by ascending zpos. A cursor plane,
 * when present, must be topmost and is pinned to the highest hw stage.
 * Returns 0 on success or a negative error code.
 */
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	const struct drm_display_mode *mode = &state->adjusted_mode;
	bool cursor_plane = false;
	bool need_right_mixer = false;
	int cnt = 0, i;
	int ret;
	enum mdp_mixer_stage_id start;

	DBG("%s: check", crtc->name);

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		if (!pstate->visible)
			continue;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		/*
		 * if any plane on this crtc uses 2 hwpipes, then we need
		 * the crtc to have a right hwmixer.
		 */
		if (pstates[cnt].state->r_hwpipe)
			need_right_mixer = true;
		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	/* bail out early if there aren't any planes */
	if (!cnt)
		return 0;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/*
	 * we need a right hwmixer if the mode's width is greater than a single
	 * LM's max width
	 */
	if (mode->hdisplay > hw_cfg->lm.max_width)
		need_right_mixer = true;

	ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	start = get_start_stage(crtc, state, &pstates[0].state->base);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
		DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
			cnt, start);
		return -EINVAL;
	}

	/* stages are contiguous from 'start'; cursor takes the top stage */
	for (i = 0; i < cnt; i++) {
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = start + i;

		DBG("%s: assign pipe %s on stage=%d", crtc->name,
				pstates[i].plane->name,
				pstates[i].state->stage);
	}

	return 0;
}

693 694
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
R
Rob Clark 已提交
695
{
R
Rob Clark 已提交
696
	DBG("%s: begin", crtc->name);
R
Rob Clark 已提交
697
}
698

699 700
/*
 * Atomic flush: take over the commit's vblank event, program blending,
 * flush everything to the hardware, and arm the vblank irq so the
 * event can be delivered once the flush has landed.
 */
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	/* move the event from the commit state to us, under the event lock */
	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	/* XXX: Can this happen now ? */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* XXX are we leaking out state here? */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}

R
Rob Clark 已提交
746 747 748 749 750 751 752 753 754 755
/*
 * Compute the visible cursor region (ROI width/height) from the current
 * cursor position/size and the display resolution. Reads the cursor
 * fields of mdp5_crtc; results are written through roi_w/roi_h.
 */
static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * Cursor Region Of Interest (ROI) is a plane read from cursor
	 * buffer to render. The ROI region is determined by the visibility of
	 * the cursor point. In the default Cursor image the cursor point will
	 * be at the top left of the cursor image.
	 *
	 * Without rotation:
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres-x) will be new cursor width when x > (xres - cursor.width)
	 * (yres-y) will be new cursor height when y > (yres - cursor.height)
	 *
	 * With rotation:
	 * We get negative x and/or y coordinates.
	 * (cursor.width - abs(x)) will be new cursor width when x < 0
	 * (cursor.height - abs(y)) will be new cursor width when y < 0
	 */
	if (mdp5_crtc->cursor.x >= 0)
		*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	else
		*roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
	if (mdp5_crtc->cursor.y >= 0)
		*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
	else
		*roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}

783 784 785 786 787 788 789
/*
 * (Re)program all LM cursor registers from the cached cursor state
 * (position, size, iova). Caller must hold cursor.lock — asserted
 * below — since these registers are shared with cursor_set/cursor_move.
 */
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t blendcfg, stride;
	uint32_t x, y, src_x, src_y, width, height;
	uint32_t roi_w, roi_h;
	int lm;

	assert_spin_locked(&mdp5_crtc->cursor.lock);

	lm = mdp5_cstate->pipeline.mixer->lm;

	x = mdp5_crtc->cursor.x;
	y = mdp5_crtc->cursor.y;
	width = mdp5_crtc->cursor.width;
	height = mdp5_crtc->cursor.height;

	/* cursor buffer is always ARGB8888 (4 bytes per pixel) */
	stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);

	get_roi(crtc, &roi_w, &roi_h);

	/* If cusror buffer overlaps due to rotation on the
	 * upper or left screen border the pixel offset inside
	 * the cursor buffer of the ROI is the positive overlap
	 * distance.
	 */
	if (mdp5_crtc->cursor.x < 0) {
		src_x = abs(mdp5_crtc->cursor.x);
		x = 0;
	} else {
		src_x = 0;
	}
	if (mdp5_crtc->cursor.y < 0) {
		src_y = abs(mdp5_crtc->cursor.y);
		y = 0;
	} else {
		src_y = 0;
	}
	DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
		crtc->name, x, y, roi_w, roi_h, src_x, src_y);

	/* program format/size/position, then the scanout address */
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
			MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
			MDP5_LM_CURSOR_XY_SRC_X(src_x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
			mdp5_crtc->cursor.iova);

	/* enable blending with per-pixel cursor alpha */
	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}

850 851 852 853 854
/*
 * Legacy (LM-based) cursor set. handle==0 disables the cursor;
 * otherwise the GEM bo is looked up, pinned, and programmed as the new
 * cursor image. The previous bo is unref'd after the next vblank via
 * unref_cursor_worker().
 *
 * Fix: on msm_gem_get_and_pin_iova() failure, drop the reference taken
 * by drm_gem_object_lookup() before returning (previously leaked).
 */
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct platform_device *pdev = mdp5_kms->pdev;
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	struct mdp5_ctl *ctl;
	int ret;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	bool cursor_enable = true;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_set is deprecated with cursor planes\n");
		return -EINVAL;
	}

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	ctl = mdp5_cstate->ctl;
	if (!ctl)
		return -EINVAL;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		mdp5_crtc->cursor.iova = 0;
		pm_runtime_get_sync(&pdev->dev);
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
			&mdp5_crtc->cursor.iova);
	if (ret) {
		/* drop the reference taken by drm_gem_object_lookup() */
		drm_gem_object_put_unlocked(cursor_bo);
		return -EINVAL;
	}

	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	mdp5_crtc_restore_cursor(crtc);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	pm_runtime_put_sync(&pdev->dev);
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
941
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
942
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
943
	struct drm_device *dev = crtc->dev;
944 945 946 947
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

948 949 950 951 952 953
	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_move is deprecated with cursor planes\n");
		return -EINVAL;
	}

954 955 956 957
	/* don't support LM cursors when we we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

958 959 960 961
	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

962 963 964
	/* accept negative x/y coordinates up to maximum cursor overlap */
	mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
	mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);
965

R
Rob Clark 已提交
966
	get_roi(crtc, &roi_w, &roi_h);
967

968
	pm_runtime_get_sync(&mdp5_kms->pdev->dev);
969

970
	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
971
	mdp5_crtc_restore_cursor(crtc);
972 973 974 975
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

R
Rob Clark 已提交
976
	pm_runtime_put_sync(&mdp5_kms->pdev->dev);
977

978 979 980
	return 0;
}

981 982 983 984 985 986
static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
			     const struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
987
	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
988 989 990 991

	if (WARN_ON(!pipeline))
		return;

992 993 994
	if (mdp5_cstate->ctl)
		drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));

995 996
	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
			pipeline->mixer->name : "(null)");
997 998 999 1000

	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
			   pipeline->r_mixer->name : "(null)");
1001 1002

	drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048
}

static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
		kfree(to_mdp5_crtc_state(crtc->state));
	}

	mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);

	if (mdp5_cstate) {
		mdp5_cstate->base.crtc = crtc;
		crtc->state = &mdp5_cstate->base;
	}
}

/*
 * Duplicate the current CRTC state, including the mdp5-private part.
 * Returns NULL on allocation failure or if there is no state to copy.
 */
static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *new_state;

	if (WARN_ON(!crtc->state))
		return NULL;

	/* copy the private fields wholesale ... */
	new_state = kmemdup(to_mdp5_crtc_state(crtc->state),
			    sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	/* ... then let the helper fix up the embedded base state */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &new_state->base);

	return &new_state->base;
}

/* Release an atomic CRTC state previously created by reset/duplicate. */
static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *cstate = to_mdp5_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

R
Rob Clark 已提交
1049
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
R
Rob Clark 已提交
1050
	.set_config = drm_atomic_helper_set_config,
R
Rob Clark 已提交
1051
	.destroy = mdp5_crtc_destroy,
R
Rob Clark 已提交
1052
	.page_flip = drm_atomic_helper_page_flip,
1053 1054 1055
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
1056 1057
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
1058
	.atomic_print_state = mdp5_crtc_atomic_print_state,
R
Rob Clark 已提交
1059 1060 1061
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
R
Rob Clark 已提交
1062 1063 1064 1065
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
1066
	.atomic_enable = mdp5_crtc_atomic_enable,
1067
	.atomic_disable = mdp5_crtc_atomic_disable,
R
Rob Clark 已提交
1068 1069 1070 1071 1072 1073
};

/*
 * One-shot vblank handler: completes a pending page flip and/or kicks
 * the deferred cursor-bo unref work, then unregisters itself.
 */
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	/* one-shot: re-armed by request_pending() when needed again */
	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP)
		complete_flip(crtc, NULL);

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
1092

R
Rob Clark 已提交
1093
	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
R
Rob Clark 已提交
1094 1095
}

1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107
/* Ping-pong-done interrupt: wake anyone waiting in wait_for_pp_done(). */
static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(irq, struct mdp5_crtc, pp_done);

	complete(&mdp5_crtc->pp_completion);
}

static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
1108
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1109 1110 1111 1112 1113
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
						msecs_to_jiffies(50));
	if (ret == 0)
1114
		dev_warn(dev->dev, "pp done time out, lm=%d\n",
1115
			 mdp5_cstate->pipeline.mixer->lm);
1116 1117
}

1118 1119 1120 1121
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
1122 1123
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
1124 1125 1126
	int ret;

	/* Should not call this function if crtc is disabled. */
1127
	if (!ctl)
1128 1129 1130 1131 1132 1133 1134
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
1135
		((mdp5_ctl_get_commit_status(ctl) &
1136 1137 1138 1139 1140 1141 1142 1143 1144 1145
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

R
Rob Clark 已提交
1146 1147 1148 1149 1150 1151
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

1152
void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
R
Rob Clark 已提交
1153
{
1154
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
R
Rob Clark 已提交
1155
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
1156

1157
	/* should this be done elsewhere ? */
1158
	mdp_irq_update(&mdp5_kms->base);
R
Rob Clark 已提交
1159

1160
	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
1161
}
R
Rob Clark 已提交
1162

1163 1164
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
1165
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1166

1167
	return mdp5_cstate->ctl;
1168 1169
}

1170
struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
1171
{
1172 1173 1174 1175 1176 1177 1178 1179 1180
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
1181
}
1182

1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194
/* Return the pipeline embedded in this CRTC's state (WARNs on !crtc). */
struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	cstate = to_mdp5_crtc_state(crtc->state);

	return &cstate->pipeline;
}

1195 1196
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
1197
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
1198

1199
	if (mdp5_cstate->cmd_mode)
1200 1201 1202
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
1203 1204
}

R
Rob Clark 已提交
1205 1206
/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
1207 1208
				struct drm_plane *plane,
				struct drm_plane *cursor_plane, int id)
R
Rob Clark 已提交
1209 1210 1211 1212 1213
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
1214 1215
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);
R
Rob Clark 已提交
1216 1217 1218 1219

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;
1220 1221

	spin_lock_init(&mdp5_crtc->lm_lock);
1222
	spin_lock_init(&mdp5_crtc->cursor.lock);
1223
	init_completion(&mdp5_crtc->pp_completion);
R
Rob Clark 已提交
1224 1225 1226

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
1227
	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
R
Rob Clark 已提交
1228

1229 1230 1231 1232
	mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;

	drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
				  &mdp5_crtc_funcs, NULL);
1233 1234 1235 1236

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

R
Rob Clark 已提交
1237 1238 1239 1240
	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

	return crtc;
}