// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_probe_helper.h>

#include "mdp5_kms.h"

#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

struct mdp5_crtc {
	struct drm_crtc base;
	int id;
	bool enabled;

	spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that were flushed at the last commit; used to decide
	 * whether a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	bool lm_cursor_enabled;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint64_t iova;
		uint32_t width, height;
		int x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

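/*
 * Note the pending update (PENDING_CURSOR and/or PENDING_FLIP) and register
 * for the vblank irq, so the update can be completed from
 * mdp5_crtc_vblank_irq() on the next vblank.
 */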
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	reinit_completion(&mdp5_crtc->pp_completion);
}

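/*
 * Commit a flush mask to the CTL. When defer_start was set on the crtc
 * state, the START trigger appears to be skipped for this one commit
 * (the flag is consumed here).
 */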
static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	bool start = !mdp5_cstate->defer_start;

	mdp5_cstate->defer_start = false;

	DBG("%s: flush=%08x", crtc->name, flush_mask);

	return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
}

/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (i.e. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_hw_mixer *mixer, *r_mixer;
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_cstate->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (!plane->state->visible)
			continue;
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	mixer = mdp5_cstate->pipeline.mixer;
	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);

	r_mixer = mdp5_cstate->pipeline.r_mixer;
	if (r_mixer)
		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	return crtc_flush(crtc, flush_mask);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
		/* XXX: What to do here? */
		/* mdp5_crtc->ctl = NULL; */
	}
}

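/*
 * flip-work callback: drop the iova pin and the GEM reference of a cursor
 * bo once scanout is done with it
 */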
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
	struct msm_kms *kms = &mdp5_kms->base.base;

	msm_gem_unpin_iova(val, kms->aspace);
	drm_gem_object_put_unlocked(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}

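/* map a blend stage to its FG_ALPHA bit in LM_BLEND_COLOR_OUT */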
static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
	switch (stage) {
	case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
	case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
	case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
	case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
	case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
	case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
	case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
	default:
		return 0;
	}
}

/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */
#define PIPE_LEFT	0
#define PIPE_RIGHT	1

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
	u32 val;
#define blender(stage)	((stage) - STAGE0)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	/* XXX: Can this happen now? */
	if (!ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp5_pipe right_pipe;

		if (!plane->state->visible)
			continue;

		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
						mdp5_plane_pipe(plane);
		/*
		 * if we have a right pipe (i.e., the plane consists of 2
		 * hwpipes), then stage the right pipe on the right side of
		 * both the layer mixers
		 */
		right_pipe = mdp5_plane_right_pipe(plane);
		if (right_pipe) {
			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
		}

		plane_cnt++;
	}

	if (!pstates[STAGE_BASE]) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

	/* The reset for blending */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable && pstates[i]->premultiplied) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
				       MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		if (r_mixer) {
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}

	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);
out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

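/*
 * Program the layer mixer output size(s) from the adjusted mode. With a
 * right mixer each LM scans out half of hdisplay, the main mixer taking
 * the LEFT side of the source split and r_mixer the RIGHT side.
 */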
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));

	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
			MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* Assign mixer to LEFT side in source split mode */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
			   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* Assign mixer to RIGHT side in source split mode */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

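/*
 * Disable the crtc: quiesce vblank handling, unregister the err (and, in
 * command mode, pp_done) irqs, drop the runtime PM reference, and send
 * any event still pending on the now-inactive state.
 */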
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	pm_runtime_put_sync(dev);

	if (crtc->state->event && !crtc->state->active) {
		WARN_ON(mdp5_crtc->event);
		spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
	}

	mdp5_crtc->enabled = false;
}

static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	pm_runtime_get_sync(dev);

	if (mdp5_crtc->lm_cursor_enabled) {
		/*
		 * Restore LM cursor state, as it might have been lost
		 * with suspend:
		 */
		if (mdp5_crtc->cursor.iova) {
			unsigned long flags;

			spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
			mdp5_crtc_restore_cursor(crtc);
			spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, true);
		} else {
			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, false);
		}
	}

	/* Restore vblank irq handling after power is enabled */
	drm_crtc_vblank_on(crtc);

	mdp5_crtc_mode_set_nofb(crtc);

	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}

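/*
 * (Re)assign hardware mixers to the crtc state: mixers are (re)allocated
 * when there is none yet or when the need for a right mixer has changed,
 * and the err/vblank/pp_done irq masks are derived from the interface
 * that the encoder's atomic check already picked.
 */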
int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_crtc_state,
			     bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	new_mixer = !pipeline->mixer;

	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (old_r_mixer) {
			mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

	/*
	 * these should have been already set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check_modeset)
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}

struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}

/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
		struct drm_plane_state *pstate)
{
	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}

static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
					struct drm_crtc_state *new_crtc_state,
					struct drm_plane_state *bpstate)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);

	/*
	 * if we're in source split mode, it's mandatory to have
	 * border out on the base stage
	 */
	if (mdp5_cstate->pipeline.r_mixer)
		return STAGE0;

	/* if the bottom-most layer is not fullscreen, we need to use
	 * it for solid-color:
	 */
	if (!is_fullscreen(new_crtc_state, bpstate))
		return STAGE0;

	return STAGE_BASE;
}

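/*
 * atomic check: collect the visible planes, set up the mixer pipeline
 * (a right mixer is needed for modes wider than a single LM or for
 * dual-hwpipe planes), then assign blend stages in zpos order starting
 * from the stage returned by get_start_stage().
 */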
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	const struct drm_display_mode *mode = &state->adjusted_mode;
	bool cursor_plane = false;
	bool need_right_mixer = false;
	int cnt = 0, i;
	int ret;
	enum mdp_mixer_stage_id start;

	DBG("%s: check", crtc->name);

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		if (!pstate->visible)
			continue;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		/*
		 * if any plane on this crtc uses 2 hwpipes, then we need
		 * the crtc to have a right hwmixer.
		 */
		if (pstates[cnt].state->r_hwpipe)
			need_right_mixer = true;
		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	/* bail out early if there aren't any planes */
	if (!cnt)
		return 0;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/*
	 * we need a right hwmixer if the mode's width is greater than a single
	 * LM's max width
	 */
	if (mode->hdisplay > hw_cfg->lm.max_width)
		need_right_mixer = true;

	ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	start = get_start_stage(crtc, state, &pstates[0].state->base);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
		DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
			cnt, start);
		return -EINVAL;
	}

	for (i = 0; i < cnt; i++) {
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = start + i;
		DBG("%s: assign pipe %s on stage=%d", crtc->name,
				pstates[i].plane->name,
				pstates[i].state->stage);
	}

	return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	DBG("%s: begin", crtc->name);
}

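/*
 * atomic flush: latch the pageflip event for completion at vblank, set up
 * blending and commit the accumulated flush bits, refreshing the irq
 * masks from the new state before the flip is armed.
 */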
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	/* XXX: Can this happen now ? */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* XXX are we leaking out state here? */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * The cursor Region Of Interest (ROI) is the part of the cursor
	 * buffer that is read out and rendered. The ROI is determined by
	 * the visibility of the cursor point. In the default cursor image
	 * the cursor point will
	 * be at the top left of the cursor image.
	 *
	 * Without rotation:
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres-x) will be new cursor width when x > (xres - cursor.width)
	 * (yres-y) will be new cursor height when y > (yres - cursor.height)
	 *
	 * With rotation:
	 * We get negative x and/or y coordinates.
	 * (cursor.width - abs(x)) will be new cursor width when x < 0
	 * (cursor.height - abs(y)) will be new cursor height when y < 0
	 */
	if (mdp5_crtc->cursor.x >= 0)
		*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	else
		*roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
	if (mdp5_crtc->cursor.y >= 0)
		*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
	else
		*roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}

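/*
 * Reprogram the LM cursor registers (format, size, ROI, position, iova)
 * from the cached cursor state. Callers must hold cursor.lock.
 */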
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t blendcfg, stride;
	uint32_t x, y, src_x, src_y, width, height;
	uint32_t roi_w, roi_h;
	int lm;

	assert_spin_locked(&mdp5_crtc->cursor.lock);

	lm = mdp5_cstate->pipeline.mixer->lm;

	x = mdp5_crtc->cursor.x;
	y = mdp5_crtc->cursor.y;
	width = mdp5_crtc->cursor.width;
	height = mdp5_crtc->cursor.height;

	stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);

	get_roi(crtc, &roi_w, &roi_h);

	/* If the cursor buffer overlaps the upper or left screen
	 * border due to rotation, the pixel offset of the ROI inside
	 * the cursor buffer is the positive overlap distance.
	 */
	if (mdp5_crtc->cursor.x < 0) {
		src_x = abs(mdp5_crtc->cursor.x);
		x = 0;
	} else {
		src_x = 0;
	}
	if (mdp5_crtc->cursor.y < 0) {
		src_y = abs(mdp5_crtc->cursor.y);
		y = 0;
	} else {
		src_y = 0;
	}
	DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
		crtc->name, x, y, roi_w, roi_h, src_x, src_y);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
			MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
			MDP5_LM_CURSOR_XY_SRC_X(src_x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
			mdp5_crtc->cursor.iova);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}

static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct platform_device *pdev = mdp5_kms->pdev;
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	struct mdp5_ctl *ctl;
	int ret;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	bool cursor_enable = true;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_set is deprecated with cursor planes\n");
		return -EINVAL;
	}

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	ctl = mdp5_cstate->ctl;
	if (!ctl)
		return -EINVAL;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		mdp5_crtc->cursor.iova = 0;
		pm_runtime_get_sync(&pdev->dev);
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
			&mdp5_crtc->cursor.iova);
	if (ret)
		return -EINVAL;

	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	mdp5_crtc_restore_cursor(crtc);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	pm_runtime_put_sync(&pdev->dev);
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	struct drm_device *dev = crtc->dev;
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_move is deprecated with cursor planes\n");
		return -EINVAL;
	}

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	/* accept negative x/y coordinates up to maximum cursor overlap */
	mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
	mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);

	get_roi(crtc, &roi_w, &roi_h);

	pm_runtime_get_sync(&mdp5_kms->pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_crtc_restore_cursor(crtc);
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	pm_runtime_put_sync(&mdp5_kms->pdev->dev);

	return 0;
}

static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
			     const struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);

	if (WARN_ON(!pipeline))
		return;

	if (mdp5_cstate->ctl)
		drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));

	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
			pipeline->mixer->name : "(null)");

	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
			   pipeline->r_mixer->name : "(null)");

	drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}

static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
		kfree(to_mdp5_crtc_state(crtc->state));
	}

	mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);

	if (mdp5_cstate) {
		mdp5_cstate->base.crtc = crtc;
		crtc->state = &mdp5_cstate->base;
	}
}

static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc->state))
		return NULL;

	mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
			      sizeof(*mdp5_cstate), GFP_KERNEL);
	if (!mdp5_cstate)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);

	return &mdp5_cstate->base;
}

static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(mdp5_cstate);
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
	.atomic_enable = mdp5_crtc_atomic_enable,
	.atomic_disable = mdp5_crtc_atomic_disable,
};

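/*
 * vblank irq, registered one-shot by request_pending(): completes a
 * pending flip and/or commits deferred cursor-bo unrefs
 */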
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete(&mdp5_crtc->pp_completion);
}

static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
						msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn(dev->dev, "pp done time out, lm=%d\n",
			 mdp5_cstate->pipeline.mixer->lm);
}

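/*
 * Wait (up to 50ms) for the flush bits of the last commit to clear from
 * the CTL commit status, i.e. for the hw to have latched the new
 * configuration.
 */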
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	/* should this be done elsewhere ? */
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}

struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return mdp5_cstate->ctl;
}

struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}

struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return &mdp5_cstate->pipeline;
}

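/*
 * Command mode signals completion through the PP_DONE irq, video mode by
 * the flushed bits clearing at vblank.
 */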
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	if (mdp5_cstate->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
				struct drm_plane *plane,
				struct drm_plane *cursor_plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;

	mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;

	drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
				  &mdp5_crtc_funcs, NULL);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

	return crtc;
}