/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */

struct mdp5_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	bool enabled;

	/* layer mixer used for this CRTC (+ its lock): */
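	/* CRTC ids 0..2 map 1:1 to LM 0..2; CRTC id 3 is routed to LM 5: */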
#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
	int lm;
	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* CTL used for this CRTC: */
	struct mdp5_ctl *ctl;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that were flushed at the last commit, used to decide
	 * whether a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	bool cmd_mode;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint32_t width, height;
		uint32_t x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
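	/* re-arm the ping-pong done completion for the commit that follows */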
	reinit_completion(&mdp5_crtc->pp_completion);
}

static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
	return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}

/*
 * Flush updates, to make sure the hw is updated to the new scanout fb,
 * so that we can safely queue an unref of the current fb (i.e. at the
 * next vblank we know the hw is done with the previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_crtc->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);

	return crtc_flush(crtc, flush_mask);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	struct drm_plane *plane;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp5_crtc->event = NULL;
			DBG("%s: send event: %p", mdp5_crtc->name, event);
			drm_crtc_send_vblank_event(crtc, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		mdp5_plane_complete_flip(plane);
	}

	if (mdp5_crtc->ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
		mdp5_crtc->ctl = NULL;
	}
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);

	msm_gem_put_iova(val, mdp5_kms->id);
	drm_gem_object_unreference_unlocked(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	uint32_t lm = mdp5_crtc->lm;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	uint8_t stage[STAGE_MAX + 1];
	int i, plane_cnt = 0;
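/* map a mixer stage to its 0-based hw blender index (STAGE_BASE has no blender) */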
#define blender(stage)	((stage) - STAGE0)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	if (!mdp5_crtc->ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage] = mdp5_plane_pipe(plane);
		plane_cnt++;
	}

	/*
	 * If there is no base layer, enable the border color.
	 * Although that is not possible with the current blend logic,
	 * keep this here as a reminder.
	 */
	if (!pstates[STAGE_BASE] && plane_cnt) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	}

	/* Program the blend op and alpha values for each staged plane */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->alpha;
		bg_alpha = 0xFF - pstates[i]->alpha;
		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable && pstates[i]->premultiplied) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
				       MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
	}

	mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);

out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp5_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", mdp5_crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	if (mdp5_crtc->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	mdp5_disable(mdp5_kms);

	mdp5_crtc->enabled = false;
}

static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", mdp5_crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	mdp5_enable(mdp5_kms);
	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_crtc->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}

struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

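/* comparator for sort(): order plane states by ascending zpos */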
static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}

static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	int cnt = 0, i;

	DBG("%s: check", mdp5_crtc->name);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		if (cnt >= (hw_cfg->lm.nb_stages)) {
			dev_err(dev->dev, "too many planes!\n");
			return -EINVAL;
		}

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		cnt++;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	for (i = 0; i < cnt; i++) {
		pstates[i].state->stage = STAGE_BASE + i;
		DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
				pipe2name(mdp5_plane_pipe(pstates[i].plane)),
				pstates[i].state->stage);
	}

	return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s: begin", mdp5_crtc->name);
}

static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!mdp5_crtc->ctl))
		return;

	blend_setup(crtc);

	/* The PP_DONE irq is only used by command mode for now.
	 * It is better to request it as pending before the FLUSH and START
	 * triggers, to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before the SW trigger
	 * in command mode.
	 */
	if (mdp5_crtc->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	request_pending(crtc, PENDING_FLIP);
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * The cursor Region Of Interest (ROI) is the part of the cursor
	 * buffer that is read out and rendered. The ROI is determined by the
	 * visibility of the cursor point. In the default cursor image the
	 * cursor point is at the top left of the image, unless specified
	 * otherwise via the hotspot feature.
	 *
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, the ROI
	 * width and height need to be adjusted to crop the cursor image
	 * accordingly:
	 * (xres - x) is the new cursor width when x > (xres - cursor.width)
	 * (yres - y) is the new cursor height when y > (yres - cursor.height)
	 */
	*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
}

static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	uint32_t blendcfg, cursor_addr, stride;
	int ret, lm;
	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w, roi_h;
	bool cursor_enable = true;
	unsigned long flags;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (!mdp5_crtc->ctl)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
	if (ret)
		return -EINVAL;

	lm = mdp5_crtc->lm;
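	/* cursor scanout is always ARGB8888, i.e. 4 bytes per pixel */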
	stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	get_roi(crtc, &roi_w, &roi_h);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
	if (ret) {
		dev_err(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	mdp5_crtc->cursor.x = x = max(x, 0);
	mdp5_crtc->cursor.y = y = max(y, 0);

	get_roi(crtc, &roi_w, &roi_h);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	return 0;
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = drm_atomic_helper_crtc_set_property,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.disable = mdp5_crtc_disable,
	.enable = mdp5_crtc_enable,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
};

static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

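	/* the vblank irq was requested one-shot by request_pending(), drop it again */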
	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete(&mdp5_crtc->pp_completion);
}

static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
						msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);
}

static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!mdp5_crtc->ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	int lm = mdp5_crtc_get_lm(crtc);

	/* now that we know what irq's we want: */
	mdp5_crtc->err.irqmask = intf2err(intf->num);
	mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);

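	/* command mode DSI signals frame completion via the LM's ping-pong done irq */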
	if ((intf->type == INTF_DSI) &&
		(intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
		mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
		mdp5_crtc->cmd_mode = true;
	} else {
		mdp5_crtc->pp_done.irqmask = 0;
		mdp5_crtc->pp_done.irq = NULL;
		mdp5_crtc->cmd_mode = false;
	}

	mdp_irq_update(&mdp5_kms->base);

	mdp5_crtc->ctl = ctl;
	mdp5_ctl_set_pipeline(ctl, intf, lm);
}

int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
}

void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

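	/* command mode completion is signalled by the ping-pong done irq;
	 * video mode waits for the flushed bits to clear at vblank.
	 */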
	if (mdp5_crtc->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;
	mdp5_crtc->lm = GET_LM_ID(id);

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;

	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
			pipe2name(mdp5_plane_pipe(plane)), id);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs,
				  NULL);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
	plane->crtc = crtc;

	return crtc;
}