/*
 * rcar_du_crtc.c  --  R-Car Display Unit CRTCs
 *
 * Copyright (C) 2013-2014 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/mutex.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>

#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
#include "rcar_du_plane.h"
#include "rcar_du_regs.h"

static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
}

static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
}

static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
		      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
}

static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
		      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
}

static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg,
				 u32 clr, u32 set)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;
	u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg);

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set);
}

static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
{
	int ret;

	ret = clk_prepare_enable(rcrtc->clock);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(rcrtc->extclock);
	if (ret < 0)
		goto error_clock;

	ret = rcar_du_group_get(rcrtc->group);
	if (ret < 0)
		goto error_group;

	return 0;

error_group:
	clk_disable_unprepare(rcrtc->extclock);
error_clock:
	clk_disable_unprepare(rcrtc->clock);
	return ret;
}

static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
{
	rcar_du_group_put(rcrtc->group);

	clk_disable_unprepare(rcrtc->extclock);
	clk_disable_unprepare(rcrtc->clock);
}

/* -----------------------------------------------------------------------------
 * Hardware Setup
 */

static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
	const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
	unsigned long mode_clock = mode->clock * 1000;
	unsigned long clk;
	u32 value;
	u32 escr;
	u32 div;

	/* Compute the clock divisor and select the internal or external dot
	 * clock based on the requested frequency.
	 */
	clk = clk_get_rate(rcrtc->clock);
	div = DIV_ROUND_CLOSEST(clk, mode_clock);
	div = clamp(div, 1U, 64U) - 1;
	escr = div | ESCR_DCLKSEL_CLKS;

	if (rcrtc->extclock) {
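		/*
		 * Pick whichever of the internal and external clocks gives a
		 * divided rate closest to the requested mode clock.
		 */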
		unsigned long extclk;
		unsigned long extrate;
		unsigned long rate;
		u32 extdiv;

		extclk = clk_get_rate(rcrtc->extclock);
		extdiv = DIV_ROUND_CLOSEST(extclk, mode_clock);
		extdiv = clamp(extdiv, 1U, 64U) - 1;

		rate = clk / (div + 1);
		extrate = extclk / (extdiv + 1);

		if (abs((long)extrate - (long)mode_clock) <
		    abs((long)rate - (long)mode_clock)) {
			dev_dbg(rcrtc->group->dev->dev,
				"crtc%u: using external clock\n", rcrtc->index);
			escr = extdiv | ESCR_DCLKSEL_DCLKIN;
		}
	}

	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
			    escr);
	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);

	/* Signal polarities */
	value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
	      | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
	      | DSMR_DIPM_DE | DSMR_CSPM;
	rcar_du_crtc_write(rcrtc, DSMR, value);

	/* Display timings */
	rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19);
	rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
					mode->hdisplay - 19);
	rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
					mode->hsync_start - 1);
	rcar_du_crtc_write(rcrtc, HCR,  mode->htotal - 1);

	rcar_du_crtc_write(rcrtc, VDSR, mode->crtc_vtotal -
					mode->crtc_vsync_end - 2);
	rcar_du_crtc_write(rcrtc, VDER, mode->crtc_vtotal -
					mode->crtc_vsync_end +
					mode->crtc_vdisplay - 2);
	rcar_du_crtc_write(rcrtc, VSPR, mode->crtc_vtotal -
					mode->crtc_vsync_end +
					mode->crtc_vsync_start - 1);
	rcar_du_crtc_write(rcrtc, VCR,  mode->crtc_vtotal - 1);

	rcar_du_crtc_write(rcrtc, DESR,  mode->htotal - mode->hsync_start);
	rcar_du_crtc_write(rcrtc, DEWR,  mode->hdisplay);
}

void rcar_du_crtc_route_output(struct drm_crtc *crtc,
			       enum rcar_du_output output)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	/* Store the route from the CRTC output to the DU output. The DU will be
	 * configured when starting the CRTC.
	 */
	rcrtc->outputs |= BIT(output);

	/* Store RGB routing to DPAD0, the hardware will be configured when
	 * starting the CRTC.
	 */
	if (output == RCAR_DU_OUTPUT_DPAD0)
		rcdu->dpad0_source = rcrtc->index;
}

static unsigned int plane_zpos(struct rcar_du_plane *plane)
{
	return to_rcar_du_plane_state(plane->plane.state)->zpos;
}

static void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
	unsigned int num_planes = 0;
	unsigned int prio = 0;
	unsigned int i;
	u32 dptsr = 0;
	u32 dspr = 0;

	for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
		struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
		unsigned int j;

		if (plane->crtc != &rcrtc->crtc || !plane->enabled)
			continue;

		/* Insert the plane in the sorted planes array. */
		for (j = num_planes++; j > 0; --j) {
			if (plane_zpos(planes[j-1]) <= plane_zpos(plane))
				break;
			planes[j] = planes[j-1];
		}

		planes[j] = plane;
		prio += plane->format->planes * 4;
	}

	for (i = 0; i < num_planes; ++i) {
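		/* Each hardware plane occupies a 4-bit field in DSPR, assigned in
		 * zpos order; formats that use two hardware planes take two
		 * consecutive fields.
		 */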
		struct rcar_du_plane *plane = planes[i];
		unsigned int index = plane->hwindex;

		prio -= 4;
		dspr |= (index + 1) << prio;
		dptsr |= DPTSR_PnDK(index) |  DPTSR_PnTS(index);

		if (plane->format->planes == 2) {
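			/* The second hardware plane of a two-plane format is the
			 * next plane, wrapping around the group's eight planes.
			 */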
			index = (index + 1) % 8;

			prio -= 4;
			dspr |= (index + 1) << prio;
			dptsr |= DPTSR_PnDK(index) |  DPTSR_PnTS(index);
		}
	}

	/* Select display timing and dot clock generator 2 for planes associated
	 * with superposition controller 2.
	 */
	if (rcrtc->index % 2) {
		u32 value = rcar_du_group_read(rcrtc->group, DPTSR);

		/* The DPTSR register is updated when the display controller is
		 * stopped. We thus need to restart the DU. Once again, sorry
		 * for the flicker. One way to mitigate the issue would be to
		 * pre-associate planes with CRTCs (either with a fixed 4/4
		 * split, or through a module parameter). Flicker would then
		 * occur only if we need to break the pre-association.
		 */
		if (value != dptsr) {
			rcar_du_group_write(rcrtc->group, DPTSR, dptsr);
			if (rcrtc->group->used_crtcs)
				rcar_du_group_restart(rcrtc->group);
		}
	}

	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
			    dspr);
}

/* -----------------------------------------------------------------------------
 * Page Flip
 */

void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
				   struct drm_file *file)
{
	struct drm_pending_vblank_event *event;
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	/* Destroy the pending vertical blanking event associated with the
	 * pending page flip, if any, and disable vertical blanking interrupts.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	event = rcrtc->event;
	if (event && event->base.file_priv == file) {
		rcrtc->event = NULL;
		event->base.destroy(&event->base);
		drm_crtc_vblank_put(&rcrtc->crtc);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
{
	struct drm_pending_vblank_event *event;
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = rcrtc->event;
	rcrtc->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (event == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	drm_send_vblank_event(dev, rcrtc->index, event);
	wake_up(&rcrtc->flip_wait);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_crtc_vblank_put(&rcrtc->crtc);
}

static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
{
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;
	bool pending;

	spin_lock_irqsave(&dev->event_lock, flags);
	pending = rcrtc->event != NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return pending;
}

static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	if (wait_event_timeout(rcrtc->flip_wait,
			       !rcar_du_crtc_page_flip_pending(rcrtc),
			       msecs_to_jiffies(50)))
		return;

	dev_warn(rcdu->dev, "page flip timeout\n");

	rcar_du_crtc_finish_page_flip(rcrtc);
}

/* -----------------------------------------------------------------------------
 * Start/Stop and Suspend/Resume
 */

static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
	struct drm_crtc *crtc = &rcrtc->crtc;
	bool interlaced;
	unsigned int i;

	if (rcrtc->started)
		return;

	if (WARN_ON(rcrtc->plane->format == NULL))
		return;

	/* Set display off and background to black */
	rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
	rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));

	/* Configure display timings and output routing */
	rcar_du_crtc_set_display_timing(rcrtc);
	rcar_du_group_set_routing(rcrtc->group);

	/* FIXME: Commit the planes state. This is required here as the CRTC can
	 * be started from the system resume handler, which doesn't go
	 * through .atomic_plane_update() and .atomic_flush() to commit plane
	 * state. Additionally, given that the plane state atomic commit occurs
	 * between CRTC disable and enable, the hardware state could also be
	 * lost due to runtime PM, requiring a full commit here. This will be
	 * fixed later after switching to atomic updates completely.
	 */
	mutex_lock(&rcrtc->group->planes.lock);
	rcar_du_crtc_update_planes(crtc);
	mutex_unlock(&rcrtc->group->planes.lock);

	for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
		struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];

		if (plane->crtc != crtc || !plane->enabled)
			continue;

		rcar_du_plane_setup(plane);
	}

	/* Select master sync mode. This enables display operation in master
	 * sync mode (with the HSYNC and VSYNC signals configured as outputs and
	 * actively driven).
	 */
	interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE;
	rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK | DSYSR_SCM_MASK,
			     (interlaced ? DSYSR_SCM_INT_VIDEO : 0) |
			     DSYSR_TVM_MASTER);

	rcar_du_group_start_stop(rcrtc->group, true);

	/* Turn vertical blanking interrupt reporting back on. */
	drm_crtc_vblank_on(crtc);

	rcrtc->started = true;
}

static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
	struct drm_crtc *crtc = &rcrtc->crtc;

	if (!rcrtc->started)
		return;

	/* Disable vertical blanking interrupt reporting. We first need to wait
	 * for page flip completion before stopping the CRTC as userspace
	 * expects page flips to eventually complete.
	 */
	rcar_du_crtc_wait_page_flip(rcrtc);
	drm_crtc_vblank_off(crtc);

	/* Select switch sync mode. This stops display operation and configures
	 * the HSYNC and VSYNC signals as inputs.
	 */
	rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);

	rcar_du_group_start_stop(rcrtc->group, false);

	rcrtc->started = false;
}

void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
{
	rcar_du_crtc_stop(rcrtc);
	rcar_du_crtc_put(rcrtc);
}

void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
{
	if (!rcrtc->enabled)
		return;

	rcar_du_crtc_get(rcrtc);
	rcar_du_crtc_start(rcrtc);
}

/* -----------------------------------------------------------------------------
 * CRTC Functions
 */

static void rcar_du_crtc_enable(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	if (rcrtc->enabled)
		return;

	rcar_du_crtc_get(rcrtc);
	rcar_du_crtc_start(rcrtc);

	rcrtc->enabled = true;
}

static void rcar_du_crtc_disable(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	if (!rcrtc->enabled)
		return;

	rcar_du_crtc_stop(rcrtc);
	rcar_du_crtc_put(rcrtc);

	rcrtc->enabled = false;
	rcrtc->outputs = 0;
}

static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	/* TODO Fixup modes */
	return true;
}

static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
{
	struct drm_pending_vblank_event *event = crtc->state->event;
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	/* We need to access the hardware during atomic update, acquire a
	 * reference to the CRTC.
	 */
	rcar_du_crtc_get(rcrtc);

	if (event) {
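		/* Queue the completion event; rcar_du_crtc_finish_page_flip() will
		 * send it from the vblank interrupt handler.
		 */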
		event->pipe = rcrtc->index;

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&dev->event_lock, flags);
		rcrtc->event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}

static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	/* We're done, apply the configuration and drop the reference acquired
	 * in .atomic_begin().
	 */
	mutex_lock(&rcrtc->group->planes.lock);
	rcar_du_crtc_update_planes(crtc);
	mutex_unlock(&rcrtc->group->planes.lock);

	rcar_du_crtc_put(rcrtc);
}

static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
	.mode_fixup = rcar_du_crtc_mode_fixup,
	.disable = rcar_du_crtc_disable,
	.enable = rcar_du_crtc_enable,
	.atomic_begin = rcar_du_crtc_atomic_begin,
	.atomic_flush = rcar_du_crtc_atomic_flush,
};

static const struct drm_crtc_funcs crtc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

/* -----------------------------------------------------------------------------
 * Interrupt Handling
 */

static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
	struct rcar_du_crtc *rcrtc = arg;
	irqreturn_t ret = IRQ_NONE;
	u32 status;

	status = rcar_du_crtc_read(rcrtc, DSSR);
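	/* Acknowledge the pending interrupt sources by writing them to DSRCR. */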
	rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);

	if (status & DSSR_FRM) {
		drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
		rcar_du_crtc_finish_page_flip(rcrtc);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/* -----------------------------------------------------------------------------
 * Initialization
 */

int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
{
	static const unsigned int mmio_offsets[] = {
		DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET
	};

	struct rcar_du_device *rcdu = rgrp->dev;
	struct platform_device *pdev = to_platform_device(rcdu->dev);
	struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
	struct drm_crtc *crtc = &rcrtc->crtc;
	unsigned int irqflags;
	struct clk *clk;
	char clk_name[9];
	char *name;
	int irq;
	int ret;

	/* Get the CRTC clock and the optional external clock. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
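		/* DU variants with per-CRTC clocks name them "du.N". */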
		sprintf(clk_name, "du.%u", index);
		name = clk_name;
	} else {
		name = NULL;
	}

	rcrtc->clock = devm_clk_get(rcdu->dev, name);
	if (IS_ERR(rcrtc->clock)) {
		dev_err(rcdu->dev, "no clock for CRTC %u\n", index);
		return PTR_ERR(rcrtc->clock);
	}

	sprintf(clk_name, "dclkin.%u", index);
	clk = devm_clk_get(rcdu->dev, clk_name);
	if (!IS_ERR(clk)) {
		rcrtc->extclock = clk;
	} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
		dev_info(rcdu->dev, "can't get external clock %u\n", index);
		return -EPROBE_DEFER;
	}

	init_waitqueue_head(&rcrtc->flip_wait);

	rcrtc->group = rgrp;
	rcrtc->mmio_offset = mmio_offsets[index];
	rcrtc->index = index;
	rcrtc->enabled = false;
	rcrtc->plane = &rgrp->planes.planes[index % 2];

	rcrtc->plane->crtc = crtc;

	ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, &rcrtc->plane->plane,
					NULL, &crtc_funcs);
	if (ret < 0)
		return ret;

	drm_crtc_helper_add(crtc, &crtc_helper_funcs);

	/* Start with vertical blanking interrupt reporting disabled. */
	drm_crtc_vblank_off(crtc);

	/* Register the interrupt handler. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
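		/* Per-CRTC interrupt; otherwise all CRTCs share IRQ 0. */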
		irq = platform_get_irq(pdev, index);
		irqflags = 0;
	} else {
		irq = platform_get_irq(pdev, 0);
		irqflags = IRQF_SHARED;
	}

	if (irq < 0) {
		dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
		return irq;
	}

	ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
			       dev_name(rcdu->dev), rcrtc);
	if (ret < 0) {
		dev_err(rcdu->dev,
			"failed to register IRQ for CRTC %u\n", index);
		return ret;
	}

	return 0;
}

void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable)
{
	if (enable) {
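		/* Clear any stale vblank status before unmasking the interrupt. */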
		rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
		rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
	} else {
		rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
	}
}