/*
 * rcar_du_crtc.c  --  R-Car Display Unit CRTCs
 *
 * Copyright (C) 2013-2014 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/mutex.h>

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>

#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
#include "rcar_du_plane.h"
#include "rcar_du_regs.h"

static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
}

static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
}

static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
		      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
}

static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
		      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
}

static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg,
				 u32 clr, u32 set)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;
	u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg);

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set);
}
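
	/* Enable the CRTC functional clock, the optional external dot clock
	 * and the DU group the CRTC belongs to. This pairs with
	 * rcar_du_crtc_put() below.
	 */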

static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
{
	int ret;

	ret = clk_prepare_enable(rcrtc->clock);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(rcrtc->extclock);
	if (ret < 0)
		goto error_clock;

	ret = rcar_du_group_get(rcrtc->group);
	if (ret < 0)
		goto error_group;

	return 0;

error_group:
	clk_disable_unprepare(rcrtc->extclock);
error_clock:
	clk_disable_unprepare(rcrtc->clock);
	return ret;
}

static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
{
	rcar_du_group_put(rcrtc->group);

	clk_disable_unprepare(rcrtc->extclock);
	clk_disable_unprepare(rcrtc->clock);
}

/* -----------------------------------------------------------------------------
 * Hardware Setup
 */

static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
	const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
	unsigned long mode_clock = mode->clock * 1000;
	unsigned long clk;
	u32 value;
	u32 escr;
	u32 div;

	/* Compute the clock divisor and select the internal or external dot
	 * clock based on the requested frequency.
	 */
	clk = clk_get_rate(rcrtc->clock);
	div = DIV_ROUND_CLOSEST(clk, mode_clock);
	div = clamp(div, 1U, 64U) - 1;
	escr = div | ESCR_DCLKSEL_CLKS;
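
	/* If an external dot clock input is available, compute its divisor as
	 * well and select whichever clock source gets closest to the
	 * requested pixel clock.
	 */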

	if (rcrtc->extclock) {
		unsigned long extclk;
		unsigned long extrate;
		unsigned long rate;
		u32 extdiv;

		extclk = clk_get_rate(rcrtc->extclock);
		extdiv = DIV_ROUND_CLOSEST(extclk, mode_clock);
		extdiv = clamp(extdiv, 1U, 64U) - 1;

		rate = clk / (div + 1);
		extrate = extclk / (extdiv + 1);

		if (abs((long)extrate - (long)mode_clock) <
		    abs((long)rate - (long)mode_clock)) {
			dev_dbg(rcrtc->group->dev->dev,
				"crtc%u: using external clock\n", rcrtc->index);
			escr = extdiv | ESCR_DCLKSEL_DCLKIN;
		}
	}
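
	/* CRTCs with an odd index in their group use the second set of
	 * per-group registers (ESCR2 and OTAR2 here, DS2PR for the plane
	 * setup below).
	 */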

	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
			    escr);
	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);

	/* Signal polarities */
	value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
	      | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
	      | DSMR_DIPM_DE | DSMR_CSPM;
	rcar_du_crtc_write(rcrtc, DSMR, value);

	/* Display timings */
	rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19);
	rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
					mode->hdisplay - 19);
	rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
					mode->hsync_start - 1);
	rcar_du_crtc_write(rcrtc, HCR,  mode->htotal - 1);

	rcar_du_crtc_write(rcrtc, VDSR, mode->crtc_vtotal -
					mode->crtc_vsync_end - 2);
	rcar_du_crtc_write(rcrtc, VDER, mode->crtc_vtotal -
					mode->crtc_vsync_end +
					mode->crtc_vdisplay - 2);
	rcar_du_crtc_write(rcrtc, VSPR, mode->crtc_vtotal -
					mode->crtc_vsync_end +
					mode->crtc_vsync_start - 1);
	rcar_du_crtc_write(rcrtc, VCR,  mode->crtc_vtotal - 1);
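
	/* DESR and DEWR are assumed to set the start position and width, in
	 * pixels, of the display enable window.
	 */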

	rcar_du_crtc_write(rcrtc, DESR,  mode->htotal - mode->hsync_start);
	rcar_du_crtc_write(rcrtc, DEWR,  mode->hdisplay);
}

void rcar_du_crtc_route_output(struct drm_crtc *crtc,
			       enum rcar_du_output output)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	/* Store the route from the CRTC output to the DU output. The DU will be
	 * configured when starting the CRTC.
	 */
	rcrtc->outputs |= BIT(output);

	/* Store RGB routing to DPAD0, the hardware will be configured when
	 * starting the CRTC.
	 */
	if (output == RCAR_DU_OUTPUT_DPAD0)
		rcdu->dpad0_source = rcrtc->index;
}

void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
	unsigned int num_planes = 0;
	unsigned int prio = 0;
	unsigned int i;
	u32 dptsr = 0;
	u32 dspr = 0;

	for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
		struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
		unsigned int j;

		if (plane->crtc != &rcrtc->crtc || !plane->enabled)
			continue;

		/* Insert the plane in the sorted planes array. */
		for (j = num_planes++; j > 0; --j) {
			if (planes[j-1]->zpos <= plane->zpos)
				break;
			planes[j] = planes[j-1];
		}

		planes[j] = plane;
		prio += plane->format->planes * 4;
	}
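
	/* Pack the sorted planes into the plane priority value. Each
	 * displayed hardware plane is assumed to occupy a 4-bit field in DSPR
	 * holding its hardware index plus one, filled here from the most
	 * significant field (lowest z-order) downwards; formats using two
	 * hardware planes take two consecutive fields.
	 */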

	for (i = 0; i < num_planes; ++i) {
		struct rcar_du_plane *plane = planes[i];
		unsigned int index = plane->hwindex;

		prio -= 4;
		dspr |= (index + 1) << prio;
		dptsr |= DPTSR_PnDK(index) |  DPTSR_PnTS(index);

		if (plane->format->planes == 2) {
			index = (index + 1) % 8;

			prio -= 4;
			dspr |= (index + 1) << prio;
			dptsr |= DPTSR_PnDK(index) |  DPTSR_PnTS(index);
		}
	}

	/* Select display timing and dot clock generator 2 for planes associated
	 * with superposition controller 2.
	 */
	if (rcrtc->index % 2) {
		u32 value = rcar_du_group_read(rcrtc->group, DPTSR);

		/* The DPTSR register is updated when the display controller is
		 * stopped. We thus need to restart the DU. Once again, sorry
		 * for the flicker. One way to mitigate the issue would be to
		 * pre-associate planes with CRTCs (either with a fixed 4/4
		 * split, or through a module parameter). Flicker would then
		 * occur only if we need to break the pre-association.
		 */
		if (value != dptsr) {
			rcar_du_group_write(rcrtc->group, DPTSR, dptsr);
			if (rcrtc->group->used_crtcs)
				rcar_du_group_restart(rcrtc->group);
		}
	}

	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
			    dspr);
}

/* -----------------------------------------------------------------------------
 * Page Flip
 */

void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
				   struct drm_file *file)
{
	struct drm_pending_vblank_event *event;
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	/* Destroy the pending vertical blanking event associated with the
	 * pending page flip, if any, and disable vertical blanking interrupts.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	event = rcrtc->event;
	if (event && event->base.file_priv == file) {
		rcrtc->event = NULL;
		event->base.destroy(&event->base);
		drm_crtc_vblank_put(&rcrtc->crtc);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
{
	struct drm_pending_vblank_event *event;
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = rcrtc->event;
	rcrtc->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (event == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	drm_send_vblank_event(dev, rcrtc->index, event);
	wake_up(&rcrtc->flip_wait);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_crtc_vblank_put(&rcrtc->crtc);
}

static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
{
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;
	bool pending;

	spin_lock_irqsave(&dev->event_lock, flags);
	pending = rcrtc->event != NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return pending;
}

static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->group->dev;

	if (wait_event_timeout(rcrtc->flip_wait,
			       !rcar_du_crtc_page_flip_pending(rcrtc),
			       msecs_to_jiffies(50)))
		return;

	dev_warn(rcdu->dev, "page flip timeout\n");
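
	/* Complete the page flip manually so that userspace does not wait
	 * forever for an event the interrupt handler will no longer send.
	 */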

	rcar_du_crtc_finish_page_flip(rcrtc);
}

/* -----------------------------------------------------------------------------
 * Start/Stop and Suspend/Resume
 */

static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
	struct drm_crtc *crtc = &rcrtc->crtc;
	bool interlaced;
	unsigned int i;

	if (rcrtc->started)
		return;

	if (WARN_ON(rcrtc->plane->format == NULL))
		return;

	/* Set display off and background to black */
	rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
	rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));

	/* Configure display timings and output routing */
	rcar_du_crtc_set_display_timing(rcrtc);
	rcar_du_group_set_routing(rcrtc->group);

	/* FIXME: Commit the planes state. This is required here as the CRTC can
	 * be started from the DPMS and system resume handlers, which don't go
	 * through .atomic_plane_update() and .atomic_flush() to commit plane
	 * state. Similarly a mode set operation without any update to planes
	 * will not go through atomic plane configuration either. Additionally,
	 * given that the plane state atomic commit occurs between CRTC disable
	 * and enable, the hardware state could also be lost due to runtime PM,
	 * requiring a full commit here. This will be fixed later after
	 * switching to atomic updates completely.
	 */
	mutex_lock(&rcrtc->group->planes.lock);
	rcar_du_crtc_update_planes(crtc);
	mutex_unlock(&rcrtc->group->planes.lock);

	for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
		struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];

		if (plane->crtc != crtc || !plane->enabled)
			continue;

		rcar_du_plane_setup(plane);
	}

	/* Select master sync mode. This enables display operation in master
	 * sync mode (with the HSYNC and VSYNC signals configured as outputs and
	 * actively driven).
	 */
	interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE;
	rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK | DSYSR_SCM_MASK,
			     (interlaced ? DSYSR_SCM_INT_VIDEO : 0) |
			     DSYSR_TVM_MASTER);

	rcar_du_group_start_stop(rcrtc->group, true);

	/* Turn vertical blanking interrupt reporting back on. */
	drm_crtc_vblank_on(crtc);

	rcrtc->started = true;
}

static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
	struct drm_crtc *crtc = &rcrtc->crtc;

	if (!rcrtc->started)
		return;

	/* Disable vertical blanking interrupt reporting. We first need to wait
	 * for page flip completion before stopping the CRTC as userspace
	 * expects page flips to eventually complete.
	 */
	rcar_du_crtc_wait_page_flip(rcrtc);
	drm_crtc_vblank_off(crtc);

	/* Select switch sync mode. This stops display operation and configures
	 * the HSYNC and VSYNC signals as inputs.
	 */
	rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);

	rcar_du_group_start_stop(rcrtc->group, false);

	rcrtc->started = false;
}

void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
{
	rcar_du_crtc_stop(rcrtc);
	rcar_du_crtc_put(rcrtc);
}

void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
{
	if (rcrtc->dpms != DRM_MODE_DPMS_ON)
		return;

	rcar_du_crtc_get(rcrtc);
	rcar_du_crtc_start(rcrtc);
}

static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
{
	struct drm_crtc *crtc = &rcrtc->crtc;

	rcar_du_plane_compute_base(rcrtc->plane, crtc->primary->fb);
	rcar_du_plane_update_base(rcrtc->plane);
}

/* -----------------------------------------------------------------------------
 * CRTC Functions
 */

static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;

	if (rcrtc->dpms == mode)
		return;

	if (mode == DRM_MODE_DPMS_ON) {
		rcar_du_crtc_get(rcrtc);
		rcar_du_crtc_start(rcrtc);
	} else {
		rcar_du_crtc_stop(rcrtc);
		rcar_du_crtc_put(rcrtc);
	}

	rcrtc->dpms = mode;
}

static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	/* TODO Fixup modes */
	return true;
}

static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	/* We need to access the hardware during mode set, acquire a reference
	 * to the CRTC.
	 */
	rcar_du_crtc_get(rcrtc);

	/* Stop the CRTC, force the DPMS mode to off as a result. */
	rcar_du_crtc_stop(rcrtc);

	rcrtc->dpms = DRM_MODE_DPMS_OFF;
	rcrtc->outputs = 0;
}

static void rcar_du_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	/* No-op. We should configure the display timings here, but as we're
	 * called with the CRTC disabled clocks might be off, and we thus can't
	 * access the hardware. Let's just configure everything when enabling
	 * the CRTC.
	 */
}

static void rcar_du_crtc_mode_commit(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	/* We're done, restart the CRTC and set the DPMS mode to on. The
	 * reference to the DU acquired at prepare() time will thus be released
	 * by the DPMS handler (possibly called by the disable() handler).
	 */
	rcar_du_crtc_start(rcrtc);
	rcrtc->dpms = DRM_MODE_DPMS_ON;
}

static void rcar_du_crtc_disable(struct drm_crtc *crtc)
{
	rcar_du_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	/* We need to access the hardware during atomic update, acquire a
	 * reference to the CRTC.
	 */
	rcar_du_crtc_get(rcrtc);
}

static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	/* We're done, apply the configuration and drop the reference acquired
	 * in .atomic_begin().
	 */
	mutex_lock(&rcrtc->group->planes.lock);
	rcar_du_crtc_update_planes(crtc);
	mutex_unlock(&rcrtc->group->planes.lock);

	rcar_du_crtc_put(rcrtc);
}
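
	/* Transitional helpers: the legacy mode set entry points are still
	 * used alongside the atomic begin/flush hooks until the driver is
	 * fully converted to atomic updates (see the FIXME in
	 * rcar_du_crtc_start()).
	 */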

static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
	.dpms = rcar_du_crtc_dpms,
	.mode_fixup = rcar_du_crtc_mode_fixup,
	.prepare = rcar_du_crtc_mode_prepare,
	.commit = rcar_du_crtc_mode_commit,
	.mode_set = drm_helper_crtc_mode_set,
	.mode_set_nofb = rcar_du_crtc_mode_set_nofb,
	.mode_set_base = drm_helper_crtc_mode_set_base,
	.disable = rcar_du_crtc_disable,
	.atomic_begin = rcar_du_crtc_atomic_begin,
	.atomic_flush = rcar_du_crtc_atomic_flush,
};

static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
				  struct drm_framebuffer *fb,
				  struct drm_pending_vblank_event *event,
				  uint32_t page_flip_flags)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;
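
	/* Reject the request with -EBUSY if a page flip is already pending. */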

	spin_lock_irqsave(&dev->event_lock, flags);
	if (rcrtc->event != NULL) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	crtc->primary->fb = fb;
	rcar_du_crtc_update_base(rcrtc);

	if (event) {
		event->pipe = rcrtc->index;
		drm_crtc_vblank_get(crtc);
		spin_lock_irqsave(&dev->event_lock, flags);
		rcrtc->event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static const struct drm_crtc_funcs crtc_funcs = {
	.destroy = drm_crtc_cleanup,
	.set_config = drm_crtc_helper_set_config,
	.page_flip = rcar_du_crtc_page_flip,
};

/* -----------------------------------------------------------------------------
 * Interrupt Handling
 */

static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
	struct rcar_du_crtc *rcrtc = arg;
	irqreturn_t ret = IRQ_NONE;
	u32 status;
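
	/* Read the pending status bits and write them back to DSRCR, which is
	 * assumed to acknowledge the corresponding interrupts.
	 */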

	status = rcar_du_crtc_read(rcrtc, DSSR);
	rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);

	if (status & DSSR_FRM) {
		drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
		rcar_du_crtc_finish_page_flip(rcrtc);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/* -----------------------------------------------------------------------------
 * Initialization
 */

int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
{
	static const unsigned int mmio_offsets[] = {
		DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET
	};

	struct rcar_du_device *rcdu = rgrp->dev;
	struct platform_device *pdev = to_platform_device(rcdu->dev);
	struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
	struct drm_crtc *crtc = &rcrtc->crtc;
	unsigned int irqflags;
	struct clk *clk;
	char clk_name[9];
	char *name;
	int irq;
	int ret;

	/* Get the CRTC clock and the optional external clock. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
		sprintf(clk_name, "du.%u", index);
		name = clk_name;
	} else {
		name = NULL;
	}

	rcrtc->clock = devm_clk_get(rcdu->dev, name);
	if (IS_ERR(rcrtc->clock)) {
		dev_err(rcdu->dev, "no clock for CRTC %u\n", index);
		return PTR_ERR(rcrtc->clock);
	}
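
	/* The external dot clock is optional. Only probe deferral is treated
	 * as an error; otherwise the CRTC falls back to the internal clock.
	 */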

	sprintf(clk_name, "dclkin.%u", index);
	clk = devm_clk_get(rcdu->dev, clk_name);
	if (!IS_ERR(clk)) {
		rcrtc->extclock = clk;
	} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
		dev_info(rcdu->dev, "can't get external clock %u\n", index);
		return -EPROBE_DEFER;
	}

	init_waitqueue_head(&rcrtc->flip_wait);

	rcrtc->group = rgrp;
	rcrtc->mmio_offset = mmio_offsets[index];
	rcrtc->index = index;
	rcrtc->dpms = DRM_MODE_DPMS_OFF;
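	/* Each CRTC uses one of the first two planes of its group as its
	 * primary plane.
	 */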
	rcrtc->plane = &rgrp->planes.planes[index % 2];

	rcrtc->plane->crtc = crtc;

	ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, &rcrtc->plane->plane,
					NULL, &crtc_funcs);
	if (ret < 0)
		return ret;

	drm_crtc_helper_add(crtc, &crtc_helper_funcs);

	/* Start with vertical blanking interrupt reporting disabled. */
	drm_crtc_vblank_off(crtc);

	/* Register the interrupt handler. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
		irq = platform_get_irq(pdev, index);
		irqflags = 0;
	} else {
		irq = platform_get_irq(pdev, 0);
		irqflags = IRQF_SHARED;
	}

	if (irq < 0) {
		dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
		return irq;
	}

	ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
			       dev_name(rcdu->dev), rcrtc);
	if (ret < 0) {
		dev_err(rcdu->dev,
			"failed to register IRQ for CRTC %u\n", index);
		return ret;
	}

	return 0;
}

void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable)
{
	if (enable) {
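		/* Clear any pending vertical blanking interrupt status before
		 * enabling the interrupt.
		 */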
		rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
		rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
	} else {
		rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
	}
}