mtk_drm_crtc.c 22.3 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
2 3 4 5
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

S
Sam Ravnborg 已提交
6 7
#include <linux/clk.h>
#include <linux/pm_runtime.h>
8
#include <linux/soc/mediatek/mtk-cmdq.h>
9
#include <linux/soc/mediatek/mtk-mmsys.h>
10
#include <linux/soc/mediatek/mtk-mutex.h>
S
Sam Ravnborg 已提交
11

12
#include <asm/barrier.h>
S
Sam Ravnborg 已提交
13 14
#include <soc/mediatek/smi.h>

15
#include <drm/drm_atomic.h>
16 17
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
18
#include <drm/drm_probe_helper.h>
S
Sam Ravnborg 已提交
19
#include <drm/drm_vblank.h>
20 21 22 23 24 25 26

#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"

27
/*
 * struct mtk_drm_crtc - MediaTek specific crtc structure.
 * @base: crtc object.
 * @enabled: records whether crtc_enable succeeded
 * @pending_needs_vblank: a finished page flip must be signalled on the next
 *                        vblank (set in atomic_flush, consumed in the irq)
 * @event: pending vblank event to send back to userspace on flip completion
 * @planes: array of layer_nr drm_plane structures, one per overlay layer
 * @layer_nr: number of entries in @planes
 * @pending_planes: whether any plane has pending changes to be applied
 * @pending_async_planes: whether any plane has pending asynchronous changes
 * @cmdq_client: mailbox client for pushing register updates through the
 *               command queue (GCE); NULL when updates are done by the CPU
 * @cmdq_event: GCE event id the command-queue packet waits on before writing
 * @mmsys_dev: pointer to the mmsys device for configuration registers
 * @mutex: handle to one of the ten disp_mutex streams
 * @ddp_comp_nr: number of components in ddp_comp
 * @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc
 * @hw_lock: serializes hardware configuration (see mtk_drm_crtc_hw_config)
 */
struct mtk_drm_crtc {
	struct drm_crtc			base;
	bool				enabled;

	bool				pending_needs_vblank;
	struct drm_pending_vblank_event	*event;

	struct drm_plane		*planes;
	unsigned int			layer_nr;
	bool				pending_planes;
	bool				pending_async_planes;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_client		*cmdq_client;
	u32				cmdq_event;
#endif

	struct device			*mmsys_dev;
	struct mtk_mutex		*mutex;
	unsigned int			ddp_comp_nr;
	struct mtk_ddp_comp		**ddp_comp;

	/* lock for display hardware access */
	struct mutex			hw_lock;
};

/*
 * struct mtk_crtc_state - MediaTek specific crtc state
 * @base: base drm crtc state
 * @pending_config: the pending_* mode values below still need to be written
 *                  to the hardware (see mtk_crtc_ddp_config)
 * @pending_width: pending horizontal display size
 * @pending_height: pending vertical display size
 * @pending_vrefresh: pending vertical refresh rate
 */
struct mtk_crtc_state {
	struct drm_crtc_state		base;

	bool				pending_config;
	unsigned int			pending_width;
	unsigned int			pending_height;
	unsigned int			pending_vrefresh;
};

/* Convert a drm_crtc embedded in mtk_drm_crtc back to its container. */
static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
{
	return container_of(c, struct mtk_drm_crtc, base);
}

/* Convert a drm_crtc_state embedded in mtk_crtc_state to its container. */
static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct mtk_crtc_state, base);
}

/*
 * Send the stashed vblank event back to userspace and drop the vblank
 * reference that was taken in atomic_begin.  Runs in the vblank irq path,
 * hence the irqsave locking around the event list.
 */
static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
	drm_crtc_vblank_put(crtc);
	mtk_crtc->event = NULL;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}

/*
 * Per-vblank housekeeping: account the vblank with the drm core and, if a
 * page flip completion is outstanding, deliver its event.
 */
static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	drm_crtc_handle_vblank(&mtk_crtc->base);

	if (!mtk_crtc->pending_needs_vblank)
		return;

	mtk_drm_crtc_finish_page_flip(mtk_crtc);
	mtk_crtc->pending_needs_vblank = false;
}

/* Release the crtc's disp_mutex stream and unregister it from drm. */
static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	mtk_mutex_put(mtk_crtc->mutex);

	drm_crtc_cleanup(crtc);
}

/*
 * Free any existing crtc state and install a freshly zeroed one.
 *
 * Note: base is the first member of mtk_crtc_state, so
 * to_mtk_crtc_state(NULL) is still NULL and the kfree() below is a no-op
 * when there was no previous state.
 */
static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

	kfree(to_mtk_crtc_state(crtc->state));
	crtc->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

/*
 * Duplicate the current crtc state for an atomic commit.  The kzalloc()
 * leaves all pending_* fields cleared, so only changes made after the
 * duplication will be flagged for the hardware.
 */
static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	/* The helper should have linked the state to this crtc already. */
	WARN_ON(state->base.crtc != crtc);
	state->base.crtc = crtc;

	return &state->base;
}

/* Drop base-state references, then free the containing mtk_crtc_state. */
static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_mtk_crtc_state(state));
}

/* Accept every mode unchanged. */
static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	/* Nothing to do here, but this callback is mandatory. */
	return true;
}

/*
 * Record the new display mode; it is written to the hardware later, by
 * mtk_crtc_ddp_config(), which may run from the vblank irq.
 */
static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);

	state->pending_width = crtc->mode.hdisplay;
	state->pending_height = crtc->mode.vdisplay;
	state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
	wmb();	/* Make sure the above parameters are set before update */
	state->pending_config = true;
}

/*
 * Enable the clock of every component in this crtc's path.  On the first
 * failure, disable the clocks enabled so far and return the error.
 */
static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
{
	int enabled;

	for (enabled = 0; enabled < mtk_crtc->ddp_comp_nr; enabled++) {
		int ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[enabled]);

		if (ret) {
			DRM_ERROR("Failed to enable clock %d: %d\n",
				  enabled, ret);
			/* Unwind every clock enabled before this one. */
			while (--enabled >= 0)
				mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[enabled]);
			return ret;
		}
	}

	return 0;
}

/* Disable the clock of every component in this crtc's path. */
static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
{
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
}

200 201 202 203 204 205 206 207
/*
 * Map a drm_plane of this crtc to the ddp component that implements it.
 *
 * Planes are distributed over the path components in order: the first
 * component owns plane indices [0, layer_nr), the next component the
 * following indices, and so on.  On success, *local_layer receives the
 * layer index within the returned component.  Returns NULL (after a WARN)
 * if the plane index falls past every component's layers.
 */
static
struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
						struct drm_plane *plane,
						unsigned int *local_layer)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp;
	int i, count = 0;
	/* Index of the plane within this crtc's planes array. */
	unsigned int local_index = plane - mtk_crtc->planes;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		comp = mtk_crtc->ddp_comp[i];
		if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
			*local_layer = local_index - count;
			return comp;
		}
		count += mtk_ddp_comp_layer_nr(comp);
	}

	WARN(1, "Failed to find component for plane %d\n", plane->index);
	return NULL;
}

223 224 225 226 227 228 229
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
/*
 * Command-queue flush completion callback: free the packet that was
 * allocated in mtk_drm_crtc_hw_config() (passed back via data.data).
 */
static void ddp_cmdq_cb(struct cmdq_cb_data data)
{
	cmdq_pkt_destroy(data.data);
}
#endif

230 231 232
/*
 * Power up and configure the whole display path of this crtc: enable the
 * power domain, the mutex and component clocks, connect the components in
 * path order, program the current mode, and push the initial plane
 * configuration.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * acquired so far is released again.
 */
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_connector_list_iter conn_iter;
	unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
	int ret;
	int i;

	if (WARN_ON(!crtc->state))
		return -EINVAL;

	width = crtc->state->adjusted_mode.hdisplay;
	height = crtc->state->adjusted_mode.vdisplay;
	vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);

	/* Use the lowest bpc advertised by any connector driven by this crtc. */
	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->encoder != encoder)
				continue;
			if (connector->display_info.bpc != 0 &&
			    bpc > connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	/*
	 * Unlike pm_runtime_get_sync(), pm_runtime_resume_and_get() drops
	 * the usage count again when the resume fails, so the error path
	 * must not (and does not) issue a put.
	 */
	ret = pm_runtime_resume_and_get(crtc->dev->dev);
	if (ret < 0) {
		DRM_ERROR("Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = mtk_mutex_prepare(mtk_crtc->mutex);
	if (ret < 0) {
		DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
		goto err_pm_runtime_put;
	}

	ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
	if (ret < 0) {
		DRM_ERROR("Failed to enable component clocks: %d\n", ret);
		goto err_mutex_unprepare;
	}

	/* Chain the components and attach each one to the mutex stream. */
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
				      mtk_crtc->ddp_comp[i]->id,
				      mtk_crtc->ddp_comp[i + 1]->id);
		mtk_mutex_add_comp(mtk_crtc->mutex,
					mtk_crtc->ddp_comp[i]->id);
	}
	mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_enable(mtk_crtc->mutex);

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];

		/* The second component blends over a background color. */
		if (i == 1)
			mtk_ddp_comp_bgclr_in_on(comp);

		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
		mtk_ddp_comp_start(comp);
	}

	/* Initially configure all planes */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;
		struct mtk_ddp_comp *comp;
		unsigned int local_layer;

		plane_state = to_mtk_plane_state(plane->state);
		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
		if (comp)
			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state, NULL);
	}

	return 0;

err_mutex_unprepare:
	mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
	pm_runtime_put(crtc->dev->dev);
	return ret;
}

/*
 * Counterpart of mtk_crtc_ddp_hw_init(): stop all components, detach them
 * from the mutex stream, disconnect the path, disable the clocks and drop
 * the runtime-PM reference, roughly mirroring the init order in reverse.
 */
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_device *drm = mtk_crtc->base.dev;
	struct drm_crtc *crtc = &mtk_crtc->base;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
		/* Undo the bgclr_in_on applied to the second component. */
		if (i == 1)
			mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
	}

	/*
	 * NOTE(review): each component is removed from the mutex here AND
	 * again in the disconnect loop below — presumably a harmless
	 * double-remove, but worth confirming against the mutex driver.
	 */
	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_mutex_remove_comp(mtk_crtc->mutex,
					   mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_disable(mtk_crtc->mutex);
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
					 mtk_crtc->ddp_comp[i]->id,
					 mtk_crtc->ddp_comp[i + 1]->id);
		mtk_mutex_remove_comp(mtk_crtc->mutex,
					   mtk_crtc->ddp_comp[i]->id);
	}
	mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_crtc_ddp_clk_disable(mtk_crtc);
	mtk_mutex_unprepare(mtk_crtc->mutex);

	pm_runtime_put(drm->dev);

	/*
	 * If the crtc is switched off while an event is still attached,
	 * no further vblank will fire, so deliver the event right away.
	 */
	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}

360 361
/*
 * Write the pending crtc mode and plane configuration to the hardware.
 * With cmdq_handle == NULL the registers are written directly by the CPU;
 * otherwise the writes are recorded into the command-queue packet and
 * replayed by the GCE hardware.
 */
static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
				struct cmdq_pkt *cmdq_handle)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	unsigned int i;
	unsigned int local_layer;

	/*
	 * TODO: instead of updating the registers here, we should prepare
	 * working registers in atomic_commit and let the hardware command
	 * queue update module registers on vblank.
	 */
	if (state->pending_config) {
		/* Apply the new mode on the first component of the path. */
		mtk_ddp_comp_config(comp, state->pending_width,
				    state->pending_height,
				    state->pending_vrefresh, 0,
				    cmdq_handle);

		state->pending_config = false;
	}

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			plane_state->pending.config = false;
		}
		mtk_crtc->pending_planes = false;
	}

	/* Same as above, but for asynchronous (e.g. cursor) plane updates. */
	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.async_config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			plane_state->pending.async_config = false;
		}
		mtk_crtc->pending_async_planes = false;
	}
}

/*
 * Latch all dirty plane state into "pending" flags and push it to the
 * hardware — directly, through the mutex shadow registers, or through the
 * command queue, depending on the SoC.  Serialized by hw_lock.
 */
static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_pkt *cmdq_handle;
#endif
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	unsigned int pending_planes = 0, pending_async_planes = 0;
	int i;

	mutex_lock(&mtk_crtc->hw_lock);
	/* Promote per-plane dirty flags to config flags under the lock. */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		if (plane_state->pending.dirty) {
			plane_state->pending.config = true;
			plane_state->pending.dirty = false;
			pending_planes |= BIT(i);
		} else if (plane_state->pending.async_dirty) {
			plane_state->pending.async_config = true;
			plane_state->pending.async_dirty = false;
			pending_async_planes |= BIT(i);
		}
	}
	if (pending_planes)
		mtk_crtc->pending_planes = true;
	if (pending_async_planes)
		mtk_crtc->pending_async_planes = true;

	if (priv->data->shadow_register) {
		/* Hardware applies shadowed writes on the next vblank. */
		mtk_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc, NULL);
		mtk_mutex_release(mtk_crtc->mutex);
	}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client) {
		/* Drop any packet still queued before building a new one. */
		mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
		cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
		/* Wait for the display event before touching registers. */
		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
		mtk_crtc_ddp_config(crtc, cmdq_handle);
		cmdq_pkt_finalize(cmdq_handle);
		/* ddp_cmdq_cb() destroys the packet once it has run. */
		cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
	}
#endif
	mutex_unlock(&mtk_crtc->hw_lock);
}

478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511
/*
 * Per-frame interrupt from the first component of the path: apply pending
 * configuration by CPU unless shadow registers or the command queue do it
 * for us, then complete any outstanding page flip.
 */
static void mtk_crtc_ddp_irq(void *data)
{
	struct drm_crtc *crtc = data;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv = crtc->dev->dev_private;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
#else
	if (!priv->data->shadow_register)
#endif
		mtk_crtc_ddp_config(crtc, NULL);

	mtk_drm_finish_page_flip(mtk_crtc);
}

/* Arm the frame interrupt on the first component of the path. */
static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_enable_vblank(comp, mtk_crtc_ddp_irq, &mtk_crtc->base);

	return 0;
}

/* Disarm the frame interrupt on the first component of the path. */
static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_disable_vblank(comp);
}

512 513 514 515 516 517 518
/*
 * Validate a plane state against the layer of the ddp component that
 * backs it.  Planes that map to no component are accepted as-is.
 */
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
			     struct mtk_plane_state *state)
{
	struct mtk_ddp_comp *comp;
	unsigned int local_layer;

	comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
	if (!comp)
		return 0;

	return mtk_ddp_comp_layer_check(comp, local_layer, state);
}

524 525 526 527 528 529 530 531 532 533 534 535 536 537
/*
 * Fast path for asynchronous (cursor) plane updates: apply the new plane
 * state and push it to the hardware without a full atomic commit.  A
 * disabled crtc is silently ignored.
 */
void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	const struct drm_plane_helper_funcs *plane_helper_funcs =
			plane->helper_private;

	if (!mtk_crtc->enabled)
		return;

	plane_helper_funcs->atomic_update(plane, new_state);
	mtk_drm_crtc_hw_config(mtk_crtc);
}

538
/*
 * Enable the crtc: power up the SMI larb used by the first component for
 * DMA, bring up the whole display path, and start vblank handling.
 * Errors leave the crtc disabled (this callback cannot return them).
 */
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);

	ret = mtk_smi_larb_get(comp->larb_dev);
	if (ret) {
		DRM_ERROR("Failed to get larb: %d\n", ret);
		return;
	}

	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
	if (ret) {
		mtk_smi_larb_put(comp->larb_dev);
		return;
	}

	drm_crtc_vblank_on(crtc);
	mtk_crtc->enabled = true;
}

563
/*
 * Disable the crtc: mark every plane disabled, let one more frame latch
 * that state into the hardware, then tear the display path down and
 * release the SMI larb reference taken in atomic_enable.
 */
static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int i;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
	if (!mtk_crtc->enabled)
		return;

	/* Set all pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		plane_state->pending.enable = false;
		plane_state->pending.config = true;
	}
	mtk_crtc->pending_planes = true;

	mtk_drm_crtc_hw_config(mtk_crtc);
	/* Wait for planes to be disabled */
	drm_crtc_wait_one_vblank(crtc);

	drm_crtc_vblank_off(crtc);
	mtk_crtc_ddp_hw_fini(mtk_crtc);
	mtk_smi_larb_put(comp->larb_dev);

	mtk_crtc->enabled = false;
}

/*
 * Start of an atomic commit: take ownership of the commit's vblank event
 * (if any) so it can be delivered from the irq path on flip completion.
 * A vblank reference is taken here and dropped in
 * mtk_drm_crtc_finish_page_flip().
 */
static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (mtk_crtc->event && mtk_crtc_state->base.event)
		DRM_ERROR("new event while there is still a pending event\n");

	if (mtk_crtc_state->base.event) {
		mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		mtk_crtc->event = mtk_crtc_state->base.event;
		mtk_crtc_state->base.event = NULL;
	}
}

static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
616
				      struct drm_atomic_state *state)
617 618 619 620 621 622
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	if (mtk_crtc->event)
		mtk_crtc->pending_needs_vblank = true;
623
	if (crtc->state->color_mgmt_changed)
624
		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
625
			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
626 627
			mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
		}
628
	mtk_drm_crtc_hw_config(mtk_crtc);
629 630 631 632 633 634 635 636 637
}

/* drm_crtc core callbacks for MediaTek crtcs. */
static const struct drm_crtc_funcs mtk_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= mtk_drm_crtc_destroy,
	.reset			= mtk_drm_crtc_reset,
	.atomic_duplicate_state	= mtk_drm_crtc_duplicate_state,
	.atomic_destroy_state	= mtk_drm_crtc_destroy_state,
	.gamma_set		= drm_atomic_helper_legacy_gamma_set,
	.enable_vblank		= mtk_drm_crtc_enable_vblank,
	.disable_vblank		= mtk_drm_crtc_disable_vblank,
};

/* Atomic helper callbacks for MediaTek crtcs. */
static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
	.mode_fixup	= mtk_drm_crtc_mode_fixup,
	.mode_set_nofb	= mtk_drm_crtc_mode_set_nofb,
	.atomic_begin	= mtk_drm_crtc_atomic_begin,
	.atomic_flush	= mtk_drm_crtc_atomic_flush,
	.atomic_enable	= mtk_drm_crtc_atomic_enable,
	.atomic_disable	= mtk_drm_crtc_atomic_disable,
};

static int mtk_drm_crtc_init(struct drm_device *drm,
			     struct mtk_drm_crtc *mtk_crtc,
654
			     unsigned int pipe)
655
{
656 657 658 659 660 661 662 663 664 665
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
			primary = &mtk_crtc->planes[i];
		else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
			cursor = &mtk_crtc->planes[i];
	}
666 667 668 669 670 671 672 673 674 675 676 677 678 679 680

	ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
					&mtk_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_crtc;

	drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(&mtk_crtc->base);
	return ret;
}

S
Sean Paul 已提交
681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699
/*
 * Number of drm planes contributed by the component at comp_idx.  Only the
 * first two components of a path may contribute planes, and the second one
 * only when it supports blending over a background color.
 */
static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
					int comp_idx)
{
	struct mtk_ddp_comp *comp;

	if (comp_idx > 1)
		return 0;

	comp = mtk_crtc->ddp_comp[comp_idx];
	if (!comp->funcs)
		return 0;

	if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
		return 0;

	return mtk_ddp_comp_layer_nr(comp);
}

static inline
700 701
enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
					    unsigned int num_planes)
S
Sean Paul 已提交
702 703 704
{
	if (plane_idx == 0)
		return DRM_PLANE_TYPE_PRIMARY;
705
	else if (plane_idx == (num_planes - 1))
S
Sean Paul 已提交
706 707 708 709 710 711 712 713 714 715 716
		return DRM_PLANE_TYPE_CURSOR;
	else
		return DRM_PLANE_TYPE_OVERLAY;

}

/*
 * Create the drm planes backed by the component at comp_idx, appending
 * them to mtk_crtc->planes and bumping layer_nr for each one created.
 * Returns 0 on success or the first mtk_plane_init() error.
 */
static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
					 struct mtk_drm_crtc *mtk_crtc,
					 int comp_idx, int pipe)
{
	int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		ret = mtk_plane_init(drm_dev,
				&mtk_crtc->planes[mtk_crtc->layer_nr],
				BIT(pipe),
				mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
							num_planes),
				mtk_ddp_comp_supported_rotations(comp));
		if (ret)
			return ret;

		mtk_crtc->layer_nr++;
	}
	return 0;
}

735 736 737 738 739 740
int mtk_drm_crtc_create(struct drm_device *drm_dev,
			const enum mtk_ddp_comp_id *path, unsigned int path_len)
{
	struct mtk_drm_private *priv = drm_dev->dev_private;
	struct device *dev = drm_dev->dev;
	struct mtk_drm_crtc *mtk_crtc;
S
Sean Paul 已提交
741
	unsigned int num_comp_planes = 0;
742 743 744
	int pipe = priv->num_pipes;
	int ret;
	int i;
745
	bool has_ctm = false;
746
	uint gamma_lut_size = 0;
747

748 749 750
	if (!path)
		return 0;

751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767
	for (i = 0; i < path_len; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct device_node *node;

		node = priv->comp_node[comp_id];
		if (!node) {
			dev_info(dev,
				 "Not creating crtc %d because component %d is disabled or missing\n",
				 pipe, comp_id);
			return 0;
		}
	}

	mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
	if (!mtk_crtc)
		return -ENOMEM;

768
	mtk_crtc->mmsys_dev = priv->mmsys_dev;
769 770 771 772
	mtk_crtc->ddp_comp_nr = path_len;
	mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
						sizeof(*mtk_crtc->ddp_comp),
						GFP_KERNEL);
773 774
	if (!mtk_crtc->ddp_comp)
		return -ENOMEM;
775

776
	mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
777 778 779 780 781 782 783 784 785 786 787 788
	if (IS_ERR(mtk_crtc->mutex)) {
		ret = PTR_ERR(mtk_crtc->mutex);
		dev_err(dev, "Failed to get mutex: %d\n", ret);
		return ret;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct mtk_ddp_comp *comp;
		struct device_node *node;

		node = priv->comp_node[comp_id];
789
		comp = &priv->ddp_comp[comp_id];
790
		if (!comp) {
791
			dev_err(dev, "Component %pOF not initialized\n", node);
792
			ret = -ENODEV;
793
			return ret;
794 795 796
		}

		mtk_crtc->ddp_comp[i] = comp;
797

798 799 800 801 802 803 804
		if (comp->funcs) {
			if (comp->funcs->gamma_set)
				gamma_lut_size = MTK_LUT_SIZE;

			if (comp->funcs->ctm_set)
				has_ctm = true;
		}
805 806
	}

S
Sean Paul 已提交
807 808 809 810 811
	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);

	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
					sizeof(struct drm_plane), GFP_KERNEL);
812

S
Sean Paul 已提交
813 814 815
	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
						    pipe);
816
		if (ret)
817
			return ret;
818 819
	}

820
	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
821
	if (ret < 0)
822
		return ret;
823 824 825

	if (gamma_lut_size)
		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
826
	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
827
	priv->num_pipes++;
828
	mutex_init(&mtk_crtc->hw_lock);
829

830 831
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	mtk_crtc->cmdq_client =
832
			cmdq_mbox_create(mtk_crtc->mmsys_dev,
833
					 drm_crtc_index(&mtk_crtc->base));
834 835 836 837 838
	if (IS_ERR(mtk_crtc->cmdq_client)) {
		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
			drm_crtc_index(&mtk_crtc->base));
		mtk_crtc->cmdq_client = NULL;
	}
839 840 841 842 843 844 845 846 847 848 849 850 851

	if (mtk_crtc->cmdq_client) {
		ret = of_property_read_u32_index(priv->mutex_node,
						 "mediatek,gce-events",
						 drm_crtc_index(&mtk_crtc->base),
						 &mtk_crtc->cmdq_event);
		if (ret) {
			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
				drm_crtc_index(&mtk_crtc->base));
			cmdq_mbox_destroy(mtk_crtc->cmdq_client);
			mtk_crtc->cmdq_client = NULL;
		}
	}
852
#endif
853 854
	return 0;
}