// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>

#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"

/**
 * struct mtk_drm_crtc - MediaTek specific crtc structure.
 * @base: crtc object.
 * @enabled: records whether crtc_enable succeeded
 * @pending_needs_vblank: a vblank event still has to be sent for the
 *                        committed configuration
 * @event: cached pending vblank event, delivered from the irq path
 * @planes: array of drm_plane structures, one per hardware layer
 * @layer_nr: number of planes in @planes
 * @pending_planes: whether any plane has pending changes to be applied
 * @pending_async_planes: whether any plane has pending asynchronous
 *                        (cursor) changes to be applied
 * @cmdq_client: mailbox client used to flush configs through GCE, or
 *               NULL when registers are written by the CPU
 * @cmdq_event: GCE event id cleared/waited on in each command packet
 * @mmsys_dev: pointer to the mmsys device for configuration registers
 * @mutex: handle to one of the ten disp_mutex streams
 * @ddp_comp_nr: number of components in ddp_comp
 * @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc
 * @hw_lock: lock for display hardware access
 */
struct mtk_drm_crtc {
	struct drm_crtc			base;
	bool				enabled;

	bool				pending_needs_vblank;
	struct drm_pending_vblank_event	*event;

	struct drm_plane		*planes;
	unsigned int			layer_nr;
	bool				pending_planes;
	bool				pending_async_planes;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_client		*cmdq_client;
	u32				cmdq_event;
#endif

	struct device			*mmsys_dev;
	struct mtk_disp_mutex		*mutex;
	unsigned int			ddp_comp_nr;
	struct mtk_ddp_comp		**ddp_comp;

	/* lock for display hardware access */
	struct mutex			hw_lock;
};

/*
 * struct mtk_crtc_state - MediaTek specific crtc state
 * @base: embedded drm_crtc_state
 * @pending_config: the width/height/vrefresh below still need to be
 *                  written to the hardware
 * @pending_width: horizontal display size of the new mode
 * @pending_height: vertical display size of the new mode
 * @pending_vrefresh: vertical refresh rate of the new mode
 */
struct mtk_crtc_state {
	struct drm_crtc_state		base;

	bool				pending_config;
	unsigned int			pending_width;
	unsigned int			pending_height;
	unsigned int			pending_vrefresh;
};

/* Upcast from the embedded drm_crtc to the MediaTek crtc. */
static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
{
	return container_of(c, struct mtk_drm_crtc, base);
}

/* Upcast from the embedded drm_crtc_state to the MediaTek crtc state. */
static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct mtk_crtc_state, base);
}

/*
 * Send the cached vblank event for this crtc and drop the vblank
 * reference taken in atomic_begin.  Must only be called when
 * mtk_crtc->event is set; the event is cleared under event_lock.
 */
static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
	drm_crtc_vblank_put(crtc);
	mtk_crtc->event = NULL;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}

/* Notify DRM core of the vblank and complete a pending page flip, if any. */
static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	drm_crtc_handle_vblank(&mtk_crtc->base);

	if (!mtk_crtc->pending_needs_vblank)
		return;

	mtk_drm_crtc_finish_page_flip(mtk_crtc);
	mtk_crtc->pending_needs_vblank = false;
}

/* crtc .destroy hook: release the disp_mutex stream and core crtc state. */
static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	mtk_disp_mutex_put(mtk_crtc->mutex);

	drm_crtc_cleanup(crtc);
}

static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

116
	if (crtc->state)
117
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
118

119 120
	kfree(to_mtk_crtc_state(crtc->state));
	crtc->state = NULL;
121

122 123 124
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145
}

/*
 * crtc .atomic_duplicate_state hook: copy the current state into a newly
 * allocated mtk_crtc_state; the pending_* fields start out cleared.
 */
static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	WARN_ON(state->base.crtc != crtc);
	state->base.crtc = crtc;

	return &state->base;
}

/* crtc .atomic_destroy_state hook: free a state made by duplicate_state. */
static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_mtk_crtc_state(state));
}

/* crtc .mode_fixup hook: accept every mode unchanged. */
static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	/* Nothing to do here, but this callback is mandatory. */
	return true;
}

/*
 * crtc .mode_set_nofb hook: latch the new mode's size and refresh rate
 * into the software state; they reach the hardware on the next config.
 */
static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);

	state->pending_width = crtc->mode.hdisplay;
	state->pending_height = crtc->mode.vdisplay;
	state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
	wmb();	/* Make sure the above parameters are set before update */
	state->pending_config = true;
}

169
static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
170
{
171
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
172
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
173

174
	mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
175 176 177 178

	return 0;
}

179
static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
180
{
181
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
182
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
183

184
	mtk_ddp_comp_disable_vblank(comp);
185 186 187 188 189 190 191 192
}

/*
 * Prepare and enable the clock of every ddp component on this crtc.
 * On failure, clocks that were already enabled are rolled back.
 * Returns 0 on success or the first clk_prepare_enable() error.
 */
static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
{
	int ret;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
		if (ret) {
			DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= 0)
		clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
	return ret;
}

/* Disable and unprepare the clock of every ddp component on this crtc. */
static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
{
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
}

/*
 * Map @plane to the ddp component that drives it and, via @local_layer,
 * to the layer index inside that component.  Planes are distributed over
 * the components in path order, each taking mtk_ddp_comp_layer_nr() of
 * them.  Returns NULL (with a WARN) when the index is out of range.
 */
static
struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
						struct drm_plane *plane,
						unsigned int *local_layer)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp;
	int i, count = 0;
	/* Index of the plane within this crtc's contiguous plane array. */
	unsigned int local_index = plane - mtk_crtc->planes;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		comp = mtk_crtc->ddp_comp[i];
		if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
			*local_layer = local_index - count;
			return comp;
		}
		count += mtk_ddp_comp_layer_nr(comp);
	}

	WARN(1, "Failed to find component for plane %d\n", plane->index);
	return NULL;
}

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
/* Command-queue flush completion callback: free the finished packet. */
static void ddp_cmdq_cb(struct cmdq_cb_data data)
{
	cmdq_pkt_destroy(data.data);
}
#endif

/*
 * Power up and configure the whole ddp path of this crtc: runtime-resume
 * the display power domain, enable mutex and component clocks, chain the
 * components through mmsys, start them, and push the initial plane
 * configuration.  Returns 0 on success or a negative error code.
 */
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_connector_list_iter conn_iter;
	unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
	int ret;
	int i;

	if (WARN_ON(!crtc->state))
		return -EINVAL;

	width = crtc->state->adjusted_mode.hdisplay;
	height = crtc->state->adjusted_mode.vdisplay;
	vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);

	/* Use the lowest non-zero bpc advertised by a connector on this crtc. */
	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->encoder != encoder)
				continue;
			if (connector->display_info.bpc != 0 &&
			    bpc > connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	ret = pm_runtime_get_sync(crtc->dev->dev);
	if (ret < 0) {
		DRM_ERROR("Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = mtk_disp_mutex_prepare(mtk_crtc->mutex);
	if (ret < 0) {
		DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
		goto err_pm_runtime_put;
	}

	ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
	if (ret < 0) {
		DRM_ERROR("Failed to enable component clocks: %d\n", ret);
		goto err_mutex_unprepare;
	}

	/* Connect consecutive components and add each to the mutex stream. */
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
				      mtk_crtc->ddp_comp[i]->id,
				      mtk_crtc->ddp_comp[i + 1]->id);
		mtk_disp_mutex_add_comp(mtk_crtc->mutex,
					mtk_crtc->ddp_comp[i]->id);
	}
	mtk_disp_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_disp_mutex_enable(mtk_crtc->mutex);

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];

		/* Second component in the path takes background-color input. */
		if (i == 1)
			mtk_ddp_comp_bgclr_in_on(comp);

		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
		mtk_ddp_comp_start(comp);
	}

	/* Initially configure all planes */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;
		struct mtk_ddp_comp *comp;
		unsigned int local_layer;

		plane_state = to_mtk_plane_state(plane->state);
		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
		if (comp)
			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state, NULL);
	}

	return 0;

err_mutex_unprepare:
	mtk_disp_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
	pm_runtime_put(crtc->dev->dev);
	return ret;
}

/*
 * Tear down the ddp path: stop all components, detach them from the
 * mutex stream, break the mmsys connections, drop the clocks and release
 * the runtime PM reference taken in mtk_crtc_ddp_hw_init().  If a vblank
 * event is still pending while the crtc goes inactive, deliver it now.
 */
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_device *drm = mtk_crtc->base.dev;
	struct drm_crtc *crtc = &mtk_crtc->base;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
		/* Undo the background-color input enabled during hw_init. */
		if (i == 1)
			mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
					   mtk_crtc->ddp_comp[i]->id);
	mtk_disp_mutex_disable(mtk_crtc->mutex);
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
					 mtk_crtc->ddp_comp[i]->id,
					 mtk_crtc->ddp_comp[i + 1]->id);
		mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
					   mtk_crtc->ddp_comp[i]->id);
	}
	mtk_disp_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_crtc_ddp_clk_disable(mtk_crtc);
	mtk_disp_mutex_unprepare(mtk_crtc->mutex);

	pm_runtime_put(drm->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}

/*
 * Write the pending crtc and plane configuration to the hardware, either
 * directly (cmdq_handle == NULL) or by recording the register writes
 * into a command-queue packet for GCE to apply.
 */
static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
				struct cmdq_pkt *cmdq_handle)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	unsigned int i;
	unsigned int local_layer;

	/*
	 * TODO: instead of updating the registers here, we should prepare
	 * working registers in atomic_commit and let the hardware command
	 * queue update module registers on vblank.
	 */
	if (state->pending_config) {
		mtk_ddp_comp_config(comp, state->pending_width,
				    state->pending_height,
				    state->pending_vrefresh, 0,
				    cmdq_handle);

		state->pending_config = false;
	}

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			plane_state->pending.config = false;
		}
		mtk_crtc->pending_planes = false;
	}

	/* Same as above, for asynchronous (cursor) plane updates. */
	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.async_config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			plane_state->pending.async_config = false;
		}
		mtk_crtc->pending_async_planes = false;
	}
}

/*
 * Collect dirty plane state and push the configuration to the hardware,
 * serialised by hw_lock.  With shadow registers the update is latched
 * under the disp mutex; with a GCE mailbox client it is flushed as an
 * asynchronous command-queue packet; otherwise the vblank irq path
 * applies it (see mtk_crtc_ddp_irq()).
 */
static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_pkt *cmdq_handle;
#endif
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	unsigned int pending_planes = 0, pending_async_planes = 0;
	int i;

	mutex_lock(&mtk_crtc->hw_lock);
	/* Promote dirty plane state to pending; sync takes priority. */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		if (plane_state->pending.dirty) {
			plane_state->pending.config = true;
			plane_state->pending.dirty = false;
			pending_planes |= BIT(i);
		} else if (plane_state->pending.async_dirty) {
			plane_state->pending.async_config = true;
			plane_state->pending.async_dirty = false;
			pending_async_planes |= BIT(i);
		}
	}
	if (pending_planes)
		mtk_crtc->pending_planes = true;
	if (pending_async_planes)
		mtk_crtc->pending_async_planes = true;

	if (priv->data->shadow_register) {
		mtk_disp_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc, NULL);
		mtk_disp_mutex_release(mtk_crtc->mutex);
	}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client) {
		/* Flush the channel before building a new packet. */
		mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
		cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
		mtk_crtc_ddp_config(crtc, cmdq_handle);
		cmdq_pkt_finalize(cmdq_handle);
		/* ddp_cmdq_cb() destroys the packet once it completes. */
		cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
	}
#endif
	mutex_unlock(&mtk_crtc->hw_lock);
}

/*
 * Validate @state with the ddp component that backs @plane.  A plane
 * that maps to no component is accepted as-is.
 */
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
			     struct mtk_plane_state *state)
{
	unsigned int local_layer;
	struct mtk_ddp_comp *comp;

	comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
	if (!comp)
		return 0;

	return mtk_ddp_comp_layer_check(comp, local_layer, state);
}

/*
 * Apply an asynchronous (cursor-style) plane update immediately, outside
 * a full atomic commit.  No-op while the crtc is disabled.
 */
void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	const struct drm_plane_helper_funcs *plane_helper_funcs =
			plane->helper_private;

	if (!mtk_crtc->enabled)
		return;

	plane_helper_funcs->atomic_update(plane, new_state);
	mtk_drm_crtc_hw_config(mtk_crtc);
}

/*
 * crtc .atomic_enable hook: grab the SMI larb of the first component,
 * bring up the ddp path and turn on vblank handling.  Errors are logged
 * but cannot be propagated (the hook returns void).
 */
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);

	ret = mtk_smi_larb_get(comp->larb_dev);
	if (ret) {
		DRM_ERROR("Failed to get larb: %d\n", ret);
		return;
	}

	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
	if (ret) {
		mtk_smi_larb_put(comp->larb_dev);
		return;
	}

	drm_crtc_vblank_on(crtc);
	mtk_crtc->enabled = true;
}

/*
 * crtc .atomic_disable hook: disable every plane, wait one vblank so the
 * hardware latches the change, then shut the ddp path down and release
 * the SMI larb taken in atomic_enable.
 */
static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_crtc_state *old_state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int i;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
	if (!mtk_crtc->enabled)
		return;

	/* Set all pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		plane_state->pending.enable = false;
		plane_state->pending.config = true;
	}
	mtk_crtc->pending_planes = true;

	mtk_drm_crtc_hw_config(mtk_crtc);
	/* Wait for planes to be disabled */
	drm_crtc_wait_one_vblank(crtc);

	drm_crtc_vblank_off(crtc);
	mtk_crtc_ddp_hw_fini(mtk_crtc);
	mtk_smi_larb_put(comp->larb_dev);

	mtk_crtc->enabled = false;
}

/*
 * crtc .atomic_begin hook: take a vblank reference and cache the
 * commit's event so the irq path can deliver it once the new
 * configuration is active.
 */
static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_crtc_state)
{
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (mtk_crtc->event && state->base.event)
		DRM_ERROR("new event while there is still a pending event\n");

	if (state->base.event) {
		state->base.event->pipe = drm_crtc_index(crtc);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		mtk_crtc->event = state->base.event;
		state->base.event = NULL;
	}
}

/*
 * crtc .atomic_flush hook: update gamma/CTM on every component when
 * color management changed, then push the configuration to the hardware.
 */
static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_crtc_state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	if (mtk_crtc->event)
		mtk_crtc->pending_needs_vblank = true;
	if (crtc->state->color_mgmt_changed)
		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
			mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
		}
	mtk_drm_crtc_hw_config(mtk_crtc);
}

/* crtc core callbacks: legacy entry points go through the atomic helpers. */
static const struct drm_crtc_funcs mtk_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= mtk_drm_crtc_destroy,
	.reset			= mtk_drm_crtc_reset,
	.atomic_duplicate_state	= mtk_drm_crtc_duplicate_state,
	.atomic_destroy_state	= mtk_drm_crtc_destroy_state,
	.gamma_set		= drm_atomic_helper_legacy_gamma_set,
	.enable_vblank		= mtk_drm_crtc_enable_vblank,
	.disable_vblank		= mtk_drm_crtc_disable_vblank,
};

/* crtc helper callbacks used by the atomic commit machinery. */
static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
	.mode_fixup	= mtk_drm_crtc_mode_fixup,
	.mode_set_nofb	= mtk_drm_crtc_mode_set_nofb,
	.atomic_begin	= mtk_drm_crtc_atomic_begin,
	.atomic_flush	= mtk_drm_crtc_atomic_flush,
	.atomic_enable	= mtk_drm_crtc_atomic_enable,
	.atomic_disable	= mtk_drm_crtc_atomic_disable,
};

/*
 * Register the crtc with the DRM core, binding the primary and cursor
 * planes found in mtk_crtc->planes.
 * NOTE(review): @pipe is currently unused here.
 */
static int mtk_drm_crtc_init(struct drm_device *drm,
			     struct mtk_drm_crtc *mtk_crtc,
			     unsigned int pipe)
{
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
			primary = &mtk_crtc->planes[i];
		else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
			cursor = &mtk_crtc->planes[i];
	}

	ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
					&mtk_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_crtc;

	drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(&mtk_crtc->base);
	return ret;
}

/*
 * Vblank irq from the first ddp component: apply the pending
 * configuration directly unless shadow registers or the command queue
 * handle that, then finish any pending page flip.
 */
void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv = crtc->dev->dev_private;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
#else
	if (!priv->data->shadow_register)
#endif
		mtk_crtc_ddp_config(crtc, NULL);

	mtk_drm_finish_page_flip(mtk_crtc);
}

/*
 * Number of planes contributed by the component at @comp_idx.  Only the
 * first two components in the path may drive planes; the second one only
 * when it supports background-color input (i.e. can blend on top of the
 * first component's output).
 */
static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
					int comp_idx)
{
	struct mtk_ddp_comp *comp;

	if (comp_idx > 1)
		return 0;

	comp = mtk_crtc->ddp_comp[comp_idx];
	if (!comp->funcs)
		return 0;

	if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
		return 0;

	return mtk_ddp_comp_layer_nr(comp);
}

static inline
694 695
enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
					    unsigned int num_planes)
S
Sean Paul 已提交
696 697 698
{
	if (plane_idx == 0)
		return DRM_PLANE_TYPE_PRIMARY;
699
	else if (plane_idx == (num_planes - 1))
S
Sean Paul 已提交
700 701 702 703 704 705 706 707 708 709 710
		return DRM_PLANE_TYPE_CURSOR;
	else
		return DRM_PLANE_TYPE_OVERLAY;

}

/*
 * Create the drm_planes backed by the component at @comp_idx, appending
 * them to mtk_crtc->planes and advancing layer_nr.  The plane type is
 * chosen by mtk_drm_crtc_plane_type() from the running layer index.
 * Returns 0 on success or the first mtk_plane_init() error.
 */
static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
					 struct mtk_drm_crtc *mtk_crtc,
					 int comp_idx, int pipe)
{
	int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		ret = mtk_plane_init(drm_dev,
				&mtk_crtc->planes[mtk_crtc->layer_nr],
				BIT(pipe),
				mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
							num_planes),
				mtk_ddp_comp_supported_rotations(comp));
		if (ret)
			return ret;

		mtk_crtc->layer_nr++;
	}
	return 0;
}

729 730 731 732 733 734
int mtk_drm_crtc_create(struct drm_device *drm_dev,
			const enum mtk_ddp_comp_id *path, unsigned int path_len)
{
	struct mtk_drm_private *priv = drm_dev->dev_private;
	struct device *dev = drm_dev->dev;
	struct mtk_drm_crtc *mtk_crtc;
S
Sean Paul 已提交
735
	unsigned int num_comp_planes = 0;
736 737 738
	int pipe = priv->num_pipes;
	int ret;
	int i;
739
	bool has_ctm = false;
740
	uint gamma_lut_size = 0;
741

742 743 744
	if (!path)
		return 0;

745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761
	for (i = 0; i < path_len; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct device_node *node;

		node = priv->comp_node[comp_id];
		if (!node) {
			dev_info(dev,
				 "Not creating crtc %d because component %d is disabled or missing\n",
				 pipe, comp_id);
			return 0;
		}
	}

	mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
	if (!mtk_crtc)
		return -ENOMEM;

762
	mtk_crtc->mmsys_dev = priv->mmsys_dev;
763 764 765 766
	mtk_crtc->ddp_comp_nr = path_len;
	mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
						sizeof(*mtk_crtc->ddp_comp),
						GFP_KERNEL);
767 768
	if (!mtk_crtc->ddp_comp)
		return -ENOMEM;
769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784

	mtk_crtc->mutex = mtk_disp_mutex_get(priv->mutex_dev, pipe);
	if (IS_ERR(mtk_crtc->mutex)) {
		ret = PTR_ERR(mtk_crtc->mutex);
		dev_err(dev, "Failed to get mutex: %d\n", ret);
		return ret;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct mtk_ddp_comp *comp;
		struct device_node *node;

		node = priv->comp_node[comp_id];
		comp = priv->ddp_comp[comp_id];
		if (!comp) {
785
			dev_err(dev, "Component %pOF not initialized\n", node);
786
			ret = -ENODEV;
787
			return ret;
788 789 790
		}

		mtk_crtc->ddp_comp[i] = comp;
791

792 793 794 795 796 797 798
		if (comp->funcs) {
			if (comp->funcs->gamma_set)
				gamma_lut_size = MTK_LUT_SIZE;

			if (comp->funcs->ctm_set)
				has_ctm = true;
		}
799 800
	}

S
Sean Paul 已提交
801 802 803 804 805
	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);

	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
					sizeof(struct drm_plane), GFP_KERNEL);
806

S
Sean Paul 已提交
807 808 809
	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
						    pipe);
810
		if (ret)
811
			return ret;
812 813
	}

814
	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
815
	if (ret < 0)
816
		return ret;
817 818 819

	if (gamma_lut_size)
		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
820
	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
821
	priv->num_pipes++;
822
	mutex_init(&mtk_crtc->hw_lock);
823

824 825
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	mtk_crtc->cmdq_client =
826 827
			cmdq_mbox_create(mtk_crtc->mmsys_dev,
					 drm_crtc_index(&mtk_crtc->base),
828 829 830 831 832 833
					 2000);
	if (IS_ERR(mtk_crtc->cmdq_client)) {
		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
			drm_crtc_index(&mtk_crtc->base));
		mtk_crtc->cmdq_client = NULL;
	}
834 835 836 837 838 839 840 841 842 843 844 845 846

	if (mtk_crtc->cmdq_client) {
		ret = of_property_read_u32_index(priv->mutex_node,
						 "mediatek,gce-events",
						 drm_crtc_index(&mtk_crtc->base),
						 &mtk_crtc->cmdq_event);
		if (ret) {
			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
				drm_crtc_index(&mtk_crtc->base));
			cmdq_mbox_destroy(mtk_crtc->cmdq_client);
			mtk_crtc->cmdq_client = NULL;
		}
	}
847
#endif
848 849
	return 0;
}