/*
 * Copyright (C) 2011 Samsung Electronics Co.Ltd
 * Authors:
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *
 * Based on drivers/media/video/s5p-tv/mixer_reg.c
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <drm/drmP.h>

#include "regs-mixer.h"
#include "regs-vp.h"

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/component.h>

#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"

#define MIXER_WIN_NR		3
#define VP_DEFAULT_WIN		2

/* The pixelformats that are natively supported by the mixer. */
#define MXR_FORMAT_RGB565	4
#define MXR_FORMAT_ARGB1555	5
#define MXR_FORMAT_ARGB4444	6
#define MXR_FORMAT_ARGB8888	7

struct mixer_resources {
	int			irq;
	void __iomem		*mixer_regs;
	void __iomem		*vp_regs;
	spinlock_t		reg_slock;
	struct clk		*mixer;
	struct clk		*vp;
	struct clk		*hdmi;
	struct clk		*sclk_mixer;
	struct clk		*sclk_hdmi;
	struct clk		*mout_mixer;
};

enum mixer_version_id {
	MXR_VER_0_0_0_16,
	MXR_VER_16_0_33_0,
	MXR_VER_128_0_0_184,
};

enum mixer_flag_bits {
	MXR_BIT_POWERED,
	MXR_BIT_VSYNC,
};

static const uint32_t mixer_formats[] = {
	DRM_FORMAT_XRGB4444,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
};

static const uint32_t vp_formats[] = {
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV21,
};

struct mixer_context {
	struct platform_device *pdev;
	struct device		*dev;
	struct drm_device	*drm_dev;
	struct exynos_drm_crtc	*crtc;
	struct exynos_drm_plane	planes[MIXER_WIN_NR];
	int			pipe;
	unsigned long		flags;
	bool			interlace;
	bool			vp_enabled;
	bool			has_sclk;

	struct mixer_resources	mixer_res;
	enum mixer_version_id	mxr_ver;
	wait_queue_head_t	wait_vsync_queue;
	atomic_t		wait_vsync_event;
};

struct mixer_drv_data {
	enum mixer_version_id	version;
	bool					is_vp_enabled;
	bool					has_sclk;
};

static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
	{
		.zpos = 0,
		.type = DRM_PLANE_TYPE_PRIMARY,
		.pixel_formats = mixer_formats,
		.num_pixel_formats = ARRAY_SIZE(mixer_formats),
		.capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE |
				EXYNOS_DRM_PLANE_CAP_ZPOS,
	}, {
		.zpos = 1,
		.type = DRM_PLANE_TYPE_CURSOR,
		.pixel_formats = mixer_formats,
		.num_pixel_formats = ARRAY_SIZE(mixer_formats),
		.capabilities = EXYNOS_DRM_PLANE_CAP_DOUBLE |
				EXYNOS_DRM_PLANE_CAP_ZPOS,
	}, {
		.zpos = 2,
		.type = DRM_PLANE_TYPE_OVERLAY,
		.pixel_formats = vp_formats,
		.num_pixel_formats = ARRAY_SIZE(vp_formats),
		.capabilities = EXYNOS_DRM_PLANE_CAP_SCALE |
				EXYNOS_DRM_PLANE_CAP_ZPOS,
	},
};

static const u8 filter_y_horiz_tap8[] = {
	0,	-1,	-1,	-1,	-1,	-1,	-1,	-1,
	-1,	-1,	-1,	-1,	-1,	0,	0,	0,
	0,	2,	4,	5,	6,	6,	6,	6,
	6,	5,	5,	4,	3,	2,	1,	1,
	0,	-6,	-12,	-16,	-18,	-20,	-21,	-20,
	-20,	-18,	-16,	-13,	-10,	-8,	-5,	-2,
	127,	126,	125,	121,	114,	107,	99,	89,
	79,	68,	57,	46,	35,	25,	16,	8,
};

static const u8 filter_y_vert_tap4[] = {
	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
	127,	126,	124,	118,	111,	102,	92,	81,
	70,	59,	48,	37,	27,	19,	11,	5,
	0,	5,	11,	19,	27,	37,	48,	59,
	70,	81,	92,	102,	111,	118,	124,	126,
	0,	0,	-1,	-1,	-2,	-3,	-4,	-5,
	-6,	-7,	-8,	-8,	-8,	-8,	-6,	-3,
};

static const u8 filter_cr_horiz_tap4[] = {
	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
	127,	126,	124,	118,	111,	102,	92,	81,
	70,	59,	48,	37,	27,	19,	11,	5,
};

static inline bool is_alpha_format(unsigned int pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_ARGB4444:
		return true;
	default:
		return false;
	}
}

static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
{
	return readl(res->vp_regs + reg_id);
}

static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
				 u32 val)
{
	writel(val, res->vp_regs + reg_id);
}

static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
				 u32 val, u32 mask)
{
	u32 old = vp_reg_read(res, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, res->vp_regs + reg_id);
}

static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
{
	return readl(res->mixer_regs + reg_id);
}

static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
				 u32 val)
{
	writel(val, res->mixer_regs + reg_id);
}

static inline void mixer_reg_writemask(struct mixer_resources *res,
				 u32 reg_id, u32 val, u32 mask)
{
	u32 old = mixer_reg_read(res, reg_id);

	val = (val & mask) | (old & ~mask);
	writel(val, res->mixer_regs + reg_id);
}

static void mixer_regs_dump(struct mixer_context *ctx)
{
#define DUMPREG(reg_id) \
do { \
	DRM_DEBUG_KMS(#reg_id " = %08x\n", \
		(u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
} while (0)

	DUMPREG(MXR_STATUS);
	DUMPREG(MXR_CFG);
	DUMPREG(MXR_INT_EN);
	DUMPREG(MXR_INT_STATUS);

	DUMPREG(MXR_LAYER_CFG);
	DUMPREG(MXR_VIDEO_CFG);

	DUMPREG(MXR_GRAPHIC0_CFG);
	DUMPREG(MXR_GRAPHIC0_BASE);
	DUMPREG(MXR_GRAPHIC0_SPAN);
	DUMPREG(MXR_GRAPHIC0_WH);
	DUMPREG(MXR_GRAPHIC0_SXY);
	DUMPREG(MXR_GRAPHIC0_DXY);

	DUMPREG(MXR_GRAPHIC1_CFG);
	DUMPREG(MXR_GRAPHIC1_BASE);
	DUMPREG(MXR_GRAPHIC1_SPAN);
	DUMPREG(MXR_GRAPHIC1_WH);
	DUMPREG(MXR_GRAPHIC1_SXY);
	DUMPREG(MXR_GRAPHIC1_DXY);
#undef DUMPREG
}

static void vp_regs_dump(struct mixer_context *ctx)
{
#define DUMPREG(reg_id) \
do { \
	DRM_DEBUG_KMS(#reg_id " = %08x\n", \
		(u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
} while (0)

	DUMPREG(VP_ENABLE);
	DUMPREG(VP_SRESET);
	DUMPREG(VP_SHADOW_UPDATE);
	DUMPREG(VP_FIELD_ID);
	DUMPREG(VP_MODE);
	DUMPREG(VP_IMG_SIZE_Y);
	DUMPREG(VP_IMG_SIZE_C);
	DUMPREG(VP_PER_RATE_CTRL);
	DUMPREG(VP_TOP_Y_PTR);
	DUMPREG(VP_BOT_Y_PTR);
	DUMPREG(VP_TOP_C_PTR);
	DUMPREG(VP_BOT_C_PTR);
	DUMPREG(VP_ENDIAN_MODE);
	DUMPREG(VP_SRC_H_POSITION);
	DUMPREG(VP_SRC_V_POSITION);
	DUMPREG(VP_SRC_WIDTH);
	DUMPREG(VP_SRC_HEIGHT);
	DUMPREG(VP_DST_H_POSITION);
	DUMPREG(VP_DST_V_POSITION);
	DUMPREG(VP_DST_WIDTH);
	DUMPREG(VP_DST_HEIGHT);
	DUMPREG(VP_H_RATIO);
	DUMPREG(VP_V_RATIO);

#undef DUMPREG
}

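/*
 * Load a table of 8-bit filter coefficients into consecutive 32-bit VP
 * registers, packing four coefficients into each register write.
 */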
static inline void vp_filter_set(struct mixer_resources *res,
		int reg_id, const u8 *data, unsigned int size)
{
	/* ensure the filter table is 4-byte aligned */
	BUG_ON(size & 3);
	for (; size; size -= 4, reg_id += 4, data += 4) {
		u32 val = (data[0] << 24) |  (data[1] << 16) |
			(data[2] << 8) | data[3];
		vp_reg_write(res, reg_id, val);
	}
}

static void vp_default_filter(struct mixer_resources *res)
{
	vp_filter_set(res, VP_POLY8_Y0_LL,
		filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
	vp_filter_set(res, VP_POLY4_Y0_LL,
		filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
	vp_filter_set(res, VP_POLY4_C0_LL,
		filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}

static void mixer_cfg_gfx_blend(struct mixer_context *ctx, unsigned int win,
				bool alpha)
{
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val;

	val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
	if (alpha) {
		/* blending based on pixel alpha */
		val |= MXR_GRP_CFG_BLEND_PRE_MUL;
		val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
	}
	mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
			    val, MXR_GRP_CFG_MISC_MASK);
}

static void mixer_cfg_vp_blend(struct mixer_context *ctx)
{
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val;

	/*
	 * No blending at the moment since the NV12/NV21 pixelformats don't
	 * have an alpha channel. However the mixer supports a global alpha
	 * value for a layer. Once this functionality is exposed, we can
	 * support blending of the video layer through this.
	 */
	val = 0;
	mixer_reg_write(res, MXR_VIDEO_CFG, val);
}

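/*
 * Enable or disable synchronization of register updates with vsync.
 * Callers disable it while reprogramming the windows and re-enable it
 * once the new configuration is complete.
 */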
static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
{
	struct mixer_resources *res = &ctx->mixer_res;

	/* block update on vsync */
	mixer_reg_writemask(res, MXR_STATUS, enable ?
			MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);

	if (ctx->vp_enabled)
		vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
			VP_SHADOW_UPDATE_ENABLE : 0);
}

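/*
 * Select progressive or interlaced scan and, on IPs older than
 * 128.0.0.184, the SD/HD scan mode matching the display height.
 */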
static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
{
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val;

	/* choosing between interlace and progressive mode */
	val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
				MXR_CFG_SCAN_PROGRESSIVE);

	if (ctx->mxr_ver != MXR_VER_128_0_0_184) {
		/* choosing between proper HD and SD mode */
		if (height <= 480)
			val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
		else if (height <= 576)
			val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
		else if (height <= 720)
			val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
		else if (height <= 1080)
			val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
		else
			val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
	}

	mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
}

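/*
 * Pick the RGB colorimetry for the output: full-range BT.601 for SD
 * heights, limited-range BT.709 (with explicit CSC coefficients)
 * otherwise.
 */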
static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
{
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val;

	if (height == 480) {
		val = MXR_CFG_RGB601_0_255;
	} else if (height == 576) {
		val = MXR_CFG_RGB601_0_255;
	} else if (height == 720) {
		val = MXR_CFG_RGB709_16_235;
		mixer_reg_write(res, MXR_CM_COEFF_Y,
				(1 << 30) | (94 << 20) | (314 << 10) |
				(32 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CB,
				(972 << 20) | (851 << 10) | (225 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CR,
				(225 << 20) | (820 << 10) | (1004 << 0));
	} else if (height == 1080) {
		val = MXR_CFG_RGB709_16_235;
		mixer_reg_write(res, MXR_CM_COEFF_Y,
				(1 << 30) | (94 << 20) | (314 << 10) |
				(32 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CB,
				(972 << 20) | (851 << 10) | (225 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CR,
				(225 << 20) | (820 << 10) | (1004 << 0));
	} else {
		val = MXR_CFG_RGB709_16_235;
		mixer_reg_write(res, MXR_CM_COEFF_Y,
				(1 << 30) | (94 << 20) | (314 << 10) |
				(32 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CB,
				(972 << 20) | (851 << 10) | (225 << 0));
		mixer_reg_write(res, MXR_CM_COEFF_CR,
				(225 << 20) | (820 << 10) | (1004 << 0));
	}

	mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
}

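/* Enable or disable a single window and program its blending priority. */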
static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
			    unsigned int priority, bool enable)
{
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val = enable ? ~0 : 0;

	switch (win) {
	case 0:
		mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
		mixer_reg_writemask(res, MXR_LAYER_CFG,
				    MXR_LAYER_CFG_GRP0_VAL(priority),
				    MXR_LAYER_CFG_GRP0_MASK);
		break;
	case 1:
		mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
		mixer_reg_writemask(res, MXR_LAYER_CFG,
				    MXR_LAYER_CFG_GRP1_VAL(priority),
				    MXR_LAYER_CFG_GRP1_MASK);
		break;
	case VP_DEFAULT_WIN:
		if (ctx->vp_enabled) {
			vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
			mixer_reg_writemask(res, MXR_CFG, val,
				MXR_CFG_VP_ENABLE);
			mixer_reg_writemask(res, MXR_LAYER_CFG,
					    MXR_LAYER_CFG_VP_VAL(priority),
					    MXR_LAYER_CFG_VP_MASK);
		}
		break;
	}
}

static void mixer_run(struct mixer_context *ctx)
{
	struct mixer_resources *res = &ctx->mixer_res;

	mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
}

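/* Stop the mixer and poll for up to roughly 200ms until it reports idle. */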
static void mixer_stop(struct mixer_context *ctx)
{
	struct mixer_resources *res = &ctx->mixer_res;
	int timeout = 20;

	mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);

	while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
			--timeout)
		usleep_range(10000, 12000);
}

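/*
 * Program the video processor (VP) window from the plane state:
 * buffer addresses, source/destination geometry, scaling ratios and
 * the scan mode derived from the current CRTC mode.
 */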
static void vp_video_buffer(struct mixer_context *ctx,
			    struct exynos_drm_plane *plane)
{
	struct exynos_drm_plane_state *state =
				to_exynos_plane_state(plane->base.state);
	struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
	struct mixer_resources *res = &ctx->mixer_res;
	struct drm_framebuffer *fb = state->base.fb;
	unsigned long flags;
	dma_addr_t luma_addr[2], chroma_addr[2];
	bool tiled_mode = false;
	bool crcb_mode = false;
	u32 val;

	switch (fb->pixel_format) {
	case DRM_FORMAT_NV12:
		crcb_mode = false;
		break;
	case DRM_FORMAT_NV21:
		crcb_mode = true;
		break;
	default:
		DRM_ERROR("pixel format for vp is wrong [%d].\n",
				fb->pixel_format);
		return;
	}

	luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0);
	chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		ctx->interlace = true;
		if (tiled_mode) {
			luma_addr[1] = luma_addr[0] + 0x40;
			chroma_addr[1] = chroma_addr[0] + 0x40;
		} else {
			luma_addr[1] = luma_addr[0] + fb->pitches[0];
			chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
		}
	} else {
		ctx->interlace = false;
		luma_addr[1] = 0;
		chroma_addr[1] = 0;
	}

	spin_lock_irqsave(&res->reg_slock, flags);

	/* interlace or progressive scan mode */
	val = (ctx->interlace ? ~0 : 0);
	vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);

	/* setup format */
	val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
	val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
	vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);

	/* setting size of input image */
	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
		VP_IMG_VSIZE(fb->height));
	/* chroma height has to be reduced by 2 to avoid chroma distortions */
	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
		VP_IMG_VSIZE(fb->height / 2));

	vp_reg_write(res, VP_SRC_WIDTH, state->src.w);
	vp_reg_write(res, VP_SRC_HEIGHT, state->src.h);
	vp_reg_write(res, VP_SRC_H_POSITION,
			VP_SRC_H_POSITION_VAL(state->src.x));
	vp_reg_write(res, VP_SRC_V_POSITION, state->src.y);

	vp_reg_write(res, VP_DST_WIDTH, state->crtc.w);
	vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x);
	if (ctx->interlace) {
		vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2);
		vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2);
	} else {
		vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h);
		vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y);
	}

	vp_reg_write(res, VP_H_RATIO, state->h_ratio);
	vp_reg_write(res, VP_V_RATIO, state->v_ratio);

	vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);

	/* set buffer address to vp */
	vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
	vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
	vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
	vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);

	mixer_cfg_scan(ctx, mode->vdisplay);
	mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
	mixer_cfg_layer(ctx, plane->index, state->zpos + 1, true);
	mixer_cfg_vp_blend(ctx);
	mixer_run(ctx);

	spin_unlock_irqrestore(&res->reg_slock, flags);

	mixer_regs_dump(ctx);
	vp_regs_dump(ctx);
}

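/* Trigger MXR_CFG_LAYER_UPDATE so pending window changes take effect. */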
static void mixer_layer_update(struct mixer_context *ctx)
{
	struct mixer_resources *res = &ctx->mixer_res;

	mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
}

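/*
 * Program one of the RGB graphics windows from the plane state:
 * format, span, geometry, base address and blending configuration.
 */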
static void mixer_graph_buffer(struct mixer_context *ctx,
			       struct exynos_drm_plane *plane)
{
	struct exynos_drm_plane_state *state =
				to_exynos_plane_state(plane->base.state);
	struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
	struct mixer_resources *res = &ctx->mixer_res;
	struct drm_framebuffer *fb = state->base.fb;
	unsigned long flags;
	unsigned int win = plane->index;
	unsigned int x_ratio = 0, y_ratio = 0;
	unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
	dma_addr_t dma_addr;
	unsigned int fmt;
	u32 val;

	switch (fb->pixel_format) {
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fmt = MXR_FORMAT_ARGB4444;
		break;

	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fmt = MXR_FORMAT_ARGB1555;
		break;

	case DRM_FORMAT_RGB565:
		fmt = MXR_FORMAT_RGB565;
		break;

	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fmt = MXR_FORMAT_ARGB8888;
		break;

	default:
		DRM_DEBUG_KMS("pixelformat unsupported by mixer\n");
		return;
	}

	/* ratio is already checked by common plane code */
	x_ratio = state->h_ratio == (1 << 15);
	y_ratio = state->v_ratio == (1 << 15);

	dst_x_offset = state->crtc.x;
	dst_y_offset = state->crtc.y;

	/* converting dma address base and source offset */
	dma_addr = exynos_drm_fb_dma_addr(fb, 0)
		+ (state->src.x * fb->bits_per_pixel >> 3)
		+ (state->src.y * fb->pitches[0]);
	src_x_offset = 0;
	src_y_offset = 0;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		ctx->interlace = true;
	else
		ctx->interlace = false;

	spin_lock_irqsave(&res->reg_slock, flags);

	/* setup format */
	mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
		MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);

	/* setup geometry */
	mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
			fb->pitches[0] / (fb->bits_per_pixel >> 3));

	/* setup display size */
	if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
		win == DEFAULT_WIN) {
		val  = MXR_MXR_RES_HEIGHT(mode->vdisplay);
		val |= MXR_MXR_RES_WIDTH(mode->hdisplay);
		mixer_reg_write(res, MXR_RESOLUTION, val);
	}

	val  = MXR_GRP_WH_WIDTH(state->src.w);
	val |= MXR_GRP_WH_HEIGHT(state->src.h);
	val |= MXR_GRP_WH_H_SCALE(x_ratio);
	val |= MXR_GRP_WH_V_SCALE(y_ratio);
	mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);

	/* setup offsets in source image */
	val  = MXR_GRP_SXY_SX(src_x_offset);
	val |= MXR_GRP_SXY_SY(src_y_offset);
	mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);

	/* setup offsets in display image */
	val  = MXR_GRP_DXY_DX(dst_x_offset);
	val |= MXR_GRP_DXY_DY(dst_y_offset);
	mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);

	/* set buffer address to mixer */
	mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);

	mixer_cfg_scan(ctx, mode->vdisplay);
	mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
	mixer_cfg_layer(ctx, win, state->zpos + 1, true);
	mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->pixel_format));

	/* layer update mandatory for mixer 16.0.33.0 */
	if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
		ctx->mxr_ver == MXR_VER_128_0_0_184)
		mixer_layer_update(ctx);

	mixer_run(ctx);

	spin_unlock_irqrestore(&res->reg_slock, flags);

	mixer_regs_dump(ctx);
}

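/* Soft-reset the video processor and wait for the reset to complete. */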
static void vp_win_reset(struct mixer_context *ctx)
{
	struct mixer_resources *res = &ctx->mixer_res;
	int tries = 100;

	vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
	for (tries = 100; tries; --tries) {
		/* waiting until VP_SRESET_PROCESSING is 0 */
		if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
			break;
		mdelay(10);
	}
	WARN(tries == 0, "failed to reset Video Processor\n");
}

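/*
 * Bring the mixer to its default state: HDMI output path, RGB888
 * output format, 16-beat DMA bursts, default layer priorities and
 * background colors, with all layers disabled.
 */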
static void mixer_win_reset(struct mixer_context *ctx)
{
	struct mixer_resources *res = &ctx->mixer_res;
	unsigned long flags;

	spin_lock_irqsave(&res->reg_slock, flags);

	mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);

	/* set output in RGB888 mode */
	mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);

	/* 16 beat burst in DMA */
	mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
		MXR_STATUS_BURST_MASK);

	/* reset default layer priority */
	mixer_reg_write(res, MXR_LAYER_CFG, 0);

	/* setting background color */
	mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
	mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
	mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);

	if (ctx->vp_enabled) {
		/* configuration of Video Processor Registers */
		vp_win_reset(ctx);
		vp_default_filter(res);
	}

	/* disable all layers */
	mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
	mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
	if (ctx->vp_enabled)
		mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);

	spin_unlock_irqrestore(&res->reg_slock, flags);
}

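/*
 * Vsync interrupt handler: in interlaced mode the handler bails out
 * until the shadow registers have been latched; otherwise it signals
 * vblank, finishes pending plane updates and wakes vsync waiters.
 */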
static irqreturn_t mixer_irq_handler(int irq, void *arg)
{
	struct mixer_context *ctx = arg;
	struct mixer_resources *res = &ctx->mixer_res;
	u32 val, base, shadow;
	int win;

	spin_lock(&res->reg_slock);

	/* read interrupt status for handling and clearing flags for VSYNC */
	val = mixer_reg_read(res, MXR_INT_STATUS);

	/* handling VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		/* the vsync interrupt uses different bits for read and clear */
		val |= MXR_INT_CLEAR_VSYNC;
		val &= ~MXR_INT_STATUS_VSYNC;

		/* interlaced scan requires checking the shadow registers */
		if (ctx->interlace) {
			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
			if (base != shadow)
				goto out;

			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
			if (base != shadow)
				goto out;
		}

		drm_crtc_handle_vblank(&ctx->crtc->base);
		for (win = 0 ; win < MIXER_WIN_NR ; win++) {
			struct exynos_drm_plane *plane = &ctx->planes[win];

			if (!plane->pending_fb)
				continue;

			exynos_drm_crtc_finish_update(ctx->crtc, plane);
		}

		/* set wait vsync event to zero and wake up queue. */
		if (atomic_read(&ctx->wait_vsync_event)) {
			atomic_set(&ctx->wait_vsync_event, 0);
			wake_up(&ctx->wait_vsync_queue);
		}
	}

out:
	/* clear interrupts */
	mixer_reg_write(res, MXR_INT_STATUS, val);

	spin_unlock(&res->reg_slock);

	return IRQ_HANDLED;
}

static int mixer_resources_init(struct mixer_context *mixer_ctx)
{
	struct device *dev = &mixer_ctx->pdev->dev;
	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
	struct resource *res;
	int ret;

	spin_lock_init(&mixer_res->reg_slock);

	mixer_res->mixer = devm_clk_get(dev, "mixer");
	if (IS_ERR(mixer_res->mixer)) {
		dev_err(dev, "failed to get clock 'mixer'\n");
		return -ENODEV;
	}

	mixer_res->hdmi = devm_clk_get(dev, "hdmi");
	if (IS_ERR(mixer_res->hdmi)) {
		dev_err(dev, "failed to get clock 'hdmi'\n");
		return PTR_ERR(mixer_res->hdmi);
	}

	mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
	if (IS_ERR(mixer_res->sclk_hdmi)) {
		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
		return -ENODEV;
	}
	res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(dev, "get memory resource failed.\n");
		return -ENXIO;
	}

	mixer_res->mixer_regs = devm_ioremap(dev, res->start,
							resource_size(res));
	if (mixer_res->mixer_regs == NULL) {
		dev_err(dev, "register mapping failed.\n");
		return -ENXIO;
	}

	res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(dev, "get interrupt resource failed.\n");
		return -ENXIO;
	}

	ret = devm_request_irq(dev, res->start, mixer_irq_handler,
						0, "drm_mixer", mixer_ctx);
	if (ret) {
		dev_err(dev, "request interrupt failed.\n");
		return ret;
	}
	mixer_res->irq = res->start;

	return 0;
}

static int vp_resources_init(struct mixer_context *mixer_ctx)
{
	struct device *dev = &mixer_ctx->pdev->dev;
	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
	struct resource *res;

	mixer_res->vp = devm_clk_get(dev, "vp");
	if (IS_ERR(mixer_res->vp)) {
		dev_err(dev, "failed to get clock 'vp'\n");
		return -ENODEV;
	}

	if (mixer_ctx->has_sclk) {
		mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
		if (IS_ERR(mixer_res->sclk_mixer)) {
			dev_err(dev, "failed to get clock 'sclk_mixer'\n");
			return -ENODEV;
		}
		mixer_res->mout_mixer = devm_clk_get(dev, "mout_mixer");
		if (IS_ERR(mixer_res->mout_mixer)) {
			dev_err(dev, "failed to get clock 'mout_mixer'\n");
			return -ENODEV;
		}

		if (mixer_res->sclk_hdmi && mixer_res->mout_mixer)
			clk_set_parent(mixer_res->mout_mixer,
				       mixer_res->sclk_hdmi);
	}

	res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
	if (res == NULL) {
		dev_err(dev, "get memory resource failed.\n");
		return -ENXIO;
	}

	mixer_res->vp_regs = devm_ioremap(dev, res->start,
							resource_size(res));
	if (mixer_res->vp_regs == NULL) {
		dev_err(dev, "register mapping failed.\n");
		return -ENXIO;
	}

	return 0;
}

static int mixer_initialize(struct mixer_context *mixer_ctx,
			struct drm_device *drm_dev)
{
	int ret;
	struct exynos_drm_private *priv;
	priv = drm_dev->dev_private;

	mixer_ctx->drm_dev = drm_dev;
	mixer_ctx->pipe = priv->pipe++;

	/* acquire resources: regs, irqs, clocks */
	ret = mixer_resources_init(mixer_ctx);
	if (ret) {
		DRM_ERROR("mixer_resources_init failed ret=%d\n", ret);
		return ret;
	}

	if (mixer_ctx->vp_enabled) {
		/* acquire vp resources: regs, irqs, clocks */
		ret = vp_resources_init(mixer_ctx);
		if (ret) {
			DRM_ERROR("vp_resources_init failed ret=%d\n", ret);
			return ret;
		}
	}

	ret = drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
	if (ret)
		priv->pipe--;

	return ret;
}

static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
	drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
}

static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
{
	struct mixer_context *mixer_ctx = crtc->ctx;
	struct mixer_resources *res = &mixer_ctx->mixer_res;

	__set_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
		return 0;

	/* enable vsync interrupt */
	mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
	mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);

	return 0;
}

static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
{
	struct mixer_context *mixer_ctx = crtc->ctx;
	struct mixer_resources *res = &mixer_ctx->mixer_res;

	__clear_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);

	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
		return;

	/* disable vsync interrupt */
	mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
	mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}

static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
{
	struct mixer_context *mixer_ctx = crtc->ctx;

	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
		return;

	mixer_vsync_set_update(mixer_ctx, false);
}

static void mixer_update_plane(struct exynos_drm_crtc *crtc,
			       struct exynos_drm_plane *plane)
{
	struct mixer_context *mixer_ctx = crtc->ctx;

	DRM_DEBUG_KMS("win: %d\n", plane->index);

	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
		return;

	if (plane->index == VP_DEFAULT_WIN)
		vp_video_buffer(mixer_ctx, plane);
	else
		mixer_graph_buffer(mixer_ctx, plane);
}

static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
				struct exynos_drm_plane *plane)
{
	struct mixer_context *mixer_ctx = crtc->ctx;
	struct mixer_resources *res = &mixer_ctx->mixer_res;
	unsigned long flags;

	DRM_DEBUG_KMS("win: %d\n", plane->index);

	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
		return;

	spin_lock_irqsave(&res->reg_slock, flags);
	mixer_cfg_layer(mixer_ctx, plane->index, 0, false);
	spin_unlock_irqrestore(&res->reg_slock, flags);
}

static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
{
	struct mixer_context *mixer_ctx = crtc->ctx;

	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
		return;

	mixer_vsync_set_update(mixer_ctx, true);
}

static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
{
	struct mixer_context *mixer_ctx = crtc->ctx;
	int err;

	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
		return;

	err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe);
	if (err < 0) {
		DRM_DEBUG_KMS("failed to acquire vblank counter\n");
		return;
	}

	atomic_set(&mixer_ctx->wait_vsync_event, 1);

	/*
	 * wait for MIXER to signal VSYNC interrupt or return after
	 * timeout which is set to 50ms (refresh rate of 20).
	 */
	if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
				!atomic_read(&mixer_ctx->wait_vsync_event),
				HZ/20))
		DRM_DEBUG_KMS("vblank wait timed out.\n");

	drm_vblank_put(mixer_ctx->drm_dev, mixer_ctx->pipe);
}

static void mixer_enable(struct exynos_drm_crtc *crtc)
{
	struct mixer_context *ctx = crtc->ctx;
	struct mixer_resources *res = &ctx->mixer_res;

	if (test_bit(MXR_BIT_POWERED, &ctx->flags))
		return;

	pm_runtime_get_sync(ctx->dev);

	exynos_drm_pipe_clk_enable(crtc, true);

	mixer_vsync_set_update(ctx, false);

	mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);

	if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
		mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
		mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
	}
	mixer_win_reset(ctx);

	mixer_vsync_set_update(ctx, true);

	set_bit(MXR_BIT_POWERED, &ctx->flags);
}

static void mixer_disable(struct exynos_drm_crtc *crtc)
{
	struct mixer_context *ctx = crtc->ctx;
	int i;

	if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
		return;

	mixer_stop(ctx);
	mixer_regs_dump(ctx);

	for (i = 0; i < MIXER_WIN_NR; i++)
		mixer_disable_plane(crtc, &ctx->planes[i]);

	exynos_drm_pipe_clk_enable(crtc, false);

	pm_runtime_put(ctx->dev);

	clear_bit(MXR_BIT_POWERED, &ctx->flags);
}

/* Only valid for Mixer version 16.0.33.0 */
static int mixer_atomic_check(struct exynos_drm_crtc *crtc,
		       struct drm_crtc_state *state)
{
	struct drm_display_mode *mode = &state->adjusted_mode;
	u32 w, h;

	w = mode->hdisplay;
	h = mode->vdisplay;

	DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n",
		mode->hdisplay, mode->vdisplay, mode->vrefresh,
		(mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);

	if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
		(w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
		(w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
		return 0;

	return -EINVAL;
}

static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
	.enable			= mixer_enable,
	.disable		= mixer_disable,
	.enable_vblank		= mixer_enable_vblank,
	.disable_vblank		= mixer_disable_vblank,
	.wait_for_vblank	= mixer_wait_for_vblank,
	.atomic_begin		= mixer_atomic_begin,
	.update_plane		= mixer_update_plane,
	.disable_plane		= mixer_disable_plane,
	.atomic_flush		= mixer_atomic_flush,
	.atomic_check		= mixer_atomic_check,
};

static struct mixer_drv_data exynos5420_mxr_drv_data = {
	.version = MXR_VER_128_0_0_184,
	.is_vp_enabled = 0,
};

static struct mixer_drv_data exynos5250_mxr_drv_data = {
	.version = MXR_VER_16_0_33_0,
	.is_vp_enabled = 0,
};

static struct mixer_drv_data exynos4212_mxr_drv_data = {
	.version = MXR_VER_0_0_0_16,
	.is_vp_enabled = 1,
};

static struct mixer_drv_data exynos4210_mxr_drv_data = {
	.version = MXR_VER_0_0_0_16,
	.is_vp_enabled = 1,
	.has_sclk = 1,
};

static struct of_device_id mixer_match_types[] = {
	{
		.compatible = "samsung,exynos4210-mixer",
		.data	= &exynos4210_mxr_drv_data,
	}, {
		.compatible = "samsung,exynos4212-mixer",
		.data	= &exynos4212_mxr_drv_data,
	}, {
		.compatible = "samsung,exynos5-mixer",
		.data	= &exynos5250_mxr_drv_data,
	}, {
		.compatible = "samsung,exynos5250-mixer",
		.data	= &exynos5250_mxr_drv_data,
	}, {
		.compatible = "samsung,exynos5420-mixer",
		.data	= &exynos5420_mxr_drv_data,
	}, {
		/* end node */
	}
};
MODULE_DEVICE_TABLE(of, mixer_match_types);

static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
	struct mixer_context *ctx = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct exynos_drm_plane *exynos_plane;
	unsigned int i;
	int ret;

	ret = mixer_initialize(ctx, drm_dev);
	if (ret)
		return ret;

	for (i = 0; i < MIXER_WIN_NR; i++) {
		if (i == VP_DEFAULT_WIN && !ctx->vp_enabled)
			continue;

		ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
					1 << ctx->pipe, &plane_configs[i]);
		if (ret)
			return ret;
	}

	exynos_plane = &ctx->planes[DEFAULT_WIN];
	ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
					   ctx->pipe, EXYNOS_DISPLAY_TYPE_HDMI,
					   &mixer_crtc_ops, ctx);
	if (IS_ERR(ctx->crtc)) {
		mixer_ctx_remove(ctx);
		ret = PTR_ERR(ctx->crtc);
		goto free_ctx;
	}

	return 0;

free_ctx:
	devm_kfree(dev, ctx);
	return ret;
}

static void mixer_unbind(struct device *dev, struct device *master, void *data)
{
	struct mixer_context *ctx = dev_get_drvdata(dev);

	mixer_ctx_remove(ctx);
}

static const struct component_ops mixer_component_ops = {
	.bind	= mixer_bind,
	.unbind	= mixer_unbind,
};

static int mixer_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mixer_drv_data *drv;
	struct mixer_context *ctx;
	int ret;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		DRM_ERROR("failed to alloc mixer context.\n");
		return -ENOMEM;
	}

	if (dev->of_node) {
		const struct of_device_id *match;

		match = of_match_node(mixer_match_types, dev->of_node);
		drv = (struct mixer_drv_data *)match->data;
	}

	ctx->pdev = pdev;
	ctx->dev = dev;
	ctx->vp_enabled = drv->is_vp_enabled;
	ctx->has_sclk = drv->has_sclk;
	ctx->mxr_ver = drv->version;
	init_waitqueue_head(&ctx->wait_vsync_queue);
	atomic_set(&ctx->wait_vsync_event, 0);

	platform_set_drvdata(pdev, ctx);

	ret = component_add(&pdev->dev, &mixer_component_ops);
	if (!ret)
		pm_runtime_enable(dev);

	return ret;
}

static int mixer_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);

	component_del(&pdev->dev, &mixer_component_ops);

	return 0;
}

static int __maybe_unused exynos_mixer_suspend(struct device *dev)
{
	struct mixer_context *ctx = dev_get_drvdata(dev);
	struct mixer_resources *res = &ctx->mixer_res;

	clk_disable_unprepare(res->hdmi);
	clk_disable_unprepare(res->mixer);
	if (ctx->vp_enabled) {
		clk_disable_unprepare(res->vp);
		if (ctx->has_sclk)
			clk_disable_unprepare(res->sclk_mixer);
	}

	return 0;
}

static int __maybe_unused exynos_mixer_resume(struct device *dev)
{
	struct mixer_context *ctx = dev_get_drvdata(dev);
	struct mixer_resources *res = &ctx->mixer_res;
	int ret;

	ret = clk_prepare_enable(res->mixer);
	if (ret < 0) {
		DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
		return ret;
	}
	ret = clk_prepare_enable(res->hdmi);
	if (ret < 0) {
		DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
		return ret;
	}
	if (ctx->vp_enabled) {
		ret = clk_prepare_enable(res->vp);
		if (ret < 0) {
			DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
				  ret);
			return ret;
		}
		if (ctx->has_sclk) {
			ret = clk_prepare_enable(res->sclk_mixer);
			if (ret < 0) {
				DRM_ERROR("Failed to prepare_enable the " \
					   "sclk_mixer clk [%d]\n",
					  ret);
				return ret;
			}
		}
	}

	return 0;
}

static const struct dev_pm_ops exynos_mixer_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
};

struct platform_driver mixer_driver = {
	.driver = {
		.name = "exynos-mixer",
		.owner = THIS_MODULE,
		.pm = &exynos_mixer_pm_ops,
		.of_match_table = mixer_match_types,
	},
	.probe = mixer_probe,
	.remove = mixer_remove,
};