/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "mdp5_kms.h"
#include "mdp5_ctl.h"

/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all CRTCs.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 *
 * Because the number of CTLs can be less than the number of CRTCs,
 * a CTL is dynamically allocated from the pool only once a CRTC is
 * requested by the client (in mdp5_crtc_mode_set()).
 */
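
/*
 * Typical lifecycle, as an illustrative (non-authoritative) sketch; the
 * 'intf', 'lm', 'stage' and 'flush_mask' values are assumed to come from
 * the CRTC/encoder code:
 *
 *	ctl = mdp5_ctlm_request(ctl_mgr, intf->num);
 *	mdp5_ctl_set_pipeline(ctl, intf, lm);
 *	mdp5_ctl_blend(ctl, stage, stage_cnt, ctl_blend_op_flags);
 *	mdp5_ctl_commit(ctl, flush_mask);
 */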

struct op_mode {
	struct mdp5_interface intf;

	bool encoder_enabled;
	uint32_t start_mask;
};

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;
	int lm;

	/* whether this CTL has been allocated or not: */
	bool busy;

	/* Operation Mode Configuration for the Pipeline */
	struct op_mode pipeline;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}

static void set_display_intf(struct mdp5_kms *mdp5_kms,
		struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}

static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
		struct mdp5_interface *intf, int lm)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
		dev_err(mdp5_kms->dev->dev,
			"CTL %d is allocated by INTF %d, but used by INTF %d\n",
			ctl->id, ctl->pipeline.intf.num, intf->num);
		return -EINVAL;
	}

	ctl->lm = lm;

	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));

	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
				   mdp_ctl_flush_mask_encoder(intf);

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, intf);

	return 0;
}

static bool start_signal_needed(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;

	if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
		return false;

	switch (pipeline->intf.type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}

/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to be
 * issued in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

static void refill_start_mask(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;
	struct mdp5_interface *intf = &ctl->pipeline.intf;

	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);

	/*
	 * Writeback encoder needs to program & flush
	 * address registers for each page flip..
	 */
	if (intf->type == INTF_WB)
		pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
}

/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @enabled: true when the encoder is ready for data streaming; false otherwise.
 *
 * Note:
 * This encoder state is needed to trigger START signal (data path kickoff).
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
{
	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->pipeline.encoder_enabled = enabled;
	DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return 0;
}
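
/*
 * Illustrative sketch (the surrounding encoder code is assumed): a writeback
 * or command-mode DSI encoder reports readiness once its interface is up,
 * which arms the START trigger; the START signal itself is sent (here, or
 * from mdp5_ctl_commit()) once every bit in pipeline.start_mask has been
 * flushed:
 *
 *	mdp5_ctl_set_encoder_state(ctl, true);
 *	...
 *	mdp5_ctl_commit(ctl, flush_mask);
 */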

/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	int lm = ctl->lm;

	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
				ctl->id, lm);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));

	if (enable)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
	ctl->cursor_on = enable;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);

	return 0;
}
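
/*
 * Illustrative call sequence per the note above (a sketch, not the only
 * valid usage): the caller flushes the cursor bit, and mdp5_ctl_commit()
 * adds MDP5_CTL_FLUSH_CTL by itself because pending_ctl_trigger overlaps
 * the flush mask:
 *
 *	mdp5_ctl_set_cursor(ctl, 0, true);
 *	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_cursor(0));
 */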

static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
		enum mdp_mixer_stage_id stage)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
	default:	return 0;
	}
}

static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
		enum mdp_mixer_stage_id stage)
{
	if (stage < STAGE6)
		return 0;

	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
	case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
	case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
	case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
	case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
	case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
	case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
	default:	return 0;
	}
}

int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
	u32 ctl_blend_op_flags)
{
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	int i, start_stage;

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; i < start_stage + stage_cnt; i++) {
		blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
		blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
		blend_cfg, blend_ext_cfg);

	return 0;
}

u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	default:        return 0;
	}
}

u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0:  return MDP5_CTL_FLUSH_LM0;
	case 1:  return MDP5_CTL_FLUSH_LM1;
	case 2:  return MDP5_CTL_FLUSH_LM2;
	case 5:  return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}

static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, cursor bit is the same as LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);

	return sw_mask;
}

/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return H/W flushed bit mask.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct op_mode *pipeline = &ctl->pipeline;
	unsigned long flags;

	pipeline->start_mask &= ~flush_mask;

	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
			pipeline->start_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return flush_mask;
}
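
/*
 * Illustrative example (a sketch; the pipe and LM numbers are made up):
 * a page flip that reprograms SSPP_VIG0 feeding LM 0 would be flushed with:
 *
 *	u32 flush_mask = mdp_ctl_flush_mask_pipe(SSPP_VIG0) |
 *			 mdp_ctl_flush_mask_lm(0);
 *
 *	mdp5_ctl_commit(ctl, flush_mask);
 */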

u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
{
	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}

/*
 * mdp5_ctlm_request() - CTL allocation
 *
 * @return first free CTL, or NULL when the pool is exhausted
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
		int intf_num)
{
	struct mdp5_ctl *ctl = NULL;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	for (c = 0; c < ctl_mgr->nctl; c++)
		if (!ctl_mgr->ctls[c].busy)
			break;

	if (unlikely(c >= ctl_mgr->nctl)) {
		dev_err(ctl_mgr->dev->dev, "No more CTL available!");
		goto unlock;
	}

	ctl = &ctl_mgr->ctls[c];
	ctl->pipeline.intf.num = intf_num;
	ctl->lm = -1;
	ctl->busy = true;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}
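
/*
 * Usage note (illustrative sketch): the pool can run out, in which case
 * NULL is returned, so callers are expected to check the result:
 *
 *	ctl = mdp5_ctlm_request(ctl_mgr, intf->num);
 *	if (!ctl)
 *		goto fail;
 */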

void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}

struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		dev_err(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
		dev_err(dev->dev, "Increase static pool size to at least %d\n",
				ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->busy = false;
		spin_lock_init(&ctl->hw_lock);
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}