mdp5_ctl.c

/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "mdp5_kms.h"
#include "mdp5_ctl.h"

/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all CRTCs.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 *
 * Because the number of CTLs can be less than the number of CRTCs,
 * CTLs are dynamically allocated from the pool only when a CRTC
 * requests one (in mdp5_crtc_mode_set()).
 */
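
/*
 * Typical usage, as a rough sketch only (the actual call sequence lives in
 * the CRTC code, e.g. mdp5_crtc_mode_set(), and may differ in detail; intf,
 * lm and blend_cfg below stand for whatever the caller has chosen):
 *
 *	ctl = mdp5_ctlm_request(ctl_mgr, crtc);
 *	mdp5_ctl_set_intf(ctl, intf);
 *	mdp5_ctl_blend(ctl, lm, blend_cfg);
 *	mdp5_ctl_commit(ctl, mdp5_ctl_get_flush(ctl));
 *	...
 *	mdp5_ctl_release(ctl);
 */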

struct mdp5_ctl {
	u32 id;

	/* whether this CTL has been allocated or not: */
	bool busy;

	/* memory output connection (@see mdp5_ctl_mode): */
	u32 mode;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* flush mask used to commit CTL registers */
	u32 flush_mask;

	bool cursor_on;

	struct drm_crtc *crtc;
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};

static struct mdp5_ctl_manager mdp5_ctl_mgr;

static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}


int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
{
	unsigned long flags;
	static const enum mdp5_intfnum intfnum[] = {
			INTF0, INTF1, INTF2, INTF3,
	};

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
			MDP5_CTL_OP_MODE(ctl->mode) |
			MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	return 0;
}

int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
	unsigned long flags;
	u32 blend_cfg;
	int lm;

	lm = mdp5_crtc_get_lm(ctl->crtc);
	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
				ctl->id, lm);
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));

	if (enable)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->cursor_on = enable;

	return 0;
}


int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
	unsigned long flags;

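	/* preserve the cursor enable bit when the LAYER register is rewritten */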
	if (ctl->cursor_on)
		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	return 0;
}

int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
	unsigned long flags;

	if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
		int lm = mdp5_crtc_get_lm(ctl->crtc);

		if (unlikely(WARN_ON(lm < 0))) {
			dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
					ctl->id, lm);
			return -EINVAL;
		}

		/* for current targets, cursor bit is the same as LM bit */
		flush_mask |= mdp_ctl_flush_mask_lm(lm);
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	return 0;
}

u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
{
	return ctl->flush_mask;
}

void mdp5_ctl_release(struct mdp5_ctl *ctl)
{
	struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
	unsigned long flags;

	if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
				ctl->id, ctl->busy);
		return;
	}

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	ctl->busy = false;
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);

	DBG("CTL %d released", ctl->id);
}

/*
 * mdp5_ctlm_request() - CTL dynamic allocation
 *
 * Note: the current implementation assumes only one CRTC per CTL
 *
 * @return the first free CTL, or NULL if the pool is exhausted
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
		struct drm_crtc *crtc)
{
	struct mdp5_ctl *ctl = NULL;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

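	/* scan the pool for the first CTL that is not currently allocated */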
	for (c = 0; c < ctl_mgr->nctl; c++)
		if (!ctl_mgr->ctls[c].busy)
			break;

	if (unlikely(c >= ctl_mgr->nctl)) {
		dev_err(ctl_mgr->dev->dev, "No more CTL available!");
		goto unlock;
	}

	ctl = &ctl_mgr->ctls[c];

	ctl->crtc = crtc;
	ctl->busy = true;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}

void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

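	/* clear the operation mode (CTL_OP) of every CTL in the pool */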
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	/*
	 * Nothing to free here: the CTL manager is the statically allocated
	 * mdp5_ctl_mgr object above, not a kmalloc'ed one.
	 */
}

struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
	struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
	const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
		dev_err(dev->dev, "Increase static pool size to at least %d\n",
				ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			goto fail;
		}
		ctl->id = c;
		ctl->mode = MODE_NONE;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
		ctl->busy = false;
		spin_lock_init(&ctl->hw_lock);
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}